diff --git a/2017/Kube-Virt-v004.html b/2017/Kube-Virt-v004.html index 95c0148cb8..1212bae1a1 100644 --- a/2017/Kube-Virt-v004.html +++ b/2017/Kube-Virt-v004.html @@ -225,7 +225,7 @@ diff --git a/2017/Kube-Virt-v010.html b/2017/Kube-Virt-v010.html index edaf7173f0..35e3932600 100644 --- a/2017/Kube-Virt-v010.html +++ b/2017/Kube-Virt-v010.html @@ -225,7 +225,7 @@ diff --git a/2017/This-Week-in-Kube-Virt-1.html b/2017/This-Week-in-Kube-Virt-1.html index 758cc19a0f..c7c749a67b 100644 --- a/2017/This-Week-in-Kube-Virt-1.html +++ b/2017/This-Week-in-Kube-Virt-1.html @@ -225,7 +225,7 @@ diff --git a/2017/This-Week-in-Kube-Virt-10-base-10.html b/2017/This-Week-in-Kube-Virt-10-base-10.html index 83411a1d27..f6ef20ce24 100644 --- a/2017/This-Week-in-Kube-Virt-10-base-10.html +++ b/2017/This-Week-in-Kube-Virt-10-base-10.html @@ -225,7 +225,7 @@ diff --git a/2017/This-Week-in-Kube-Virt-11.html b/2017/This-Week-in-Kube-Virt-11.html index bbd10bde5d..3d4dc93bff 100644 --- a/2017/This-Week-in-Kube-Virt-11.html +++ b/2017/This-Week-in-Kube-Virt-11.html @@ -225,7 +225,7 @@ diff --git a/2017/This-Week-in-Kube-Virt-12.html b/2017/This-Week-in-Kube-Virt-12.html index 8a61a6d040..11418efa77 100644 --- a/2017/This-Week-in-Kube-Virt-12.html +++ b/2017/This-Week-in-Kube-Virt-12.html @@ -225,7 +225,7 @@ diff --git a/2017/This-Week-in-Kube-Virt-13.html b/2017/This-Week-in-Kube-Virt-13.html index dfc11f0701..e6daf42b82 100644 --- a/2017/This-Week-in-Kube-Virt-13.html +++ b/2017/This-Week-in-Kube-Virt-13.html @@ -225,7 +225,7 @@ diff --git a/2017/This-Week-in-Kube-Virt-14.html b/2017/This-Week-in-Kube-Virt-14.html index 693eb6ef67..429ae05870 100644 --- a/2017/This-Week-in-Kube-Virt-14.html +++ b/2017/This-Week-in-Kube-Virt-14.html @@ -225,7 +225,7 @@ diff --git a/2017/This-Week-in-Kube-Virt-15.html b/2017/This-Week-in-Kube-Virt-15.html index 77db785c75..ca6a9b1ae5 100644 --- a/2017/This-Week-in-Kube-Virt-15.html +++ b/2017/This-Week-in-Kube-Virt-15.html @@ -225,7 +225,7 @@ diff --git a/2017/This-Week-in-Kube-Virt-2.html b/2017/This-Week-in-Kube-Virt-2.html index 0ef2ee25b9..28d1ce8271 100644 --- a/2017/This-Week-in-Kube-Virt-2.html +++ b/2017/This-Week-in-Kube-Virt-2.html @@ -225,7 +225,7 @@ diff --git a/2017/This-Week-in-Kube-Virt-3.html b/2017/This-Week-in-Kube-Virt-3.html index 2b9faac40d..3f8b36dee5 100644 --- a/2017/This-Week-in-Kube-Virt-3.html +++ b/2017/This-Week-in-Kube-Virt-3.html @@ -225,7 +225,7 @@ diff --git a/2017/This-Week-in-Kube-Virt-4.html b/2017/This-Week-in-Kube-Virt-4.html index c78de17a25..7ef4d2b939 100644 --- a/2017/This-Week-in-Kube-Virt-4.html +++ b/2017/This-Week-in-Kube-Virt-4.html @@ -225,7 +225,7 @@ diff --git a/2017/This-Week-in-Kube-Virt-5.html b/2017/This-Week-in-Kube-Virt-5.html index b191cc6633..4ddc1f3791 100644 --- a/2017/This-Week-in-Kube-Virt-5.html +++ b/2017/This-Week-in-Kube-Virt-5.html @@ -225,7 +225,7 @@ diff --git a/2017/This-Week-in-Kube-Virt-6.html b/2017/This-Week-in-Kube-Virt-6.html index 6f06cd9311..c46a35e993 100644 --- a/2017/This-Week-in-Kube-Virt-6.html +++ b/2017/This-Week-in-Kube-Virt-6.html @@ -225,7 +225,7 @@ diff --git a/2017/This-Week-in-Kube-Virt-7.html b/2017/This-Week-in-Kube-Virt-7.html index bc2eadd607..a86cce6143 100644 --- a/2017/This-Week-in-Kube-Virt-7.html +++ b/2017/This-Week-in-Kube-Virt-7.html @@ -225,7 +225,7 @@ diff --git a/2017/This-Week-in-Kube-Virt-8.html b/2017/This-Week-in-Kube-Virt-8.html index 991b43bae1..018d2324c7 100644 --- a/2017/This-Week-in-Kube-Virt-8.html +++ b/2017/This-Week-in-Kube-Virt-8.html @@ -225,7 +225,7 @@ diff --git 
a/2017/This-Week-in-Kube-Virt-9.html b/2017/This-Week-in-Kube-Virt-9.html index 8ec59626b1..710a6ae2f0 100644 --- a/2017/This-Week-in-Kube-Virt-9.html +++ b/2017/This-Week-in-Kube-Virt-9.html @@ -225,7 +225,7 @@ diff --git a/2017/role-of-libvirt.html b/2017/role-of-libvirt.html index 963dd37b45..00b4433be6 100644 --- a/2017/role-of-libvirt.html +++ b/2017/role-of-libvirt.html @@ -225,7 +225,7 @@ diff --git a/2017/technology-comparison.html b/2017/technology-comparison.html index 8d50fa25f8..31156f2eb3 100644 --- a/2017/technology-comparison.html +++ b/2017/technology-comparison.html @@ -225,7 +225,7 @@ diff --git a/2018/CDI-DataVolumes.html b/2018/CDI-DataVolumes.html index 88656c93db..d351a6d52e 100644 --- a/2018/CDI-DataVolumes.html +++ b/2018/CDI-DataVolumes.html @@ -225,7 +225,7 @@ diff --git a/2018/Deploying-KubeVirt-on-a-Single-oVirt-VM.html b/2018/Deploying-KubeVirt-on-a-Single-oVirt-VM.html index aa764cce11..8f3f7fe2bb 100644 --- a/2018/Deploying-KubeVirt-on-a-Single-oVirt-VM.html +++ b/2018/Deploying-KubeVirt-on-a-Single-oVirt-VM.html @@ -225,7 +225,7 @@ diff --git a/2018/Deploying-VMs-on-Kubernetes-GlusterFS-KubeVirt.html b/2018/Deploying-VMs-on-Kubernetes-GlusterFS-KubeVirt.html index 8afd1444e1..326629f2b0 100644 --- a/2018/Deploying-VMs-on-Kubernetes-GlusterFS-KubeVirt.html +++ b/2018/Deploying-VMs-on-Kubernetes-GlusterFS-KubeVirt.html @@ -225,7 +225,7 @@ diff --git a/2018/KVM-Using-Device-Plugins.html b/2018/KVM-Using-Device-Plugins.html index 5d6b093966..d4b7f14478 100644 --- a/2018/KVM-Using-Device-Plugins.html +++ b/2018/KVM-Using-Device-Plugins.html @@ -225,7 +225,7 @@ diff --git a/2018/Kube-Virt-v020.html b/2018/Kube-Virt-v020.html index ce12324e27..8987be166a 100644 --- a/2018/Kube-Virt-v020.html +++ b/2018/Kube-Virt-v020.html @@ -225,7 +225,7 @@ diff --git a/2018/KubeVirt-API-Access-Control.html b/2018/KubeVirt-API-Access-Control.html index 5f604ef5ee..a6e2a77318 100644 --- a/2018/KubeVirt-API-Access-Control.html +++ b/2018/KubeVirt-API-Access-Control.html @@ -225,7 +225,7 @@ diff --git a/2018/KubeVirt-Memory-Overcommit.html b/2018/KubeVirt-Memory-Overcommit.html index 8b1bc2b6be..100d2779a6 100644 --- a/2018/KubeVirt-Memory-Overcommit.html +++ b/2018/KubeVirt-Memory-Overcommit.html @@ -225,7 +225,7 @@ diff --git a/2018/KubeVirt-Network-Deep-Dive.html b/2018/KubeVirt-Network-Deep-Dive.html index b20352c83f..c8329b1c91 100644 --- a/2018/KubeVirt-Network-Deep-Dive.html +++ b/2018/KubeVirt-Network-Deep-Dive.html @@ -225,7 +225,7 @@ diff --git a/2018/KubeVirt-Network-Rehash.html b/2018/KubeVirt-Network-Rehash.html index 088a8b4820..8ec25299d8 100644 --- a/2018/KubeVirt-Network-Rehash.html +++ b/2018/KubeVirt-Network-Rehash.html @@ -225,7 +225,7 @@ diff --git a/2018/KubeVirt-objects.html b/2018/KubeVirt-objects.html index 5d9117e89f..d3e0ecaa22 100644 --- a/2018/KubeVirt-objects.html +++ b/2018/KubeVirt-objects.html @@ -225,7 +225,7 @@ diff --git a/2018/Kubevirt-v0.7.0.html b/2018/Kubevirt-v0.7.0.html index a24a8ebe47..09413ccb4d 100644 --- a/2018/Kubevirt-v0.7.0.html +++ b/2018/Kubevirt-v0.7.0.html @@ -225,7 +225,7 @@ diff --git a/2018/Non-Dockerized-Build.html b/2018/Non-Dockerized-Build.html index 5cff79e793..9abeef0f6b 100644 --- a/2018/Non-Dockerized-Build.html +++ b/2018/Non-Dockerized-Build.html @@ -225,7 +225,7 @@ diff --git a/2018/Proxy-vm-conclusion.html b/2018/Proxy-vm-conclusion.html index 3bf95ee4ab..36e86bad68 100644 --- a/2018/Proxy-vm-conclusion.html +++ b/2018/Proxy-vm-conclusion.html @@ -225,7 +225,7 @@ diff --git 
a/2018/Research-run-VMs-with-istio-service-mesh.html b/2018/Research-run-VMs-with-istio-service-mesh.html index c61fe805c7..8a733a50f3 100644 --- a/2018/Research-run-VMs-with-istio-service-mesh.html +++ b/2018/Research-run-VMs-with-istio-service-mesh.html @@ -225,7 +225,7 @@ diff --git a/2018/Run-Istio-with-kubevirt.html b/2018/Run-Istio-with-kubevirt.html index f18169e158..4ef8c6f892 100644 --- a/2018/Run-Istio-with-kubevirt.html +++ b/2018/Run-Istio-with-kubevirt.html @@ -225,7 +225,7 @@ diff --git a/2018/Some-notes-on-some-highlights-of-v020.html b/2018/Some-notes-on-some-highlights-of-v020.html index cf44694a04..0ceb4d0b1f 100644 --- a/2018/Some-notes-on-some-highlights-of-v020.html +++ b/2018/Some-notes-on-some-highlights-of-v020.html @@ -225,7 +225,7 @@ diff --git a/2018/This-Week-in-Kube-Virt-16-Holiday-Wrap-Up-Edition.html b/2018/This-Week-in-Kube-Virt-16-Holiday-Wrap-Up-Edition.html index 9a8a44487c..954bdfc9a5 100644 --- a/2018/This-Week-in-Kube-Virt-16-Holiday-Wrap-Up-Edition.html +++ b/2018/This-Week-in-Kube-Virt-16-Holiday-Wrap-Up-Edition.html @@ -225,7 +225,7 @@ diff --git a/2018/This-Week-in-Kube-Virt-16-size-XL.html b/2018/This-Week-in-Kube-Virt-16-size-XL.html index e8c4f9ef34..770130944b 100644 --- a/2018/This-Week-in-Kube-Virt-16-size-XL.html +++ b/2018/This-Week-in-Kube-Virt-16-size-XL.html @@ -225,7 +225,7 @@ diff --git a/2018/This-Week-in-Kube-Virt-17.html b/2018/This-Week-in-Kube-Virt-17.html index 95876eb187..215f0b9dfb 100644 --- a/2018/This-Week-in-Kube-Virt-17.html +++ b/2018/This-Week-in-Kube-Virt-17.html @@ -225,7 +225,7 @@ diff --git a/2018/This-Week-in-Kube-Virt-18.html b/2018/This-Week-in-Kube-Virt-18.html index 45aef9cc43..372aff3a8d 100644 --- a/2018/This-Week-in-Kube-Virt-18.html +++ b/2018/This-Week-in-Kube-Virt-18.html @@ -225,7 +225,7 @@ diff --git a/2018/This-Week-in-Kube-Virt-19.html b/2018/This-Week-in-Kube-Virt-19.html index 13bea2675a..e3f10d415a 100644 --- a/2018/This-Week-in-Kube-Virt-19.html +++ b/2018/This-Week-in-Kube-Virt-19.html @@ -225,7 +225,7 @@ diff --git a/2018/This-Week-in-Kube-Virt-20.html b/2018/This-Week-in-Kube-Virt-20.html index f007dc511d..935c17d6f2 100644 --- a/2018/This-Week-in-Kube-Virt-20.html +++ b/2018/This-Week-in-Kube-Virt-20.html @@ -225,7 +225,7 @@ diff --git a/2018/This-Week-in-Kube-Virt-21.html b/2018/This-Week-in-Kube-Virt-21.html index e6fcf7fe17..122b173241 100644 --- a/2018/This-Week-in-Kube-Virt-21.html +++ b/2018/This-Week-in-Kube-Virt-21.html @@ -225,7 +225,7 @@ diff --git a/2018/This-Week-in-Kube-Virt-22.html b/2018/This-Week-in-Kube-Virt-22.html index d963371229..f063d9a43b 100644 --- a/2018/This-Week-in-Kube-Virt-22.html +++ b/2018/This-Week-in-Kube-Virt-22.html @@ -225,7 +225,7 @@ diff --git a/2018/This-Week-in-Kube-Virt-23.html b/2018/This-Week-in-Kube-Virt-23.html index 26d1a9a152..4fdb05952c 100644 --- a/2018/This-Week-in-Kube-Virt-23.html +++ b/2018/This-Week-in-Kube-Virt-23.html @@ -225,7 +225,7 @@ diff --git a/2018/Unit-Test-Howto.html b/2018/Unit-Test-Howto.html index 462cc68788..da70e6e895 100644 --- a/2018/Unit-Test-Howto.html +++ b/2018/Unit-Test-Howto.html @@ -225,7 +225,7 @@ diff --git a/2018/Use-GlusterFS-Cloning-with-KubeVirt.html b/2018/Use-GlusterFS-Cloning-with-KubeVirt.html index 5bc8a426a6..b8496ecd9a 100644 --- a/2018/Use-GlusterFS-Cloning-with-KubeVirt.html +++ b/2018/Use-GlusterFS-Cloning-with-KubeVirt.html @@ -225,7 +225,7 @@ diff --git a/2018/Use-VS-Code-for-Kube-Virt-Development.html b/2018/Use-VS-Code-for-Kube-Virt-Development.html index 02c86738c0..f9586c635a 100644 --- 
a/2018/Use-VS-Code-for-Kube-Virt-Development.html +++ b/2018/Use-VS-Code-for-Kube-Virt-Development.html @@ -225,7 +225,7 @@ diff --git a/2018/attaching-to-multiple-networks.html b/2018/attaching-to-multiple-networks.html index c8d89799a2..75fbe9e3fb 100644 --- a/2018/attaching-to-multiple-networks.html +++ b/2018/attaching-to-multiple-networks.html @@ -225,7 +225,7 @@ diff --git a/2018/changelog-v0.10.0.html b/2018/changelog-v0.10.0.html index f3c76f4bb3..91eccfbad6 100644 --- a/2018/changelog-v0.10.0.html +++ b/2018/changelog-v0.10.0.html @@ -225,7 +225,7 @@ diff --git a/2018/changelog-v0.11.0.html b/2018/changelog-v0.11.0.html index f9e350b383..3024810480 100644 --- a/2018/changelog-v0.11.0.html +++ b/2018/changelog-v0.11.0.html @@ -225,7 +225,7 @@ diff --git a/2018/changelog-v0.3.0.html b/2018/changelog-v0.3.0.html index 57c7b0f732..1fa4e7d21a 100644 --- a/2018/changelog-v0.3.0.html +++ b/2018/changelog-v0.3.0.html @@ -225,7 +225,7 @@ diff --git a/2018/changelog-v0.4.0.html b/2018/changelog-v0.4.0.html index 7ba788b0b2..c8628e92cd 100644 --- a/2018/changelog-v0.4.0.html +++ b/2018/changelog-v0.4.0.html @@ -225,7 +225,7 @@ diff --git a/2018/changelog-v0.5.0.html b/2018/changelog-v0.5.0.html index 3124c918b7..021abb3bef 100644 --- a/2018/changelog-v0.5.0.html +++ b/2018/changelog-v0.5.0.html @@ -225,7 +225,7 @@ diff --git a/2018/changelog-v0.6.0.html b/2018/changelog-v0.6.0.html index 36320ad23b..8bac252e13 100644 --- a/2018/changelog-v0.6.0.html +++ b/2018/changelog-v0.6.0.html @@ -225,7 +225,7 @@ diff --git a/2018/changelog-v0.7.0.html b/2018/changelog-v0.7.0.html index 1c3172779e..8b6f9ff758 100644 --- a/2018/changelog-v0.7.0.html +++ b/2018/changelog-v0.7.0.html @@ -225,7 +225,7 @@ diff --git a/2018/changelog-v0.8.0.html b/2018/changelog-v0.8.0.html index fa4e93438f..3e15dff24b 100644 --- a/2018/changelog-v0.8.0.html +++ b/2018/changelog-v0.8.0.html @@ -225,7 +225,7 @@ diff --git a/2018/changelog-v0.9.0.html b/2018/changelog-v0.9.0.html index 353c01c4cb..ea1de2832b 100644 --- a/2018/changelog-v0.9.0.html +++ b/2018/changelog-v0.9.0.html @@ -225,7 +225,7 @@ diff --git a/2018/containerized-data-importer.html b/2018/containerized-data-importer.html index d186d2300e..781c4b1e97 100644 --- a/2018/containerized-data-importer.html +++ b/2018/containerized-data-importer.html @@ -225,7 +225,7 @@ diff --git a/2018/ignition-support.html b/2018/ignition-support.html index 47dee3b388..33cd6e39d5 100644 --- a/2018/ignition-support.html +++ b/2018/ignition-support.html @@ -225,7 +225,7 @@ diff --git a/2018/kubevirt-at-kubecon-na.html b/2018/kubevirt-at-kubecon-na.html index 685101966e..144801c676 100644 --- a/2018/kubevirt-at-kubecon-na.html +++ b/2018/kubevirt-at-kubecon-na.html @@ -225,7 +225,7 @@ diff --git a/2018/kubevirt-autolatest.html b/2018/kubevirt-autolatest.html index c81850bd9e..b95bb351bd 100644 --- a/2018/kubevirt-autolatest.html +++ b/2018/kubevirt-autolatest.html @@ -225,7 +225,7 @@ diff --git a/2018/kubevirtci.html b/2018/kubevirtci.html index 359e5b4409..7fe1273dde 100644 --- a/2018/kubevirtci.html +++ b/2018/kubevirtci.html @@ -225,7 +225,7 @@ diff --git a/2018/new-volume-types.html b/2018/new-volume-types.html index 1191ccb838..f6ffe699b3 100644 --- a/2018/new-volume-types.html +++ b/2018/new-volume-types.html @@ -225,7 +225,7 @@ diff --git a/2018/ovn-multi-network-plugin-for-kubernetes-kubetron.html b/2018/ovn-multi-network-plugin-for-kubernetes-kubetron.html index c09bcaaa8a..b618dc0979 100644 --- a/2018/ovn-multi-network-plugin-for-kubernetes-kubetron.html +++ 
b/2018/ovn-multi-network-plugin-for-kubernetes-kubetron.html @@ -225,7 +225,7 @@ diff --git a/2019/Access-Virtual-Machines-graphic-console-using-noVNC.html b/2019/Access-Virtual-Machines-graphic-console-using-noVNC.html deleted file mode 100644 index 312d19419d..0000000000 --- a/2019/Access-Virtual-Machines-graphic-console-using-noVNC.html +++ /dev/null @@ -1,669 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Access Virtual Machines' graphic console using noVNC | KubeVirt.io - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-
-
-
-
-
-

-

Access Virtual Machines' graphic console using noVNC

- -
-
-

Introduction

- -

noVNC is a JavaScript VNC client using WebSockets and HTML5 Canvas. We provide a WebSocket API for VNC access under

- -
APISERVER:/apis/subresources.kubevirt.io/v1alpha3/namespaces/NAMESPACE/virtualmachineinstances/VM/vnc
-
- -

but we cannot access the VNC API directly, since authorization is needed. To solve this problem, we provide a component that uses kubectl proxy to offer authorized VNC access; we call this component virtVNC.

- -
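Before building anything, it helps to see the idea behind virtVNC in its simplest form: kubectl proxy performs the authentication against the API server for us, so the VNC subresource becomes reachable through the local proxy without extra credentials. A minimal sketch (the VM name testvm and the default namespace are placeholders for illustration):
# Run an authenticated local proxy to the Kubernetes API server
kubectl proxy --port=8001 &

# The VNC WebSocket endpoint is now reachable by any noVNC client at:
#   ws://localhost:8001/apis/subresources.kubevirt.io/v1alpha3/namespaces/default/virtualmachineinstances/testvm/vnc
# (For quick ad-hoc access from a workstation, virtctl vnc testvm achieves a
#  similar result without a browser.)
virtVNC packages exactly this trick into a pod that also serves the noVNC web client.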

In this post we are going to show how to do this in detail.

- -

The detailed method

- -

Prepare Docker Image

- -

First, prepare the Docker build directory.

- -
mkdir -p virtvnc/static
-
- -

Then clone the noVNC repository from GitHub.

- -
git clone https://github.com/novnc/noVNC
-
- -

And then copy the noVNC files into the Docker build directory (note that app, core, and vendor are directories, hence the -r flag).

- -
cp -r noVNC/app virtvnc/static/
-cp -r noVNC/core virtvnc/static/
-cp -r noVNC/vendor virtvnc/static/
-cp noVNC/*.html virtvnc/static/
-
- -

Create a file index.html in virtvnc/static/ with the following content. The page will display the VMs and their corresponding VNC links.

- -
<html>
-  <meta charset="utf-8">
-    <style>
-     td {
-        padding: 5px;
-     }
-     .button {
-       background-color: white;
-       border: 2px solid black;
-       color: black;
-       padding: 5px;
-       text-align: center;
-       text-decoration: none;
-       display: inline-block;
-       font-size: 16px;
-       -webkit-transition-duration: 0.4s;
-       transition-duration: 0.4s;
-     }
-     .button:hover{
-       background-color: black;
-       color: white;
-       cursor: pointer;
-     }
-     button[disabled] {
-       opacity: .65;
-     }
-     button[disabled]:hover {
-       color: black;
-       background: white;
-     }
-   </style>
-    <!-- Promise polyfill for IE11 -->
-    <script src="vendor/promise.js"></script>
-
-    <!-- ES2015/ES6 modules polyfill -->
-    <script nomodule src="vendor/browser-es-module-loader/dist/browser-es-module-loader.js"></script>
-
-
-    <script type="module" crossorigin="anonymous">
-      import * as WebUtil from "./app/webutil.js";
-      const apiPrefix='k8s/apis'
-      function loadVMI(namespace) {
-        WebUtil.fetchJSON('/' + apiPrefix + '/kubevirt.io/v1alpha3/namespaces/' + namespace + '/virtualmachineinstances/')
-          .then((resp) => {
-            let vmis = [];
-            resp.items.forEach(i => {
-              let tr = document.createElement('tr');
-              tr.innerHTML="<td>" + i.metadata.name + "</td><td>" + String(i.status.phase) + "</td><td>" + String(i.status.interfaces !== undefined ? i.status.interfaces[0].ipAddress : '')  + "</td><td>" + String(i.status.nodeName !== undefined ? i.status.nodeName : '') + "</td><td><button class='button' " + String(i.status.phase =="Running" ? "" : "disabled")  + " onclick=\"window.open('vnc_lite.html?path=" + apiPrefix + "/subresources.kubevirt.io/v1alpha3/namespaces/" + namespace + "/virtualmachineinstances/" + i.metadata.name + "/vnc', 'novnc_window', 'resizable=yes,toolbar=no,location=no,status=no,scrollbars=no,menubar=no,width=1030,height=800')\">VNC</button></td>";
-              document.getElementById("vmis").appendChild(tr);
-            });
-            if (resp.items.length === 0) {
-              document.body.append("No virtual machines in the namespace.");
-            }
-          })
-          .catch(err => console.log("Failed to get vmis: " + err));
-       }
-       let namespace = WebUtil.getQueryVar('namespace', 'default');
-       loadVMI(namespace);
-    </script>
-  </meta>
-
-  <body>
-   <table><tbody id="vmis">
-   </tbody></table>
-  </body>
-</html>
-
- -

Create a Dockerfile with the following content to add the static HTML files and set up the kubectl proxy command-line arguments.

- -
FROM quay.io/bitnami/kubectl:1.15
-ADD static /static
-CMD ["proxy", "--www=/static", "--accept-hosts=^.*$", "--address=[::]", "--api-prefix=/k8s/", "--www-prefix="]
-
- -

Finally, use docker build to build the Docker image.

- -
cd virtvnc
-docker build -t quay.io/samblade/virtvnc:v0.1 .
-
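If the image is going to be pulled by a cluster rather than used only locally, it also needs to be pushed to a registry the cluster can reach (replace the repository with one you own):
docker push quay.io/samblade/virtvnc:v0.1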
- -

Setting Up RBAC

- -

Create a service account for virtvnc.

- -
apiVersion: v1
-kind: ServiceAccount
-metadata:
-  name: virtvnc
-  namespace: kubevirt
-
- -

Then define a cluster role for virtvnc, granting the permissions it needs on the KubeVirt resources.

- -
kind: ClusterRole
-apiVersion: rbac.authorization.k8s.io/v1
-metadata:
-  name: virtvnc
-rules:
-  - apiGroups:
-      - subresources.kubevirt.io
-    resources:
-      - virtualmachineinstances/console
-      - virtualmachineinstances/vnc
-    verbs:
-      - get
-  - apiGroups:
-      - kubevirt.io
-    resources:
-      - virtualmachines
-      - virtualmachineinstances
-      - virtualmachineinstancepresets
-      - virtualmachineinstancereplicasets
-      - virtualmachineinstancemigrations
-    verbs:
-      - get
-      - list
-      - watch
-
- -

And then bind the cluster role to the service account.

- -
kind: ClusterRoleBinding
-apiVersion: rbac.authorization.k8s.io/v1
-metadata:
-  name: virtvnc
-subjects:
-  - kind: ServiceAccount
-    name: virtvnc
-    namespace: kubevirt
-roleRef:
-  kind: ClusterRole
-  name: virtvnc
-  apiGroup: rbac.authorization.k8s.io
-
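As a quick sanity check, you can ask the API server whether the service account ended up with the intended permissions; once the role and binding are applied, commands along these lines should both answer yes:
# Can the virtvnc service account open VNC connections in the default namespace?
kubectl auth can-i get virtualmachineinstances.subresources.kubevirt.io \
  --subresource=vnc -n default \
  --as=system:serviceaccount:kubevirt:virtvnc

# Can it list virtual machine instances?
kubectl auth can-i list virtualmachineinstances.kubevirt.io -n default \
  --as=system:serviceaccount:kubevirt:virtvnc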
- -

Deploy to Kubernetes

- -

Create the following YAML, and then apply it to Kubernetes to set up the virtvnc deployment.

- -
apiVersion: extensions/v1beta1
-kind: Deployment
-metadata:
-  name: virtvnc
-  namespace: kubevirt
-spec:
-  replicas: 1
-  selector:
-    matchLabels:
-      app: virtvnc
-  template:
-    metadata:
-      labels:
-        app: virtvnc
-    spec:
-      serviceAccountName: virtvnc
-      nodeSelector:
-        node-role.kubernetes.io/master: ""
-      tolerations:
-        - key: "node-role.kubernetes.io/master"
-          operator: "Equal"
-          value: ""
-          effect: "NoSchedule"
-      containers:
-        - name: virtvnc
-          image: quay.io/samblade/virtvnc:v0.1
-          livenessProbe:
-            httpGet:
-              port: 8001
-              path: /
-              scheme: HTTP
-            failureThreshold: 30
-            initialDelaySeconds: 30
-            periodSeconds: 10
-            successThreshold: 1
-            timeoutSeconds: 5
-
- -

Expose a NodePort service so that the web page can be accessed from the node network.

- -
apiVersion: v1
-kind: Service
-metadata:
-  labels:
-    app: virtvnc
-  name: virtvnc
-  namespace: kubevirt
-spec:
-  ports:
-    - port: 8001
-      protocol: TCP
-      targetPort: 8001
-  selector:
-    app: virtvnc
-  type: NodePort
-
- -
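Assuming the manifests above were saved to files (the file names below are only an example), apply them and confirm that the pod and service come up:
kubectl apply -f virtvnc-rbac.yaml -f virtvnc-deployment.yaml -f virtvnc-service.yaml

# The pod should become Ready and the service should be assigned a node port
kubectl -n kubevirt get pods -l app=virtvnc
kubectl -n kubevirt get svc virtvnc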

Note

This will make the VNC and serial consoles of all your virtual machines accessible from the node network.
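If exposing the consoles on the node network is not acceptable in your environment, a more conservative alternative is to keep the service internal (type ClusterIP) and reach it ad hoc through a port-forward, for example:
# Forward the virtvnc service to the local machine only
kubectl -n kubevirt port-forward svc/virtvnc 8001:8001
# then browse to http://localhost:8001/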

- - -
-

The Simple Way

- -

In this GitHub repo and registry you’ll find a ready-to-use version of the above, which you can deploy with a single command like this:

- -
kubectl apply -f https://github.com/wavezhang/virtVNC/raw/master/k8s/virtvnc.yaml
-
- -

Access VNC

- -

First, get the node port of the virtvnc service.

- -
kubectl get svc -n kubevirt virtvnc
-
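If you only need the port number itself, a jsonpath query is a convenient shortcut, for example:
kubectl get svc -n kubevirt virtvnc -o jsonpath='{.spec.ports[0].nodePort}'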
- -

Then visit the following URL in a browser:

- -
http://NODEIP:NODEPORT/
-
- -

If you want to manage virtual machines in another namespace, you can specify it using the namespace query parameter, like the following:

- -
http://NODEIP:NODEPORT/?namespace=test
-
- -

VirtVNC

- -

References

- - - -
- - - - -
- - -
-
-
-
- - - - -
- - - - - - - - - - - - - - - - - - - diff --git a/2019/An-overview-to-KubeVirt-metrics.html b/2019/An-overview-to-KubeVirt-metrics.html index f7e6e1def7..79f84197c5 100644 --- a/2019/An-overview-to-KubeVirt-metrics.html +++ b/2019/An-overview-to-KubeVirt-metrics.html @@ -225,7 +225,7 @@ diff --git a/2019/CNCF-Sandbox.html b/2019/CNCF-Sandbox.html index 7f3689033b..9430e27de1 100644 --- a/2019/CNCF-Sandbox.html +++ b/2019/CNCF-Sandbox.html @@ -225,7 +225,7 @@ diff --git a/2019/How-To-Import-VM-into-Kubevirt.html b/2019/How-To-Import-VM-into-Kubevirt.html index b90a802ef8..09b1496da0 100644 --- a/2019/How-To-Import-VM-into-Kubevirt.html +++ b/2019/How-To-Import-VM-into-Kubevirt.html @@ -225,7 +225,7 @@ diff --git a/2019/Hyper-Converged-Operator.html b/2019/Hyper-Converged-Operator.html index a274cffbbd..02c8d1e91f 100644 --- a/2019/Hyper-Converged-Operator.html +++ b/2019/Hyper-Converged-Operator.html @@ -225,7 +225,7 @@ diff --git a/2019/KubeVirt-CR-Condition-Types-Rename.html b/2019/KubeVirt-CR-Condition-Types-Rename.html index 452b5cf508..9ae4b2e719 100644 --- a/2019/KubeVirt-CR-Condition-Types-Rename.html +++ b/2019/KubeVirt-CR-Condition-Types-Rename.html @@ -225,7 +225,7 @@ diff --git a/2019/KubeVirt_UI_options.html b/2019/KubeVirt_UI_options.html index 7dadf4697a..40fc592163 100644 --- a/2019/KubeVirt_UI_options.html +++ b/2019/KubeVirt_UI_options.html @@ -225,7 +225,7 @@ @@ -431,11 +431,7 @@

noVNC

noVNC is a JavaScript VNC client using WebSockets and HTML5 Canvas. It simply allows you to connect through VNC to a virtual machine already deployed in KubeVirt.

-

No VM management or even a dashboard is enabled with this option, it’s a pure DIY code that can embed the VNC access to the VM into HTML in any application or webpage. -There is a noVNC blogpost detailing how to install noVNC.

- -

In this animation you can see the feature of connecting to the Virtual Machine with noVNC: -noVNC

+

No VM management or even a dashboard is enabled with this option; it’s a pure DIY approach whose code can embed VNC access to the VM into HTML in any application or webpage.

Summary

diff --git a/2019/KubeVirt_basic_operations_video.html b/2019/KubeVirt_basic_operations_video.html index a9fc33aad8..752d22e020 100644 --- a/2019/KubeVirt_basic_operations_video.html +++ b/2019/KubeVirt_basic_operations_video.html @@ -225,7 +225,7 @@ diff --git a/2019/KubeVirt_k8s_crio_from_scratch.html b/2019/KubeVirt_k8s_crio_from_scratch.html index 37b84b62e4..0a294dd841 100644 --- a/2019/KubeVirt_k8s_crio_from_scratch.html +++ b/2019/KubeVirt_k8s_crio_from_scratch.html @@ -225,7 +225,7 @@ diff --git a/2019/KubeVirt_k8s_crio_from_scratch_installing_KubeVirt.html b/2019/KubeVirt_k8s_crio_from_scratch_installing_KubeVirt.html index 0316903daa..fd9c2149c3 100644 --- a/2019/KubeVirt_k8s_crio_from_scratch_installing_KubeVirt.html +++ b/2019/KubeVirt_k8s_crio_from_scratch_installing_KubeVirt.html @@ -225,7 +225,7 @@ diff --git a/2019/KubeVirt_k8s_crio_from_scratch_installing_kubernetes.html b/2019/KubeVirt_k8s_crio_from_scratch_installing_kubernetes.html index 431d906c77..e1b4dfeade 100644 --- a/2019/KubeVirt_k8s_crio_from_scratch_installing_kubernetes.html +++ b/2019/KubeVirt_k8s_crio_from_scratch_installing_kubernetes.html @@ -225,7 +225,7 @@ diff --git a/2019/KubeVirt_lab1_use_kubevirt.html b/2019/KubeVirt_lab1_use_kubevirt.html index c610462a5a..78631f88ef 100644 --- a/2019/KubeVirt_lab1_use_kubevirt.html +++ b/2019/KubeVirt_lab1_use_kubevirt.html @@ -225,7 +225,7 @@ diff --git a/2019/KubeVirt_lab2_experiment_with_cdi.html b/2019/KubeVirt_lab2_experiment_with_cdi.html index 5d256d3652..b05a28f084 100644 --- a/2019/KubeVirt_lab2_experiment_with_cdi.html +++ b/2019/KubeVirt_lab2_experiment_with_cdi.html @@ -225,7 +225,7 @@ diff --git a/2019/KubeVirt_storage_rook_ceph.html b/2019/KubeVirt_storage_rook_ceph.html index 665a827b0f..7360864515 100644 --- a/2019/KubeVirt_storage_rook_ceph.html +++ b/2019/KubeVirt_storage_rook_ceph.html @@ -225,7 +225,7 @@ diff --git a/2019/Kubevirt-CR-Condition-Types-Rename-Now-ACTIVE.html b/2019/Kubevirt-CR-Condition-Types-Rename-Now-ACTIVE.html index 4be7ce6b58..46e21f5de7 100644 --- a/2019/Kubevirt-CR-Condition-Types-Rename-Now-ACTIVE.html +++ b/2019/Kubevirt-CR-Condition-Types-Rename-Now-ACTIVE.html @@ -225,7 +225,7 @@ diff --git a/2019/Kubevirt-vagrant-provider.html b/2019/Kubevirt-vagrant-provider.html index 197a1c4aa6..6a55945d0b 100644 --- a/2019/Kubevirt-vagrant-provider.html +++ b/2019/Kubevirt-vagrant-provider.html @@ -225,7 +225,7 @@ diff --git a/2019/More-about-Kubevirt-metrics.html b/2019/More-about-Kubevirt-metrics.html index e48cd64611..f8fa4abe3e 100644 --- a/2019/More-about-Kubevirt-metrics.html +++ b/2019/More-about-Kubevirt-metrics.html @@ -225,7 +225,7 @@ diff --git a/2019/NodeDrain-KubeVirt.html b/2019/NodeDrain-KubeVirt.html index cf1f67737a..643a98f4bd 100644 --- a/2019/NodeDrain-KubeVirt.html +++ b/2019/NodeDrain-KubeVirt.html @@ -225,7 +225,7 @@ diff --git a/2019/changelog-v0.12.0.html b/2019/changelog-v0.12.0.html index 17b91cfa30..a8b950d7fe 100644 --- a/2019/changelog-v0.12.0.html +++ b/2019/changelog-v0.12.0.html @@ -225,7 +225,7 @@ diff --git a/2019/changelog-v0.13.0.html b/2019/changelog-v0.13.0.html index cde7641730..dad79c3d13 100644 --- a/2019/changelog-v0.13.0.html +++ b/2019/changelog-v0.13.0.html @@ -225,7 +225,7 @@ diff --git a/2019/changelog-v0.14.0.html b/2019/changelog-v0.14.0.html index cfbf97af9c..0b117aaa26 100644 --- a/2019/changelog-v0.14.0.html +++ b/2019/changelog-v0.14.0.html @@ -225,7 +225,7 @@ diff --git a/2019/changelog-v0.15.0.html b/2019/changelog-v0.15.0.html index 7d8d72ca8d..0344673f35 100644 --- 
a/2019/changelog-v0.15.0.html +++ b/2019/changelog-v0.15.0.html @@ -225,7 +225,7 @@ diff --git a/2019/changelog-v0.16.0.html b/2019/changelog-v0.16.0.html index e6dda106fd..e54bac39f3 100644 --- a/2019/changelog-v0.16.0.html +++ b/2019/changelog-v0.16.0.html @@ -225,7 +225,7 @@ diff --git a/2019/changelog-v0.17.0.html b/2019/changelog-v0.17.0.html index ee6a46b447..ac9309ea57 100644 --- a/2019/changelog-v0.17.0.html +++ b/2019/changelog-v0.17.0.html @@ -225,7 +225,7 @@ diff --git a/2019/changelog-v0.18.0.html b/2019/changelog-v0.18.0.html index e33425eca2..bc339c2693 100644 --- a/2019/changelog-v0.18.0.html +++ b/2019/changelog-v0.18.0.html @@ -225,7 +225,7 @@ diff --git a/2019/changelog-v0.19.0.html b/2019/changelog-v0.19.0.html index d994b9dc55..b572c6c83b 100644 --- a/2019/changelog-v0.19.0.html +++ b/2019/changelog-v0.19.0.html @@ -225,7 +225,7 @@ diff --git a/2019/changelog-v0.20.0.html b/2019/changelog-v0.20.0.html index e3dbea3781..97ca406d5a 100644 --- a/2019/changelog-v0.20.0.html +++ b/2019/changelog-v0.20.0.html @@ -225,7 +225,7 @@ diff --git a/2019/changelog-v0.21.0.html b/2019/changelog-v0.21.0.html index e11ae09d5a..e7ee51fbbd 100644 --- a/2019/changelog-v0.21.0.html +++ b/2019/changelog-v0.21.0.html @@ -225,7 +225,7 @@ diff --git a/2019/changelog-v0.22.0.html b/2019/changelog-v0.22.0.html index 7472eee236..d247a89136 100644 --- a/2019/changelog-v0.22.0.html +++ b/2019/changelog-v0.22.0.html @@ -225,7 +225,7 @@ diff --git a/2019/changelog-v0.23.0.html b/2019/changelog-v0.23.0.html index 7aeca44353..58bdac1b6d 100644 --- a/2019/changelog-v0.23.0.html +++ b/2019/changelog-v0.23.0.html @@ -225,7 +225,7 @@ diff --git a/2019/changelog-v0.24.0.html b/2019/changelog-v0.24.0.html index 22183059e7..66031a31cd 100644 --- a/2019/changelog-v0.24.0.html +++ b/2019/changelog-v0.24.0.html @@ -225,7 +225,7 @@ diff --git a/2019/federated-kubevirt.html b/2019/federated-kubevirt.html index 1a735e5566..b848d195bc 100644 --- a/2019/federated-kubevirt.html +++ b/2019/federated-kubevirt.html @@ -225,7 +225,7 @@ diff --git a/2019/jenkins-ci-server-upgrade-and-jobs-for-kubevirt.html b/2019/jenkins-ci-server-upgrade-and-jobs-for-kubevirt.html index 6d3fbde283..344f8bd044 100644 --- a/2019/jenkins-ci-server-upgrade-and-jobs-for-kubevirt.html +++ b/2019/jenkins-ci-server-upgrade-and-jobs-for-kubevirt.html @@ -225,7 +225,7 @@ diff --git a/2019/jenkins-jobs-for-kubevirt-lab-validation.html b/2019/jenkins-jobs-for-kubevirt-lab-validation.html index 210f29d290..7ffa44fc25 100644 --- a/2019/jenkins-jobs-for-kubevirt-lab-validation.html +++ b/2019/jenkins-jobs-for-kubevirt-lab-validation.html @@ -225,7 +225,7 @@ diff --git a/2019/kubecon-na-2019.html b/2019/kubecon-na-2019.html index 17183ed360..44521ba783 100644 --- a/2019/kubecon-na-2019.html +++ b/2019/kubecon-na-2019.html @@ -225,7 +225,7 @@ diff --git a/2019/kubevirt-with-ansible-part-1.html b/2019/kubevirt-with-ansible-part-1.html index 9069b5ff32..427902433a 100644 --- a/2019/kubevirt-with-ansible-part-1.html +++ b/2019/kubevirt-with-ansible-part-1.html @@ -225,7 +225,7 @@ diff --git a/2019/kubevirt-with-ansible-part-2.html b/2019/kubevirt-with-ansible-part-2.html index 637d2ca14d..5cdde12659 100644 --- a/2019/kubevirt-with-ansible-part-2.html +++ b/2019/kubevirt-with-ansible-part-2.html @@ -225,7 +225,7 @@ diff --git a/2019/prow-jobs-for-kubevirt.html b/2019/prow-jobs-for-kubevirt.html index dad9732e87..be03fca2c4 100644 --- a/2019/prow-jobs-for-kubevirt.html +++ b/2019/prow-jobs-for-kubevirt.html @@ -225,7 +225,7 @@ diff --git 
a/2019/website-roadmap.html b/2019/website-roadmap.html index 92bc2f42ab..ef39c2c1eb 100644 --- a/2019/website-roadmap.html +++ b/2019/website-roadmap.html @@ -225,7 +225,7 @@ diff --git a/2020/Advanced-scheduling-with-affinity-rules.html b/2020/Advanced-scheduling-with-affinity-rules.html index 113922ff59..5c5a60918c 100644 --- a/2020/Advanced-scheduling-with-affinity-rules.html +++ b/2020/Advanced-scheduling-with-affinity-rules.html @@ -225,7 +225,7 @@ diff --git a/2020/Common_templates.html b/2020/Common_templates.html index 4b36c90a89..1395059a8f 100644 --- a/2020/Common_templates.html +++ b/2020/Common_templates.html @@ -225,7 +225,7 @@ diff --git a/2020/Customizing-images-for-containerized-vms.html b/2020/Customizing-images-for-containerized-vms.html index 6309b9464e..a3a330733a 100644 --- a/2020/Customizing-images-for-containerized-vms.html +++ b/2020/Customizing-images-for-containerized-vms.html @@ -225,7 +225,7 @@ diff --git a/2020/Import-VM-from-oVirt.html b/2020/Import-VM-from-oVirt.html index 30fffe909d..546a766085 100644 --- a/2020/Import-VM-from-oVirt.html +++ b/2020/Import-VM-from-oVirt.html @@ -225,7 +225,7 @@ diff --git a/2020/KubeVirt-Architecture-Fundamentals.html b/2020/KubeVirt-Architecture-Fundamentals.html index 4f1b62f2d9..fcb62a16f6 100644 --- a/2020/KubeVirt-Architecture-Fundamentals.html +++ b/2020/KubeVirt-Architecture-Fundamentals.html @@ -225,7 +225,7 @@ diff --git a/2020/KubeVirt-Operation-Fundamentals.html b/2020/KubeVirt-Operation-Fundamentals.html index cb844ea8cc..1532773456 100644 --- a/2020/KubeVirt-Operation-Fundamentals.html +++ b/2020/KubeVirt-Operation-Fundamentals.html @@ -225,7 +225,7 @@ diff --git a/2020/KubeVirt-Security-Fundamentals.html b/2020/KubeVirt-Security-Fundamentals.html index d271fbd5d5..e4b9580c5c 100644 --- a/2020/KubeVirt-Security-Fundamentals.html +++ b/2020/KubeVirt-Security-Fundamentals.html @@ -225,7 +225,7 @@ diff --git a/2020/KubeVirt-VM-Image-Usage-Patterns.html b/2020/KubeVirt-VM-Image-Usage-Patterns.html index a095fa834e..60d8b74286 100644 --- a/2020/KubeVirt-VM-Image-Usage-Patterns.html +++ b/2020/KubeVirt-VM-Image-Usage-Patterns.html @@ -225,7 +225,7 @@ diff --git a/2020/KubeVirt-installing_Microsoft_Windows_from_an_iso.html b/2020/KubeVirt-installing_Microsoft_Windows_from_an_iso.html index 44f6476a83..7da75426bf 100644 --- a/2020/KubeVirt-installing_Microsoft_Windows_from_an_iso.html +++ b/2020/KubeVirt-installing_Microsoft_Windows_from_an_iso.html @@ -225,7 +225,7 @@ diff --git a/2020/KubeVirt_Intro-Virtual_Machine_Management_on_Kubernetes.html b/2020/KubeVirt_Intro-Virtual_Machine_Management_on_Kubernetes.html index cad7a74ef5..44abd409d0 100644 --- a/2020/KubeVirt_Intro-Virtual_Machine_Management_on_Kubernetes.html +++ b/2020/KubeVirt_Intro-Virtual_Machine_Management_on_Kubernetes.html @@ -225,7 +225,7 @@ diff --git a/2020/KubeVirt_deep_dive-virtualized_gpu_workloads.html b/2020/KubeVirt_deep_dive-virtualized_gpu_workloads.html index 44de37668c..cadbb66cd1 100644 --- a/2020/KubeVirt_deep_dive-virtualized_gpu_workloads.html +++ b/2020/KubeVirt_deep_dive-virtualized_gpu_workloads.html @@ -225,7 +225,7 @@ diff --git a/2020/KubeVirt_lab3_upgrade.html b/2020/KubeVirt_lab3_upgrade.html index 58faec841f..e66c66a270 100644 --- a/2020/KubeVirt_lab3_upgrade.html +++ b/2020/KubeVirt_lab3_upgrade.html @@ -225,7 +225,7 @@ diff --git a/2020/Live-migration.html b/2020/Live-migration.html index 53e4235dba..3b3c63ef48 100644 --- a/2020/Live-migration.html +++ b/2020/Live-migration.html @@ -225,7 +225,7 @@ diff --git 
a/2020/Minikube_KubeVirt_Addon.html b/2020/Minikube_KubeVirt_Addon.html index 3bd551524e..d5c20df568 100644 --- a/2020/Minikube_KubeVirt_Addon.html +++ b/2020/Minikube_KubeVirt_Addon.html @@ -225,7 +225,7 @@ diff --git a/2020/Monitoring-KubeVirt-VMs-from-the-inside.html b/2020/Monitoring-KubeVirt-VMs-from-the-inside.html index d56547d7f4..1d0541de2b 100644 --- a/2020/Monitoring-KubeVirt-VMs-from-the-inside.html +++ b/2020/Monitoring-KubeVirt-VMs-from-the-inside.html @@ -225,7 +225,7 @@ diff --git a/2020/Multiple-Network-Attachments-with-bridge-CNI.html b/2020/Multiple-Network-Attachments-with-bridge-CNI.html index dc21a4a1b6..9797fbde1d 100644 --- a/2020/Multiple-Network-Attachments-with-bridge-CNI.html +++ b/2020/Multiple-Network-Attachments-with-bridge-CNI.html @@ -225,7 +225,7 @@ diff --git a/2020/OKD-web-console-install.html b/2020/OKD-web-console-install.html index a44a244a7e..e676a6cfc5 100644 --- a/2020/OKD-web-console-install.html +++ b/2020/OKD-web-console-install.html @@ -225,7 +225,7 @@ diff --git a/2020/SELinux-from-basics-to-KubeVirt.html b/2020/SELinux-from-basics-to-KubeVirt.html index a8d316d6ed..8cf0dfa3c0 100644 --- a/2020/SELinux-from-basics-to-KubeVirt.html +++ b/2020/SELinux-from-basics-to-KubeVirt.html @@ -225,7 +225,7 @@ diff --git a/2020/changelog-v0.25.0.html b/2020/changelog-v0.25.0.html index 15ec27b168..4c4fa8a663 100644 --- a/2020/changelog-v0.25.0.html +++ b/2020/changelog-v0.25.0.html @@ -225,7 +225,7 @@ diff --git a/2020/changelog-v0.26.0.html b/2020/changelog-v0.26.0.html index 1b6a057552..28afdae398 100644 --- a/2020/changelog-v0.26.0.html +++ b/2020/changelog-v0.26.0.html @@ -225,7 +225,7 @@ diff --git a/2020/changelog-v0.27.0.html b/2020/changelog-v0.27.0.html index ca2467f513..528a5cd7ad 100644 --- a/2020/changelog-v0.27.0.html +++ b/2020/changelog-v0.27.0.html @@ -225,7 +225,7 @@ diff --git a/2020/changelog-v0.28.0.html b/2020/changelog-v0.28.0.html index f74c89af3e..efabfb77d6 100644 --- a/2020/changelog-v0.28.0.html +++ b/2020/changelog-v0.28.0.html @@ -225,7 +225,7 @@ diff --git a/2020/changelog-v0.29.0.html b/2020/changelog-v0.29.0.html index ecc4ca7fda..97317fa051 100644 --- a/2020/changelog-v0.29.0.html +++ b/2020/changelog-v0.29.0.html @@ -225,7 +225,7 @@ diff --git a/2020/changelog-v0.30.0.html b/2020/changelog-v0.30.0.html index cacf025476..20363f31d6 100644 --- a/2020/changelog-v0.30.0.html +++ b/2020/changelog-v0.30.0.html @@ -225,7 +225,7 @@ diff --git a/2020/changelog-v0.31.0.html b/2020/changelog-v0.31.0.html index f7bc61cca7..3df5802ec4 100644 --- a/2020/changelog-v0.31.0.html +++ b/2020/changelog-v0.31.0.html @@ -225,7 +225,7 @@ diff --git a/2020/changelog-v0.32.0.html b/2020/changelog-v0.32.0.html index f555ad49e6..7202c35cc2 100644 --- a/2020/changelog-v0.32.0.html +++ b/2020/changelog-v0.32.0.html @@ -225,7 +225,7 @@ diff --git a/2020/changelog-v0.33.0.html b/2020/changelog-v0.33.0.html index 67aeeb38b5..ae464ffeb2 100644 --- a/2020/changelog-v0.33.0.html +++ b/2020/changelog-v0.33.0.html @@ -225,7 +225,7 @@ diff --git a/2020/changelog-v0.34.0.html b/2020/changelog-v0.34.0.html index aa6853d6d6..027121e542 100644 --- a/2020/changelog-v0.34.0.html +++ b/2020/changelog-v0.34.0.html @@ -225,7 +225,7 @@ diff --git a/2020/changelog-v0.35.0.html b/2020/changelog-v0.35.0.html index f973c0d229..df85137673 100644 --- a/2020/changelog-v0.35.0.html +++ b/2020/changelog-v0.35.0.html @@ -225,7 +225,7 @@ diff --git a/2020/changelog-v0.36.0.html b/2020/changelog-v0.36.0.html index 8798372a9d..ae6cb5527c 100644 --- a/2020/changelog-v0.36.0.html +++ 
b/2020/changelog-v0.36.0.html @@ -225,7 +225,7 @@ diff --git a/2020/run_strategies.html b/2020/run_strategies.html index 32ec6500e8..60ecb38cca 100644 --- a/2020/run_strategies.html +++ b/2020/run_strategies.html @@ -225,7 +225,7 @@ diff --git a/2020/win_workload_in_k8s.html b/2020/win_workload_in_k8s.html index ce4049042f..ec04846f49 100644 --- a/2020/win_workload_in_k8s.html +++ b/2020/win_workload_in_k8s.html @@ -225,7 +225,7 @@ diff --git a/2021/Automated-Windows-Installation-With-Tekton-Pipelines.html b/2021/Automated-Windows-Installation-With-Tekton-Pipelines.html index f40b55719c..e4b3885534 100644 --- a/2021/Automated-Windows-Installation-With-Tekton-Pipelines.html +++ b/2021/Automated-Windows-Installation-With-Tekton-Pipelines.html @@ -225,7 +225,7 @@ diff --git a/2021/Importing-EC2-to-KubeVirt.html b/2021/Importing-EC2-to-KubeVirt.html index a401e053fa..131e59fde5 100644 --- a/2021/Importing-EC2-to-KubeVirt.html +++ b/2021/Importing-EC2-to-KubeVirt.html @@ -225,7 +225,7 @@ diff --git a/2021/KubeVirt-Summit-Wrap-Up.html b/2021/KubeVirt-Summit-Wrap-Up.html index 1a49abe1cb..22d4db4512 100644 --- a/2021/KubeVirt-Summit-Wrap-Up.html +++ b/2021/KubeVirt-Summit-Wrap-Up.html @@ -225,7 +225,7 @@ diff --git a/2021/KubeVirt-Summit-announce.html b/2021/KubeVirt-Summit-announce.html index 9c3252d055..6286af8fea 100644 --- a/2021/KubeVirt-Summit-announce.html +++ b/2021/KubeVirt-Summit-announce.html @@ -225,7 +225,7 @@ diff --git a/2021/Running-Realtime-Workloads.html b/2021/Running-Realtime-Workloads.html index 3a43f5176b..c9c2e2c695 100644 --- a/2021/Running-Realtime-Workloads.html +++ b/2021/Running-Realtime-Workloads.html @@ -225,7 +225,7 @@ diff --git a/2021/Virtual-machines-in-Istio-service-mesh.html b/2021/Virtual-machines-in-Istio-service-mesh.html index c3888de2b9..1eeffa21e9 100644 --- a/2021/Virtual-machines-in-Istio-service-mesh.html +++ b/2021/Virtual-machines-in-Istio-service-mesh.html @@ -225,7 +225,7 @@ diff --git a/2021/changelog-v0.37.0.html b/2021/changelog-v0.37.0.html index 40ff27aee9..86d250d5f9 100644 --- a/2021/changelog-v0.37.0.html +++ b/2021/changelog-v0.37.0.html @@ -225,7 +225,7 @@ diff --git a/2021/changelog-v0.38.0.html b/2021/changelog-v0.38.0.html index e91790986d..9baa1bb092 100644 --- a/2021/changelog-v0.38.0.html +++ b/2021/changelog-v0.38.0.html @@ -225,7 +225,7 @@ diff --git a/2021/changelog-v0.39.0.html b/2021/changelog-v0.39.0.html index ca541dfb1a..88c294587c 100644 --- a/2021/changelog-v0.39.0.html +++ b/2021/changelog-v0.39.0.html @@ -225,7 +225,7 @@ diff --git a/2021/changelog-v0.40.0.html b/2021/changelog-v0.40.0.html index 6088777f64..a634e73a6c 100644 --- a/2021/changelog-v0.40.0.html +++ b/2021/changelog-v0.40.0.html @@ -225,7 +225,7 @@ diff --git a/2021/changelog-v0.41.0.html b/2021/changelog-v0.41.0.html index 96c193ff0b..9fb4bb2f86 100644 --- a/2021/changelog-v0.41.0.html +++ b/2021/changelog-v0.41.0.html @@ -225,7 +225,7 @@ diff --git a/2021/changelog-v0.42.0.html b/2021/changelog-v0.42.0.html index d32a0ae6d4..6d5bf9ddca 100644 --- a/2021/changelog-v0.42.0.html +++ b/2021/changelog-v0.42.0.html @@ -225,7 +225,7 @@ diff --git a/2021/changelog-v0.43.0.html b/2021/changelog-v0.43.0.html index f8fe4326d1..db8e358780 100644 --- a/2021/changelog-v0.43.0.html +++ b/2021/changelog-v0.43.0.html @@ -225,7 +225,7 @@ diff --git a/2021/changelog-v0.44.0.html b/2021/changelog-v0.44.0.html index 83c1debb34..176fc3cc6b 100644 --- a/2021/changelog-v0.44.0.html +++ b/2021/changelog-v0.44.0.html @@ -225,7 +225,7 @@ diff --git a/2021/changelog-v0.45.0.html 
b/2021/changelog-v0.45.0.html index f1333e5632..d5e5833392 100644 --- a/2021/changelog-v0.45.0.html +++ b/2021/changelog-v0.45.0.html @@ -225,7 +225,7 @@ diff --git a/2021/changelog-v0.46.0.html b/2021/changelog-v0.46.0.html index 23819cd072..dccd7f7c07 100644 --- a/2021/changelog-v0.46.0.html +++ b/2021/changelog-v0.46.0.html @@ -225,7 +225,7 @@ diff --git a/2021/changelog-v0.48.0.html b/2021/changelog-v0.48.0.html index 3200c35883..c16e8ed1cb 100644 --- a/2021/changelog-v0.48.0.html +++ b/2021/changelog-v0.48.0.html @@ -225,7 +225,7 @@ diff --git a/2021/intel-vgpu-kubevirt.html b/2021/intel-vgpu-kubevirt.html index 92a8944e56..d3730a01d2 100644 --- a/2021/intel-vgpu-kubevirt.html +++ b/2021/intel-vgpu-kubevirt.html @@ -225,7 +225,7 @@ diff --git a/2021/kubevirt-api-auth.html b/2021/kubevirt-api-auth.html index afbec8231f..19fa86956a 100644 --- a/2021/kubevirt-api-auth.html +++ b/2021/kubevirt-api-auth.html @@ -225,7 +225,7 @@ diff --git a/2022/Dedicated-migration-network.html b/2022/Dedicated-migration-network.html index df5c8d8520..2b4bf02963 100644 --- a/2022/Dedicated-migration-network.html +++ b/2022/Dedicated-migration-network.html @@ -225,7 +225,7 @@ diff --git a/2022/KubeVirt-Introduction-of-instancetypes.html b/2022/KubeVirt-Introduction-of-instancetypes.html index f277b2e962..7da85761ff 100644 --- a/2022/KubeVirt-Introduction-of-instancetypes.html +++ b/2022/KubeVirt-Introduction-of-instancetypes.html @@ -225,7 +225,7 @@ diff --git a/2022/KubeVirt-Summit-2022.html b/2022/KubeVirt-Summit-2022.html index 874d38b671..b8ec633884 100644 --- a/2022/KubeVirt-Summit-2022.html +++ b/2022/KubeVirt-Summit-2022.html @@ -225,7 +225,7 @@ diff --git a/2022/KubeVirt-at-KubeCon-EU-2022.html b/2022/KubeVirt-at-KubeCon-EU-2022.html index f77457169d..da102b6901 100644 --- a/2022/KubeVirt-at-KubeCon-EU-2022.html +++ b/2022/KubeVirt-at-KubeCon-EU-2022.html @@ -225,7 +225,7 @@ diff --git a/2022/KubeVirt-installing_Microsoft_Windows_11_from_an_iso.html b/2022/KubeVirt-installing_Microsoft_Windows_11_from_an_iso.html index 845c7871b7..59f3985d62 100644 --- a/2022/KubeVirt-installing_Microsoft_Windows_11_from_an_iso.html +++ b/2022/KubeVirt-installing_Microsoft_Windows_11_from_an_iso.html @@ -225,7 +225,7 @@ diff --git a/2022/Virtual-Machines-with-MetalLB.html b/2022/Virtual-Machines-with-MetalLB.html index c1431b242b..b49f169996 100644 --- a/2022/Virtual-Machines-with-MetalLB.html +++ b/2022/Virtual-Machines-with-MetalLB.html @@ -225,7 +225,7 @@ diff --git a/2022/changelog-v0.49.0.html b/2022/changelog-v0.49.0.html index 5268bd5255..ff1ac61037 100644 --- a/2022/changelog-v0.49.0.html +++ b/2022/changelog-v0.49.0.html @@ -225,7 +225,7 @@ diff --git a/2022/changelog-v0.50.0.html b/2022/changelog-v0.50.0.html index bbf7a38c80..fc83e7e0e3 100644 --- a/2022/changelog-v0.50.0.html +++ b/2022/changelog-v0.50.0.html @@ -225,7 +225,7 @@ diff --git a/2022/changelog-v0.51.0.html b/2022/changelog-v0.51.0.html index b304aadad1..8698e4f055 100644 --- a/2022/changelog-v0.51.0.html +++ b/2022/changelog-v0.51.0.html @@ -225,7 +225,7 @@ diff --git a/2022/changelog-v0.52.0.html b/2022/changelog-v0.52.0.html index 032c612838..b913fd7d13 100644 --- a/2022/changelog-v0.52.0.html +++ b/2022/changelog-v0.52.0.html @@ -225,7 +225,7 @@ diff --git a/2022/changelog-v0.53.0.html b/2022/changelog-v0.53.0.html index c69393daa7..7603e3354b 100644 --- a/2022/changelog-v0.53.0.html +++ b/2022/changelog-v0.53.0.html @@ -225,7 +225,7 @@ diff --git a/2022/changelog-v0.54.0.html b/2022/changelog-v0.54.0.html index d922a12cb8..2528a1ab67 
100644 --- a/2022/changelog-v0.54.0.html +++ b/2022/changelog-v0.54.0.html @@ -225,7 +225,7 @@ diff --git a/2022/changelog-v0.55.0.html b/2022/changelog-v0.55.0.html index 9edb5808a1..2f98df6af4 100644 --- a/2022/changelog-v0.55.0.html +++ b/2022/changelog-v0.55.0.html @@ -225,7 +225,7 @@ diff --git a/2022/changelog-v0.56.0.html b/2022/changelog-v0.56.0.html index adf407382e..6b3d069632 100644 --- a/2022/changelog-v0.56.0.html +++ b/2022/changelog-v0.56.0.html @@ -225,7 +225,7 @@ diff --git a/2022/changelog-v0.57.0.html b/2022/changelog-v0.57.0.html index beac0d1bb3..9c713ca23a 100644 --- a/2022/changelog-v0.57.0.html +++ b/2022/changelog-v0.57.0.html @@ -225,7 +225,7 @@ diff --git a/2022/changelog-v0.58.0.html b/2022/changelog-v0.58.0.html index c4352b2c87..75cdc63aec 100644 --- a/2022/changelog-v0.58.0.html +++ b/2022/changelog-v0.58.0.html @@ -225,7 +225,7 @@ diff --git a/2023/Announcing-KubeVirt-v1-1.html b/2023/Announcing-KubeVirt-v1-1.html index 6421036de0..9083647176 100644 --- a/2023/Announcing-KubeVirt-v1-1.html +++ b/2023/Announcing-KubeVirt-v1-1.html @@ -225,7 +225,7 @@ diff --git a/2023/KubeVirt-Summit-2023.html b/2023/KubeVirt-Summit-2023.html index dd7f171629..b558c4a8ba 100644 --- a/2023/KubeVirt-Summit-2023.html +++ b/2023/KubeVirt-Summit-2023.html @@ -225,7 +225,7 @@ diff --git a/2023/KubeVirt-on-autoscaling-nodes.html b/2023/KubeVirt-on-autoscaling-nodes.html index e3338a1dd0..c4fda4dc6c 100644 --- a/2023/KubeVirt-on-autoscaling-nodes.html +++ b/2023/KubeVirt-on-autoscaling-nodes.html @@ -225,7 +225,7 @@ diff --git a/2023/KubeVirt-v1-has-landed.html b/2023/KubeVirt-v1-has-landed.html index 443ee3a78b..ee560ddcef 100644 --- a/2023/KubeVirt-v1-has-landed.html +++ b/2023/KubeVirt-v1-has-landed.html @@ -225,7 +225,7 @@ diff --git a/2023/Managing-KubeVirt-VMs-with-Ansible.html b/2023/Managing-KubeVirt-VMs-with-Ansible.html index 62142c716c..8ac3cd1822 100644 --- a/2023/Managing-KubeVirt-VMs-with-Ansible.html +++ b/2023/Managing-KubeVirt-VMs-with-Ansible.html @@ -225,7 +225,7 @@ diff --git a/2023/OVN-kubernetes-secondary-networks-localnet.html b/2023/OVN-kubernetes-secondary-networks-localnet.html index 6b4892f90b..a48cd43e45 100644 --- a/2023/OVN-kubernetes-secondary-networks-localnet.html +++ b/2023/OVN-kubernetes-secondary-networks-localnet.html @@ -225,7 +225,7 @@ diff --git a/2023/OVN-kubernetes-secondary-networks-policies.html b/2023/OVN-kubernetes-secondary-networks-policies.html index 4d2f6560f6..bd72043f52 100644 --- a/2023/OVN-kubernetes-secondary-networks-policies.html +++ b/2023/OVN-kubernetes-secondary-networks-policies.html @@ -225,7 +225,7 @@ diff --git a/2023/OVN-kubernetes-secondary-networks.html b/2023/OVN-kubernetes-secondary-networks.html index d264c8a228..0457ae1232 100644 --- a/2023/OVN-kubernetes-secondary-networks.html +++ b/2023/OVN-kubernetes-secondary-networks.html @@ -225,7 +225,7 @@ diff --git a/2023/changelog-v0.59.0.html b/2023/changelog-v0.59.0.html index 4103eb7059..1f02dfc1d0 100644 --- a/2023/changelog-v0.59.0.html +++ b/2023/changelog-v0.59.0.html @@ -225,7 +225,7 @@ diff --git a/2023/changelog-v1.0.0.html b/2023/changelog-v1.0.0.html index 91ff1eab04..54e50413d3 100644 --- a/2023/changelog-v1.0.0.html +++ b/2023/changelog-v1.0.0.html @@ -225,7 +225,7 @@ diff --git a/2023/changelog-v1.1.0.html b/2023/changelog-v1.1.0.html index de645f919c..d6676f4613 100644 --- a/2023/changelog-v1.1.0.html +++ b/2023/changelog-v1.1.0.html @@ -225,7 +225,7 @@ diff --git a/404.html b/404.html index 8b8d4b608a..9b691067e2 100644 --- a/404.html +++ b/404.html @@ 
-50,7 +50,7 @@ - + @@ -225,7 +225,7 @@ diff --git a/applications-aware-quota/index.html b/applications-aware-quota/index.html index 369920903a..98b078ad32 100644 --- a/applications-aware-quota/index.html +++ b/applications-aware-quota/index.html @@ -50,7 +50,7 @@ - + @@ -225,7 +225,7 @@ diff --git a/blogs/community.html b/blogs/community.html index 2da85f5703..c6632fdd6e 100644 --- a/blogs/community.html +++ b/blogs/community.html @@ -50,7 +50,7 @@ - + @@ -225,7 +225,7 @@ @@ -643,8 +643,6 @@

Additional filters

- - diff --git a/blogs/date.html b/blogs/date.html index 47b4c2bf50..56917a1393 100644 --- a/blogs/date.html +++ b/blogs/date.html @@ -50,7 +50,7 @@ - + @@ -225,7 +225,7 @@ @@ -2268,16 +2268,6 @@

Additional filters

- - - - - - - - - - @@ -3970,7 +3960,7 @@

Additional filters

Post calendar

- +
JanFebMarAprMayJunJulAugSepOctNovDec
2023  3 1 3 2 2 
20223112121311  
20212123112222 1
2020352432321214
2019322222532754
2018523486313442
2017      2 4454
JanFebMarAprMayJunJulAugSepOctNovDec
2023  3 1 3 2 2 
20223112121311  
20212123112222 1
2020352432321214
2019322222532744
2018523486313442
2017      2 4454
@@ -6476,14 +6466,6 @@

December

- - - - - - - - @@ -6520,10 +6502,6 @@

November

📅 12: KubeVirt at KubeCon + CloudNativeCon North America -
  • - 📅 11: Access Virtual Machines' graphic console using noVNC -
  • -
  • 📅 04: KubeVirt v0.23.0
  • diff --git a/blogs/index.html b/blogs/index.html index e32aceadfa..789f2109e1 100644 --- a/blogs/index.html +++ b/blogs/index.html @@ -50,7 +50,7 @@ - + @@ -225,7 +225,7 @@ @@ -436,9 +436,7 @@

    diff --git a/blogs/news.html b/blogs/news.html index cbaa5552ec..cbcd029b51 100644 --- a/blogs/news.html +++ b/blogs/news.html @@ -50,7 +50,7 @@ - + @@ -225,7 +225,7 @@ @@ -974,19 +974,6 @@

    KubeVirt at KubeCon -
  • -

    Access Virtual Machines' graphic console using noVNC

    -
    November 11, 2019
    - Demonstrate how to access virtual machines' graphic console using noVNC. - - - -
  • - - -
  • diff --git a/blogs/page10/index.html b/blogs/page10/index.html index b2c03cdceb..300b1d5e16 100644 --- a/blogs/page10/index.html +++ b/blogs/page10/index.html @@ -50,7 +50,7 @@ - + @@ -225,7 +225,7 @@ @@ -436,9 +436,7 @@

    High Availability -- - - - of 37 + of 36 diff --git a/blogs/page11/index.html b/blogs/page11/index.html index 7ce4c429f2..5cda1a56e9 100644 --- a/blogs/page11/index.html +++ b/blogs/page11/index.html @@ -50,7 +50,7 @@ - + @@ -225,7 +225,7 @@ @@ -436,9 +436,7 @@

    KubeVirt v0.32.0< - - - of 37 + of 36 diff --git a/blogs/page12/index.html b/blogs/page12/index.html index 4c604b1f9d..6af752e266 100644 --- a/blogs/page12/index.html +++ b/blogs/page12/index.html @@ -50,7 +50,7 @@ - + @@ -225,7 +225,7 @@ @@ -436,9 +436,7 @@

    Migrate a sampl - - - of 37 + of 36 diff --git a/blogs/page13/index.html b/blogs/page13/index.html index 077db3361b..61297fa848 100644 --- a/blogs/page13/index.html +++ b/blogs/page13/index.html @@ -50,7 +50,7 @@ - + @@ -225,7 +225,7 @@ @@ -436,9 +436,7 @@

    Kub - - - of 37 + of 36 diff --git a/blogs/page14/index.html b/blogs/page14/index.html index 0d290ad48a..86eae7f7a6 100644 --- a/blogs/page14/index.html +++ b/blogs/page14/index.html @@ -50,7 +50,7 @@ - + @@ -225,7 +225,7 @@ @@ -436,9 +436,7 @@

    KubeVirt v0.27.0< - - - of 37 + of 36 diff --git a/blogs/page15/index.html b/blogs/page15/index.html index 7290d01417..8cd5f9786f 100644 --- a/blogs/page15/index.html +++ b/blogs/page15/index.html @@ -50,7 +50,7 @@ - + @@ -225,7 +225,7 @@ @@ -436,9 +436,7 @@

    diff --git a/blogs/page16/index.html b/blogs/page16/index.html index 28e8998fa2..c164fd5d1c 100644 --- a/blogs/page16/index.html +++ b/blogs/page16/index.html @@ -50,7 +50,7 @@ - + @@ -225,7 +225,7 @@ @@ -436,9 +436,7 @@

    K - - - of 37 + of 36 diff --git a/blogs/page17/index.html b/blogs/page17/index.html index 7ca90b779e..3072b4d55d 100644 --- a/blogs/page17/index.html +++ b/blogs/page17/index.html @@ -50,7 +50,7 @@ - + @@ -225,7 +225,7 @@ @@ -436,9 +436,7 @@

    KubeVirt at KubeCon - - - of 37 + of 36 diff --git a/blogs/page18/index.html b/blogs/page18/index.html index 4d874e4da7..68bb0ceca4 100644 --- a/blogs/page18/index.html +++ b/blogs/page18/index.html @@ -50,7 +50,7 @@ - + @@ -225,7 +225,7 @@ @@ -296,17 +296,6 @@

    Additional filters

    @@ -436,9 +436,7 @@

    Persiste - - - of 37 + of 36 diff --git a/blogs/page19/index.html b/blogs/page19/index.html index 3d10782128..b75e6df6f1 100644 --- a/blogs/page19/index.html +++ b/blogs/page19/index.html @@ -50,7 +50,7 @@ - + @@ -225,7 +225,7 @@ @@ -296,17 +296,6 @@

    Additional filters

    @@ -436,9 +436,7 @@

    KubeVirt v0.21.0< - - - of 37 + of 36 diff --git a/blogs/page2/index.html b/blogs/page2/index.html index b1fc8d0d95..9490f5ca7d 100644 --- a/blogs/page2/index.html +++ b/blogs/page2/index.html @@ -50,7 +50,7 @@ - + @@ -225,7 +225,7 @@ @@ -436,9 +436,7 @@

    KubeVirt Summi - - - of 37 + of 36 diff --git a/blogs/page20/index.html b/blogs/page20/index.html index 13ef04017f..1b31f0771c 100644 --- a/blogs/page20/index.html +++ b/blogs/page20/index.html @@ -50,7 +50,7 @@ - + @@ -225,7 +225,7 @@ @@ -296,17 +296,6 @@

    Additional filters

    @@ -436,9 +436,7 @@

    Node Drain in Ku - - - of 37 + of 36 diff --git a/blogs/page21/index.html b/blogs/page21/index.html index 25b687e6f1..20e95e9aaf 100644 --- a/blogs/page21/index.html +++ b/blogs/page21/index.html @@ -50,7 +50,7 @@ - + @@ -225,7 +225,7 @@ @@ -296,17 +296,6 @@

    Additional filters

    @@ -436,9 +436,7 @@

    KubeVirt v0.18.0< - - - of 37 + of 36 diff --git a/blogs/page22/index.html b/blogs/page22/index.html index a133b5858d..8bfabbd760 100644 --- a/blogs/page22/index.html +++ b/blogs/page22/index.html @@ -50,7 +50,7 @@ - + @@ -225,7 +225,7 @@ @@ -296,17 +296,6 @@

    Additional filters

    @@ -436,9 +436,7 @@

    KubeVirt v0.16.0< - - - of 37 + of 36 diff --git a/blogs/page23/index.html b/blogs/page23/index.html index 11a0ba49fd..35db66d31e 100644 --- a/blogs/page23/index.html +++ b/blogs/page23/index.html @@ -50,7 +50,7 @@ - + @@ -225,7 +225,7 @@ @@ -296,17 +296,6 @@

    Additional filters

    @@ -436,9 +436,7 @@

    An - - - of 37 + of 36 diff --git a/blogs/page24/index.html b/blogs/page24/index.html index 172b1455ae..fb7777c968 100644 --- a/blogs/page24/index.html +++ b/blogs/page24/index.html @@ -50,7 +50,7 @@ - + @@ -225,7 +225,7 @@ @@ -296,17 +296,6 @@

    Additional filters

    @@ -436,9 +436,7 @@

    Kubevirt At - - - of 37 + of 36 diff --git a/blogs/page25/index.html b/blogs/page25/index.html index 4ecf2a4b22..d7de68201d 100644 --- a/blogs/page25/index.html +++ b/blogs/page25/index.html @@ -50,7 +50,7 @@ - + @@ -225,7 +225,7 @@ @@ -296,17 +296,6 @@

    Additional filters

    @@ -436,9 +436,7 @@

    Contain - - - of 37 + of 36 diff --git a/blogs/page26/index.html b/blogs/page26/index.html index 2016d05f9a..8ef23d9d53 100644 --- a/blogs/page26/index.html +++ b/blogs/page26/index.html @@ -50,7 +50,7 @@ - + @@ -225,7 +225,7 @@ @@ -296,17 +296,6 @@

    Additional filters

    @@ -436,9 +436,7 @@

    KubeVirt v0.8.0of 37 + of 36 diff --git a/blogs/page27/index.html b/blogs/page27/index.html index 27afab0b5f..1d49a3fd98 100644 --- a/blogs/page27/index.html +++ b/blogs/page27/index.html @@ -50,7 +50,7 @@ - + @@ -225,7 +225,7 @@ @@ -296,17 +296,6 @@

    Additional filters

    @@ -436,9 +436,7 @@

    Run Istio W - - - of 37 + of 36 diff --git a/blogs/page28/index.html b/blogs/page28/index.html index c4a2f9dd83..49e9e29fc8 100644 --- a/blogs/page28/index.html +++ b/blogs/page28/index.html @@ -50,7 +50,7 @@ - + @@ -225,7 +225,7 @@ @@ -296,17 +296,6 @@

    Additional filters

    @@ -436,9 +436,7 @@

    diff --git a/blogs/page29/index.html b/blogs/page29/index.html index 4babcea34f..ef056a32ba 100644 --- a/blogs/page29/index.html +++ b/blogs/page29/index.html @@ -50,7 +50,7 @@ - + @@ -225,7 +225,7 @@ @@ -296,17 +296,6 @@

    Additional filters

    @@ -436,9 +436,7 @@

    Kubevirt Objectsof 37 + of 36 diff --git a/blogs/page3/index.html b/blogs/page3/index.html index 2f21d818b9..daeec3dd0a 100644 --- a/blogs/page3/index.html +++ b/blogs/page3/index.html @@ -50,7 +50,7 @@ - + @@ -225,7 +225,7 @@ @@ -436,9 +436,7 @@

    diff --git a/blogs/page30/index.html b/blogs/page30/index.html index 67eecf6f72..4282e057a2 100644 --- a/blogs/page30/index.html +++ b/blogs/page30/index.html @@ -50,7 +50,7 @@ - + @@ -225,7 +225,7 @@ @@ -296,17 +296,6 @@

    Additional filters

    @@ -436,9 +436,7 @@

    Kubevirt - - - of 37 + of 36 diff --git a/blogs/page31/index.html b/blogs/page31/index.html index 045a83604f..cff54a9177 100644 --- a/blogs/page31/index.html +++ b/blogs/page31/index.html @@ -50,7 +50,7 @@ - + @@ -225,7 +225,7 @@ @@ -296,17 +296,6 @@

    Additional filters

    @@ -436,9 +436,7 @@

    KubeVirt v0.3.0of 37 + of 36 diff --git a/blogs/page32/index.html b/blogs/page32/index.html index 520c409ac7..637390c3cc 100644 --- a/blogs/page32/index.html +++ b/blogs/page32/index.html @@ -50,7 +50,7 @@ - + @@ -225,7 +225,7 @@ @@ -296,17 +296,6 @@

    Additional filters

    @@ -436,9 +436,7 @@

    diff --git a/blogs/page33/index.html b/blogs/page33/index.html index c9dd439f93..628b30461a 100644 --- a/blogs/page33/index.html +++ b/blogs/page33/index.html @@ -50,7 +50,7 @@ - + @@ -225,7 +225,7 @@ @@ -296,17 +296,6 @@

    Additional filters

    @@ -436,9 +436,7 @@

    Kube Virt v0.1.0 - - - of 37 + of 36 diff --git a/blogs/page34/index.html b/blogs/page34/index.html index 8b60db1326..29df03fc28 100644 --- a/blogs/page34/index.html +++ b/blogs/page34/index.html @@ -50,7 +50,7 @@ - + @@ -225,7 +225,7 @@ @@ -296,17 +296,6 @@

    Additional filters

    @@ -436,9 +436,7 @@

    Kube Virt v0.0.4 - - - of 37 + of 36 diff --git a/blogs/page35/index.html b/blogs/page35/index.html index aada0921cb..c784a826ce 100644 --- a/blogs/page35/index.html +++ b/blogs/page35/index.html @@ -50,7 +50,7 @@ - + @@ -225,7 +225,7 @@ @@ -296,17 +296,6 @@

    Additional filters

    @@ -436,9 +436,7 @@

    This Week - - - of 37 + of 36 diff --git a/blogs/page36/index.html b/blogs/page36/index.html index f1321df862..194e041166 100644 --- a/blogs/page36/index.html +++ b/blogs/page36/index.html @@ -50,7 +50,7 @@ - + @@ -225,7 +225,7 @@ @@ -296,17 +296,6 @@

    Additional filters

    @@ -436,14 +436,12 @@

    Comparing Kub 36 - - - of 37 + of 36 - + Old - + diff --git a/blogs/page37/index.html b/blogs/page37/index.html deleted file mode 100644 index 5fe5825dc2..0000000000 --- a/blogs/page37/index.html +++ /dev/null @@ -1,482 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Blogs | KubeVirt.io - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -

    Blogs

    - - - - - - - - - - - - - - - - - - - diff --git a/blogs/page4/index.html b/blogs/page4/index.html index 7b8df585e7..e6f5a2fccd 100644 --- a/blogs/page4/index.html +++ b/blogs/page4/index.html @@ -50,7 +50,7 @@ - + @@ -225,7 +225,7 @@ @@ -436,9 +436,7 @@

    KubeVirt v0.53.0< - - - of 37 + of 36 diff --git a/blogs/page5/index.html b/blogs/page5/index.html index 2803beb5e8..ac20f71e7d 100644 --- a/blogs/page5/index.html +++ b/blogs/page5/index.html @@ -50,7 +50,7 @@ - + @@ -225,7 +225,7 @@ @@ -436,9 +436,7 @@

    Dedicat - - - of 37 + of 36 diff --git a/blogs/page6/index.html b/blogs/page6/index.html index ecbb1f293e..f7bb75b1c1 100644 --- a/blogs/page6/index.html +++ b/blogs/page6/index.html @@ -50,7 +50,7 @@ - + @@ -225,7 +225,7 @@ @@ -436,9 +436,7 @@

    KubeVirt v0.46.0< - - - of 37 + of 36 diff --git a/blogs/page7/index.html b/blogs/page7/index.html index d1d7ed7b44..c0515408fa 100644 --- a/blogs/page7/index.html +++ b/blogs/page7/index.html @@ -50,7 +50,7 @@ - + @@ -225,7 +225,7 @@ @@ -436,9 +436,7 @@

    Kubernetes Authen - - - of 37 + of 36 diff --git a/blogs/page8/index.html b/blogs/page8/index.html index 10a60fb7ea..5598980ac5 100644 --- a/blogs/page8/index.html +++ b/blogs/page8/index.html @@ -50,7 +50,7 @@ - + @@ -225,7 +225,7 @@ @@ -436,9 +436,7 @@

    diff --git a/blogs/page9/index.html b/blogs/page9/index.html index dc17af4eb5..568707d46d 100644 --- a/blogs/page9/index.html +++ b/blogs/page9/index.html @@ -50,7 +50,7 @@ - + @@ -225,7 +225,7 @@ @@ -436,9 +436,7 @@

    KubeVirt v0.37.0< - - - of 37 + of 36 diff --git a/blogs/releases.html b/blogs/releases.html index df39bc0d71..7071d0488d 100644 --- a/blogs/releases.html +++ b/blogs/releases.html @@ -50,7 +50,7 @@ - + @@ -225,7 +225,7 @@ @@ -864,8 +864,6 @@

    KubeVirt v0.24.0< - -
  • KubeVirt v0.23.0

    November 04, 2019
    diff --git a/blogs/uncategorized.html b/blogs/uncategorized.html index 52ac9a5cfd..33290ec35c 100644 --- a/blogs/uncategorized.html +++ b/blogs/uncategorized.html @@ -50,7 +50,7 @@ - + @@ -225,7 +225,7 @@ @@ -565,8 +565,6 @@

    - + @@ -225,7 +225,7 @@ @@ -578,8 +578,6 @@

    Additional filters

    - - diff --git a/category/community.html b/category/community.html index a5d9908639..ebac28a963 100644 --- a/category/community.html +++ b/category/community.html @@ -50,7 +50,7 @@ - + @@ -225,7 +225,7 @@ diff --git a/category/news.html b/category/news.html index ed7a406a81..9e8e498b0c 100644 --- a/category/news.html +++ b/category/news.html @@ -50,7 +50,7 @@ - + @@ -225,7 +225,7 @@ @@ -426,10 +426,6 @@

    News

    KubeVirt at KubeCon + CloudNativeCon North America
    - - Access Virtual Machines' graphic console using noVNC - - Prow jobs for KubeVirt website and Tutorial repo diff --git a/category/releases.html b/category/releases.html index 6122ec2529..94a49e8f03 100644 --- a/category/releases.html +++ b/category/releases.html @@ -50,7 +50,7 @@ - + @@ -225,7 +225,7 @@ diff --git a/category/uncategorized.html b/category/uncategorized.html index 69e339cff1..0bfcd31231 100644 --- a/category/uncategorized.html +++ b/category/uncategorized.html @@ -50,7 +50,7 @@ - + @@ -225,7 +225,7 @@ diff --git a/category/weekly-updates.html b/category/weekly-updates.html index a859629a08..d11858f48d 100644 --- a/category/weekly-updates.html +++ b/category/weekly-updates.html @@ -50,7 +50,7 @@ - + @@ -225,7 +225,7 @@ diff --git a/client-go/index.html b/client-go/index.html index 026d3d68f7..6b1b804157 100644 --- a/client-go/index.html +++ b/client-go/index.html @@ -50,7 +50,7 @@ - + @@ -225,7 +225,7 @@ diff --git a/cloud-provider-kubevirt/index.html b/cloud-provider-kubevirt/index.html index b3da0c08a9..9c58233329 100644 --- a/cloud-provider-kubevirt/index.html +++ b/cloud-provider-kubevirt/index.html @@ -50,7 +50,7 @@ - + @@ -225,7 +225,7 @@ diff --git a/cluster-api-provider-external/index.html b/cluster-api-provider-external/index.html index 54bcb66831..65b5a793c5 100644 --- a/cluster-api-provider-external/index.html +++ b/cluster-api-provider-external/index.html @@ -50,7 +50,7 @@ - + @@ -225,7 +225,7 @@ diff --git a/community/index.html b/community/index.html index 5112622c03..4c2379d839 100644 --- a/community/index.html +++ b/community/index.html @@ -50,7 +50,7 @@ - + @@ -225,7 +225,7 @@ diff --git a/containerized-data-importer/index.html b/containerized-data-importer/index.html index a6327715b5..8c9d31176d 100644 --- a/containerized-data-importer/index.html +++ b/containerized-data-importer/index.html @@ -50,7 +50,7 @@ - + @@ -225,7 +225,7 @@ diff --git a/controller-lifecycle-operator-sdk/index.html b/controller-lifecycle-operator-sdk/index.html index 63914fe23d..5ee8ff37da 100644 --- a/controller-lifecycle-operator-sdk/index.html +++ b/controller-lifecycle-operator-sdk/index.html @@ -50,7 +50,7 @@ - + @@ -225,7 +225,7 @@ diff --git a/cpu-nfd-plugin/index.html b/cpu-nfd-plugin/index.html index fb380d1a62..5a88a149c9 100644 --- a/cpu-nfd-plugin/index.html +++ b/cpu-nfd-plugin/index.html @@ -50,7 +50,7 @@ - + @@ -225,7 +225,7 @@ diff --git a/docs/index.html b/docs/index.html index 672c0c544a..baff28c669 100644 --- a/docs/index.html +++ b/docs/index.html @@ -50,7 +50,7 @@ - + @@ -225,7 +225,7 @@ diff --git a/feed.xml b/feed.xml index 85c62edaae..858b6f99d1 100644 --- a/feed.xml +++ b/feed.xml @@ -1,4 +1,4 @@ -Jekyll2023-11-30T23:52:13+00:00https://kubevirt.io//feed.xmlKubeVirt.ioVirtual Machine Management on KubernetesAnnouncing KubeVirt v1.12023-11-07T00:00:00+00:002023-11-07T00:00:00+00:00https://kubevirt.io//2023/Announcing-KubeVirt-v1-1The KubeVirt Community is very pleased to announce the release of KubeVirt v1.1. This comes 17 weeks after our celebrated v1.0 release, and follows the predictable schedule we moved to three releases ago to follow the Kubernetes release cadence.

    +Jekyll2023-12-01T15:57:25+00:00https://kubevirt.io//feed.xmlKubeVirt.ioVirtual Machine Management on KubernetesAnnouncing KubeVirt v1.12023-11-07T00:00:00+00:002023-11-07T00:00:00+00:00https://kubevirt.io//2023/Announcing-KubeVirt-v1-1The KubeVirt Community is very pleased to announce the release of KubeVirt v1.1. This comes 17 weeks after our celebrated v1.0 release, and follows the predictable schedule we moved to three releases ago to follow the Kubernetes release cadence.

    You can read the full v1.1 release notes here, but we’ve asked the KubeVirt SIGs to summarize their largest successes, as well as one of the community members from Arm to list their integration accomplishments for this release.

    diff --git a/feed/community.xml b/feed/community.xml index cd2276b5eb..3ec35cc010 100644 --- a/feed/community.xml +++ b/feed/community.xml @@ -1 +1 @@ -Jekyll2023-11-30T23:52:13+00:00https://kubevirt.io//feed/community.xmlKubeVirt.io | CommunityVirtual Machine Management on Kubernetes \ No newline at end of file +Jekyll2023-12-01T15:57:25+00:00https://kubevirt.io//feed/community.xmlKubeVirt.io | CommunityVirtual Machine Management on Kubernetes \ No newline at end of file diff --git a/feed/news.xml b/feed/news.xml index 20014555bd..b84d3db87f 100644 --- a/feed/news.xml +++ b/feed/news.xml @@ -1,4 +1,4 @@ -Jekyll2023-11-30T23:52:13+00:00https://kubevirt.io//feed/news.xmlKubeVirt.io | NewsVirtual Machine Management on KubernetesAnnouncing KubeVirt v1.12023-11-07T00:00:00+00:002023-11-07T00:00:00+00:00https://kubevirt.io//2023/Announcing-KubeVirt-v1-1The KubeVirt Community is very pleased to announce the release of KubeVirt v1.1. This comes 17 weeks after our celebrated v1.0 release, and follows the predictable schedule we moved to three releases ago to follow the Kubernetes release cadence.

    +Jekyll2023-12-01T15:57:25+00:00https://kubevirt.io//feed/news.xmlKubeVirt.io | NewsVirtual Machine Management on KubernetesAnnouncing KubeVirt v1.12023-11-07T00:00:00+00:002023-11-07T00:00:00+00:00https://kubevirt.io//2023/Announcing-KubeVirt-v1-1The KubeVirt Community is very pleased to announce the release of KubeVirt v1.1. This comes 17 weeks after our celebrated v1.0 release, and follows the predictable schedule we moved to three releases ago to follow the Kubernetes release cadence.

    You can read the full v1.1 release notes here, but we’ve asked the KubeVirt SIGs to summarize their largest successes, as well as one of the community members from Arm to list their integration accomplishments for this release.

    diff --git a/feed/releases.xml b/feed/releases.xml index 2ac884dd08..04d8a6be04 100644 --- a/feed/releases.xml +++ b/feed/releases.xml @@ -1,4 +1,4 @@ -Jekyll2023-11-30T23:52:13+00:00https://kubevirt.io//feed/releases.xmlKubeVirt.io | ReleasesVirtual Machine Management on KubernetesKubeVirt v1.1.02023-11-06T00:00:00+00:002023-11-06T00:00:00+00:00https://kubevirt.io//2023/changelog-v1.1.0v1.1.0

    +Jekyll2023-12-01T15:57:25+00:00https://kubevirt.io//feed/releases.xmlKubeVirt.io | ReleasesVirtual Machine Management on KubernetesKubeVirt v1.1.02023-11-06T00:00:00+00:002023-11-06T00:00:00+00:00https://kubevirt.io//2023/changelog-v1.1.0v1.1.0
  • Released on: Mon Nov 6 16:28:56 2023 +0000

    diff --git a/feed/uncategorized.xml b/feed/uncategorized.xml index ad2bfd3555..8460874e62 100644 --- a/feed/uncategorized.xml +++ b/feed/uncategorized.xml @@ -1,4 +1,4 @@ -Jekyll2023-11-30T23:52:13+00:00https://kubevirt.io//feed/uncategorized.xmlKubeVirt.io | UncategorizedVirtual Machine Management on KubernetesMonitoring KubeVirt VMs from the inside2020-12-10T00:00:00+00:002020-12-10T00:00:00+00:00https://kubevirt.io//2020/Monitoring-KubeVirt-VMs-from-the-insideMonitoring KubeVirt VMs from the inside

    +Jekyll2023-12-01T15:57:25+00:00https://kubevirt.io//feed/uncategorized.xmlKubeVirt.io | UncategorizedVirtual Machine Management on KubernetesMonitoring KubeVirt VMs from the inside2020-12-10T00:00:00+00:002020-12-10T00:00:00+00:00https://kubevirt.io//2020/Monitoring-KubeVirt-VMs-from-the-insideMonitoring KubeVirt VMs from the inside

    This blog post will guide you through monitoring KubeVirt Linux-based VirtualMachines with the Prometheus node_exporter. Since node_exporter will run inside the VM and expose metrics at an HTTP endpoint, you can use this same guide to expose custom applications that publish metrics in the Prometheus format.
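    A minimal sketch of that idea, assuming node_exporter is already running inside the guest on its default port 9100, is to query the metrics endpoint from within the VM:

        # Query the node_exporter metrics endpoint from inside the guest (default listen address :9100).
        curl -s http://localhost:9100/metrics | head -n 5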

    diff --git a/feed/updates.xml b/feed/updates.xml index 6e562ff7e3..bda3361647 100644 --- a/feed/updates.xml +++ b/feed/updates.xml @@ -1,4 +1,4 @@ -Jekyll2023-11-30T23:52:13+00:00https://kubevirt.io//feed/updates.xmlKubeVirt.io | UpdatesVirtual Machine Management on KubernetesThis Week In Kube Virt 232018-04-27T00:00:00+00:002018-04-27T00:00:00+00:00https://kubevirt.io//2018/This-Week-in-Kube-Virt-23This is a close-to weekly update from the KubeVirt team.

    +Jekyll2023-12-01T15:57:25+00:00https://kubevirt.io//feed/updates.xmlKubeVirt.io | UpdatesVirtual Machine Management on KubernetesThis Week In Kube Virt 232018-04-27T00:00:00+00:002018-04-27T00:00:00+00:00https://kubevirt.io//2018/This-Week-in-Kube-Virt-23This is a close-to weekly update from the KubeVirt team.

    In general there is now more work happening outside of the core kubevirt repository.

    diff --git a/galleries/2020-01-31-DevConfCZ2020-in-pictures.html b/galleries/2020-01-31-DevConfCZ2020-in-pictures.html index c06e342a24..aebb4ebb65 100644 --- a/galleries/2020-01-31-DevConfCZ2020-in-pictures.html +++ b/galleries/2020-01-31-DevConfCZ2020-in-pictures.html @@ -241,7 +241,7 @@ diff --git a/galleries/2020-02-03-Fosdem2020-communty-presence.html b/galleries/2020-02-03-Fosdem2020-communty-presence.html index 0ae2045768..2298ff3fd7 100644 --- a/galleries/2020-02-03-Fosdem2020-communty-presence.html +++ b/galleries/2020-02-03-Fosdem2020-communty-presence.html @@ -241,7 +241,7 @@ diff --git a/gallery/index.html b/gallery/index.html index d91fdea69d..036d303674 100644 --- a/gallery/index.html +++ b/gallery/index.html @@ -50,7 +50,7 @@ - + @@ -225,7 +225,7 @@ diff --git a/hostpath-provisioner-operator/index.html b/hostpath-provisioner-operator/index.html index ad916b111e..d611989c49 100644 --- a/hostpath-provisioner-operator/index.html +++ b/hostpath-provisioner-operator/index.html @@ -50,7 +50,7 @@ - + @@ -225,7 +225,7 @@ diff --git a/hostpath-provisioner/index.html b/hostpath-provisioner/index.html index 94cf33b94f..54379f0852 100644 --- a/hostpath-provisioner/index.html +++ b/hostpath-provisioner/index.html @@ -50,7 +50,7 @@ - + @@ -225,7 +225,7 @@ diff --git a/index.html b/index.html index cd065dd31c..ec943db859 100644 --- a/index.html +++ b/index.html @@ -50,7 +50,7 @@ - + @@ -226,7 +226,7 @@ diff --git a/kubevirt/index.html b/kubevirt/index.html index bb2a04dcb3..4f53548a2a 100644 --- a/kubevirt/index.html +++ b/kubevirt/index.html @@ -50,7 +50,7 @@ - + @@ -225,7 +225,7 @@ diff --git a/labs/index.html b/labs/index.html index 2ae162b8d3..90caef678d 100644 --- a/labs/index.html +++ b/labs/index.html @@ -50,7 +50,7 @@ - + @@ -225,7 +225,7 @@ diff --git a/labs/kubernetes/lab1.html b/labs/kubernetes/lab1.html index 6dccf0b9e0..d0800b91ad 100644 --- a/labs/kubernetes/lab1.html +++ b/labs/kubernetes/lab1.html @@ -50,7 +50,7 @@ - + @@ -225,7 +225,7 @@ diff --git a/labs/kubernetes/lab2.html b/labs/kubernetes/lab2.html index 788ffc7339..8e9e6cdc80 100644 --- a/labs/kubernetes/lab2.html +++ b/labs/kubernetes/lab2.html @@ -50,7 +50,7 @@ - + @@ -225,7 +225,7 @@ diff --git a/labs/kubernetes/lab3.html b/labs/kubernetes/lab3.html index 686c130668..0483ce6f94 100644 --- a/labs/kubernetes/lab3.html +++ b/labs/kubernetes/lab3.html @@ -50,7 +50,7 @@ - + @@ -225,7 +225,7 @@ diff --git a/labs/kubernetes/migration.html b/labs/kubernetes/migration.html index d253840c7b..3e4e6bff48 100644 --- a/labs/kubernetes/migration.html +++ b/labs/kubernetes/migration.html @@ -50,7 +50,7 @@ - + @@ -225,7 +225,7 @@ diff --git a/machine-remediation/index.html b/machine-remediation/index.html index d7d3b5c1fb..857570e1dd 100644 --- a/machine-remediation/index.html +++ b/machine-remediation/index.html @@ -50,7 +50,7 @@ - + @@ -225,7 +225,7 @@ diff --git a/managed-tenant-quota/index.html b/managed-tenant-quota/index.html index 5b99bffd97..6610ee98e3 100644 --- a/managed-tenant-quota/index.html +++ b/managed-tenant-quota/index.html @@ -50,7 +50,7 @@ - + @@ -225,7 +225,7 @@ diff --git a/node-maintenance-operator/index.html b/node-maintenance-operator/index.html index b704f66bae..87d30abce3 100644 --- a/node-maintenance-operator/index.html +++ b/node-maintenance-operator/index.html @@ -50,7 +50,7 @@ - + @@ -225,7 +225,7 @@ diff --git a/privacy/index.html b/privacy/index.html index 4956053334..ae4c8c7f8f 100644 --- a/privacy/index.html +++ b/privacy/index.html @@ -50,7 +50,7 @@ - + @@ -225,7 +225,7 @@ diff --git 
a/qe-tools/index.html b/qe-tools/index.html index 53165f17ff..b39cec9131 100644 --- a/qe-tools/index.html +++ b/qe-tools/index.html @@ -50,7 +50,7 @@ - + @@ -225,7 +225,7 @@ diff --git a/quickstart_cloud/index.html b/quickstart_cloud/index.html index 92d737a66a..c7d4972bab 100644 --- a/quickstart_cloud/index.html +++ b/quickstart_cloud/index.html @@ -50,7 +50,7 @@ - + @@ -225,7 +225,7 @@ diff --git a/quickstart_kind/index.html b/quickstart_kind/index.html index 55ed4c6336..7dc44ce83e 100644 --- a/quickstart_kind/index.html +++ b/quickstart_kind/index.html @@ -50,7 +50,7 @@ - + @@ -225,7 +225,7 @@ diff --git a/quickstart_minikube/index.html b/quickstart_minikube/index.html index b56e5b6285..8c8ddd8b92 100644 --- a/quickstart_minikube/index.html +++ b/quickstart_minikube/index.html @@ -50,7 +50,7 @@ - + @@ -225,7 +225,7 @@ diff --git a/search.html b/search.html index 7ef7f5ce6f..9e02a76944 100644 --- a/search.html +++ b/search.html @@ -50,7 +50,7 @@ - + @@ -225,7 +225,7 @@ @@ -857,7 +857,7 @@

    "title": "KubeVirt user interface options", "author" : "Alberto Losada Grande, Pedro Ibáñez Requena", "tags" : "octant, okd, openshift console, cockpit, noVNC, user interface, web interface, virtVNC, OKD console", - "body": " The user interface (UI), in the industrial design field of human–computer interaction, is the space where interactions between humans and machines occur. The goal of this interaction is to allow effective operation and control of the machine from the human end, whilst the machine simultaneously feeds back information that aids the operators’ decision-making process. Wikipedia:User Interface In this blogpost we show the results of a research about the different options existing in the market to enable KubeVirt with a user interface to manage, access and control the life cycle of the Virtual Machines inside Kubernetes with KubeVirt. The different UI options available for KubeVirt that we have been checking, at the moment of writing this article, are the following: Octant OKD: The Origin Community Distribution of Kubernetes OpenShift console running on vanilla Kubernetes Cockpit noVNCOctant: As the Octant webpage claims: Octant is an open-source developer-centric web interface for Kubernetes that lets you inspect a Kubernetes cluster and its applications. Octant is a tool for developers to understand how applications run on a Kubernetes cluster. It aims to be part of the developer’s toolkit for gaining insight and approaching complexity found in Kubernetes. Octant offers a combination of introspective tooling, cluster navigation, and object management along with a plugin system to further extend its capabilities. Some of the key features of this tool can be checked in their latest release notes: Resource Viewer: Graphically visualize relationships between objects in a Kubernetes cluster. The status of individual objects is represented by colour to show workload performance. Summary View: Consolidated status and configuration information in a single page aggregated from output typically found using multiple kubectl commands. Port Forward: Forward a local port to a running pod with a single button for debugging applications and even port forward multiple pods across namespaces. Log Stream: View log streams of pod and container activity for troubleshooting or monitoring without holding multiple terminals open. Label Filter: Organize workloads with label filtering for inspecting clusters with a high volume of objects in a namespace. Cluster Navigation: Easily change between namespaces or contexts across different clusters. Multiple kubeconfig files are also supported. Plugin System: Highly extensible plugin system for users to provide additional functionality through gRPC. Plugin authors can add components on top of existing views. We installed it and found out that: Octant provides a very basic dashboard for Kubernetes and it is pretty straightforward to install. It can be installed in your laptop or in a remote server. Regular Kubernetes objects can be seen from the UI. Pod logs can be checked as well. However, mostly everything is in view mode, even the YAML description of the objects. Therefore, as a developer or cluster operator you cannot edit YAML files directly from the UI Custom resources (CRs) and custom resource definitions (CRDs) are automatically detected and shown in the UI. This means that KubeVirt CRs can be viewed from the dashboard. However, VirtualMachines and VirtualMachineInstances cannot be modified from Octant, they can only be deleted. 
There is an option to extend the functionality adding plugins to the dashboard. No specific options to manage KubeVirt workloads have been found. With further work and investigation, it could be an option to develop a specific plugin to enable remote console or VNC access to KubeVirt workloads. OKD: The Origin Community Distribution of Kubernetes: As defined in the official webpage: OKD is a distribution of Kubernetes optimized for continuous application development and multi-tenant deployment. OKD adds developer and operations-centric tools on top of Kubernetes to enable rapid application development, easy deployment and scaling, and long-term lifecycle maintenance for small and large teams. OKD is the upstream Kubernetes distribution embedded in Red Hat OpenShift. OKD embeds Kubernetes and extends it with security and other integrated concepts. OKD is also referred to as Origin in github and in the documentation. An OKD release corresponds to the Kubernetes distribution - for example, OKD 1. 10 includes Kubernetes 1. 10. A few weeks ago Kubernetes distribution OKD4 was released as preview. OKD is the official upstream version of Red Hat’s OpenShift. Since OpenShift includes KubeVirt (Red Hat calls it CNV) as a tech-preview feature since a couple of releases, there is already a lot of integration going on between OKD console and KubeVirt. Note that OKD4 is in preview, which means that only a subset of platforms and functionality will be available until it reaches beta. That being said, we have we found a similar behaviour as testing KubeVirt with OpenShift. We have noticed that from the UI a user can: Install the KubeVirt operator from the operator marketplace. Create Virtual Machines by importing YAML files or following a wizard. The wizard prevents you from moving to the next screen until you provide values in the required fields. Modify the status of the Virtual Machine: stop, start, migrate, clone, edit label, edit annotations, edit CD-ROMs and delete Edit network interfaces. It is possible to add multiple network interfaces to the VM. Add disks to the VM Connect to the VM via serial or VNC console. Edit the YAML object files online. Create VM templates. The web console features an interactive wizard that guides you through the Basic Settings, Networking, and Storage screens to simplify the process of creating virtual machine templates. Check VM events in real time. Gather metrics and utilization of the VM. Pretty much everything you can do with KubeVirt from the command line. One of the drawbacks is that the current KubeVirt HCO operator contains KubeVirt version 0. 18. 1, which is quite outdated. Note that last week version 0. 24 of KubeVirt was released. Using such an old release could cause some issues when creating VMs using newer container disk images. For instance, we have not been able to run the latest Fedora cloud container disk image and instead we were forced to use the one tagged as v0. 18. 1 which matches the version of KubeVirt deployed. If for any reason there is a need to deploy the latest version, it can be done by running the following script which applies directly the HCO operator: unreleased bundles using the hco without marketplace. Note that in this case automatic updates to KubeVirt are not triggered or advised automatically in OKD as it happens with the operator. OpenShift console (bridge): There is actually a KubeVirt Web User Interface, however the standalone project was deprecated in favor of OpenShift Console where it is included as a plugin. 
As we reviewed previously the OpenShift web console is just another piece inside OKD. It is an independent part and, as it is stated in their official GitHub repository, it can run on top of native Kubernetes. OpenShift Console a. k. a bridge is defined as: a friendly kubectl in the form of a single page web application. It also integrates with other services like monitoring, chargeback, and OLM. Some things that go on behind the scenes include: Proxying the Kubernetes API under /api/kubernetes Providing additional non-Kubernetes APIs for interacting with the cluster Serving all frontend static assets User AuthenticationThen, as briefly explained in their repository, our Kubernetes cluster can be configured to run the OpenShift Console and leverage its integrations with KubeVirt. Features related to KubeVirt are similar to the ones found in the OKD installation except: KubeVirt installation is done using the Hyperconverged Cluster Operator (HCO) without OLM or Marketplace instead of the KubeVirt operator. Therefore, available updates to KubeVirt are not triggered or advised automatically Virtual Machines objects can only be created from YAML. Although the wizard dialog is still available in the console, it does not function properly because it uses specific OpenShift objects under the hood. These objects are not available in our native Kubernetes deployment. Connection to the VM via serial or VNC console is flaky. VM templates can only be created from YAML. The wizard dialog is based on OpenShift templates. Note that the OpenShift console documentation briefly points out how to integrate the OpenShift console with a native Kubernetes deployment. It is uncertain if it can be installed in any other Kubernetes cluster. Cockpit: When testing cockpit in a CentOS 7 server with a Kubernetes cluster and KubeVirt we have realised that some of the containers/k8s features have to be enabled installing extra cockpit packages: To see the containers and images the package cockpit-docker has to be installed, then a new option called containers appears in the menu. To see the k8s cluster the package cockpit-kubernetes has to be installed and a new tab appears in the left menu. The new options allow you to: Overview: filtering by project, it shows Pods, volumes, Nodes, services and resources used. Nodes: nodes and the resources used are being shown here. Containers: a full list of containers and some metadata about them is displayed in this option. Topology: A graph with the pods, services and nodes is shown in this option. Details: allows to filter by project and type of resource and shows some metadata in the results. Volumes: allows to filter by project and shows the volumes with the type and the status. In CentOS 7 there are also the following packages: cockpit-machines. x86_64 : Cockpit user interface for virtual machines. If “virt-install” is installed, you can also create new virtual machines. It adds a new option in the main menu called Virtual Machines but it uses libvirt and is not KubeVirt related. cockpit-machines-ovirt. noarch : Cockpit user interface for oVirt virtual machines, like the package above but with support for ovirt. At the moment none of the cockpit complements has support for KubeVirt Virtual Machine. KubeVirt support for cockpit was removed from fedora 29 noVNC: noVNC is a JavaScript VNC client using WebSockets and HTML5 Canvas. It just allows you to connect through VNC to the virtual Machine already deployed in KubeVirt. 
No VM management or even a dashboard is enabled with this option, it’s a pure DIY code that can embed the VNC access to the VM into HTML in any application or webpage. There is a noVNC blogpost detailing how to install noVNC. In this animation you can see the feature of connecting to the Virtual Machine with noVNC: Summary: From the different options we have investigated, we can conclude that OpenShift Console along with OKD Kubernetes distribution provides a powerful way to manage and control our KubeVirt objects. From the user interface, a developer or operator can do pretty much everything you do in the command line. Additionally, users can create custom reusable templates to deploy their virtual machines with specific requirements. Wizard dialogs are provided as well in order to guide new users during the creation of their VMs. OpenShift Console can also be considered as an interesting option in case your KubeVirt installation is running on a native Kubernetes cluster. On the other hand, noVNC provides a lightweight interface to simply connect to the console of your virtual machine. Octant, although it does not have any specific integration with KubeVirt, looks like a promising Kubernetes user interface that could be extended to manage our KubeVirt instances in the future. Note We encourage our readers to let us know of user interfaces that can be used to manage our KubeVirt virtual machines. Then, we can include them in this list References: Octant OKD OKD Console Cockpit virtVNC, noVNC for Kubevirt" + "body": " The user interface (UI), in the industrial design field of human–computer interaction, is the space where interactions between humans and machines occur. The goal of this interaction is to allow effective operation and control of the machine from the human end, whilst the machine simultaneously feeds back information that aids the operators’ decision-making process. Wikipedia:User Interface In this blogpost we show the results of a research about the different options existing in the market to enable KubeVirt with a user interface to manage, access and control the life cycle of the Virtual Machines inside Kubernetes with KubeVirt. The different UI options available for KubeVirt that we have been checking, at the moment of writing this article, are the following: Octant OKD: The Origin Community Distribution of Kubernetes OpenShift console running on vanilla Kubernetes Cockpit noVNCOctant: As the Octant webpage claims: Octant is an open-source developer-centric web interface for Kubernetes that lets you inspect a Kubernetes cluster and its applications. Octant is a tool for developers to understand how applications run on a Kubernetes cluster. It aims to be part of the developer’s toolkit for gaining insight and approaching complexity found in Kubernetes. Octant offers a combination of introspective tooling, cluster navigation, and object management along with a plugin system to further extend its capabilities. Some of the key features of this tool can be checked in their latest release notes: Resource Viewer: Graphically visualize relationships between objects in a Kubernetes cluster. The status of individual objects is represented by colour to show workload performance. Summary View: Consolidated status and configuration information in a single page aggregated from output typically found using multiple kubectl commands. Port Forward: Forward a local port to a running pod with a single button for debugging applications and even port forward multiple pods across namespaces. 
Log Stream: View log streams of pod and container activity for troubleshooting or monitoring without holding multiple terminals open. Label Filter: Organize workloads with label filtering for inspecting clusters with a high volume of objects in a namespace. Cluster Navigation: Easily change between namespaces or contexts across different clusters. Multiple kubeconfig files are also supported. Plugin System: Highly extensible plugin system for users to provide additional functionality through gRPC. Plugin authors can add components on top of existing views. We installed it and found out that: Octant provides a very basic dashboard for Kubernetes and it is pretty straightforward to install. It can be installed in your laptop or in a remote server. Regular Kubernetes objects can be seen from the UI. Pod logs can be checked as well. However, mostly everything is in view mode, even the YAML description of the objects. Therefore, as a developer or cluster operator you cannot edit YAML files directly from the UI Custom resources (CRs) and custom resource definitions (CRDs) are automatically detected and shown in the UI. This means that KubeVirt CRs can be viewed from the dashboard. However, VirtualMachines and VirtualMachineInstances cannot be modified from Octant, they can only be deleted. There is an option to extend the functionality adding plugins to the dashboard. No specific options to manage KubeVirt workloads have been found. With further work and investigation, it could be an option to develop a specific plugin to enable remote console or VNC access to KubeVirt workloads. OKD: The Origin Community Distribution of Kubernetes: As defined in the official webpage: OKD is a distribution of Kubernetes optimized for continuous application development and multi-tenant deployment. OKD adds developer and operations-centric tools on top of Kubernetes to enable rapid application development, easy deployment and scaling, and long-term lifecycle maintenance for small and large teams. OKD is the upstream Kubernetes distribution embedded in Red Hat OpenShift. OKD embeds Kubernetes and extends it with security and other integrated concepts. OKD is also referred to as Origin in github and in the documentation. An OKD release corresponds to the Kubernetes distribution - for example, OKD 1. 10 includes Kubernetes 1. 10. A few weeks ago Kubernetes distribution OKD4 was released as preview. OKD is the official upstream version of Red Hat’s OpenShift. Since OpenShift includes KubeVirt (Red Hat calls it CNV) as a tech-preview feature since a couple of releases, there is already a lot of integration going on between OKD console and KubeVirt. Note that OKD4 is in preview, which means that only a subset of platforms and functionality will be available until it reaches beta. That being said, we have we found a similar behaviour as testing KubeVirt with OpenShift. We have noticed that from the UI a user can: Install the KubeVirt operator from the operator marketplace. Create Virtual Machines by importing YAML files or following a wizard. The wizard prevents you from moving to the next screen until you provide values in the required fields. Modify the status of the Virtual Machine: stop, start, migrate, clone, edit label, edit annotations, edit CD-ROMs and delete Edit network interfaces. It is possible to add multiple network interfaces to the VM. Add disks to the VM Connect to the VM via serial or VNC console. Edit the YAML object files online. Create VM templates. 
The web console features an interactive wizard that guides you through the Basic Settings, Networking, and Storage screens to simplify the process of creating virtual machine templates. Check VM events in real time. Gather metrics and utilization of the VM. Pretty much everything you can do with KubeVirt from the command line. One of the drawbacks is that the current KubeVirt HCO operator contains KubeVirt version 0. 18. 1, which is quite outdated. Note that last week version 0. 24 of KubeVirt was released. Using such an old release could cause some issues when creating VMs using newer container disk images. For instance, we have not been able to run the latest Fedora cloud container disk image and instead we were forced to use the one tagged as v0. 18. 1 which matches the version of KubeVirt deployed. If for any reason there is a need to deploy the latest version, it can be done by running the following script which applies directly the HCO operator: unreleased bundles using the hco without marketplace. Note that in this case automatic updates to KubeVirt are not triggered or advised automatically in OKD as it happens with the operator. OpenShift console (bridge): There is actually a KubeVirt Web User Interface, however the standalone project was deprecated in favor of OpenShift Console where it is included as a plugin. As we reviewed previously the OpenShift web console is just another piece inside OKD. It is an independent part and, as it is stated in their official GitHub repository, it can run on top of native Kubernetes. OpenShift Console a. k. a bridge is defined as: a friendly kubectl in the form of a single page web application. It also integrates with other services like monitoring, chargeback, and OLM. Some things that go on behind the scenes include: Proxying the Kubernetes API under /api/kubernetes Providing additional non-Kubernetes APIs for interacting with the cluster Serving all frontend static assets User AuthenticationThen, as briefly explained in their repository, our Kubernetes cluster can be configured to run the OpenShift Console and leverage its integrations with KubeVirt. Features related to KubeVirt are similar to the ones found in the OKD installation except: KubeVirt installation is done using the Hyperconverged Cluster Operator (HCO) without OLM or Marketplace instead of the KubeVirt operator. Therefore, available updates to KubeVirt are not triggered or advised automatically Virtual Machines objects can only be created from YAML. Although the wizard dialog is still available in the console, it does not function properly because it uses specific OpenShift objects under the hood. These objects are not available in our native Kubernetes deployment. Connection to the VM via serial or VNC console is flaky. VM templates can only be created from YAML. The wizard dialog is based on OpenShift templates. Note that the OpenShift console documentation briefly points out how to integrate the OpenShift console with a native Kubernetes deployment. It is uncertain if it can be installed in any other Kubernetes cluster. Cockpit: When testing cockpit in a CentOS 7 server with a Kubernetes cluster and KubeVirt we have realised that some of the containers/k8s features have to be enabled installing extra cockpit packages: To see the containers and images the package cockpit-docker has to be installed, then a new option called containers appears in the menu. To see the k8s cluster the package cockpit-kubernetes has to be installed and a new tab appears in the left menu. 
The new options allow you to: Overview: filtering by project, it shows Pods, volumes, Nodes, services and resources used. Nodes: nodes and the resources used are being shown here. Containers: a full list of containers and some metadata about them is displayed in this option. Topology: A graph with the pods, services and nodes is shown in this option. Details: allows to filter by project and type of resource and shows some metadata in the results. Volumes: allows to filter by project and shows the volumes with the type and the status. In CentOS 7 there are also the following packages: cockpit-machines. x86_64 : Cockpit user interface for virtual machines. If “virt-install” is installed, you can also create new virtual machines. It adds a new option in the main menu called Virtual Machines but it uses libvirt and is not KubeVirt related. cockpit-machines-ovirt. noarch : Cockpit user interface for oVirt virtual machines, like the package above but with support for ovirt. At the moment none of the cockpit complements has support for KubeVirt Virtual Machine. KubeVirt support for cockpit was removed from fedora 29 noVNC: noVNC is a JavaScript VNC client using WebSockets and HTML5 Canvas. It just allows you to connect through VNC to the virtual Machine already deployed in KubeVirt. No VM management or even a dashboard is enabled with this option, it’s a pure DIY code that can embed the VNC access to the VM into HTML in any application or webpage. Summary: From the different options we have investigated, we can conclude that OpenShift Console along with OKD Kubernetes distribution provides a powerful way to manage and control our KubeVirt objects. From the user interface, a developer or operator can do pretty much everything you do in the command line. Additionally, users can create custom reusable templates to deploy their virtual machines with specific requirements. Wizard dialogs are provided as well in order to guide new users during the creation of their VMs. OpenShift Console can also be considered as an interesting option in case your KubeVirt installation is running on a native Kubernetes cluster. On the other hand, noVNC provides a lightweight interface to simply connect to the console of your virtual machine. Octant, although it does not have any specific integration with KubeVirt, looks like a promising Kubernetes user interface that could be extended to manage our KubeVirt instances in the future. Note We encourage our readers to let us know of user interfaces that can be used to manage our KubeVirt virtual machines. Then, we can include them in this list References: Octant OKD OKD Console Cockpit virtVNC, noVNC for Kubevirt" }, { "id": 79, "url": "/2019/KubeVirt_lab2_experiment_with_cdi.html", @@ -902,1187 +902,1173 @@

    "body": "The KubeCon + CloudNativeCon North America 2019conference is next week in San Diego, California. KubeVirt will have a presence at the event and this post highlights someactivities that will have a KubeVirt focus there. Sessions: There are two sessions covering KubeVirt specifically: On Tuesday at 2:25 PM Chandrakanth Jakkidi and Steve Gordon will present anintroduction to KubeVirt that will cover thebackground of the project, its motivation and use cases, and an architecturaloverview and demo. On Wednesday at 10:55 AM David Vossel and Vishesh Tanksale will take a deep-diveon virtualized GPU workloads on KubeVirt where theywill show KubeVirt’s capabilities around host device passthrough using NVIDIAGPU workloads as a case study. Users and Contributors gathering: KubeVirt users and contributors will get together to talk about KubeVirt,brainstorm ideas to help us shape the project’s next steps, and generally getsome face to face time. If you are already using or contributing to KubeVirt, you are considering to tryit, or just want to present your use case and discuss KubeVirt’s fit or needswe’d be very glad to meet you there. Red Hat is sponsoring a venue for the meetup right next to the conference’svenue. Space is limited, so we are asking people to register inadvance. Demos: KubeVirt will also be featured in a couple of demos at the Red Hat booth inthe exposition hall. You can find the demo schedule at their event landing page. Keeping in touch: Follow @kubevirt on Twitter for updates. We look forward to seeing you at KubeCon + CloudNativeCon! " }, { "id": 85, - "url": "/2019/Access-Virtual-Machines-graphic-console-using-noVNC.html", - "title": "Access Virtual Machines' graphic console using noVNC", - "author" : "Yafei Bao", - "tags" : "noVNC, console", - "body": "Introduction: NoVNC is a JavaScript VNC client using WebSockets and HTML5 Canvas. We provide websocket api for VNC access under APISERVER:/apis/subresources. kubevirt. io/v1alpha3/namespaces/NAMESPACE/virtualmachineinstances/VM/vncbut we can not access the VNC api directly since authorization is needed. In order to solve the problem, we provide a component using kubectl proxy to provide a authorized vnc acess, we name this Component virtVNC. In this post we are going to show how to do this in detail. The detailed method: Prepare Docker Image: First prepare docker build dicrectory. mkdir -p virtvnc/staticThen clone noVNC files from github. git clone https://github. com/novnc/noVNCAnd then copy noVNC files to docker build directory. cp noVNC/app virtvnc/static/cp noVNC/core virtvnc/static/cp noVNC/vender virtvnc/static/cp noVNC/*. html virtvnc/static/Create a file index. html to virtvnc/static/ with the following content. The page will display VMs and corresponding VNC links. <html> <meta charset= utf-8 > <style> td { padding: 5px; } . button { background-color: white; border: 2px solid black; color: black; padding: 5px; text-align: center; text-decoration: none; display: inline-block; font-size: 16px; -webkit-transition-duration: 0. 4s; transition-duration: 0. 4s; } . button:hover{ background-color: black; color: white; cursor: pointer; } button[disabled] { opacity: . 65; } button[disabled]:hover { color: black; background: white; } </style> <!-- Promise polyfill for IE11 --> <script src= vendor/promise. js ></script> <!-- ES2015/ES6 modules polyfill --> <script nomodule src= vendor/browser-es-module-loader/dist/browser-es-module-loader. 
js ></script> <script type= module crossorigin= anonymous > import * as WebUtil from . /app/webutil. js ; const apiPrefix='k8s/apis' function loadVMI(namespace) { WebUtil. fetchJSON('/' + apiPrefix + '/kubevirt. io/v1alpha3/namespaces/' + namespace + '/virtualmachineinstances/') . then((resp) => { let vmis = []; resp. items. forEach(i => { let tr = document. createElement('tr'); tr. innerHTML= <td> + i. metadata. name + </td><td> + String(i. status. phase) + </td><td> + String(i. status. interfaces !== undefined ? i. status. interfaces[0]. ipAddress : '') + </td><td> + String(i. status. nodeName !== undefined ? i. status. nodeName : '') + </td><td><button class='button' + String(i. status. phase == Running ? : disabled ) + onclick=\ window. open('vnc_lite. html?path= + apiPrefix + /subresources. kubevirt. io/v1alpha3/namespaces/ + namespace + /virtualmachineinstances/ + i. metadata. name + /vnc', 'novnc_window', 'resizable=yes,toolbar=no,location=no,status=no,scrollbars=no,menubar=no,width=1030,height=800')\ >VNC</button></td> ; document. getElementById( vmis ). appendChild(tr); }); if (resp. items. length === 0) { document. body. append( No virtual machines in the namespace. ); } }) . catch(err => console. log( Failed to get vmis: + err)); } let namespace = WebUtil. getQueryVar('namespace', 'default'); loadVMI(namespace); </script> </meta> <body> <table><tbody id= vmis > </tbody></table> </body></html>Create dockerfile with following content to add static html files and set up kubectl proxy command line args. FROM quay. io/bitnami/kubectl:1. 15ADD static /staticCMD [ proxy , --www=/static , --accept-hosts=^. *$ , --address=[::] , --api-prefix=/k8s/ , --www-prefix= ]Finally use docker build to build docker image. cd virtvncdocker build -t quay. io/samblade/virtvnc:v0. 1 . Setting Up RBAC: Create a service account for virtvnc. apiVersion: v1kind: ServiceAccountmetadata: name: virtvnc namespace: kubevirtThen define cluster role for kubevirt, setting up permissions needed. kind: ClusterRoleapiVersion: rbac. authorization. k8s. io/v1metadata: name: virtvncrules: - apiGroups: - subresources. kubevirt. io resources: - virtualmachineinstances/console - virtualmachineinstances/vnc verbs: - get - apiGroups: - kubevirt. io resources: - virtualmachines - virtualmachineinstances - virtualmachineinstancepresets - virtualmachineinstancereplicasets - virtualmachineinstancemigrations verbs: - get - list - watchAnd then binding cluster role to service accout. kind: ClusterRoleBindingapiVersion: rbac. authorization. k8s. io/v1metadata: name: virtvncsubjects: - kind: ServiceAccount name: virtvnc namespace: kubevirtroleRef: kind: ClusterRole name: virtvnc apiGroup: rbac. authorization. k8s. ioDeploy to kubernetes: Create following yaml, and then apply to kubernetes to setup virtvnc deployment. apiVersion: extensions/v1beta1kind: Deploymentmetadata: name: virtvnc namespace: kubevirtspec: replicas: 1 selector: matchLabels: app: virtvnc template: metadata: labels: app: virtvnc spec: serviceAccountName: virtvnc nodeSelector: node-role. kubernetes. io/master: tolerations: - key: node-role. kubernetes. io/master operator: Equal value: effect: NoSchedule containers: - name: virtvnc image: quay. io/samblade/virtvnc:v0. 1 livenessProbe: httpGet: port: 8001 path: / scheme: HTTP failureThreshold: 30 initialDelaySeconds: 30 periodSeconds: 10 successThreshold: 1 timeoutSeconds: 5Expose a NodePort service, then we can access the web page from node network. 
apiVersion: v1kind: Servicemetadata: labels: app: virtvnc name: virtvnc namespace: kubevirtspec: ports: - port: 8001 protocol: TCP targetPort: 8001 selector: app: virtvnc type: NodePortNote This will make all your virtual machines vnc & console accessible to node network. ** The Simple Way: In this github repo and registry you’ll find a ready to use version of the above which you can deploy in a single command like this: kubectl apply -f https://github. com/wavezhang/virtVNC/raw/master/k8s/virtvnc. yamlAccess VNC: First get node port of virtvnc service. kubectl get svc -n kubevirt virtvncThen visit the following url in browser: http://NODEIP:NODEPORT/If you want manage virtual machines in other namespace, you can specify namespace using query param namespace like following: http://NODEIP:NODEPORT/?namespace=test References: Embedding and Deploying noVNC Application Kubevirt Api Access Control Use an HTTP Proxy to Access the Kubernetes API" - }, { - "id": 86, "url": "/2019/changelog-v0.23.0.html", "title": "KubeVirt v0.23.0", "author" : "kube🤖", "tags" : "release notes, changelog", "body": "v0. 23. 0: Released on: Mon Nov 4 16:42:54 2019 +0100 Guest OS Information is available under the VMI status now Updated to Go 1. 12. 8 and latest bazel Updated go-yaml to v2. 2. 4, which has a ddos vulnerability fixed Cleaned up and fixed CRD scheme registration Several bugfixes Many CI improvements (e. g. more logs in case of test failures)" }, { - "id": 87, + "id": 86, "url": "/2019/prow-jobs-for-kubevirt.html", "title": "Prow jobs for KubeVirt website and Tutorial repo", "author" : "Pablo Iranzo Gómez", "tags" : "prow, infrastructure, kubevirt-tutorial, CI-CD, continuous integration, community", "body": "Introduction: Prow is a Kubernetes based CI/CD system that has several types of jobs and is used at KubeVirt project. General PR’s, etc are tested by Prow to be validated to be reviewed by doing some sanity checks defined by developers. In general, the internals on how it works can be checked at Life of a Prow Job. Community repositories: There are other repos (involved in the project ecosystem) that have tests to validate the information provided on them. The community repositories include: KubeVirt website KubeVirt tutorial Katacoda Scenarios Community repo Cloud Image BuilderThose repos contain useful information for new users, like the try-it scenarios, the Laboratories, Katacoda scenarios, Community supporting files (like logos, proposals, etc). The jobs: For each repo we’ve some types of jobs: periodical: Run automatically to validate that the repo, without further changes is still working (for example, detecting broken URL’s). presubmit: Validates that the incoming PR will not break the environment. post-submit: After merging the PR, the repo is still working. Jobs are defined in the project-infra repository, for example: https://github. com/kubevirt/project-infra/blob/master/github/ci/prow-deploy/files/jobs/kubevirt/kubevirt-tutorial/kubevirt-tutorial-periodics. yaml https://github. com/kubevirt/project-infra/blob/master/github/ci/prow-deploy/files/jobs/kubevirt/kubevirt-tutorial/kubevirt-tutorial-presubmits. yamlThose jobs define the image to use (image and tag), and the commands to execute. In the examples above we’re using ‘Docker-in-Docker’ (dind) images and we’re targetting the KubeVirt-tutorial repository. 
KubeVirt-tutorial: The jobs, when executed as part of the Prow workflow, run the commands defined in the repo itself, for example for kubevirt-tutorial check the following folder: https://github. com/kubevirt/kubevirt-tutorial/tree/master/hackThat folder contains three scripts: build, test_lab and tests, which do setup the environment for running the validations, that is: install required software on top of the used images. prepare the scripts to be executed via mdsh which extracts markdown from lab files to be executed against the cluster setup by Prow (using dind). Run each script and report statusOnce the execution has finished, if the final status is ok, the status is reported back to the GitHub PR so that it can be reviewed by mantainers of the repo. Job status: The jobs executed and the logs are available on the Prow instance we use, for example: https://KubeVirt. io Pre-submit link checker: https://prow. apps. ovirt. org/?job=kubevirt-io-presubmit-link-checker Periodical link checker: https://prow. apps. ovirt. org/?job=kubevirt-io-periodic-link-checker KubeVirt Tutorial Pre-submit: https://prow. apps. ovirt. org/?job=kubevirt-tutorial-presubmit-lab-testing-k8s-1. 13. 3 Periodical: https://prow. apps. ovirt. org/?job=periodic-kubevirt-tutorial-lab-testing Wrap-up: If you find that a test should be performed to further validate the integrity and information provided, feel free to raise issues or even a PR against the project-infra repository so that we can get it improved! " }, { - "id": 88, + "id": 87, "url": "/2019/jenkins-jobs-for-kubevirt-lab-validation.html", "title": "Jenkins Jobs for KubeVirt lab validation", "author" : "Pablo Iranzo Gómez", "tags" : "prow, infrastructure, kubevirt-tutorial, CI-CD, continuous integration, jenkins", "body": "Introduction: Jenkins is an open-source automation server that allows to define jobs, triggers, etc to validate that certain conditions are met. Jobs can run either after a trigger has been received (for example from a repo merge or PR), periodically to validate that a previous ‘validated’ job is still ‘valid’ or even manually to force refresh of information. Community repositories: Outside of the main KubeVirt binaries, there are other repos that are involved in the project ecosystem have tests to validate the information provided on them. The community repositories include: KubeVirt website KubeVirt tutorial Katacoda Scenarios Community repo Cloud Image BuilderThose repos contain useful information for new users, like the try-it scenarios, the Laboratories, Katacoda scenarios, Community supporting files (like logos, proposals, etc). The jobs: Our Jenkins instance is hosted at CentOS OpenShift instance and it’s available at https://jenkins-kubevirt. apps. ci. centos. org/ There, we’ve two jobs we’re currently refining to get better results: Cloud Image Builder / https://jenkins-kubevirt. apps. ci. centos. org/job/cloud-image-builder, which builds, according to the repo defined above contents what the AWS, GCP and Minikube images contain (binaries, KubeVirt version, Minikube version). The resulting AWS images / https://jenkins-kubevirt. apps. ci. centos. org/job/cloud-image-builder/job/master/lastSuccessfulBuild/artifact/new-images. json are copied to each region. The resulting GCP images are also copied The resulting Minikube image is used for lab validation Lab Validation / https://jenkins-kubevirt. apps. ci. centos. org/job/Lab%20Validation/ which uses above created images with the contents of the /tests folder at Kubevirt. github. 
io repository to spin up instances and validate that the contents of the labs are validBoth tests can be executed periodically (by default each day), causing a repository rescan to detect new changes and later validation of them and only on branch master. If you’re curious about what Jenkins does, check the file JenkinsFile at the root of each repository: https://github. com/kubevirt/kubevirt. github. io/blob/abd315b2bcdabd2effa71fd3e6af1207d8fcbf42/Jenkinsfile https://github. com/kubevirt/cloud-image-builder/blob/master/JenkinsfileBoth of them define pipelines so that runs can be executed in parallel for each one of the environments: GCP, AWS, Minikube. Cloud Image Builder: Cloud Image Builder has primarily two steps: build publishBuild takes most of the logic, as it prepares virtctl and kubectl binaries and then for each one of the environments it executes the required ansible playbooks: ${environment}-provision. yml: Which creates the VM instance on the provider (for Minikube, it’s a VM inside GCP) ${environment}-setup. yml: Which configures the VM instance (repositories, packages, first-boot script, KubeVirt installation, virtctl binaries, etc) ${environment}-mkimage. yml: Which creates an image out of the instance generated by steps above ${environment}-publish. yml: Which, for GCP and AWS, publishes the generated image in above stepOnce the images have been published, the jobs end and instances are removed from the providers. Lab Validation: Lab Validation is meant to check that the labs described in the website are working on the three platforms (GCE, AWS, Minikube). In opposition to KubeVirt Tutorial, it doesn’t use mdsh yet for extracting the actual commands out of the lab text and uses ansible playbooks to imitate the lab execution: https://github. com/kubevirt/kubevirt. github. io/blob/abd315b2bcdabd2effa71fd3e6af1207d8fcbf42/tests/ansible/lab1. yml https://github. com/kubevirt/kubevirt. github. io/blob/abd315b2bcdabd2effa71fd3e6af1207d8fcbf42/tests/ansible/lab2. ymlIn addition, it contains files for also setting up the instance for running the tests (${environment}-provision. yml) and doing the later cleanup ($environment-cleanup. yml). The first playbook, does create a new instance on the environment being checked using the images created by the Cloud Image Builder, so this means that not only the labs are validated, but also the images generated are validated to detect possible defects like missing binaries, wrongly installed software, etc. The biggest part on the lab validation is with the lab. sh script, which accepts the lab being executed and the environment as parameters, and takes care of provisioning the instance, run the lab and perform cleanup afterwards. " }, { - "id": 89, + "id": 88, "url": "/2019/KubeVirt_storage_rook_ceph.html", "title": "Persistent storage of your Virtual Machines in KubeVirt with Rook", "author" : "Pedro Ibáñez Requena", "tags" : "rook, ceph, ntp, chronyd", "body": " Introduction: Quoting Wikipedia: In computer science, persistence refers to the characteristic of state that outlives the processthat created it. This is achieved in practice by storing the state as data in computer data storage. Programs have to transfer data to and from storage devices and have to provide mappings from thenative programming-language data structures to the storage device data structures. In this post, we are going to show how to set up a persistence system to store VM images with the help of Ceph and the automation of Rook. 
Pre-requisites: Some prerequisites have to be met: An existent Kubernetes cluster with 3 masters and 1 worker (min) is already set up, it’s not mandatory, but allows to demonstrate an example of a HA Ceph installation. Each Kubernetes node has an extra empty disk connected (has to be blank with no filesystem). KubeVirt is already installed and running. In this example the following systems names and IP addresses are used: System Purpose IP kv-master-00 Kubernetes Master node 00 192. 168. 122. 6 kv-master-01 Kubernetes Master node 01 192. 168. 122. 106 kv-master-02 Kubernetes Master node 02 192. 168. 122. 206 kv-worker-00 Kubernetes Worker node 00 192. 168. 122. 222 For being able to import Virtual Machines, the KubeVirt CDI has to be configured too. Containerized-Data-Importer (CDI) is a persistent storage management add-on for Kubernetes. Its primary goal is to provide a declarative way to build Virtual Machine Disks on PVCs for KubeVirt VMs. CDI works with standard core Kubernetes resources and is storage device-agnostic, while its primary focus is to build disk images for Kubevirt, it’s also useful outside of a KubeVirt context to use for initializing your Kubernetes Volumes with data. In the case your cluster doesn’t have CDI, the following commands will cover CDI operator and the CR setup: [root@kv-master-00 ~]# export VERSION=$(curl -s https://github. com/kubevirt/containerized-data-importer/releases/latest | grep -o v[0-9]\+\. [0-9]*\. [0-9]* )[root@kv-master-00 ~]# kubectl create -f https://github. com/kubevirt/containerized-data-importer/releases/download/$VERSION/cdi-operator. yamlnamespace/cdi createdcustomresourcedefinition. apiextensions. k8s. io/cdis. cdi. kubevirt. io createdconfigmap/cdi-operator-leader-election-helper createdserviceaccount/cdi-operator createdclusterrole. rbac. authorization. k8s. io/cdi-operator-cluster createdclusterrolebinding. rbac. authorization. k8s. io/cdi-operator createddeployment. apps/cdi-operator createdcontainerized-data-importer/releases/download/$VERSION/cdi-operator. yaml[root@kv-master-00 ~]# kubectl create -f https://github. com/kubevirt/containerized-data-importer/releases/download/$VERSION/cdi-cr. yamlcdi. cdi. kubevirt. io/cdi createdThe nodes of the cluster have to be time synchronized This should have been done for you by chronyd It can’t harm to do it again: [root@kv-master-00 ~]# for i in $(echo 6 106 206 222); do ssh -oStrictHostKeyChecking=no \ root@192. 168. 122. $i sudo chronyc -a makestep; doneWarning: Permanently added '192. 168. 122. 6' (ECDSA) to the list of known hosts. 200 OKWarning: Permanently added '192. 168. 122. 106' (ECDSA) to the list of known hosts. 200 OKWarning: Permanently added '192. 168. 122. 206' (ECDSA) to the list of known hosts. 200 OKWarning: Permanently added '192. 168. 122. 222' (ECDSA) to the list of known hosts. 200 OKThis step could also be done with ansible (one line or rhel-system-roles. noarch). Installing Rook in Kubernetes to handle the Ceph cluster: Next, the latest upstream release of Rook has to be cloned: [root@kv-master-00 ~]# git clone https://github. com/rook/rookCloning into 'rook'. . . remote: Enumerating objects: 1, done. remote: Counting objects: 100% (1/1), done. remote: Total 37745 (delta 0), reused 0 (delta 0), pack-reused 37744Receiving objects: 100% (37745/37745), 13. 02 MiB | 1. 54 MiB/s, done. Resolving deltas: 100% (25309/25309), done. 
Now, change the actual directory to the location of the Kubernetes examples where the respective resource definitions can be found: [root@kv-master-00 ~]# cd rook/cluster/examples/kubernetes/cephThe Rook common resources that make up Rook have to be created: [root@kv-master-00 ~]# kubectl create -f common. yaml(output removed)Next, create the Kubernetes Rook operator: [root@kv-master-00 ~]# kubectl create -f operator. yamldeployment. apps/rook-ceph-operator createdTo check the progress of the operator pod and the discovery pods starting up, the commands below can be executed. The discovery pods are responsible for investigating the available resources (e. g. disks that can make up OSD’s) across all available Nodes: [root@kv-master-00 ~]# watch kubectl get pods -n rook-cephNAME READY STATUS RESTARTS AGErook-ceph-operator-fdfbcc5c5-qs7x8 1/1 Running 1 3m14srook-discover-7v65m 1/1 Running 2 2m19srook-discover-cjfdz 1/1 Running 0 2m19srook-discover-f8k4s 0/1 ImagePullBackOff 0 2m19srook-discover-x22hh 1/1 Running 0 2m19sNAME READY STATUS RESTARTS AGEpod/rook-ceph-operator-fdfbcc5c5-qs7x8 1/1 Running 1 4m21spod/rook-discover-7v65m 1/1 Running 2 3m26spod/rook-discover-cjfdz 1/1 Running 0 3m26spod/rook-discover-f8k4s 1/1 Running 0 3m26spod/rook-discover-x22hh 1/1 Running 0 3m26sAfter, the Ceph cluster configuration inside of the Rook operator has to be prepared: [root@kv-master-00 ~]# kubectl create -f cluster. yamlcephcluster. ceph. rook. io/rook-ceph createdOne of the key elements of the default cluster configuration is to configure the Ceph cluster to use all nodes and use all devices, i. e. run Rook/Ceph on every system and consume any free disks that it finds; this makes configuring Rook a lot more simple: [root@kv-master-00 ~]# grep useAll cluster. yml useAllNodes: true useAllDevices: true # Individual nodes and their config can be specified as well, but 'useAllNodes' above must be set to false. Then, only the namedThe progress can be checked now, check the pods in the rook-ceph namespace: [root@kv-master-00 ~]# watch kubectl -n rook-ceph get podsNAME READY STATUS RESTARTS AGEcsi-cephfsplugin-2kqbd 3/3 Running 0 36scsi-cephfsplugin-hjnf9 3/3 Running 0 36scsi-cephfsplugin-provisioner-75c965db4f-tbgfn 4/4 Running 0 36scsi-cephfsplugin-provisioner-75c965db4f-vgcwv 4/4 Running 0 36scsi-cephfsplugin-svcjk 3/3 Running 0 36scsi-cephfsplugin-tv6rs 3/3 Running 0 36scsi-rbdplugin-dsdwk 3/3 Running 0 37scsi-rbdplugin-provisioner-69c9869dc9-bwjv4 5/5 Running 0 37scsi-rbdplugin-provisioner-69c9869dc9-vzzp9 5/5 Running 0 37scsi-rbdplugin-vzhzz 3/3 Running 0 37scsi-rbdplugin-w5n6x 3/3 Running 0 37scsi-rbdplugin-zxjcc 3/3 Running 0 37srook-ceph-mon-a-canary-84c7fc67ff-pf7t5 1/1 Running 0 14srook-ceph-mon-b-canary-5f7c7cfbf4-8dvcp 1/1 Running 0 8srook-ceph-mon-c-canary-7779478497-7x25x 0/1 ContainerCreating 0 3srook-ceph-operator-fdfbcc5c5-qs7x8 1/1 Running 1 9m30srook-discover-7v65m 1/1 Running 2 8m35srook-discover-cjfdz 1/1 Running 0 8m35srook-discover-f8k4s 1/1 Running 0 8m35srook-discover-x22hh 1/1 Running 0 8m35sWait until the Ceph monitor pods are created. Next, the toolbox pod has to be created; this is useful to verify the status/health of the cluster, getting/setting authentication, and querying the Ceph cluster using standard Ceph tools: [root@kv-master-00 ~]# kubectl create -f toolbox. yamldeployment. 
apps/rook-ceph-tools createdTo check how well this is progressing: [root@kv-master-00 ~]# kubectl -n rook-ceph get pods | grep toolrook-ceph-tools-856c5bc6b4-s47qm 1/1 Running 0 31sBefore proceeding with the pool and the storage class the Ceph cluster status can be checked already: [root@kv-master-00 ~]# toolbox=$(kubectl -n rook-ceph get pods -o custom-columns=NAME:. metadata. name --no-headers | grep tools)[root@kv-master-00 ~]# kubectl -n rook-ceph exec -it $toolbox shsh-4. 2# ceph status cluster: id: 5a0bbe74-ce42-4f49-813d-7c434af65aad health: HEALTH_WARN clock skew detected on mon. c services: mon: 3 daemons, quorum a,b,c (age 3m) mgr: a(active, since 2m) osd: 4 osds: 4 up (since 105s), 4 in (since 105s) data: pools: 0 pools, 0 pgs objects: 0 objects, 0 B usage: 4. 0 GiB used, 72 GiB / 76 GiB avail pgs:Note In this example, the health value is HEALTH_WARN because there is a clock skew between the monitor in node c and the rest of the cluster. If this is your case, go to the troubleshooting point at the end of the blogpost to find out how to solve this issue and get a HEALTH_OK. Next, some other resources need to be created. First, the block pool that defines the name (and specification) of the RBD pool that will be used for creating persistent volumes, in this case, is called replicapool: Configuring the CephBlockPool and the Kubernetes StorageClass for using Ceph hosting the Virtual Machines: The cephblockpool. yml is based in the pool. yml, you can check that file in the same directory to learn about the details of each parameter: [root@kv-master-00 ~]# cat pool. yml################################################################################################################## Create a Ceph pool with settings for replication in production environments. A minimum of 3 OSDs on# different hosts are required in this example. # kubectl create -f pool. yaml#################################################################################################################apiVersion: ceph. rook. io/v1kind: CephBlockPoolmetadata: name: replicapool namespace: rook-cephspec: # The failure domain will spread the replicas of the data across different failure zones failureDomain: host # For a pool based on raw copies, specify the number of copies. A size of 1 indicates no redundancy. replicated: size: 3 # A key/value list of annotations annotations: # key: valueThe following file has to be created to define the CephBlockPool: [root@kv-master-00 ~]# vim cephblockpool. ymlapiVersion: ceph. rook. io/v1kind: CephBlockPoolmetadata: name: replicapool namespace: rook-cephspec: failureDomain: host replicated: size: 2[root@kv-master-00 ~]# kubectl create -f cephblockpool. ymlcephblockpool. ceph. rook. io/replicapool created[root@kv-master-00 ~]# kubectl get cephblockpool -n rook-cephNAME AGEreplicapool 19sNow is time to create the Kubernetes storage class that would be used to create the volumes later: [root@kv-master-00 ~]# vim storageclass. ymlapiVersion: storage. k8s. io/v1kind: StorageClassmetadata: name: rook-ceph-block# Change rook-ceph provisioner prefix to match the operator namespace if neededprovisioner: rook-ceph. rbd. csi. ceph. comparameters: # clusterID is the namespace where the rook cluster is running clusterID: rook-ceph # Ceph pool into which the RBD image shall be created pool: replicapool # RBD image format. Defaults to 2 . imageFormat: 2 # RBD image features. Available for imageFormat: 2 . CSI RBD currently supports only `layering` feature. 
imageFeatures: layering # The secrets contain Ceph admin credentials. csi. storage. k8s. io/provisioner-secret-name: rook-ceph-csi csi. storage. k8s. io/provisioner-secret-namespace: rook-ceph csi. storage. k8s. io/node-stage-secret-name: rook-ceph-csi csi. storage. k8s. io/node-stage-secret-namespace: rook-ceph # Specify the filesystem type of the volume. If not specified, csi-provisioner # will set default as `ext4`. csi. storage. k8s. io/fstype: xfs# Delete the rbd volume when a PVC is deletedreclaimPolicy: Delete[root@kv-master-00 ~]# kubectl create -f storageclass. ymlstorageclass. storage. k8s. io/rook-ceph-block created[root@kv-master-00 ~]# kubectl get storageclassNAME PROVISIONER AGErook-ceph-block rook-ceph. rbd. csi. ceph. com 61sSpecial attention to the pool name, it has to be the same as configured in the CephBlockPool. Now, simply wait for the Ceph OSD’s to finish provisioning and we’ll be done with our Ceph deployment: [root@kv-master-00 ~]# watch kubectl -n rook-ceph get pods | grep rook-ceph-osd-prepare rook-ceph-osd-prepare-kv-master-00. kubevirt-io-4npmf 0/1 Completed 0 20mrook-ceph-osd-prepare-kv-master-01. kubevirt-io-69smd 0/1 Completed 0 20mrook-ceph-osd-prepare-kv-master-02. kubevirt-io-zm7c2 0/1 Completed 0 20mrook-ceph-osd-prepare-kv-worker-00. kubevirt-io-5qmjg 0/1 Completed 0 20mThis process may take a few minutes as it has to zap the disks, deploy a BlueStore configuration on them, and start the OSD service pods across our nodes. The cluster deployment can be validated now: [root@kv-master-00 ~]# kubectl -n rook-ceph exec -it $toolbox shsh-4. 2# ceph -s cluster: id: 5a0bbe74-ce42-4f49-813d-7c434af65aad health: HEALTH_WARN too few PGs per OSD (4 < min 30) services: mon: 3 daemons, quorum a,b,c (age 12m) mgr: a(active, since 21m) osd: 4 osds: 4 up (since 20m), 4 in (since 20m) data: pools: 1 pools, 8 pgs objects: 0 objects, 0 B usage: 4. 0 GiB used, 72 GiB / 76 GiB avail pgs: 8 active+cleanOh Wait! the health value is again HEALTH_WARN, no problem! it is because there are too few PGs per OSD, in this case 4, for a minimum value of 30. Let’s fix it changing that value to 256: [root@kv-master-00 ~]# kubectl -n rook-ceph exec -it $toolbox shsh-4. 2# ceph osd pool set replicapool pg_num 256set pool 1 pg_num to 256sh-4. 2# ceph -s cluster: id: 5a0bbe74-ce42-4f49-813d-7c434af65aad health: HEALTH_OK services: mon: 3 daemons, quorum a,b,c (age 18m) mgr: a(active, since 27m) osd: 4 osds: 4 up (since 26m), 4 in (since 26m) data: pools: 1 pools, 256 pgs objects: 0 objects, 0 B usage: 4. 0 GiB used, 72 GiB / 76 GiB avail pgs: 12. 109% pgs unknown 0. 391% pgs not active 224 active+clean 31 unknown 1 peeringIn a moment, Ceph will end peering and the status of the pgs would be active+clean: sh-4. 2# ceph -s cluster: id: 5a0bbe74-ce42-4f49-813d-7c434af65aad health: HEALTH_OK services: mon: 3 daemons, quorum a,b,c (age 21m) mgr: a(active, since 29m) osd: 4 osds: 4 up (since 28m), 4 in (since 28m) data: pools: 1 pools, 256 pgs objects: 0 objects, 0 B usage: 4. 0 GiB used, 72 GiB / 76 GiB avail pgs: 256 active+cleanSome additional checks on the Ceph cluster can be performed: sh-4. 2# ceph osd treeID CLASS WEIGHT TYPE NAME STATUS REWEIGHT PRI-AFF-1 0. 07434 root default-9 0. 01859 host kv-master-00-kubevirt-io 3 hdd 0. 01859 osd. 3 up 1. 00000 1. 00000-7 0. 01859 host kv-master-01-kubevirt-io 2 hdd 0. 01859 osd. 2 up 1. 00000 1. 00000-3 0. 01859 host kv-master-02-kubevirt-io 0 hdd 0. 01859 osd. 0 up 1. 00000 1. 00000-5 0. 01859 host kv-worker-00-kubevirt-io 1 hdd 0. 01859 osd. 
1 up 1. 00000 1. 00000sh-4. 2# ceph osd status+----+--------------------------+-------+-------+--------+---------+--------+---------+-----------+| id | host | used | avail | wr ops | wr data | rd ops | rd data | state |+----+--------------------------+-------+-------+--------+---------+--------+---------+-----------+| 0 | kv-master-02. kubevirt-io | 1026M | 17. 9G | 0 | 0 | 0 | 0 | exists,up || 1 | kv-worker-00. kubevirt-io | 1026M | 17. 9G | 0 | 0 | 0 | 0 | exists,up || 2 | kv-master-01. kubevirt-io | 1026M | 17. 9G | 0 | 0 | 0 | 0 | exists,up || 3 | kv-master-00. kubevirt-io | 1026M | 17. 9G | 0 | 0 | 0 | 0 | exists,up |+----+--------------------------+-------+-------+--------+---------+--------+---------+-----------+That should match the available block devices in the nodes, let’s check it in the kv-master-00 node: [root@kv-master-00 ~]# lsblkNAME MAJ:MIN RM SIZE RO TYPE MOUNTPOINTsr0 11:0 1 512K 0 romvda 253:0 0 50G 0 disk└─vda1 253:1 0 50G 0 part /vdb 253:16 0 20G 0 disk└─ceph--09112f92--11cd--4284--b763--447065cc169c-osd--data--0102789c--852c--4696--96ce--54c2ad3a848b 252:0 0 19G 0 lvmTo validate that the pods are running on the correct nodes, check the NODE column below: [root@kv-master-00 ~]# kubectl get pods -n rook-ceph -o wide | egrep '(NAME|osd)'NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATESrook-ceph-osd-0-8689c68c78-rgdbj 1/1 Running 0 31m 10. 244. 2. 9 kv-master-02. kubevirt-io <none> <none>rook-ceph-osd-1-574cb85d9d-vs2jc 1/1 Running 0 31m 10. 244. 3. 18 kv-worker-00. kubevirt-io <none> <none>rook-ceph-osd-2-65b54c458f-zkk6v 1/1 Running 0 31m 10. 244. 1. 10 kv-master-01. kubevirt-io <none> <none>rook-ceph-osd-3-5fd97cd4c9-2xd6c 1/1 Running 0 30m 10. 244. 0. 10 kv-master-00. kubevirt-io <none> <none>rook-ceph-osd-prepare-kv-master-00. kubevirt-io-4npmf 0/1 Completed 0 31m 10. 244. 0. 9 kv-master-00. kubevirt-io <none> <none>rook-ceph-osd-prepare-kv-master-01. kubevirt-io-69smd 0/1 Completed 0 31m 10. 244. 1. 9 kv-master-01. kubevirt-io <none> <none>rook-ceph-osd-prepare-kv-master-02. kubevirt-io-zm7c2 0/1 Completed 0 31m 10. 244. 2. 8 kv-master-02. kubevirt-io <none> <none>rook-ceph-osd-prepare-kv-worker-00. kubevirt-io-5qmjg 0/1 Completed 0 31m 10. 244. 3. 17 kv-worker-00. kubevirt-io <none> <none>All good! For validating the storage provisioning through the new Ceph cluster managed by the Rook operator, a persistent volume claim (PVC) can be created: [root@kv-master-00 ~]# vim pvc. ymlapiVersion: v1kind: PersistentVolumeClaimmetadata: name: pv-claimspec: storageClassName: rook-ceph-block accessModes: - ReadWriteOnce resources: requests: storage: 1Gi[root@kv-master-00 ceph]# kubectl create -f pvc. ymlpersistentvolumeclaim/pv-claim createdEnsure that the storageClassName contains the name of the storage class you have created, in this case, rook-ceph-block For checking that it has been bound, list the PVCs and look for the ones in the rook-ceph-block storageclass: [root@kv-master-00 ~]# kubectl get pvcNAME STATUS VOLUME CAPACITY ACCESS MODES STORAGECLASS AGEpv-claim Bound pvc-62a9738a-e027-4a68-9ecf-16278711ff64 1Gi RWO rook-ceph-block 63sIf the volume is still in a ‘Pending’ state, likely, that one of the pods haven’t come up correctly or one of the steps above has been missed. To check it, the command ‘kubectl get pods -n rook-ceph’ can be executed for viewing the running/failed pods. 
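If you do hit a Pending claim, describing the PVC and checking recent events usually reveals the cause (for example a misspelled storageClassName or CSI provisioner pods that never became ready). A minimal troubleshooting sketch follows; the label and container names in the last command are assumptions derived from the provisioner pod names shown earlier and may differ in other Rook versions:

[root@kv-master-00 ~]# kubectl describe pvc pv-claim        # provisioning failures are listed under Events
[root@kv-master-00 ~]# kubectl get events --sort-by=.metadata.creationTimestamp | tail -n 20
[root@kv-master-00 ~]# kubectl -n rook-ceph logs -l app=csi-rbdplugin-provisioner -c csi-provisioner --tail=50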
Before proceeding let’s clean up the temporary PVC: [root@kv-master-00 ~]# kubectl delete pvc pv-claimpersistentvolumeclaim pv-claim deletedCreating a Virtual Machine in KubeVirt backed by Ceph: Once the Ceph cluster is up and running, the first Virtual Machine can be created, to do so, a YML example file is being downloaded and modified: [root@kv-master-00 ~]# wget https://raw. githubusercontent. com/kubevirt/containerized-data-importer/master/manifests/example/vm-dv. yaml[root@kv-master-00 ~]# sed -i 's/hdd/rook-ceph-block/' vm-dv. yaml[root@kv-master-00 ~]# sed -i 's/fedora/centos/' vm-dv. yaml[root@kv-master-00 ~]# sed -i 's@https://download. cirros-cloud. net/0. 4. 0/cirros-0. 4. 0-x86_64-disk. img@http://cloud. centos. org/centos/7/images/CentOS-7-x86_64-GenericCloud. qcow2@' vm-dv. yaml[root@kv-master-00 ~]# sed -i 's/storage: 100M/storage: 9G/' vm-dv. yaml[root@kv-master-00 ~]# sed -i 's/memory: 64M/memory: 1G/' vm-dv. yamlThe modified YAML could be run already like this but a user won’t be able to log in as we don’t know the password used in that image. cloud-init can be used to change the password of the default user of that image centos and grant us access, two parts have to be added: Add a second disk after the datavolumevolume (already existing), in this example is called cloudint:[root@kv-master-00 ~]# vim vm-dv. yaml---template: metadata: labels: kubevirt. io/vm: vm-datavolume spec: domain: devices: disks: - disk: bus: virtio name: datavolumevolume - disk: bus: virtio name: cloudinit Afterwards, add the volume at the end of the file, after the volume already defined as datavolumevolume, in this example it’s also called cloudinit:[root@kv-master-00 ~]# vim vm-dv. yaml---volumes: - dataVolume: name: centos-dv name: datavolumevolume - cloudInitNoCloud: userData: | #cloud-config password: changeme chpasswd: { expire: False } name: cloudinitThe password value (changeme in this example), can be set to your preferred one. Once the YAML file is prepared the Virtual Machine can be created and started: [root@kv-master-00 ~]# kubectl create -f vm-dv. yamlvirtualmachine. kubevirt. io/vm-centos-datavolume created[root@kv-master-00 ~]# kubectl get vmNAME AGE RUNNING VOLUMEvm-centos-datavolume 62m falseLet’s wait a little bit until the importer pod finishes, meanwhile you can check it with: [root@kv-master-00 ~]# kubectl get podsNAME READY STATUS RESTARTS AGEimporter-centos-dv-8v6l5 0/1 ContainerCreating 0 12sOnce that pods ends, the Virtual Machine can be started (in this case the virt parameter can be used because of the krew plugin system: [root@kv-master-00 tmp]# kubectl virt start vm-centos-datavolumeVM vm-centos-datavolume was scheduled to start[root@kv-master-00 ~]# kubectl get vmiNAME AGE PHASE IP NODENAMEvm-centos-datavolume 2m4s Running 10. 244. 3. 20 kv-worker-00. kubevirt-io[root@kv-master-00 ~]# kubectl get pvcNAME STATUS VOLUME CAPACITY ACCESS MODES STORAGECLASS AGEcentos-dv Bound pvc-5604eb4a-21dd-4dca-8bb7-fbacb0791402 9Gi RWO rook-ceph-block 7m34sAwesome! the Virtual Machine is running in a pod through KubeVirt and it’s backed up with Ceph under the management of Rook. Now it’s the time for grabbing a coffee to allow cloud-init to do its job. A little while later let’s connect to that VM console: [root@kv-master-00 ~]# kubectl virt console vm-centos-datavolumeSuccessfully connected to vm-centos-datavolume console. The escape sequence is ^]CentOS Linux 7 (Core)Kernel 3. 10. 0-957. 27. 2. el7. 
x86_64 on an x86_64vm-centos-datavolume login: centosPassword:[centos@vm-centos-datavolume ~]$And there it is! our Kubernetes cluster provided with virtualization capabilities thanks to KubeVirt and backed up with a strong Ceph cluster under the management of Rook. Troubleshooting: It might happen that once the Ceph cluster is created, the hosts are not properly time-synchronized, in that case, the Ceph configuration can be modified to allow a bigger time difference between the nodes, in this case, the variable mon clock drift allowed is changed to 0. 5 seconds, the steps to do so are the following: Connect to the toolbox pod to check the cluster status Modify the configMap with the Ceph cluster configuration Verify the changes Remove the mon pods to apply the new configuration[root@kv-master-00 ~]# kubectl -n rook-ceph exec -it $toolbox shsh-4. 2# ceph status cluster: id: 5a0bbe74-ce42-4f49-813d-7c434af65aad health: HEALTH_WARN clock skew detected on mon. c services: mon: 3 daemons, quorum a,b,c (age 3m) mgr: a(active, since 2m) osd: 4 osds: 4 up (since 105s), 4 in (since 105s) data: pools: 0 pools, 0 pgs objects: 0 objects, 0 B usage: 4. 0 GiB used, 72 GiB / 76 GiB avail pgs:[root@kv-master-00 ~]# kubectl -n rook-ceph edit ConfigMap rook-config-override -o yamlconfig: | [global] mon clock drift allowed = 0. 5[root@kv-master-00 ~]# kubectl -n rook-ceph get ConfigMap rook-config-override -o yamlapiVersion: v1data: config: | [global] mon clock drift allowed = 0. 5kind: ConfigMapmetadata: creationTimestamp: 2019-10-18T14:08:39Z name: rook-config-override namespace: rook-ceph ownerReferences: - apiVersion: ceph. rook. io/v1 blockOwnerDeletion: true kind: CephCluster name: rook-ceph uid: d0bd3351-e630-44af-b981-550e8a2a50ec resourceVersion: 12831 selfLink: /api/v1/namespaces/rook-ceph/configmaps/rook-config-override uid: bdf1f1fb-967a-410b-a2bd-b4067ce005d2[root@kv-master-00 ~]# kubectl -n rook-ceph delete pod $(kubectl -n rook-ceph get pods -o custom-columns=NAME:. metadata. name --no-headers| grep mon)pod rook-ceph-mon-a-8565577958-xtznq deletedpod rook-ceph-mon-b-79b696df8d-qdcpw deletedpod rook-ceph-mon-c-5df78f7f96-dr2jn deleted[root@kv-master-00 ~]# kubectl -n rook-ceph exec -it $toolbox shsh-4. 2# ceph status cluster: id: 5a0bbe74-ce42-4f49-813d-7c434af65aad health: HEALTH_OK services: mon: 3 daemons, quorum a,b,c (age 43s) mgr: a(active, since 9m) osd: 4 osds: 4 up (since 8m), 4 in (since 8m) data: pools: 0 pools, 0 pgs objects: 0 objects, 0 B usage: 4. 0 GiB used, 72 GiB / 76 GiB avail pgs:References: Kubernetes getting started KubeVirt Containerized Data Importer Ceph: free-software storage platform Ceph hardware recommendations Rook: Open-Source,Cloud-Native Storage for Kubernetes KubeVirt User Guide" }, { - "id": 90, + "id": 89, "url": "/2019/KubeVirt_k8s_crio_from_scratch_installing_KubeVirt.html", "title": "KubeVirt on Kubernetes with CRI-O from scratch - Installing KubeVirt", "author" : "Pedro Ibáñez Requena", "tags" : "cri-o, kubevirt installation", "body": "Building your environment for testing or automation purposes can be difficult when using different technologies. In this guide, you’ll find how to set up your system step-by-step to work with the latest versions of Kubernetes (up to today), CRI-O and KubeVirt. 
In this series of blogposts the following topics are going to be covered en each post: Requirements: dependencies and containers runtime Kubernetes: Cluster and Network KubeVirt: requirements and first Virtual MachineIn the first blogpost of the series (KubeVirt on Kubernetes with CRI-O from scratch) the initial set up for a CRI-O runtime environment has been covered. In the second blogpost of the series (Kubernetes: Cluster and Network) the Kubernetes cluster and network were set up based on the CRI-O installation already prepared in the first post. This is the last blogpost of the series of 3, in this case KubeVirt is going to be installed and also would be used to deploy an example Virtual Machine. Installing KubeVirt: What is KubeVirt? if you navigate to the KubeVirt webpage you can read: KubeVirt technology addresses the needs of development teams that have adopted or want to adopt Kubernetes but possess existing Virtual Machine-based workloads that cannot be easily containerized. More specifically, the technology provides a unified development platform where developers can build, modify, and deploy applications residing in both Application Containers as well as Virtual Machines in a common, shared environment. Benefits are broad and significant. Teams with a reliance on existing virtual machine-based workloads are empowered to rapidly containerize applications. With virtualized workloads placed directly in development workflows, teams can decompose them over time while still leveraging remaining virtualized components as is comfortably desired. In this example there is a Kubernetes cluster compose of one master, for it to be schedulable to host the KubeVirt pods, a little modification has to be done: k8s-test. local# kubectl taint nodes k8s-test node-role. kubernetes. io/master:NoSchedule-The last version of KubeVirt at the moment is v0. 20. 8, to check it the following command can be executed: k8s-test. local# export KUBEVIRT_VERSION=$(curl -s https://api. github. com/repos/kubevirt/kubevirt/releases/latest | jq -r . tag_name)k8s-test. local# echo $KUBEVIRT_VERSIONv0. 20. 8To install KubeVirt, the operator and the cr are going to be created with the following commands: k8s-test. local# kubectl create -f https://github. com/kubevirt/kubevirt/releases/download/${KUBEVIRT_VERSION}/kubevirt-operator. yamlk8s-test. local# kubectl create -f https://github. com/kubevirt/kubevirt/releases/download/${KUBEVIRT_VERSION}/kubevirt-cr. yamlThis demo environment already runs within a virtualized environment, and in order to be able to run VMs here we need to pre-configure KubeVirt so it uses software-emulated virtualization instead of trying to use real hardware virtualization. k8s-test. local# kubectl create configmap kubevirt-config -n kubevirt --from-literal debug. useEmulation=trueThe deployment can be checked with the following command: k8s-test. local# kubectl get pods -n kubevirtNAME READY STATUS RESTARTS AGEvirt-api-5546d58cc8-5sm4v 1/1 Running 0 16hvirt-api-5546d58cc8-pxkgt 1/1 Running 0 16hvirt-controller-5c749d77bf-cxxj8 1/1 Running 0 16hvirt-controller-5c749d77bf-wwkxm 1/1 Running 0 16hvirt-handler-cx7q7 1/1 Running 0 16hvirt-operator-6b4dccb44d-bqxld 1/1 Running 0 16hvirt-operator-6b4dccb44d-m2mvf 1/1 Running 0 16hNow that KubeVirt is installed is the right time to download the client tool to interact with th Virtual Machines. k8s-test. local# wget -O virtctl https://github. com/kubevirt/kubevirt/releases/download/${KUBEVIRT_VERSION}/virtctl-${KUBEVIRT_VERSION}-linux-amd64k8s-test. 
local# chmod +x virtctlk8s-test. local# . /virtctlAvailable Commands: console Connect to a console of a virtual machine instance. expose Expose a virtual machine instance, virtual machine, or virtual machine instance replica set as a new service. help Help about any command image-upload Upload a VM image to a PersistentVolumeClaim. restart Restart a virtual machine. start Start a virtual machine. stop Stop a virtual machine. version Print the client and server version information. vnc Open a vnc connection to a virtual machine instance. Use virtctl <command> --help for more information about a given command. Use virtctl options for a list of global command-line options (applies to all commands). This step is optional, right now anything related with the Virtual Machines can be done running the virtctl command. In case there’s a need to interact with the Virtual Machines without leaving the scope of the kubectl command, the virt plugin for Krew can be installed following the instructions below: k8s-test. local# ( set -x; cd $(mktemp -d) && curl -fsSLO https://github. com/kubernetes-sigs/krew/releases/download/v0. 3. 1/krew. {tar. gz,yaml} && tar zxvf krew. tar. gz && . /krew- $(uname | tr '[:upper:]' '[:lower:]')_amd64 install \ --manifest=krew. yaml --archive=krew. tar. gz). . . Installed plugin: krewWARNING: You installed a plugin from the krew-index plugin repository. These plugins are not audited for security by the Krew maintainers. Run them at your own risk. The warning printed by the Krew maintainers can be ignored. To have the krew plugin available, the PATH variable has to be modified: k8s-test. local# vim ~/. bashrcexport PATH= ${KREW_ROOT:-$HOME/. krew}/bin:$PATH k8s-test. local# source ~/. bashrcNow, the virt plugin is going to be installed using the krew plugin manager: k8s-test. local# kubectl krew install virtUpdated the local copy of plugin index. Installing plugin: virtCAVEATS:\ | virt plugin is a wrapper for virtctl originating from the KubeVirt project. In order to use virtctl you will | need to have KubeVirt installed on your Kubernetes cluster to use it. See https://kubevirt. io/ for details | | Run | | kubectl virt help | | to get an overview of the available commands | | See | | https://kubevirt. io/user-guide/virtual_machines/graphical_and_console_access/ | | for a usage example/Installed plugin: virtWARNING: You installed a plugin from the krew-index plugin repository. These plugins are not audited for security by the Krew maintainers. Run them at your own risk. Installing the first Virtual Machine in KubeVirt: For this example, a cirros Virtual Machine is going to be created, in this example, the kind of disk used is a registry disk (not persistent): k8s-test. local# kubectl apply -f https://kubevirt. io/labs/manifests/vm. yamlk8s-test. local# kubectl get vmsNAME AGE RUNNING VOLUMEtestvm 13s falseAfter the Virtual Machine has been created, it has to be started, to do so, the virtctl or the kubectl can be used (depending on what method has been chosen in previous steps). k8s-test. local# . /virtctl start testvmVM vm-cirros was scheduled to startk8s-test. local# kubectl get vmsNAME AGE RUNNING VOLUMEtestvm 7m11s trueNext thing to do is to use the kubectl command for getting the IP address and the actual status of the virtual machines: k8s-test. local# kubectl get vmiskubectl get vmisNAME AGE PHASE IP NODENAMEtestvm 14s Schedulingk8s-test. local# kubectl get vmisNAME AGE PHASE IP NODENAMEtestvm 63s Running 10. 244. 0. 
15 k8s-testSo, finally the Virtual Machine is running and has an IP address. To connect to that VM, the console can be used (. /virtctl console testvm) or also a direct connection with SSH can be made: k8s-test. local# ssh cirros@10. 244. 0. 15cirros@10. 244. 0. 15's password: gocubsgo$ uname -aLinux testvm 4. 4. 0-28-generic #47-Ubuntu SMP Fri Jun 24 10:09:13 UTC 2016 x86_64 GNU/Linux$ exitTo stop the Virtual Machine one of the following commands can be executed: k8s-test. local# . /virtctl stop testvmVM testvm was scheduled to stopk8s-test. local# kubectl virt stop testvmVM testvm was scheduled to stopTroubleshooting: Each step of this guide has a place where to look for possible issues, in general, the troubleshooting guide of kubernetes can be checked. The following list tries to ease the possible troubleshooting in case of problems during each step of this guide: CRI-O: check the status of the CRI-O service systemctl status crio and also the messages in the journal journalctl -u crio -lf Kubernetes: check the status of the Kubelet service systemctl status kubelet and also the messages in the journal journalctl -u kubelet -fl Pods: for checking the status of the pods the kubectl command can be used in different ways kubectl get pods -A kubectl describe pod $pod Nodes: a Ready status would mean everything is ok with the node, otherwise the details of that node can be checked. kubectl get nodes -o wide kubectl get node <nodename> -o yaml KubeVirt: to check the status of the KubeVirt pods use kubectl get pods -n kubevirtReferences: Kubernetes getting started Kubernetes installing kubeadm Running CRI-O with kubeadm Kubernetes pod-network configuration Kubectl cheatsheet Multus KubeVirt User Guide KubeVirt Katacoda scenarios" }, { - "id": 91, + "id": 90, "url": "/2019/KubeVirt_k8s_crio_from_scratch_installing_kubernetes.html", "title": "KubeVirt on Kubernetes with CRI-O from scratch - Installing Kubernetes", "author" : "Pedro Ibáñez Requena", "tags" : "cri-o, kubernetes, ansible", "body": "Building your environment for testing or automation purposes can be difficult when using different technologies. In this guide you’ll find how to set up your system step-by-step to work with the latest versions of Kubernetes (up to today), CRI-O and KubeVirt. In this series of blogposts the following topics are going to be covered en each post: Requirements: dependencies and containers runtime Kubernetes: Cluster and Network KubeVirt: requirements and first Virtual MachineIn the first blogpost of the series (KubeVirt on Kubernetes with CRI-O from scratch) the initial set up for a CRI-O runtime environment has been covered. In this post is shown the installation and configuration of Kubernetes based in the previous CRI-O environment. Installing Kubernetes: If the ansible way was chosen, you may want to skip this section since the repository and needed packages were already installed during execution. To install the K8s packages a new repo has to be added: k8s-test. local# vim /etc/yum. repos. d/kubernetes. repo[Kubernetes]name=Kubernetesbaseurl=https://packages. cloud. google. com/yum/repos/kubernetes-el7-x86_64enabled=1gpgcheck=1repo_gpgcheck=1gpgkey=https://packages. cloud. google. com/yum/doc/yum-key. gpghttps://packages. cloud. google. com/yum/doc/rpm-package-key. gpgNow, the gpg keys of the packages can be imported into the system and the installation can proceed: k8s-test. local# rpm --import https://packages. cloud. google. com/yum/doc/yum-key. gpg https://packages. cloud. google. 
com/yum/doc/rpm-package-key. gpgk8s-test. local# yum install -y kubelet kubeadm kubectlOnce the Kubelet is configured and CRI-O also ready, the CRI-O daemon can be started and the setup of the cluster can be done: Note The kubelet will not start successfully until the Kubernetes cluster is installed. k8s-test. local# systemctl restart criok8s-test. local# systemctl enable --now kubeletInstalling the Kubernetes cluster: There are multiple ways for installing a Kubernetes cluster, in this example it will be done with the command kubeadm, the pod network cidr is the same that has been previously used for the CRI-O bridge in the 10-crio-bridge. conf configuration file: k8s-test. local# kubeadm init --pod-network-cidr=10. 244. 0. 0/16When the installation finishes the command will print a similar message like this one: Your Kubernetes control-plane has initialized successfully!To start using your cluster, you need to run the following as a regular user: mkdir -p $HOME/. kube sudo cp -i /etc/kubernetes/admin. conf $HOME/. kube/config sudo chown $(id -u):$(id -g) $HOME/. kube/configYou should now deploy a pod network to the cluster. Run kubectl apply -f [podnetwork]. yaml with one of the options listed at: https://kubernetes. io/docs/concepts/cluster-administration/addons/Then you can join any number of worker nodes by running the following on each as root:kubeadm join 192. 168. 0. 10:6443 --token 6fsrbi. iqsw1girupbwue5o \ --discovery-token-ca-cert-hash sha256:c7cf9d9681876856f9b7819067841436831f19004caadab0b5838a9bf7f4126aNow, it’s time to deploy the pod network. If the reader is curious and want to already check the status of the cluster, the following commands can be executed for getting all the pods running and their status: k8s-test. local# export KUBECONFIG=/etc/kubernetes/kubelet. confk8s-test. local# kubectl get pods -ANAMESPACE NAME READY STATUS RESTARTS AGEkube-system coredns-5644d7b6d9-ffnvx 1/1 Running 0 101skube-system coredns-5644d7b6d9-lh9gm 1/1 Running 0 101skube-system etcd-k8s-test 1/1 Running 0 59skube-system kube-apiserver-k8s-test 1/1 Running 0 54skube-system kube-controller-manager-k8s-test 1/1 Running 0 58skube-system kube-proxy-tdcdv 1/1 Running 0 101skube-system kube-scheduler-k8s-test 1/1 Running 0 50sInstalling the pod network: The Kubernetes pod-network documentation shows different add-on to handle the communications between the pods. In this example Virtual Machines will be deployed with KubeVirt and also they will have multiple network interfaces attached to the VMs, in this example Multus is going to be used. Some of the Multus Prerequisites indicate: After installing Kubernetes, you must install a default network CNI plugin. If you’re using kubeadm, refer to the “Installing a pod network add-on” section in the kubeadm documentation. If it’s your first time, we generally recommend using Flannel for the sake of simplicity. So flannel is going to be installed running the following commands: k8s-test. local# cd /rootk8s-test. local# wget https://raw. githubusercontent. com/coreos/flannel/master/Documentation/kube-flannel. ymlThe version of CNI has to be checked and ensured that is the 0. 3. 1 version, otherwise, it has to be changed, in this example the version 0. 2. 0 is replaced by the 0. 3. 1: k8s-test. local# grep cniVersion kube-flannel. yml cniVersion : 0. 2. 0 ,k8s-test. local# sed -i 's/0. 2. 0/0. 3. 1/g' kube-flannel. ymlk8s-test. local# kubectl apply -f kube-flannel. ymlpodsecuritypolicy. policy/psp. flannel. unprivileged createdclusterrole. rbac. 
authorization. k8s. io/flannel createdclusterrolebinding. rbac. authorization. k8s. io/flannel createdserviceaccount/flannel createdconfigmap/kube-flannel-cfg createddaemonset. apps/kube-flannel-ds-amd64 createddaemonset. apps/kube-flannel-ds-arm64 createddaemonset. apps/kube-flannel-ds-arm createddaemonset. apps/kube-flannel-ds-ppc64le createddaemonset. apps/kube-flannel-ds-s390x createdOnce the flannel network has been created the Multus can be defined, to check the status of the pods the following command can be executed: k8s-test. local# kubectl get pods -ANAMESPACE NAME READY STATUS RESTARTS AGEkube-system coredns-5644d7b6d9-9mfc9 1/1 Running 0 20hkube-system coredns-5644d7b6d9-sd6ck 1/1 Running 0 20hkube-system etcd-k8s-test 1/1 Running 0 20hkube-system kube-apiserver-k8s-test 1/1 Running 0 20hkube-system kube-controller-manager-k8s-test 1/1 Running 0 20hkube-system kube-flannel-ds-amd64-ml68d 1/1 Running 0 20hkube-system kube-proxy-lqjpv 1/1 Running 0 20hkube-system kube-scheduler-k8s-test 1/1 Running 0 20hTo load the multus configuration, the multus-cni repository has to be cloned, and also the kube-1. 16-change branch has to be used: k8s-test. local# git clone https://github. com/intel/multus-cni /root/src/github. com/multus-cnik8s-test. local# cd /root/src/github. com/multus-cnik8s-test. local# git checkout origin/kube-1. 16-changek8s-test. local# cd multus-cni/imagesTo load the multus daemonset the following command has to be executed: k8s-test. local# kubectl create -f multus-daemonset-crio. ymlcustomresourcedefinition. apiextensions. k8s. io/network-attachment-definitions. k8s. cni. cncf. io createdclusterrole. rbac. authorization. k8s. io/multus createdclusterrolebinding. rbac. authorization. k8s. io/multus createdserviceaccount/multus createdconfigmap/multus-cni-config createddaemonset. apps/kube-multus-ds-amd64 createddaemonset. apps/kube-multus-ds-ppc64le createdIn the next post KubeVirt: requirements and first Virtual Machine, the KubeVirt requirements will be set up together with the binaries and YAML files and also the first virtual Machines will be deployed. " }, { - "id": 92, + "id": 91, "url": "/2019/changelog-v0.22.0.html", "title": "KubeVirt v0.22.0", "author" : "kube🤖", "tags" : "release notes, changelog", "body": "v0. 22. 0: Released on: Thu Oct 10 18:55:08 2019 +0200 Support for Nvidia GPUs and vGPUs exposed by Nvidia Kubevirt Device Plugin. VMIs now successfully start if they get a 0xfe prefixed MAC address assigned from the pod network Removed dependency on host semanage in SELinux Permissive mode Some changes as result of entering the CNCF sandbox (DCO check, FOSSA check, best practice badge) Many bug fixes and improvements in several areas CI: Introduced a OKD 4 test lane CI: Many improved tests, resulting in less flakyness" }, { - "id": 93, + "id": 92, "url": "/2019/KubeVirt_k8s_crio_from_scratch.html", "title": "KubeVirt on Kubernetes with CRI-O from scratch", "author" : "Pedro Ibáñez Requena", "tags" : "lab, cri-o, quickstart, homelab", "body": "Building your environment for testing or automation purposes can be difficult when using different technologies. In this guide you’ll find how to set up your system step-by-step to work with the latest versions up to today of Kubernetes, CRI-O and KubeVirt. 
In this series of blogposts the following topics are going to be covered en each post: Requirements: dependencies and containers runtime Kubernetes: Cluster and Network KubeVirt: requirements and first Virtual MachinePre-requisites: Versions: The following versions are going to be used: Software Purpose Version CentOS Operating System 7. 7. 1908 Kubernetes Orchestration v1. 16. 0 CRI-O Containers runtime 1. 16. 0-dev KubeVirt Virtual Machine Management on Kubernetes v0. 20. 7 Ansible (optional) Automation tool 2. 8. 4 Requirements: It is a requirement to have a Virtual Machine (VM) with enough resources, in my case I am running a 16GB memory and 4vCPUs VM, but should probably be run with less resources. Operating System (OS) running this VM as indicated in the table above has to be CentOS 7. 7. 1908 and you should take care of its deployment. In my lab I used latest Centos 7 cloud image to speed up the provisioning process. In this guide the system will be named k8s-test. local and the IP address is 192. 168. 0. 10. A second system called laptop would be used to run the playbooks (if you choose to go the easy and automated way). It is also needed to have access to the root account in the VM for installing the required software and configure some kernel parameters. In this example only a Kubernetes master would be used. Instructions: Preparing the VM: Ensure the VM system is updated to the latest versions of the software and also ensure that the epel repository is installed: k8s-test. local# yum install epel-release -yk8s-test. local# yum update -yk8s-test. local# yum install vim jq -yThe following kernel parameters have to be configured: k8s-test. local# cat > /etc/sysctl. d/99-kubernetes-cri. conf <<EOFnet. bridge. bridge-nf-call-iptables = 1net. ipv4. ip_forward = 1net. bridge. bridge-nf-call-ip6tables = 1EOFAnd also the following kernel modules have to be installed: k8s-test. local# modprobe br_netfilterk8s-test. local# echo br_netfilter > /etc/modules-load. d/br_netfilter. confk8s-test. local# modprobe overlayk8s-test. local# echo overlay > /etc/modules-load. d/overlay. confThe new sysctl parameters have to be loaded in the system with the following command: k8s-test. local# sysctl -p/etc/sysctl. d/99-kubernetes-cri. confThe next step is to disable SELinux: k8s-test. local# setenforce 0k8s-test. local# sed -i 's/^SELINUX=enforcing$/SELINUX=permissive/' /etc/selinux/configAnd the installation of Kubernetes and CRI-O can proceed. Installing Kubernetes and CRI-O: To install Kubernetes and CRI-O several ways can be used, in this guide there is the step-by-step guide where the user can do everything by himself or the alternative option, taking the easy road and running the ansible-playbook that will take care of almost everything. The ansible way: we are waiting for the PR to be merged in the official cri-o-ansible repository, meantime a fork in an alternative repository would be used. Also, note that the following commands are executed from a different place, in this case from a computer called laptop: laptop$ sudo yum install ansible -ylaptop# git clone https://github. com/ptrnull/cri-o-ansiblelaptop# cd cri-o-ansiblelaptop# git checkout fixes_k8s_1_16laptop# ansible-playbook cri-o. yml -i 192. 168. 0. 10,Once the playbook ends the system would be ready for getting CRI-O configured. The step-by-step way: If the ansible way was chosen, you may want to skip this section. Otherwise, let’s configure each piece. 
The required packages may be installed in the system running the following command: k8s-test. local# yum install btrfs-progs-devel container-selinux device-mapper-devel gcc git glib2-devel glibc-devel glibc-static gpgme-devel json-glib-devel libassuan-devel libgpg-error-devel libseccomp-devel make pkgconfig skopeo-containers tar wget -yInstall golang and the md2man packages: depending on the operating system running in your VM, it may be needed to change the name of the md2man golang package. k8s-test. local# yum install golang-github-cpuguy83-go-md2man golang -yThe following directories have to be created: /usr/local/go /etc/systemd/system/kubelet. service. d/ /var/lib/etcd /etc/cni/net. dk8s-test. local# for d in /usr/local/go /etc/systemd/system/kubelet. service. d/ /var/lib/etcd /etc/cni/net. d /etc/containers ; do mkdir -p $d; doneClone the runc repository: k8s-test. local# git clone https://github. com/opencontainers/runc /root/src/github. com/opencontainers/runcClone the CRI-O repository: k8s-test. local# git clone https://github. com/cri-o/cri-o /root/src/github. com/cri-o/cri-oClone the CNI repository: k8s-test. local# git clone https://github. com/containernetworking/plugins /root/src/github. com/containernetworking/pluginsTo build each part, a series of commands have to be executed, first building runc: k8s-test. local# cd /root/src/github. com/opencontainers/runck8s-test. local# export GOPATH=/rootk8s-test. local# make BUILDTAGS= seccomp selinux k8s-test. local# make installAnd also runc has to be linked in the correct path: k8s-test. local# ln -sf /usr/local/sbin/runc /usr/bin/runcNow building CRI-O (special focus on switching the branch): k8s-test. local# export GOPATH=/rootk8s-test. local# export GOBIN=/usr/local/go/bink8s-test. local# export PATH=/usr/local/go/bin:$PATHk8s-test. local# cd /root/src/github. com/cri-o/cri-ok8s-test. local# git checkout release-1. 16k8s-test. local# makek8s-test. local# make installk8s-test. local# make install. systemdk8s-test. local# make install. configCRI-O also needs the conmon software as a dependency: k8s-test. local# git clone http://github. com/containers/conmon /root/src/github. com/conmonk8s-test. local# cd /root/src/github. com/conmonk8s-test. local# makek8s-test. local# make installNow, the ContainerNetworking plugins have to be built and installed: k8s-test. local# cd /root/src/github. com/containernetworking/pluginsk8s-test. local# . /build_linux. shk8s-test. local# mkdir -p /opt/cni/bink8s-test. local# cp bin/* /opt/cni/bin/The cgroup manager has to be changed in the CRI-O configuration from the value of systemd to cgroupfs, to get it done, the file /etc/crio/crio. conf has to be edited and the variable cgroup_manager has to be replaced from its original value of systemd to cgroupfs (it could be already set it up to that value, in that case this step can be skipped): k8s-test. local# vim /etc/crio/crio. conf# group_manager = systemd group_manager = cgroupfs In the same file, the storage_driver is not configured, the variable storage_driver has to be uncommented and the value has to be changed from overlay to overlay2: k8s-test. local# vim /etc/crio/crio. conf#storage_driver = overlay storage_driver = overlay2 Also related with the storage, the storage_option has to be configured to have the following value: k8s-test. local# vim /etc/crio/crio. confstorage_option = [ overlay2. override_kernel_check=1 ]Preparing CRI-O: CRI-O is the lightweight container runtime for Kubernetes. 
As it is pointed in the CRI-O Website: CRI-O is an implementation of the Kubernetes CRI (Container Runtime Interface) to enable using OCI (Open Container Initiative) compatible runtimes. It is a lightweight alternative to using Docker as the runtime for Kubernetes. It allows Kubernetes to use any OCI-compliant runtime as the container runtime for running pods. Today it supports runc and Kata Containers as the container runtimes but any OCI-conformant runtime can be plugged in principle. CRI-O supports OCI container images and can pull from any container registry. It is a lightweight alternative to using Docker, Moby or rkt as the runtime for Kubernetes. The first step is to change the configuration of the network_dir parameter in the CRI-O configuration file, for doing so, the network_dir parameter in the /etc/crio/crio. conf file has to be changed to point to /etc/crio/net. d k8s-test. local$ vim /etc/crio/crio. conf[crio. network]# Path to the directory where CNI configuration files are located. network_dir = /etc/crio/net. d/ Also that directory has to be created: k8s-test. local$ mkdir /etc/crio/net. dThe reason behind that change is because CRI-O and kubeadm reset don’t play well together, as kubeadm reset empties /etc/cni/net. d/. Therefore, it is good to change the crio. network. network_dir in crio. conf to somewhere kubeadm won’t touch. To get more information the following link [Running CRI-O with kubeadm] in the References section can be checked. Now Kubernetes has to be configured to be able to talk to CRI-O, to proceed, a new file has to be created in /etc/default/kubelet with the following content: KUBELET_EXTRA_ARGS=--feature-gates= AllAlpha=false,RunAsGroup=true --container-runtime=remote --cgroup-driver=cgroupfs --container-runtime-endpoint='unix:///var/run/crio/crio. sock' --runtime-request-timeout=5mNow the systemd has to be reloaded: k8s-test. local# systemctl daemon-reloadCRI-O will use flannel network as it is recommended for multus so the following file has to be downloaded and configured: k8s-test. local# cd /etc/crio/net. d/k8s-test. local# wget https://raw. githubusercontent. com/cri-o/cri-o/master/contrib/cni/10-crio-bridge. confk8s-test. local# sed -i 's/10. 88. 0. 0/10. 244. 0. 0/g' 10-crio-bridge. confAs the previous code block has shown, the network used is 10. 244. 0. 0, now the crio service can be started and enabled: k8s-test. local# systemctl enable criok8s-test. local# systemctl start criok8s-test. local# systemctl status crio● crio. service - Container Runtime Interface for OCI (CRI-O) Loaded: loaded (/usr/local/lib/systemd/system/crio. service; enabled; vendor preset: disabled) Active: active (running) since mié 2019-10-02 16:17:06 CEST; 3s ago Docs: https://github. com/cri-o/cri-o Main PID: 15427 (crio) CGroup: /system. slice/crio. service └─15427 /usr/local/bin/criooct 02 16:17:06 k8s-test systemd[1]: Starting Container Runtime Interface for OCI (CRI-O). . . oct 02 16:17:06 k8s-test systemd[1]: Started Container Runtime Interface for OCI (CRI-O). In the next posts, the Kubernetes cluster will be set up, together with the pod Network and also the KubeVirt with the virtual Machines deployments. " }, { - "id": 94, + "id": 93, "url": "/2019/changelog-v0.21.0.html", "title": "KubeVirt v0.21.0", "author" : "kube🤖", "tags" : "release notes, changelog", "body": "v0. 21. 0: Released on: Mon Sep 9 09:59:08 2019 +0200 CI: Support for Kubernetes 1. 
14 Many bug fixes in several areas Support for virtctl migrate Support configurable number of controller threads Support to opt-out of bridge binding for podnetwork Support for OpenShift Prometheus monitoring Support for setting more SMBIOS fields Improved containerDisk memory usage and speed Fix CRI-O memory limit Drop spc_t from launcher Add feature gates to security sensitive features" }, { - "id": 95, + "id": 94, "url": "/2019/CNCF-Sandbox.html", "title": "KubeVirt is now part of CNCF Sandbox", "author" : "Pablo Iranzo Gómez", "tags" : "CNCF, sandbox", "body": "Some time ago, with the PR https://github. com/cncf/toc/pull/265, KubeVirt was proposed to be part of the CNCF Sandbox. On 9th September 2019, the project has finally accomplished the required steps to get in (including two sponsors) to get listed as part of it at https://www. cncf. io/sandbox-projects/ The document with the proposal can be read at the final repo at https://github. com/cncf/toc/blob/master/proposals/sandbox/kubevirt. adoc for more information. It’s interesting to see the messages of support at the PR that show interesting use cases by our users, so keep an eye on them! " }, { - "id": 96, + "id": 95, "url": "/2019/changelog-v0.20.0.html", "title": "KubeVirt v0.20.0", "author" : "kube🤖", "tags" : "release notes, changelog", "body": "v0. 20. 0: Released on: Fri Aug 9 16:42:41 2019 +0200 Containerdisks are now secure and they are not copied anymore on every start. Create specific SecurityContextConstraints on OKD instead of using the Added clone authorization check for DataVolumes with PVC source The sidecar feature is feature-gated now Use container image shasums instead of tags for KubeVirt deployments Protect control plane components against voluntary evictions with a Replaced hardcoded virtctl by using the basename of the call, this enables Added RNG device to all Fedora VMs in tests and examples (newer kernels might The virtual memory is now set to match the memory limit, if memory limit is Support nftable for CoreOS Added a block-volume flag to the virtctl image-upload command Improved virtctl console/vnc data flow Removed DataVolumes feature gate in favor of auto-detecting CDI support Removed SR-IOV feature gate, it is enabled by default now VMI-related metrics have been renamed from kubevirt_vm_ to kubevirt_vmi_ Added metric to report the VMI count Improved integration with HCO by adding a CSV generator tool and modified CI Improvements:" }, { - "id": 97, + "id": 96, "url": "/2019/Kubevirt-CR-Condition-Types-Rename-Now-ACTIVE.html", "title": "KubeVirt Condition Types Renamed", "author" : "Pablo Iranzo Gómez", "tags" : "Condition types", "body": "Hi,As previously announced in /2019/KubeVirt-CR-Condition-Types-Rename. html, Condition Types have been renamed from Ready to Available and from Updating to Progressing, check the linked article for more details. Check Release notes on kubevirt. io for v0. 20. 0 to see the changes. " }, { - "id": 98, + "id": 97, "url": "/2019/KubeVirt-CR-Condition-Types-Rename.html", "title": "KubeVirt Condition Types Rename in Custom Resource", "author" : "Pablo Iranzo Gómez", "tags" : "condition types", "body": "The announcement: Hi KubeVirt Community! As per the message from Marc Sluiter on our mailing list: Hello everybody,today we merged a PR [0], which renamed the condition types on the KubeVirt custom resources. 
This was done for alignment of conditions of all components in the KubeVirt ecosystem, which are deployed by the Hyperconverged Cluster Operator (HCO)[1], in order to make it easier for HCO to determine the deployment status of these components. The conditions are explained in detail in [2]. For KubeVirt this means that especially the Ready condition was renamed to Available . This might affect you in case you used the Ready condition for waiting for a successful deployment of KubeVirt. If so, you need to update the corresponding command to something like `kubectl -n kubevirt wait kv kubevirt --for condition=Available`. The second renamed condition is Updating . This one is named Progressing now. As explained in [2], there also is a new condition named Degraded . The Created and Synchronized conditions are unchanged. These changes take effect immediately if you are deploying KubeVirt from the master branch, or starting with the upcoming v0. 20. 0 release. [0] https://github. com/kubevirt/kubevirt/pull/2548[1] https://github. com/kubevirt/hyperconverged-cluster-operator[2] https://github. com/kubevirt/hyperconverged-cluster-operator/blob/main/docs/conditions. mdBest regards,We’re renaming some of the prior ‘conditions’ reported by the Custom Resources. What does it mean to us: CR Rename We’re making KubeVirt more compatible with the standard for Operators, when doing so, some of the conditions are changing, so check your scripts using checks for conditions to use the new ones. | Prior | Actual | Note || :———-: | :—————: | :—————- || Ready | Available | Updated || Updating | Progressing | Updated || - | Degraded | New condition || Created | Created | Unchanged || Synchronized | Synchronized | Unchanged | References: Check for more information on the following URL’s https://github. com/kubevirt/kubevirt/pull/2548 https://github. com/kubevirt/hyperconverged-cluster-operator https://github. com/kubevirt/hyperconverged-cluster-operator/blob/main/docs/conditions. md" }, { - "id": 99, + "id": 98, "url": "/2019/NodeDrain-KubeVirt.html", "title": "Node Drain in KubeVirt", "author" : "DirectedSoul", "tags" : "node drain, eviction, nmo", "body": "Introduction: In a Kubernetes (k8s) cluster, the control plane(scheduler) is responsible for deploying workloads(pods, deployments, replicasets) on the worker nodes depending on the resource availability. What do we do with the workloads if the need arises for maintaining this node? Well, there is good news, node-drain feature and node maintenance operator(NMO) both come to our rescue in this situation. This post discusses evicting the VMI(virtual machine instance) and other resources from the node using node drain feature and NMO. Note The environment used for writing this post is based on OpenShift 4 with 3 Masters and 3 Worker nodes. HyperconvergedClusterOperator: The goal of the hyper-converged-cluster-operator (HCO) is to provide a single entry point for multiple operators (kubevirt, cdi, networking, etc) where users can deploy and configure them in a single object. This operator is sometimes referred to as a “meta operator” or an “operator for operators”. Most importantly, this operator doesn’t replace or interfere with OLM which is an open source toolkit to manage Kubernetes native applications, called Operators, in an effective, automated, and scalable way. Check for more information about OLM. It only creates operator CRs, which is the user’s prerogative. 
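Before putting any node into maintenance it is worth confirming that the KubeVirt component deployed through HCO is actually reporting the Available condition described in the previous post. A minimal check, assuming the KubeVirt CR keeps the default name kubevirt in the kubevirt namespace; under HCO the CR may instead live in the kubevirt-hyperconverged namespace, so locate it first and adjust the namespace and name accordingly:

$oc get kubevirt --all-namespaces
$oc -n kubevirt wait kv kubevirt --for condition=Available --timeout=5m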
In our cluster (3 master and 3 nodes) we’ll be able to see something similar to: $oc get nodesip-10-0-132-147. us-east-2. compute. internal Ready worker 14m v1. 13. 4+27816e1b1ip-10-0-142-95. us-east-2. compute. internal Ready master 15m v1. 13. 4+27816e1b1ip-10-0-144-125. us-east-2. compute. internal Ready worker 14m v1. 13. 4+27816e1b1ip-10-0-150-125. us-east-2. compute. internal Ready master 14m v1. 13. 4+27816e1b1ip-10-0-161-166. us-east-2. compute. internal Ready master 15m v1. 13. 4+27816e1b1ip-10-0-173-203. us-east-2. compute. internal Ready worker 15m v1. 13. 4+27816e1b1To test the node eviction, there are two methods. Method 1: Use kubectl node drain command: Before sending a node into maintenance state it is very much necessary to evict the resources on it, VMI’s, pods, deployments etc. One of the easiest option for us is to stick to the oc adm drain command. For this, select the node from the cluster from which you want the VMIs to be evicted oc get nodesHere ip-10-0-173-203. us-east-2. compute. internal, then issue the following command. oc adm drain <node-name> --delete-local-data --ignore-daemonsets=true --force --pod-selector=kubevirt. io=virt-launcher --delete-local-data is used to remove any VMI’s that use emptyDir volumes, however the data in those volumes are ephemeral which means it is safe to delete after termination. --ignore-daemonsets=true is a must needed flag because when VMI is deployed a daemon set named virt-handler will be running on each node. DaemonSets are not allowed to be evicted using kubectl drain. By default, if this command encounters a DaemonSet on the target node, the command will fail. This flag tells the command it is safe to proceed with the eviction and to just ignore DaemonSets. --pod-selector=kubevirt. io=virt-launcher flag tells the command to evict the pods that are managed by kubevirt Evict a node: If you want to evict all pods from the node just use: oc adm drain <node name> --delete-local-data --ignore-daemonsets=true --forceHow to evacuate VMIs via Live Migration from a Node: If the LiveMigration feature gate is enabled, it is possible to specify an evictionStrategy on VMIs which will react with live-migrations on specific taints on nodes. The following snippet on a VMI ensures that the VMI is migrated if the kubevirt. io/drain:NoSchedule taint is added to a node: spec: evictionStrategy: LiveMigrateOnce the VMI is created, taint the node with kubectl taint nodes foo kubevirt. io/drain=draining:NoScheduleThis command will then trigger a migration. Behind the scenes a PodDisruptionBudget is created for each VMI which has an evictionStrategy defined. This ensures that evictions are be blocked on these VMIs and that we can guarantee that a VMI will be migrated instead of shut off. Re-enabling a Node after Eviction: We have seen how to make the node unschedulable, now lets see how to re-enable the node. The oc adm drain will result in the target node being marked as unschedulable. This means the node will not be eligible for running new VirtualMachineInstances or Pods. If target node should become schedulable again, the following command must be run: oc adm uncordon <node name>Method 2: Use Node Maintenance Operator (NMO): NMO is part of HyperConvergedClusterOperator, so we need to deploy it. Either check: the gist for deploying HCO the blog post on HCOHere will continue using the gist for demonstration purposes. 
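For reference, the imperative Method 1 flow looks roughly like this end to end. It is only a sketch that reuses the flags and example node name from above, with the actual maintenance work left as a placeholder:

NODE=ip-10-0-173-203.us-east-2.compute.internal

# evict only the virt-launcher pods (i.e. the VMIs) from the node
oc adm drain $NODE --delete-local-data --ignore-daemonsets=true --force \
  --pod-selector=kubevirt.io=virt-launcher

# ... perform the maintenance that prompted the drain ...

# make the node schedulable again
oc adm uncordon $NODE

If the LiveMigration feature gate is enabled and the VMI specifies evictionStrategy: LiveMigrate, eviction results in a live migration rather than a shutdown, as described above.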
Observe the resources that get created after the HCO is installed $oc get pods -n kubevirt-hyperconvergedNAME READY STATUS RESTARTS AGEcdi-apiserver-769fcc7bdf-xgpt8 1/1 Running 0 12mcdi-deployment-8b64c5585-gq46b 1/1 Running 0 12mcdi-operator-77b8847b96-kx8rx 1/1 Running 0 13mcdi-uploadproxy-8dcdcbff-47lng 1/1 Running 0 12mcluster-network-addons-operator-584dff99b8-2c96w 1/1 Running 0 13mhco-operator-59b559bd44-vpznq 1/1 Running 0 13mkubevirt-ssp-operator-67b78446f7-b9klr 1/1 Running 0 13mkubevirt-web-ui-operator-9df6b67d9-f5l4l 1/1 Running 0 13mnode-maintenance-operator-6b464dc85-zd6nt 1/1 Running 0 13mvirt-api-7655b9696f-g48p8 1/1 Running 1 12mvirt-api-7655b9696f-zfsw9 1/1 Running 0 12mvirt-controller-7c4584f4bc-6lmxq 1/1 Running 0 11mvirt-controller-7c4584f4bc-6m62t 1/1 Running 0 11mvirt-handler-cfm5d 1/1 Running 0 11mvirt-handler-ff6c8 1/1 Running 0 11mvirt-handler-mcl7r 1/1 Running 1 11mvirt-operator-87d7c98b-fvvzt 1/1 Running 0 13mvirt-operator-87d7c98b-xzc42 1/1 Running 0 13mvirt-template-validator-76cbbd6f68-5fbzx 1/1 Running 0 12mAs seen from above HCO deploys the node-maintenance-operator. Next, let’s install a kubevirt CR to start using VM workloads on worker nodes. Please feel free to follow the steps here and deploy a VMI as explained. Please feel free to check the video that explains the same $oc get vmsNAME AGE RUNNING VOLUMEtestvm 2m13s trueDeploy a node-maintenance-operator CR: As seen from above NMO is deployed from HCO, the purpose of this operator is to watch the node maintenance CustomResource(CR) called NodeMaintenance which mainly contains the node that needs a maintenance and the reason for it. The below actions are performed If a NodeMaintenance CR is created: Marks the node as unschedulable, cordons it and evicts all the pods from that node If a NodeMaintenance CR is deleted: Marks the node as schedulable, uncordons it, removes pod from maintenance. To install the NMO, please follow upsream instructions at NMO Either use HCO to create NMO Operator or deploy NMO operator as shown below After you follow the instructions: Create a CRD oc create -f deploy/crds/nodemaintenance_crd. yamlcustomresourcedefinition. apiextensions. k8s. io/nodemaintenances. kubevirt. io created Create the NS oc create -f deploy/namespace. yamlnamespace/node-maintenance-operator created Create a Service Account: oc create -f deploy/service_account. yamlserviceaccount/node-maintenance-operator created Create a ROLE oc create -f deploy/role. yamlclusterrole. rbac. authorization. k8s. io/node-maintenance-operator created Create a ROLE Binding oc create -f deploy/role_binding. yamlclusterrolebinding. rbac. authorization. k8s. io/node-maintenance-operator created Then finally make sure to add the image version of the NMO operator in the deploy/operator. yml image: quay. io/kubevirt/node-maintenance-operator:v0. 3. 0 and then deploy the NMO Operator as shown oc create -f deploy/operator. yamldeployment. apps/node-maintenance-operator created Finally, We can verify the deployment for the NMO Operator as below oc get deployment -n node-maintenance-operatorNAME READY UP-TO-DATE AVAILABLE AGEnode-maintenance-operator 1/1 1 1 4m23sNow that the NMO operator is created, we can create the NMO CR which puts the node into maintenance mode (this CR has the info about the node->from which the pods needs to be evicted and the reason for the maintenance) cat deploy/crds/nodemaintenance_cr. yamlapiVersion: kubevirt. 
io/v1alpha1kind: NodeMaintenancemetadata: name: nodemaintenance-xyzspec: nodeName: <Node-Name> reason: Test node maintenance For testing purpose, we can deploy a sample VM instance as shown: kubectl apply -f https://kubevirt. io/labs/manifests/vm. yamlNow start the VM testvm . /virtctl start testvmWe can see that it’s up and running kubectl get vmisNAME AGE PHASE IP NODENAMEtestvm 92s Running 10. 131. 0. 17 ip-10-0-173-203. us-east-2. compute. internalAlso, we can see the status: kubectl get vmis -o yaml testvm. . . interfaces: - ipAddress: 10. 131. 0. 17 mac: 0a:58:0a:83:00:11 name: default migrationMethod: BlockMigration nodeName: ip-10-0-173-203. us-east-2. compute. internal #NoteDown the nodeName phase: RunningNote down the node name and edit the nodemaintenance_cr. yaml file and then issue the CR manifest which sends the node into maintenance. Now to evict the pods from the node ip-10-0-173-203. us-east-2. compute. internal, edit the node-maintenance_cr. yaml as shown: cat deploy/crds/nodemaintenance_cr. yamlapiVersion: kubevirt. io/v1alpha1kind: NodeMaintenancemetadata: name: nodemaintenance-xyzspec: nodeName: ip-10-0-173-203. us-east-2. compute. internal reason: Test node maintenance As soon as you apply the above CR, the current VM gets deployed in the other node, oc apply -f deploy/crds/nodemaintenance_cr. yamlnodemaintenance. kubevirt. io/nodemaintenance-xyz createdWhich immediately evicts the VMI kubectl get vmisNAME AGE PHASE IP NODENAMEtestvm 33s Schedulingkubectl get vmisNAME AGE PHASE IP NODENAMEtestvm 104s Running 10. 128. 2. 20 ip-10-0-132-147. us-east-2. compute. internalip-10-0-173-203. us-east-2. compute. internal Ready,SchedulingDisabled workerWhen all of this happens, we can view the changes that are taking place with: oc logs pods/node-maintenance-operator-645f757d5-89d6r -n node-maintenance-operator. . . { level : info , ts :1559681430. 650298, logger : controller_nodemaintenance , msg : Applying Maintenance mode on Node: ip-10-0-173-203. us-east-2. compute. internal with Reason: Test node maintenance , Request. Namespace : , Request. Name : nodemaintenance-xyz }{ level : info , ts :1559681430. 7509086, logger : controller_nodemaintenance , msg : Taints: [{\ key\ :\ node. kubernetes. io/unschedulable\ ,\ effect\ :\ NoSchedule\ },{\ key\ :\ kubevirt. io/drain\ ,\ effect\ :\ NoSchedule\ }] will be added to node ip-10-0-173-203. us-east-2. compute. internal }{ level : info , ts :1559681430. 7509348, logger : controller_nodemaintenance , msg : Applying kubevirt. io/drain taint add on Node: ip-10-0-173-203. us-east-2. compute. internal }{ level : info , ts :1559681430. 7509415, logger : controller_nodemaintenance , msg : Patchi{ level : info , ts :1559681430. 9903986, logger : controller_nodemaintenance , msg : evicting pod \ virt-controller-b94d69456-b9dkw\ \n }{ level : info , ts :1559681430. 99049, logger : controller_nodemaintenance , msg : evicting pod \ community-operators-5cb68db58-4m66j\ \n }{ level : info , ts :1559681430. 9905066, logger : controller_nodemaintenance , msg : evicting pod \ alertmanager-main-1\ \n }{ level : info , ts :1559681430. 9905581, logger : controller_nodemaintenance , msg : evicting pod \ virt-launcher-testvm-q5t7l\ \n }{ level : info , ts :1559681430. 9905746, logger : controller_nodemaintenance , msg : evicting pod \ redhat-operators-6b6f6bd788-zx8nm\ \n }{ level : info , ts :1559681430. 990588, logger : controller_nodemaintenance , msg : evicting pod \ image-registry-586d547bb5-t9lwr\ \n }{ level : info , ts :1559681430. 
9906075, logger : controller_nodemaintenance , msg : evicting pod \ kube-state-metrics-5bbd4c45d5-sbnbg\ \n }{ level : info , ts :1559681430. 9906383, logger : controller_nodemaintenance , msg : evicting pod \ certified-operators-9f9f6fd5c-9ltn8\ \n }{ level : info , ts :1559681430. 9908028, logger : controller_nodemaintenance , msg : evicting pod \ virt-api-59d7c4b595-dkpvs\ \n }{ level : info , ts :1559681430. 9906204, logger : controller_nodemaintenance , msg : evicting pod \ router-default-6b57bcc884-frd57\ \n }{ level : info , ts :1559681430. 9908257, logger : controller_nodemaintenance , msg : evictClearly we can see that the previous node went into SchedulingDisabled state and the VMI was evicted and placed into other node in the cluster. This demonstrates the node eviction using NMO. VirtualMachine Evictions notes: The eviction of any VirtualMachineInstance that is owned by a VirtualMachine set to running=true will result in the VirtualMachineInstance being re-scheduled to another node. The VirtualMachineInstance in this case will be forced to power down and restart on another node. In the future once KubeVirt introduces live migration support, the VM will be able to seamlessly migrate to another node during eviction. Wrap-up: The NMO achieved its aim of evicting the VMI’s successfully from the node, hence we can now safely repair/update the node and make it available for running the workloads again once the maintenance is over. " }, { - "id": 100, + "id": 99, "url": "/2019/How-To-Import-VM-into-Kubevirt.html", "title": "How to import VM into KubeVirt", "author" : "DirectedSoul", "tags" : "cdi, vm import", "body": "Introduction: Kubernetes has become the new way to orchestrate the containers and to handle the microservices, but what if I already have applications running on my old VM’s in my datacenter ? Can those apps ever be made k8s friendly ? Well, if that is the use-case for you, then we have a solution with KubeVirt! In this blog post we will show you how to deploy a VM as a yaml template and the required steps on how to import it as a PVC onto your kubernetes environment using the CDI and KubeVirt add-ons. Assumptions: A basic understanding of the k8s architecture: In its simplest terms Kubernetes is a portable, extensible open-source platform for managing containerized workloads and services, that facilitates both declarative configuration and automation. It has a large, rapidly growing ecosystem. Kubernetes services, support, and tools are widely available. For complete details check Kubernetes-architecture User is familiar with the concept of a Libvirt based VM PersistentVolume (PV) is a piece of storage in the cluster that has been provisioned by an administrator. Feel free to check more on Persistent Volume(PV). Persistent Volume Claim (PVC) is a request for storage by a user. It is similar to a pod. Pods consume node resources and PVCs consume PV resources. Feel free to check more on Persistent Volume Claim(PVC). User is familiar with the concept of KubeVirt-architecture and CDI-architecture User has already installed KubeVirt in an available K8s environment, if not please follow the link Installing KubeVirt to further proceed. User is already familiar with VM operation with Kubernetes, for a refresher on how to use ‘Virtual Machines’ in Kubernetes, please do check LAB 1 before proceeding. 
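Before diving in, a quick sanity check that the prerequisites above are actually met can save time. This is only a sketch; the namespace names are the usual defaults (kubevirt and cdi) and may differ in your cluster:

# KubeVirt control plane pods up?
kubectl get pods -n kubevirt

# CDI pods running and its CRDs registered?
kubectl get pods -n cdi
kubectl get crd datavolumes.cdi.kubevirt.io

# virtctl available on the PATH?
virtctl version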
Creating Virtual Machines from local images with CDI and virtctl: The Containerized Data Importer (CDI) project provides facilities for enabling Persistent Volume Claims (PVCs) to be used as disks for KubeVirt VMs. The three main CDI use cases are: Import a disk image from a URL to a PVC (HTTP/S3) Clone an existing PVC Upload a local disk image to a PVCThis document covers the third use case and covers the HTTP based import use case at the end of this post. NOTE: You should have CDI installed in your cluster, a VM disk that you’d like to upload, and virtctl in your path Please follow the instructions for the installation of CDI (v1. 9. 0 as of this writing) Expose cdi-uploadproxy service: The cdi-uploadproxy service must be accessible from outside the cluster. Here are some ways to do that: NodePort Service Ingress RouteWe can take a look at example manifests here The supported image formats are: . img . iso . qcow2 Compressed (. tar, . gz or . xz) of the above formats. We will use this image from CirrOS Project (in . img format) We can use virtctl command for uploading the image as shown below: virtctl image-upload --helpUpload a VM image to a PersistentVolumeClaim. Usage: virtctl image-upload [flags]Examples: # Upload a local disk image to a newly created PersistentVolumeClaim: virtctl image-upload --uploadproxy-url=https://cdi-uploadproxy. mycluster. com --pvc-name=upload-pvc --pvc-size=10Gi --image-path=/images/fedora28. qcow2Flags: --access-mode string The access mode for the PVC. (default ReadWriteOnce ) -h, --help help for image-upload --image-path string Path to the local VM image. --insecure Allow insecure server connections when using HTTPS. --no-create Don't attempt to create a new PVC. --pvc-name string The destination PVC. --pvc-size string The size of the PVC to create (ex. 10Gi, 500Mi). --storage-class string The storage class for the PVC. --uploadproxy-url string The URL of the cdi-upload proxy service. --wait-secs uint Seconds to wait for upload pod to start. (default 60)Use virtctl options for a list of global command-line options (applies to all commands). Creation of VirtualMachineInstance from a PVC: Here, virtctl image-upload works by creating a PVC of the requested size, sending an UploadTokenRequest to the cdi-apiserver, and uploading the file to the cdi-uploadproxy. virtctl image-upload --pvc-name=cirros-vm-disk --pvc-size=500Mi --image-path=/home/shegde/images/cirros-0. 4. 0-x86_64-disk. img --uploadproxy-url=<url to upload proxy service>The data inside are ephemeral meaning is lost when the VM restarts, in order to prevent that, and provide a persistent data storage, we use PVC (persistentVolumeClaim) which allows connecting a PersistentVolumeClaim to a VM disk. cat <<EOF | kubectl apply -f -apiVersion: kubevirt. io/v1alpha3kind: VirtualMachineInstancemetadata: name: cirros-vmspec: domain: devices: disks: - disk: bus: virtio name: pvcdisk machine: type: resources: requests: memory: 64M terminationGracePeriodSeconds: 0 volumes: - name: pvcdisk persistentVolumeClaim: claimName: cirros-vm-diskstatus: {}EOFA PersistentVolume can be in filesystem or block mode: Filesystem: For KubeVirt to be able to consume the disk present on a PersistentVolume’s filesystem, the disk must be named disk. img and be placed in the root path of the filesystem. Currently the disk is also required to be in raw format. Important: The disk. img image file needs to be owned by the user-id 107 in order to avoid permission issues. Additionally, if the disk. 
img image file has not been created manually before starting a VM then it will be created automatically with the PersistentVolumeClaim size. Since not every storage provisioner provides volumes with the exact usable amount of space as requested (e. g. due to filesystem overhead), KubeVirt tolerates up to 10% less available space. This can be configured with the pvc-tolerate-less-space-up-to-percent value in the kubevirt-config ConfigMap. Block: Use a block volume for consuming raw block devices. To do that, BlockVolume feature gate must be enabled. A simple example which attaches a PersistentVolumeClaim as a disk may look like this: metadata: name: testvmi-pvcapiVersion: kubevirt. io/v1alpha3kind: VirtualMachineInstancespec: domain: resources: requests: memory: 64M devices: disks: - name: mypvcdisk lun: {} volumes: - name: mypvcdisk persistentVolumeClaim: claimName: mypvcCreation with a DataVolume: DataVolumes are a way to automate importing virtual machine disks onto pvc’s during the virtual machine’s launch flow. Without using a DataVolume, users have to prepare a pvc with a disk image before assigning it to a VM or VMI manifest. With a DataVolume, both the pvc creation and import is automated on behalf of the user. DataVolume VM Behavior: DataVolumes can be defined in the VM spec directly by adding the DataVolumes to the dataVolumeTemplates list. Below is an example. apiVersion: kubevirt. io/v1alpha3kind: VirtualMachinemetadata: labels: kubevirt. io/vm: vm-alpine-datavolume name: vm-alpine-datavolumespec: running: false template: metadata: labels: kubevirt. io/vm: vm-alpine-datavolume spec: domain: devices: disks: - disk: bus: virtio name: datavolumedisk1 resources: requests: memory: 64M volumes: - dataVolume: #Note the type is dataVolume name: alpine-dv name: datavolumedisk1 dataVolumeTemplates: # Automatically a PVC of size 2Gi is created - metadata: name: alpine-dv spec: pvc: accessModes: - ReadWriteOnce resources: requests: storage: 2Gi source: #This is the source where the ISO file resides http: url: http://cdi-http-import-server. kubevirt/images/alpine. isoFrom the above manifest the two main sections that needs an attention are source and pvc. The source part declares that there is a disk image living on an http server that we want to use as a volume for this VM. The pvc part declares the spec that should be used to create the pvc that hosts the source data. When this VM manifest is posted to the cluster, as part of the launch flow a pvc will be created using the spec provided and the source data will be automatically imported into that pvc before the VM starts. When the VM is deleted, the storage provisioned by the DataVolume will automatically be deleted as well. A few caveats to be considered before using DataVolumes: A DataVolume is a custom resource provided by the Containerized Data Importer (CDI) project. KubeVirt integrates with CDI in order to provide users a workflow for dynamically creating pvcs and importing data into those pvcs. In order to take advantage of the DataVolume volume source on a VM or VMI, the DataVolumes feature gate must be enabled in the kubevirt-config config map before KubeVirt is installed. CDI must also be installed(follow the steps as mentioned above). Enabling the DataVolumes feature gate: Below is an example of how to enable DataVolume support using the kubevirt-config config map. cat <<EOF | kubectl create -f -apiVersion: v1kind: ConfigMapmetadata: name: kubevirt-config namespace: kubevirt labels: kubevirt. 
io: data: feature-gates: DataVolumes EOFThis config map assumes KubeVirt will be installed in the KubeVirt namespace. Change the namespace to suit your installation. First post the configmap above, then install KubeVirt. At that point DataVolume integration will be enabled. Wrap-up: As demonstrated, VM can be imported as a k8s object using a CDI project along with KubeVirt. For more detailed insights, please feel free to follow the KubeVirt project. " }, { - "id": 101, + "id": 100, "url": "/2019/website-roadmap.html", "title": "Website roadmap", "author" : "Pablo Iranzo Gómez", "tags" : "website, community, roadmap", "body": "Detour ahead!Working with websites and with this KubeVirt website for a while has given the idea of things that should improve it. As this is a community-driven effort, what could better do rather than ask YOU for feedback? We’ve created a TODO. md file to track what we’ve identified. Additionally, as that file is part of the repository, it can be PR’d, commented via the PR’s to it, etc. Please, let us know what do you think about what is proposed, or propose new ideas to be added (or create new issues to have them added) Thanks for being part of KubeVirt community! " }, { - "id": 102, + "id": 101, "url": "/2019/kubevirt-with-ansible-part-2.html", "title": "KubeVirt with Ansible, part 2", "author" : "mmazur", "tags" : "ansible", "body": "Part 1 contained a short introduction to basic VM management with Ansible’s kubevirt_vm module. This time we’ll paint a more complete picture of all the features on offer. As before, examples found herein are also available as full working playbooks in ourplaybooks example repository. Additionally, each section of this post links to the corresponding module’s Ansible documentation page. Those pages always contain an Examples section, which the reader is encouraged to look through, as they havemany more ways of using the modules than can reasonably fit here. More VM management: Virtual machines managed by KubeVirt are highly customizable. Among the features accessible from Ansible, are: various libvirt–level virtualized hardware tweaks (e. g. machine_type or cpu_model), network interface configuration (interfaces), including multi–NIC utilizing the Multus CNI, non–persistent VMs (ephemeral: yes), direct DataVolumes support (datavolumes), and OpenShift Templates support (template). Further resources: Ansible module documentation Examples, lots of examples DataVolumes Introductory blog post Upstream documentation Multus Introductory blog post GitHub repo VM Image Management with the Containerized Data Importer: The main functionality of the kubevirt_pvc module is to manage Persistent Volume Claims. The following snippetshould seem familiar to anyone who dealt with PVCs before: kubevirt_pvc: name: pvc1 namespace: default size: 100Mi access_modes: - ReadWriteOnceRunning it inside a playbook will result in a new PVC named pvc1 with the access mode ReadWriteOnce and at least100Mi of storage assigned. The option dedicated to working with VM images is named cdi_source and lets one fill a PVC with data immediatelyupon creation. But before we get to the examples, the Containerized Data Importer needs to be properly deployed,which is as simple as running the following commands: export CDI_VER=$(curl -s https://github. com/kubevirt/containerized-data-importer/releases/latest | grep -o v[0-9]\. [0-9]*\. [0-9]* )kubectl apply -f https://github. com/kubevirt/containerized-data-importer/releases/download/$CDI_VER/cdi-operator. 
yamlkubectl apply -f https://github. com/kubevirt/containerized-data-importer/releases/download/$CDI_VER/cdi-cr. yamlOnce kubectl get pods -n cdi confirms all pods are ready, CDI is good to go. The module can instruct CDI to fill the PVC with data from: a remote HTTP(S) server (http:), a container registry (registry:), a local file (upload: yes), though this requires using kubevirt_cdi_upload for the actual upload step, or nowhere (the blank: yes option). Here’s a simple example: kubevirt_pvc:name: pvc2namespace: defaultsize: 100Miaccess_modes: - ReadWriteOncewait: yescdi_source: http: url: https://download. cirros-cloud. net/0. 4. 0/cirros-0. 4. 0-x86_64-disk. img infoPlease notice the wait: yes parameter. The module will only exit after CDI has completed transferring its data. Let’s see this in action: [mmazur@klapek part2]$ ansible-playbook pvc_cdi. yaml(…)TASK [Create pvc and fetch data] **********************************************************************************changed: [localhost]PLAY RECAP ********************************************************************************************************localhost : ok=2 changed=1 unreachable=0 failed=0 skipped=0 rescued=0 ignored=0[mmazur@klapek part2]$ kubectl get pvcNAME STATUS VOLUME CAPACITY ACCESS MODES STORAGECLASS AGEpvc2 Bound local-pv-6b6380e2 37Gi RWO local 71s[mmazur@klapek part2]$ kubectl get pvc/pvc2 -o yaml|grep cdi cdi. kubevirt. io/storage. import. endpoint: https://download. cirros-cloud. net/0. 4. 0/cirros-0. 4. 0-x86_64-disk. img cdi. kubevirt. io/storage. import. importPodName: importer-pvc2-gvn5c cdi. kubevirt. io/storage. import. source: http cdi. kubevirt. io/storage. pod. phase: SucceededEverything worked as expected. Further resources: Ansible module documentation (kubevirt_pvc) Ansible module documentation (kubevirt_cdi_upload) CDI GitHub RepoInventory plugin: The default way of using Ansible is to iterate over a list of hosts and perform operations on each one. Listing KubeVirt VMs can be done using the KubeVirt inventory plugin. It needs a bit of setting up before it canbe used. First, enable the plugin in ansible. cfg: [inventory]enable_plugins = kubevirtThen configure the plugin using a file named kubevirt. yml or kubevirt. yaml: plugin: kubevirtconnections: - namespaces: - default network_name: defaultAnd now let’s see if it worked and there’s a VM running in the default namespace (as represented by thenamespace_default inventory group): [mmazur@klapek part2]$ ansible -i kubevirt. yaml namespace_default --list-hosts [WARNING]: provided hosts list is empty, only localhost is available. Note that the implicit localhost does notmatch 'all' hosts (0):Right, we don’t have any VMs running. Let’s go back to part 1, create vm1, make sure it’s runingand then try again: [mmazur@klapek part2]$ ansible-playbook . . /part1/02_vm1. yaml(…)PLAY RECAP ********************************************************************************************************localhost : ok=2 changed=1 unreachable=0 failed=0 skipped=0 rescued=0 ignored=0[mmazur@klapek part2]$ ansible-playbook . . /part1/01_vm1_running. yaml(…)PLAY RECAP ********************************************************************************************************localhost : ok=2 changed=1 unreachable=0 failed=0 skipped=0 rescued=0 ignored=0[mmazur@klapek part2]$ ansible -i kubevirt. yaml namespace_default --list-hosts hosts (1): default-vm1-2c680040-9e75-11e9-8839-525500d15501Works! 
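With the inventory plugin wired up, the discovered VMs can be targeted like any other group of hosts. Below is a minimal sketch of a play against the namespace_default group; how Ansible actually reaches the guests (SSH keys, passwords, users configured via cloud-init) is assumed to be handled separately and is not covered here:

# site.yml (the file name is arbitrary) - run with: ansible-playbook -i kubevirt.yaml site.yml
- hosts: namespace_default
  gather_facts: no
  tasks:
    - name: Confirm each discovered KubeVirt VM answers over the configured connection
      ping: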
Further resources: Ansible inventory plugin documentationMore: Lastly, for the sake of brevity, a quick mention of the remaining modules: kubevirt_presets allows setting upVM presets to be used by deployed VMs, kubevirt_template brings in a generictemplating mechanism, when running on top of OpenShift or OKD, and kubevirt_rs lets one configure KubeVirt’sown ReplicaSets for running multiple instances of a specified virtual machine. " }, { - "id": 103, + "id": 102, "url": "/2019/changelog-v0.19.0.html", "title": "KubeVirt v0.19.0", "author" : "kube🤖", "tags" : "release notes, changelog", "body": "v0. 19. 0: Released on: Fri Jul 5 12:52:16 2019 +0200 Fixes when run on kind Fixes for sub-resource RBAC Limit pod network interface bindings Many additional bug fixes in many areas Additional testcases for updates, disk types, live migration with NFS Additional testcases for memory over-commit, block storage, cpu manager, Improvements around HyperV Improved error handling for runStartegies Improved update procedure Improved network metrics reporting (packets and errors) Improved guest overhead calculation Improved SR-IOV testsuite Support for live migration auto-converge Support for config-drive disks Support for setting a pullPolicy con containerDisks Support for unprivileged VMs when using SR-IOV Introduction of a project security policy" }, { - "id": 104, + "id": 103, "url": "/2019/changelog-v0.18.0.html", "title": "KubeVirt v0.18.0", "author" : "kube🤖", "tags" : "release notes, changelog", "body": "v0. 18. 0: Released on: Wed Jun 5 22:25:09 2019 +0200 Build: Use of go modules CI: Support for Kubernetes 1. 13 Countless testcase fixes and additions Several smaller bug fixes Improved upgrade documentation" }, { - "id": 105, + "id": 104, "url": "/2019/Kubevirt-vagrant-provider.html", "title": "KubeVirt vagrant provider", "author" : "pkliczewski", "tags" : "vagrant, lifecycle, virtual machines", "body": "IntroductionVagrant is a command line utility for managing the lifecycle of virtual machines. There are number of providers available which allow to control and provision virtual machines in different environment. In this blog post we update how to use the provider to manage KubeVirt. The KubeVirt Vagrant provider implements the following features: Manages virtual machines lifecycle - start, halt, status and destroy. Creates virtual machines using templates, container disks or existing pvc. Supports Vagrant built-in provisioners. Provides ability to ssh to the virtual machines Supports folder synchronization by using rsyncInstallationIn order to use the provider we need to install Vagrant first. The steps how to do it are available here. Once command line tool is available in our system, we can install the plugin by running: $ vagrant plugin install vagrant-kubevirtNow, we can obtain predefined box and start it using: $ vagrant up --provider=kubevirtVirtual machine definitionInstead of building a virtual machine from scratch, which would be a slow and tedious process, Vagrant uses a base image as template for virtual machines. These base images are known as “boxes” and every provider must introduce its own box format. The provider introduces kubevirt boxes. You can view an example box here. There are two ways to tell Vagrant, how to connect to KubeVirt cluster in Vagrantfile: use Kubernetes configuration file. When no other connection details provided, the provider will look for kubeconfig using value of KUBECONFIG environment variable or $HOME/. kube/config location. 
define connection details as part of box definitionVagrant. configure( 2 ) do |config| config. vm. provider :kubevirt do |kubevirt| kubevirt. hostname = '<kubevirt host>' kubevirt. port = '<kubevirt port>' kubevirt. token = '<token>' endendValues used in above sample box: kubevirt host - Hostname where KubeVirt is deployed kubevirt port - Port on where KubeVirt is listening token - User token used to authenticate any requestThere are number of options we can customize for specific a virtal machine: cpus - Number of cpus used by a virtual machine memory - Amount of memory by a virtual machineWe can choose one of the three following options: template - Name of a template which will be used to create the virtual machine image - Name of a container disk stored in a registry pvc - Name of persistent volume claim containing virtual machine diskBelow, you can find sample Vagrantfile exposing all the supported features: Vagrant. configure( 2 ) do |config| # name of the box config. vm. box = 'kubevirt' # vm boot timeout config. vm. boot_timeout = 360 # disables default vagrant folder config. vm. synced_folder . , /vagrant , disabled: true # synchoronizes a directory between a host and virtual machine config. vm. synced_folder $HOME/src , /srv/website , type: rsync # uses provision action to touch a file in a virtual machine config. vm. provision shell do |s| s. inline = touch example. txt end # defines virtual machine resources and source of disk config. vm. provider :kubevirt do |kubevirt| kubevirt. cpus = 2 kubevirt. memory = 512 kubevirt. image = 'kubevirt/fedora-cloud-container-disk-demo' end # defines a user configured on a virtual machine using cloud-init config. ssh. username = 'vagrant' config. ssh. password = 'vagrant'endUsageNow, once we defined a virtual machine we can see how to use the provider to manage it. vagrant upThe above command starts a virtual machines and performs any additonal operations defined in the Vagrantfile like provisioning, folder synchronization setup. For more information check here vagrant haltThe above command stops a virtual machine. For more information check here vagrant statusThe above command provides status of a virtual machine. For more information check here vagrant destroyThe above command stops a virtual machine and destroys all the resources used. For more information check here vagrant provisionThe above command runs configured provisioners for specific virtual machine. For more information check here vagrant sshThe above command ssh to running virtual machine. For more information check here Future workThere are still couple of features we would like to implement such as network management or user friendly box packaging. " }, { - "id": 106, + "id": 105, "url": "/2019/kubevirt-with-ansible-part-1.html", "title": "KubeVirt with Ansible, part 1 – Introduction", "author" : "mmazur", "tags" : "ansible", "body": "KubeVirt is a great solution for migrating existing workloads towards Kubernetes without having to containerizeeverything all at once (or at all). If some parts of your system can run as pods, while others are perfectly fine as virtual machines, KubeVirt is thetechnology that lets you seamlessly run both in a single cluster. And with the recent release of Ansible 2. 8 containing a new set of dedicated modules, it’s now possible to treat KubeVirtjust like any other ansible–supported VM hosting system. Already an Ansible user? 
Or maybe still researching your options?This series of posts should give you a good primer on how combining both technologies can ease your Kubernetes journey. Prerequisites: While it’s possible to specify the connection and authentication details of your k8s cluster directly in theplaybook, for the purpose of this introduction, we’ll assume you have a working kubeconfig file in your system. Ifrunning kubectl get nodes correctly returns a list of nodes and you’ve already deployed KubeVirt, then you’regood to go. If not, here’s a KubeVirt quickstart (with Minikube). Basic VM management: Before we get down to the YAML, please keep in mind that this post contains only the most interesting bits of the playbooks. To get actually runnable versions of each example, take a look at this code repository. Let’s start with creating the most basic VM by utilizing the kubevirt_vm module, like so: kubevirt_vm: namespace: default name: vm1 state: runningAnd now run it: [mmazur@klapek blog1]$ ansible-playbook 01_vm1_running. yaml(…)TASK [Create first vm?] *******************************************************************************************fatal: [localhost]: FAILED! => { changed : false, msg : It's impossible to create an empty VM or change state of a non-existent VM. }PLAY RECAP ********************************************************************************************************localhost : ok=1 changed=0 unreachable=0 failed=1 skipped=0 rescued=0 ignored=0Oops, too basic. Let’s try again, but this time with a small set of parameters specifying cpu, memory and a boot disk. The latter will be a demo image provided by the KubeVirt project. kubevirt_vm: namespace: default name: vm1 cpu_cores: 1 memory: 64Mi disks: - name: containerdisk volume: containerDisk: image: kubevirt/cirros-container-disk-demo:latest disk: bus: virtioAnd run it: [mmazur@klapek blog1]$ ansible-playbook 02_vm1. yaml(…)TASK [Create first vm, for real this time] ************************************************************************changed: [localhost]PLAY RECAP ********************************************************************************************************localhost : ok=2 changed=1 unreachable=0 failed=0 skipped=0 rescued=0 ignored=0It worked! One thing to note is that by default kubevirt_vm will not start a newly–created VM. Running kubectl get vms -n default will confirm as much. Changing this behavior requires specifying state: running as one of the module’s parameters when creating a new VM. Or we can get vm1 toboot by running the first playbook one more time, since this time the task will be interpreted as attempting to change the state ofan existing VM to running, which is what we want. [mmazur@klapek blog1]$ ansible-playbook 01_vm1_running. yaml(…)TASK [Create first vm] ********************************************************************************************changed: [localhost]PLAY RECAP ********************************************************************************************************localhost : ok=2 changed=1 unreachable=0 failed=0 skipped=0 rescued=0 ignored=0While the first two runs likely finished almost immediately, this time around ansible-playbook is waiting for the VM to boot, sodon’t be alarmed if that takes a bit of time. If everything went correctly, you should have an actual virtual machine running inside your k8s cluster. If present, the virtctl toolcan be used to log onto the new VM and to take a look around. Run virtctl console vm1 -n default and press ENTER to get a login prompt. 
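As an aside, the two steps above (create the VM, then change its state to running) can be collapsed into a single task by passing state: running at creation time, as mentioned earlier. A minimal sketch reusing the same disk definition:

- name: Create vm1 and start it immediately
  kubevirt_vm:
    namespace: default
    name: vm1
    state: running
    cpu_cores: 1
    memory: 64Mi
    disks:
      - name: containerdisk
        volume:
          containerDisk:
            image: kubevirt/cirros-container-disk-demo:latest
        disk:
          bus: virtio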
It’s useful to note at this point something about how Ansible and Kubernetes operate. This is best illustrated with an example. Let’s runthe first playbook one more time: [mmazur@klapek blog1]$ ansible-playbook 01_vm1_running. yaml(…)TASK [Create first vm?] *******************************************************************************************ok: [localhost]PLAY RECAP ********************************************************************************************************localhost : ok=2 changed=0 unreachable=0 failed=0 skipped=0 rescued=0 ignored=0The output is almost the same as on the previous run, with the one difference being that this time no changes were reported (changed=0). This is a concept called idempotency and is present in both Kubernetes and Ansible (though not everywhere). In this context it means that if the state you want to achieve with your playbook (have the VM running) is the state that the clustercurrently is in (the VM is already running) then nothing will change, no matter how many times you attempt the operation. Note Kubernetes versions prior to 1. 12 contain a bug that might report operations that didn’t really do anything as having changed things. If your second (and third, etc. ) run of 01_vm1_running. yaml keep reporting changed=1, this might be the reason why. Let’s finish with cleaning up after ourselves by removing vm1. First the relevant YAML: kubevirt_vm: namespace: default name: vm1 state: absentAnd run it: [mmazur@klapek blog1]$ ansible-playbook 03_vm1_absent. yaml(…)TASK [Delete the vm] **********************************************************************************************changed: [localhost]PLAY RECAP ********************************************************************************************************localhost : ok=2 changed=1 unreachable=0 failed=0 skipped=0 rescued=0 ignored=0Now the VM is gone, which running kubectl get vms -n default will confirm. Just like before, if you run the playbook a few more times, the play recap will keep reporting changed=0. Next: Please read part two for a wider overview of available features. " }, { - "id": 107, + "id": 106, "url": "/2019/changelog-v0.17.0.html", "title": "KubeVirt v0.17.0", "author" : "kube🤖", "tags" : "release notes, changelog", "body": "v0. 17. 0: Released on: Mon May 6 16:18:01 2019 +0200 Several testcase additions Improved virt-controller node distribution Improved support between version migrations Support for a configurable MachineType default Support for live-migration of a VM on node taints Support for VM swap metrics Support for versioned virt-launcher / virt-handler communication Support for HyperV flags Support for different VM run strategies (i. e manual and rerunOnFailure) Several fixes for live-migration (TLS support, protected pods)" }, { - "id": 108, + "id": 107, "url": "/2019/Hyper-Converged-Operator.html", "title": "Hyper Converged Operator", "author" : "DirectedSoul", "tags" : "HCO, hyperconverged operator", "body": "HCO known as Hyper Converged OperatorPrerequisites: This Blog assumes that the reader is aware of the concept of Operators and how it works in K8’s environment. Before proceeding further, feel free to take a look at this concept using CoreOS BlogPost What it does?: The goal of the hyperconverged-cluster-operator (HCO) is to provide a single entrypoint for multiple operators - kubevirt, cdi, networking, etc… - where users can deploy and configure them in a single object. 
This operator is sometimes referred to as a “meta operator” or an “operator for operators”. Most importantly, this operator doesn’t replace or interfere with OLM. It only creates operator CRs, which is the user’s prerogative. How does it work?: In this blog post, I’d like to focus on the first method(i. e by deploying a HCO using a CustomResourceDefinition method)which might seem like the most immediate benefit of this feature. Let’s get started! Environment description: We can use HCO both on minikube and also on OpenShift 4. We will be using OpenShift 4 for HCO in this post. Note: All the commands for installing HCO on minikube will remain the same as documented below, please follow the link Install_HCO_minikube install minikube by adjusting the memory to your requirement(atleast 4GiB of RAM is recommended). Deploying HCO on OpenShift 4 Cluster. : OpenShift Installation steps for OpenShift 4 including video tutorial can be found here Upon successful installation of OpenShift, we will have a cluster consisting of 3 masters and 3 workers which can be used for HCO integration $oc versionClient Version: version. Info{Major: 4 , Minor: 1+ , GitVersion: v4. 1. 0 , GitCommit: 2793c3316 , GitTreeState: , BuildDate: 2019-04-23T07:46:06Z , GoVersion: , Compiler: , Platform: }Server Version: version. Info{Major: 1 , Minor: 12+ , GitVersion: v1. 12. 4+0ba401e , GitCommit: 0ba401e , GitTreeState: clean , BuildDate: 2019-03-31T22:28:12Z , GoVersion: go1. 10. 8 , Compiler: gc , Platform: linux/amd64 }Check the nodes: $oc get nodesNAME STATUS ROLES AGE VERSIONip-10-0-133-213. us-east-2. compute. internal Ready worker 12m v1. 13. 4+da48e8391ip-10-0-138-120. us-east-2. compute. internal Ready master 18m v1. 13. 4+da48e8391ip-10-0-146-51. us-east-2. compute. internal Ready master 18m v1. 13. 4+da48e8391ip-10-0-150-215. us-east-2. compute. internal Ready worker 12m v1. 13. 4+da48e8391ip-10-0-160-201. us-east-2. compute. internal Ready master 17m v1. 13. 4+da48e8391ip-10-0-168-28. us-east-2. compute. internal Ready worker 12m v1. 13. 4+da48e8391Clone the HCO repo: git clone https://github. com/kubevirt/hyperconverged-cluster-operator. gitThis gives all the necessary go packages and yaml manifests for the next steps. Let’s create a NameSpace for the HCO deployment oc create new-project kubevirt-hyperconvergedNow switch to the kubevirt-hyperconverged NameSpace oc project kubevirt-hyperconvergedNow launch all the CRD’s oc create -f deploy/converged/crds/hco. crd. yamloc create -f deploy/converged/crds/kubevirt. crd. yamloc create -f deploy/converged/crds/cdi. crd. yamloc create -f deploy/converged/crds/cna. crd. yamlLet’s see the yaml file for HCO Custom Resource Definition ---apiVersion: apiextensions. k8s. io/v1beta1kind: CustomResourceDefinitionmetadata: name: hyperconvergeds. hco. kubevirt. iospec: additionalPrinterColumns: - JSONPath: . metadata. creationTimestamp name: Age type: date - JSONPath: . status. phase name: Phase type: string group: hco. kubevirt. io names: kind: HyperConverged plural: hyperconvergeds shortNames: - hco - hcos singular: hyperconverged scope: Namespaced subresources: status: {} version: v1alpha1 versions: - name: v1alpha1 served: true storage: trueLet’s create ClusterRoleBindings, ClusterRole, ServerAccounts and Deployments for the operator $ oc create -f deploy/convergedAnd after verifying all the above resources we can now finally deploy our HCO custom resource $ oc create -f deploy/converged/crds/hco. cr. 
yamlWe can take a look at the YAML definition of the CustomResource of HCO: Let’s create ClusterRoleBindings, ClusterRole, ServerAccounts and Deployments for the operator $ oc create -f deploy/convergedAnd after verifying all the above resources we can now finally deploy our HCO custom resource $ oc create -f deploy/converged/crds/hco. cr. yamlWe can take a look at the YAML definition of the CustomResource of HCO: ---apiVersion: hco. kubevirt. io/v1alpha1kind: HyperConvergedmetadata: name: hyperconverged-clusterAfter successfully executing the above commands,we should be now be having a virt-controller pod, HCO pod, and a network-addon pod functional and can be viewed as below. Let’s see the deployed pods: $oc get podsNAME READY STATUS RESTARTS AGEcdi-apiserver-769fcc7bdf-rv8zt 1/1 Running 0 5m2scdi-deployment-8b64c5585-g7zfk 1/1 Running 0 5m2scdi-operator-c77447cc7-58ld2 1/1 Running 0 11mcdi-uploadproxy-8dcdcbff-rddl6 1/1 Running 0 5m2scluster-network-addons-operator-85cd468ff5-xjgds 1/1 Running 0 11mhyperconverged-cluster-operator-75dd9c96f9-pqvdk 1/1 Running 0 11mvirt-api-7f5bfb4c58-bkbhq 1/1 Running 0 4m59svirt-api-7f5bfb4c58-kkvwc 1/1 Running 1 4m59svirt-controller-6ccbfb7d5b-m7ljf 1/1 Running 0 3m49svirt-controller-6ccbfb7d5b-mbvlv 1/1 Running 0 3m49svirt-handler-hqz9d 1/1 Running 0 3m49svirt-operator-667b6c845d-jfnsr 1/1 Running 0 11mAlso the below deployments: $oc get deploymentsNAME READY UP-TO-DATE AVAILABLE AGEcdi-apiserver 1/1 1 1 10mcdi-deployment 1/1 1 1 10mcdi-operator 1/1 1 1 16mcdi-uploadproxy 1/1 1 1 10mcluster-network-addons-operator 1/1 1 1 16mhyperconverged-cluster-operator 1/1 1 1 16mvirt-api 2/2 2 2 9m58svirt-controller 2/2 2 2 8m49svirt-operator 1/1 1 1 16mNote Here, Once we applied the Custom Resource the operator took care of deploying the actual KubeVirt pods (virt-api, virt-controller and virt-handler), CDI pods(cdi-upload-proxy, cdi-apiserver, cdi-deployment, cdi-operator) and Network add-on pods ( cluster-network-addons-operator). We will need to wait until all of the resources are up and running. This can be done using the command above or by using the command above with the -wflag. After the HCO is up and running on the cluster, we should be able to see the info of CRD’s $oc get crds | grep kubevirtcdiconfigs. cdi. kubevirt. io 2019-05-07T20:22:17Zcdis. cdi. kubevirt. io 2019-05-07T20:20:58Zdatavolumes. cdi. kubevirt. io 2019-05-07T20:22:17Zhyperconvergeds. hco. kubevirt. io 2019-05-07T20:20:58Zkubevirtcommontemplatesbundles. kubevirt. io 2019-05-07T20:20:58Zkubevirtnodelabellerbundles. kubevirt. io 2019-05-07T20:20:58Zkubevirts. kubevirt. io 2019-05-07T20:20:58Zkubevirttemplatevalidators. kubevirt. io 2019-05-07T20:20:58Zkwebuis. kubevirt. io 2019-05-07T20:20:58Znetworkaddonsconfigs. networkaddonsoperator. network. kubevirt. io 2019-05-07T20:20:58Znodemaintenances. kubevirt. io 2019-05-07T20:20:58Zvirtualmachineinstancemigrations. kubevirt. io 2019-05-07T20:23:02Zvirtualmachineinstancepresets. kubevirt. io 2019-05-07T20:23:01Zvirtualmachineinstancereplicasets. kubevirt. io 2019-05-07T20:23:02Zvirtualmachineinstances. kubevirt. io 2019-05-07T20:23:01Zvirtualmachines. kubevirt. io 2019-05-07T20:23:02ZNote In OpenShift we can use both kubectl and oc interchangeably to interact with the cluster objects once HCO is up and running. 
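When scripting the CRD-based installation, a simple way to block until the components above are ready is to watch the rollout of each deployment. This is only a sketch, with the names taken from the oc get deployments output earlier and an arbitrary timeout:

# wait for the core component deployments in the namespace created above
for d in virt-api virt-controller virt-operator cdi-apiserver cdi-deployment cdi-operator cdi-uploadproxy; do
  oc rollout status deployment/$d -n kubevirt-hyperconverged --timeout=300s
done

# virt-handler ships as a DaemonSet rather than a Deployment
oc rollout status daemonset/virt-handler -n kubevirt-hyperconverged --timeout=300s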
You can also read more about CDI, CNA, ssp-operator, web-ui and KubeVirt:: CDI CNA KubeVirt ssp-operator kubevirt-web-ui NodeMaintenanceHCO using the OLM methodNote The complete architecture of OLM and its components that connect together can be understood using the link OLM_architecture Replace with your Docker organization as official operator-registry images for HCO will not be provided. Next, build and publish the converged HCO operator-registry image. cd deploy/convergedexport HCO_DOCKER_ORG=<docker_org>docker build --no-cache -t docker. io/$HCO_DOCKER_ORG/hco-registry:example -f Dockerfile . docker push docker. io/$HCO_DOCKER_ORG/hco-registry:exampleAs an example deployment, Let’s take the value of operator-registry image as docker. io/rthallisey/hyperconverged-cluster-operator:latestNow, Let’s create the kubevirt-hyperconverged NS as below oc create ns kubevirt-hyperconvergedCreate the OperatorGroup cat <<EOF | oc create -f -apiVersion: operators. coreos. com/v1alpha2kind: OperatorGroupmetadata: name: hco-operatorgroup namespace: kubevirt-hyperconvergedEOFCreate a Catalog Source backed by a grpc registry cat <<EOF | oc create -f -apiVersion: operators. coreos. com/v1alpha1kind: CatalogSourcemetadata: name: hco-catalogsource namespace: openshift-operator-lifecycle-manager imagePullPolicy: Alwaysspec: sourceType: grpc image: docker. io/rthallisey/hco-registry:v0. 1-8 displayName: KubeVirt HyperConverged publisher: Red HatEOFPlease wait until the hco-catalogsource pod comes up Next is to create a subscription, we can create a subscription from the OpenShift4 web interface as shown below: Once subscribed, we can create a kubevirt Hyperconverged Operator from UI: Install the HCO Operator: Please wait until the virt-operator, cdi-operator and cluster-network-addons-operator comes up. After they are up, its now time to launch the HCO-Custom Resource itself: Once the HCO Operator is deployed in the kubevirt-hyperconverged NS, we can see all the pods are up and running: We can verify the same from the CLI: oc get pods -n kubevirt-hyperconvergedNAME READY STATUS RESTARTS AGEcdi-apiserver-769fcc7bdf-b5v8n 1/1 Running 0 4m5scdi-deployment-8b64c5585-qs527 1/1 Running 0 4m4scdi-operator-77b8847b96-5kmb2 1/1 Running 0 4m55scdi-uploadproxy-8dcdcbff-xgnxf 1/1 Running 0 4m5scluster-network-addons-operator-584dff99b8-c5kz5 1/1 Running 0 4m55shco-operator-59b559bd44-lgdnm 1/1 Running 0 4m55skubevirt-ssp-operator-67b78446f7-l7rfv 1/1 Running 0 4m55skubevirt-web-ui-operator-9df6b67d9-mzf6s 1/1 Running 0 4m55snode-maintenance-operator-6b464dc85-v6vmw 1/1 Running 0 4m55svirt-api-7b56d7dd89-8s78r 1/1 Running 0 2m59svirt-api-7b56d7dd89-h75t8 1/1 Running 1 2m59svirt-controller-77c6d6d779-9qpp4 1/1 Running 0 2m32svirt-controller-77c6d6d779-vbbxg 1/1 Running 0 2m32svirt-handler-4bfb9 1/1 Running 0 2m32svirt-handler-ns97x 1/1 Running 0 2m32svirt-handler-q7wbh 1/1 Running 0 2m32svirt-operator-87d7c98b-mh8pg 1/1 Running 0 4m55svirt-operator-87d7c98b-p6mbd 1/1 Running 0 4m55sWe can see how OLM operator manages the HCO pods from the openshift-operator-lifecycle-manager NS: The above method demonstrates the integration of HCO operator in OpenShift4. So, after HCO is up and running we need to test it by deploying a small instance of a VM. To deploy an instance follow the instructions here minikube_quickstart: Conclusion: What to expect next? HCO achieved its goal which was to provide a single entrypoint for multiple operators - kubevirt, cdi, networking, etc. 
where users can deploy and configure them in a single object as seen above. Now, we can also launch the HCO through OLM. Note Until we publish (and consume) the HCO and component operators through operatorhub. io, this is a means to demonstrate the HCO workflow without OLMOnce we publish operators through Marketplace at OperatorHub. io, it will be available here " }, { - "id": 109, + "id": 108, "url": "/2019/changelog-v0.16.0.html", "title": "KubeVirt v0.16.0", "author" : "kube🤖", "tags" : "release notes, changelog", "body": "v0. 16. 0: Released on: Fri Apr 5 23:18:22 2019 +0200 Bazel fixes Initial work to support upgrades (not finalized) Initial support for HyperV features Support propagation of MAC addresses to multus Support live migration cancellation Support for table input devices Support for generating OLM metadata Support for triggering VM live migration on node taints" }, { - "id": 110, + "id": 109, "url": "/2019/More-about-Kubevirt-metrics.html", "title": "More About Kubevirt Metrics", "author" : "fromanirh", "tags" : "metrics, prometheus", "body": "More about KubeVirt and Prometheus metricsIn this blog post, we update about the KubeVirt metrics, continuing the series started earlier this year. Since the previous post, the initial groundwork and first set of metrics was merged, and it is expectedto be available with KubeVirt v0. 15. 0 and onwards. Make sure you followed the steps described in the previous post to set up properly the monitoring stackin your KubeVirt-powered cluster. New metrics: Let’s look at the initial set of metrics exposed by KubeVirt 0. 15. 0: kubevirt_info{goversion= go1. 11. 4 ,kubeversion= v0. 15. 0-alpha. 0. 74+d7aaf3b5df4a60-dirty }kubevirt_vm_memory_resident_bytes{domain= $VM_NAME }kubevirt_vm_network_traffic_bytes_total{domain= $VM_NAME ,interface= $IFACE_NAME0 ,type= rx }kubevirt_vm_network_traffic_bytes_total{domain= $VM_NAME ,interface= $IFACE_NAME0 ,type= tx }kubevirt_vm_storage_iops_total{domain= $VM_NAME ,drive= $DRIVE_NAME ,type= read }kubevirt_vm_storage_iops_total{domain= $VM_NAME ,drive= $DRIVE_NAME ,type= write }kubevirt_vm_storage_times_ms_total{domain= $VM_NAME ,drive= $DRIVE_NAME ,type= read }kubevirt_vm_storage_times_ms_total{domain= $VM_NAME ,drive= $DRIVE_NAME ,type= write }kubevirt_vm_storage_traffic_bytes_total{domain= $VM_NAME ,drive= $DRIVE_NAME ,type= read }kubevirt_vm_storage_traffic_bytes_total{domain= $VM_NAME ,drive= $DRIVE_NAME ,type= write }kubevirt_vm_vcpu_seconds{domain= $VM_NAME ,id= 0 ,state= 1 }The metrics expose versioning information according to the recommendations using the kubevirt_info metric; the other metrics should be self-explanatory. As we can expect, labels like domain, drive and interface depend on the specifics of the VM. type, however, is not and represents the subtype of the metric. Let’s now see a real life example, from this idle, diskless VM: apiVersion: kubevirt. io/v1alpha3kind: VirtualMachinemetadata: creationTimestamp: null labels: kubevirt. io/vm: vm-test-01 name: vm-test-01spec: running: false template: metadata: creationTimestamp: null labels: kubevirt. io/vm: vm-test-01 spec: domain: devices: interfaces: - name: default bridge: {} machine: type: resources: requests: memory: 64M networks: - name: default pod: {} terminationGracePeriodSeconds: 0status: {}Querying the endpoint (see below) yields something like kubevirt_info{goversion= go1. 11. 4 ,kubeversion= v0. 15. 0 } 1kubevirt_vm_memory_resident_bytes{domain= default_vm-test-01 } 4. 
25984e+07kubevirt_vm_network_traffic_bytes_total{domain= default_vm-test-01 ,interface= vnet0 ,type= rx } 90kubevirt_vm_network_traffic_bytes_total{domain= default_vm-test-01 ,interface= vnet0 ,type= tx } 0kubevirt_vm_vcpu_seconds{domain= default_vm-test-01 ,id= 0 ,state= 1 } 613Example of how the kubevirt_vm_memory_resident_bytes metric looks like in the Prometheus UI Accessing the metrics programmatically: We can access the VM metrics using the standard Prometheus API. For example, let’s get the same data about the memory consumption we have seen above in the Prometheus UI. The following query yields all the data for the year 2019, aggregated every two hours. Not much data in this case, but beware of potentially large result sets. curl -g 'http://$CLUSTER_IP:9090/api/v1/query_range?query=kubevirt_vm_memory_resident_bytes&start=2019-01-01T00:00:00. 001Z&end=2019-12-31T23:59:59. 999Z&step=7200s' | json_ppWhich yields something like { data : { resultType : matrix , result : [ { values : [ [1552514400. 001, 44036096 ], [1552521600. 001, 42348544 ], [1552528800. 001, 44040192 ], [1552536000. 001, 42291200 ], [1552543200. 001, 42450944 ], [1552550400. 001, 43315200 ] ], metric : { __name__ : kubevirt_vm_memory_resident_bytes , job : kubevirt-prometheus-metrics , endpoint : metrics , pod : virt-handler-6ng6j , domain : default_vm-test-01 , instance : 10. 244. 0. 29:8443 , service : kubevirt-prometheus-metrics , namespace : kubevirt } } ] }, status : success }Troubleshooting tips: We strive to make the monitoring experience seamless, streamlined and working out of the box, but the stack is still evolving fast,and there are many options to actually set up the monitoring stack. Here we present some troubleshooting tips for the most common issues. prometheus targets: An underused feature of the Prometheus server is the target configuration. The Prometehus server exposes data about the targets it islooking for, so we can easily asses if the Prometheus server knows that it must scrape the kubevirt endpoints for metrics. We can see this both in the Prometheus UI: Or programmatically, with the Prometheus REST API: curl -g 'http://192. 168. 48. 7:9090/api/v1/targets' | json_pp(output trimmed for brevity): { data : { activeTargets : [ { lastError : , lastScrape : 2019-03-14T13:38:52. 886262669Z , scrapeUrl : https://10. 244. 0. 72:8443/metrics , labels : { service : kubevirt-prometheus-metrics , instance : 10. 244. 0. 72:8443 , job : kubevirt-prometheus-metrics , pod : virt-handler-6ng6j , endpoint : metrics , namespace : kubevirt }, discoveredLabels : { __meta_kubernetes_pod_phase : Running , __meta_kubernetes_endpoints_name : kubevirt-prometheus-metrics , __meta_kubernetes_endpoint_address_target_name : virt-handler-6ng6j , __meta_kubernetes_service_name : kubevirt-prometheus-metrics , __meta_kubernetes_pod_label_pod_template_generation : 1 , __meta_kubernetes_endpoint_port_name : metrics , __meta_kubernetes_service_label_app_kubernetes_io_managed_by : kubevirt-operator , __meta_kubernetes_pod_name : virt-handler-6ng6j , __address__ : 10. 244. 0. 
72:8443 , __meta_kubernetes_pod_container_name : virt-handler , __meta_kubernetes_pod_container_port_number : 8443 , __meta_kubernetes_pod_controller_kind : DaemonSet , __meta_kubernetes_pod_label_kubevirt_io : virt-handler , __meta_kubernetes_pod_label_controller_revision_hash : 7bc9c7665b , __meta_kubernetes_pod_container_port_name : metrics , __meta_kubernetes_pod_ready : true , __scheme__ : https , __meta_kubernetes_namespace : kubevirt , __meta_kubernetes_pod_annotation_scheduler_alpha_kubernetes_io_tolerations : [{\ key\ :\ CriticalAddonsOnly\ ,\ operator\ :\ Exists\ }] , __meta_kubernetes_pod_container_port_protocol : TCP , __meta_kubernetes_pod_annotation_scheduler_alpha_kubernetes_io_critical_pod : , __meta_kubernetes_pod_label_prometheus_kubevirt_io : , __metrics_path__ : /metrics , __meta_kubernetes_pod_controller_name : virt-handler , __meta_kubernetes_pod_node_name : c7-allinone-2. kube. lan , __meta_kubernetes_endpoint_address_target_kind : Pod , __meta_kubernetes_endpoint_port_protocol : TCP , __meta_kubernetes_service_label_prometheus_kubevirt_io : , __meta_kubernetes_pod_uid : 7d65f67a-45c8-11e9-8567-5254000be9ec , job : kubevirt/kubevirt/0 , __meta_kubernetes_service_label_kubevirt_io : , __meta_kubernetes_pod_ip : 10. 244. 0. 72 , __meta_kubernetes_endpoint_ready : true , __meta_kubernetes_pod_host_ip : 192. 168. 48. 7 }, health : up } ], droppedTargets : [ { discoveredLabels : { __meta_kubernetes_service_name : virt-api , __meta_kubernetes_endpoint_address_target_name : virt-api-649859444c-dnvnm , __meta_kubernetes_pod_phase : Running , __meta_kubernetes_endpoints_name : virt-api , __meta_kubernetes_pod_container_name : virt-api , __meta_kubernetes_service_label_app_kubernetes_io_managed_by : kubevirt-operator , __meta_kubernetes_pod_name : virt-api-649859444c-dnvnm , __address__ : 10. 244. 0. 59:8443 , __meta_kubernetes_endpoint_port_name : , __meta_kubernetes_pod_container_port_name : virt-api , __meta_kubernetes_pod_ready : true , __meta_kubernetes_pod_label_kubevirt_io : virt-api , __meta_kubernetes_pod_controller_kind : ReplicaSet , __meta_kubernetes_pod_container_port_number : 8443 , __meta_kubernetes_namespace : kubevirt , __meta_kubernetes_pod_annotation_scheduler_alpha_kubernetes_io_tolerations : [{\ key\ :\ CriticalAddonsOnly\ ,\ operator\ :\ Exists\ }] , __scheme__ : https , __meta_kubernetes_pod_label_prometheus_kubevirt_io : , __meta_kubernetes_pod_annotation_scheduler_alpha_kubernetes_io_critical_pod : , __meta_kubernetes_pod_container_port_protocol : TCP , __metrics_path__ : /metrics , __meta_kubernetes_endpoint_address_target_kind : Pod , __meta_kubernetes_endpoint_port_protocol : TCP , __meta_kubernetes_pod_controller_name : virt-api-649859444c , __meta_kubernetes_pod_label_pod_template_hash : 649859444c , __meta_kubernetes_pod_node_name : c7-allinone-2. kube. lan , __meta_kubernetes_pod_host_ip : 192. 168. 48. 7 , job : kubevirt/kubevirt/0 , __meta_kubernetes_service_label_kubevirt_io : virt-api , __meta_kubernetes_endpoint_ready : true , __meta_kubernetes_pod_ip : 10. 244. 0. 59 , __meta_kubernetes_pod_uid : 7d5c3299-45c8-11e9-8567-5254000be9ec } } ] }, status : success }The Prometheus target state gives us a very useful information that shapes the next steps during the troubleshooting: does the Prometheus server know it should scrape our target? If no, we should check the Prometheus configuration, which is, in our case, driven by the Prometheus operator. Otherwise: can the Prometheus server access the endpoint? 
If no, we need to check the network connectivity/DNS configuration, or the endpoint itselfservicemonitors: servicemonitors are the objects the prometheus-operator consume to produce the right prometheus configuration that the server running in the clusterwill consume to scrape the metrics endpoints. See the documentation for all the details. We describe two of the most common pitfalls. create the servicemonitor in the right namespace: KubeVirt services run in the kubevirt namespace. Make sure to create the servicemonitor in the same namespace: kubectl get pods -n kubevirtNAME READY STATUS RESTARTS AGEvirt-api-649859444c-dnvnm 1/1 Running 2 19hvirt-api-649859444c-j9d94 1/1 Running 2 19hvirt-controller-7f49b8f77c-8kh46 1/1 Running 2 19hvirt-controller-7f49b8f77c-qk4hq 1/1 Running 2 19hvirt-handler-6ng6j 1/1 Running 2 19hvirt-operator-6c5db798d4-wr9wl 1/1 Running 6 19hkubectl get servicemonitor -n kubevirtNAME AGEkubevirt 16hActually, the servicemonitor should be created in the same namespace on which the kubevirt-prometheus-metrics service is defined: kubectl get svc -n kubevirtNAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGEkubevirt-prometheus-metrics ClusterIP 10. 109. 85. 101 <none> 443/TCP 19hvirt-api ClusterIP 10. 109. 162. 102 <none> 443/TCP 19hSee the KubeVirt documentation for all the details. configure the Prometheus instance to look in the right namespace: The prometheus server instance(s) run by default in their own namespace; this is the recommended configuration, and running them in the same kubevirt namespaceis not recommended anyway. So, make sure that the prometheus configuration we use looks in all the relevant namespaces, using something like apiVersion: monitoring. coreos. com/v1kind: Prometheusmetadata: name: prometheusspec: serviceAccountName: prometheus serviceMonitorNamespaceSelector: matchLabels: prometheus. kubevirt. io: serviceMonitorSelector: matchLabels: prometheus. kubevirt. io: resources: requests: memory: 400MiPlease note the usage of the serviceMonitorNamespaceSelector. See here and herefor more details. Namespaces must have the right label, prometheus. kubevirt. io, to be searched for servicemonitors. The kubevirt namespace is, of course, set correctly by default apiVersion: v1kind: Namespacemetadata: creationTimestamp: 2019-03-13T19:43:25Z labels: kubevirt. io: prometheus. kubevirt. io: name: kubevirt resourceVersion: 228178 selfLink: /api/v1/namespaces/kubevirt uid: 44a0783f-45c8-11e9-8567-5254000be9ecspec: finalizers: - kubernetesstatus: phase: ActiveBut please make sure that any other namespace you may want to monitor has the correct label. endpoint state: As in KubeVirt 0. 15. 0, virt-handler is the component which exposes the VM metrics through its Prometheus endpoint. Let’s check it reports the data correctly. First, let’s get the virt-handler IP address. We look out the instance we want to check with kubectl get pods -n kubevirtThen we query the address: kubectl get pod -o json -n KubeVirt $VIRT_HANDLER_POD | jq -r '. status. podIP'Prometheus tooling adds lots of metrics about internal state. In this case we care only about kubevirt-related metrics, so we filter out everything else with something like grep -E '^kubevirt_'Putting all together: curl -s -k -L https://$(kubectl get pod -o json -n KubeVirt virt-handler-6ng6j | jq -r '. status. podIP'):8443/metrics | grep -E '^kubevirt_'Let’s see how a healthy output looks like: kubevirt_info{goversion= go1. 11. 4 ,kubeversion= v0. 15. 0 } 1kubevirt_vm_memory_resident_bytes{domain= default_vm-test-01 } 4. 
1168896e+07kubevirt_vm_network_traffic_bytes_total{domain= default_vm-test-01 ,interface= vnet0 ,type= rx } 90kubevirt_vm_network_traffic_bytes_total{domain= default_vm-test-01 ,interface= vnet0 ,type= tx } 0kubevirt_vm_vcpu_seconds{domain= default_vm-test-01 ,id= 0 ,state= 1 } 5173Please remember that some metrics can be correctly omitted for some VMs. In general, we should always see metrics about version (pseudo metric), memory, network, and CPU. But there are known cases on which not having storage metrics is expected and correct: for example this case, since we are using a diskless VM. Coming next: The KubeVirt team is still working to enhance and refine the metrics support. There are two main active topics. First, discussion is ongoing about adding more metrics,depending on the needs of the community or the needs of the ecosystem. Furthermore, there is work in progress to increase the robustnessand the reliability of the monitoring. We also have plans to improve the integration with kubernetes. Stay tuned for more updates! " }, { - "id": 111, + "id": 110, "url": "/2019/changelog-v0.15.0.html", "title": "KubeVirt v0.15.0", "author" : "kube🤖", "tags" : "release notes, changelog", "body": "v0. 15. 0: Released on: Tue Mar 5 10:35:08 2019 +0100 CI: Several fixes Fix configurable number of KVM devices Narrow virt-handler permissions Use bazel for development builds Support for live migration with shared and non-shared disks Support for live migration progress tracking Support for EFI boot Support for libvirt 5. 0 Support for extra DHCP options Support for a hook to manipualte cloud-init metadata Support setting a VM serial number Support for exposing infra and VM metrics Support for a tablet input device Support for extra CPU flags Support for ignition metadata Support to set a default CPU model Update to go 1. 11. 5" }, { - "id": 112, + "id": 111, "url": "/2019/federated-kubevirt.html", "title": "Federated Kubevirt", "author" : "karmab", "tags" : "federation, kubefed, multicluster", "body": "Federated KubeVirtFederated KubeVirt is a reference implementation of deploying and managing KubeVirt across multipleKubernetes clusters using Federation-v2. Federation-v2 is an API and control-plane for actively managing multiple Kubernetes clusters and applications in thoseclusters. This makes Federation-v2 a viable solution for managing KubeVirt deployments that span multiple Kubernetesclusters. Federation-v2 Deployment: We assume federation is already deployed (using latest stable version) and you have configured your two clusters with context cluster1 and cluster2 Federated KubeVirt Deployment: We create KubeVirt namespace on first cluster: kubectl create ns kubevirtWe then create a placement for this namespace to get replicated to the second cluster. kubectl create -f federated_namespace. yamlNOTE: This yaml file was generated for version 0. 14. 0. but feel free to edit in order to use a more recent version of the operator We create the federated objects required as per kubevirt deployment: kubefed2 enable ClusterRoleBindingkubefed2 enable CustomResourceDefinitionAnd federated kubevirt itself, with placements so that it gets deployed at both sites. kubectl create -f federated_kubevirt-operator. yamlThis gets KubeVirt operator deployed at both sites, which creates the Custom Resource definition KubeVirt. We then deploy kubevirt by federating this CRD and creates an instance of it. kubefed2 enable kubevirtskubectl create -f federated_kubevirt-cr. 
yamlTo help starting/stopping vms and connecting to consoles, we install virtctl (which is aware of contexts): VERSION= v0. 14. 0 wget https://github. com/kubevirt/kubevirt/releases/download/$VERSION/virtctl-$VERSION-linux-amd64mv virtctl-$VERSION-linux-amd64 /usr/bin/virtctlchmod +x /usr/bin/virtctlKubeVirt Deployment Verification: Verify that all KubeVirt pods are running in the clusters: $ for c in cluster1 cluster2; do kubectl get pods -n kubevirt --context ${c} ; doneNAME READY STATUS RESTARTS AGEvirt-api-578cff4f56-2dsml 1/1 Running 0 3mvirt-api-578cff4f56-8mk27 1/1 Running 0 3mvirt-controller-7d8c4fbc4c-pfwll 1/1 Running 0 3mvirt-controller-7d8c4fbc4c-xvlvr 1/1 Running 0 3mvirt-handler-plfg7 1/1 Running 0 3mvirt-operator-67c86544f7-pnjjk 1/1 Running 0 5mNAME READY STATUS RESTARTS AGEvirt-api-578cff4f56-jjbmf 1/1 Running 0 3mvirt-api-578cff4f56-m6g2c 1/1 Running 0 3mvirt-controller-7d8c4fbc4c-tt9tz 1/1 Running 0 3mvirt-controller-7d8c4fbc4c-zf6hh 1/1 Running 0 3mvirt-handler-bldss 1/1 Running 0 3mvirt-operator-67c86544f7-zz5jc 1/1 Running 0 5mNow that KubeVirt is up and created its own custom resource types, we federate them: kubefed2 enable virtualmachineskubefed2 enable virtualmachineinstanceskubefed2 enable virtualmachineinstancepresetskubefed2 enable virtualmachineinstancereplicasetskubefed2 enable virtualmachineinstancemigrationsFor demo purposes, we also federate persistent volume claims: kubefed2 enable persistentvolumeclaimDemo Workflow: We create a federated persistent volume claim, pointing to an existing pv created at both sites, against the same nfs server: kubectl create -f federated_pvc. yamlWe then create a federated virtualmachine, with a placement so that it’s only created at cluster1 kubectl create -f federated_vm. yamlWe can check how its underlying pod only got created at one site: $ for c in cluster1 cluster2; do kubectl get pods --context ${c} ; doneNAME READY STATUS RESTARTS AGEvirt-launcher-testvm2-9dq48 2/2 Running 0 6mNo resources found. Once the vm is up, we connect to it and format its secondary disk, put some data there Playing with placement resource, we have it stopping at cluster1 and launch at cluster2. kubectl patch federatedvirtualmachineplacements testvm2 --type=merge -p '{ spec :{ clusterNames : [ cluster2 ]}}'We can then connect there and see how the data is still available!!! Final ThoughtsFederating KubeVirt allows interesting use cases around kubevirt like disaster recovery scenarios. More over, the pattern used to federate this product can be seen as a generic way to federate modern applications: federate operator federate the CRD deploying the app (either at both sites or selectively) federate the CRDS handled by the app" }, { - "id": 113, + "id": 112, "url": "/2019/changelog-v0.14.0.html", "title": "KubeVirt v0.14.0", "author" : "kube🤖", "tags" : "release notes, changelog", "body": "v0. 14. 0: Released on: Mon Feb 4 22:04:14 2019 +0100 CI: Several stabilizing fixes docs: Document the KubeVirt Razor build: golang update Update to Kubernetes 1. 12 Update CDI Support for Ready and Created Operator conditions Support (basic) EFI Support for generating cloud-init network-config" }, { - "id": 114, + "id": 113, "url": "/2019/An-overview-to-KubeVirt-metrics.html", "title": "An Overview To Kubevirt Metrics", "author" : "tripledes", "tags" : "metrics, prometheus, grafana", "body": "KubeVirt and Prometheus metricsIn this blog post, we will explore the current state of integration between KubeVirt and Prometheus. 
For that, we’ll be using the following bits and pieces: minikube, as local Kubernetes deployment. kube-prometheus bundle, to quickly and easily deploy the whole monitoring stack, Promtheus, Grafana, … KubeVirtStarting Kubernetes up: Installing minikube is detailed on the Installation section of the project’s README. If you happen to be running Fedora 29, this Copr repository can be used. Following the documentation on both minikube and kube-prometheus bundle, the command I used to start Kubernetes is the following one: $ minikube start --cpus 2 --disk-size 30g --memory 10240 --vm-driver kvm2 --feature-gates=DevicePlugins=true --bootstrapper=kubeadm --extra-config=kubelet. authentication-token-webhook=true --extra-config=kubelet. authorization-mode=Webhook --extra-config=scheduler. address=0. 0. 0. 0 --extra-config=controller-manager. address=0. 0. 0. 0 --kubernetes-version=v1. 11. 5 With that command you’ll get a VM, using 2 vCPUS with 10GiB of RAM and running Kubernetes version 1. 11. 5, please adjust that to your needs. Installing the monitoring stack: Follow this README for step by step installation instructions. Once installed, we can verify everything is up and running by checking out the monitoring namespace: $ kubectl get all -n monitoringNAME READY STATUS RESTARTS AGEpod/alertmanager-main-0 2/2 Running 2 3dpod/alertmanager-main-1 2/2 Running 2 3dpod/alertmanager-main-2 2/2 Running 2 3dpod/grafana-7b9578fb4-jb2ts 1/1 Running 1 3dpod/kube-state-metrics-fb7d5f59b-dr5zp 4/4 Running 5 3dpod/node-exporter-jf2zk 2/2 Running 2 3dpod/prometheus-adapter-69bd74fc7-vlfcq 1/1 Running 2 3dpod/prometheus-k8s-0 3/3 Running 4 3dpod/prometheus-k8s-1 3/3 Running 4 3dpod/prometheus-operator-6db8dbb7dd-5cb6r 1/1 Running 2 3dNAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGEservice/alertmanager-main ClusterIP 10. 100. 239. 1 <none> 9093/TCP 3dservice/alertmanager-operated ClusterIP None <none> 9093/TCP,6783/TCP 3dservice/grafana ClusterIP 10. 104. 160. 71 <none> 3000/TCP 3dservice/kube-state-metrics ClusterIP None <none> 8443/TCP,9443/TCP 3dservice/node-exporter ClusterIP None <none> 9100/TCP 3dservice/prometheus-adapter ClusterIP 10. 109. 240. 201 <none> 443/TCP 3dservice/prometheus-k8s ClusterIP 10. 103. 208. 241 <none> 9090/TCP 3dservice/prometheus-operated ClusterIP None <none> 9090/TCP 3dservice/prometheus-operator ClusterIP None <none> 8080/TCP 3dNAME DESIRED CURRENT READY UP-TO-DATE AVAILABLE NODE SELECTOR AGEdaemonset. apps/node-exporter 1 1 1 1 1 beta. kubernetes. io/os=linux 3dNAME DESIRED CURRENT UP-TO-DATE AVAILABLE AGEdeployment. apps/grafana 1 1 1 1 3ddeployment. apps/kube-state-metrics 1 1 1 1 3ddeployment. apps/prometheus-adapter 1 1 1 1 3ddeployment. apps/prometheus-operator 1 1 1 1 3dNAME DESIRED CURRENT READY AGEreplicaset. apps/grafana-7b9578fb4 1 1 1 3dreplicaset. apps/kube-state-metrics-6dc79554cd 0 0 0 3dreplicaset. apps/kube-state-metrics-fb7d5f59b 1 1 1 3dreplicaset. apps/prometheus-adapter-69bd74fc7 1 1 1 3dreplicaset. apps/prometheus-operator-6db8dbb7dd 1 1 1 3dNAME DESIRED CURRENT AGEstatefulset. apps/alertmanager-main 3 3 3dstatefulset. apps/prometheus-k8s 2 2 3d So we can see that everything is up and running and we can even test that the access to Grafana and PromUI are working: For Grafana, forward the port 3000 as follows and access http://localhost:3000: $ kubectl --namespace monitoring port-forward svc/grafana 3000 At the time of this writing, the username and password for Grafana are both admin. 
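As an extra sanity check that the Grafana port-forward above works (a suggested step, not part of the original walkthrough), something like $ curl -s http://localhost:3000/api/health should return a small JSON document reporting database: ok together with the running Grafana version.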
For PromUI, forward the port 9090 as follows and access http://localhost:9090: $ kubectl --namespace monitoring port-forward svc/prometheus-k8s 9090 Let’s deploy KubeVirt and dig on the metrics components: Deploy KubeVirt using the official documentation. This blog post uses version 0. 11. 0. Metrics: If you’ve installed KubeVirt before, there’s a service that might be unfamiliar to you, service/kubevirt-prometheus-metrics, this service uses a selector set to match the label prometheus. kubevirt. io: ““ which is included on all the main KubeVirt components, like the virt-api, virt-controllers and virt-handler. The kubevirt-prometheus-metrics also exposes the metrics port set to 443 so Promtheus can scrape the metrics for all the components through it. Let’s have a first look to the metrics that are exported: $ kubectl --namespace kube-system port-forward svc/kubevirt-prometheus-metrics 8443:443$ curl -k https://localhost:8443/metrics# TYPE go_gc_duration_seconds summarygo_gc_duration_seconds{quantile= 0 } 2. 856e-05go_gc_duration_seconds{quantile= 0. 25 } 8. 4197e-05go_gc_duration_seconds{quantile= 0. 5 } 0. 000148234go_gc_duration_seconds{quantile= 0. 75 } 0. 000358119go_gc_duration_seconds{quantile= 1 } 0. 014123096go_gc_duration_seconds_sum 0. 481708749go_gc_duration_seconds_count 328. . . # HELP rest_client_requests_total Number of HTTP requests, partitioned by status code, method, and host. # TYPE rest_client_requests_total counterrest_client_requests_total{code= 200 ,host= 10. 96. 0. 1:443 ,method= GET } 125rest_client_requests_total{code= 200 ,host= 10. 96. 0. 1:443 ,method= PATCH } 284rest_client_requests_total{code= 404 ,host= 10. 96. 0. 1:443 ,method= GET } 284rest_client_requests_total{code= <error> ,host= 10. 96. 0. 1:443 ,method= GET } 2 As can be seen in the output from curl, there are quite some metrics, but we’ll focus here mostly about the ones starting by rest as the others are mostly about Golang runtime and few other process internals, so the metrics list will be reduced to the following: rest_client_request_latency_seconds_bucket rest_client_request_latency_seconds_count rest_client_request_latency_seconds_sum rest_client_requests_total The rest_client_request_latency_seconds, represents the latency for each request being made to the API components broken down by verb and URL. The rest_client_requests_total, represents the number of HTTP requests, partitioned by status code, method, and host. Now, following again KubeVirt’s docs, we need to deploy two resources: A Prometheus resource. Just copy the YAML snippet from KubeVirt’s docs and apply it as follows: $ kubectl apply -f kubevirt-prometheus. yml -n kube-system A ServiceMonitor resource, again, take the YAML snippet from KubeVirt’s docs and apply it to the cluster: $ kubectl apply -f kubevirt-servicemonitor. yml -n kube-system At this point we’re ready to fire up PromUI and start querying, accessing to it at http://localhost:9090, here are some examples: Let’s query for the rest_client_requests_total filterying by service name kubevirt-prometheus-metrics: Now, the same metric, but let’s apply rate function, on 1 minute intervals, looking at the graph tab we can see each component, with different HTTP status codes, methods (verbs) and more labels being added by Prometheus itself: On both images, there is one status code, that I feel it’s worth a special mention, as it might be confusing, it’s <error>. 
This is not actual HTTP code, obvsiously, but rather a real error logged out by the component in question, in this case it was the pod virt-handler-2pxcb. What does it mean? To keep the variaty of metrics under control, any error string logged out during a request is translated by the string we see in the images, <error>, and it’s meant for us to notice that there might be issues that need our attention. Checking the pod for errors in the logs we can find the following ones: $ kubectl logs virt-handler-2pxcb -n kube-system | grep -i error{ component : virt-handler , level : error , msg : kubevirt. io/kubevirt/pkg/virt-handler/vm. go:440: Failed to list *v1. VirtualMachineInstance: Get https://10. 96. 0. 1:443/apis/kubevirt. io/v1alpha2/virtualmachineinstances?labelSelector=kubevirt. io%2FnodeName+in+%28minikube%29\u0026limit=500\u0026resourceVersion=0: dial tcp 10. 96. 0. 1:443: i/o timeout , pos : reflector. go:205 , timestamp : 2018-12-21T09:46:27. 921051Z }{ component : virt-handler , level : error , msg : kubevirt. io/kubevirt/pkg/virt-handler/vm. go:441: Failed to list *v1. VirtualMachineInstance: Get https://10. 96. 0. 1:443/apis/kubevirt. io/v1alpha2/virtualmachineinstances?labelSelector=kubevirt. io%2FmigrationTargetNodeName+in+%28minikube%29\u0026limit=500\u0026resourceVersion=0: dial tcp 10. 96. 0. 1:443: i/o timeout , pos : reflector. go:205 , timestamp : 2018-12-21T09:46:27. 921168Z }Looking back at the first image, we can see the information there, matches what the logs say, exactly two ocurrances with method GET. So far, in this case, it’s nothing to worry about as it seems to be a temporary issue, but if the count grows, it’s likely there are serious issues that need fixing. With that in mind, it’s not hard to create a dashboard in Grafana that would give us a glimpse of how KubeVirt is doing. The three rectangles on the top, are singlestat, in Grafana’s own terms, and those are first applying rate() by 5 minutes samples, then applying count() to aggragate the results in a single value. So the query is: count(rate(rest_client_requests_total{service=”kubevirt-prometheus-metrics”,code=”XXX”} [5m]))Replacing XXX by 404, 500 or <error>. The singlestat is useful for counters and for quickly seeing how a system/service is doing, as thresholds can be defined, changing the background (or the value) color based on the current measured amount. The graph below, runs the same query, but without the aggregation so we can see each component with different status codes and verbs. Closing thoughts: Even though the current state might not look very exciting, it’s a start, we can now monitor the KubeVirt components and make sure we get alarms when something is wrong. Besides, there’s more to come, the KubeVirt team is working hard to bring VM metrics to the table. Once this work is completed, we’ll write another blog post, so stay tuned! " }, { - "id": 115, + "id": 114, "url": "/2019/changelog-v0.13.0.html", "title": "KubeVirt v0.13.0", "author" : "kube🤖", "tags" : "release notes, changelog", "body": "v0. 13. 0: Released on: Tue Jan 15 08:26:25 2019 +0100 CI: Fix virt-api race API: Remove volumeName from disks" }, { - "id": 116, + "id": 115, "url": "/2019/changelog-v0.12.0.html", "title": "KubeVirt v0.12.0", "author" : "kube🤖", "tags" : "release notes, changelog", "body": "v0. 12. 
0: Released on: Fri Jan 11 22:22:02 2019 +0100 Introduce a KubeVirt Operator for KubeVirt life-cycle management Introduce dedicated kubevirt namespace Support VMI ready conditions Support vCPU threads and sockets Support scale and HPA for VMIRS Support to pass NTP related DHCP options Support guest IP address reporting via qemu guest agent Support for live migration with shared storage Support scheduling of VMs based on CPU family Support masquerade network interface binding" }, { - "id": 117, + "id": 116, "url": "/2018/kubevirt-autolatest.html", "title": "Kubevirt Autolatest", "author" : "karmab", "tags" : "gcp, autodeployer", "body": "How to easily test specific versions of KubeVirt on GCPAt KubeVirt, we created cloud images on gcp and aws to ease evaluation of the project. It works fine, has a dedicated CI and is updated when new releases come out, but i wanted to go a little bit further and see if i could easily spawn a vm which would default to latest versions of the components, or that would allow me to test a given PR without focusing on deployment details So What did I come up with: the image is called autolatest and can be found on Google Storage I assume that you have a Google account with an active payment methodor a free trial. You also need to make sure that you have a default keypairinstalled. From console. cloud. google. com, go to “Compute Engine”, “Images” and then clickon “Create Image” or click this link. Fill in the following data: Name: kubevirt-autodeployer Family: centos-7 (optional) Source: cloud storage file Cloud storage file: kubevirt-button/autolatest-v0. 1. tar. gz Then you can create a new instance based on this image. Go to “Compute Engine”, then to “VM instances”, and then click on “Create instance”. It’s recommended to select: the 2 CPU / 7. 5GB instance a zone that supports the Haswell CPU Platform or newer (for nested virtualization to work), us-central1-b for instanceUnder boot disk, select the image that you created above. If you want to use specific versions for any of the following components, create the corresponding metadata entry in Management/Metadata k8s_version flannel_version kubevirt_version cdi_version Now hit Create to start the instance. Once vm is up, you should be able to connect and see through the presented banner which components got deployed What happened under the hood: When the vm boots, it executes a boot script which does the following: Gather metadata for the following variables k8s_version flannel_version kubevirt_version cdi_version If those metadata variables are not set, rely on values fetched from this url Once those variables are set, the corresponding elements are deployed. When latest or a PR number is specified for one of the components, we gather the corresponding latest release tag from the product repo and use it to deploy When master or a number is specified for kubevirt, we build containers from source and deploy kubevirt with them The full script is available here and can be adapted to other platforms " }, { - "id": 118, + "id": 117, "url": "/2018/changelog-v0.11.0.html", "title": "KubeVirt v0.11.0", "author" : "kube🤖", "tags" : "release notes, changelog", "body": "v0. 11. 0: Released on: Thu Dec 6 10:15:51 2018 +0100 API: registryDisk got renamed to containreDisk CI: User OKD 3. 11 Fix: Tolerate if the PVC has less capacity than expected Aligned to use ownerReferences Update to libvirt-4. 10. 
0 Support for VNC on MAC OSX Support for network SR-IOV interfaces Support for custom DHCP options Support for VM restarts via a custom endpoint Support for liveness and readiness probes" }, { - "id": 119, + "id": 118, "url": "/2018/kubevirt-at-kubecon-na.html", "title": "Kubevirt At Kubecon Na", "author" : "xsgordon", "tags" : "kubecon, conference", "body": "KubeCon + CloudNativeCon North America 2018 (Seattle, December 11-13) israpidly approaching and promises to be another jam packed event for followers ofcloud-native technologies. Given the increasing scope of the event we thought it might be useful to preparea guide to where you are likely to find KubeVirt featured at the event. These sessions will provide you an opportunity not just to learn aboutKubeVirt’s to turning Kubernetes into a common platform for containers andvirtual machines, but also to meet other members of the community: KubeVirt Introductory Birds of a Feather (BoF) Session led by RyanHallisey, Software Engineer, Red Hat and Daniel Gonzalez Nothnagel, CloudInfrastructure Developer, SAP Tuesday, December 11 @ 10:50 AM PST KubeVirt Deep Dive Birds of a Feather (BoF) Session led by Scott Collier,Consulting Engineer, Red Hat and Vishesh Ajay Tanksale, nVidia Tuesday, December 11 @ 1:45 PM PST Connecting and Testing Virtual Network Topologies on Kubernetes presentedby Gage Orsburn, Software Architect, One Source Integrations and RichRenner, Solutions Architect, One Source Integrations Tuesday, December 11 @ 2:35 PM PST Running VM Workloads Side by Side with Container Workloads presented bySebastian Scheele, Co-founder and CEO, Loodse Thursday, December 13 @ 10:50 AM PST As previously announced on the kubevirt-dev mailing list we willalso be holding a users and contributors meetup on the Tuesday evening of theevent: Location: Sheraton Grand, Seattle Room: Aspen Room, 2nd Floor (Union Street Tower) Date: Tuesday, December 11th Time: 6:45 - 8:45 PM PSTWhile we wont have Ice Cube, we do plan to have food, so if you plan to attendplease register your interest so that we can cater accordingly! We lookforward to seeing you all at the event! Don’t forget to follow kubevirton Twitter for updates throughout! " }, { - "id": 120, + "id": 119, "url": "/2018/ignition-support.html", "title": "Ignition Support", "author" : "karmab", "tags" : "ignition, coreos, rhcos", "body": "Introduction: Ignition is a new provisioning utility designed specifically for CoreOS/RhCOS. At the most basic level, it is a tool for manipulating a node during early boot. This includes: Partitioning disks. Formatting partitions. Writing files (regular files, systemd units, networkd units). Configuring users and their associated ssh public keys. Recently, we added support for it in KubeVirt so ignition data can now be embedded in a vm specification, through a dedicated annotation. Ignition support is still needed in the guest operating system. Enabling Ignition Support: Ignition Support has to be enabled through a feature gate. This is achieved by creating (or editing ) the kubevirt-config ConfigMap in the kubevirt namespace. A minimal config map would look like this: apiVersion: v1kind: ConfigMapmetadata: name: kubevirt-config namespace: kubevirt labels: kubevirt. io: data: feature-gates: ExperimentalIgnitionSupportMake sure to delete kubevirt related pods afterward for the configuration to be taken into account: kubectl delete pod --all -n kubevirtWorkThrough: We assume that you already have a Kubernetes or OpenShift cluster running with KubeVirt installed. 
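Before moving on, it can be worth double-checking that the feature gate was actually picked up. A minimal check, assuming the kubevirt-config ConfigMap shown above lives in the kubevirt namespace, is: $ kubectl get configmap kubevirt-config -n kubevirt -o jsonpath='{.data.feature-gates}' which should print a value containing ExperimentalIgnitionSupport.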
Step 1: Create The following VM spec in the file myvm1. yml: apiVersion: kubevirt. io/v1alpha3kind: VirtualMachinemetadata: name: myvm1spec: running: true template: metadata: labels: kubevirt. io/size: small annotations: kubevirt. io/ignitiondata: | { ignition : { config : {}, version : 2. 2. 0 }, networkd : {}, passwd : { users : [ { name : core , sshAuthorizedKeys : [ ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQC/AvM9VbO2yiIb9AillBp/kTr8jqIErRU1LFKqhwPTm4AtVIjFSaOuM4AlspfCUIz9IHBrDcZmbcYKai3lC3JtQic7M/a1OWUjWE1ML8CEvNsGPGu5yNVUQoWC0lmW5rzX9c6HvH8AcmfMmdyQ7SgcAnk0zir9jw8ed2TRAzHn3vXFd7+saZLihFJhXG4zB8vh7gJHjLfjIa3JHptWzW9AtqF9QsoBY/iu58Rf/hRnrfWscyN3x9pGCSEqdLSDv7HFuH2EabnvNFFQZr4J1FYzH/fKVY3Ppt3rf64UWCztDu7L44fPwwkI7nAzdmQVTaMoD3Ej8i7/OSFZsC2V5IBT kboumedh@bumblefoot ] }, ] } } spec: domain: devices: disks: - name: containerdisk disk: bus: virtio interfaces: - name: default bridge: {} resources: requests: memory: 64M networks: - name: default pod: {} volumes: - name: containerdisk containerDisk: image: kubevirt/fedora-cloud-container-disk-demoNote We simply inject the ignition data as a string in vm/spec/domain/spec/metadata/annotations, using kubevirt. io/ignitiondata as an annotation Step 2: Create the VM: $ kubectl apply -f myvm1. ymlvirtualmachine myvm1 createdAt this point, when VM boots, ignition data will be injected. How does it work under the hood?: We currently leverage Pass-through of arbitrary qemu commands although there is some discussion around using a metadata server instead Summary: Ignition Support brings the ability to run CoreOS/RHCOS distros on KubeVirt and to customize them at boot time. " }, { - "id": 121, + "id": 120, "url": "/2018/new-volume-types.html", "title": "New Volume Types", "author" : "slintes", "tags" : "volume types, serviceaccount", "body": "Introduction: Recently three new volume types were introduced, which can be used for additional VM disks, and allow better integration of virtual machines withwell known Kubernetes resources. ConfigMap and Secret: Both ConfigMapsand Secrets are used to provide configuration settings and credentials to Pods. In order to use them in your VM too, you can add them as additional disks, using the new configMapand secret volume types. ServiceAccount: Kubernetes pods can be configured to get a special type of secret injected, which can be used foraccessing the Kubernetes API. With the third new volume type serviceAccount you can get this information into your VM, too. Example: We assume that you already have a Kubernetes or OpenShift cluster running with KubeVirt installed. Step 1: Create a ConfigMap and Secret, which will be used in your VM: $ kubectl create secret generic mysecret --from-literal=PASSWORD=hiddensecret mysecret created$ kubectl create configmap myconfigmap --from-literal=DATABASE=stagingconfigmap myconfigmap createdStep 2: Define a VirtualMachineInstance which uses all three new volume types, and save it to vmi-fedora. yaml. Note how we add 3 disks for the ConfigMap and Secret we just created, and for the default ServiceAccount. In order to automount these disks, we also add a cloudInitNoCloud disk with mount instructions. Details onhow to do this might vary depending on the VM’s operating system. apiVersion: kubevirt. 
io/v1alpha2kind: VirtualMachineInstancemetadata: name: vmi-fedoraspec: domain: devices: disks: - name: registrydisk volumeName: registryvolume - name: cloudinitdisk volumeName: cloudinitvolume - name: configmap-disk serial: configmap volumeName: configmap-volume - name: secret-disk serial: secret volumeName: secret-volume - name: serviceaccount-disk serial: serviceaccount volumeName: serviceaccount-volume resources: requests: memory: 1024M volumes: - name: registryvolume registryDisk: image: kubevirt/fedora-cloud-container-disk-demo:latest - name: cloudinitvolume cloudInitNoCloud: userData: |- #cloud-config password: fedora chpasswd: { expire: False } bootcmd: # mount the disks - mkdir /mnt/{myconfigmap,mysecret,myserviceaccount} - mount /dev/disk/by-id/ata-QEMU_HARDDISK_configmap /mnt/myconfigmap - mount /dev/disk/by-id/ata-QEMU_HARDDISK_secret /mnt/mysecret - mount /dev/disk/by-id/ata-QEMU_HARDDISK_serviceaccount /mnt/myserviceaccount - name: configmap-volume configMap: name: myconfigmap - name: secret-volume secret: secretName: mysecret - name: serviceaccount-volume serviceAccount: serviceAccountName: defaultStep 3: Create the VMI: $ kubectl apply -f vmi-fedora. yamlvirtualmachineinstance vmi-fedora createdStep 4: Inspect the new disks: $ virtctl console vmi-fedoravmi-fedora login: fedoraPassword:[fedora@vmi-fedora ~]$ ls -R /mnt//mnt/:myconfigmap mysecret myserviceaccount/mnt/myconfigmap:DATABASE/mnt/mysecret:PASSWORD/mnt/myserviceaccount:ca. crt namespace token[fedora@vmi-fedora ~]$ cat /mnt/myconfigmap/DATABASEstaging[fedora@vmi-fedora ~]$ cat /mnt/mysecret/PASSWORDhidden[fedora@vmi-fedora ~]$ cat /mnt/myserviceaccount/namespacedefaultSummary: With these new volume types KubeVirt further improves the integration with native Kubernetes resources. Learn more about all available volume types on the userguide. " }, { - "id": 122, + "id": 121, "url": "/2018/changelog-v0.10.0.html", "title": "KubeVirt v0.10.0", "author" : "kube🤖", "tags" : "release notes, changelog", "body": "v0. 10. 0: Released on: Thu Nov 8 15:21:34 2018 +0100 Support for vhost-net Support for block multi-queue Support for custom PCI addresses for virtio devices Support for deploying KubeVirt to a custom namespace Support for ServiceAccount token disks Support for multus backed networks Support for genie backed networks Support for kuryr backed networks Support for block PVs Support for configurable disk device caches Support for pinned IO threads Support for virtio net multi-queue Support for image upload (depending on CDI) Support for custom entity lists with more VM details (cusomt columns) Support for IP and MAC address reporting of all vNICs Basic support for guest agent status reporting More structured logging Better libvirt error reporting Stricter CR validation Better ownership references Several test improvements" }, { - "id": 123, + "id": 122, "url": "/2018/CDI-DataVolumes.html", "title": "Cdi Datavolumes", "author" : "tripledes", "tags" : "cdi, datavolumes", "body": "CDI DataVolumesContainerized Data Importer (or CDI for short), is a data import service for Kubernetes designed with KubeVirt in mind. Thanks to CDI, we can now enjoy the addition of DataVolumes, which greatly improve the workflow of managing KubeVirt and its storage. What it does: DataVolumes are an abstraction of the Kubernetes resource, PVC (Persistent Volume Claim) and it also leverages other CDI features to ease the process of importing data into a Kubernetes cluster. 
DataVolumes can be defined by themselves or embedded within a VirtualMachine resource definition, the first method can be used to orchestrate events based on the DataVolume status phases while the second eases the process of providing storage for a VM. How does it work?: In this blog post, I’d like to focus on the second method, embedding the information within a VirtualMachine definition, which might seem like the most immediate benefit of this feature. Let’s get started! Environment description: OpenShift For testing DataVolumes, I’ve spawned a new OpenShift cluster, using dynamic provisioning for storage running OpenShift Cloud Storage (GlusterFS), so the Persistent Volumes (PVs for short) are created on-demand. Other than that, it’s a regular OpenShift cluster, running with a single master (also used for infrastructure components) and two compute nodes. CDI We also need CDI, of course, CDI can be deployed either together with KubeVirt or independently, the instructions can be found in the project’s GitHub repo. KubeVirt Last but not least, we’ll need KubeVirt to run the VMs that will make use of the DataVolumes. Enabling DataVolumes feature: As of this writing, DataVolumes have to be enabled through a feature gate, for KubeVirt, this is achieved by creating the kubevirt-config ConfigMap on the namespace where KubeVirt has been deployed, by default kube-system. Let’s create the ConfigMap with the following definition: ---apiVersion: v1data: feature-gates: DataVolumeskind: ConfigMapmetadata: name: kubevirt-config namespace: kube-system$ oc create -f kubevirt-config-cm. ymlAlternatively, the following one-liner can also be used to achieve the same result: $ oc create configmap kubevirt-config --from-literal feature-gates=DataVolumes -n kube-systemIf the ConfigMap was already present on the system, just use oc edit to add the DataVolumes feature gate under the data field like the YAML above. If everything went as expected, we should see the following log lines on the virt-controller pods: level=info timestamp=2018-10-09T08:16:53. 602400Z pos=application. go:173 component=virt-controller msg= DataVolume integration enabled NOTE: It’s worth noting the values in the ConfigMap are not dynamic, in the sense that virt-controller and virt-api will need to be restarted, scaling their deployments down and back up again, just remember to scale it up to the same number of replicas they previously had. Creating a VirtualMachine embedding a DataVolume: Now that the cluster is ready to use the feature, let’s have a look at our VirtualMachine definition, which includes a DataVolume. apiVersion: kubevirt. io/v1alpha2kind: VirtualMachinemetadata: labels: kubevirt. io/vm: testvm1 name: testvm1spec: dataVolumeTemplates: - metadata: name: centos7-dv spec: pvc: accessModes: - ReadWriteOnce resources: requests: storage: 10Gi source: http: url: https://cloud. centos. org/centos/7/images/CentOS-7-x86_64-GenericCloud. qcow2 running: true template: metadata: labels: kubevirt. io/vm: testvm1 spec: domain: cpu: cores: 1 devices: disks: - volumeName: test-datavolume name: disk0 disk: bus: virtio - name: cloudinitdisk volumeName: cloudinitvolume cdrom: bus: virtio resources: requests: memory: 8Gi volumes: - dataVolume: name: centos7-dv name: test-datavolume - cloudInitNoCloud: userData: | #cloud-config hostname: testvm1 users: - name: kubevirt gecos: KubeVirt Project sudo: ALL=(ALL) NOPASSWD:ALL passwd: $6$JXbc3063IJir. e5h$ypMlYScNMlUtvQ8Il1ldZi/mat7wXTiRioGx6TQmJjTVMandKqr. jJfe99. QckyfH/JJ. OdvLb5/OrCa8ftLr. 
shell: /bin/bash home: /home/kubevirt lock_passwd: false name: cloudinitvolumeThe new addition to a regular VirtualMachine definition is the dataVolumeTemplates block, which will trigger the import of the CentOS-7 cloud image defined on the url field, storing it on a PV, the resulting DataVolume will be named centos7-dv, being referenced on the volumes section, it will serve as the boot disk (disk0) for our VirtualMachine. Going ahead and applying the above manifest to our cluster results in the following set of events: The DataVolume is created, triggering the creation of a PVC and therefore, using the dynamic provisioning configured on the cluster, a PV is provisioned to satisfy the needs of the PVC. An importer pod is started, this pod is the one actually downloading the image defined in the url field and storing it on the provisioned PV. Once the image has been downloaded and stored, the DataVolume status changes to Succeeded, from that point the virt launcher controller will go ahead and schedule the VirtualMachine. Taking a look to the resources created after applying the VirtualMachine manifest, we can see the following: $ oc get podsNAME READY STATUS RESTARTS AGEimporter-centos7-dv-t9zx2 0/1 Completed 0 11mvirt-launcher-testvm1-cpt8n 1/1 Running 0 8mLet’s look at the importer pod logs to understand what it did: $ oc logs importer-centos7-dv-t9zx2I1009 12:37:45. 384032 1 importer. go:32] Starting importerI1009 12:37:45. 393461 1 importer. go:37] begin import processI1009 12:37:45. 393519 1 dataStream. go:235] copying https://cloud. centos. org/centos/7/images/CentOS-7-x86_64-GenericCloud. qcow2 to /data/disk. img . . . I1009 12:37:45. 393569 1 dataStream. go:112] IMPORTER_ACCESS_KEY_ID and/or IMPORTER_SECRET_KEY are emptyI1009 12:37:45. 393606 1 dataStream. go:298] create the initial Reader based on the endpoint's https schemeI1009 12:37:45. 393665 1 dataStream. go:208] Attempting to get object https://cloud. centos. org/centos/7/images/CentOS-7-x86_64-GenericCloud. qcow2 via http clientI1009 12:37:45. 762330 1 dataStream. go:314] constructReaders: checking compression and archive formats: /centos/7/images/CentOS-7-x86_64-GenericCloud. qcow2I1009 12:37:45. 841564 1 dataStream. go:323] found header of type qcow2 I1009 12:37:45. 841618 1 dataStream. go:338] constructReaders: no headers found for file /centos/7/images/CentOS-7-x86_64-GenericCloud. qcow2 I1009 12:37:45. 841635 1 dataStream. go:340] done processing /centos/7/images/CentOS-7-x86_64-GenericCloud. qcow2 headersI1009 12:37:45. 841650 1 dataStream. go:138] NewDataStream: endpoint https://cloud. centos. org/centos/7/images/CentOS-7-x86_64-GenericCloud. qcow2 's computed byte size: 8589934592I1009 12:37:45. 841698 1 dataStream. go:566] Validating qcow2 fileI1009 12:37:46. 848736 1 dataStream. go:572] Doing streaming qcow2 to raw conversionI1009 12:40:07. 546308 1 importer. go:43] import completeSo, following the events we see, it fetched the image from the defined url, validated its format and converted it to raw for being used by qemu. $ oc describe dv centos7-dvName: centos7-dvNamespace: test-dvLabels: kubevirt. io/created-by=1916da5f-cbc0-11e8-b467-c81f666533c3Annotations: kubevirt. io/owned-by=virt-controllerAPI Version: cdi. kubevirt. io/v1alpha1Kind: DataVolumeMetadata: Creation Timestamp: 2018-10-09T12:37:34Z Generation: 1 Owner References: API Version: kubevirt. 
io/v1alpha2 Block Owner Deletion: true Controller: true Kind: VirtualMachine Name: testvm1 UID: 1916da5f-cbc0-11e8-b467-c81f666533c3 Resource Version: 2474310 Self Link: /apis/cdi. kubevirt. io/v1alpha1/namespaces/test-dv/datavolumes/centos7-dv UID: 19186b29-cbc0-11e8-b467-c81f666533c3Spec: Pvc: Access Modes: ReadWriteOnce Resources: Requests: Storage: 10Gi Source: Http: URL: https://cloud. centos. org/centos/7/images/CentOS-7-x86_64-GenericCloud. qcow2Status: Phase: SucceededEvents: Type Reason Age From Message ---- ------ ---- ---- ------- Normal Synced 29s (x13 over 14m) datavolume-controller DataVolume synced successfully Normal Synced 18s datavolume-controller DataVolume synced successfullyThe DataVolume description matches what was defined under dataVolumeTemplates. Now, as we know it uses a PV/PVC underneath, let’s have a look: $ oc describe pvc centos7-dvName: centos7-dvNamespace: test-dvStorageClass: glusterfs-storageStatus: BoundVolume: pvc-191d27c6-cbc0-11e8-b467-c81f666533c3Labels: app=containerized-data-importer cdi-controller=centos7-dvAnnotations: cdi. kubevirt. io/storage. import. endpoint=https://cloud. centos. org/centos/7/images/CentOS-7-x86_64-GenericCloud. qcow2 cdi. kubevirt. io/storage. import. importPodName=importer-centos7-dv-t9zx2 cdi. kubevirt. io/storage. pod. phase=Succeeded pv. kubernetes. io/bind-completed=yes pv. kubernetes. io/bound-by-controller=yes volume. beta. kubernetes. io/storage-provisioner=kubernetes. io/glusterfsFinalizers: [kubernetes. io/pvc-protection]Capacity: 10GiAccess Modes: RWOEvents: Type Reason Age From Message ---- ------ ---- ---- ------- Normal ProvisioningSucceeded 18m persistentvolume-controller Successfully provisioned volume pvc-191d27c6-cbc0-11e8-b467-c81f666533c3 using kubernetes. io/glusterfsIt’s important to pay attention to the annotations, these are monitored/set by CDI. CDI triggers an import when it detects the cdi. kubevirt. io/storage. import. endpoint, assigns a pod as the import task owner and updates the pod phase annotation. At this point, everything is in place, the DataVolume has its underlying components, the image has been imported so now the VirtualMachine can start the VirtualMachineInstance based on its definition and using the CentOS7 image as boot disk, as users we can connect to its console as usual, for instance running the following command: $ virtctl console testvm1Cleaning it up: Once we’re happy with the results, it’s time to clean up all these tests. The task is easy: $ oc delete vm testvm1Once the VM (and its associated VMI) are gone, all the underlying storage resources are removed, there is no trace of the PVC, PV or DataVolume. $ oc get dv centos7-dv$ oc get pvc centos7-dv$ oc get pv pvc-191d27c6-cbc0-11e8-b467-c81f666533c3All three commands returned No resources found. " }, { - "id": 124, + "id": 123, "url": "/2018/containerized-data-importer.html", "title": "Containerized Data Importer", "author" : "tavni", "tags" : "import, clone, upload, virtual machine, disk image, cdi", "body": "IntroductionContainerized Data Importer (CDI) is a utility to import, upload and clone Virtual Machine images for use with KubeVirt. At a high level, a persistent volume claim (PVC), which defines VM-suitable storage via a storage class, is created. A custom controller watches for specific annotation on the persistent volume claim, and when discovered, starts an import, upload or clone process. 
The status of the each process is reflected in an additional annotation on the associated claim, and when the process completes KubeVirt can create the VM based on the new image. The Containerized Data Cloner gives the option to clone the imported/uploaded VM image from one PVC to another one either within the same namespace or across two different namespaces. This Containerized Data Importer project is designed with KubeVirt in mind and provides a declarative method for importing amd uploading VM images into a Kuberenetes cluster. KubeVirt detects when the VM disk image import/upload is complete and uses the same PVC that triggered the import/upload process, to create the VM. This approach supports two main use-cases: A cluster administrator can build an abstract registry of immutable images (referred to as “Golden Images”) which can be cloned and later consumed by KubeVirt An ad-hoc user (granted access) can import a VM image into their own namespace and feed this image directly to KubeVirt, bypassing the cloning stepFor an in depth look at the system and workflow, see the Design documentation. Data FormatThe Containerized Data Importer is capable of performing certain functions that streamline its use with KubeVirt. It automatically decompresses gzip and xz files, and un-tar’s tar archives. Also, qcow2 images are converted into the raw format which is required by KubeVirt, resulting in the final file being a simple . img file. Supported file formats are: Tar archive Gzip compressed file XZ compressed file Raw image data ISO image data Qemu qcow2 image dataNote: CDI also supports combinations of these formats such as gzipped tar archives, gzipped raw images, etc. Deploying CDIAssumptions: A running Kubernetes cluster that is capable of binding PVCs to dynamically or statically provisioned PVs. A storage class and provisioner (only for dynamically provisioned PVs). An HTTP file server hosting VM images An optional “golden” namespace acting as the image repository. The default namespace is fine for tire kicking. Deploy CDI from a release: Deploying the CDI controller is straight forward. In this document the default namespace is used, but in a production setup a protected namespace that is inaccessible to regular users should be used instead. Ensure that the cdi-sa service account has proper authority to run privileged containers, typically in a kube environment this is true by default. If you are running an openshift variation of kubernetes you may need to enable privileged containers in the security context:$ oc adm policy add-scc-to-user privileged -z cdi-sa Deploy the controller from the release manifest:$ VERSION=<cdi version>$ kubectl create -f https://github. com/kubevirt/containerized-data-importer/releases/download/$VERSION/cdi-controller. yamlDeploy CDI using a template: By default when using manifests/generated/cdi-controller. yaml CDI will deploy into the kube-system namespace using default settings. You can customize the deployment by using the generated manifests/generated/cdi-controller. yaml. j2 jinja2 template. This allows you to alter the install namespace, docker image repo, docker image tags, etc. To deploy using the template follow these steps: Install j2cli:$ pip install j2cli Install CDI:$ cdi_namespace=default \ docker_prefix=kubevirt \ docker_tag=v1. 2. 0 \ pull_policy=IfNotPresent \ verbosity=1 \ j2 manifests/generated/cdi-controller. yaml. j2 | kubectl create -f -Check the template file and make sure to supply values for all variables. 
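Whichever deployment method is used, it is worth confirming the controller pod came up before importing anything. A minimal check, assuming the default kube-system namespace, is: $ kubectl -n kube-system get pods | grep cdi-deployment which should show a cdi-deployment-<RANDOM> pod in the Running state.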
Notes: The default verbosity level is set to 1 in the controller deployment file, which is minimal logging. If greater details are desired increase the -v number to 2 or 3. The importer pod uses the same logging verbosity as the controller. If a different level of logging is required after the controller has been started, the deployment can be edited and applied by using kubectl apply -f . This will not alter the running controller's logging level but will affect importer pods created after the change. To change the running controller's log level requires it to be restarted after the deployment has been edited. Download CDIThere are few ways to download CDI through command line: git clone command:$ git clone https://github. com/kubevirt/containerized-data-importer. git $GOPATH/src/kubevirt. io/containerized-data-importer download only the yamls:$ mkdir cdi-manifests && cd cdi-manifests$ wget https://raw. githubusercontent. com/kubevirt/containerized-data-importer/kubevirt-centric-readme/manifests/example/golden-pvc. yaml$ wget https://raw. githubusercontent. com/kubevirt/containerized-data-importer/kubevirt-centric-readme/manifests/example/endpoint-secret. yaml go get command:$ go get kubevirt. io/containerized-data-importerStart Importing ImagesImport disk image is achieved by creating a new PVC with the ‘cdi. kubevirt. io/storage. import. endpoint’ annotation indicating the url of the source image that we want to download from. Once the controller detects the PVC, it starts a pod which is responsible for importing the image from the given url. Create a PVC yaml file named golden-pvc. yaml: apiVersion: v1kind: PersistentVolumeClaimmetadata: name: golden-pvc labels: app: containerized-data-importer annotations: cdi. kubevirt. io/storage. import. endpoint: https://download. cirros-cloud. net/0. 4. 0/cirros-0. 4. 0-x86_64-disk. img # Required. Format: (http||s3)://www. myUrl. com/path/of/dataspec: accessModes: - ReadWriteOnce resources: requests: storage: 10Gi # Optional: Set the storage class or omit to accept the default # storageClassName: localEdit the PVC above - cdi. kubevirt. io/storage. import. endpoint: The full URL to the VM image in the format of: http://www. myUrl. com/path/of/data or s3://bucketName/fileName. storageClassName: The default StorageClass will be used if not set. Otherwise, set to a desired StorageClass. Note: It is possible to use authentication when importing the image from the endpoint url. Please see using secret during import Deploy the manifest yaml files: Create the persistent volume claim to trigger the import process:$ kubectl -n <NAMESPACE> create -f golden-pvc. yaml (Optional) Monitor the cdi-controller:$ kubectl -n <CDI-NAMESPACE> logs cdi-deployment-<RANDOM> (Optional )Monitor the importer pod:$ kubectl -n <NAMESPACE> logs importer-<PVC-NAME> # pvc name is shown in controller log Verify the import is completed by checking the following annotation value:$ kubectl -n <NAMESPACE> get pvc golden-pvc. yaml -o yamlannotation to verify - cdi. kubevirt. io/storage. pod. phase: Succeeded Start cloning disk imageCloning is achieved by creating a new PVC with the ‘k8s. io/CloneRequest’ annotation indicating the name of the PVC the image is copied from. Once the controller detects the PVC, it starts two pods (source and target pods) which are responsible for the cloning of the image from one PVC to another using a unix socket that is created on the host itself. When the cloning is completed, the PVC which the image was copied to, is assigned with the ‘k8s. 
io/CloneOf’ annotation to indicate cloning completion. The copied VM image can be used by a new pod only after the cloning process is completed. The two cloning pods must execute on the same node. Pod adffinity is used to enforce this requirement; however, the cluster also needs to be configured to delay volume binding until pod scheduling has completed. When using local storage and Kubernetes 1. 9 and older, export KUBE_FEATURE_GATES before bringing up the cluster: $ export KUBE_FEATURE_GATES= PersistentLocalVolumes=true,VolumeScheduling=true,MountPropagation=true These features default to true in Kubernetes 1. 10 and later and thus do not need to be set. Regardless of the Kubernetes version, a storage class with volumeBindingMode set to “WaitForFirstConsumer” needs to be created. Eg: kind: StorageClass apiVersion: storage. k8s. io/v1 metadata: name: <local-storage-name> provisioner: kubernetes. io/no-provisioner volumeBindingMode: WaitForFirstConsumerCreate a PVC yaml file named target-pvc. yaml: apiVersion: v1kind: PersistentVolumeClaimmetadata: name: target-pvc namespace: target-ns labels: app: Host-Assisted-Cloning annotations: k8s. io/CloneRequest: source-ns/golden-pvc spec: accessModes: - ReadWriteOnce resources: requests: storage: 10GiEdit the PVC above - k8s. io/CloneRequest: The name of the PVC we copy the image from (including its namespace). For example: “source-ns/golden-pvc”. add the name of the storage class which defines volumeBindingMode per above. Note, this is not required in Kubernetes 1. 10 and later. Deploy the manifest yaml files: (Optional) Create the namespace where the target PVC will be deployed:$ kubectl create ns <TARGET-NAMESPACE> Deploy the target PVC:$ kubectl -n <TARGET-NAMESPACE> create -f target-pvc. yaml (Optional) Monitor the cloning pods:$ kubectl -n <SOURCE-NAMESPACE> logs <clone-source-pod-name>$ kubectl -n <TARGET-NAMESPACE> logs <clone-target-pod-name> Check the target PVC for ‘k8s. io/CloneOf’ annotation:$ kubectl -n <TARGET-NAMESPACE> get pvc <target-pvc-name> -o yamlStart uploading disk imageUploading a disk image is achieved by creating a new PVC with the ‘cdi. kubevirt. io/storage. upload. target’ annotation indicating the request for uploading. Part of the uploading process is the authentication of upload requests with an UPLOAD_TOKEN header. The user posts an upload token request to the cluster, and the encrypted Token is returned immediately within the response in the status field. For this to work, a dedicated service is deployed with a nodePort field. At that point, a curl request including the token is sent to start the upload process. Given the upload PVC and the curl request, the controller starts a pod which is responsible for uploading the local image to the PVC. Create a PVC yaml file named upload-pvc. yaml: apiVersion: v1kind: PersistentVolumeClaimmetadata: name: upload-pvc labels: app: containerized-data-importer annotations: cdi. kubevirt. io/storage. upload. target: spec: accessModes: - ReadWriteOnce resources: requests: storage: 1GiCreate the upload-token. yaml file: apiVersion: upload. cdi. kubevirt. io/v1alpha1kind: UploadTokenRequestmetadata: name: upload-pvc namespace: defaultspec: pvcName: upload-pvcUpload an image: deploy the upload-pvc$ kubectl apply -f upload-pvc. yaml Request for upload token$ TOKEN=$(kubectl apply -f upload-token. yaml -o= jsonpath={. status. token} ) Upload the image$ curl -v --insecure -H Authorization: Bearer $TOKEN --data-binary @tests/images/cirros-qcow2. 
img https://$(minikube ip):31001/v1alpha1/uploadSecurity ConfigurationsRBAC Roles: CDI runs under a custom ServiceAccount (cdi-sa) and uses the Kubernetes RBAC model to apply an application specific custom ClusterRole with rules to properly access needed resources such as PersistentVolumeClaims and Pods. Protecting VM Image Namespaces: Currently there is no support for automatically implementing Kubernetes ResourceQuotas and Limits on desired namespaces and resources, therefore administrators need to manually lock down all new namespaces from being able to use the StorageClass associated with CDI/KubeVirt and cloning capabilities. This capability of automatically restricting resources is planned for future releases. Below are some examples of how one might achieve this level of resource protection: Lock Down StorageClass Usage for Namespace:apiVersion: v1kind: ResourceQuotametadata: name: protect-mynamespacespec: hard: <STORAGE-CLASS-NAME>. storageclass. storage. k8s. io/requests. storage: 0 Note . storageclass. storage. k8s. io/persistentvolumeclaims: 0 would also accomplish the same affect by not allowing any pvc requests against the storageclass for this namespace. Open Up StorageClass Usage for Namespace:apiVersion: v1kind: ResourceQuotametadata: name: protect-mynamespacespec: hard: <STORAGE-CLASS-NAME>. storageclass. storage. k8s. io/requests. storage: 500Gi Note . storageclass. storage. k8s. io/persistentvolumeclaims: 4 could be used and this would only allow for 4 pvc requests in this namespace, anything over that would be denied. " }, { - "id": 125, + "id": 124, "url": "/2018/changelog-v0.9.0.html", "title": "KubeVirt v0.9.0", "author" : "kube🤖", "tags" : "release notes, changelog", "body": "v0. 9. 0: Released on: Thu Oct 4 14:42:28 2018 +0200 CI: NetworkPolicy tests CI: Support for an external provider (use a preconfigured cluster for tests) Fix virtctl console issues with CRI-O Support to initialize empty PVs Support for basic CPU pinning Support for setting IO Threads Support for block volumes Move preset logic to mutating webhook Introduce basic metrics reporting using prometheus metrics Many stabilizing fixes in many places" }, { - "id": 126, + "id": 125, "url": "/2018/KubeVirt-Network-Rehash.html", "title": "Kubevirt Network Rehash", "author" : "jcpowermac", "tags" : "networking, multus, ovs-cni, iptables", "body": "IntroductionThis post is a quick rehash of the previous post regarding KubeVirt networking. It has been updated to reflect the updates that are included with v0. 8. 0 which includesoptional layer 2 support via Multus and the ovs-cni. I won’t be covering the installationof OKD, Kubernetes, KubeVirt, Multus or ovs-cni all can be found in other documentation orposts. KubeVirt Virtual MachinesLike in the previous post I will deploy two virtual machines on two different hosts within an OKD cluster. These instances are where we will install our simple NodeJS and MongoDB application. Create Objects and Start the Virtual Machines: One of the first objects to create is the NetworkAttachmentDefinition. We are using a fairly simple definition for this post with an ovs bridge br1 and no vlan configured. apiVersion: k8s. cni. cncf. io/v1 kind: NetworkAttachmentDefinitionmetadata: name: ovs-net-br1spec: config: '{ cniVersion : 0. 3. 1 , type : ovs , bridge : br1 }'oc create -f https://gist. githubusercontent. com/jcpowermac/633de0066ee7990afc09fbd35ae776fe/raw/ac259386e1499b7f9c51316e4d5dcab152b60ce7/mongodb. yamloc create -f https://gist. githubusercontent. 
com/jcpowermac/633de0066ee7990afc09fbd35ae776fe/raw/ac259386e1499b7f9c51316e4d5dcab152b60ce7/nodejs. yamlStart the virtual machines instances ~/virtctl start nodejs~/virtctl start mongodbReview KubeVirt virtual machine related objects $ oc get net-attach-defNAME AGEovs-net-br1 16d$ oc get vmNAME AGEmongodb 4dnodejs 4d$ oc get vmiNAME AGEmongodb 3hnodejs 3h$ oc get podNAME READY STATUS RESTARTS AGEvirt-launcher-mongodb-bw2t8 2/2 Running 0 3hvirt-launcher-nodejs-dlgv6 2/2 Running 0 3hService and Endpoints: We may still want to use services and routes with a KubeVirt virtual machine instance utilizingmultiple interfaces. The service object below is consideredheadlessbecause the clusterIP is set to None. We don’t want load-balancing or single service IP asthis would force traffic over the cluster network which in this example we are trying to avoid. Mongo: ---kind: ServiceapiVersion: v1metadata: name: mongospec: clusterIP: None ports: - port: 27017 targetPort: 27017 name: mongo nodePort: 0selector: {}---kind: EndpointsapiVersion: v1metadata: name: mongosubsets: - addresses: - ip: 192. 168. 123. 139 ports: - port: 27017 name: mongoThe above ip address is provided by DHCP via dnsmasq to the virtual machine instance’s eth1 interface. All the nodes are virtual instances configured by libvirt. After creating the service and endpoints objects lets confirm that DNS is resolving correctly. $ ssh fedora@$(oc get pod -l kubevirt-vm=nodejs --template '{{ range . items }}{{. status. podIP}}{{end}}') \ python3 -c \ import socket;print(socket. gethostbyname('mongo. vm. svc. cluster. local'))\ 192. 168. 123. 139Node: We can also add a service, endpoints and route for the nodejs virtual machine so the applicationis accessible from the defined subdomain. apiVersion: v1kind: Servicemetadata: name: nodespec: clusterIP: None ports: - name: node port: 8080 protocol: TCP targetPort: 8080 sessionAffinity: None type: ClusterIP---apiVersion: v1kind: Endpointsmetadata: name: nodesubsets: - addresses: - ip: 192. 168. 123. 140 ports: - name: node port: 8080 protocol: TCP---apiVersion: v1kind: Routemetadata: name: nodespec: to: kind: Service name: nodeTesting our application: I am using the same application and method of installation as the previous post so I won’tduplicate it here. Just in case though let’s make sure that the application is availablevia the route. $ curl http://node-vm. apps. 192. 168. 122. 101. nip. io <!DOCTYPE html><html lang= en > <head> <meta charset= utf-8 /> <meta http-equiv= X-UA-Compatible content= IE=edge,chrome=1 /> <title>Welcome to OpenShift</title> . . . outout. . . <p> Page view count: <span class= code id= count-value >2</span> . . . output. . . </p> </head></html>Networking in DetailJust like in the previous post we should confirm how this works all together. Let’s review the virtual machine to virtual machinecommunication and route to virtual machine. Kubernetes-level: services: We have created two headless services one for node and one for mongo. This allows us to use the hostname mongo to connect to MongoDB via the alternative interface. $ oc get servicesNAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGEmongo ClusterIP None <none> 27017/TCP 8hnode ClusterIP None <none> 8080/TCP 7h$ ssh fedora@$(oc get pod virt-launcher-nodejs-dlgv6 --template '{{. status. podIP}}') cat /etc/sysconfig/nodejsMONGO_URL=mongodb://nodejs:nodejspassword@mongo. vm. svc. cluster. local/nodejsendpoints: The endpoints below were manually created for each virtual machine based on the IP Address of eth1. 
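As a quick recap of the pattern, each pair consists of a headless Service and a hand-written Endpoints object, roughly like this (a minimal sketch reusing the mongo example and the eth1 address shown earlier; names, ports and addresses would match your own virtual machine):

  kind: Service
  apiVersion: v1
  metadata:
    name: mongo
  spec:
    clusterIP: None            # headless: no service IP and no load-balancing over the cluster network
    ports:
    - name: mongo
      port: 27017
      targetPort: 27017
  ---
  kind: Endpoints
  apiVersion: v1
  metadata:
    name: mongo                # must match the Service name
  subsets:
  - addresses:
    - ip: 192.168.123.139      # the DHCP address of the mongodb VM's eth1 interface
    ports:
    - name: mongo
      port: 27017

Because the Endpoints object is maintained by hand, it has to be updated if the VM's eth1 address ever changes.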
$ oc get endpointsNAME ENDPOINTS AGEmongo 192. 168. 123. 139:27017 8hnode 192. 168. 123. 140:8080 7hroute: This will allow us access the NodeJS example application using the route url. $ oc get route NAME HOST/PORT PATH SERVICES PORT TERMINATION WILDCARDnode node-vm. apps. 192. 168. 122. 101. nip. io node <all> NoneHost-level: In addition to the existing interface eth0 and bridge br0, eth1 is the uplink for the ovs-cni bridge br1. This needs to be manually configured prior to use. interfaces: ip a . . . output. . . 2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP group default qlen 1000 link/ether 52:54:00:5f:90:85 brd ff:ff:ff:ff:ff:ff inet 192. 168. 122. 111/24 brd 192. 168. 122. 255 scope global noprefixroute dynamic eth0 valid_lft 2282sec preferred_lft 2282sec inet6 fe80::5054:ff:fe5f:9085/64 scope link valid_lft forever preferred_lft forever3: eth1: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast master ovs-system state UP group default qlen 1000 link/ether 52:54:01:5f:90:85 brd ff:ff:ff:ff:ff:ff. . . output. . . 5: ovs-system: <BROADCAST,MULTICAST> mtu 1500 qdisc noop state DOWN group default qlen 1000 link/ether 2a:6e:65:7e:65:3a brd ff:ff:ff:ff:ff:ff9: br1: <BROADCAST,MULTICAST> mtu 1500 qdisc noop state DOWN group default qlen 1000 link/ether 6e:d5:db:12:b5:43 brd ff:ff:ff:ff:ff:ff10: br0: <BROADCAST,MULTICAST> mtu 1450 qdisc noop state DOWN group default qlen 1000 link/ether aa:3c:bd:5a:ac:46 brd ff:ff:ff:ff:ff:ff. . . output. . . Bridge: The command and output below shows the Open vSwitch bridge and interfaces. The veth8bf25a9b interfaceis one of the veth pair created to connect the virtual machine to the Open vSwitch bridge. ovs-vsctl show 77147900-3d26-46c6-ac0b-755da3aa4b97 Bridge br1 Port br1 Interface br1 type: internal Port veth8bf25a9b Interface veth8bf25a9b Port eth1 Interface eth1 . . . output. . . Pod-level: interfaces: There are two bridges k6t-eth0 and k6t-net0. eth0 and net1 are a veth pair with the alternate sideavailable on the host. eth0 is a member of the k6t-eth0 bridge. net1 is a member of the k6t-net0 bridge. ~ oc exec -n vm -c compute virt-launcher-nodejs-76xk7 -- ip a . . . output3: eth0@if41: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1450 qdisc noqueue master k6t-eth0 state UP group default link/ether 0a:58:0a:17:79:04 brd ff:ff:ff:ff:ff:ff link-netnsid 0 inet6 fe80::858:aff:fe17:7904/64 scope link valid_lft forever preferred_lft forever5: net1@if42: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc noqueue master k6t-net1 state UP group default link/ether 02:00:00:74:17:75 brd ff:ff:ff:ff:ff:ff link-netnsid 0 inet6 fe80::ff:fe74:1775/64 scope link valid_lft forever preferred_lft forever6: k6t-eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1450 qdisc noqueue state UP group default link/ether 0a:58:0a:17:79:04 brd ff:ff:ff:ff:ff:ff inet 169. 254. 75. 10/32 brd 169. 254. 75. 10 scope global k6t-eth0 valid_lft forever preferred_lft forever inet6 fe80::858:aff:fe82:21/64 scope link valid_lft forever preferred_lft forever7: k6t-net1: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc noqueue state UP group default link/ether 02:00:00:74:17:75 brd ff:ff:ff:ff:ff:ff inet 169. 254. 75. 11/32 brd 169. 254. 75. 
11 scope global k6t-net1 valid_lft forever preferred_lft forever inet6 fe80::ff:fe07:2182/64 scope link dadfailed tentative valid_lft forever preferred_lft forever8: vnet0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1450 qdisc pfifo_fast master k6t-eth0 state UNKNOWN group default qlen 1000 link/ether fe:58:0a:82:00:21 brd ff:ff:ff:ff:ff:ff inet6 fe80::fc58:aff:fe82:21/64 scope link valid_lft forever preferred_lft forever9: vnet1: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast master k6t-net1 state UNKNOWN group default qlen 1000 link/ether fe:37:cf:e0:ad:f2 brd ff:ff:ff:ff:ff:ff inet6 fe80::fc37:cfff:fee0:adf2/64 scope link valid_lft forever preferred_lft foreverShowing the bridge k6t-eth0 and k6t-net member ports. ~ oc exec -n vm -c compute virt-launcher-nodejs-dlgv6 -- bridge link show 3: eth0 state UP @if41: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1450 master k6t-eth0 state forwarding priority 32 cost 25: net1 state UP @if42: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 master k6t-net1 state forwarding priority 32 cost 28: vnet0 state UNKNOWN : <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1450 master k6t-eth0 state forwarding priority 32 cost 1009: vnet1 state UNKNOWN : <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 master k6t-net1 state forwarding priority 32 cost 100DHCP: The virtual machine network is configured by DHCP. You can see virt-launcher has UDP port 67 openon the k6t-eth0 interface to serve DHCP to the virtual machine. As described in the previouspost the virt-launcher process containsa simple DHCP server that provides an offer and typical options to the virtual machine instance. ~ oc exec -n vm -c compute virt-launcher-nodejs-dlgv6 -- ss -tuanp Netid State Recv-Q Send-Q Local Address:Port Peer Address:Portudp UNCONN 0 0 0. 0. 0. 0%k6t-eth0:67 0. 0. 0. 0:* users:(( virt-launcher ,pid=7,fd=15))libvirt: With virsh domiflist we can also see that the vnet0 interface is a member on the k6t-eth0 bridge and vnet1 is a member of the k6t-net1 bridge. ~ oc exec -n vm -c compute virt-launcher-nodejs-dlgv6 -- virsh domiflist vm_nodejs Interface Type Source Model MAC-------------------------------------------------------vnet0 bridge k6t-eth0 virtio 0a:58:0a:82:00:2avnet1 bridge k6t-net1 virtio 20:37:cf:e0:ad:f2VM-level: interfaces: Fortunately the vm interfaces are fairly typical. Two interfaces: one that has been assigned the originalpod ip address and the other the ovs-cni layer 2 interface. The eth1 interface receives a IP addressfrom DHCP provided by dnsmasq that was configured by libvirt network on the physical host. ~ ssh fedora@$(oc get pod virt-launcher-nodejs-dlgv6 --template '{{. status. podIP}}') sudo ip a . . . output. . . 2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1450 qdisc fq_codel state UP group default qlen 1000 link/ether 0a:58:0a:82:00:2a brd ff:ff:ff:ff:ff:ff inet 10. 130. 0. 42/23 brd 10. 130. 1. 255 scope global dynamic eth0 valid_lft 86239518sec preferred_lft 86239518sec inet6 fe80::858:aff:fe82:2a/64 scope link tentative dadfailed valid_lft forever preferred_lft forever3: eth1: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc fq_codel state UP group default qlen 1000 link/ether 20:37:cf:e0:ad:f2 brd ff:ff:ff:ff:ff:ff inet 192. 168. 123. 140/24 brd 192. 168. 123. 255 scope global dynamic eth1 valid_lft 3106sec preferred_lft 3106sec inet6 fe80::2237:cfff:fee0:adf2/64 scope link valid_lft forever preferred_lft foreverConfiguration and DNS: In this example we want to use Kubernetes services so special care must be used whenconfiguring the network interfaces. 
The default route and dns configuration must bemaintained by eth0. eth1 has both route and dns configuration disabled. ~ ssh fedora@$(oc get pod virt-launcher-nodejs-dlgv6 --template '{{. status. podIP}}') sudo cat /etc/sysconfig/network-scripts/ifcfg-eth0 BOOTPROTO=dhcpDEVICE=eth0ONBOOT=yesTYPE=EthernetUSERCTL=no# Use route and dns from DHCPDEFROUTE=yesPEERDNS=yes~ ssh fedora@$(oc get pod virt-launcher-nodejs-dlgv6 --template '{{. status. podIP}}') sudo cat /etc/sysconfig/network-scripts/ifcfg-eth1 BOOTPROTO=dhcpDEVICE=eth1IPV6INIT=noNM_CONTROLLED=noONBOOT=yesTYPE=Ethernet# Do not use route and dns from DHCPPEERDNS=noDEFROUTE=noJust quickly wanted to cat the /etc/resolv. conf file to show that DNS is configured so that kube-dns will be properly queried. ~ ssh fedora@$(oc get pod virt-launcher-nodejs-76xk7 --template '{{. status. podIP}}') sudo cat /etc/resolv. conf search vm. svc. cluster. local. svc. cluster. local. cluster. local. 168. 122. 112. nip. io. nameserver 192. 168. 122. 112VM to VM communication: The virtual machines are on different hosts. This was done purposely to show that connectivitybetween virtual machine and hosts. Here we finally get to use Skydive. The real-time topology below along witharrows annotate the flow of packets between the host and virtual machine network devices. VM to VM Connectivity Tests: To confirm connectivity we are going to do a few things. First look for an establishedconnection to MongoDB and finally check the NodeJS logs looking for confirmation of database connection. TCP connection: After connecting to the nodejs virtual machine via ssh we can use ss to determine the current TCP connections. We are specifically looking for the established connections to the MongoDB service that is running on the mongodb virtual machine. ssh fedora@$(oc get pod virt-launcher-nodejs-dlgv6 --template '{{. status. podIP}}') sudo ss -tanp State Recv-Q Send-Q Local Address:Port Peer Address:Port. . . output. . . ESTAB 0 0 192. 168. 123. 140:33156 192. 168. 123. 139:27017 users:(( node ,pid=12893,fd=11))ESTAB 0 0 192. 168. 123. 140:33162 192. 168. 123. 139:27017 users:(( node ,pid=12893,fd=13))ESTAB 0 0 192. 168. 123. 140:33164 192. 168. 123. 139:27017 users:(( node ,pid=12893,fd=14)). . . output. . . Logs: Here we are reviewing the logs of node to confirm we have a database connection to mongo via the service hostname. ssh fedora@$(oc get pod virt-launcher-nodejs-dlgv6 --template '{{. status. podIP}}') sudo journalctl -u nodejs . . . output. . . October 01 18:28:09 nodejs. localdomain systemd[1]: Started OpenShift NodeJS Example. October 01 18:28:10 nodejs. localdomain node[12893]: Server running on http://0. 0. 0. 0:8080October 01 18:28:10 nodejs. localdomain node[12893]: Connected to MongoDB at: mongodb://nodejs:nodejspassword@mongo. vm. svc. cluster. local/nodejs. . . output. . . Route to VM communication: Finally let’s confirm that when using the OKD route that traffic is successfully routed to nodejs eth1 interface. HAProxy Traffic Status: OKD HAProxy provides optional traffic status - which we already enabled. The screenshot below showsthe requests that Nginx is receiving for nodejs. ingress. virtomation. com. haproxy-stats HAProxy to NodeJS VM: The HAProxy pod runs on the master OKD in this scenario. Using skydive we can see a TCP 8080 connection to nodejs eth1 interface exiting eth1 of the master. $ oc get pod -o wide -n default -l router=router NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODErouter-2-nfqr4 0/1 Running 0 20h 192. 168. 122. 101 192. 168. 
122. 101. nip. io <none> haproxy-vm " }, { - "id": 127, + "id": 126, "url": "/2018/attaching-to-multiple-networks.html", "title": "Attaching To Multiple Networks", "author" : "yuvalif", "tags" : "multus, networking, CNI, multiple networks", "body": "IntroductionVirtual Machines often need multiple interfaces connected to different networks. This could be because the application running on it expect to be connected to different interfaces (e. g. a virtual router running on the VM); because the VM need L2 connectivity to a network not managed by Kubernetes (e. g. allow for PXE booting); because an existing VM, ported into KubeVirt, runs applications that expect multiple interfaces to exists; or any other reason - which we’ll be happy to hear about in the comment section! In KubeVirt, as nicely explained in this blog post, there is already a mechanism to take an interface from the pod and move it into the Virtual Machine. However, Kubernetes allows for a single network plugin to be used in a cluster (across all pods), and provide one interface for each pod. This forces us to choose between having pod network connectivity and any other network connectivity for the pod and, in the context of KubeVirt, the Virtual Machine within. To overcome this limitation, we use Multus, which is a “meta” CNI (Container Network Interface), allowing multiple CNIs to coexist, and allow for a pod to use the right ones for its networking needs. How Does it Work for Pods?The magic is done via a new CRD (Custom Resource Definition) called NetworkAttachmentDefinition introduced by the Multus project, and adopted by the Kubernetes community as the de-facto standard for attaching pods to one or more networks. These network definition contains a field called type which indicates the name of the actual CNI that provide the network, and different configuration payloads which the Multus CNI is passing to the actual CNI. For example, the following network definition: apiVersion: k8s. cni. cncf. io/v1 kind: NetworkAttachmentDefinitionmetadata: name: a-bridge-networkspec: config: '{ cniVersion : 0. 3. 0 , name : a-bridge-network , type : bridge , bridge : br0 , isGateway : true, ipam : { type : host-local , subnet : 192. 168. 5. 0/24 , dataDir : /mnt/cluster-ipam }}'Allows attaching a pod into a network provided by the bridge CNI. Once a pod with the following annotation is created: apiVersion: v1kind: Podmetadata: name: samplepod annotations: k8s. v1. cni. cncf. io/networks: a-bridge-networkspec:The Multus CNI will find out whether a CNI of type bridge exists, and invoke it with the rest of the configuration in the CRD. Even without Multus, this exact configuration could have been put under /etc/cni/net. d, and provide the same network to the pod, using the bride CNI. But, in such a case, this would have been the only network interface to the pod, since Kubernetes just takes the first configuration file from that directory (sorted by alphabetical order) and use it to provide a single interface for all pods. If we have Multus around, and some other CNI (e. g. flannel), in addition to the bridge one, we could have have defined another NetworkAttachmentDefinition object, of type flannel, with its configuration, for example: apiVersion: k8s. cni. cncf. io/v1 kind: NetworkAttachmentDefinitionmetadata: name: flannel-networkspec: config: '{ cniVersion : 0. 3. 
0 , type : flannel , delegate : { isDefaultGateway : true } }'Add a reference to it in the pod’s annotation, and have two interfaces, connected to two different networks on the pod. It is quite common that basic networking is provided by one of the mainstream CNIs (flannel, calico, weave etc. ) for all pods, and more advanced cases are added specifically when needed. For that, a default CNI could be configured for Multus, so that a NetworkAttachmentDefinition object is not needed, nor any annotation at pod level. The interface provided for such a network wil be marked as eth0 on the pod, for smooth transition when Multus is introduced into an cluster with networking. Any other interface added to the pod due to an explicit NetworkAttachmentDefinition object, will be marked as: net1, net2 and so on. How Does it Work in KubeVirt?Most initial steps would be the same as in the pod’s case: Install the different CNIs that you would like to provide networks to our Virtual Machines Install Multus Configure Multus with some default CNI that we would like to provide eth0 for all Virtual Machines Add NetworkAttachmentDefinition object for each network that we would like some of our Virtual Machines to be usingNow, inside the VMI (virtual Machine Instance) definition, a new type of network called multus should be added: networks: - name: default-net pod: {} - name: another-net multus: networkName: a-bridge-networkThis would allow VMI interfaces to be connected to two networks: default which is connected to the CNI which is defined as the default one for Multus. No NetworkAttachmentDefinition CRD is needed for this one, and we assume that the needed configuration is just taken from the default CNI’s configuration under /etc/cni/net. d/. We also assume that an IP address will be provided to eth0 on the pod, which will be delegated to the Virtual Machine’s eth0 interface. another-net which is connected to the network defined by a NetworkAttachmentDefinition CRD named a-bridge-network. The identity fo the CNI that would actually provide the network, as well as the configuration for this network are all defined in the CRD. An interface named net1 connected to that network wil be created on the pod. If this interface get an IP address from the CNI, this IP will be delegated to the Virtual Machine’s eth1 interface. If no IP address is given by the CNI, no IP will be given to eth1 on the Virtual Machine, and only L2 connectivity will be provided. Deployment ExampleIn the following example we use flannel as the CNI that provides the primary pod network, and an OVS bridge CNI provides a secondary network. Install Kubernetes: This was tested with latest version, on a single node cluster. Best would be to just follow these instructions Since we use a single node cluster, Don’t forget to allow scheduling pods on the master:$ kubectl taint nodes --all node-role. kubernetes. io/master- If running kubectl from master itself, don’t forget to copy over the conf file:$ mkdir -p /$USER/. kube && cp /etc/kubernetes/admin. conf /$USER/. kube/configInstall Flannel: Make sure pass these parameters are used when starting kubeadm:$ kubeadm init --pod-network-cidr=10. 244. 0. 0/16 Then call:$ kubectl apply -f https://raw. githubusercontent. com/coreos/flannel/v0. 10. 0/Documentation/kube-flannel. 
ymlInstall and Start OVS: On Fedora28 that would be (see here for other options):$ dnf install openvswitch$ systemctl start openvswitchInstall and Configure Multus: Install Multus as a daemon set (flannel is already set as the default CNI in the yaml below):$ kubectl apply -f https://raw. githubusercontent. com/intel/multus-cni/master/images/multus-daemonset. yml Make sure that Multus is the first CNI under: /etc/cni/net. d/. If not, rename it so it would be the first, e. g. : mv /etc/cni/net. d/70-multus. conf /etc/cni/net. d/00-multus. confInstall and Configure OVS CNI: First step would be to create the OVS bridge:ovs-vsctl add-br blue To install the OVS CNI use:$ kubectl apply -f https://raw. githubusercontent. com/k8snetworkplumbingwg/ovs-cni/main/examples/ovs-cni. yml Create a NetworkAttachmentDefinition CRD for the “blue” bridge:apiVersion: k8s. cni. cncf. io/v1 kind: NetworkAttachmentDefinitionmetadata: name: ovs-bluespec: config: '{ cniVersion : 0. 3. 1 , type : ovs , bridge : blue }' To use as specific port/vlan from that bridge, you should first create one:ovs-vsctl add-br blue1 blue 100 Then, define its NetworkAttachmentDefinition CRD:apiVersion: k8s. cni. cncf. io/v1 kind: NetworkAttachmentDefinitionmetadata: name: ovs-blue100spec: config: '{ cniVersion : 0. 3. 1 , type : ovs , bridge : blue100 , vlan : 100 }' More information could be found in the OVS CNI documentationDeploy a Virtual Machine with 2 Interfaces: First step would be to deploy KubeVirt (note that 0. 8 is needed for Multus support):$ export VERSION=v0. 8. 0$ kubectl create -f https://github. com/kubevirt/kubevirt/releases/download/$VERSION/kubevirt. yaml Now, create a VMI with 2 interfaces, one connected to the default network (flannel in our case) and one to the OVS “blue” bridge:apiVersion: kubevirt. io/v1alpha2kind: VirtualMachineInstancemetadata: creationTimestamp: null labels: special: vmi-multus-multiple-net name: vmi-multus-multiple-netspec: domain: devices: disks: - disk: bus: virtio name: registrydisk volumeName: registryvolume - disk: bus: virtio name: cloudinitdisk volumeName: cloudinitvolume interfaces: - bridge: {} name: default - bridge: {} name: ovs-blue-net machine: type: resources: requests: memory: 1024M networks: - name: default pod: {} - multus: networkName: ovs-blue name: ovs-blue-net terminationGracePeriodSeconds: 0 volumes: - name: registryvolume registryDisk: image: kubevirt/fedora-cloud-container-disk-demo - cloudInitNoCloud: userData: | #!/bin/bash echo fedora | passwd fedora --stdin name: cloudinitvolumestatus: {} Once the machine is up and running, you can use virtctl to log into it and make sure that eth0 exists as the default interface (with an IP address on the flannel subnet) and eth1 as the interface connected to the OVS bridge (without an IP)" }, { - "id": 128, + "id": 127, "url": "/2018/KubeVirt-Memory-Overcommit.html", "title": "Kubevirt Memory Overcommit", "author" : "tripledes", "tags" : "memory, overcommitment", "body": "KubeVirt memory overcommitmentOne of the latest additions to KubeVirt has been the memory overcommitment feature which allows the memory being assigned to a Virtual Machine Instance to be different than what it requests to Kubernetes. What it does: As you might already know, when a pod is created in Kubernetes, it can define requests for resources like CPU or memory, those requests are taken into account for deciding to what node the pod will be scheduled. 
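As a refresher, such a request is simply part of the pod spec; a generic (non-KubeVirt) pod requesting memory might look like this (placeholder names and image, not taken from the post):

  apiVersion: v1
  kind: Pod
  metadata:
    name: example-pod              # placeholder name
  spec:
    containers:
    - name: app
      image: example/app:latest    # placeholder image
      resources:
        requests:
          memory: 1Gi              # counted against the node's allocatable memory at scheduling time
          cpu: 500m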
Usually, on a node, there are already some resources reserved or requested, Kubernetes itself reserves some resources for its processes and there might be monitoring pods or storage pods already requesting resources as well, all those are also accounted for what is left to run pods. Having the memory overcommitment feature included in KubeVirt allows the users to assign the VMI more or less memory than set into the requests, offering more flexibility, giving the user the option to overcommit (or undercommit) the node’s memory if needed. How does it work?: It’s not too complex to get this working, all that is needed is to have, at least, KubeVirt version 0. 8. 0 installed, which includes the aforementioned feature, and use the following settings on the VMI definition: domain. memory. guest: Defines the amount memory assigned to the VMI process (by libvirt). domain. resources. requests. memory: Defines the memory requested to Kubernetes by the pod that will run the VMI. domain. resources. overcommitGuestOverhead: Boolean switch to enable the feature. Once those are in place, Kubernetes will consider the requested memory for scheduling while libvirt will define the domain with the amount of memory defined in domain. memory. guest. For example, let’s define a VMI which requests 24534983Ki but wants to use 25761732Kiinstead. apiVersion: kubevirt. io/v1alpha2kind: VirtualMachineInstancemetadata: name: testvm1 namespace: kubevirtspec: domain: memory: guest: 25761732Ki resources: requests: memory: 24534983Ki overcommitGuestOverhead: true devices: disks: - volumeName: myvolume name: mydisk disk: bus: virtio - name: cloudinitdisk volumeName: cloudinitvolume cdrom: bus: virtio volumes: - name: myvolume registryDisk: image: <registry_address>/kubevirt/fedora-cloud-container-disk-demo:latest - cloudInitNoCloud: userData: | #cloud-config hostname: testvm1 users: - name: kubevirt gecos: KubeVirt Project sudo: ALL=(ALL) NOPASSWD:ALL passwd: $6$JXbc3063IJir. e5h$ypMlYScNMlUtvQ8Il1ldZi/mat7wXTiRioGx6TQmJjTVMandKqr. jJfe99. QckyfH/JJ. OdvLb5/OrCa8ftLr. shell: /bin/bash home: /home/kubevirt lock_passwd: false name: cloudinitvolumeAs explained already, the QEMU process spawn by libvirt, will get 25761732Ki of RAM, minus some amount for the graphics and firmwares, the guest OS will see its total memory close to that amount, while Kubernetes would think the pod requests 24534983Ki, making more room to schedule more pods if needed. Now let’s imagine we want to undercommit, here’s the same YAML definition but setting less memory than requested: apiVersion: kubevirt. io/v1alpha2kind: VirtualMachineInstancemetadata: name: testvm1 namespace: kubevirtspec: domain: memory: guest: 23308234Ki resources: requests: memory: 24534983Ki overcommitGuestOverhead: true devices: disks: - volumeName: myvolume name: mydisk disk: bus: virtio - name: cloudinitdisk volumeName: cloudinitvolume cdrom: bus: virtio volumes: - name: myvolume registryDisk: image: <registry_url>/kubevirt/fedora-cloud-container-disk-demo:latest - cloudInitNoCloud: userData: | #cloud-config hostname: testvm1 users: - name: kubevirt gecos: KubeVirt Project sudo: ALL=(ALL) NOPASSWD:ALL passwd: $6$JXbc3063IJir. e5h$ypMlYScNMlUtvQ8Il1ldZi/mat7wXTiRioGx6TQmJjTVMandKqr. jJfe99. QckyfH/JJ. OdvLb5/OrCa8ftLr. shell: /bin/bash home: /home/kubevirt lock_passwd: false name: cloudinitvolumeWhy this is needed: At this point you might be asking yourself why would this feature be needed if Kubernetes already does resource management for you, right? 
Well, there might be few scenarios where this feature would be needed, for instance imagine you decide to have a cluster or few nodes completely dedicated to run Virtual Machines, this feature allows you to make use of all the memory in the nodes without really accounting for the already reserved or requested memory in the system. Let’s put it as an example, say a node has 100GiB of RAM, with 2GiB of reserved memory plus 1GiB requested by monitoring and storage pods, that leaves the user 97GiB of allocatable memory to schedule pods, so each VMI that needs to be started on a node needs to request an amount that would fit, if the user wants to run 10 VMIs on each node with 10GiB of RAM Kubernetes wouldn’t allow that cause the sum of their requests would be more than what’s allocatable in the node. Using the memory overcommitment feature the user can tell Kubernetes that each VMI requests 9. 7GiB and set domain. memory. guest to 10GiB. The other way around, undercommitting the node, also works, for instance, to make sure that no matter how many VMIs will be under memory pressure the node will still be in good shape. Using the same node sizing, 100GiB, we could define 10 VMIs to request 9. 7GiB, while giving them exactly 9. 0GiB, that’d leave around 7GiB for the node processes while Kubernetes wouldn’t try to schedule any more pods on it cause all the requests already sum up to 100% of the allocatable memory. " }, { - "id": 129, + "id": 128, "url": "/2018/changelog-v0.8.0.html", "title": "KubeVirt v0.8.0", "author" : "kube🤖", "tags" : "release notes, changelog", "body": "v0. 8. 0: Released on: Thu Sep 6 14:25:22 2018 +0200 Support for DataVolume Support for a subprotocol for webbrowser terminals Support for virtio-rng Support disconnected VMs Support for setting host model Support for host CPU passthrough Support setting a vNICs mac and PCI address Support for memory over-commit Support booting from network devices Use less devices by default, aka disable unused ones Improved VMI shutdown status More logging to improve debugability A lot of small fixes, including typos and documentation fixes Race detection in tests Hook improvements Update to use Fedora 28 (includes updates of dependencies like libvirt and Move CI to support Kubernetes 1. 11" }, { - "id": 130, + "id": 129, "url": "/2018/kubevirtci.html", "title": "Kubevirtci", "author" : "awels", "tags" : "kubevirtci, ci-cd, cicd, qemu, virtual machine, container", "body": "Building Clusters with kubevirtciOne of the projects in the KubeVirt github organization is a project called kubevirtci. While this may sound like it’s the repo containing the KubeVirt CI system and scripts, that’s not completely accurate. We leverage kubevirtci for our CI process, but there’s more to the CI system than just this repo. Today, we’re not going to talk about the CI system in general, but instead we’re going to talk about the kubevirtci project, what it does, how it does it and why it’s pretty cool. What it does: In short: Deploys a Kubernetes (or OpenShift) cluster using QEMU Virtual Machines, that run inside Docker containers. First a base image is provisioned, this image contains a stock a Kubernetes node. Then one or more of these images are deployed in the target environment to make up the cluster. Provisioning: Building the Kubernetes Node virtual machine happens in several steps: Use a Fedora cloud image. Provision Kubernetes using Ansible onto that image. 
Install any drivers and providers/provisioners for storage or other resources needed for the cluster. Once the node is provisioned, it is packaged inside a Docker container and published to a registry. Deployment: To create a cluster, all one has to do is start up a number of containers, which in turn start up the Virtual Machine contained within that container. The first node is designated as the master node and a script is run during startup to configure any master node specific configuration. Any additional nodes have a post startup script run to register themselves as nodes with the master. As this point we have a basic kubernetes cluster running the number of specified nodes. Plus some other interesting services described later. How does it work?: After the provision step, which has created a pre-built Kubernetes node in a virtual machine, the deployment step doesn’t happen all by itself. Starting up the cluster is handled by a cli application, aptly named ‘gocli’. Gocli is a go application that contains the knowledge needed to start the cluster, make a node the master node, and then register the other nodes with the master node as compute nodes. It also does a few other nice things, like start a dnsmasq container for dns inside the cluster, and a docker registry container for images. It also can extract and update the kubeconfig for the cluster, to make it possible for external tools to interact with the cluster (such as kubectl). And it can of course shut down the cluster in an orderly fashion. The entire cluster is ephemeral which is very helpful when developing new applications which could potentially damage the cluster. Simply shut down the cluster, and start a new fresh one. The ephemeral nature and ease of deployment makes this extremely useful in a CI/CD pipeline context Why is the registry container useful? Let’s say you are developing a new application and you wish to test it in a Kubernetes cluster. You would build the container image with whatever tools you normally use, you can then publish it to the cluster registry, and deploy the manifest that uses the registry to deploy the container in the cluster. The gocli command tool has the ability to find an external name and port for the internal registry into which you can publish your images. All of the above allows you to now with a few commands, spin up a kubernetes cluster, build application images, push those images into a registry inside that cluster, and deploy that application inside the cluster. As a developer this allows quick compile/test scenarios in a somewhat realistic environment on a regular workstation. It allows for automatic end to end testing in a real cluster, and it allows for CI/CD tools to run tests against an ephemeral cluster that can be spun up and destroyed easily. Why this is cool: So how is this better than simply creating some Virtual Machines and cloning them to add new nodes? For one packaging is much easier, you can simply drop it in a container registry and its available everywhere the registry is available. Another thing is a lot of the configuration is stored in the container instead of in the Virtual Machine, so it is easy to layer on top of that container a different configuration for different use cases. The Virtual Machine stays the same, it’s the container that changes to meet the needs of the use case. 
Since the container knows all the details of the Virtual Machine it is also possible to construct utilities that retrieve information about the Virtual Machine or pass new information to the Virtual Machine through the container. But the biggest advantage is that this can all be easily automated and repeated. Each time you spin up a cluster, it will be identical from a previous run. You have a mechanism to allow you to populate the cluster with an application you are developing/testing and then running automated processes against that application. KubeVirt itself works on a similar principal, embed a QEMU process in a container to start a Virtual Machine with configuration obtained from the encapsulating container. And the KubeVirt development team uses kubevirtci images in their development workflow as well. And a final interesting thought: Everything mentioned in this article is a container, from the Virtual Machine images, to the gocli utility. It might be an interesting exercise to see if we can leverage kubernetes to manage the life cycle in a CI/CD system. We would then be creating Kubernetes clusters inside a kubernetes cluster to run CI/CD. " }, { - "id": 131, + "id": 130, "url": "/2018/Kubevirt-v0.7.0.html", "title": "Kubevirt V0.7.0", "author" : "karmab", "tags" : "hilights, release notes, review, hugepages", "body": "IntroductionKubeVirt v0. 7. 0 was released a few weeks ago and brings a bunch of new features that this blog post will detail. The full list is visible here but we will pick the ones oriented to the end user Featureshugepages support: To use hugepages as backing memory, we need to indicate a desired amount of memory (resources. requests. memory) and size of hugepages to use (memory. hugepages. pageSize) apiVersion: kubevirt. io/v1alpha1kind: VirtualMachinemetadata: name: myvmspec: domain: resources: requests: memory: 64Mi memory: hugepages: pageSize: 2Mi disks: - name: myimage volumeName: myimage disk: {} volumes: - name: myimage persistentVolumeClaim: claimname: myclaimNote that a node must have pre-allocated hugepages hugepages size cannot be bigger than requested memory requested memory must be divisible by hugepages sizesetting network interface model and MAC address: the following syntax within interfaces section allows us to set both a mac address and network model kind: VMspec: domain: devices: interfaces: - name: red macAddress: de:ad:00:00:be:af model: e1000 bridge: {} networks: - name: red pod: {}alternative network models can be e1000 e1000e ne2k_pci pcnet rtl8139 virtiosetting a disks serial number: The new keyword serial in the disks section allows us to specify a serial number apiVersion: kubevirt. io/v1alpha1kind: VirtualMachinemetadata: name: myvmspec: domain: resources: requests: memory: 64Mi disks: - name: myimage volumeName: myimage serial: sn-11223344 disk: {} volumes: - name: myimage persistentVolumeClaim: claimname: myclaimspecifying the CPU model: Setting the CPU model is possible via spec. domain. cpu. model. The following VM will have a CPU with the Conroe model: metadata: name: myvmispec: domain: cpu: # this sets the CPU model model: ConroeThe available models are listed here Additionally, we can also use host-model host-passthroughvirtctl expose: To access services listening within vms, we can expose their ports using standard kubernetes services. 
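For example, a hand-written Service can select the VMI's virt-launcher pod by label. The sketch below assumes the VMI carries a special: vmi-ephemeral label that ends up on its pod; adjust the selector to whatever labels your VMI actually uses:

  apiVersion: v1
  kind: Service
  metadata:
    name: vmiservice
  spec:
    selector:
      special: vmi-ephemeral   # assumed label on the VMI, propagated to its virt-launcher pod
    ports:
    - port: 27017              # port exposed by the service
      targetPort: 22           # port the guest is actually listening on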
Alternatively, we can make use of the virtctl binary to achieve the same result: to expose a cluster ip servicevirtctl expose virtualmachineinstance vmi-ephemeral --name vmiservice --port 27017 --target-port 22 to expose a node port servicevirtctl expose virtualmachineinstance vmi-ephemeral --name nodeport --type NodePort --port 27017 --target-port 22 --node-port 30000 to expose a load balancer servicevirtctl expose virtualmachineinstance vmi-ephemeral --name lbsvc --type LoadBalancer --port 27017 --target-port 3389Kubernetes compatible networking approach (SLIRP): In slirp mode, virtual machines are connected to the network backend using QEMU user networking mode. In this mode, QEMU allocates internal IP addresses to virtual machines and hides them behind NAT. kind: VMspec: domain: devices: interfaces: - name: red slirp: {} # connect using SLIRP mode networks: - name: red pod: {}Role aggregation for our roles: Every KubeVirt installation after version v0. 5. 1 comes a set of default RBAC cluster roles that can be used to grant users access to VirtualMachineInstances. The kubevirt. io:admin and kubevirt. io:edit ClusterRoles have console and VNC access permissions built into them. ConclusionThis concludes our review of latest kubevirt features. Enjoy them ! " }, { - "id": 132, + "id": 131, "url": "/2018/changelog-v0.7.0.html", "title": "KubeVirt v0.7.0", "author" : "kube🤖", "tags" : "release notes, changelog", "body": "v0. 7. 0: Released on: Wed Jul 4 17:41:33 2018 +0200 CI: Move test storage to hostPath CI: Add support for Kubernetes 1. 10. 4 CI: Improved network tests for multiple-interfaces CI: Drop Origin 3. 9 support CI: Add test for testing templates on Origin VM to VMI rename VM affinity and anti-affinity Add awareness for multiple networks Add hugepage support Add device-plugin based kvm Add support for setting the network interface model Add (basic and inital) Kubernetes compatible networking approach (SLIRP) Add role aggregation for our roles Add support for setting a disks serial number Add support for specyfing the CPU model Add support for setting an network intefraces MAC address Relocate binaries for FHS conformance Logging improvements Template fixes Fix OpenShift CRD validation virtctl: Improve vnc logging improvements virtctl: Add expose virtctl: Use PATCH instead of PUT" }, { - "id": 133, + "id": 132, "url": "/2018/Unit-Test-Howto.html", "title": "Unit Test Howto", "author" : "yuvalif", "tags" : "unit testing", "body": "There are way too many reasons to write unit tests, but my favorite one is: the freedom to hack, modify and improve the code without fear, and get quick feedback that you are on the right track. Of course, writing good integration tests (the stuff under the tests directory) is the best way to validate that everything works, but unit tests has great value as: They are much faster to run (~30 seconds in our case) You get nice coverage reports with coveralls No need to: make cluster up/sync Cover corner cases and easier to debug Some Notes: We use same frameworks (ginkgo, gomega) for unit testing and integration testing, which means that with the same learning curve, you can develop much more! “Bang for the Buck” - it usually takes 20% of the time to get to 80% coverage, and 80% of the time to get to 100%. Which mean that you have to use common sense when improving coverage - some code is just fine with 80% coverage (e. g. large files calling some other APIs with little logic), and other would benefit from getting close to 100% (e. g. 
complex core functionality handling lots of error cases) Follow the “boy (or girl) scout rule” - every time you enhance/fix some code, add more testing around the existing code as well Avoid “white box testing”, as this will cause endless maintenance of the test code. Best way to assure that, is to put the test code under a different package than the code under test Explore coveralls. Not only it will show you the coverage and the overall trend, it will also help you understand which tests are missing. When drilling down into a file, you can see hits per line, and make better decision on what needs to be covered next FrameworksThere are several frameworks we use to write unit tests: The tests themselves are written using ginkgo, which is a Behavior-Driven Development (BDD) framework The library used for assertions in the tests is gomega. It has a very rich set of matchers, so, before you write you own code around the “equal” matcher, check here to see if there is a more expressive assertion you can use We use GoMock to generate mocks for the different kubevirt interfaces and objects. The command make generate will (among other things) create a file holding the mocked version of our objects and interfaces Many examples exist in our code on how to use this framework Also see here for sample code from GoMock If you need mocks for k8s objects and interfaces, use their framework. They have a tool called client-gen, which generates both the code and the mocks based on the defined APIs The generated mock interfaces and objects of the k8s client are here. Note that they a use a different mechanism to control the mocked behavior than the one used in GoMock Mocked actions are more are here Unit test utilities are placed under testutils Some integration test utilities are also useful for unit testing, see this file When testing interfaces, a mock HTTP server is usually needed. For that we use the golang httptest package gomega also has a package called ghttp that could be used for same purpose Best Practices and Tipsginkgo: Don’t mix setup and tests, use BeforeEach/JustBeforeEach for setup and It/Specify for tests Don’t write setup/cleanup code under Describe/Context clause, which is not inside BeforeEach/AfterEach etc. Make sure that any state change inside an “It” clause, that may impact other tests, is reverted in “AfterEach” Don’t assume the “It” clauses, which are at the same level, are invoked in any specific ordergomega: Be verbose and use specific matchers. For example, to check that an array has N elements, you can use: Expect(len(arr)). To(Equal(N))But a better way would be: Expect(arr). To(HaveLen(N))Function Override: Sometimes the code under test is invoking a function which is not mocked. In most cases, this is an indication that the code needs to be refactored, so this function, or its return values, will be passed as part of the API of the code being tested. However, if this refactoring is not possible (or too costly), you can inject your own implementation of this function. The original function should be defined as a closure, and assigned to a global variable. Since functions are 1st class citizens in go, you can assign your implementation to that function variable. 
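As a rough sketch of the pattern (hypothetical names, not code taken from the KubeVirt tree), the code under test keeps the dependency in a package-level function variable, and the test swaps in a stub and restores the original afterwards:

  // greeting.go, package mypkg
  package mypkg

  import "os"

  // Hostname holds the real implementation; tests may assign a stub to it.
  var Hostname = os.Hostname

  func Greeting() (string, error) {
      name, err := Hostname()
      if err != nil {
          return "", err
      }
      return "hello from " + name, nil
  }

  // greeting_test.go, package mypkg_test (external test package, so only exported names are used)
  package mypkg_test

  import (
      . "github.com/onsi/ginkgo"
      . "github.com/onsi/gomega"

      "kubevirt.io/kubevirt/pkg/mypkg" // hypothetical import path
  )

  var _ = Describe("Greeting", func() {
      It("uses the overridden hostname function", func() {
          original := mypkg.Hostname
          defer func() { mypkg.Hostname = original }() // restore to avoid leaking state into other tests
          mypkg.Hostname = func() (string, error) { return "node01", nil }

          msg, err := mypkg.Greeting()
          Expect(err).ToNot(HaveOccurred())
          Expect(msg).To(Equal("hello from node01"))
      })
  })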
More detailed example is here " }, { - "id": 134, + "id": 133, "url": "/2018/Run-Istio-with-kubevirt.html", "title": "Run Istio With Kubevirt", "author" : "SchSeba", "tags" : "istio, service mesh", "body": "On this blog post, we are going to deploy virtual machines with the KubeVirt project and insert them into the Istio service mesh. Some information about the technologies we are going to use in this blog post. Kubernetes: Production-Grade Container Orchestration. Kubernetes is an open-source system for automating deployment, scaling, and management of containerized applications. Kubeadm: kubeadm helps you bootstrap a minimum viable Kubernetes cluster that conforms to best practices. Calico: Calico provides secure network connectivity for containers and virtual machine workloads. Calico creates and manages a flat layer 3 network, assigning each workload a fully routable IP address. Workloads can communicate without IP encapsulation or network address translation for bare metal performance, easier troubleshooting, and better interoperability. In environments that require an overlay, Calico uses IP-in-IP tunneling or can work with other overlay networking such as flannel. KubeVirt: Virtualization API for kubernetes in order to manage virtual machines KubeVirt technology addresses the needs of development teams that have adopted or want to adopt Kubernetes but possess existing Virtual Machine-based workloads that cannot be easily containerized. More specifically, the technology provides a unified development platform where developers can build, modify, and deploy applications residing in both Application Containers as well as Virtual Machines in a common, shared environment. Benefits are broad and significant. Teams with a reliance on existing virtual machine-based workloads are empowered to rapidly containerize applications. With virtualized workloads placed directly in development workflows, teams can decompose them over time while still leveraging remaining virtualized components as is comfortably desired. Istio: An open platform to connect, manage, and secure microservices. Istio provides an easy way to create a network of deployed services with load balancing, service-to-service authentication, monitoring, and more, without requiring any changes in service code. You add Istio support to services by deploying a special sidecar proxy throughout your environment that intercepts all network communication between microservices, configured and managed using Istio’s control plane functionality. Bookinfo application: A simple application that displays information about a book, similar to a single catalog entry of an online book store. Displayed on the page is a description of the book, book details (ISBN, number of pages, and so on), and a few book reviews. The Bookinfo application is broken into four separate microservices: productpage. The productpage microservice calls the details and reviews microservices to populate the page. details. The details microservice contains book information. reviews. The reviews microservice contains book reviews. It also calls the ratings microservice. ratings. The ratings microservice contains book ranking information that accompanies a book review. Note: This demo is going to be deployed on a kubernetes 1. 10 cluster. Requirements docker kubeadmFollow this document to install everything we need for the POC DeploymentFor the POC we clone this repo The repo contains all the configuration we need to deploy KubeVirt and Istio. kubevirt. yaml istio-demo-auth. 
yamlIt also contains the deployment configuration of our sample application. bookinfo. yaml bookinfo-gateway. yamlRun the bash script cd kubevirt-istio-poc. /deploy-istio-poc. shDemo applicationWe are going to use the bookinfo sample application from the istio webpage. The following yaml will deploy the bookinfo application with a ‘small’ change the details service will run on a virtual machine inside our kubernetes cluster! Note: it will take like 5 minutes for the application to by running inside the virtual machine because we install git and ruby, then clone the istio repo and start the application. POC detailsLets start with the bash script: #!/bin/bashset -xkubeadm init --pod-network-cidr=192. 168. 0. 0/16yes | cp -i /etc/kubernetes/admin. conf $HOME/. kube/configkubectl apply -f https://docs. projectcalico. org/v3. 0/getting-started/kubernetes/installation/hosted/kubeadm/1. 7/calico. yamlwhile [[ $(kubectl get po -n kube-system | grep kube-dns | grep Running | wc -l) -eq 0 ]]do echo Calico deployment is no ready yet. sleep 5doneecho Calico is ready. echo Taint the master node. kubectl taint nodes --all node-role. kubernetes. io/master-echo Deploy kubevirt. kubectl apply -f https://github. com/kubevirt/kubevirt/releases/download/v0. 7. 0/kubevirt. yamlecho Deploy istio. kubectl apply -f istio-demo-auth. yamlecho Add istio-injection to the default namespace. kubectl label namespace default istio-injection=enabledwhile [[ $(kubectl get po -n istio-system | grep sidecar-injector | grep Running | wc -l) -eq 0 ]]do echo Istio deployment is no ready yet. sleep 5doneecho Istio is ready. sleep 20echo Deploy the bookinfo example applicationkubectl apply -f bookinfo. yamlkubectl apply -f bookinfo-gateway. yamlThe follow script create a kubernetes cluster using the kubeadm command, deploy calico as a network CNI and taint the master node (have only one node in the cluster). After the cluster is up the script deploy both istio with mutual TLS and kubevirt projects, it also add the auto injection to the default namespace. At last the script deploy the bookinfo demo application that we change a bit. Lets take a closer look in the virtual machine part inside the bookinfo. yaml file ################################################################################################### Details service##################################################################################################apiVersion: v1kind: Servicemetadata: name: details labels: app: detailsspec: ports: - port: 9080 name: http selector: app: details---apiVersion: kubevirt. io/v1alpha2kind: VirtualMachineInstancemetadata: creationTimestamp: null labels: special: vmi-details app: details version: v1 name: vmi-detailsspec: domain: devices: disks: - disk: bus: virtio name: registrydisk volumeName: registryvolume - disk: bus: virtio name: cloudinitdisk volumeName: cloudinitvolume interfaces: - name: testSlirp slirp: {} ports: - name: http port: 9080 machine: type: resources: requests: memory: 1024M networks: - name: testSlirp pod: {} terminationGracePeriodSeconds: 0 volumes: - name: registryvolume registryDisk: image: kubevirt/fedora-cloud-container-disk-demo:latest - cloudInitNoCloud: userData: |- #!/bin/bash echo fedora |passwd fedora --stdin yum install git ruby -y git clone https://github. com/istio/istio. git cd istio/samples/bookinfo/src/details/ ruby details. rb 9080 & name: cloudinitvolumestatus: {}---. . . . . . . . . . 
Details:: Create a network of type podnetworks: - name: testSlirp pod: {} Create an interface of type slirp and connect it to the pod network by matching the pod network name Add our application portinterfaces: - name: testSlirp slirp: {} ports: - name: http port: 9080 Use the cloud init script to download install and run the details application- cloudInitNoCloud: userData: |- #!/bin/bash echo fedora |passwd fedora --stdin yum install git ruby -y git clone https://github. com/istio/istio. git cd istio/samples/bookinfo/src/details/ ruby details. rb 9080 & name: cloudinitvolumePOC CheckAfter running the bash script the environment should look like this NAME READY STATUS RESTARTS AGEproductpage-v1-7bbdd59459-w6nwq 2/2 Running 0 1hratings-v1-76dc7f6b9-6n6s9 2/2 Running 0 1hreviews-v1-64545d97b4-tvgl2 2/2 Running 0 1hreviews-v2-8cb9489c6-wjp9x 2/2 Running 0 1hreviews-v3-6bc884b456-hr5bm 2/2 Running 0 1hvirt-launcher-vmi-details-94pb6 3/3 Running 0 1hLet’s find the istio ingress service port # kubectl get service -n istio-system | grep istio-ingressgatewayistio-ingressgateway LoadBalancer 10. 97. 163. 91 <pending> 80:31380/TCP,443:31390/TCP,31400:31400/TCP 3hThen browse the following url http://<k8s-node-ip-address>:<istio-ingress-service-port-exposed-by-k8s>/productpageExample: http://10. 0. 0. 1:31380/productpageConclusionsThis POC show how we can use KubeVirt with Istio to integrate the Istio service mesh to virtual machine workloads running inside our kubernetes cluster. " }, { - "id": 135, + "id": 134, "url": "/2018/KVM-Using-Device-Plugins.html", "title": "Kvm Using Device Plugins", "author" : "stu-gott", "tags" : "kvm, qemu, device plugins", "body": "As of Kubernetes 1. 10, the Device Plugins API is now in beta! KubeVirt is nowusing this framework to provide hardware acceleration and network devices tovirtual machines. The motivation behind this is that virt-launcher pods are nolonger responsible for creating their own device nodes. Or stated another way:virt-launcher pods no longer require excess privileges just for the purpose ofcreating device nodes. Kubernetes Device Plugin Basics: Device Plugins consist of two main parts: a server that provides devices andpods that consume them. Each plugin server is used to share a preconfiguredlist of devices local to the node with pods scheduled on that node. Kubernetesmarks each node with the devices it’s capable of sharing, and uses the presenceof such devices when scheduling pods. Device Plugins In KubeVirt: Providing Devices: In KubeVirt virt-handler takes on the role of the device plugin server. When itstarts up on each node, it registers with the Kubernetes Device Plugin API andadvertises KVM and TUN devices. apiVersion: v1kind: Nodemetadata: . . . spec: . . . status: allocatable: cpu: 2 devices. kubevirt. io/kvm: 110 devices. kubevirt. io/tun: 110 pods: 110 . . . capacity: cpu: 2 devices. kubevirt. io/kvm: 110 devices. kubevirt. io/tun: 110 pods: 110 . . . In this case advertising 110 KVM or TUN devices is simply an arbitrary defaultbased on the number of pods that node is limited to. Consuming Devices: Now any pod that requests a devices. kubevirt. io/kvm ordevices. kubevirt. io/tun device can only be scheduled on nodes which providethem. On clusters where KubeVirt is deployed this conveniently happens to beall nodes in the cluster that have these physical devices, which normally meansall nodes in the cluster. Here’s an excerpt of what the pod spec looks like in this case. apiVersion: v1kind: Podmetadata: . . . 
spec: containers: - command: - /entrypoint. sh . . . name: compute . . . resources: limits: devices. kubevirt. io/kvm: 1 devices. kubevirt. io/tun: 1 requests: devices. kubevirt. io/kvm: 1 devices. kubevirt. io/tun: 1 memory: 161679432 securityContext: capabilities: add: - NET_ADMIN privileged: false runAsUser: 0 . . . Of special note is the securityContext stanza. The only special privilegerequired is the NET_ADMIN capability! This is needed by libvirt to set up thedomain’s networking stack. " }, { - "id": 136, + "id": 135, "url": "/2018/Proxy-vm-conclusion.html", "title": "Proxy VM Conclusion", "author" : "SchSeba", "tags" : "istio, multus, roadmap", "body": "This blog post follow my previous research on how to allow vms inside a k8s cluster tp play nice with istio and other sidecars. Research conclusions and network roadmapAfter the deep research about different options/ways to connect VM to pods, we find that all the solution have different pros and cons. All the represented solution need access to kernel modules and have the risk of conflicting with other networking tools. We decided to implement a 100% Kubernetes compatible network approach on the KubeVirt project by using the slirp interface qemu provides. This approach let the VM (from a networking perspective) behave like a process. Thus all traffic is going in and out of TCP or UDP sockets. The approach especially needs to avoid to rely on any specific Kernel configurations (like iptables, ebtables, tc, …) in order to not conflict with other Kubernetes networking tools like Istio or multus. This is just an intermediate solution, because it’s shortcomings (unmaintained, unsafe, not performing well) Slirp interface: Pros: vm ack like a process No external modules needed No external process needed Works with any sidecar solution no rely on any specific Kernel configurations pod can run without privilegeCons: poor performance use userspace network stackIptables only: Pros: No external modules needed No external process needed All the traffic is handled by the kernel user space not involvedCons: Istio dedicated solution! Not other process can change the iptables rulesIptables with a nat-proxy: Pros: No external modules needed Works with any sidecar solutionCons: Not other process can change the iptables rules External process needed The traffic is passed to user space Only support ingress TCP connectionIptables with a trasperent-proxy: Pros: other process can change the nat table (this solution works on the mangle table) better preformance comparing to nat-proxy Works with any sidecar solutionCons: Need NET_ADMIN capability for the docker External process needed The traffic is passed to user space Only support ingress TCP connection" }, { - "id": 137, + "id": 136, "url": "/2018/changelog-v0.6.0.html", "title": "KubeVirt v0.6.0", "author" : "kube🤖", "tags" : "release notes, changelog", "body": "v0. 6. 0: Released on: Mon Jun 11 09:30:28 2018 +0200 A range of flakyness reducing test fixes Vagrant setup got deprectated Updated Docker and CentOS versions Add Kubernetes 1. 10. 
3 to test matrix A couple of ginkgo concurrency fixes A couple of spelling fixes A range if infra updates Use /dev/kvm if possible, otherwise fallback to emulation Add default view/edit/admin RBAC Roles Network MTU fixes CDRom drives are now read-only Secrets can now be correctly referenced on VMs Add disk boot ordering Add virtctl version Add virtctl expose Fix virtual machine memory calculations Add basic virtual machine Network API" }, { - "id": 138, + "id": 137, "url": "/2018/Non-Dockerized-Build.html", "title": "Non Dockerized Build", "author" : "yuvalif", "tags" : "docker, container, build", "body": "In this post we will set up an alternative to the existing containerized build system used in KubeVirt. A new makefile will be presented here, which you can for experimenting (if you are brave enough…) Why?Current build system for KubeVirt is done inside docker. This ensures a robust and consistent build environment: No need to install system dependencies Controlled versions of these dependencies Agnostic of local golang environmentSo, in general, you should just use the dockerized build system. Still, there are some drawbacks there: Tool integration: Since your tools are not running in the dockerized environment, they may give different outcome than the ones running in the dockerized environment Invoking any of the dockerized scripts (under hack directory) may be inconsistent with the outside environment (e. g. file path is different than the one on your machine) Build time: the dockerized build has some small overheads, and some improvements are still needed to make sure that caching work properly and build is optimized And last, but not least, sometimes it is just hard to resist the tinkering…How?: Currently, the Makefile includes targets that address different things: building, dependencies, cluster management, testing etc. - here I tried to modify the minimum which is required for non-containerized build. Anything not related to it, should just be done using the existing Makefile. note “NoteCross compilation is not covered here (e. g. building virtctl for mac and windows) Prerequisites: Best place to look for that is in the docker file definition for the build environment: hack/docker-builder/Dockerfile Note that not everything from there is needed for building, so the bare minimum on Fedora27 would be: sudo dnf install -y gitsudo dnf install -y libvirt-develsudo dnf install -y golangsudo dnf install -y dockersudo dnf install -y qemu-imgSimilarly to the containerized case, docker is still needed (e. g. all the cluster stuff is done via docker), and therefore, any docker related preparations are needed as well. This would include running docker on startup and making sure that docker commands does not need root privileges. On Fedora27 this would mean: sudo groupadd dockersudo usermod -aG docker $USERsudo systemctl enable dockersudo systemctl start dockerNow, getting the actual code could be done either via go get (don’t forget to set the GOPATH environment variable): go get -d kubevirt. io/kubevirt/. . . Or git clone: mkdir -p $GOPATH/src/kubevirt. io/ && cd $GOPATH/src/kubevirt. io/git clone https://github. com/kubevirt/kubevirtMakefile. nocontainer: all: buildbootstrap: go get -u github. com/onsi/ginkgo/ginkgo go get -u mvdan. cc/sh/cmd/shfmt go get -u -d k8s. io/code-generator/cmd/deepcopy-gen go get -u -d k8s. io/code-generator/cmd/defaulter-gen go get -u -d k8s. io/code-generator/cmd/openapi-gen cd ${GOPATH}/src/k8s. io/code-generator/cmd/deepcopy-gen && git checkout release-1. 
9 && go install cd ${GOPATH}/src/k8s. io/code-generator/cmd/defaulter-gen && git checkout release-1. 9 && go install cd ${GOPATH}/src/k8s. io/code-generator/cmd/openapi-gen && git checkout release-1. 9 && go installgenerate: . /hack/generate. shapidocs: generate . /hack/gen-swagger-doc/gen-swagger-docs. sh v1 htmlbuild: check go install -v . /cmd/. . . . /pkg/. . . . /hack/copy-cmd. shtest: build go test -v -cover . /pkg/. . . check: . /hack/check. shOUT_DIR=. /_outTESTS_OUT_DIR=${OUT_DIR}/testsfunctest: build go build -v . /tests/. . . ginkgo build . /tests mkdir -p ${TESTS_OUT_DIR}/ mv . /tests/tests. test ${TESTS_OUT_DIR}/ . /hack/functests. shcluster-sync: build . /hack/build-copy-artifacts. sh . /hack/build-manifests. sh . /hack/build-docker. sh build . /cluster/clean. sh . /cluster/deploy. sh. PHONY: bootstrap generate apidocs build test check functest cluster-syncTargets: To execute any of the targets use: make -f Makefile. nocontainer <target>File has the following targets: bootstrap: this is actually part of the prerequisites, but added all golang tool dependencies here, since this is agnostic of the running platform Should be called once Note that the k8s code generators use specific version Note that these are not code dependencies, as they are handled by using a vendor directory, as well as the distclean, deps-install and deps-update targets in the standard makefile generate: Calling hack/generate. sh script similarly to the standard makefile. It builds all generators (under the tools directory) and use them to generate: test mocks, KubeVirt resources and test yamls apidocs: this is similar to apidocs target in the standard makefile build: this is building all product binaries, and then using a script (copy-cmd. sh, should be placed under: hack) to copy the binaries from their standard location into the _out directory, where the cluster management scripts expect them test: building and running unit testscheck: using similar code to the one used in the standard makefile: formatting files, fixing package imports and calling go vet functest: building and running integration tests. After tests are built , they are moved to the _out directory so that the standard script for running integration tests would find them cluster-sync: this is the only “cluster management” target that had to be modified from the standard makefile" }, { - "id": 139, + "id": 138, "url": "/2018/Research-run-VMs-with-istio-service-mesh.html", "title": "Research Run Vms With Istio Service Mesh", "author" : "SchSeba", "tags" : "istio, iptables, libvirt, tproxy, service mesh, ebtables", "body": "In this blog post we are going to talk about istio and virtual machines on top of Kubernetes. Some of the components we are going to use are istio, libvirt, ebtables, iptables, and tproxy. Please review the links provided for an overview and deeper dive into each technology Research explanationOur research goal was to give virtual machines running inside pods (KubeVirt project) all the benefits Kubernetes have to offer, one of them is a service mesh like istio. Iptables only with dnat and source nat configuration: This configuration is istio only! 
For this solution we created the following architecture With the following yaml configuration apiVersion: v1kind: Servicemetadata: name: application-devel labels: app: libvirtd-develspec: ports: - port: 9080 name: http selector: app: libvirtd-devel---apiVersion: v1kind: Servicemetadata: name: libvirtd-client-devel labels: app: libvirtd-develspec: ports: - port: 16509 name: client-connection - port: 5900 name: spice - port: 22 name: ssh selector: app: libvirtd-devel type: LoadBalancer---apiVersion: extensions/v1beta1kind: Deploymentmetadata: creationTimestamp: null name: libvirtd-develspec: replicas: 1 strategy: {} template: metadata: annotations: sidecar. istio. io/status: '{ version : 43466efda2266e066fb5ad36f2d1658de02fc9411f6db00ccff561300a2a3c78 , initContainers :[ istio-init , enable-core-dump ], containers :[ istio-proxy ], volumes :[ istio-envoy , istio-certs ]}' creationTimestamp: null labels: app: libvirtd-devel spec: containers: - image: docker. io/sebassch/mylibvirtd:devel imagePullPolicy: Always name: compute ports: - containerPort: 9080 - containerPort: 16509 - containerPort: 5900 - containerPort: 22 securityContext: capabilities: add: - ALL privileged: true runAsUser: 0 volumeMounts: - mountPath: /var/lib/libvirt/images name: test-volume - mountPath: /host-dev name: host-dev - mountPath: /host-sys name: host-sys resources: {} env: - name: LIBVIRTD_DEFAULT_NETWORK_DEVICE value: eth0 - args: - proxy - sidecar - --configPath - /etc/istio/proxy - --binaryPath - /usr/local/bin/envoy - --serviceCluster - productpage - --drainDuration - 45s - --parentShutdownDuration - 1m0s - --discoveryAddress - istio-pilot. istio-system:15005 - --discoveryRefreshDelay - 1s - --zipkinAddress - zipkin. istio-system:9411 - --connectTimeout - 10s - --statsdUdpAddress - istio-mixer. istio-system:9125 - --proxyAdminPort - 15000 - --controlPlaneAuthPolicy - MUTUAL_TLS env: - name: POD_NAME valueFrom: fieldRef: fieldPath: metadata. name - name: POD_NAMESPACE valueFrom: fieldRef: fieldPath: metadata. namespace - name: INSTANCE_IP valueFrom: fieldRef: fieldPath: status. podIP image: docker. io/istio/proxy:0. 7. 1 imagePullPolicy: IfNotPresent name: istio-proxy resources: {} securityContext: privileged: false readOnlyRootFilesystem: true runAsUser: 1337 volumeMounts: - mountPath: /etc/istio/proxy name: istio-envoy - mountPath: /etc/certs/ name: istio-certs readOnly: true initContainers: - args: - -p - 15001 - -u - 1337 image: docker. io/istio/proxy_init:0. 7. 1 imagePullPolicy: IfNotPresent name: istio-init resources: {} securityContext: capabilities: add: - NET_ADMIN - args: - -c - sysctl -w kernel. core_pattern=/etc/istio/proxy/core. %e. %p. %t && ulimit -c unlimited command: - /bin/sh image: alpine imagePullPolicy: IfNotPresent name: enable-core-dump resources: {} securityContext: privileged: true volumes: - emptyDir: medium: Memory name: istio-envoy - name: istio-certs secret: optional: true secretName: istio. default - name: host-dev hostPath: path: /dev type: Directory - name: host-sys hostPath: path: /sys type: Directory - name: test-volume hostPath: # directory location on host path: /bricks/brick1/volume/Images # this field is optional type: Directorystatus: {}---apiVersion: extensions/v1beta1kind: Ingressmetadata: name: gateway-devel annotations: kubernetes. io/ingress. class: istio spec: rules: - http: paths: - path: /devel-myvm backend: serviceName: application-devel servicePort: 9080When the my-libvirt container starts it runs an entry point script for iptables configuration. 1. 
iptables -t nat -D PREROUTING 12. iptables -t nat -A PREROUTING -p tcp -m comment --comment KubeVirt Spice --dport 5900 -j ACCEPT3. iptables -t nat -A PREROUTING -p tcp -m comment --comment KubeVirt virt-manager --dport 16509 -j ACCEPT4. iptables -t nat -A PREROUTING -d 10. 96. 0. 0/12 -m comment --comment istio/redirect-ip-range-10. 96. 0. 0/12-service cidr -j ISTIO_REDIRECT5. iptables -t nat -A PREROUTING -d 192. 168. 0. 0/16 -m comment --comment istio/redirect-ip-range-192. 168. 0. 0/16-Pod cidr -j ISTIO_REDIRECT6. iptables -t nat -A OUTPUT -d 127. 0. 0. 1/32 -p tcp -m comment --comment KubeVirt mesh application port --dport 9080 -j DNAT --to-destination 10. 0. 0. 27. iptables -t nat -A POSTROUTING -s 127. 0. 0. 1/32 -d 10. 0. 0. 2/32 -m comment --comment KubeVirt VM Forward -j SNAT --to-source `ifconfig eth0 | grep inet | awk '{print $2}'Now let’s explain every one of these lines: Remove istio ingress connection rule that send all the ingress traffic directly to the envoy proxy (our vm traffic is ingress traffic for our pod) Allow ingress connection with spice port to get our libvirt process running in the pod Allow ingress connection with virt-manager port to get our libvirt process running in the pod Redirect all the traffic that came from the k8s clusters services to the envoy process Redirect all the traffic that came from the k8s clusters pods to the envoy process Send all the traffic that came from envoy process to our vm by changing the destination ip address to ur vm ip address Change the source ip address of the packet send by envoy from localhost to the pod ip address so the virtual machine can return the connectionIptables configuration conclusions: With this configuration all the traffic that exit the virtual machine to a k8s service will pass the envoy process and will enter the istio service mash. Also all the traffic that came into the pod will be pass to envoy and after that it will be send to our virtual machine Egress data flow in this solution: Ingress data flow in this solution: Pros: No external modules needed No external process needed All the traffic is handled by the kernel user space not involvedCons: Istio dedicated solution! Not other process can change the iptables rulesIptables with a nat-proxy process: For this solution a created the following architecture With the following yaml configuration apiVersion: v1kind: Servicemetadata: name: application-nat-proxt labels: app: libvirtd-nat-proxtspec: ports: - port: 9080 name: http selector: app: libvirtd-nat-proxt type: LoadBalancer---apiVersion: v1kind: Servicemetadata: name: libvirtd-client-nat-proxt labels: app: libvirtd-nat-proxtspec: ports: - port: 16509 name: client-connection - port: 5900 name: spice - port: 22 name: ssh selector: app: libvirtd-nat-proxt type: LoadBalancer---apiVersion: extensions/v1beta1kind: Deploymentmetadata: creationTimestamp: null name: libvirtd-nat-proxtspec: replicas: 1 strategy: {} template: metadata: annotations: sidecar. istio. io/status: '{ version : 43466efda2266e066fb5ad36f2d1658de02fc9411f6db00ccff561300a2a3c78 , initContainers :[ istio-init , enable-core-dump ], containers :[ istio-proxy ], volumes :[ istio-envoy , istio-certs ]}' creationTimestamp: null labels: app: libvirtd-nat-proxt spec: containers: - image: docker. 
io/sebassch/mylibvirtd:devel imagePullPolicy: Always name: compute ports: - containerPort: 9080 - containerPort: 16509 - containerPort: 5900 - containerPort: 22 securityContext: capabilities: add: - ALL privileged: true runAsUser: 0 volumeMounts: - mountPath: /var/lib/libvirt/images name: test-volume - mountPath: /host-dev name: host-dev - mountPath: /host-sys name: host-sys resources: {} env: - name: LIBVIRTD_DEFAULT_NETWORK_DEVICE value: eth0 - image: docker. io/sebassch/mynatproxy:devel imagePullPolicy: Always name: proxy resources: {} securityContext: privileged: true capabilities: add: - NET_ADMIN - args: - proxy - sidecar - --configPath - /etc/istio/proxy - --binaryPath - /usr/local/bin/envoy - --serviceCluster - productpage - --drainDuration - 45s - --parentShutdownDuration - 1m0s - --discoveryAddress - istio-pilot. istio-system:15005 - --discoveryRefreshDelay - 1s - --zipkinAddress - zipkin. istio-system:9411 - --connectTimeout - 10s - --statsdUdpAddress - istio-mixer. istio-system:9125 - --proxyAdminPort - 15000 - --controlPlaneAuthPolicy - MUTUAL_TLS env: - name: POD_NAME valueFrom: fieldRef: fieldPath: metadata. name - name: POD_NAMESPACE valueFrom: fieldRef: fieldPath: metadata. namespace - name: INSTANCE_IP valueFrom: fieldRef: fieldPath: status. podIP image: docker. io/istio/proxy:0. 7. 1 imagePullPolicy: IfNotPresent name: istio-proxy resources: {} securityContext: privileged: false readOnlyRootFilesystem: true runAsUser: 1337 volumeMounts: - mountPath: /etc/istio/proxy name: istio-envoy - mountPath: /etc/certs/ name: istio-certs readOnly: true initContainers: - args: - -p - 15001 - -u - 1337 - -i - 10. 96. 0. 0/12,192. 168. 0. 0/16 image: docker. io/istio/proxy_init:0. 7. 1 imagePullPolicy: IfNotPresent name: istio-init resources: {} securityContext: capabilities: add: - NET_ADMIN - args: - -c - sysctl -w kernel. core_pattern=/etc/istio/proxy/core. %e. %p. %t && ulimit -c unlimited command: - /bin/sh image: alpine imagePullPolicy: IfNotPresent name: enable-core-dump resources: {} securityContext: privileged: true volumes: - emptyDir: medium: Memory name: istio-envoy - name: istio-certs secret: optional: true secretName: istio. default - name: host-dev hostPath: path: /dev type: Directory - name: host-sys hostPath: path: /sys type: Directory - name: test-volume hostPath: # directory location on host path: /bricks/brick1/volume/Images # this field is optional type: Directorystatus: {}---apiVersion: extensions/v1beta1kind: Ingressmetadata: name: gateway-nat-proxt annotations: kubernetes. io/ingress. class: istio spec: rules: - http: paths: - path: /nat-proxt-myvm backend: serviceName: application-nat-proxt servicePort: 9080When the mynatproxy container starts it runs an entry point script for iptables configuration. 1. iptables -t nat -I PREROUTING 1 -p tcp -s 10. 0. 1. 2 -m comment --comment nat-proxy redirect -j REDIRECT --to-ports 80802. iptables -t nat -I OUTPUT 1 -p tcp -s 10. 0. 1. 2 -j ACCEPT3. iptables -t nat -I POSTROUTING 1 -s 10. 0. 1. 2 -p udp -m comment --comment nat udp connections -j MASQUERADENow let’s explain every one of these lines: Redirect all the tcp traffic that came from the virtual machine to our proxy on port 8080 Accept all the traffic that go from the pod to the virtual machine Nat all the udp traffic that came from the virtual machineThis solution uses a container I created that has two processes inside, one for the egress traffic of the virtual machine and one for the ingress traffic. 
For the egress traffic I used a program writen in golang, and for the ingress traffic I used haproxy. The nat-proxy used a system call to get the original destination address and port that it’s being redirected to us from the iptables rules I created. The extract function: func getOriginalDst(clientConn *net. TCPConn) (ipv4 string, port uint16, newTCPConn *net. TCPConn, err error) { if clientConn == nil { log. Printf( copy(): oops, dst is nil! ) err = errors. New( ERR: clientConn is nil ) return } // test if the underlying fd is nil remoteAddr := clientConn. RemoteAddr() if remoteAddr == nil { log. Printf( getOriginalDst(): oops, clientConn. fd is nil! ) err = errors. New( ERR: clientConn. fd is nil ) return } srcipport := fmt. Sprintf( %v , clientConn. RemoteAddr()) newTCPConn = nil // net. TCPConn. File() will cause the receiver's (clientConn) socket to be placed in blocking mode. // The workaround is to take the File returned by . File(), do getsockopt() to get the original // destination, then create a new *net. TCPConn by calling net. Conn. FileConn(). The new TCPConn // will be in non-blocking mode. What a pain. clientConnFile, err := clientConn. File() if err != nil { log. Printf( GETORIGINALDST|%v->?->FAILEDTOBEDETERMINED|ERR: could not get a copy of the client connection's file object , srcipport) return } else { clientConn. Close() } // Get original destination // this is the only syscall in the Golang libs that I can find that returns 16 bytes // Example result: &{Multiaddr:[2 0 31 144 206 190 36 45 0 0 0 0 0 0 0 0] Interface:0} // port starts at the 3rd byte and is 2 bytes long (31 144 = port 8080) // IPv4 address starts at the 5th byte, 4 bytes long (206 190 36 45) addr, err := syscall. GetsockoptIPv6Mreq(int(clientConnFile. Fd()), syscall. IPPROTO_IP, SO_ORIGINAL_DST) log. Printf( getOriginalDst(): SO_ORIGINAL_DST=%+v\n , addr) if err != nil { log. Printf( GETORIGINALDST|%v->?->FAILEDTOBEDETERMINED|ERR: getsocketopt(SO_ORIGINAL_DST) failed: %v , srcipport, err) return } newConn, err := net. FileConn(clientConnFile) if err != nil { log. Printf( GETORIGINALDST|%v->?->%v|ERR: could not create a FileConn fron clientConnFile=%+v: %v , srcipport, addr, clientConnFile, err) return } if _, ok := newConn. (*net. TCPConn); ok { newTCPConn = newConn. (*net. TCPConn) clientConnFile. Close() } else { errmsg := fmt. Sprintf( ERR: newConn is not a *net. TCPConn, instead it is: %T (%v) , newConn, newConn) log. Printf( GETORIGINALDST|%v->?->%v|%s , srcipport, addr, errmsg) err = errors. New(errmsg) return } ipv4 = itod(uint(addr. Multiaddr[4])) + . + itod(uint(addr. Multiaddr[5])) + . + itod(uint(addr. Multiaddr[6])) + . + itod(uint(addr. Multiaddr[7])) port = uint16(addr. Multiaddr[2])<<8 + uint16(addr. Multiaddr[3]) return}After we get the original destination address and port we start a connection to it and copy all the packets. var streamWait sync. WaitGroupstreamWait. Add(2)streamConn := func(dst io. Writer, src io. Reader) { io. Copy(dst, src) streamWait. Done()}go streamConn(remoteConn, VMconn)go streamConn(VMconn, remoteConn)streamWait. Wait()The Haproxy help us with the ingress traffic with the follow configuration defaults mode tcpfrontend main bind *:9080 default_backend guestbackend guest server guest 10. 0. 1. 2:9080 maxconn 2048It sends all the traffic to our virtual machine on the service port the machine is listening. Code repository nat proxy conclusions: This solution is a general solution, not a dedicated solution to istio only. 
Its make the vm traffic look like a regular process inside the pod so it will work with any sidecars projects Egress data flow in this solution: Ingress data flow in this solution: Pros: No external modules needed Works with any sidecar solutionCons: Not other process can change the iptables rules External process needed The traffic is passed to user space Only support ingress TCP connectionIptables with a trasperent-proxy process: This is the last solution I used in my research, it use a kernel module named TPROXY The official documentation from the linux kernel documentation. For this solution I created the following architecture With the follow yaml configuration apiVersion: v1kind: Servicemetadata: name: application-devel labels: app: libvirtd-develspec: ports: - port: 9080 name: http selector: app: libvirtd-devel type: LoadBalancer---apiVersion: v1kind: Servicemetadata: name: libvirtd-client-devel labels: app: libvirtd-develspec: ports: - port: 16509 name: client-connection - port: 5900 name: spice - port: 22 name: ssh selector: app: libvirtd-devel type: LoadBalancer---apiVersion: extensions/v1beta1kind: Deploymentmetadata: creationTimestamp: null name: libvirtd-develspec: replicas: 1 strategy: {} template: metadata: annotations: sidecar. istio. io/status: '{ version : 43466efda2266e066fb5ad36f2d1658de02fc9411f6db00ccff561300a2a3c78 , initContainers :[ istio-init , enable-core-dump ], containers :[ istio-proxy ], volumes :[ istio-envoy , istio-certs ]}' creationTimestamp: null labels: app: libvirtd-devel spec: containers: - image: docker. io/sebassch/mylibvirtd:devel imagePullPolicy: Always name: compute ports: - containerPort: 9080 - containerPort: 16509 - containerPort: 5900 - containerPort: 22 securityContext: capabilities: add: - ALL privileged: true runAsUser: 0 volumeMounts: - mountPath: /var/lib/libvirt/images name: test-volume - mountPath: /host-dev name: host-dev - mountPath: /host-sys name: host-sys resources: {} env: - name: LIBVIRTD_DEFAULT_NETWORK_DEVICE value: eth0 - image: docker. io/sebassch/mytproxy:devel imagePullPolicy: Always name: proxy resources: {} securityContext: privileged: true capabilities: add: - NET_ADMIN - args: - proxy - sidecar - --configPath - /etc/istio/proxy - --binaryPath - /usr/local/bin/envoy - --serviceCluster - productpage - --drainDuration - 45s - --parentShutdownDuration - 1m0s - --discoveryAddress - istio-pilot. istio-system:15005 - --discoveryRefreshDelay - 1s - --zipkinAddress - zipkin. istio-system:9411 - --connectTimeout - 10s - --statsdUdpAddress - istio-mixer. istio-system:9125 - --proxyAdminPort - 15000 - --controlPlaneAuthPolicy - MUTUAL_TLS env: - name: POD_NAME valueFrom: fieldRef: fieldPath: metadata. name - name: POD_NAMESPACE valueFrom: fieldRef: fieldPath: metadata. namespace - name: INSTANCE_IP valueFrom: fieldRef: fieldPath: status. podIP image: docker. io/istio/proxy:0. 7. 1 imagePullPolicy: IfNotPresent name: istio-proxy resources: {} securityContext: privileged: false readOnlyRootFilesystem: true runAsUser: 1337 volumeMounts: - mountPath: /etc/istio/proxy name: istio-envoy - mountPath: /etc/certs/ name: istio-certs readOnly: true initContainers: - args: - -p - 15001 - -u - 1337 - -i - 10. 96. 0. 0/12,192. 168. 0. 0/16 image: docker. io/istio/proxy_init:0. 7. 1 imagePullPolicy: IfNotPresent name: istio-init resources: {} securityContext: capabilities: add: - NET_ADMIN - args: - -c - sysctl -w kernel. core_pattern=/etc/istio/proxy/core. %e. %p. 
%t && ulimit -c unlimited command: - /bin/sh image: alpine imagePullPolicy: IfNotPresent name: enable-core-dump resources: {} securityContext: privileged: true volumes: - emptyDir: medium: Memory name: istio-envoy - name: istio-certs secret: optional: true secretName: istio. default - name: host-dev hostPath: path: /dev type: Directory - name: host-sys hostPath: path: /sys type: Directory - name: test-volume hostPath: # directory location on host path: /bricks/brick1/volume/Images # this field is optional type: Directorystatus: {}---apiVersion: extensions/v1beta1kind: Ingressmetadata: name: gateway-devel annotations: kubernetes. io/ingress. class: istio spec: rules: - http: paths: - path: /devel-myvm backend: serviceName: application-devel servicePort: 9080When the tproxy container starts it runs an entry point script for iptables configuration but this time the proxy redirect came in the mangle table and not in the nat table that because TPROXY module avilable only in the mangle table. TPROXYThis target is only valid in the mangle table, in thePREROUTING chain and user-defined chains which are onlycalled from this chain. It redirects the packet to a localsocket without changing the packet header in any way. It canalso change the mark value which can then be used inadvanced routing rules. iptables rules: iptables -t mangle -vLiptables -t mangle -N KUBEVIRT_DIVERTiptables -t mangle -A KUBEVIRT_DIVERT -j MARK --set-mark 8iptables -t mangle -A KUBEVIRT_DIVERT -j ACCEPTtable=mangleiptables -t ${table} -N KUBEVIRT_INBOUNDiptables -t ${table} -A PREROUTING -p tcp -m comment --comment KubeVirt Spice --dport 5900 -j RETURNiptables -t ${table} -A PREROUTING -p tcp -m comment --comment KubeVirt virt-manager --dport 16509 -j RETURNiptables -t ${table} -A PREROUTING -p tcp -i vnet0 -j KUBEVIRT_INBOUNDiptables -t ${table} -N KUBEVIRT_TPROXYiptables -t ${table} -A KUBEVIRT_TPROXY ! -d 127. 0. 0. 1/32 -p tcp -j TPROXY --tproxy-mark 8/0xffffffff --on-port 9401#iptables -t mangle -A KUBEVIRT_TPROXY ! -d 127. 0. 0. 1/32 -p udp -j TPROXY --tproxy-mark 8/0xffffffff --on-port 8080# If an inbound packet belongs to an established socket, route it to the# loopback interface. iptables -t ${table} -A KUBEVIRT_INBOUND -p tcp -m socket -j KUBEVIRT_DIVERT#iptables -t mangle -A KUBEVIRT_INBOUND -p udp -m socket -j KUBEVIRT_DIVERT# Otherwise, it's a new connection. Redirect it using TPROXY. iptables -t ${table} -A KUBEVIRT_INBOUND -p tcp -j KUBEVIRT_TPROXY#iptables -t mangle -A KUBEVIRT_INBOUND -p udp -j KUBEVIRT_TPROXYiptables -t ${table} -I OUTPUT 1 -d 10. 0. 1. 2 -j ACCEPTtable=nat# Remove vm Connection from iptables rulesiptables -t ${table} -I PREROUTING 1 -s 10. 0. 1. 2 -j ACCEPTiptables -t ${table} -I OUTPUT 1 -d 10. 0. 1. 2 -j ACCEPT# Allow guest -> world -- using nat for UDPiptables -t ${table} -I POSTROUTING 1 -s 10. 0. 1. 2 -p udp -j MASQUERADEFor this solution we also need to load the bridge kernel module modprobe bridgeAnd create some ebtables rules so egress and ingress traffict from the virtial machine will exit the l2 rules and pass to the l3 rules: ebtables -t broute -F # Flush the table # inbound traffic ebtables -t broute -A BROUTING -p IPv4 --ip-dst 10. 0. 1. 2 \ -j redirect --redirect-target DROP # returning outbound traffic ebtables -t broute -A BROUTING -p IPv4 --ip-src 10. 0. 1. 
2 \ -j redirect --redirect-target DROPWe also need to disable rp_filter on the virtual machine interface and the libvirt bridge interface echo 0 > /proc/sys/net/ipv4/conf/virbr0/rp_filterecho 0 > /proc/sys/net/ipv4/conf/virbr0-nic/rp_filterecho 0 > /proc/sys/net/ipv4/conf/vnet0/rp_filterAfter this configuration the container start the semi-tproxy process for egress traffic and the haproxy process for the ingress traffic. The semi-tproxy program is a golag program,binding a listener socket with the IP_TRANSPARENT socket optionPreparing a socket to receive connections with TProxy is really no different than what is normally done when setting up a socket to listen for connections. The only difference in the process is before the socket is bound, the IP_TRANSPARENT socket option. syscall. SetsockoptInt(fileDescriptor, syscall. SOL_IP, syscall. IP_TRANSPARENT, 1)About IP_TRANSPARENT IP_TRANSPARENT (since Linux 2. 6. 24)Setting this boolean option enables transparent proxying onthis socket. This socket option allows the calling applica‐tion to bind to a nonlocal IP address and operate both as aclient and a server with the foreign address as the localend‐point. NOTE: this requires that routing be set up ina way that packets going to the foreign address are routedthrough the TProxy box (i. e. , the system hosting theapplication that employs the IP_TRANSPARENT socket option). Enabling this socket option requires superuser privileges(the CAP_NET_ADMIN capability). TProxy redirection with the iptables TPROXY target alsorequires that this option be set on the redirected socket. Then we set the IP_TRANSPARENT socket option on outbound connectionsSame goes for making connections to a remote host pretending to be the client, the IP_TRANSPARENT socket option is set and the Linux kernel will allow the bind so along as a connection was intercepted with those details being used for the bind. When the process get a new connection we start a connection to the real destination address and copy the traffic between both sockets var streamWait sync. WaitGroupstreamWait. Add(2)streamConn := func(dst io. Writer, src io. Reader) { io. Copy(dst, src) streamWait. Done()}go streamConn(remoteConn, VMconn)go streamConn(VMconn, remoteConn)streamWait. Wait()The Haproxy helps us with the ingress traffic with the follow configuration defaults mode tcpfrontend main bind *:9080 default_backend guestbackend guest server guest 10. 0. 1. 2:9080 maxconn 2048It sends all the traffic to our virtual machine on the service port the machine is listening. Code repository tproxy conclusions: This solution is a general solution, not a dedicated solution to istio only. Its make the vm traffic look like a regular process inside the pod so it will work with any sidecars projects Egress data flow in this solution: Ingress data flow in this solution: Pros: other process can change the nat table (this solution works on the mangle table) better preformance comparing to nat-proxy Works with any sidecar solutionCons: Need NET_ADMIN capability for the docker External process needed The traffic is passed to user space Only support ingress TCP connectionResearch ConclustionKubeVirt shows it is possible to run virtual machines inside a kubernetes cluster, and this post shows that the virtual machine can also get the benefit of it. 
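One practical note on the TPROXY rules shown above: the TPROXY target only marks packets and hands them to a local socket if the kernel's routing decision actually delivers them to the local host. In most TPROXY setups that is done with an extra policy-routing rule keyed on the firewall mark. The snippet below is an illustrative sketch of that step only (it is not part of the rules quoted above, which may handle this elsewhere); it reuses the mark value 8 set by the KUBEVIRT_DIVERT chain, and table number 100 is an arbitrary choice for the example:
# route packets carrying the TPROXY mark (8) through a dedicated routing table
ip rule add fwmark 8 lookup 100
# deliver everything in that table to the local host via the loopback device
ip route add local 0.0.0.0/0 dev lo table 100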
" }, { - "id": 140, + "id": 139, "url": "/2018/Use-VS-Code-for-Kube-Virt-Development.html", "title": "Use Vs Code For Kube Virt Development", "author" : "SchSeba", "tags" : "vscode, development, debug", "body": "In this post we will install and configure Visual Studio Code (vscode) for KubeVirt development and debug. Visual Studio Code is a source code editor developed by Microsoft for Windows, Linux and macOS. It includes support for debugging, embedded Git control, syntax highlighting, intelligent code completion, snippets, and code refactoring. Golang InstallationGO installation is required, We can find the binaries in golang page. Golang Linux Installation: After downloading the binaries extract them with the following command: tar -C /usr/local -xzf go$VERSION. $OS-$ARCH. tar. gzNow let’s Add /usr/local/go/bin to the PATH environment variable. You can do this by adding this line to your /etc/profile (for a system-wide installation) or $HOME/. profile: export PATH=$PATH:/usr/local/go/binGolang Windows Installation: Open the MSI file and follow the prompts to install the Go tools. By default, the installer puts the Go distribution in C:\Go. The installer should put the C:\Go\bin directory in your PATH environment variable. You may need to restart any open command prompts for the change to take effect. VSCODE InstallationNow we will install Visual Studio Code in our system. For linux machines: We need to choose our linux distribution. For RHEL/Centos/Fedora: The following script will install the key and repository: sudo rpm --import https://packages. microsoft. com/keys/microsoft. ascsudo sh -c 'echo -e [code]\nname=Visual Studio Code\nbaseurl=https://packages. microsoft. com/yumrepos/vscode\nenabled=1\ngpgcheck=1\ngpgkey=https://packages. microsoft. com/keys/microsoft. asc > /etc/yum. repos. d/vscode. repo'Then update the package cache and install the package using dnf (Fedora 22 and above): dnf check-updatesudo dnf install codeOr on older versions using yum: yum check-updatesudo yum install codeFor Debian/Ubuntu: We need to download the . deb package from the vscode download page,and from the command line run the package management. sudo dpkg -i <file>. debsudo apt-get install -f # Install dependenciesFor Windows machinesDownload the Visual Studio Code installer, and then run the installer (VSCodeSetup-version. exe) Go Project structLet’s create the following structure for our kubevirt project development environment: ├── <Go-projects-folder> # Your Golang projects root folder│ ├── bin│ ├── pkg│ ├── src│ │ ├── kubevirt. ioNow navigate to kubevirt. io folder and run: git clone <kubevirt-fork>Install VSCODE ExtensionsNow we are going to install some extensions for a better development experience with the IDE. Open vscode and select your go project root folder you created in the last step. On the extensions tab (Ctrl+Shift+X), search for golang and install it. Now open the command palette (Ctrl+Shift+P) view->Command Palette and type “Go: install/update tools”, this will install all the requirements for example: delve debugger, etc… (optional) We can install docker extension for syntax highlighting, commands, etc. . GOPATH and GOROOT configurationOpen the vscode configuration file (ctrl+,) file->preferences->settings. Now on the right file we need to add this configuration: go. gopath : <Go-projects-folder> , go. 
goroot : /usr/local/go , Create debug configurationFor the last part we are going to configure the debugger file, open it by Debug->Open Configurations and add to the configuration list the following structure ** Change the parameter to your golang projects root directory { name : Kubevirt , type : go , request : launch , mode : debug , remotePath : , port : 2345, host : 127. 0. 0. 1 , program : ${fileDirname} , env : {}, args : [ --kubeconfig , cluster/k8s-1. 9. 3/. kubeconfig , --port , 1234 ], showLog : true, cwd : ${workspaceFolder}/src/kubevirt. io/kubevirt , output : <Go-projects-folder>/bin/${fileBasenameNoExtension} ,} Debug ProcessFor debug we need to open the main package we want to debug. For example if we want to debug the virt-api component, open the main package: kubevirt. io/cmd/virt-api/virt-api. go Now change to debug view (ctrl+shift+D), check that we are using the kubevirt configuration and hit the play button More Information: For more information, keyboard shortcuts and advance vscode usage please refer the following link editor code basics " }, { - "id": 141, + "id": 140, "url": "/2018/ovn-multi-network-plugin-for-kubernetes-kubetron.html", "title": "Ovn Multi Network Plugin For Kubernetes Kubetron", "author" : "phoracek", "tags" : "ovn, kubetron, network, neutron", "body": "Kubernetes networking model is suited for containerized applications, based mostly around L4 and L7 services, where all pods are connected to one big network. This is perfectly ok for most use cases. However, sometimes there is a need for fine-grained network configuration with better control. Use-cases such as L2 networks, static IP addresses, interfaces dedicated for storage traffic etc. For such needs there is ongoing effort in Kubernetes sig-network to support multiple networks (see Kubernetes Network CRD De-Facto Standard. There exist many prototypes of plugins providing such functionality. You are reading about one of them. Kubetron (working name, kubernetes + neutron, quite misleading since we want to support setup without Neutron involved too), allows users to connect their pods to multiple networks configured on OVN. Important part here is, that such networks are configured by an external tool, be it OVN Northbound Database client or higher level tool such as Neutron or oVirt Provider OVN. This allows administrators to configure complicated networks, Kubernetes then only knows enough about the known networks to be able to connect to them - but not all the complexity involved to manage them. Kubetron does not affect default Kubernetes networking at all, default networks will be left intact. In order to enable the use-cases outlined above, Kubetron can be used to provide multiple interfaces to a pod, further more KubeVirt will then use those interfaces to pass them to its virtual machines via the in progress VirtualMachine networking API. You can find source code in Kubetron GitHub repository. Contents: Desired Model and Usage Proof of Concept Demo Try it Yourself Looking for Help DisclaimerDesired Model and Usage: Let’s talk about how Kubetron looks from administrator’s and user’s point of view. Please note that following examples are still for the desired state and some of them might not be implemented in PoC yet. If you want to learn more about deployment and architecture, check Kubetron slide deck. Configure OVN Networks: First of all, administrator must create and configure networks in OVN. That could be done either directly on OVN northbound database (e. g. 
using ovn-nbctl) or via an OVN manager (e.g. Neutron or oVirt Provider OVN, using Ansible). Expose Available Networks: Once the networks are configured, there are two options for exposing available networks to a user. The first is to provide some form of access to the OVN or Neutron API; this one is completely out of Kubernetes’ and Kubetron’s scope. The second option is to enable Network object support (as described in the Kubernetes Network CRD De-Facto Standard). With this option, the administrator must create a Network object for each OVN network that a user is allowed to use. This object allows the administrator to expose only a limited subset of networks or to limit access per Namespace. This process could be automated, e.g. via a service that monitors available logical switches and exposes them as Networks. # List networks (Logical Switches) directly from OVN Northbound databaseovn-nbctl ls-list# List networks available on Neutronneutron net-list# List networks as Network objects created in Kuberneteskubectl get networksAttach pod to a Network: Once a user selects a desired network based on the options described in the previous section, he or she can request it for a pod using an annotation. This annotation is compatible with the earlier mentioned Kubernetes Network CRD De-Facto Standard. apiVersion: v1kind: Podmetadata: name: network-consumer annotations: kubernetes.v1.cni.cncf.io/networks: red # requested networksspec: containers: - name: busybox image: busyboxAccess the Network from the pod: Once the pod is created, a user can list its interfaces and their assigned IP addresses: $ kubectl exec -it network-consumer -- ip address. . . 10: red-bcxoeffrsw: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1442 qdisc noqueue state UNKNOWN qlen 1000 link/ether 4e:71:3b:ee:a5:f4 brd ff:ff:ff:ff:ff:ff inet 10.1.0.3/24 brd 10.1.0.255 scope global dynamic red-bcxoeffrsw valid_lft 86371sec preferred_lft 86371sec inet6 fe80::4c71:3bff:feee:a5f4/64 scope link valid_lft forever preferred_lft forever. . . In order to make it easier to obtain the network’s interface name inside the pod’s containers, environment variables with network-interface mapping are created: $ echo $NETWORK_INTERFACE_REDred-bcxoeffrswProof of Concept: As of now, the current implementation does not completely implement the desired model yet: Only Neutron mode is implemented, Kubetron cannot be used with OVN alone Network object handling is not implemented, Kubetron obtains networks directly from Neutron Interface names are not exposed as environment variables. It might be unstable and there are some missing parts. However, the basic scenario works, at least in a development environment. Demo: In the following recording we create two networks, red and blue, using the Neutron API via Ansible. Then we create two pods and connect them to both mentioned networks. And then we ping. Try it Yourself: I encourage you to try Kubetron yourself. It has not yet been tested on a regular Kubernetes deployment (and it likely won’t work without some tuning). Fortunately, the Kubetron repository contains a Vagrant file and a set of scripts that will help you deploy multi-node Kubernetes with OVN and Kubetron installed. On top of that, it describes how to create networks and connect pods to them. Check out the Kubetron README.md and give it a try! Looking for Help: If you are interested in contributing to Kubetron, please follow its GitHub repository. There are many missing features and possible improvements; I will open issues to track them soon. Stay tuned! 
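As a small illustration of the "Configure OVN Networks" step described earlier: the post only shows how to list existing networks, so below is a minimal sketch of how an administrator might create the red and blue logical switches used in the demo directly on the OVN northbound database. The exact workflow in a Neutron- or Ansible-driven setup may differ; this is just the plain ovn-nbctl form:
# create the two logical switches used in the demo
ovn-nbctl ls-add red
ovn-nbctl ls-add blue
# confirm both switches are now visible
ovn-nbctl ls-list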
Disclaimer: Kubetron is in early development stage, both it’s architecture and tools to use it will change. " }, { - "id": 142, + "id": 141, "url": "/2018/Use-GlusterFS-Cloning-with-KubeVirt.html", "title": "Use Glusterfs Cloning With Kubevirt", "author" : "karmab", "tags" : "glusterfs, storage", "body": "Gluster seems like a good fit for storage in kubernetes and in particular in kubevirt. Still, as for other storage backends, we will likely need to use a golden set of images and deploy vms from them. That’s where cloning feature of gluster comes at rescue! Contents: Prerequisites Installing Gluster provisioner Using The cloning feature ConclusionPrerequisites: I assume you already have a running instance of openshift and kubevirt along with gluster and an already existing pvc where you copied a base operating system ( you can get those from here) For reference, I used the following components and versions: 3 baremetal servers with Rhel 7. 4 as base OS OpenShift and CNS 3. 9 KubeVirt latestInstalling Gluster provisioner: initial deployment: We will deploy the custom provisioner using this template, along with cluster rules located in this file Note that we also patch the image to use an existing one from gluster org located at docker. io instead of quay. io, as the corresponding repository is private by the time of this writing, and the heketi one, to make sure it has the code required to handle cloning NAMESPACE= app-storage oc create -f openshift-clusterrole. yamloc process -f glusterfile-provisioner-template. yml | oc apply -f - -n $NAMESPACEoc adm policy add-cluster-role-to-user cluster-admin -z glusterfile-provisioner -n $NAMESPACEoc adm policy add-scc-to-user privileged -z glusterfile-provisioneroc set image dc/heketi-storage heketi=gluster/heketiclone:latest -n $NAMESPACEoc set image dc/glusterfile-provisioner glusterfile-provisioner=gluster/glusterfileclone:latest -n $NAMESPACEAnd you will see something similar to this in your storage namespace [root@master01 ~]# NAMESPACE= app-storage [root@master01 ~]# kubectl get pods -n $NAMESPACENAME READY STATUS RESTARTS AGEglusterfile-provisioner-3-vhkx6 1/1 Running 0 1dglusterfs-storage-b82x4 1/1 Running 1 23dglusterfs-storage-czthc 1/1 Running 0 23dglusterfs-storage-z68hm 1/1 Running 0 23dheketi-storage-2-qdrks 1/1 Running 0 6hadditional configuration: for the custom provisioner to work, we need two additional things: a storage class pointing to it, but also containing the details of the current heketi installation a secret similar to the one used by the current heketi installation, but using a different typeYou can use the following NAMESPACE= app-storage oc get sc glusterfs-storage -o yamloc get secret heketi-storage-admin-secret -n $NAMESPACE-o yamlthen, create the following objects: glustercloning-heketi-secret secret in your storage namespace glustercloning storage classfor reference, here are samples of those files. Note how we change the type for the secret and add extra options for our storage class (in particular, enabling smartclone). apiVersion: v1data: key: eEt0NUJ4cklPSmpJb2RZcFpqVExSSjUveFV5WHI4L0NxcEtMME1WVlVjQT0=kind: Secretmetadata: name: glustercloning-heketi-secret namespace: app-storagetype: gluster. org/glusterfileapiVersion: storage. k8s. io/v1kind: StorageClassmetadata: name: glustercloningparameters: restsecretname: glustercloning-heketi-secret restsecretnamespace: app-storage resturl: http://heketi-storage. 192. 168. 122. 10. xip. 
io restuser: admin smartclone: true snapfactor: 10 volumeoptions: group virtprovisioner: gluster. org/glusterfilereclaimPolicy: DeleteThe full set of supported parameters can be found here Using the cloning feature: Once deployed, you can now provision pvcs from a base origin Cloning single pvcs: For instance, provided you have an existing pvc named cirros containing this base operating system, and that this PVC contains an annotion of the following (. . . )metadata: annotations: gluster. org/heketi-volume-id: f0cbbb29ef4202c5226f87708da57e5c(. . . )A cloned pvc can be created with the following yaml ( note that we simply indicate a clone request in the annotations) apiVersion: v1kind: PersistentVolumeClaimmetadata: name: testclone1 namespace: default annotations: k8s. io/CloneRequest: cirrosspec: accessModes: - ReadWriteOnce resources: requests: storage: 1Gi storageClassName: glustercloningstatus: accessModes: - ReadWriteOnce capacity: storage: 1GiOnce provisioned, the pvc will contain this additional annotation created by the provisioner (. . . )metadata: annotations: k8s. io/CloneOf: cirros(. . . )Leveraging the feature in openshift templates: We can make direct use of the feature in this openshift template which would create the following objects: a persistent volume claim as a clone of an existing pvc (cirros by default) an offline virtual machine object additional services for ssh and http accessyou can use it with something like oc process -f template. yml -p Name=myvm | oc process -f - -n defaultConclusion: Cloning features in the storage backend allow us to simply use a given set of pvcs as base OS for the deployment of our vms. This feature is growing in gluster, worth giving it a try! " }, { - "id": 143, + "id": 142, "url": "/2018/KubeVirt-API-Access-Control.html", "title": "Kubevirt Api Access Control", "author" : "davidvossel", "tags" : "api, rbac, roles", "body": "Access to KubeVirt resources are controlled entirely by Kubernete’s ResourceBased Access Control (RBAC) system. This system allows KubeVirt to tie directlyinto the existing authentication and authorization mechanisms Kubernetesalready provides to its core api objects. KubeVirt RBAC Role Basics: Typically, when people think of Kubernetes RBAC system, they’re thinking aboutgranting users access to create/delete kubernetes objects (like Pods,deployments, etc), however those same RBAC mechanisms work naturally withKubeVirt objects as well. When we look at KubeVirt’s objects, we can see they are structured just likethe objects that come predefined in the Kubernetes core. For example, look here’s an example of a VirtualMachine spec. apiVersion: kubevirt. io/v1alpha1kind: VirtualMachinemetadata: name: vm-ephemeralspec: domain: devices: disks: - disk: bus: virtio name: registrydisk volumeName: registryvolume resources: requests: memory: 64M volumes: - name: registryvolume registryDisk: image: kubevirt/cirros-container-disk-demo:develIn the spec above, we see the KubeVirt VirtualMachine object has an apiVersionfield and a kind field just like a Pod spec does. The kubevirt. io portionof the apiVersion field represents KubeVirt apiGroup the resource is a part of. The kind field reflects the resource type. Using that information, we can create an RBAC role that gives a user permissionto create, delete, and view all VirtualMachine objects. apiVersion: rbac. authorization. k8s. io/v1beta1kind: ClusterRolemetadata: name: vm-access labels: kubevirt. io: rules: - apiGroups: - kubevirt. 
io resources: - virtualmachines verbs: - get - delete - create - update - patch - list - watchThis same logic can be applied when creating RBAC roles for other KubeVirtobjects as well. If we wanted to extend this RBAC role to grant similarpermissions for VirtualMachinePreset objects, we’d just have to add a secondresource kubevirt. io resource list. The result would look like this. apiVersion: rbac. authorization. k8s. io/v1beta1kind: ClusterRolemetadata: name: vm-access labels: kubevirt. io: rules: - apiGroups: - kubevirt. io resources: - virtualmachines - virtualmachinepresets verbs: - get - delete - create - update - patch - list - watchKubeVirt Subresource RBAC Roles: Access to a VirtualMachines’s VNC and console stream using KubeVirt’svirtctl tool is managed by the Kubernetes RBAC system as well. Permissionsfor these resources work slightly different than the other KubeVirt objectsthough. Console and VNC access is performed using the KubeVirt Stream API, which hasits own api group called subresources. kubevirt. io. Below is an example ofhow to create a role that grants access to the VNC and console streams APIs. apiVersion: rbac. authorization. k8s. io/v1beta1kind: ClusterRolemetadata: name: vm-vnc-access labels: kubevirt. io: rules: - apiGroups: - subresources. kubevirt. io resources: - virtualmachines/console - virtualmachines/vnc verbs: - getLimiting RBAC To a Single Namespace. : A ClusterRole can be bound to a user in two different ways. When a ClusterRoleBinding is used, a user is permitted access to all resourcesdefined in the ClusterRole across all namespaces in the cluster. When a RoleBinding is used, a user is limited to accessing only the resourcesdefined in the ClusterRole within the namespace RoleBinding exists in. Limiting RBAC To a Single Resource. : A user can also be limit to accessing only a single resource within a resourcetype. Below is an example that only grants VNC access to the VirtualMachinenamed ‘bobs-vm’ apiVersion: rbac. authorization. k8s. io/v1beta1kind: ClusterRolemetadata: name: vm-vnc-access labels: kubevirt. io: rules: - apiGroups: - subresources. kubevirt. io resources: - virtualmachines/console - virtualmachines/vnc resourceName: - bobs-vm verbs: - getDefault KubeVirt RBAC Roles: The next release of KubeVirt is coming with three default ClusterRoles thatadmins can use to grant users access to KubeVirt resources. In most cases,these roles will prevent admins from ever having to create their own customKubeVirt RBAC roles. More information about these default roles can be found in the KubeVirtuser guide here " }, { - "id": 144, + "id": 143, "url": "/2018/KubeVirt-objects.html", "title": "Kubevirt Objects", "author" : "jcpowermac", "tags" : "custom resources, kubevirt objects, objects, VirtualMachine", "body": "The KubeVirt project provides extensions to Kubernetes via custom resources. These resources are a collection a API objects that defines a virtual machine within Kubernetes. I think it’s important to point out the two great resources that I used tocompile information for this post: user-guide api-referenceWith that let’s take a look at the objects that are available. KubeVirt top-level objectsBelow is a list of the top level API objects and descriptions that KubeVirt provides. VirtualMachine (vm[s]) - represents a virtual machine in the runtime environment of Kubernetes. OfflineVirtualMachine (ovm[s]) - handles the virtual machines that are not running or are in a stopped state. 
VirtualMachinePreset (vmpreset[s]) - is an extension to general VirtualMachine configuration behaving much like PodPresets from Kubernetes. When a VirtualMachine is created, any applicable VirtualMachinePresets will be applied to the existing spec for the VirtualMachine. This allows for re-use of common settings that should apply to multiple VirtualMachines. VirtualMachineReplicaSet (vmrs[s]) - tries to ensures that a specified number of VirtualMachine replicas are running at any time. DomainSpec is listed as a top-level object but is only used within all of the objects above. Currently the DomainSpec is a subset of what is configurable via libvirt domain XML. VirtualMachine: VirtualMachine is mortal object just like aPod within Kubernetes. It only runs once and cannot be resurrected. This might seem problematic especiallyto an administrator coming from a traditional virtualization background. Fortunatelylater we will discuss OfflineVirtualMachines which will address this. First let’s use kubectl to retrieve a list of VirtualMachine objects. $ kubectl get vms -n nodejs-exNAME AGEmongodb 5dnodejs 5dWe can also use kubectl describe $ kubectl describe vms -n testName: testvmNamespace: testLabels: guest=testvm kubevirt. io/nodeName=kn2. virtomation. com kubevirt. io/size=small. . . output. . . Events: Type Reason Age From Message ---- ------ ---- ---- ------- Normal SuccessfulCreate 59m virtualmachine-controller Created virtual machine pod virt-launcher-testvm-8h927 Normal SuccessfulHandOver 59m virtualmachine-controller Pod owner ship transfered to the node virt-launcher-testvm-8h927 Normal Created 59m (x2 over 59m) virt-handler, kn2. virtomation. com VM defined. Normal Started 59m virt-handler, kn2. virtomation. com VM started. And just in case if you want to return the yaml definition of a VirtualMachine object here is an example. $ kubectl -o yaml get vms mongodb -n nodejs-exapiVersion: kubevirt. io/v1alpha1kind: VirtualMachine. . . output. . . The first object we will annotate is VirtualMachine. The important sections . spec for VirtualMachineSpec and . spec. domain for DomainSpec will be annotated only in this section then referred to in the other object sections. apiVersion: kubevirt. io/v1alpha1kind: VirtualMachinemetadata: annotations: {} labels: {} name: string namespace: stringspec: {}Node Placement: Kubernetes has the ability to schedule a pod to specific nodes based on affinity and anti-affinity rules. Node affinity is also possible with KubeVirt. To constrain a virtual machine to run on a node define a matching expressions using node labels. affinity: nodeAffinity: preferredDuringSchedulingIgnoredDuringExecution: - preference: matchExpressions: - key: string operator: string values: - string weight: 0 requiredDuringSchedulingIgnoredDuringExecution: nodeSelectorTerms: - matchExpressions: - key: string operator: string values: - stringA virtual machine can also more easily be constrained by using nodeSelector which is defined by node’s label and value. Here is an example nodeSelector: kubernetes. io/hostname: kn1. virtomation. comClocks and Timers: Configures the virtualize hardware clock provided by QEMU. domain: clock: timezone: string utc: offsetSeconds: 0The timer defines the type and policy attribute that determines what action is take when QEMU misses a deadline for injecting a tick to the guest. 
domain: clock: timer: hpet: present: true tickPolicy: string hyperv: present: true kvm: present: true pit: present: true tickPolicy: string rtc: present: true tickPolicy: string track: stringCPU and Memory: The number of CPU cores a virtual machine will be assigned. . spec. domain. cpu. cores will not be used for scheduling use . spec. domain. resources. requests. cpu instead. cpu: cores: 1There are two supported resource limits and requests: cpu and memory. A . spec. domain. resources. requests. memory should be defined to determine the allocation of memory provided to the virtual machine. These values will be used to in scheduling decisions. resources: limits: {} requests: {}Watchdog Devices: . spec. domain. watchdog automatically triggers an action via Libvirt and QEMU when the virtual machine operating system hangs or crashes. watchdog: i6300esb: action: string name: stringFeatures: . spec. domain. featuresare hypervisor cpu or machine features that can be enabled. After reviewing both Linux and Microsoft QEMU virtual machines managed byLibvirtboth acpi andapicshould be enabled. The hyperv features should be enabled only for Windows-based virtual machines. For additional information regarding features please visit the virtual hardware configuration in the kubevirt user guide. features: acpi: enabled: true apic: enabled: true endOfInterrupt: true hyperv: relaxed: enabled: true reset: enabled: true runtime: enabled: true spinlocks: enabled: true spinlocks: 0 synic: enabled: true synictimer: enabled: true vapic: enabled: true vendorid: enabled: true vendorid: string vpindex: enabled: trueQEMU Machine Type: . spec. domain. machine. type is the emulated machine architecture provided by QEMU. machine: type: stringHere is an example how to retrieve the supported QEMU machine types. $ qemu-system-x86_64 --machine help Supported machines are: . . . output. . . pc Standard PC (i440FX + PIIX, 1996) (alias of pc-i440fx-2. 10) pc-i440fx-2. 10 Standard PC (i440FX + PIIX, 1996) (default) . . . output. . . q35 Standard PC (Q35 + ICH9, 2009) (alias of pc-q35-2. 10) pc-q35-2. 10 Standard PC (Q35 + ICH9, 2009)Disks and Volumes: . spec. domain. devices. disks configures a QEMU type of disk to the virtual machine and assigns a specific volume and its type to that disk via the volumeName. devices: disks: - cdrom: bus: string readonly: true tray: string disk: bus: string readonly: true floppy: readonly: true tray: string lun: bus: string readonly: true name: string volumeName: stringcloudInitNoCloudinjects scripts and configuration into a virtual machine operating system. There are three different parameters that can be used to provide thecloud-init coniguration: secretRef, userData or userDataBase64. See the user-guide for examples of how to use . spec. volumes. cloudInitNoCloud. volumes: - cloudInitNoCloud: secretRef: name: string userData: string userDataBase64: stringAn emptyDisk volume creates an extra qcow2 disk that is created with the virtual machine. It will be removed if the VirtualMachine object is deleted. emptyDisk: capacity: stringEphemeral volume creates a temporary local copy on write image storage that will be discarded when the VirtualMachine is removed. ephemeral: persistentVolumeClaim: claimName: string readOnly: truename: stringpersistentVolumeClaim volume persists after the VirtualMachine is deleted. persistentVolumeClaim: claimName: string readOnly: trueregistryDisk volume type uses a virtual machine disk that is stored in a container image registry. 
registryDisk: image: string imagePullSecret: stringVirtual Machine Status: Once the VirtualMachine object has been created the VirtualMachineStatus will be available. VirtualMachineStatus can be used in automation tools such as Ansible to confirm running state, determine where a VirtualMachine is running via nodeName or the ipAddress of the virtual machine operating system. kubectl -o yaml get vm mongodb -n nodejs-ex# . . . output. . . status: interfaces: - ipAddress: 10. 244. 2. 7 nodeName: kn2. virtomation. com phase: RunningExample using --template to retrieve the . status. phase of the VirtualMachine. kubectl get vm mongodb --template {{. status. phase}} -n nodejs-exRunningExamples: https://kubevirt. io/user-guide/virtual_machines/virtual_machine_instances/#virtualmachineinstance-apiOfflineVirtualMachine: An OfflineVirtualMachine is an immortal object within KubeVirt. The VirtualMachinedescribed within the spec will be recreated with a start power operation, host issueor simply a accidental deletion of the VirtualMachine object. For a traditional virtual administrator this object might be appropriate formost use-cases. Just like VirtualMachine we can retrieve the OfflineVirtualMachine objects. $ kubectl get ovms -n nodejs-exNAME AGEmongodb 5dnodejs 5dAnd display the object in yaml. $ kubectl -o yaml get ovms mongodb -n nodejs-exapiVersion: kubevirt. io/v1alpha1kind: OfflineVirtualMachinemetadata:. . . output. . . We continue by annotating OfflineVirtualMachine object. apiVersion: kubevirt. io/v1alpha1kind: OfflineVirtualMachinemetadata: annotations: {} labels: {} name: string namespace: stringspec:What is Running in OfflineVirtualMachine?: . spec. running controls whether the associated VirtualMachine object is created. In other words this changes the power status of the virtual machine. running: trueThis will create a VirtualMachine object which will instantiate and power on a virtual machine. kubectl patch offlinevirtualmachine mongodb --type merge -p '{ spec :{ running :true }}' -n nodejs-exThis will delete the VirtualMachine object which will power off the virtual machine. kubectl patch offlinevirtualmachine mongodb --type merge -p '{ spec :{ running :false }}' -n nodejs-exAnd if you would rather not have to remember the kubectl patch command abovethe KubeVirt team has provided a cli tool virtctl that can start and stopa guest. . /virtctl start mongodb -n nodejs-ex. /virtctl stop mongodb -n nodejs-exOffline Virtual Machine Status: Once the OfflineVirtualMachine object has been created the OfflineVirtualMachineStatus will be available. Like VirtualMachineStatus OfflineVirtualMachineStatus can be used for automation tools such as Ansible. kubectl -o yaml get ovms mongodb -n nodejs-ex# . . . output. . . status: created: true ready: trueExample using --template to retrieve the . status. conditions[0]. type of OfflineVirtualMachine. kubectl get ovm mongodb --template {{. status. ready}} -n nodejs-extrueVirtualMachineReplicaSet: VirtualMachineReplicaSet is great when you want to run multiple identical virtual machines. Just like the other top-level objects we can retrieve VirtualMachineReplicaSet. $ kubectl get vmrs -n nodejs-exNAME AGEreplica 1mWith the replicas parameter set to 2 the command below displays the two VirtualMachine objects that were created. $ kubectl get vms -n nodejs-exNAME AGEreplicanmgjl 7mreplicarjhdz 7mPause rollout: The . spec. paused parameter if true pauses the deployment of the VirtualMachineReplicaSet. paused: trueReplica quantity: The . spec. 
replicas number of VirtualMachine objects that should be created. replicas: 0The selector must be defined and match labels defined in the template. It is used by the controller to keep track of managed virtual machines. selector: matchExpressions: - key: string operator: string values: - string matchLabels: {}Virtual Machine Template Spec: The VMTemplateSpec is the definition of a VirtualMachine objects that will be created. In the VirtualMachine section the . spec VirtualMachineSpec describes the available parameters for that object. template: metadata: annotations: {} labels: {} name: string namespace: string spec: {}Replica Status: Like the other objects we already have discussed VMReplicaSetStatus is an important object to use for automation. status: readyReplicas: 0 replicas: 0Example using --template to retrieve the . status. readyReplicas and . status. replicas of VirtualMachineReplicaSet. $ kubectl get vmrs replica --template {{. status. readyReplicas}} -n nodejs-ex2$ kubectl get vmrs replica --template {{. status. replicas}} -n nodejs-ex2Examples: https://kubevirt. io/user-guide/virtual_machines/replicaset/#exampleVirtualMachinePreset: This is used to define a DomainSpec that can be used for multiple virtual machines. To configure a DomainSpec for multiple VirtualMachine objects the selector defines which VirtualMachine the VirtualMachinePreset should be applied to. $ kubectl get vmpreset -n nodejs-exNAME AGEm1. small 17sDomain Spec: See the VirtualMachine section above for annotated details of the DomainSpec object. spec: domain: {}Preset Selector: The selector is optional but if not defined will be applied to all VirtualMachine objects; which is probably not the intended purpose so I recommend always including a selector. selector: matchExpressions: - key: string operator: string values: - string matchLabels: {}Examples: https://kubevirt. io/user-guide/virtual_machines/presets/#examplesWe provided an annotated view into the KubeVirt objects - VirtualMachine, OfflineVirtualMachine, VirtualMachineReplicaSet and VirtualMachinePreset. Hopefully this will help a user of KubeVirt to understand the options and parameters that are currently available when creating a virtual machine on Kubernetes. " }, { - "id": 145, + "id": 144, "url": "/2018/Deploying-VMs-on-Kubernetes-GlusterFS-KubeVirt.html", "title": "Deploying Vms On Kubernetes Glusterfs Kubevirt", "author" : "rwsu", "tags" : "glusterfs, heketi, virtual machine, weavenet", "body": "Kubernetes is traditionally used to deploy and manage containerized applications. Did you know Kubernetes can also be used to deploy and manage virtual machines? This guide will walk you through installing a Kubernetes environment backed by GlusterFS for storage and the KubeVirt add-on to enable deployment and management of VMs. Contents: Prerequisites Known Issues Installing Kubernetes Installing GlusterFS and Heketi using gk-deploy Installing KubeVirt Deploying Virtual MachinesPrerequisites: You should have access to at least three baremetal servers. One server will be the master Kubernetes node and other two servers will be the worker nodes. Each server should have a block device attached for GlusterFS, this is in addition to the ones used by the OS. You may use virtual machines in lieu of baremetal servers. Performance may suffer and you will need to ensure your hardware supports nested virtualization and that the relevant kernel modules are loaded in the OS. 
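If you are unsure whether your hosts are ready for that, a quick sanity check is sketched below; it assumes Intel hardware (substitute kvm_amd and /sys/module/kvm_amd on AMD) and is only a rough verification I am adding here, not part of the original setup:
# Confirm the CPU exposes virtualization extensions (vmx on Intel, svm on AMD)
egrep -c '(vmx|svm)' /proc/cpuinfo
# Load the KVM modules and check that nested virtualization is enabled
sudo modprobe kvm kvm_intel
cat /sys/module/kvm_intel/parameters/nested    # expect Y (or 1) when nesting is on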
For reference, I used the following components and versions: baremetal servers with CentOS version 7. 4 as the base OS latest version of Kubernetes (at the time v1. 10. 1) Weave Net as the Container Network Interface (CNI), v2. 3. 0 gluster-kubernetes master commit 2a2a68ce5739524802a38f3871c545e4f57fa20a KubeVirt v0. 4. 1. Known Issues: You may need to set SELinux to permissive mode prior to running “kubeadm init” if you see failures attributed to etcd in /var/log/audit. log. Prior to installing GlusterFS, you may need to disable firewalld until this issue is resolved: https://github. com/gluster/gluster-kubernetes/issues/471 kubevirt-ansible install may fail in storage-glusterfs role: https://github. com/kubevirt/kubevirt-ansible/issues/219Installing Kubernetes: Create the Kubernetes cluster by using kubeadm. Detailed instructions can be found at https://kubernetes. io/docs/setup/independent/install-kubeadm/. Use Weave Net as the CNI. Other CNIs may work, but I have only tested Weave Net. If you are using only 2 servers as workers, then you will need to allow scheduling of pods on the master node because GlusterFS requires at least three nodes. To schedule pods on the master node, see “Master Isolation” in the kubeadm guide or execute this command: kubectl taint nodes --all node-role. kubernetes. io/master-Move onto the next step when your master and worker nodes are Ready. [root@master ~]# kubectl get nodesNAME STATUS ROLES AGE VERSIONmaster. somewhere. com Ready master 6d v1. 10. 1worker1. somewhere. com Ready <none> 6d v1. 10. 1worker2. somewhere. com Ready <none> 6d v1. 10. 1And all of the pods in the kube-system namespace are Running. [root@master ~]# kubectl get pods -n kube-systemNAME READY STATUS RESTARTS AGEetcd-master. somewhere. com 1/1 Running 0 6dkube-apiserver-master. somewhere. com 1/1 Running 0 6dkube-controller-manager-master. somewhere. com 1/1 Running 0 6dkube-dns-86f4d74b45-glv4k 3/3 Running 0 6dkube-proxy-b6ksg 1/1 Running 0 6dkube-proxy-jjxs5 1/1 Running 0 6dkube-proxy-kw77k 1/1 Running 0 6dkube-scheduler-master. somewhere. com 1/1 Running 0 6dweave-net-ldlh7 2/2 Running 0 6dweave-net-pmhlx 2/2 Running 1 6dweave-net-s4dp6 2/2 Running 0 6dInstalling GlusterFS and Heketi using gluster-kubernetes: The next step is to deploy GlusterFS and Heketi onto Kubernetes. GlusterFS provides the storage system on which the virtual machine images are stored. Heketi provides the REST API that Kubernetes uses to provision GlusterFS volumes. The gk-deploy tool is used to deploy both of these components as pods in the Kubernetes cluster. There is a detailed setup guide for gk-deploy. Note each node must have a raw block device that is reserved for use by heketi and they must not contain any data or be pre-formatted. You can reset your block device to a useable state by running: wipefs -a <path to device>To aid you, below are the commands you will need to run if you are following the setup guide. On all nodes: # Open ports for GlusterFS communicationssudo iptables -I INPUT 1 -p tcp --dport 2222 -j ACCEPTsudo iptables -I INPUT 1 -p tcp --dport 24007 -j ACCEPTsudo iptables -I INPUT 1 -p tcp --dport 24008 -j ACCEPTsudo iptables -I INPUT 1 -p tcp --dport 49152:49251 -j ACCEPT# Load kernel modulessudo modprobe dm_snapshotsudo modprobe dm_thin_poolsudo modprobe dm_mirror# Install glusterfs-fuse and git packagessudo yum install -y glusterfs-fuse gitOn the master node: # checkout gluster-kubernetes repogit clone https://github. 
com/gluster/gluster-kubernetescd gluster-kubernetes/deployBefore running the gk-deploy script, we need to first create a topology. json file that maps the nodes present in the GlusterFS cluster and the block devices attached to each node. The block devices should be raw and unformatted. Below is a sample topology. json file for a 3 node cluster all operating in the same zone. The gluster-kubernetes/deploy directory also contains a sample topology. json file. # topology. json{ clusters : [ { nodes : [ { node : { hostnames : { manage : [ master. somewhere. com ], storage : [ 192. 168. 10. 100 ] }, zone : 1 }, devices : [ /dev/vdb ] }, { node : { hostnames : { manage : [ worker1. somewhere. com ], storage : [ 192. 168. 10. 101 ] }, zone : 1 }, devices : [ /dev/vdb ] }, { node : { hostnames : { manage : [ worker2. somewhere. com ], storage : [ 192. 168. 10. 102 ] }, zone : 1 }, devices : [ /dev/vdb ] } ] } ]}Under “hostnames”, the node’s hostname is listed under “manage” and its IP address is listed under “storage”. Multiple block devices can be listed under “devices”. If you are using VMs, the second block device attached to the VM will usually be /dev/vdb. For multi-path, the device path will usually be /dev/mapper/mpatha. If you are using a second disk drive, the device path will usually be /dev/sdb. Once you have your topology. json file and saved it in gluster-kubernetes/deploy, we can execute gk-deploy to create the GlusterFS and Heketi pods. You will need to specify an admin-key which will be used in the next step and will be discovered during the KubeVirt installation. # from gluster-kubernetes/deploy. /gk-deploy -g -v -n kube-system --admin-key my-admin-keyAdd the end of the installation, you will see: heketi is now running and accessible via http://10. 32. 0. 4:8080 . To runadministrative commands you can install 'heketi-cli' and use it as follows: # heketi-cli -s http://10. 32. 0. 4:8080 --user admin --secret '<ADMIN_KEY>' cluster listYou can find it at https://github. com/heketi/heketi/releases . Alternatively,use it from within the heketi pod: # /usr/bin/kubectl -n kube-system exec -i heketi-b96c7c978-dcwlw -- heketi-cli -s http://localhost:8080 --user admin --secret '<ADMIN_KEY>' cluster listFor dynamic provisioning, create a StorageClass similar to this:\Take note of the URL for Heketi which will be used next step. If successful, 4 additional pods will be shown as Running in the kube-system namespace. [root@master deploy]# kubectl get pods -n kube-systemNAME READY STATUS RESTARTS AGE. . . snip. . . glusterfs-h4nwf 1/1 Running 0 6dglusterfs-kfvjk 1/1 Running 0 6dglusterfs-tjm2f 1/1 Running 0 6dheketi-b96c7c978-dcwlw 1/1 Running 0 6d. . . snip. . . Installing KubeVirt and setting up storage: The final component to install and which will enable us to deploy VMs on Kubernetes is KubeVirt. We will use kubevirt-ansible to deploy KubeVirt which will also help us configure a Secret and a StorageClass that will allow us to provision Persistent Volume Claims (PVCs) on GlusterFS. Let’s first clone the kubevirt-ansible repo. git clone https://github. com/kubevirt/kubevirt-ansiblecd kubevirt-ansibleEdit the inventory file in the kubevirt-ansible checkout. Modify the section that starts with “#BEGIN CUSTOM SETTINGS”. As an example using the servers from above: # BEGIN CUSTOM SETTINGS[masters]# Your master FQDNmaster. somewhere. com[etcd]# Your etcd FQDNmaster. somewhere. com[nodes]# Your nodes FQDN'sworker1. somewhere. comworker2. somewhere. 
com[nfs]# Your nfs server FQDN[glusterfs]# Your glusterfs nodes FQDN# Each node should have the glusterfs_devices variable, which# points to the block device that will be used by gluster. master. somewhere. comworker1. somewhere. comworker1. somewhere. com## If you run openshift deployment# You can add your master as schedulable node with option openshift_schedulable=true# Add at least one node with lable to run on it router and docker containers# openshift_node_labels= {'region': 'infra','zone': 'default'} # END CUSTOM SETTINGSNow let’s run the kubevirt. yml playbook: ansible-playbook -i inventory playbooks/kubevirt. yml -e cluster=k8s -e storage_role=storage-glusterfs -e namespace=kube-system -e glusterfs_namespace=kube-system -e glusterfs_name= -e heketi_url=http://10. 32. 0. 4:8080 -vIf successful, we should see 7 additional pods as Running in the kube-system namespace. [root@master kubevirt-ansible]# kubectl get pods -n kube-systemNAME READY STATUS RESTARTS AGEvirt-api-785fd6b4c7-rdknl 1/1 Running 0 6dvirt-api-785fd6b4c7-rfbqv 1/1 Running 0 6dvirt-controller-844469fd89-c5vrc 1/1 Running 0 6dvirt-controller-844469fd89-vtjct 0/1 Running 0 6dvirt-handler-78wsb 1/1 Running 0 6dvirt-handler-csqbl 1/1 Running 0 6dvirt-handler-hnlqn 1/1 Running 0 6dDeploying Virtual Machines: To deploy a VM, we must first grab a VM image in raw format, place the image into a PVC, define the VM in a yaml file, source the VM definition into Kubernetes, and then start the VM. The containerized data importer (CDI) is usually used to import VM images into Kubernetes, but there are some patches and additional testing to be done before the CDI can work smoothly with GlusterFS. For now, we will be placing the image into the PVC using a Pod that curls the image from the local filesystem using httpd. On master or on a node where kubectl is configured correctly install and start httpd. sudo yum install -y httpdsudo systemctl start httpdDownload the cirros cloud image and convert it into raw format. curl http://download. cirros-cloud. net/0. 4. 0/cirros-0. 4. 0-x86_64-disk. img -o /var/www/html/cirros-0. 4. 0-x86_64-disk. imgsudo yum install -y qemu-imgqemu-img convert /var/www/html/cirros-0. 4. 0-x86_64-disk. img /var/www/html/cirros-0. 4. 0-x86_64-disk. rawCreate the PVC to store the cirros image. cat <<EOF | kubectl create -f -apiVersion: v1kind: PersistentVolumeClaimmetadata: name: gluster-pvc-cirros annotations: volume. beta. kubernetes. io/storage-class: kubevirtspec: accessModes: - ReadWriteOnce resources: requests: storage: 5GiEOFCheck the PVC was created and has “Bound” status. [root@master ~]# kubectl get pvcNAME STATUS VOLUME CAPACITY ACCESS MODES STORAGECLASS AGEgluster-pvc-cirros Bound pvc-843bd508-4dbf-11e8-9e4e-149ecfc53021 5Gi RWO kubevirt 2mCreate a Pod to curl the cirros image into the PVC. Note: You will need to substitute with actual hostname or IP address. cat <<EOF | kubectl create -f -apiVersion: v1kind: Podmetadata: name: image-importer-cirrosspec: restartPolicy: OnFailure containers: - name: image-importer-cirros image: kubevirtci/disk-importer env: - name: CURL_OPTS value: -L - name: INSTALL_TO value: /storage/disk. img - name: URL value: http://<hostname>/cirros-0. 4. 0-x86_64-disk. raw volumeMounts: - name: storage mountPath: /storage volumes: - name: storage persistentVolumeClaim: claimName: gluster-pvc-cirrosEOFCheck and wait for the image-importer-cirros Pod to complete. 
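If you would like to follow the import while it runs, a couple of optional commands (purely illustrative, reusing the pod name from above) are:
kubectl get pod image-importer-cirros -w      # watch the pod until it reaches Completed
kubectl logs -f image-importer-cirros         # tail the importer's curl progress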
[root@master ~]# kubectl get podsNAME READY STATUS RESTARTS AGEimage-importer-cirros 0/1 Completed 0 28sCreate a Virtual Machine definition for your VM and source it into Kubernetes. Note the PVC containing the cirros image must be listed as the first disk under spec. domain. devices. disks. cat <<EOF | kubectl create -f -apiVersion: kubevirt. io/v1alpha2kind: VirtualMachinemetadata: creationTimestamp: null labels: kubevirt. io/ovm: cirros name: cirrosspec: running: false template: metadata: creationTimestamp: null labels: kubevirt. io/ovm: cirros spec: domain: devices: disks: - disk: bus: virtio name: pvcdisk volumeName: cirros-pvc - disk: bus: virtio name: cloudinitdisk volumeName: cloudinitvolume machine: type: resources: requests: memory: 64M terminationGracePeriodSeconds: 0 volumes: - cloudInitNoCloud: userDataBase64: IyEvYmluL3NoCgplY2hvICdwcmludGVkIGZyb20gY2xvdWQtaW5pdCB1c2VyZGF0YScK name: cloudinitvolume - name: cirros-pvc persistentVolumeClaim: claimName: gluster-pvc-cirrosstatus: {}Finally start the VM. export VERSION=v0. 4. 1curl -L -o virtctl https://github. com/kubevirt/kubevirt/releases/download/$VERSION/virtctl-$VERSION-linux-amd64chmod +x virtctl. /virtctl start cirrosWait for the VM pod to be in “Running” status. [root@master ~]# kubectl get podsNAME READY STATUS RESTARTS AGEimage-importer-cirros 0/1 Completed 0 28svirt-launcher-cirros-krvv2 0/1 Running 0 13sOnce it is running, we can then connect to its console. . /virtctl console cirrosPress enter if a login prompt doesn’t appear. " }, { - "id": 146, + "id": 145, "url": "/2018/changelog-v0.5.0.html", "title": "KubeVirt v0.5.0", "author" : "kube🤖", "tags" : "release notes, changelog", "body": "v0. 5. 0: Released on: Fri May 4 18:25:32 2018 +0200 Better controller health signaling Better virtctl error messages Improvements to enable CRI-O support Run CI on stable OpenShift Add test coverage for multiple PVCs Improved controller life-cycle guarantees Add Webhook validation Add tests coverage for node eviction OfflineVirtualMachine status improvements RegistryDisk API update" }, { - "id": 147, + "id": 146, "url": "/2018/Deploying-KubeVirt-on-a-Single-oVirt-VM.html", "title": "Deploying Kubevirt On A Single Ovirt Vm", "author" : "awels", "tags" : "ovirt, openshift", "body": "In this blog post we are exploring the possibilities of deploying KubeVirt on top of OpenShift which is running inside an oVirt VM. First we must prepare the environment. In my testing I created a VM with 4 cpus, 14G memory and a 100G disk. I then installed CentOS 7. 4 minimal on it. I also have nested virtualizationenabled on my hosts, so any VMs I create can run VMs inside them. These instructions are specific to oVirt, however if you are running another virtualizationplatform that can nested virtualization this will also work. For this example I chose to use a single VM for everything, but I could have done different VMs for my master/nodes/storage/etc, for simplicity I used a singleVM. Preparing the VM: First we will need to enable epel and install some needed tools, like git to get at the source, and ansible to do the deploy: As root: $ yum -y install epel-release$ yum -y install ansible git wgetoptionalInstall ovirt-guest-agent so you can see information in your oVirt admin view. As root: $ yum -y install ovirt-guest-agent$ systemctl start ovirt-guest-agent$ systemctl enable ovirt-guest-agentMake a template out of the VM, so if something goes wrong you have a good starting point to try again. 
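Before going further it can also be worth confirming, from inside the VM, that nested virtualization is actually being passed through; this quick check is my own addition rather than part of the original walkthrough:
egrep -c '(vmx|svm)' /proc/cpuinfo    # non-zero means the virtual CPU exposes VT-x/AMD-V
lsmod | grep kvm                      # lists the kvm modules if they are already loaded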
Make sure the VM has a fully qualified domain name, using either DNS or editing /etc/hosts. As we are going to install openshift we will need to install the openshift client tooling from openshift githubin this article I opted to simply copy the oc command into /usr/bin, but anywhere in your path will do. Alternatively you can add oc to your PATH. As root: $ wget https://github. com/openshift/origin/releases/download/v3. 9. 0/openshift-origin-client-tools-v3. 9. 0-191fece-linux-64bit. tar. gz$ tar zxvf openshift-origin-client-tools-v3. 9. 0-191fece-linux-64bit. tar. gz$ cp openshift-origin-client-tools-v3. 9. 0-191fece-linux-64bit/oc /usr/binNext we will install docker and configure it for use with open shift. As root: $ yum -y install dockerWe need to setup an insecure registry in docker before we can start open shift. To do this we must add:INSECURE_REGISTRY=”–insecure-registry 172. 30. 0. 0/16”to the end of /etc/sysconfig/docker Now we can start docker. As root: $ systemctl start docker$ systemctl enable dockerNow we are ready to test if we can bring our cluster to up. As root: $ oc cluster upInstalling KubeVirt with Ansible: Now that we have everything configured we can the rest as a regular user. Also note that if you had an existing cluster you can could have skipped the previous section. Clone the kube-virt ansible repo, and setup the ansible galaxy roles needed to deploy. As user: $ git clone https://github. com/kubevirt/kubevirt-ansible$ cd kubevirt-ansible$ mkdir $HOME/galaxy-roles$ ansible-galaxy install -p $HOME/galaxy-roles -r requirements. yml$ export ANSIBLE_ROLES_PATH=$HOME/galaxy-rolesNow that we are in the kubevirt-ansible directory, we have to edit the inventory file on where we are going to deploy the different open shift nodes. Because we opted to install everything on a single VM the FQDN we enter is the same as the one we defined for our VM. Had we had different nodes we wouldenter the FQDN of each in the inventory file. Lets assume our VMs FQDN is kubevirt. demo, we would changed the inventory file as follows: As user: [masters]kubevirt. demo[etcd]kubevirt. demo[nodes]kubevirt. demo openshift_node_labels= {'region': 'infra','zone': 'default'} openshift_schedulable=true[nfs]kubevirt. demoIn order to allow ansible to ssh into the box using ssh keys instead of a password we will need to generate some, assuming we don’t have theseconfigured already: As root: $ ssh-keygen -t rsaFill out the information in the questions, which will generate two files in /root/. ssh, id_rsa and id_rsa. pub. The id_rsa. pub is the public key which will allowssh to verify your identify when you ssh into a machine. Since we are doing all of this on the same machine, we can simply append the contents ofid_rsa. pub to authorized_keys in /root/. ssh. If that file doesn’t exist you can simply copy id_rsa. pub to authorized_keys. If you are deploying to multiple hostsyou need to append the contents of id_rsa. pub on each host. Next we need to configure docker storage, one can write a whole book about how to do that, so I will post a link https://docs. okd. io/1. 5/install_config/install/host_preparation. html#configuring-docker-storage to the installation document and for now go with the defaults which are not recommended for production, but since this is an introduction its fine. As root: $ docker-storage-setupLets double check the cluster is up before we start running the ansible play books. As root: $ oc cluster upInstall kubernetes. 
As root: $ ansible-playbook -i inventory playbooks/cluster/kubernetes/config. ymlDisable selinux on all hosts, this hopefully won’t be needed in the future. As root: $ ansible-playbook -i inventory playbooks/selinux. ymllog in as admin to give developer user rights. As root: $ oc login -u system:admin$ oc adm policy add-cluster-role-to-user cluster-admin developerLog in as the developer user. As user: $ oc login -u developerThe password for the developer user is developer. Now finally deploy kubevirt. As user: $ ansible-playbook -i localhost playbooks/kubevirt. yml -e@vars/all. ymlVerify that the pods are running, you should be in the kube-system namespace, if not switch with oc project kube-system. As user: $ kubectl get podsNAME READY STATUS RESTARTS AGEvirt-api-747745669-mswk8 1/1 Running 0 10mvirt-api-747745669-t9dsp 1/1 Running 0 10mvirt-controller-648945bbcb-ln7dv 1/1 Running 0 10mvirt-controller-648945bbcb-nxrj8 0/1 Running 0 10mvirt-handler-6zh77 1/1 Running 0 10mNow that we have KubeVirt up and running we are ready to try to start a VM. Let’s install virtctl to make it easier tostart and stop VMs. The latest available version while writing this was 0. 4. 1. As user: $ export VERSION=v0. 4. 1$ curl -L -o virtctl \ https://github. com/kubevirt/kubevirt/releases/download/$VERSION/virtctl-$VERSION-linux-amd64$ chmod +x virtctlLets grab the demo VM specification from the kubevirt github page. As user: $ kubectl apply -f https://raw. githubusercontent. com/kubevirt/demo/master/manifests/vm. yamlNow we can start the VM. As user: $ . /virtctl start testvmNow a new pod will be running that is controlling the VM. As user: $ kubectl get podsNAME READY STATUS RESTARTS AGEvirt-api-747745669-mswk8 1/1 Running 0 15mvirt-api-747745669-t9dsp 1/1 Running 0 15mvirt-controller-648945bbcb-ln7dv 1/1 Running 0 15mvirt-controller-648945bbcb-nxrj8 0/1 Running 0 15mvirt-handler-6zh77 1/1 Running 0 15mvirt-launcher-testvm-gv5nt 2/2 Running 0 23sCongratulations you now have a VM running in OpenShift using KubeVirt inside an oVirt VM. Useful resources: KubeVirt KubeVirt Ansible Minikube kubevirt Demo Kubectl installation" }, { - "id": 148, + "id": 147, "url": "/2018/This-Week-in-Kube-Virt-23.html", "title": "This Week In Kube Virt 23", "author" : "fabiand", "tags" : "release notes, changelog", "body": "This is a close-to weekly update from the KubeVirt team. In general there is now more work happening outside of the core kubevirtrepository. We are currently driven by Closing a lot of loose ends Stepping back to identify gaps for 1. 0 Within the last two weeks we achieved to: Release KubeVirt v0. 4. 1 to address some shutdown issues https://github. com/kubevirt/kubevirt/releases/tag/v0. 4. 1 Many VM life-cycle and guarantee fixes (@rmohr @vossel) https://github. com/kubevirt/kubevirt/pull/951 https://github. com/kubevirt/kubevirt/pull/948 https://github. com/kubevirt/kubevirt/pull/935 https://github. com/kubevirt/kubevirt/pull/838 https://github. com/kubevirt/kubevirt/pull/907 https://github. com/kubevirt/kubevirt/pull/883 Pass labels from VM to pod for better Service integration (@rmohr) https://github. com/kubevirt/kubevirt/pull/939 Packaging preparations (@rmohr) https://github. com/kubevirt/kubevirt/pull/941 https://github. com/kubevirt/kubevirt/issues/924 https://github. com/kubevirt/kubevirt/pull/950 Controller readiness clarifications (@rmohr) https://github. com/kubevirt/kubevirt/pull/901 Validation improvements using CRD scheme and webhooks (@vossel) Webhook: https://github. 
com/kubevirt/kubevirt/pull/911 Scheme: https://github. com/kubevirt/kubevirt/pull/850 https://github. com/kubevirt/kubevirt/pull/917 Add Windows tests (@alukiano) https://github. com/kubevirt/kubevirt/pull/809 Improve PVC tests (@petrkotas) https://github. com/kubevirt/kubevirt/pull/862 Enable SELinux in OpenShift CI environment Tests to run KubeVirt on Kubernetes 1. 10 In addition to this, we are also working on: virtctl expose convenience verb (@yuvalif) https://github. com/kubevirt/kubevirt/pull/962 CRIO support in CI virtctl bash/zsh completion (@rmohr) https://github. com/kubevirt/kubevirt/pull/916 Improved error messages from virtctl (@fromanirh) https://github. com/kubevirt/kubevirt/pull/934 Improved validation feedback (@vossel) https://github. com/kubevirt/kubevirt/pull/960 Take a look at the pulse, to get an overview over all changes of thisweek: https://github. com/kubevirt/kubevirt/pulse Finally you can view our open issues athttps://github. com/kubevirt/kubevirt/issues And keep track of events at our calendar18pc0jur01k8f2cccvn5j04j1g@group. calendar. google. com If you need some help or want to chat you can find us on<irc://irc. freenode. net/#kubevirt> " }, { - "id": 149, + "id": 148, "url": "/2018/KubeVirt-Network-Deep-Dive.html", "title": "Kubevirt Network Deep Dive", "author" : "jcpowermac, booxter", "tags" : "network, flannel, kubevirt-ansible, Skydive", "body": "In this post we will research and discover how KubeVirt networking functions along with Kubernetes objects services and ingress. This should also provide enough technical details to start troubleshooting your own environment if a problem should arise. So with that let’s get started. Remember to also check KubeVirt Network Rehash which provides updates to this article. Component InstallationWe are going to walk through the installation that assisted me to write this post. I have created three CentOS 7. 4 with nested virtualization enabled where Kubernetes will be installed, which is up next. Kubernetes: I am rehashing what is available in Kubernetes documentation just to make it easier to follow along and provide an identical environment that I used to research KubeVirt networking. Packages: Add the Kubernetes repository cat <<EOF > /etc/yum. repos. d/kubernetes. repo[kubernetes]name=Kubernetesbaseurl=https://packages. cloud. google. com/yum/repos/kubernetes-el7-\$basearchenabled=1gpgcheck=1repo_gpgcheck=1gpgkey=https://packages. cloud. google. com/yum/doc/yum-key. gpg https://packages. cloud. google. com/yum/doc/rpm-package-key. gpgEOFUpdate and install prerequisites. yum update -yyum install kubelet-1. 9. 4 \ kubeadm-1. 9. 4 \ kubectl-1. 9. 4 \ docker \ ansible \ git \ curl \ wget -yDocker prerequisites: For docker storage we will use a new disk vdb formatted XFS using the Overlay driver. cat <<EOF > /etc/sysconfig/docker-storage-setupSTORAGE_DRIVER=overlay2DEVS=/dev/vdbCONTAINER_ROOT_LV_NAME=dockerlvCONTAINER_ROOT_LV_SIZE=100%FREECONTAINER_ROOT_LV_MOUNT_PATH=/var/lib/dockerVG=dockervgEOFStart and enable Docker systemctl start dockersystemctl enable dockerAdditional prerequisites: In this section we continue with the required prerequistes. This is also described in the install kubeadm kubernetes documentation. systemctl enable kubeletThis is a requirement for Flannel - pass bridged IPv4 traffic to iptables’ chains cat <<EOF > /etc/sysctl. d/k8s. conf net. bridge. bridge-nf-call-ip6tables = 1 net. bridge. 
bridge-nf-call-iptables = 1 EOF sysctl --systemTemporarily disable selinux so we can run kubeadm init setenforce 0And let’s also permanently disable selinux - yes I know. If this isn’t done once you reboot your node kubernetes won’t start and then you will be wondering what happened :) cat <<EOF > /etc/selinux/config # This file controls the state of SELinux on the system. # SELINUX= can take one of these three values: # enforcing - SELinux security policy is enforced. # permissive - SELinux prints warnings instead of enforcing. # disabled - No SELinux policy is loaded. SELINUX=disabled # SELINUXTYPE= can take one of three two values: # targeted - Targeted processes are protected, # minimum - Modification of targeted policy. Only selected processes are protected. # mls - Multi Level Security protection. SELINUXTYPE=targeted EOFInitialize cluster: Now we are ready to create our cluster starting with the first and only master. Note --pod-network-cidr is required for Flannel kubeadm init --pod-network-cidr=10. 244. 0. 0/16. . . output. . . mkdir -p $HOME/. kube sudo cp -i /etc/kubernetes/admin. conf $HOME/. kube/config sudo chown $(id -u):$(id -g) $HOME/. kube/configThere are multiple CNI providers in this example environment just going to use Flannel since its simple to deploy and configure. kubectl apply -f https://raw. githubusercontent. com/coreos/flannel/v0. 9. 1/Documentation/kube-flannel. ymlAfter Flannel is deployed join the nodes to the cluster. kubeadm join --token 045c1c. 04765c236e1bd8da 172. 31. 50. 221:6443 \ --discovery-token-ca-cert-hash sha256:redactedOnce all the nodes have been joined check the status. $ kubectl get nodeNAME STATUS ROLES AGE VERSIONkm1. virtomation. com Ready master 11m v1. 9. 4kn1. virtomation. com Ready <none> 10m v1. 9. 4kn2. virtomation. com Ready <none> 10m v1. 9. 4Additional Components: KubeVirt: The recommended installation method is to use kubevirt-ansible. For this example I don’t require storage so just deploying using kubectl create. For additional information regarding KubeVirt install see the installation readme. $ kubectl create -f https://github. com/kubevirt/kubevirt/releases/download/v0. 4. 1/kubevirt. yamlserviceaccount kubevirt-apiserver created. . . output . . . customresourcedefinition offlinevirtualmachines. kubevirt. io createdLet’s make sure that all the pods are running. $ kubectl get pod -n kube-system -l 'kubevirt. io'NAME READY STATUS RESTARTS AGEvirt-api-747745669-62cww 1/1 Running 0 4mvirt-api-747745669-qtn7f 1/1 Running 0 4mvirt-controller-648945bbcb-dfpwm 0/1 Running 0 4mvirt-controller-648945bbcb-tppgx 1/1 Running 0 4mvirt-handler-xlfc2 1/1 Running 0 4mvirt-handler-z5lsh 1/1 Running 0 4mSkydive: I have used Skydive in the past. It is a great tool to understand the topology of software-defined-networking. The only caveat is that Skydive doesn’t create a complete topology when using Flannel but there is still a good picture of what is going on. So with that let’s go ahead and install. kubectl create ns skydivekubectl create -n skydive -f https://raw. githubusercontent. com/skydive-project/skydive/master/contrib/kubernetes/skydive. yamlCheck the status of Skydive agent and analyzer $ kubectl get pod -n skydiveNAME READY STATUS RESTARTS AGEskydive-agent-5hh8k 1/1 Running 0 5mskydive-agent-c29l7 1/1 Running 0 5mskydive-analyzer-5db567b4bc-m77kq 2/2 Running 0 5mingress-nginx: To provide external access our example NodeJS application we need to an ingress controller. 
For this example we are going to use ingress-nginx I created a simple script ingress. sh that follows the installation documentation for ingress-nginx with a couple minor modifications: Patch the nginx-configuration ConfigMap to enable vts status Add an additional containerPort to the deployment and an additional port to the service. Create an ingress to access nginx status page The script and additional files are available in the github repo listed below. git clone https://github. com/jcpowermac/kubevirt-network-deepdivecd kubevirt-network-deepdive/kubernetes/ingressbash ingress. shAfter the script is complete confirm that ingress-nginx pods are running. $ kubectl get pod -n ingress-nginxNAME READY STATUS RESTARTS AGEdefault-http-backend-55c6c69b88-jpl95 1/1 Running 0 1mnginx-ingress-controller-85c8787886-vf5tp 1/1 Running 0 1mKubeVirt Virtual MachinesNow, we are at a point where we can deploy our first KubeVirt virtual machines. These instances are where we will install our simple NodeJS and MongoDB application. Create objects: Let’s create a clean new namespace to use. $ kubectl create ns nodejs-exnamespace nodejs-ex createdThe nodejs-ex. yaml contains multiple objects. The definitions for our two virtual machines - mongodb and nodejs. Two Kubernetes Services and a one Kubernetes Ingress object. These instances will be created as offline virtual machines so after kubectl create we will start them up. $ kubectl create -f https://raw. githubusercontent. com/jcpowermac/kubevirt-network-deepdive/master/kubernetes/nodejs-ex. yaml -n nodejs-exofflinevirtualmachine nodejs createdofflinevirtualmachine mongodb createdservice mongodb createdservice nodejs createdingress nodejs createdStart the nodejs virtual machine $ kubectl patch offlinevirtualmachine nodejs --type merge -p '{ spec :{ running :true}}' -n nodejs-exofflinevirtualmachine nodejs patchedStart the mongodb virtual machine $ kubectl patch offlinevirtualmachine mongodb --type merge -p '{ spec :{ running :true}}' -n nodejs-exofflinevirtualmachine mongodb patchedReview kubevirt virtual machine objects $ kubectl get ovms -n nodejs-exNAME AGEmongodb 7mnodejs 7m$ kubectl get vms -n nodejs-exNAME AGEmongodb 4mnodejs 5mWhere are the virtual machines and what is their IP address? $ kubectl get pod -o wide -n nodejs-exNAME READY STATUS RESTARTS AGE IP NODEvirt-launcher-mongodb-qdpmg 2/2 Running 0 4m 10. 244. 2. 7 kn2. virtomation. comvirt-launcher-nodejs-5r59c 2/2 Running 0 4m 10. 244. 1. 8 kn1. virtomation. comNote To test virtual machine to virtual machine network connectivity I purposely set the host where which instance would run by using a nodeSelector. Installing the NodeJS Example Application: To quickly deploy our example application Ansible project is included in the repository. Two inventory files need to be modified before executing ansible-playbook. Within all. yml change the analyzers IP address to what is listed in the command below. $ kubectl get endpoints -n skydiveNAME ENDPOINTS AGEskydive-analyzer 10. 244. 1. 2:9200,10. 244. 1. 2:12379,10. 244. 1. 2:8082 + 1 more. . . 18hAnd finally use the IP Addresses from the kubectl get pod -o wide -n nodejs-ex command (example above) to modify inventory/hosts. ini. Now we can run ansible-playbook. cd kubevirt-network-deepdive/ansiblevim inventory/group_vars/all. ymlvim inventory/hosts. iniansible-playbook -i inventory/hosts. ini playbook/main. yml. . . output . . . Determine Ingress URL: First let’s find the host. This is defined within the Ingress object. In this case it is nodejs. 
ingress. virtomation. com. $ kubectl get ingress -n nodejs-exNAME HOSTS ADDRESS PORTS AGEnodejs nodejs. ingress. virtomation. com 80 22mWhat are the NodePorts? For this installation Service spec was modified to include nodePort for http (30000) and http-mgmt (32000). Note When deploying ingress-nginx using the provided Service definition the nodePort is undefined. Kubernetes will assign a random port to ports defined in the spec. $ kubectl get service ingress-nginx -n ingress-nginxNAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGEingress-nginx NodePort 10. 110. 173. 97 <none> 80:30000/TCP,443:30327/TCP,18080:32000/TCP 52mWhat node is the nginx-ingress controller running on? This is needed to configure DNS. $ kubectl get pod -n ingress-nginx -o wideNAME READY STATUS RESTARTS AGE IP NODEdefault-http-backend-55c6c69b88-jpl95 1/1 Running 0 53m 10. 244. 1. 3 kn1. virtomation. comnginx-ingress-controller-85c8787886-vf5tp 1/1 Running 0 53m 10. 244. 1. 4 kn1. virtomation. comConfigure DNS: In my homelab I am using dnsmasq. To support ingress add the host where the controller is running as an A record. [root@dns1 ~]# cat /etc/dnsmasq. d/virtomation. conf. . . output . . . address=/km1. virtomation. com/172. 31. 50. 221address=/kn1. virtomation. com/172. 31. 50. 231address=/kn2. virtomation. com/172. 31. 50. 232# Needed for nginx-ingressaddress=/. ingress. virtomation. com/172. 31. 50. 231. . . output . . . Restart dnsmasq for the new config systemctl restart dnsmasqTesting our application: This application uses MongoDB to store the views of the website. Listing the count-value shows that the database is connected and networking is functioning correctly. $ curl http://nodejs. ingress. virtomation. com:30000/<!doctype html><html lang= en >. . . output. . . <p>Page view count:<span class= code id= count-value >7</span></p>. . . output. . . KubeVirt NetworkingNow that we shown that kubernetes, kubevirt, ingress-nginx and flannel work together how is it accomplished? First let’s go over what is going on in kubevirt specifically. KubeVirt networking virt-launcher - virtwrap: virt-launcher is the pod that runs the necessary components instantiate and run a virtual machine. We are only going to concentrate on the network portion in this post. virtwrap manager: Before the virtual machine is started the preStartHook will run SetupPodNetwork. SetupPodNetwork → SetupDefaultPodNetwork: This function calls three functions that are detailed below discoverPodNetworkInterface, preparePodNetworkInterface and StartDHCP discoverPodNetworkInterface: This function gathers the following information about the pod interface: IP Address Routes Gateway MAC address This is stored for later use in configuring DHCP. preparePodNetworkInterfaces: Once the current details of the pod interface have been stored following operations are performed: Delete the IP address from the pod interface Set the pod interface down Change the pod interface MAC address Set the pod interface up Create the bridge Add the pod interface to the bridge This will provide libvirt a bridge to use for the virtual machine that will be created. StartDHCP → DHCPServer → SingleClientDHCPServer: This DHCP server only provides a single address to a client in this case the virtual machine that will be started. The network details - the IP address, gateway, routes, DNS servers and suffixes are taken from the pod which will be served to the virtual machine. 
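The steps above are implemented in Go inside virt-launcher, but purely as an illustration (not the actual KubeVirt code) the same bridge plumbing could be reproduced by hand with iproute2, assuming the pod interface is eth0 and the bridge is br1:
ip addr flush dev eth0                        # delete the pod IP from the pod interface
ip link set eth0 down
ip link set eth0 address 52:54:00:12:34:56    # hypothetical replacement MAC for the uplink
ip link set eth0 up
ip link add br1 type bridge                   # create the bridge the VM will attach to
ip link set br1 up
ip link set eth0 master br1                   # add the pod interface to the bridge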
Networking in detailNow that we have a clearier picture of kubevirt networking we will continue with details regarding kubernetes objects, host, pod and virtual machine networking components. Then we will finish up with two scenarios: virtual machine to virtual machine communication and ingress to virtual machine. Kubernetes-level: services: There are two services defined in the manifest that was deployed above. One each for mongodb and nodejs applications. This allows us to use the hostname mongodb to connect to MongoDB. Review DNS for Services and Pods for additional information. $ kubectl get services -n nodejs-exNAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGEmongodb ClusterIP 10. 108. 188. 170 <none> 27017/TCP 3hnodejs ClusterIP 10. 110. 233. 114 <none> 8080/TCP 3hendpoints: The endpoints below were automatically created because there was a selector spec: selector: kubevirt. io: virt-launcher kubevirt. io/domain: nodejsdefined in the Service object. $ kubectl get endpoints -n nodejs-exNAME ENDPOINTS AGEmongodb 10. 244. 2. 7:27017 1hnodejs 10. 244. 1. 8:8080 1hingress: Also defined in the manifest was the ingress object. This will allow us to contact the NodeJS example application using a URL. $ kubectl get ingress -n nodejs-exNAME HOSTS ADDRESS PORTS AGEnodejs nodejs. ingress. virtomation. com 80 3hHost-level: interfaces: A few important interfaces to note. The flannel. 1 interface is type vxlan for connectivity between hosts. I removed from the ip a output the veth interfaces but the details are shown further below with bridge link show. [root@kn1 ~]# ip a. . . output. . . 2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP qlen 1000 link/ether 52:54:00:97:a6:ee brd ff:ff:ff:ff:ff:ff inet 172. 31. 50. 231/24 brd 172. 31. 50. 255 scope global eth0 valid_lft forever preferred_lft forever inet6 fe80::5054:ff:fe97:a6ee/64 scope link valid_lft forever preferred_lft forever. . . output. . . 4: flannel. 1: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1450 qdisc noqueue state UNKNOWN link/ether ce:4e:fb:41:1d:af brd ff:ff:ff:ff:ff:ff inet 10. 244. 1. 0/32 scope global flannel. 1 valid_lft forever preferred_lft forever inet6 fe80::cc4e:fbff:fe41:1daf/64 scope link valid_lft forever preferred_lft forever5: cni0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1450 qdisc noqueue state UP qlen 1000 link/ether 0a:58:0a:f4:01:01 brd ff:ff:ff:ff:ff:ff inet 10. 244. 1. 1/24 scope global cni0 valid_lft forever preferred_lft forever inet6 fe80::341b:eeff:fe06:7ec/64 scope link valid_lft forever preferred_lft forever. . . output. . . cni0 is a bridge where one side of the veth interface pair is attached. 
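A related trick, not shown in the original output but handy when mapping these bridge ports back to pods, is to compare the peer interface index seen inside the pod with the interface list on the host (pod name reused from earlier as an example):
kubectl exec -n nodejs-ex -c compute virt-launcher-nodejs-5r59c -- cat /sys/class/net/eth0/iflink   # prints the host-side peer ifindex
ip -o link | grep '^<ifindex>:'   # substitute the number printed above to find the matching veth on the host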
[root@kn1 ~]# bridge link show6: vethb4424886 state UP @docker0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1450 master cni0 state forwarding priority 32 cost 27: veth1657737b state UP @docker0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1450 master cni0 state forwarding priority 32 cost 28: vethdfd32c87 state UP @docker0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1450 master cni0 state forwarding priority 32 cost 29: vethed0f8c9a state UP @docker0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1450 master cni0 state forwarding priority 32 cost 210: veth05e4e005 state UP @docker0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1450 master cni0 state forwarding priority 32 cost 211: veth25933a54 state UP @docker0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1450 master cni0 state forwarding priority 32 cost 212: vethe3d701e7 state UP @docker0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1450 master cni0 state forwarding priority 32 cost 2routes: The pod network subnet is 10. 244. 0. 0/16 and broken up per host: km1 - 10. 244. 0. 0/24 kn1 - 10. 244. 1. 0/24 kn2 - 10. 244. 2. 0/24 So the table will route the packets to correct interface. [root@kn1 ~]# ip rdefault via 172. 31. 50. 1 dev eth010. 244. 0. 0/24 via 10. 244. 0. 0 dev flannel. 1 onlink10. 244. 1. 0/24 dev cni0 proto kernel scope link src 10. 244. 1. 110. 244. 2. 0/24 via 10. 244. 2. 0 dev flannel. 1 onlink172. 17. 0. 0/16 dev docker0 proto kernel scope link src 172. 17. 0. 1172. 31. 50. 0/24 dev eth0 proto kernel scope link src 172. 31. 50. 231iptables: To also support kubernetes services kube-proxy writes iptables rules for those services. In the output below you can see our mongodb and nodejs services with destination NAT rules defined. For more information regarding iptables and services refer to debug-service in the kubernetes documentation. [root@kn1 ~]# iptables -n -L -t nat | grep nodejs-exKUBE-MARK-MASQ all -- 10. 244. 1. 8 0. 0. 0. 0/0 /* nodejs-ex/nodejs: */DNAT tcp -- 0. 0. 0. 0/0 0. 0. 0. 0/0 /* nodejs-ex/nodejs: */ tcp to:10. 244. 1. 8:8080KUBE-MARK-MASQ all -- 10. 244. 2. 7 0. 0. 0. 0/0 /* nodejs-ex/mongodb: */DNAT tcp -- 0. 0. 0. 0/0 0. 0. 0. 0/0 /* nodejs-ex/mongodb: */ tcp to:10. 244. 2. 7:27017KUBE-MARK-MASQ tcp -- !10. 244. 0. 0/16 10. 108. 188. 170 /* nodejs-ex/mongodb: cluster IP */ tcp dpt:27017KUBE-SVC-Z7W465PEPK7G2UVQ tcp -- 0. 0. 0. 0/0 10. 108. 188. 170 /* nodejs-ex/mongodb: cluster IP */ tcp dpt:27017KUBE-MARK-MASQ tcp -- !10. 244. 0. 0/16 10. 110. 233. 114 /* nodejs-ex/nodejs: cluster IP */ tcp dpt:8080KUBE-SVC-LATB7COHB4ZMDCEC tcp -- 0. 0. 0. 0/0 10. 110. 233. 114 /* nodejs-ex/nodejs: cluster IP */ tcp dpt:8080KUBE-SEP-JOPA2J4R76O5OVH5 all -- 0. 0. 0. 0/0 0. 0. 0. 0/0 /* nodejs-ex/nodejs: */KUBE-SEP-QD4L7MQHCIVOWZAO all -- 0. 0. 0. 0/0 0. 0. 0. 0/0 /* nodejs-ex/mongodb: */Pod-level: interfaces: The bridge br1 is the main focus in the pod level. It contains the eth0 and vnet0 ports. eth0 becomes the uplink to the bridge which is the other side of the veth pair which is a port on the host’s cni0 bridge. Important Since eth0 has no IP address and br1 is in the self-assigned range the pod has no network access. There are also no routes in the pod. This can be resolved for troubleshooting by creating a veth pair, adding one of the interfaces to the bridge and assigning an IP address in the pod subnet for the host. Routes are also required to be added. This is performed for running skydive in the pod see skydive. sh for more details. $ kubectl exec -n nodejs-ex -c compute virt-launcher-nodejs-5r59c -- ip a. . . output. . . 
3: eth0@if12: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1450 qdisc noqueue master br1 state UP group default link/ether a6:97:da:96:cf:07 brd ff:ff:ff:ff:ff:ff link-netnsid 0 inet6 fe80::a497:daff:fe96:cf07/64 scope link valid_lft forever preferred_lft forever4: br1: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1450 qdisc noqueue state UP group default link/ether 32:8a:f5:59:10:02 brd ff:ff:ff:ff:ff:ff inet 169. 254. 75. 86/32 brd 169. 254. 75. 86 scope global br1 valid_lft forever preferred_lft forever inet6 fe80::a497:daff:fe96:cf07/64 scope link valid_lft forever preferred_lft forever5: vnet0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1450 qdisc pfifo_fast master br1 state UNKNOWN group default qlen 1000 link/ether fe:58:0a:f4:01:08 brd ff:ff:ff:ff:ff:ff inet6 fe80::fc58:aff:fef4:108/64 scope link valid_lft forever preferred_lft foreverShowing the bridge br1 member ports. $ kubectl exec -n nodejs-ex -c compute virt-launcher-nodejs-5r59c -- bridge link show3: eth0 state UP @if12: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1450 master br1 state forwarding priority 32 cost 25: vnet0 state UNKNOWN : <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1450 master br1 state forwarding priority 32 cost 100DHCP: The virtual machine network is configured by DHCP. You can see virt-launcher has UDP port 67 open on the br1 interface to serve DHCP to the virtual machine. $ kubectl exec -n nodejs-ex -c compute virt-launcher-nodejs-5r59c -- ss -tuapnNetid State Recv-Q Send-Q Local Address:Port Peer Address:Portudp UNCONN 0 0 0. 0. 0. 0%br1:67 0. 0. 0. 0:* users:(( virt-launcher ,pid=10,fd=12))libvirt: With virsh domiflist we can also see that the vnet0 interface is a port on the br1 bridge. $ kubectl exec -n nodejs-ex -c compute virt-launcher-nodejs-5r59c -- virsh domiflist nodejs-ex_nodejsInterface Type Source Model MACvnet0 bridge br1 e1000 0a:58:0a:f4:01:08VM-level: interfaces: Fortunately the vm interfaces are fairly typical. Just the single interface that has been assigned the original pod ip address. Warning The MTU of the virtual machine interface is set to 1500. The network interfaces upstream are set to 1450. [fedora@nodejs ~]$ ip a. . . output. . . 2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc fq_codel state UP group default qlen 1000 link/ether 0a:58:0a:f4:01:08 brd ff:ff:ff:ff:ff:ff inet 10. 244. 1. 8/24 brd 10. 244. 1. 255 scope global dynamic eth0 valid_lft 86299761sec preferred_lft 86299761sec inet6 fe80::858:aff:fef4:108/64 scope link valid_lft forever preferred_lft foreverDNS: Just quickly wanted to cat the /etc/resolv. conf file to show that DNS is configured so that kube-dns will be properly queried. [fedora@nodejs ~]$ cat /etc/resolv. conf; generated by /usr/sbin/dhclient-scriptsearch nodejs-ex. svc. cluster. local. svc. cluster. local. cluster. local. nameserver 10. 96. 0. 10VM to VM communication: The virtual machines are on differnet hosts. This was done purposely to show that connectivity between virtual machine and hosts. Here we finally get to use Skydive. The real-time topology below along with arrows annotate the flow of packets between the host, pod and virtual machine network devices. VM to VM Connectivity Tests: To confirm connectivity we are going to do a few things. First check for DNS resolution for the mongodb service. Next look a established connection to MongoDB and finally check the NodeJS logs looking for confirmation of database connection. DNS resolution: Service-based DNS resolution is an important feature of Kubernetes. 
Since dig,host or nslookup are not installed in our virtual machine a quick python script fills in. This output below shows that the mongodb name is available for resolution. [fedora@nodejs ~]$ python3 -c import socket;print(socket. gethostbyname('mongodb. nodejs-ex. svc. cluster. local')) 10. 108. 188. 170[fedora@nodejs ~]$ python3 -c import socket;print(socket. gethostbyname('mongodb')) 10. 108. 188. 170TCP connection: After connecting to the nodejs virtual machine via ssh we can use ss to determine the current TCP connections. We are specifically looking for the established connections to the MongoDB service that is running on the mongodb virtual machine on node kn2. [fedora@nodejs ~]$ ss -tanpState Recv-Q Send-Q Local Address:Port Peer Address:Port. . . output . . . LISTEN 0 128 *:8080 *:*ESTAB 0 0 10. 244. 1. 8:47826 10. 108. 188. 170:27017ESTAB 0 0 10. 244. 1. 8:47824 10. 108. 188. 170:27017. . . output . . . Logs: [fedora@nodejs ~]$ journalctl -u nodejs. . . output. . Apr 18 20:07:37 nodejs. localdomain node[4303]: Connected to MongoDB at: mongodb://nodejs:nodejspassword@mongodb/nodejs. . . output. . . Ingress to VM communication: The topology image below shows the packet flow when using a ingress kubernetes object. The commands below the image will provide additional details. Ingress to VM The kube-proxy has port 30000 open that was defined by the nodePort of the ingress-nginx service. Additional details on kube-proxy and iptables role is available from Service - IPs and VIPs in the Kubernetes documentation. [root@kn1 ~]# ss -tanp | grep 30000LISTEN 0 128 :::30000 :::* users:(( kube-proxy ,pid=6534,fd=13))[root@kn1 ~]# iptables -n -L -t nat | grep ingress-nginx/ingress-nginx | grep http | grep -v https | grep -v http-mgmtKUBE-MARK-MASQ tcp -- 0. 0. 0. 0/0 0. 0. 0. 0/0 /* ingress-nginx/ingress-nginx:http */ tcp dpt:30000KUBE-SVC-REQ4FPVT7WYF4VLA tcp -- 0. 0. 0. 0/0 0. 0. 0. 0/0 /* ingress-nginx/ingress-nginx:http */ tcp dpt:30000KUBE-MARK-MASQ all -- 10. 244. 1. 4 0. 0. 0. 0/0 /* ingress-nginx/ingress-nginx:http */DNAT tcp -- 0. 0. 0. 0/0 0. 0. 0. 0/0 /* ingress-nginx/ingress-nginx:http */ tcp to:10. 244. 1. 4:80KUBE-MARK-MASQ tcp -- !10. 244. 0. 0/16 10. 110. 173. 97 /* ingress-nginx/ingress-nginx:http cluster IP */ tcp dpt:80KUBE-SVC-REQ4FPVT7WYF4VLA tcp -- 0. 0. 0. 0/0 10. 110. 173. 97 /* ingress-nginx/ingress-nginx:http cluster IP */ tcp dpt:80KUBE-SEP-BKJT4JXHZ3TCOTKA all -- 0. 0. 0. 0/0 0. 0. 0. 0/0 /* ingress-nginx/ingress-nginx:http */Since the ingress-nginx pod is on the same host as the nodejs virtual machine we just need to be routed to the cni0 bridge to communicate with the pod and vm. [root@kn1 ~]# ip r. . . output. . . 10. 244. 1. 0/24 dev cni0 proto kernel scope link src 10. 244. 1. 1. . . output. . . Connectivity Tests: In the section where we installed the application we already tested for connectivity but let’s take this is little further to confirm. Nginx Vhost Traffic Status: ingress-nginx provides an optional setting to enable traffic status - which we already enabled. The screenshot below shows the requests that Nginx is receiving for nodejs. ingress. virtomation. com. nginx-vts Service NodePort to Nginx Pod: My tcpdump fu is lacking so I found an example query that will provide the details we are looking for. I removed a significant amount of the content but you can see my desktop (172. 31. 51. 52) create a GET request to the NodePort 30000. 
Service NodePort to Nginx Pod: My tcpdump fu is lacking, so I found an example query that provides the details we are looking for. I removed a significant amount of the content, but you can see my desktop (172.31.51.52) create a GET request to the NodePort 30000. This could also have been done in Skydive, but I wanted to provide an alternative in case you didn't want to install it or would rather stick to the CLI. # tcpdump -nni eth0 -A -s 0 'tcp port 30000 and (((ip[2:2] - ((ip[0]&0xf)<<2)) - ((tcp[12]&0xf0)>>2)) != 0)' . . . output . . . 13:24:52.197092 IP 172.31.51.52.36494 > 172.31.50.231.30000: Flags [P.], seq 2685726663:2685727086, ack 277056091, win 491, options [nop,nop,TS val 267689990 ecr 151714950], length 423 E....@.?.Z...34..2...u0......[....r............GET / HTTP/1.1 Host: nodejs.ingress.virtomation.com:30000 User-Agent: Mozilla/5.0 (X11; Fedora; Linux x86_64; rv:59.0) Gecko/20100101 Firefox/59.0 Accept: text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8 Accept-Language: en-US,en;q=0.5 Accept-Encoding: gzip, deflate Connection: keep-alive Upgrade-Insecure-Requests: 1 If-None-Match: W/ 9edb-O5JGhneli0eCE6G2kFY5haMKg5k Cache-Control: max-age=0 13:24:52.215284 IP 172.31.50.231.30000 > 172.31.51.52.36494: Flags [P.], seq 1:2362, ack 423, win 236, options [nop,nop,TS val 151723713 ecr 267689990], length 2361 E.m|.@.?.....2...34u0.....[...n..................HTTP/1.1 200 OK Server: nginx/1.13.12 Date: Fri, 20 Apr 2018 13:24:52 GMT Content-Type: text/html; charset=utf-8 Transfer-Encoding: chunked Connection: keep-alive Vary: Accept-Encoding X-Powered-By: Express ETag: W/ 9edb-SZeP35LuygZ9MOrPTIySYOu9sAE Content-Encoding: gzip Nginx Pod to NodeJS VM: In (1) we can see flows to and from 10.244.1.4 and 10.244.1.8. .8 is the nodejs virtual machine and .4 is, as listed below, the nginx-ingress-controller. $ kubectl get pod --all-namespaces -o wide NAMESPACE NAME READY STATUS RESTARTS AGE IP NODE . . . output . . . ingress-nginx nginx-ingress-controller-85c8787886-vf5tp 1/1 Running 0 1d 10.244.1.4 kn1.virtomation.com . . . output . . . ingress-vm Final Thoughts: We have gone through quite a bit in this deep dive, from installation and KubeVirt-specific networking details to the Kubernetes, host, pod and virtual machine level configurations, finishing up with the packet flow from virtual machine to virtual machine and from ingress to virtual machine. " }, { - "id": 150, + "id": 149, "url": "/2018/This-Week-in-Kube-Virt-22.html", "title": "This Week In Kube Virt 22", "author" : "fabiand", "tags" : "release notes, changelog", "body": "This is a close-to weekly update from the KubeVirt team. In general there is now more work happening outside of the core kubevirt repository. We are currently driven by Building a solid user-story around KubeVirt Caring about end-to-end (backend, core, ui) Getting dependencies into shape (storage) Improve the user-experience for users (UI, deployment) Being easier to be used on Kubernetes and OpenShift Within the last two weeks we achieved to: Release KubeVirt v0. 4. 0(https://github. com/kubevirt/kubevirt/releases/tag/v0. 4. 0) Many networking fixes (@mlsorensen @vladikr)(https://github. com/kubevirt/kubevirt/pull/870https://github. com/kubevirt/kubevirt/pull/869https://github. com/kubevirt/kubevirt/pull/847https://github. com/kubevirt/kubevirt/pull/856https://github. com/kubevirt/kubevirt/pull/839https://github. com/kubevirt/kubevirt/pull/830) Aligned config reading for virtctl (@rmohr)(https://github. com/kubevirt/kubevirt/pull/860) Subresource Aggregated API server for console endpoints (@vossel)(https://github. com/kubevirt/kubevirt/pull/770) Enable OpenShift tests in CI (@alukiano @rmohr)(https://github.
com/kubevirt/kubevirt/pull/833) virtctl convenience functions for start/stop of VMs (@sgott)(https://github. com/kubevirt/kubevirt/pull/817) Ansible - Improved Gluster support for kubevirt-ansible(https://github. com/kubevirt/kubevirt-ansible/pull/174) POC Device Plugins for KVM and network (@mpolednik @phoracek)https://github. com/kubevirt/kubernetes-device-plugins In addition to this, we are also working on: Additional network glue approach (@vladikr)(https://github. com/kubevirt/kubevirt/pull/787) CRD validation using OpenAPIv3 (@vossel)(https://github. com/kubevirt/kubevirt/pull/850) Windows VM tests (@alukiano)(https://github. com/kubevirt/kubevirt/pull/809) Data importer - Functional tests(https://github. com/kubevirt/containerized-data-importer/pull/81) Take a look at the pulse, to get an overview over all changes of thisweek: https://github. com/kubevirt/kubevirt/pulse Finally you can view our open issues athttps://github. com/kubevirt/kubevirt/issues And keep track of events at our calendar18pc0jur01k8f2cccvn5j04j1g@group. calendar. google. com If you need some help or want to chat you can find us on<irc://irc. freenode. net/#kubevirt> " }, { - "id": 151, + "id": 150, "url": "/2018/changelog-v0.4.0.html", "title": "KubeVirt v0.4.0", "author" : "kube🤖", "tags" : "release notes, changelog", "body": "v0. 4. 0: Released on: Fri Apr 6 16:40:31 2018 +0200 Fix several networking issues Add and enable OpenShift support to CI Add conditional Windows tests (if an image is present) Add subresources for console access virtctl config alignmnet with kubectl Fix API reference generation Stable UUIDs for OfflineVirtualMachines Build virtctl for MacOS and Windows Set default architecture to x86_64 Major improvement to the CI infrastructure (all containerized) virtctl convenience functions for starting and stopping a VM" }, { - "id": 152, + "id": 151, "url": "/2018/This-Week-in-Kube-Virt-21.html", "title": "This Week In Kube Virt 21", "author" : "fabiand", "tags" : "release notes, changelog", "body": "This is a weekly update from the KubeVirt team. In general there is now more work happening outside of the core kubevirtrepository. We are currently driven by Building a solid user-story around KubeVirt Caring about end-to-end (backend, core, ui) Getting dependencies into shape (storage) Improve the user-experience for users (UI, deployment) Being easier to be used on Kubernetes and OpenShift Within the last two weeks we achieved to: Multi platform (Windows, Mac, Linux) support for virtctl (@slintes)(https://github. com/kubevirt/kubevirt/pull/811) Stable UUIDs for OfflineVirtualMachines (@fromanirh)(https://github. com/kubevirt/kubevirt/pull/766) OpenShift support for CI (@alukiano, @rmohr)(https://github. com/kubevirt/kubevirt/pull/792) v2v improvements - for easier imports of existing VMs (@pkliczewski)(https://github. com/kubevirt/v2v-job) Data importer - to import existing disk images (@copejon @jeffvance)(https://github. com/kubevirt/containerized-data-importer) POC Device Plugins for KVM and network (@mpolednik @phoracek)https://github. com/kubevirt/kubernetes-device-plugins In addition to this, we are also working on: Subresources for consoles (@davidvossel)(https://github. com/kubevirt/kubevirt/pull/770) Additional network glue approach (@vladikr)(https://github. com/kubevirt/kubevirt/pull/787) virtctl convenience functions for start/stop of VMs (@sgott)(https://github. com/kubevirt/kubevirt/pull/817) Take a look at the pulse, to get an overview over all changes of thisweek: https://github. 
com/kubevirt/kubevirt/pulse Finally you can view our open issues athttps://github. com/kubevirt/kubevirt/issues And keep track of events at our calendar18pc0jur01k8f2cccvn5j04j1g@group. calendar. google. com If you need some help or want to chat you can find us on<irc://irc. freenode. net/#kubevirt> " }, { - "id": 153, + "id": 152, "url": "/2018/This-Week-in-Kube-Virt-20.html", "title": "This Week In Kube Virt 20", "author" : "fabiand", "tags" : "release notes, changelog", "body": "This is a weekly update from the KubeVirt team. We are currently driven by Building a solid user-story around KubeVirt Caring about end-to-end (backend, core, ui) Getting dependencies into shape (storage) Improve the user-experience for users (UI, deployment) Being easier to be used on Kubernetes and OpenShift Within the last two weeks we achieved to: Released KubeVirt v0. 3. 0https://github. com/kubevirt/kubevirt/releases/tag/v0. 3. 0 Merged VirtualMachinePresets (@stu-gott)(https://github. com/kubevirt/kubevirt/pull/652) Merged OfflineVirtualMachine (@pkotas)(https://github. com/kubevirt/kubevirt/pull/667) Merged ephemeral disk support (@alukiano)(https://github. com/kubevirt/kubevirt/pull/728) Fixes to test KubeVirt on OpenShift (@alukiano)(https://github. com/kubevirt/kubevirt/pull/774) Scheduler awareness of VM pods (@vladikr)(https://github. com/kubevirt/kubevirt/pull/673) Plain text inline cloud-init (@alukiano)(https://github. com/kubevirt/kubevirt/pull/757) Define guest specific labels to be used with presets (@yanirq)(https://github. com/kubevirt/kubevirt/pull/767) Special note: A ton of automation, CI, and test fixes (@rmohr) In addition to this, we are also working on: Stable UUIDs for OfflineVirtualMachines (@fromanirh)(https://github. com/kubevirt/kubevirt/pull/766) Subresources for consoles (@davidvossel)(https://github. com/kubevirt/kubevirt/pull/770) Additional network glue approach (@vladikr)(https://github. com/kubevirt/kubevirt/pull/787) Improvement for testing on OpenShift (@alukiano)(https://github. com/kubevirt/kubevirt/pull/792) Take a look at the pulse, to get an overview over all changes of thisweek: https://github. com/kubevirt/kubevirt/pulse Finally you can view our open issues athttps://github. com/kubevirt/kubevirt/issues And keep track of events at our calendar18pc0jur01k8f2cccvn5j04j1g@group. calendar. google. com If you need some help or want to chat you can find us on<irc://irc. freenode. net/#kubevirt> " }, { - "id": 154, + "id": 153, "url": "/2018/changelog-v0.3.0.html", "title": "KubeVirt v0.3.0", "author" : "kube🤖", "tags" : "release notes, changelog", "body": "v0. 3. 0: Released on: Thu Mar 8 10:21:57 2018 +0100 Kubernetes compatible networking Kubernetes compatible PV based storage VirtualMachinePresets support OfflineVirtualMachine support RBAC improvements Switch to q35 machien type by default A large number of test and CI fixes Ephemeral disk support" }, { - "id": 155, + "id": 154, "url": "/2018/This-Week-in-Kube-Virt-19.html", "title": "This Week In Kube Virt 19", "author" : "fabiand", "tags" : "release notes, changelog", "body": "This is a bi-weekly update from the KubeVirt team. 
We are currently driven by Building a solid user-story around KubeVirt Caring about end-to-end (backend, core, ui) Getting dependencies into shape (storage) Improve the user-experience for users (UI, deployment) Being easier to be used on Kubernetes and OpenShift Within the last two weeks we achieved to: Support for native file-system PVs as disk storage (@alukiano,@davidvossel) (https://github. com/kubevirt/kubevirt/pull/734,https://github. com/kubevirt/kubevirt/pull/671) Support for native pod networking for VMs (@vladikr)(https://github. com/kubevirt/kubevirt/pull/686) Many patches to improve kubevirt-ansible usability(https://github. com/kubevirt/kubevirt-ansible/pulse/monthly) Introduce the kubernetes-device-plugins (@mpolednik)(https://github. com/kubevirt/kubernetes-device-plugins/) Introduce the kubernetes-device-plugin for bridge networking(@mpolednik)(https://github. com/kubevirt/kubernetes-device-plugins/pull/4) Add vendor/ tree (@davidvossel)(https://github. com/kubevirt/kubevirt/pull/715) Expose disk bus (@fromani)(https://github. com/kubevirt/kubevirt/pull/672) Allow deploying OpenShift in vagrant (@alukiano)(https://github. com/kubevirt/kubevirt/pull/631) Release of v0. 3. 0-alpha. 3(https://github. com/kubevirt/kubevirt/releases/tag/v0. 3. 0-alpha. 3) In addition to this, we are also working on: Implement VirtualMachinePresets (@stu-gott)(https://github. com/kubevirt/kubevirt/pull/652) Implement OfflineVirtualMachines (@pkotas)(https://github. com/kubevirt/kubevirt/pull/667) Expose CPU requirements in VM pod (@vladikr)(https://github. com/kubevirt/kubevirt/pull/673) Take a look at the pulse, to get an overview over all changes of thisweek: https://github. com/kubevirt/kubevirt/pulse Finally you can view our open issues athttps://github. com/kubevirt/kubevirt/issues And keep track of events at our calendar18pc0jur01k8f2cccvn5j04j1g@group. calendar. google. com If you need some help or want to chat you can find us on<irc://irc. freenode. net/#kubevirt> " }, { - "id": 156, + "id": 155, "url": "/2018/This-Week-in-Kube-Virt-18.html", "title": "This Week In Kube Virt 18", "author" : "fabiand", "tags" : "release notes, changelog", "body": "This is a weekly update from the KubeVirt team. We are currently driven by Building a solid user-story around KubeVirt Caring about end-to-end (backend, core, ui) Rework our architecture Getting dependencies into shape (storage) Improve the user-experience for users (UI, deployment) Being easier to be used on Kubernetes and OpenShift Within the last weeks we achieved to: Move to a decentralized the architecture (@davidvossel)(https://github. com/kubevirt/kubevirt/pull/663) Drop live migration for now (@davidvossel)(https://github. com/kubevirt/kubevirt/pull/702) Change default network provider to flannel (@alukiano)(https://github. com/kubevirt/kubevirt/pull/710) Adjust uuid API (@mpolednik)(https://github. com/kubevirt/kubevirt/pull/675) Make cirros and alpine ready for q35 (@rmohr)(https://github. com/kubevirt/kubevirt/pull/688) In addition to this, we are also working on: Decentralized pod networking (@vladikr)(https://github. com/kubevirt/kubevirt/pull/686) Implement VirtualMachinePresets (@stu-gott)(https://github. com/kubevirt/kubevirt/pull/652) Implement OfflineVirtualMachines (@pkotas)(https://github. com/kubevirt/kubevirt/pull/667) Allow deploying OpenShift in vagrant (@alukiano)(https://github. com/kubevirt/kubevirt/pull/631) Expose CPU requirements in VM pod (@vladikr)(https://github. 
com/kubevirt/kubevirt/pull/673) Add support for PVs via kubelet (@alukiano)(https://github. com/kubevirt/kubevirt/pull/671) Take a look at the pulse, to get an overview over all changes of thisweek: https://github. com/kubevirt/kubevirt/pulse Finally you can view our open issues athttps://github. com/kubevirt/kubevirt/issues And keep track of events at our calendar18pc0jur01k8f2cccvn5j04j1g@group. calendar. google. com If you need some help or want to chat you can find us on<irc://irc. freenode. net/#kubevirt> " }, { - "id": 157, + "id": 156, "url": "/2018/This-Week-in-Kube-Virt-17.html", "title": "This Week In Kube Virt 17", "author" : "fabiand", "tags" : "release notes, changelog", "body": "This is a weekly update from the KubeVirt team. We are currently driven by Building a solid user-story around KubeVirt Caring about end-to-end (backend, core, ui) Rework our architecture Getting dependencies into shape (storage) Improve the user-experience for users (UI, deployment) Being easier to be used on Kubernetes and OpenShift Over the weekend you could have seen our talks at devconf. cz: “Kubernetes Cloud Autoscaler for IsolatedWorkloads” by @rmohr “Outcast: Virtualization in a containerworld?” by @fabiand Within the last weeks we achieved to: Introduced Fedora Cloud image for testing (@davidvossel)(https://github. com/kubevirt/kubevirt/pull/685) Switch to q35 by default (@mpolednik)(https://github. com/kubevirt/kubevirt/pull/650) In addition to this, we are also working on: Decentralize the architecture (@davidvossel)(https://github. com/kubevirt/kubevirt/pull/663) Decentralized pod networking (@vladikr)(https://github. com/kubevirt/kubevirt/pull/686) Implement VirtualMachinePresets (@stu-gott)(https://github. com/kubevirt/kubevirt/pull/652) Allow deploying OpenShift in vagrant (@alukiano)(https://github. com/kubevirt/kubevirt/pull/631) Expose CPU requirements in VM pod (@vladikr)(https://github. com/kubevirt/kubevirt/pull/673) Adjust uuid API (@mpolednik)(https://github. com/kubevirt/kubevirt/pull/675) Make cirros and alpine ready for q35 (@rmohr)(https://github. com/kubevirt/kubevirt/pull/688) Take a look at the pulse, to get an overview over all changes of thisweek: https://github. com/kubevirt/kubevirt/pulse Finally you can view our open issues athttps://github. com/kubevirt/kubevirt/issues And keep track of events at our calendar18pc0jur01k8f2cccvn5j04j1g@group. calendar. google. com If you need some help or want to chat you can find us on<irc://irc. freenode. net/#kubevirt> " }, { - "id": 158, + "id": 157, "url": "/2018/This-Week-in-Kube-Virt-16-size-XL.html", "title": "This Week In Kube Virt 16 Size Xl", "author" : "fabiand", "tags" : "release notes, changelog", "body": "This is a weekly update from the KubeVirt team - including the holidaybacklog update. We are currently driven by Building a solid user-story around KubeVirt Caring about end-to-end (backend, core, ui) Rework out architecture Getting dependencies into shape (storage) Improve the user-experience for users (UI, deployment) Being easier to be used on Kubernetes and OpenShift Within the last weeks we achieved to: Drop of HAProxy and redeisng of console access (@davidvossel)(https://github. com/kubevirt/kubevirt/pull/618) Dockerized builds to make sure the build env matches the runtime env(@rmohr and others)(https://github. com/kubevirt/kubevirt/pull/647) OwnerReference fixes (@alukiano)(https://github. com/kubevirt/kubevirt/pull/642) OfflineVirtualMachineDesign documentation (@petrkotas)(https://github. 
com/kubevirt/kubevirt/pull/641) Further RBAC improvements (@gbenhaim)(https://github. com/kubevirt/kubevirt/pull/640) User-Guide The guide saw many updates also for planned stuff Update to reflect v0. 2. 0 changes (@rmohr)(https://github. com/kubevirt/user-guide/pull/12) NodeSelector and affinity (@rmohr)(https://github. com/kubevirt/user-guide/pull/15) Hardware configuration (@rmohr)(https://github. com/kubevirt/user-guide/pull/14) Volumes and disks (@rmohr)(https://github. com/kubevirt/user-guide/pull/13) Cloud-Init (@davidvossel)(https://github. com/kubevirt/user-guide/pull/10) API Reference Now updated regularly (@lukas-bednar)(https://github. com/kubevirt/kubevirt/pull/643)https://kubevirt. io/api-reference/master/definitions. html Demo Got updated to v0. 2. 0 (@fabiand) But an issue with virtctl was introduced https://github. com/kubevirt/demo UI The WIP KubeVirt provider for ManageIQ was showcased(@masayag @pkliczewski) https://github. com/ManageIQ/manageiq-providers-kubevirt/ Video: https://www. youtube. com/watch?v=9Gf2Nv7h558 Screenshot: UI The Cockpit plugin makes some progress (@mlibra) https://github. com/cockpit-project/cockpit/wiki/Feature:-Kubernetes:-KubeVirt-support-enhancements https://github. com/cockpit-project/cockpit/pull/7830 Screenshot: Ansible Move to stable kubevirt release manifests (@gbenhaim)(https://github. com/kubevirt-incubator/kubevirt-ansible/pull/37) Many improvements to make it work seamlessly(@gbenhaim @lukas-bednar) In addition to this, we are also working on: Decentralize the architecture (@davidvossel)(https://github. com/kubevirt/kubevirt/pull/663) Implement VirtualMachinePresets (@stu-gott)(https://github. com/kubevirt/kubevirt/pull/652) virtctl fixes (@davidvossel and @awels)(https://github. com/kubevirt/kubevirt/pull/648) Move to q35 machine type (@mpolednik)(https://github. com/kubevirt/kubevirt/pull/650) Allow deploying OpenShift in vagrant (@alukiano)(https://github. com/kubevirt/kubevirt/pull/631) User-Guide: Offline Virtual Machine docs (@petrkotas)(https://github. com/kubevirt/user-guide/pull/9) Persistent Virtual Machines (@stu-gott)(https://github. com/kubevirt/user-guide/pull/11) Storage Working on enabling PV cloning using PVannotations (@aglitke)(https://github. com/aglitke/external-storage/tree/clone-poc) Working on optimizing Gluster for in-cluster storage Working on the ability to simplify VM image uploads Take a look at the pulse, to get an overview over all changes of thisweek: https://github. com/kubevirt/kubevirt/pulse Finally you can view our open issues athttps://github. com/kubevirt/kubevirt/issues And keep track of events at our calendar18pc0jur01k8f2cccvn5j04j1g@group. calendar. google. com If you need some help or want to chat you can find us on<irc://irc. freenode. net/#kubevirt> " }, { - "id": 159, + "id": 158, "url": "/2018/This-Week-in-Kube-Virt-16-Holiday-Wrap-Up-Edition.html", "title": "This Week In Kube Virt 16 Holiday Wrap Up Edition", "author" : "fabiand", "tags" : "release notes, changelog", "body": "This is a weekly update from the KubeVirt team - including the holidaybacklog update. We are currently driven by Being easier to be used on Kubernetes and OpenShift Rework out architecture Getting dependencies into shape (storage) Improve the user-experience for users (UI, deployment) Within the last weeks we achieved to: Drop of HAProxy and redeisng of console access (@davidvossel)(https://github. 
com/kubevirt/kubevirt/pull/618) Dockerized builds to make sure the build env matches the runtime env(@rmohr and others)(https://github. com/kubevirt/kubevirt/pull/647) OwnerReference fixes (@alukiano)(https://github. com/kubevirt/kubevirt/pull/642) OfflineVirtualMachineDesign documentation (@petrkotas)(https://github. com/kubevirt/kubevirt/pull/641) Further RBAC improvements (@gbenhaim)(https://github. com/kubevirt/kubevirt/pull/640) User-Guide The guide saw many updates also for planned stuff Update to reflect v0. 2. 0 changes (@rmohr)(https://github. com/kubevirt/user-guide/pull/12) NodeSelector and affinity (@rmohr)(https://github. com/kubevirt/user-guide/pull/15) Hardware configuration (@rmohr)(https://github. com/kubevirt/user-guide/pull/14) Volumes and disks (@rmohr)(https://github. com/kubevirt/user-guide/pull/13) Cloud-Init (@davidvossel)(https://github. com/kubevirt/user-guide/pull/10) API Reference Now updated regularly (@lukas-bednar)(https://github. com/kubevirt/kubevirt/pull/643)https://kubevirt. io/api-reference/master/definitions. html Demo Got updated to v0. 2. 0 (@fabiand) But an issue with virtctl was introduced https://github. com/kubevirt/demo UI The WIP KubeVirt provider for ManageIQ was showcased(@masayag @pkliczewski) https://github. com/ManageIQ/manageiq-providers-kubevirt/ https://www. youtube. com/watch?v=9Gf2Nv7h558 UI The Cockpit plugin makes some progress (@mlibra): Ansible Move to stable kubevirt release manifests (@gbenhaim)(https://github. com/kubevirt-incubator/kubevirt-ansible/pull/37) Many improvements to make it work seamlessly(@gbenhaim @lukas-bednar) In addition to this, we are also working on: Decentralize the architecture (@davidvossel)(https://github. com/kubevirt/kubevirt/pull/663) Implement VirtualMachinePresets (@stu-gott)(https://github. com/kubevirt/kubevirt/pull/652) virtctl fixes (@davidvossel and @awels)(https://github. com/kubevirt/kubevirt/pull/648) Move to q35 machine type (@mpolednik)(https://github. com/kubevirt/kubevirt/pull/650) Allow deploying OpenShift in vagrant (@alukiano)(https://github. com/kubevirt/kubevirt/pull/631) User-Guide: Offline Virtual Machine docs (@petrkotas)(https://github. com/kubevirt/user-guide/pull/9) Persistent Virtual Machines (@stu-gott)(https://github. com/kubevirt/user-guide/pull/11) Take a look at the pulse, to get an overview over all changes of thisweek: https://github. com/kubevirt/kubevirt/pulse Finally you can view our open issues athttps://github. com/kubevirt/kubevirt/issues And keep track of events at our calendar18pc0jur01k8f2cccvn5j04j1g@group. calendar. google. com If you need some help or want to chat you can find us on<irc://irc. freenode. net/#kubevirt> " }, { - "id": 160, + "id": 159, "url": "/2018/Some-notes-on-some-highlights-of-v020.html", "title": "Some Notes On Some Highlights Of V020", "author" : "fabiand", "tags" : "release notes, hilights", "body": "The very first KubeVirt release of KubeVirt in the new year(https://github. com/kubevirt/kubevirt/releases/v0. 2. 0) had a fewnotable highlights which were brewing over the last few weeks. VirtualMachine API redesignPreviously the VirtualMachine API was pretty much aligned, or a 1:1mapping, to libvirt’s domxml. With this change however, we took a stepback and redesigned the API to be more Kubernet-ish than libvirt-ish. Some changes, like the extraction of source volumes, will actually helpus to implement other patterns - like VirtualMachinePresets. Removal of HAProxyThis is another nice one. 
So far we were using a custom API server for performing object validation. But the use of this custom API server required that the client was accessing the custom API server, and not the main one. The multiplexing, redirecting certain requests to our API server and other requests to the main API server, was done by HAProxy; somewhat like a poor man's API server aggregation. However, now we are focusing on CRDs completely (we considered going to API server aggregation, but dropped this approach), which involves doing the validation of the CRD based on a JSON schema. Redesign of VNC/Console access: The aforementioned custom API server contained subresources to permit inbound access to the graphical and serial console of VMs. But this does not work with CRDs, and thus we are now using a different approach to provide access to those. The new implementation leverages the kubectl exec path in order to pipe the graphical and serial console from the VM to the client. This is pretty nice, as we are leveraging Kubernetes for doing the piping; we merely provide a kubectl plugin in order to ease the consumption of this. A side note is that the API of the kubectl plugin did not actually change. " }, { - "id": 161, + "id": 160, "url": "/2018/Kube-Virt-v020.html", "title": "Kube Virt v0.2.0", "author" : "fabiand", "tags" : "release notes, changelog", "body": "This release follows v0. 1. 0 and consists of 131 changes, contributed by 6 people, leading to 148 files changed, 9096 insertions(+), 5871 deletions(-). The source code and selected binaries are available for download at:https://github. com/kubevirt/kubevirt/releases/tag/v0. 2. 0. The primary release artifact of KubeVirt is the git tree. The release tag is signed and can be verified using [git-evtag][git-evtag]. Pre-built containers are published on Docker Hub and can be viewed at:https://hub. docker. com/u/kubevirt/. Notable changes VM launch and shutdown flow improvements VirtualMachine API redesign Removal of HAProxy Redesign of VNC/Console access Initial support for different vagrant providers Contributors: 6 people contributed to this release: 65 Roman Mohr <rmohr@redhat. com> 60 David Vossel <dvossel@redhat. com> 2 Fabian Deutsch <fabiand@redhat. com> 2 Stu Gott <sgott@redhat. com> 1 Marek Libra <mlibra@redhat. com> 1 Martin Kletzander <mkletzan@redhat. com> Test Results: Ran 40 of 42 Specs in 703. 532 seconds SUCCESS! — 40 Passed 0 Failed 0 Pending 2 Skipped PASS Additional Resources Mailing list: https://groups. google. com/forum/#!forum/kubevirt-dev IRC: <irc://irc. freenode. net/#kubevirt> An easy to use demo: https://github. com/kubevirt/demo [How to contribute][contributing] [License][license] [git-evtag]: https://github. com/cgwalters/git-evtag#using-git-evtag [contributing]: https://github. com/kubevirt/kubevirt/blob/main/CONTRIBUTING. md [license]: https://github. com/kubevirt/kubevirt/blob/main/LICENSE " }, { - "id": 162, + "id": 161, "url": "/2017/This-Week-in-Kube-Virt-15.html", "title": "This Week In Kube Virt 15", "author" : "fabiand", "tags" : "release notes, changelog", "body": "This is a weekly update from the KubeVirt team. We are currently driven by Being easier to be used on Kubernetes and OpenShift Enabling people to contribute Streamlining and improving the Kubernetes experience This week we achieved to: VM Grace period and shutdown improvements (@davidvossel)(https://github. com/kubevirt/kubevirt/pull/526) On another side we were also successful in: Initiating a Kubernetes WG Virtualization mailing list:https://groups. google.
com/forum/#!forum/kubernetes-wg-virtualization Triggering a #virtualization slack channel in the Kubernetes org In addition to this, we are also working on quite a few things: Serial console rework (@davidvossel)(https://github. com/kubevirt/kubevirt/pull/613) VM API Redesign (@rmohr)(https://github. com/kubevirt/kubevirt/pull/606) Add OpenShift support (@karimb)(https://github. com/kubevirt/kubevirt/pull/608,https://github. com/kubevirt-incubator/kubevirt-ansible/pull/29) Ansible Broker support (@gbenhaim)(https://github. com/kubevirt-incubator/kubevirt-ansible/pull/30) Improve development builds (@petrkotas)(https://github. com/kubevirt/kubevirt/pull/609) - Take a look atthe pulse, to get an overview over all changes of this week:https://github. com/kubevirt/kubevirt/pulse Finally you can view our open issues athttps://github. com/kubevirt/kubevirt/issues And keep track of events at our calendar18pc0jur01k8f2cccvn5j04j1g@group. calendar. google. com If you need some help or want to chat you can find us on<irc://irc. freenode. net/#kubevirt> " }, { - "id": 163, + "id": 162, "url": "/2017/This-Week-in-Kube-Virt-14.html", "title": "This Week In Kube Virt 14", "author" : "fabiand", "tags" : "release notes, changelog", "body": "This is a weekly update from the KubeVirt team. We are currently driven by Being easier to be used on Kubernetes and OpenShift Enabling people to contribute This week you could have met us at: KubeCon NA: Virtualizing Workloads Saloon(https://kccncna17. sched. com/event/43ebdf89846d7f4939810bbaeb5a3229)Some minutes athttps://docs. google. com/document/d/1B3zbJA0MTQ82yu2JNMREEiVaQJTG3PfGBfdogsFISBE/editThis week we achieved to: Release KubeVirt v0. 1. 0(https://github. com/kubevirt/kubevirt/releases/tag/v0. 1. 0) Improve the manifest situation (@rmohr)(https://github. com/kubevirt/kubevirt/pull/602) In addition to this, we are also working on: OpenAPI improvements (@lukas-bednar)(https://github. com/kubevirt/kubevirt/pull/603) Describe how device assignment can work (@mpolednik)(https://github. com/kubevirt/kubevirt/pull/593) VM Unknown state (@rmohr)(https://github. com/kubevirt/kubevirt/issues/543) Take a look at the pulse, to get an overview over all changes of thisweek: https://github. com/kubevirt/kubevirt/pulse Finally you can view our open issues athttps://github. com/kubevirt/kubevirt/issues And keep track of events at our calendar18pc0jur01k8f2cccvn5j04j1g@group. calendar. google. com If you need some help or want to chat you can find us on<irc://irc. freenode. net/#kubevirt> " }, { - "id": 164, + "id": 163, "url": "/2017/Kube-Virt-v010.html", "title": "Kube Virt v0.1.0", "author" : "fabiand", "tags" : "release notes, changelog", "body": "This release follows v0. 0. 4 and consists of 115 changes, contributed by11 people, leading to 121 files changed, 5278 insertions(+), 1916deletions(-). The source code and selected binaries are available for download at:https://github. com/kubevirt/kubevirt/releases/tag/v0. 1. 0. The primary release artifact of KubeVirt is the git tree. The releasetag is signed and can be verified using [git-evtag][git-evtag]. Pre-built containers are published on Docker Hub and can be viewed at:https://hub. docker. com/u/kubevirt/. 
Notable changes Many API improvements for a proper OpenAPI reference Add watchdog support Drastically improve the deployment on non-vagrant setups Dropped nodeSelectors Separated inner component deployment from edge component deployment Created separate manifests for developer, test, and releasedeployments Moved komponents to kube-system namespace Improved and unified flag parsing Contributors11 people contributed to this release: 42 Roman Mohr <rmohr@redhat. com>20 David Vossel <dvossel@redhat. com>18 Lukas Bednar <lbednar@redhat. com>14 Martin Polednik <mpolednik@redhat. com> 7 Fabian Deutsch <fabiand@redhat. com> 6 Lukianov Artyom <alukiano@redhat. com> 3 Vladik Romanovsky <vromanso@redhat. com> 2 Petr Kotas <petr. kotas@gmail. com> 1 Barak Korren <bkorren@redhat. com> 1 Francois Deppierraz <francois@ctrlaltdel. ch> 1 Saravanan KR <skramaja@redhat. com>Test Results Ran 44 of 46 Specs in 851. 185 seconds SUCCESS! — 44 Passed 0 Failed     0 Pending 2 Skipped PASS Additional Resources Mailing list: https://groups. google. com/forum/#!forum/kubevirt-dev IRC: <irc://irc. freenode. net/#kubevirt> An easy to use demo: https://github. com/kubevirt/demo [How to contribute][contributing] [License][license] [git-evtag]: https://github. com/cgwalters/git-evtag#using-git-evtag[contributing]:https://github. com/kubevirt/kubevirt/blob/main/CONTRIBUTING. md[license]: https://github. com/kubevirt/kubevirt/blob/main/LICENSE " }, { - "id": 165, + "id": 164, "url": "/2017/This-Week-in-Kube-Virt-13.html", "title": "This Week In Kube Virt 13", "author" : "fabiand", "tags" : "release notes, changelog", "body": "This is a weekly update from the KubeVirt team. We are currently driven by Being easier to be used on Kubernetes and OpenShift Enabling people to contribute This week you can meet us at: KubeCon NA: Virtualizing Workloads Saloon(https://kccncna17. sched. com/event/43ebdf89846d7f4939810bbaeb5a3229)This week we still achieved to: Owner References for VM ReplicaSet (@rmohr)(https://github. com/kubevirt/kubevirt/pull/596)In addition to this, we are also working on: Manifest refactoring (@rmohr)(https://github. com/kubevirt/kubevirt/pull/602) OpenAPI improvements (@lukas-bednar)(https://github. com/kubevirt/kubevirt/pull/603) Describe how device assignment can work (@mpolednik)(https://github. com/kubevirt/kubevirt/pull/593) VM Unknown state (@rmohr)(https://github. com/kubevirt/kubevirt/issues/543) Take a look at the pulse, to get an overview over all changes of thisweek: https://github. com/kubevirt/kubevirt/pulse Finally you can view our open issues athttps://github. com/kubevirt/kubevirt/issues And keep track of events at our calendar18pc0jur01k8f2cccvn5j04j1g@group. calendar. google. com If you need some help or want to chat you can find us on<irc://irc. freenode. net/#kubevirt> " }, { - "id": 166, + "id": 165, "url": "/2017/This-Week-in-Kube-Virt-12.html", "title": "This Week In Kube Virt 12", "author" : "fabiand", "tags" : "release notes, changelog", "body": "This is a weekly update from the KubeVirt team. We are currently driven by Being easier to be used on Kubernetes and OpenShift Enabling people to contribute This week we was really slow, but we still achieved to: Improve vagrant setup (@cynepco3hahue)(https://github. com/kubevirt/kubevirt/pull/586)In addition to this, we are also working on: GlusterFS support (@humblec)(https://github. com/kubevirt/kubevirt/pull/578) Describe how device assignment can work (@mpolednik)(https://github. 
com/kubevirt/kubevirt/pull/593) Take a look at the pulse, to get an overview over all changes of thisweek: https://github. com/kubevirt/kubevirt/pulse Finally you can view our open issues athttps://github. com/kubevirt/kubevirt/issues And keep track of events at our calendar18pc0jur01k8f2cccvn5j04j1g@group. calendar. google. com If you need some help or want to chat you can find us on<irc://irc. freenode. net/#kubevirt> " }, { - "id": 167, + "id": 166, "url": "/2017/This-Week-in-Kube-Virt-11.html", "title": "This Week In Kube Virt 11", "author" : "fabiand", "tags" : "release notes, changelog", "body": "This is a weekly update from the KubeVirt team. We are currently driven by Being easier to be used on Kubernetes and OpenShift Enabling people to contribute This week we achieved to: Generation of API documentation (@lukas-bednar)(https://github. com/kubevirt/kubevirt/pull/571)(https://kubevirt. io/api-reference/master/definitions. html) Move components to kube-system namespace (@cynepco3hahue)(https://github. com/kubevirt/kubevirt/pull/558) Use glide again (@davidvossel)(https://github. com/kubevirt/kubevirt/pull/576) In addition to this, we are also working on: GlusterFS support (@humblec)(https://github. com/kubevirt/kubevirt/pull/578)Take a look at the pulse, to get an overview over all changes of thisweek: https://github. com/kubevirt/kubevirt/pulse Finally you can view our open issues athttps://github. com/kubevirt/kubevirt/issues And keep track of events at our calendar18pc0jur01k8f2cccvn5j04j1g@group. calendar. google. com If you need some help or want to chat you can find us on<irc://irc. freenode. net/#kubevirt> " }, { - "id": 168, + "id": 167, "url": "/2017/This-Week-in-Kube-Virt-10-base-10.html", "title": "This Week In Kube Virt 10 Base 10", "author" : "fabiand", "tags" : "release notes, changelog", "body": "This is a weekly update from the KubeVirt team. We are currently driven by Being easier to be used on Kubernetes and OpenShift Enabling people to contribute Node Isolator use-case (more informations soon) Non-code wise this week The KVM Forum recording was published: “Running Virtual Machines onKubernetes with libvirt & KVM by Fabian Deutsch & Roman Mohr”(https://www. youtube. com/watch?v=Wh-ejUyuHJ0) Preparing the “virtualization saloon” at KubeCon NA(https://kccncna17. sched. com/event/CU8m) This week we achieved to: Further improve API documentation (@lukas-bednar)(https://github. com/kubevirt/kubevirt/pull/549) Virtual Machine watchdog device support (@davidvossel)(https://github. com/kubevirt/kubevirt/pull/544) Introduction of virt-dhcp (@vladikr)(https://github. com/kubevirt/kubevirt/pull/525) Less specific manifests(https://github. com/kubevirt/kubevirt/pull/560) (@fabiand) In addition to this, we are also working on: Addition of more tests to pod networking (@vladikr)(https://github. com/kubevirt/kubevirt/pull/525) Adding helm charts (@cynepco3hahue)(https://github. com/kubernetes/charts/pull/2669) Move manifests to kube-system namespace (@cynepco3hahue)(https://github. com/kubevirt/kubevirt/pull/558) Drafting the publishing of API docs (@lukas-bednar)(https://github. com/kubevirt-incubator/api-reference)(https://kubevirt. io/api-reference/master/definitions. html) Take a look at the pulse, to get an overview over all changes of thisweek: https://github. com/kubevirt/kubevirt/pulse Finally you can view our open issues athttps://github. com/kubevirt/kubevirt/issues And keep track of events at our calendar18pc0jur01k8f2cccvn5j04j1g@group. calendar. google. 
com If you need some help or want to chat you can find us on<irc://irc. freenode. net/#kubevirt> " }, { - "id": 169, + "id": 168, "url": "/2017/Kube-Virt-v004.html", "title": "Kube Virt v0.0.4", "author" : "fabiand", "tags" : "release notes, changelog", "body": "This release follows v0. 0. 3 and consists of 133 changes, contributed by14 people, leading to 109 files changed, 7093 insertions(+), 2437deletions(-). The source code and selected binaries are available for download at:https://github. com/kubevirt/kubevirt/releases/tag/v0. 0. 4. The primary release artifact of KubeVirt is the git tree. The releasetag is signed and can be verified using [git-evtag][git-evtag]. Pre-built containers are published on Docker Hub and can be viewed at:https://hub. docker. com/u/kubevirt/. Notable changes Add support for node affinity to VM. Spec Add OpenAPI specification Drop swagger 1. 2 specification virt-launcher refactoring Leader election mechanism for virt-controller Move from glide to dep for dependency management Improve virt-handler synchronization loops Add support for running the functional tests on oVirt infrastructure Several tests fixes (spice, cleanup, …​) Add console test tool Improve libvirt event notification Contributors14 people contributed to this release: 46 David Vossel <dvossel@redhat. com>46 Roman Mohr <rmohr@redhat. com>12 Lukas Bednar <lbednar@redhat. com>11 Lukianov Artyom <alukiano@redhat. com> 4 Martin Sivak <msivak@redhat. com> 4 Petr Kotas <pkotas@redhat. com> 2 Fabian Deutsch <fabiand@redhat. com> 2 Milan Zamazal <mzamazal@redhat. com> 1 Artyom Lukianov <alukiano@redhat. com> 1 Barak Korren <bkorren@redhat. com> 1 Clifford Perry <coperry94@gmail. com> 1 Martin Polednik <mpolednik@redhat. com> 1 Stephen Gordon <sgordon@redhat. com> 1 Stu Gott <sgott@redhat. com>Test Results Ran 45 of 47 Specs in 797. 286 seconds SUCCESS! — 45 Passed 0 Failed     0 Pending 2 Skipped PASS Additional Resources Mailing list: https://groups. google. com/forum/#!forum/kubevirt-dev IRC: <irc://irc. freenode. net/#kubevirt> An easy to use demo: https://github. com/kubevirt/demo [How to contribute][contributing] [License][license] [git-evtag]: https://github. com/cgwalters/git-evtag#using-git-evtag[contributing]:https://github. com/kubevirt/kubevirt/blob/main/CONTRIBUTING. md[license]: https://github. com/kubevirt/kubevirt/blob/main/LICENSE " }, { - "id": 170, + "id": 169, "url": "/2017/This-Week-in-Kube-Virt-9.html", "title": "This Week In Kube Virt 9", "author" : "fabiand", "tags" : "release notes, changelog", "body": "This is a weekly update from the KubeVirt team. We are currently driven by Being easier to be used on Kubernetes and OpenShift Enabling people to contribute Node Isolator use-case (more informations soon) This week we achieved to: Release KubeVirt v0. 0. 4(https://github. com/kubevirt/kubevirt/releases/tag/v0. 0. 4) virt-handler refactoring (@rmohr)(https://github. com/kubevirt/kubevirt/pull/530) Add support for running functional tests on oVirt infrastructure(@bkorren) (https://github. com/kubevirt/kubevirt/pull/379) Add OpenAPI specification (@lbednar)(https://github. com/kubevirt/kubevirt/pull/535) Consolidate console functional tests (@dvossel)(https://github. com/kubevirt/kubevirt/pull/541) Improve libvirt event notification (@rmohr)(https://github. com/kubevirt/kubevirt/pull/351) In addition to this, we are also working on: Addition of more tests to pod networking (@vladikr)(https://github. com/kubevirt/kubevirt/pull/525) Watchdog support (@dvossel)(https://github. 
com/kubevirt/kubevirt/pull/544) Leveraging ingress (@fabiand)(https://github. com/kubevirt/kubevirt/pull/538) Adding helm charts (@cynepco3hahue)(https://github. com/kubernetes/charts/pull/2669) Take a look at the pulse, to get an overview over all changes of thisweek: https://github. com/kubevirt/kubevirt/pulse Finally you can view our open issues athttps://github. com/kubevirt/kubevirt/issues And keep track of events at our calendar18pc0jur01k8f2cccvn5j04j1g@group. calendar. google. com If you need some help or want to chat you can find us on<irc://irc. freenode. net/#kubevirt> " }, { - "id": 171, + "id": 170, "url": "/2017/This-Week-in-Kube-Virt-8.html", "title": "This Week In Kube Virt 8", "author" : "fabiand", "tags" : "release notes, changelog", "body": "This is a weekly update from the KubeVirt team. We are currently driven by Being easier to be used on Kubernetes and OpenShift Enabling people to contribute Node Isolator use-case (more informations soon) This week we achieved to: Present at KVM Forum, Prague (@rmohr, @fabiand)http://slides. com/fabiand/running-virtual-machines-on-kubernetes-at-kvm-forum-2017# Proposal on how to construct the VM API (@rmohr, @michalskrivanek)https://github. com/kubevirt/kubevirt/pull/466 Pod deletion improvements (@davidvossel)https://github. com/kubevirt/kubevirt/pull/531 In addition to this, we are also working on: Addition of more tests to pod networking (@vladikr)(https://github. com/kubevirt/kubevirt/pull/525) Access to the node control network (@rmohr)(https://github. com/kubevirt/kubevirt/pull/499) Custom VM metrics discussion (@fromanirh)(https://github. com/kubevirt/kubevirt/pull/487) Simple persistence mechanism documentation (@mpolednik)(https://github. com/kubevirt/user-guide/pull/6) Take a look at the pulse, to get an overview over all changes of thisweek: https://github. com/kubevirt/kubevirt/pulse Finally you can view our open issues athttps://github. com/kubevirt/kubevirt/issues And keep track of events at our calendar18pc0jur01k8f2cccvn5j04j1g@group. calendar. google. com If you need some help or want to chat you can find us on<irc://irc. freenode. net/#kubevirt> " }, { - "id": 172, + "id": 171, "url": "/2017/This-Week-in-Kube-Virt-7.html", "title": "This Week In Kube Virt 7", "author" : "fabiand", "tags" : "release notes, changelog", "body": "This is the seventh weekly update from the KubeVirt team. This week you can read more or speak to us at: KVM Forum, Prague Thursday, October 26, 10:00 - 10:45https://kvmforum2017. sched. com/event/BnoA “KubeWHAT?” by S Gordon - On KubeVirt and OpenStack (past event)https://www. slideshare. net/sgordon2/kubewhat We are currently driven by Being easier to be used on Kubernetes and OpenShift Enabling people to contribute Node Isolator use-case (more informations soon) This week we achieved to: VMs and components are now running in the host pid namespace(@dvossel) https://github. com/kubevirt/kubevirt/pull/506 Move dependency management from glide to dep ()https://github. com/kubevirt/kubevirt/pull/511 Add a leader election mechanism to virt-controller (@)https://github. com/kubevirt/kubevirt/pull/461 Add OpenAPI specification (@)https://github. com/kubevirt/kubevirt/pull/494 Put work on api server aggregation on hold for now (@stu-gott) To beresolved: API server storage(https://github. com/kubevirt/kubevirt/pull/355) In addition to this, we are also working on: Finalization of pod networking (@vladikr)(https://github. 
com/kubevirt/kubevirt/pull/525) Access to the node control network (@rmohr)(https://github. com/kubevirt/kubevirt/pull/499) Custom VM metrics discussion (@fromanirh)(https://github. com/kubevirt/kubevirt/pull/487) Simple persistence mechanism (@petrkotas)(https://github. com/petrkotas/virt-vmconfig-crd/) Take a look at the pulse, to get an overview over all changes of thisweek: https://github. com/kubevirt/kubevirt/pulse Finally you can view our open issues athttps://github. com/kubevirt/kubevirt/issues And keep track of events at our calendar18pc0jur01k8f2cccvn5j04j1g@group. calendar. google. com If you need some help or want to chat you can find us on<irc://irc. freenode. net/#kubevirt> " }, { - "id": 173, + "id": 172, "url": "/2017/This-Week-in-Kube-Virt-6.html", "title": "This Week In Kube Virt 6", "author" : "fabiand", "tags" : "release notes, changelog", "body": "This is the sixth weekly update from the KubeVirt team. This week you could watch us at: Kubernetes Community Meeting introducing and demoing KubeVirt:https://www. youtube. com/watch?v=oBhu1MeGbss Or follow us at our new blog: https://kubevirt. github. io/blogs/ We are currently driven by Being easier to be used on Kubernetes and OpenShift Enabling people to contribute Node Isolator use-case (more informations soon) This week we achieved to: Add support for node affinity to VM. Spec (@MarSik)(https://github. com/kubevirt/kubevirt/pull/446)In addition to this, we are also working on: Access to the node control network (@rmohr)(https://github. com/kubevirt/kubevirt/pull/499) Custom VM metrics discussion (@fromanirh)(https://github. com/kubevirt/kubevirt/pull/487) Continued work on api server aggregation (@stu-gott)(https://github. com/kubevirt/kubevirt/pull/355) Revived VM Config discussion (@mpolednik)(https://github. com/kubevirt/kubevirt/pull/408) Take a look at the pulse, to get an overview over all changes of thisweek: https://github. com/kubevirt/kubevirt/pulse Finally you can view our open issues athttps://github. com/kubevirt/kubevirt/issues And keep track of events at our calendar18pc0jur01k8f2cccvn5j04j1g@group. calendar. google. com If you need some help or want to chat you can find us on<irc://irc. freenode. net/#kubevirt> " }, { - "id": 174, + "id": 173, "url": "/2017/This-Week-in-Kube-Virt-5.html", "title": "This Week In Kube Virt 5", "author" : "fabiand", "tags" : "release notes, changelog", "body": "This is the fith weekly update from the KubeVirt team. We are currently driven by Being easier to be used on Kubernetes and OpenShift Enabling people to contribute Node Isolator use-case (more informations soon) This week we achieved to: Improved sagger documentation (for SDK generation) (@lukas-bednar)(https://github. com/kubevirt/kubevirt/pull/476) Kubernetes 1. 8 fixes (@cynepco3hahue)(https://github. com/kubevirt/kubevirt/pull/479https://github. com/kubevirt/kubevirt/pull/484) Ephemeral disk rewrite (@davidvossel)(https://github. com/kubevirt/kubevirt/pull/460) Custom VM metrics proposal (@fromanirh )(https://github. com/kubevirt/kubevirt/pull/487) [WIP] Add API server PKI tool (@jhernand)(https://github. com/kubevirt/kubevirt/pull/498) KubeVirt provider for the cluster autoscaler (@rmohr)(https://github. com/rmohr/autoscaler/pull/1) In addition to this, we are also working on: Finally some good progress with layer 3 network connectivity(@vladikr) (https://github. com/kubevirt/kubevirt/pull/450https://github. com/vladikr/kubevirt/tree/veth-bridge-taphttps://github. 
com/vladikr/kubevirt/tree/veth-macvtap) Continued work on api server aggregation (@stu-gott)(https://github. com/kubevirt/kubevirt/pull/355) Take a look at the pulse, to get an overview over all changes of thisweek: https://github. com/kubevirt/kubevirt/pulse Finally you can view our open issues athttps://github. com/kubevirt/kubevirt/issues And keep track of events at our calendar18pc0jur01k8f2cccvn5j04j1g@group. calendar. google. com If you need some help or want to chat you can find us on<irc://irc. freenode. net/#kubevirt> " }, { - "id": 175, + "id": 174, "url": "/2017/This-Week-in-Kube-Virt-4.html", "title": "This Week In Kube Virt 4", "author" : "fabiand", "tags" : "release notes, changelog", "body": "This is the fourth weekly update from the KubeVirt team. We are currently driven by Being easier to be used on Kubernetes and OpenShift Enabling people to contribute Node Isolator use-case (more informations soon) This week you can find us at: Ohio Linux Fest (@stu-gott) “KubeVirt, Virtual Machine ManagementUsing Kubernetes” https://ohiolinux. orgThis week we achieved to: ReplicaSet for VirtualMachines (@rmohr)(https://github. com/kubevirt/kubevirt/pull/453) Swagger documentation improvements (@rmohr, @lukas-bednar)(https://github. com/kubevirt/kubevirt/pull/475) Hot-standby for our controller (@cynepco3hahue)(https://github. com/kubevirt/kubevirt/pull/461) domxml/VM Spec mapping rules proposal (@rmohr, @michalskrivanek)(https://github. com/kubevirt/kubevirt/pull/466) Launch flow improvement proposal (@davidvossel)(https://github. com/kubevirt/kubevirt/pull/469) In addition to this, we are also working on: Debug layer 3 network connectivity issues for VMs (@vladikr)(https://github. com/kubevirt/kubevirt/pull/450) Review of the draft code for the api server aggregation (@stu-gott)(https://github. com/kubevirt/kubevirt/pull/355) Take a look at the pulse, to get an overview over all changes of thisweek: https://github. com/kubevirt/kubevirt/pulse Finally you can view our open issues athttps://github. com/kubevirt/kubevirt/issues And keep track of events at our calendar18pc0jur01k8f2cccvn5j04j1g@group. calendar. google. com If you need some help or want to chat you can find us on<irc://irc. freenode. net/#kubevirt> " }, { - "id": 176, + "id": 175, "url": "/2017/This-Week-in-Kube-Virt-3.html", "title": "This Week In Kube Virt 3", "author" : "fabiand", "tags" : "release notes, changelog", "body": "This is the third weekly update from the KubeVirt team. We are currently driven by Being easier to be used on Kubernetes and OpenShift Enabling people to contribute Node Isolator use-case (more informations soon) This week we achieved to: Renamed VM kind to VirtualMachine (@cynepco3hahue)(https://github. com/kubevirt/kubevirt/pull/452) Proposal for VirtualMachineReplicaSet to scale VMs (@rmohr)(https://github. com/kubevirt/kubevirt/pull/453) Ephemeral Registry Disk Rewrite (@vossel)(https://github. com/kubevirt/kubevirt/pull/460) Fix some race in our CI (@rmohr)(https://github. com/kubevirt/kubevirt/pull/459) In addition to this, we are also working on: Review of the draft code to get layer 3 network connectivity for VMs(@vladikr) (https://github. com/kubevirt/kubevirt/pull/450) Review of the draft code for the api server aggregation (@stu-gott)(https://github. com/kubevirt/kubevirt/pull/355) Review of the proposal integrate with host networking (@rmohr)(https://github. 
com/kubevirt/kubevirt/pull/367) Converging multiple ansible playbooks for deployment on OpenShift(@petrkotas, @cynepco3hahue, @lukas-bednar)(https://github. com/kubevirt-incubator/kubevirt-ansible) Continued discussion of VM persistence and ABI stability(https://groups. google. com/d/topic/kubevirt-dev/G0FpxJYFhf4/discussion) Take a look at the pulse, to get an overview over all changes of thisweek: https://github. com/kubevirt/kubevirt/pulse Finally you can view our open issues athttps://github. com/kubevirt/kubevirt/issues And keep track of events at our calendar18pc0jur01k8f2cccvn5j04j1g@group. calendar. google. com If you need some help or want to chat you can find us on<irc://irc. freenode. net/#kubevirt> " }, { - "id": 177, + "id": 176, "url": "/2017/This-Week-in-Kube-Virt-2.html", "title": "This Week In Kube Virt 2", "author" : "fabiand", "tags" : "release notes, changelog", "body": "This is the second weekly update from the KubeVirt team. We are currently driven by Being easier to be used on Kubernetes and OpenShift Enabling people to contribute This week we achieved to: Keep cloud-init data in Secrets (@vossel)(https://github. com/kubevirt/kubevirt/pull/433) First draft code to get layer 3 network connectivity for VMs(@vladikr) (https://github. com/kubevirt/kubevirt/pull/450) First draft code for the api server aggregation (@stu-gott)(https://github. com/kubevirt/kubevirt/pull/355) Add further migration documentation (@rmohr)(https://github. com/kubevirt/user-guide/pull/1) In addition to this, we are also working on: Progress on how to integrate with host networking (@rmohr)(https://github. com/kubevirt/kubevirt/pull/367) Converging multiple ansible playbooks for deployment on OpenShift(@petrkotas, @cynepco3hahue, @lukas-bednar)(https://github. com/kubevirt-incubator/kubevirt-ansible) Initial support for Anti- & Affinity for VMs (@MarSik)(https://github. com/kubevirt/kubevirt/issues/438) Initial support for memory and cpu mapping (@MarSik)(https://github. com/kubevirt/kubevirt/pull/388) Discussing VM persistence and ABI stability(https://groups. google. com/d/topic/kubevirt-dev/G0FpxJYFhf4/discussion) Take a look at the pulse, to get an overview over all changes of thisweek: https://github. com/kubevirt/kubevirt/pulse Finally you can view our open issues athttps://github. com/kubevirt/kubevirt/issues And keep track of events at our calendar18pc0jur01k8f2cccvn5j04j1g@group. calendar. google. com If you need some help or want to chat you can find us on<irc://irc. freenode. net/#kubevirt> " }, { - "id": 178, + "id": 177, "url": "/2017/This-Week-in-Kube-Virt-1.html", "title": "This Week In Kube Virt 1", "author" : "fabiand", "tags" : "release notes, changelog", "body": "This is the first weekly update from the KubeVirt team. We are currently driven by Being easier to consume on Kubernetes and OpenShiftThis week we achieved to merge a design for cloud-init support(https://github. com/kubevirt/kubevirt/pull/372) release KubeVirt v0. 0. 2(https://github. com/kubevirt/kubevirt/releases/tag/v0. 0. 2) Minikube based demo (https://github. com/kubevirt/demo) OpenShift Community presentation(https://www. youtube. com/watch?v=IfuL2rYhMKY) In addition to this, we are also working on: Support stock Kubernetes networking(https://github. com/kubevirt/kubevirt/issues/261) Move to a custom API Server suitable for API Server aggregation(https://github. com/kubevirt/kubevirt/issues/205) Writing a user facing getting started guide(https://github. 
com/kubevirt/kubevirt/issues/410) Ansible playbooks for deployment on OpenShift Take a look at the pulse, to get an overview over all changes of thisweek: https://github. com/kubevirt/kubevirt/pulse Finally you can view our open issues athttps://github. com/kubevirt/kubevirt/issues If you need some help or want to chat you can find us on<irc://irc. freenode. net/#kubevirt>. " }, { - "id": 179, + "id": 178, "url": "/2017/technology-comparison.html", "title": "Comparing KubeVirt to other technologies", "author" : "Fabian Deutsch", "tags" : "KubeVirt, ClearContainers, virtlet, CRI, OpenStack, ovirt", "body": "Is KubeVirt a replacement for $MYVMMGMTSYSTEM?: Maybe. The primary goal of KubeVirt is to allow running virtual machines ontop of Kubernetes. It’s focused on the virtualization bits. General virtualization management systems like i. e. OpenStack or oVirt usuallyconsist of some additional services which take care of i. e. network management,host provisioning, data warehousing, just to name a few. These services are outof scope of KubeVirt. That being said, KubeVirt is intended to be part of a virtualization managementsystem. It can be seen as an VM cluster runtime, and additional componentsprovide additional functionality to provide a nice coherent user-experience. Is KubeVirt like ClearContainers?: No. ClearContainersare about using VMs to isolate pods or containers on the container runtimelevel. KubeVirt on the other hand is about allowing to manage virtual machines on acluster level. But beyond that it’s also how virtual machines are exposed. ClearContainers hide the fact that a virtual machine is used, but KubeVirt ishighly interested in providing an API to configure a virtual machine. Is KubeVirt like virtlet?: Somewhat. virtlet is a CRIimplementation to run virtual machines instead of containers. The key differences to KubeVirt are: It’s a CRI. This implies that the VM runtime is on the host, and that thekubelet is configured to use it. KubeVirt on the other hand can be deployed as a native Kubernetes add-on. Pod API. The virtlet is using a Pod API to specify the VM. Certainfields like i. e. volumes are mapped to the corresponding VM functionality. This is problematic, there are many details to VMs which can not be mappedto a Pod counterpart. Eventually annotations can be used to cover thoseproperties. KubeVirt on the other hand exposes a VM specific API, which tries to coverall properties of a VM. Why Kubernetes and not bringing containers to OpenStack or oVirt ?: We think that Container workloads are the future. Therefore we want to add VMsupport on top of a container management system instead of building containersupport into a VM management system. " }, { - "id": 180, + "id": 179, "url": "/2017/role-of-libvirt.html", "title": "The Role of LibVirt", "author" : "Fabian Deutsch", "tags" : "libvirt", "body": "Libvirt project. Can I perform a 1:1 translation of my libvirt domain xml to a VM Spec?: Probably not, libvirt is intended to be run on a host and the domain XML isbased on this assumption, this implies that the domain xml allows you to accesshost local resources i. e. local paths, host devices, and host deviceconfigurations. A VM Spec on the other hand is designed to work with cluster resources. And itdoes not permit to address host resources. Does a VM Spec support all features of libvirt?: No, libvirt has a wide range of features, reaching beyond pure virtualizationfeatures, into host, network, and storage management. 
The API was driven by therequirements of running virtualization on a host. A VM Spec however is a VM definition on the cluster level, this by itselfmeans that the specification has different requirements, i. e. it also needs toinclude scheduling information and KubeVirt specifically builds on Kubernetes, which allows it to reuse thesubsystems for consuming network and storage, which on the other hand meansthat the corresponding libvirt features will not be exposed. " }, { - "id": 181, + "id": 180, "url": "/galleries/2020-01-31-DevConfCZ2020-in-pictures", "title": "DevConf.cz 2020 in pictures", "author" : "Pablo Iranzo Gómez", "tags" : "", "body": "Here are some of the pictures of KubeVirt presence at DevConf. cz 2020. " }, { - "id": 182, + "id": 181, "url": "/galleries/2020-02-03-Fosdem2020-communty-presence", "title": "FOSDEM 2020 in pictures", "author" : "Pablo Iranzo Gómez", "tags" : "", "body": "Here are some of the pictures of KubeVirt presence at FOSDEM 2020. " }, , { - "id": 183, + "id": 182, "url": "/pages/alicloud", "title": "Easy install using AliCloud", "author" : "", "tags" : "", "body": " - " }, , { - "id": 184, + "id": 183, "url": "/pages/azure", "title": "Easy install using Azure", "author" : "", "tags" : "", "body": " - " }, , , { - "id": 185, + "id": 184, "url": "/pages/cloud", "title": "Easy install on cloud providers", "author" : "", "tags" : "", "body": " - " }, , { - "id": 186, + "id": 185, "url": "/category/community.html", "title": "Community", "author" : "", "tags" : "", "body": " - " }, , { - "id": 187, + "id": 186, "url": "/blogs/community", "title": "Community", "author" : "", "tags" : "", - "body": " - Blogs Categories: News Weekly Updates Releases Uncategorized Additional filters: Grouped by Date " + "body": " - Blogs Categories: News Weekly Updates Releases Uncategorized Additional filters: Grouped by Date " }, , , , { - "id": 188, + "id": 187, "url": "/blogs/date", "title": "Grouped by Date", "author" : "", "tags" : "", - "body": " - Blogs Categories: News Weekly Updates Releases Uncategorized Additional filters: Grouped by Date Post calendar: JanFebMarAprMayJunJulAugSepOctNovDec2023  3 1 3 2 2 20223112121311  20212123112222 12020352432321214201932222253275420185234863134422017      2 4454 2023: November: 📅 07: Announcing KubeVirt v1. 1 📅 06: KubeVirt v1. 1. 0 September: 📅 06: Running KubeVirt with Cluster Autoscaler 📅 05: Managing KubeVirt VMs with Ansible July: 📅 24: NetworkPolicies for KubeVirt VMs secondary networks using OVN-Kubernetes 📅 11: KubeVirt v1. 0 has landed! 📅 06: KubeVirt v1. 0. 0 May: 📅 31: Secondary networks connected to the physical underlay for KubeVirt VMs using OVN-Kubernetes March: 📅 06: Secondary networks for KubeVirt VMs using OVN-Kubernetes 📅 03: KubeVirt Summit 2023! 📅 01: KubeVirt v0. 59. 0 2022: October: 📅 13: KubeVirt v0. 58. 0 September: 📅 12: KubeVirt v0. 57. 0 August: 📅 18: KubeVirt v0. 56. 0 📅 12: Simplifying KubeVirt's `VirtualMachine` UX with Instancetypes and Preferences 📅 02: KubeVirt: installing Microsoft Windows 11 from an ISO July: 📅 14: KubeVirt v0. 55. 0 June: 📅 28: KubeVirt at KubeCon EU 2022 📅 08: KubeVirt v0. 54. 0 May: 📅 09: KubeVirt v0. 53. 0 April: 📅 08: KubeVirt v0. 52. 0 📅 03: Load-balancer for virtual machines on bare metal Kubernetes clusters March: 📅 08: KubeVirt v0. 51. 0 February: 📅 09: KubeVirt v0. 50. 0 January: 📅 25: Dedicated migration network in KubeVirt 📅 24: KubeVirt Summit is coming back! 📅 11: KubeVirt v0. 49. 0 2021: December: 📅 06: KubeVirt v0. 48. 
0 October: 📅 13: Running real-time workloads with improved performance 📅 08: KubeVirt v0. 46. 0 September: 📅 21: Import AWS AMIs as KubeVirt Golden Images 📅 08: KubeVirt v0. 45. 0 August: 📅 13: Running virtual machines in Istio service mesh 📅 09: KubeVirt v0. 44. 0 July: 📅 16: Kubernetes Authentication Options using KubeVirt Client Library 📅 09: KubeVirt v0. 43. 0 June: 📅 08: KubeVirt v0. 42. 0 May: 📅 12: KubeVirt v0. 41. 0 April: 📅 30: Using Intel vGPUs with Kubevirt 📅 21: Automated Windows Installation With Tekton Pipelines 📅 19: KubeVirt v0. 40. 0 March: 📅 10: KubeVirt v0. 39. 0 📅 03: The KubeVirt Summit 2021 is a wrap! February: 📅 08: KubeVirt v0. 38. 0 January: 📅 18: KubeVirt v0. 37. 0 📅 12: KubeVirt Summit is coming! 2020: December: 📅 16: KubeVirt v0. 36. 0 📅 10: Monitoring KubeVirt VMs from the inside 📅 10: Customizing images for containerized VMs part I 📅 04: High Availability -- RunStrategies for Virtual Machines November: 📅 09: KubeVirt v0. 35. 0 October: 📅 21: Multiple Network Attachments with bridge CNI 📅 07: KubeVirt v0. 34. 0 September: 📅 15: KubeVirt v0. 33. 0 August: 📅 11: KubeVirt v0. 32. 0 📅 06: Import virtual machine from oVirt July: 📅 20: Minikube KubeVirt addon 📅 09: KubeVirt v0. 31. 0 📅 01: Common-templates June: 📅 22: Migrate a sample Windows workload to Kubernetes using KubeVirt and CDI 📅 05: KubeVirt v0. 30. 0 May: 📅 25: SELinux, from basics to KubeVirt 📅 12: KubeVirt VM Image Usage Patterns 📅 06: KubeVirt v0. 29. 0 April: 📅 30: KubeVirt Operation Fundamentals 📅 29: KubeVirt Security Fundamentals 📅 28: KubeVirt Architecture Fundamentals 📅 09: KubeVirt v0. 28. 0 March: 📅 22: Live Migration in KubeVirt 📅 06: KubeVirt v0. 27. 0 February: 📅 25: Advanced scheduling using affinity and anti-affinity rules 📅 14: KubeVirt: installing Microsoft Windows from an ISO 📅 07: KubeVirt v0. 26. 0 📅 06: NA KubeCon 2019 - KubeVirt Deep Dive: Virtualized GPU Workloads on KubeVirt - David Vossel, Red Hat & Vishesh Tanksale, NVIDIA 📅 01: NA KubeCon 2019 - KubeVirt introduction by Steve Gordon and Chandrakanth Jakkidi January: 📅 24: Managing KubeVirt with OpenShift Web Console 📅 21: KubeVirt Laboratory 3, upgrades 📅 13: KubeVirt v0. 25. 0 2019: December: 📅 17: KubeVirt user interface options 📅 10: KubeVirt Laboratory 2, experimenting with CDI 📅 04: KubeVirt Laboratory 1, use KubeVirt 📅 03: KubeVirt v0. 24. 0 November: 📅 28: KubeVirt basic operations video 📅 22: Jenkins Infra upgrade 📅 12: KubeVirt at KubeCon + CloudNativeCon North America 📅 11: Access Virtual Machines' graphic console using noVNC 📅 04: KubeVirt v0. 23. 0 October: 📅 31: Prow jobs for KubeVirt website and Tutorial repo 📅 31: Jenkins Jobs for KubeVirt lab validation 📅 30: Persistent storage of your Virtual Machines in KubeVirt with Rook 📅 23: KubeVirt on Kubernetes with CRI-O from scratch - Installing KubeVirt 📅 16: KubeVirt on Kubernetes with CRI-O from scratch - Installing Kubernetes 📅 10: KubeVirt v0. 22. 0 📅 09: KubeVirt on Kubernetes with CRI-O from scratch September: 📅 09: KubeVirt v0. 21. 0 📅 09: KubeVirt is now part of CNCF Sandbox August: 📅 09: KubeVirt v0. 20. 0 📅 09: KubeVirt Condition Types Renamed 📅 01: KubeVirt Condition Types Rename in Custom Resource July: 📅 30: Node Drain in KubeVirt 📅 29: How to import VM into KubeVirt 📅 12: Website roadmap 📅 08: KubeVirt with Ansible, part 2 📅 05: KubeVirt v0. 19. 0 June: 📅 05: KubeVirt v0. 18. 0 📅 04: KubeVirt vagrant provider May: 📅 21: KubeVirt with Ansible, part 1 – Introduction 📅 06: KubeVirt v0. 17. 0 April: 📅 17: Hyper Converged Operator 📅 05: KubeVirt v0. 16. 
0 March: 📅 14: More About Kubevirt Metrics 📅 05: KubeVirt v0. 15. 0 February: 📅 22: Federated Kubevirt 📅 04: KubeVirt v0. 14. 0 January: 📅 22: An Overview To Kubevirt Metrics 📅 15: KubeVirt v0. 13. 0 📅 11: KubeVirt v0. 12. 0 2018: December: 📅 13: Kubevirt Autolatest 📅 06: KubeVirt v0. 11. 0 November: 📅 26: Kubevirt At Kubecon Na 📅 20: Ignition Support 📅 16: New Volume Types 📅 08: KubeVirt v0. 10. 0 October: 📅 10: Cdi Datavolumes 📅 09: Containerized Data Importer 📅 04: KubeVirt v0. 9. 0 📅 03: Kubevirt Network Rehash September: 📅 12: Attaching To Multiple Networks 📅 11: Kubevirt Memory Overcommit 📅 06: KubeVirt v0. 8. 0 August: 📅 08: Kubevirtci July: 📅 23: Kubevirt V0. 7. 0 📅 04: KubeVirt v0. 7. 0 📅 03: Unit Test Howto June: 📅 21: Run Istio With Kubevirt 📅 20: Kvm Using Device Plugins 📅 13: Proxy VM Conclusion 📅 11: KubeVirt v0. 6. 0 📅 07: Non Dockerized Build 📅 03: Research Run Vms With Istio Service Mesh May: 📅 22: Use Vs Code For Kube Virt Development 📅 16: Ovn Multi Network Plugin For Kubernetes Kubetron 📅 16: Use Glusterfs Cloning With Kubevirt 📅 16: Kubevirt Api Access Control 📅 08: Kubevirt Objects 📅 07: Deploying Vms On Kubernetes Glusterfs Kubevirt 📅 04: KubeVirt v0. 5. 0 📅 04: Deploying Kubevirt On A Single Ovirt Vm April: 📅 27: This Week In Kube Virt 23 📅 25: Kubevirt Network Deep Dive 📅 06: This Week In Kube Virt 22 📅 06: KubeVirt v0. 4. 0 March: 📅 20: This Week In Kube Virt 21 📅 08: This Week In Kube Virt 20 📅 08: KubeVirt v0. 3. 0 February: 📅 23: This Week In Kube Virt 19 📅 10: This Week In Kube Virt 18 January: 📅 30: This Week In Kube Virt 17 📅 19: This Week In Kube Virt 16 Size Xl 📅 19: This Week In Kube Virt 16 Holiday Wrap Up Edition 📅 05: Some Notes On Some Highlights Of V020 📅 05: Kube Virt v0. 2. 0 2017: December: 📅 15: This Week In Kube Virt 15 📅 08: This Week In Kube Virt 14 📅 08: Kube Virt v0. 1. 0 📅 04: This Week In Kube Virt 13 November: 📅 25: This Week In Kube Virt 12 📅 21: This Week In Kube Virt 11 📅 10: This Week In Kube Virt 10 Base 10 📅 07: Kube Virt v0. 0. 4 📅 06: This Week In Kube Virt 9 October: 📅 28: This Week In Kube Virt 8 📅 24: This Week In Kube Virt 7 📅 15: This Week In Kube Virt 6 📅 06: This Week In Kube Virt 5 September: 📅 29: This Week In Kube Virt 4 📅 22: This Week In Kube Virt 3 📅 15: This Week In Kube Virt 2 📅 08: This Week In Kube Virt 1 July: 📅 18: Comparing KubeVirt to other technologies 📅 18: The Role of LibVirt " + "body": " - Blogs Categories: News Weekly Updates Releases Uncategorized Additional filters: Grouped by Date Post calendar: JanFebMarAprMayJunJulAugSepOctNovDec2023  3 1 3 2 2 20223112121311  20212123112222 12020352432321214201932222253274420185234863134422017      2 4454 2023: November: 📅 07: Announcing KubeVirt v1. 1 📅 06: KubeVirt v1. 1. 0 September: 📅 06: Running KubeVirt with Cluster Autoscaler 📅 05: Managing KubeVirt VMs with Ansible July: 📅 24: NetworkPolicies for KubeVirt VMs secondary networks using OVN-Kubernetes 📅 11: KubeVirt v1. 0 has landed! 📅 06: KubeVirt v1. 0. 0 May: 📅 31: Secondary networks connected to the physical underlay for KubeVirt VMs using OVN-Kubernetes March: 📅 06: Secondary networks for KubeVirt VMs using OVN-Kubernetes 📅 03: KubeVirt Summit 2023! 📅 01: KubeVirt v0. 59. 0 2022: October: 📅 13: KubeVirt v0. 58. 0 September: 📅 12: KubeVirt v0. 57. 0 August: 📅 18: KubeVirt v0. 56. 0 📅 12: Simplifying KubeVirt's `VirtualMachine` UX with Instancetypes and Preferences 📅 02: KubeVirt: installing Microsoft Windows 11 from an ISO July: 📅 14: KubeVirt v0. 55. 
0 June: 📅 28: KubeVirt at KubeCon EU 2022 📅 08: KubeVirt v0. 54. 0 May: 📅 09: KubeVirt v0. 53. 0 April: 📅 08: KubeVirt v0. 52. 0 📅 03: Load-balancer for virtual machines on bare metal Kubernetes clusters March: 📅 08: KubeVirt v0. 51. 0 February: 📅 09: KubeVirt v0. 50. 0 January: 📅 25: Dedicated migration network in KubeVirt 📅 24: KubeVirt Summit is coming back! 📅 11: KubeVirt v0. 49. 0 2021: December: 📅 06: KubeVirt v0. 48. 0 October: 📅 13: Running real-time workloads with improved performance 📅 08: KubeVirt v0. 46. 0 September: 📅 21: Import AWS AMIs as KubeVirt Golden Images 📅 08: KubeVirt v0. 45. 0 August: 📅 13: Running virtual machines in Istio service mesh 📅 09: KubeVirt v0. 44. 0 July: 📅 16: Kubernetes Authentication Options using KubeVirt Client Library 📅 09: KubeVirt v0. 43. 0 June: 📅 08: KubeVirt v0. 42. 0 May: 📅 12: KubeVirt v0. 41. 0 April: 📅 30: Using Intel vGPUs with Kubevirt 📅 21: Automated Windows Installation With Tekton Pipelines 📅 19: KubeVirt v0. 40. 0 March: 📅 10: KubeVirt v0. 39. 0 📅 03: The KubeVirt Summit 2021 is a wrap! February: 📅 08: KubeVirt v0. 38. 0 January: 📅 18: KubeVirt v0. 37. 0 📅 12: KubeVirt Summit is coming! 2020: December: 📅 16: KubeVirt v0. 36. 0 📅 10: Monitoring KubeVirt VMs from the inside 📅 10: Customizing images for containerized VMs part I 📅 04: High Availability -- RunStrategies for Virtual Machines November: 📅 09: KubeVirt v0. 35. 0 October: 📅 21: Multiple Network Attachments with bridge CNI 📅 07: KubeVirt v0. 34. 0 September: 📅 15: KubeVirt v0. 33. 0 August: 📅 11: KubeVirt v0. 32. 0 📅 06: Import virtual machine from oVirt July: 📅 20: Minikube KubeVirt addon 📅 09: KubeVirt v0. 31. 0 📅 01: Common-templates June: 📅 22: Migrate a sample Windows workload to Kubernetes using KubeVirt and CDI 📅 05: KubeVirt v0. 30. 0 May: 📅 25: SELinux, from basics to KubeVirt 📅 12: KubeVirt VM Image Usage Patterns 📅 06: KubeVirt v0. 29. 0 April: 📅 30: KubeVirt Operation Fundamentals 📅 29: KubeVirt Security Fundamentals 📅 28: KubeVirt Architecture Fundamentals 📅 09: KubeVirt v0. 28. 0 March: 📅 22: Live Migration in KubeVirt 📅 06: KubeVirt v0. 27. 0 February: 📅 25: Advanced scheduling using affinity and anti-affinity rules 📅 14: KubeVirt: installing Microsoft Windows from an ISO 📅 07: KubeVirt v0. 26. 0 📅 06: NA KubeCon 2019 - KubeVirt Deep Dive: Virtualized GPU Workloads on KubeVirt - David Vossel, Red Hat & Vishesh Tanksale, NVIDIA 📅 01: NA KubeCon 2019 - KubeVirt introduction by Steve Gordon and Chandrakanth Jakkidi January: 📅 24: Managing KubeVirt with OpenShift Web Console 📅 21: KubeVirt Laboratory 3, upgrades 📅 13: KubeVirt v0. 25. 0 2019: December: 📅 17: KubeVirt user interface options 📅 10: KubeVirt Laboratory 2, experimenting with CDI 📅 04: KubeVirt Laboratory 1, use KubeVirt 📅 03: KubeVirt v0. 24. 0 November: 📅 28: KubeVirt basic operations video 📅 22: Jenkins Infra upgrade 📅 12: KubeVirt at KubeCon + CloudNativeCon North America 📅 04: KubeVirt v0. 23. 0 October: 📅 31: Prow jobs for KubeVirt website and Tutorial repo 📅 31: Jenkins Jobs for KubeVirt lab validation 📅 30: Persistent storage of your Virtual Machines in KubeVirt with Rook 📅 23: KubeVirt on Kubernetes with CRI-O from scratch - Installing KubeVirt 📅 16: KubeVirt on Kubernetes with CRI-O from scratch - Installing Kubernetes 📅 10: KubeVirt v0. 22. 0 📅 09: KubeVirt on Kubernetes with CRI-O from scratch September: 📅 09: KubeVirt v0. 21. 0 📅 09: KubeVirt is now part of CNCF Sandbox August: 📅 09: KubeVirt v0. 20. 
0 📅 09: KubeVirt Condition Types Renamed 📅 01: KubeVirt Condition Types Rename in Custom Resource July: 📅 30: Node Drain in KubeVirt 📅 29: How to import VM into KubeVirt 📅 12: Website roadmap 📅 08: KubeVirt with Ansible, part 2 📅 05: KubeVirt v0. 19. 0 June: 📅 05: KubeVirt v0. 18. 0 📅 04: KubeVirt vagrant provider May: 📅 21: KubeVirt with Ansible, part 1 – Introduction 📅 06: KubeVirt v0. 17. 0 April: 📅 17: Hyper Converged Operator 📅 05: KubeVirt v0. 16. 0 March: 📅 14: More About Kubevirt Metrics 📅 05: KubeVirt v0. 15. 0 February: 📅 22: Federated Kubevirt 📅 04: KubeVirt v0. 14. 0 January: 📅 22: An Overview To Kubevirt Metrics 📅 15: KubeVirt v0. 13. 0 📅 11: KubeVirt v0. 12. 0 2018: December: 📅 13: Kubevirt Autolatest 📅 06: KubeVirt v0. 11. 0 November: 📅 26: Kubevirt At Kubecon Na 📅 20: Ignition Support 📅 16: New Volume Types 📅 08: KubeVirt v0. 10. 0 October: 📅 10: Cdi Datavolumes 📅 09: Containerized Data Importer 📅 04: KubeVirt v0. 9. 0 📅 03: Kubevirt Network Rehash September: 📅 12: Attaching To Multiple Networks 📅 11: Kubevirt Memory Overcommit 📅 06: KubeVirt v0. 8. 0 August: 📅 08: Kubevirtci July: 📅 23: Kubevirt V0. 7. 0 📅 04: KubeVirt v0. 7. 0 📅 03: Unit Test Howto June: 📅 21: Run Istio With Kubevirt 📅 20: Kvm Using Device Plugins 📅 13: Proxy VM Conclusion 📅 11: KubeVirt v0. 6. 0 📅 07: Non Dockerized Build 📅 03: Research Run Vms With Istio Service Mesh May: 📅 22: Use Vs Code For Kube Virt Development 📅 16: Ovn Multi Network Plugin For Kubernetes Kubetron 📅 16: Use Glusterfs Cloning With Kubevirt 📅 16: Kubevirt Api Access Control 📅 08: Kubevirt Objects 📅 07: Deploying Vms On Kubernetes Glusterfs Kubevirt 📅 04: KubeVirt v0. 5. 0 📅 04: Deploying Kubevirt On A Single Ovirt Vm April: 📅 27: This Week In Kube Virt 23 📅 25: Kubevirt Network Deep Dive 📅 06: This Week In Kube Virt 22 📅 06: KubeVirt v0. 4. 0 March: 📅 20: This Week In Kube Virt 21 📅 08: This Week In Kube Virt 20 📅 08: KubeVirt v0. 3. 0 February: 📅 23: This Week In Kube Virt 19 📅 10: This Week In Kube Virt 18 January: 📅 30: This Week In Kube Virt 17 📅 19: This Week In Kube Virt 16 Size Xl 📅 19: This Week In Kube Virt 16 Holiday Wrap Up Edition 📅 05: Some Notes On Some Highlights Of V020 📅 05: Kube Virt v0. 2. 0 2017: December: 📅 15: This Week In Kube Virt 15 📅 08: This Week In Kube Virt 14 📅 08: Kube Virt v0. 1. 0 📅 04: This Week In Kube Virt 13 November: 📅 25: This Week In Kube Virt 12 📅 21: This Week In Kube Virt 11 📅 10: This Week In Kube Virt 10 Base 10 📅 07: Kube Virt v0. 0. 4 📅 06: This Week In Kube Virt 9 October: 📅 28: This Week In Kube Virt 8 📅 24: This Week In Kube Virt 7 📅 15: This Week In Kube Virt 6 📅 06: This Week In Kube Virt 5 September: 📅 29: This Week In Kube Virt 4 📅 22: This Week In Kube Virt 3 📅 15: This Week In Kube Virt 2 📅 08: This Week In Kube Virt 1 July: 📅 18: Comparing KubeVirt to other technologies 📅 18: The Role of LibVirt " }, { - "id": 189, + "id": 188, "url": "/videos/demos", "title": "Demos", "author" : "", "tags" : "", "body": " - KubeVirt Demos Playlist: A playlist of all of the KubeVirt demos. " }, { - "id": 190, + "id": 189, "url": "/docs/", "title": "Introduction", "author" : "", "tags" : "", "body": " - Check out the user guide! " }, { - "id": 191, + "id": 190, "url": "/pages/ec2", "title": "Easy install using AWS", "author" : "", "tags" : "", "body": " - " }, { - "id": 192, + "id": 191, "url": "/gallery/", "title": "Gallery", "author" : "", "tags" : "picture gallery, photos", "body": " - DevConf. cz 2020 in pictures : January 31, 2020 This article shows some of the KubeVirt presence in DevConf. 
cz 2020 Read More FOSDEM 2020 in pictures : February 2, 2020 This article shows KubeVirt presence at FOSDEM 2020 Read More " }, { - "id": 193, + "id": 192, "url": "/pages/gcp", "title": "Easy install using GCP", "author" : "", "tags" : "", "body": " - " }, , , , { - "id": 194, + "id": 193, "url": "/blogs/", "title": "Blogs", "author" : "", "tags" : "", - "body": " - Blogs Categories: News Weekly Updates Releases Uncategorized Additional filters: Grouped by Date Announcing KubeVirt v1. 1: November 07, We are very pleased to announce the release of KubeVirt v1. 1! Read More KubeVirt v1. 1. 0: November 06, 2023 This article provides information about KubeVirt release v1. 1. 0 changes Read More Running KubeVirt with Cluster Autoscaler: September 6, 2023 This post explains how to set up KubeVirt with Cluster Autoscaler on EKS Read More Managing KubeVirt VMs with Ansible: September 5, 2023 This post explains how to manage KubeVirt VMs with the kubevirt. core Ansible collection. Read More NetworkPolicies for KubeVirt VMs secondary networks using OVN-Kubernetes: July 21, 2023 This post explains how to configure NetworkPolicies for KubeVirt VMs secondary networks. Read More New 1 of 37 Old " + "body": " - Blogs Categories: News Weekly Updates Releases Uncategorized Additional filters: Grouped by Date Announcing KubeVirt v1. 1: November 07, We are very pleased to announce the release of KubeVirt v1. 1! Read More KubeVirt v1. 1. 0: November 06, 2023 This article provides information about KubeVirt release v1. 1. 0 changes Read More Running KubeVirt with Cluster Autoscaler: September 6, 2023 This post explains how to set up KubeVirt with Cluster Autoscaler on EKS Read More Managing KubeVirt VMs with Ansible: September 5, 2023 This post explains how to manage KubeVirt VMs with the kubevirt. core Ansible collection. Read More NetworkPolicies for KubeVirt VMs secondary networks using OVN-Kubernetes: July 21, 2023 This post explains how to configure NetworkPolicies for KubeVirt VMs secondary networks. Read More New 1 of 36 Old " }, , { - "id": 195, + "id": 194, "url": "/videos/kubevirt-summit", "title": "Kubevirt Summit", "author" : "", "tags" : "", "body": " - Introduction And Kubevirt History: In the first session of the KubeVirt Summit, Fabian Deutsch (@dummdida) talks about the project's history. Automated Migration of VMs from VMware or Openstack to KubeVirt: KubeVirt opens scenarios for a Kubernetes based infrastructure that can handle both VMs and containers. Wouldn't it be great to just automatically move all your Virtual Machines from legacy environments and consolidate your whole infrastructure around Kubernetes and KubeVirt? Avoid merging broken code with Prow: This session is about creation of prow jobs in general. Prow is a Kubernetes based CI/CD system. Building great VMs with common templates: Common templates are covering most of the nowadays operating systems. Users can easily create e. g. Windows VMs, without complicated settings. Automate KubeVirt with Tekton pipelines: This talk introduces a new effort to bring KubeVirt specific tasks to Tekton Pipelines (CI/CD-style pipelines on k8s). KubeVirt data protection and forensics forum: Let's get together to discuss plans/ideas to extend KubeVirt's data protection and forensics functionality. Zero downtime KubeVirt updates: In this session I'll cover the general strategy behind how we approach updating KubeVirt from a developer's perspective as well as discuss future improvments to our update process. 
Introducing Volume Hotplug in KubeVirt: Introduction into the current state of volume hotpluging in KubeVirt, what is possible, what is not possible and what are the challenges. Accelerating VNF and CNF with PCI passthrough and KubeVirt: This sesion introduces PCI device passthrough to containers and VMs managed by KubeVirt. Harvester an OSS HCI solution built on Kubernetes and KubeVirt: Project Harvester is a new open source alternative to traditional proprietary hyperconverged infrastructure software. It is built on top of cutting-edge open source technologies including Kubernetes, KubeVirt and Longhorn. Kubevirt Live migration and SRIOV: KubeVirt Live Migration now supports VM's connected to SRIOV NIC's. Moving oVirt and VMware VMs to KubeVirt with VM Import Operator and Forklift. : VM Import Operator (VMIO) allows Kubernetes administrators to easily import their oVirt- and VMware- managed virtual machines to KubeVirt. KubeVirt opinionated deployment via Hyperconverged Cluster Operator: The Hyperconverged Cluster Operator can be installed on bare metal server clusters in a matter of minutes, even from a GUI, without requiring a deep knowledge of Kubernetes internals. Privilege dropping, one capability at a time: KubeVirt's architecture is composed of two main components virt-handler, a trusted DaemonSet, running in each node, which operates as the virtualization agent, and virt-launcher, an untrusted Kubernetes pod encapsulating a single libvirt + qemu process. Introducing the new KubeVirt driver for Ansible Molecule: Molecule is a well known test framework for Ansible. But when you run your Molecule test in Kubernetes, no real good solution exists. I'm working on creating new Molecule driver for KubeVirt to find a better approach and get a 100% pure Kubernetes solution. Virtual Machine Batch API: KubeVirt extends the Kubernetes ReplicaSets API to provide Virtual Machines with similar functionality and the same can be done with Kubernetes Jobs. In order to bulk schedule VirtualMachines, an admin could use a VirtualMachine Batch API, a VirtualMachineJob, to launch many VirtualMachines from a single API call. CPU Pinning with custom policies: KubeVirt supports CPU pinning via the Kubernetes CPU Manager. However there are a few gaps with achieving CPU pinning only via CPU Manager It supports only static policy and doesn’t allow for custom pinning. It supports only Guaranteed QoS class. The Road to Version 1: A few months ago the KubeVirt community started to discuss what would be the requirements that KubeVirt should meet in order to release KubeVirt Version 1. 0. Moving a Visual Effects Studio to the cloud with Kubernetes and KubeVirt: As the rapid transition to remote work happened, VFX studios and designers used to beefy workstations, on-site storage clusters and high performance networking have had to scramble to make those resources available to people at home. Office Hours Q&A with KubeVirt maintainers: Our final session is an opportunity for you to ask all your KubeVirt questions, whether they're about the project, or they are about using KubeVirt in production. Maintainers and experts will be on hand. " }, , { - "id": 196, + "id": 195, "url": "/labs/kubernetes/lab1", "title": "Use KubeVirt", "author" : "", "tags" : "laboratory, kubevirt installation, start vm, stop vm, delete vm, access console, lab", "body": " - Use KubeVirt You can experiment with this lab online at KillercodaCreate a Virtual Machine: Download the VM manifest and explore it. 
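As an illustrative, hedged sketch (the upstream vm.yaml fetched in the next step is the authoritative version and may differ), a roughly equivalent minimal VirtualMachine can also be applied inline; the container disk image, label and memory request below are assumptions for illustration only:
cat <<'EOF' | kubectl apply -f -
# Hedged sketch of a minimal VirtualMachine similar to the lab's testvm; the upstream manifest may differ.
apiVersion: kubevirt.io/v1
kind: VirtualMachine
metadata:
  name: testvm
spec:
  running: false            # start it later with `virtctl start testvm`
  template:
    metadata:
      labels:
        kubevirt.io/domain: testvm
    spec:
      domain:
        devices:
          disks:
            - name: containerdisk
              disk:
                bus: virtio
        resources:
          requests:
            memory: 64M      # illustrative value, the demo image is tiny
      volumes:
        - name: containerdisk
          containerDisk:
            image: quay.io/kubevirt/cirros-container-disk-demo   # assumed demo image
EOF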
Note it uses a container disk and as such doesn’t persist data. Such container disks currently exist for alpine, cirros and fedora. wget https://kubevirt. io/labs/manifests/vm. yamlless vm. yamlApply the manifest to Kubernetes. kubectl apply -f https://kubevirt. io/labs/manifests/vm. yamlvirtualmachine. kubevirt. io testvm created virtualmachineinstancepreset. kubevirt. io small createdManage Virtual Machines (optional):: To get a list of existing Virtual Machines. Note the running status. kubectl get vmskubectl get vms -o yaml testvmTo start a Virtual Machine you can use: virtctl start testvmIf you installed virtctl via krew, you can use kubectl virt: # Start the virtual machine:kubectl virt start testvm# Stop the virtual machine:kubectl virt stop testvmAlternatively you could use kubectl patch: # Start the virtual machine:kubectl patch virtualmachine testvm --type merge -p \ '{ spec :{ running :true}}'# Stop the virtual machine:kubectl patch virtualmachine testvm --type merge -p \ '{ spec :{ running :false}}'Now that the Virtual Machine has been started, check the status. Note the running status. kubectl get vmiskubectl get vmis -o yaml testvmAccessing VMs (serial console): Connect to the serial console of the Cirros VM. Hit return / enter a few times and login with the displayed username and password. virtctl console testvmDisconnect from the virtual machine console by typing: ctrl+]. Controlling the State of the VM: To shut it down: virtctl stop testvmTo delete a Virtual Machine: kubectl delete vm testvmThis concludes this section of the lab. You can watch how the laboratory is done in the following video: Next Lab " }, { - "id": 197, + "id": 196, "url": "/labs/kubernetes/lab2", "title": "Experiment with CDI", "author" : "", "tags" : "laboratory, importer, vm import, containerized data importer, CDI, lab", "body": " - Experiment with the Containerized Data Importer (CDI) You can experiment with this lab online at KillercodaIn this lab, you will learn how to use Containerized Data Importer (CDI) to import Virtual Machine images for use with Kubevirt. CDI simplifies the process of importing data from various sources into Kubernetes Persistent Volumes, making it easier to use that data within your virtual machines. CDI introduces DataVolumes, custom resources meant to be used as abstractions of PVCs. A custom controller watches for DataVolumes and handles the creation of a target PVC with all the spec and annotations required for importing the data. Depending on the type of source, other specific CDI controller will start the import process and create a raw image named disk. img with the desired content into the target PVC. This ‘lab’ targets deployment on one node as it uses Minikube and its hostpath storage class which can create PersistentVolumes (PVs) on only one node at a time. In production use, a StorageClass capable of ReadWriteOnce or better operation should be deployed to ensure PVs are accessible from any node. Install the CDI: In this exercise we deploy the latest release of CDI using its Operator. export VERSION=$(basename $(curl -s -w %{redirect_url} https://github. com/kubevirt/containerized-data-importer/releases/latest))kubectl create -f https://github. com/kubevirt/containerized-data-importer/releases/download/$VERSION/cdi-operator. yamlkubectl create -f https://github. com/kubevirt/containerized-data-importer/releases/download/$VERSION/cdi-cr. yamlCheck the status of the cdi CustomResource (CR) created in the previous step. 
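As a hedged convenience not found in the original lab, the phase can also be polled non-interactively from a shell; this sketch assumes the cdi CustomResource exposes its phase under status.phase, as recent CDI releases do:
# Poll the cdi CR until it reports the Deployed phase (assumed field: .status.phase).
until kubectl get cdi cdi -n cdi -o jsonpath='{.status.phase}' 2>/dev/null | grep -q Deployed; do
  echo waiting for CDI to report the Deployed phase...
  sleep 5
done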
The CR’s Phase will change from Deploying to Deployed as the pods it deploys are created and reach the Running state. kubectl get cdi cdi -n cdiReview the “cdi” pods that were added. kubectl get pods -n cdiUse CDI to Import a Disk Image: First, you need to create a DataVolume that points to the source data you want to import. In this example, we’ll use a DataVolume to import a Fedora37 Cloud Image into a PVC and launch a Virtual Machine making use of it. cat <<EOF > dv_fedora. ymlapiVersion: cdi. kubevirt. io/v1beta1kind: DataVolumemetadata: name: fedora spec: storage: resources: requests: storage: 5Gi source: http: url: https://download. fedoraproject. org/pub/fedora/linux/releases/37/Cloud/x86_64/images/Fedora-Cloud-Base-37-1. 7. x86_64. raw. xz EOFkubectl create -f dv_fedora. ymlA custom CDI controller will use this DataVolume to create a PVC with the same name and proper spec/annotations so that an import-specific controller detects it and launches an importer pod. This pod will gather the image specified in the source field. kubectl get pvc fedora -o yamlkubectl get pod # Make note of the pod name assigned to the import processkubectl logs -f importer-fedora-pnbqh # Substitute your importer-fedora pod name here. Notice that the importer downloaded the publicly available Fedora Cloud qcow image. Once the importer pod completes, this PVC is ready for use in kubevirt. If the importer pod completes in error, you may need to retry it or specify a different URL to the fedora cloud image. To retry, first delete the importer pod and the DataVolume, and then recreate the DataVolume. kubectl delete -f dv_fedora. yml --waitkubectl create -f dv_fedora. ymlLet’s create a Virtual Machine making use of it. Review the file vm1_pvc. yml. wget https://kubevirt. io/labs/manifests/vm1_pvc. ymlcat vm1_pvc. ymlWe change the yaml definition of this Virtual Machine to inject the default public key of user in the cloud instance. # Generate a password-less SSH key using the default location. ssh-keygenPUBKEY=`cat ~/. ssh/id_rsa. pub`sed -i s%ssh-rsa. *%$PUBKEY% vm1_pvc. ymlkubectl create -f vm1_pvc. ymlThis will create and start a Virtual Machine named vm1. We can use the following command to check our Virtual Machine is running and to gather its IP. You are looking for the IP address beside the virt-launcher pod. kubectl get pod -o wideSince we are running an all in one setup, the corresponding Virtual Machine is actually running on the same node, we can check its qemu process. ps -ef | grep qemu | grep vm1Wait for the Virtual Machine to boot and to be available for login. You may monitor its progress through the console. The speed at which the VM boots depends on whether baremetal hardware is used. It is much slower when nested virtualization is used, which is likely the case if you are completing this lab on an instance on a cloud provider. virtctl console vm1Disconnect from the virtual machine console by typing: ctrl+] Finally, we will connect to vm1 Virtual Machine (VM) as a regular user would do, i. e. via ssh. This can be achieved by just ssh to the gathered ip in case we are in the Kubernetes software defined network (SDN). This is true, if we are connected to a node that belongs to the Kubernetes cluster network. Probably if you followed the Easy install using AWS or Easy install using GCP your cloud instance is already part of the cluster. ssh fedora@VM_IPOn the other side, if you followed Easy install using minikube take into account that you will need to ssh into Minikube first, as shown below. 
$ kubectl get vmiNAME AGE PHASE IP NODENAMEvm1 109s Running 172. 17. 0. 16 minikube$ minikube ssh _ _ _ _ ( ) ( ) ___ ___ (_) ___ (_)| |/') _ _ | |_ __/' _ ` _ `\| |/' _ `\| || , < ( ) ( )| '_`\ /'__`\| ( ) ( ) || || ( ) || || |\`\ | (_) || |_) )( ___/(_) (_) (_)(_)(_) (_)(_)(_) (_)`\___/'(_,__/'`\____)$ ssh fedora@172. 17. 0. 16The authenticity of host '172. 17. 0. 16 (172. 17. 0. 16)' can't be established. ECDSA key fingerprint is SHA256:QmJUvc8vbM2oXiEonennW7+lZ8rVRGyhUtcQBVBTnHs. Are you sure you want to continue connecting (yes/no)? yesWarning: Permanently added '172. 17. 0. 16' (ECDSA) to the list of known hosts. fedora@172. 17. 0. 16's password:Finally, on a usual situation you will probably want to give access to your vm1 VM to someone else from outside the Kubernetes cluster nodes. Someone who is actually connecting from his or her laptop. This can be achieved with the virtctl tool already installed in Easy install using minikube. Note that this is the same case as connecting from our laptop to vm1 VM running on our local Minikube instance First, we are going expose the ssh port of the vm1 as NodePort type. Then verify that the Kubernetes object service was created successfully on a random port of the Minikube or cloud instance. $ virtctl expose vmi vm1 --name=vm1-ssh --port=20222 --target-port=22 --type=NodePort Service vm1-ssh successfully exposed for vmi vm1$ kubectl get svcNAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGEvm1-ssh NodePort 10. 101. 226. 150 <none> 20222:32495/TCP 24mOnce exposed successfully, check the IP of your Minikube VM or cloud instance and verify you can reach the VM using your public SSH key previously configured. In case of cloud instances verify that security group applied allows traffic to the random port created. minikube ip192. 168. 39. 74$ ssh -i ~/. ssh/id_rsa fedora@192. 168. 39. 74 -p 32495 Last login: Wed Oct 9 13:59:29 2019 from 172. 17. 0. 1 [fedora@vm1 ~]$This concludes this section of the lab. You can watch how the laboratory is done in the following video: Previous Lab " }, { - "id": 198, + "id": 197, "url": "/labs/kubernetes/lab3", "title": "KubeVirt Upgrades", "author" : "", "tags" : "laboratory, kubevirt upgrades, upgrade, lifecycle, lab", "body": " - Experiment with KubeVirt UpgradesDeploy KubeVirt: NOTE: For upgrading to the latest KubeVirt version, first we will install a specific older version of the operator, if you’re already using latest, please start with an older KubeVirt version and follow Lab1 to deploy KubeVirt on it, but using version v0. 56. 1 instead. If you’ve already covered this, jump over this section. Let’s stick to use the release v0. 56. 1: export KUBEVIRT_VERSION=v0. 56. 1Let’s deploy the KubeVirt Operator by running the following command: $ kubectl create -f https://github. com/kubevirt/kubevirt/releases/download/${KUBEVIRT_VERSION}/kubevirt-operator. yamlnamespace/kubevirt created. . . deployment. apps/virt-operator createdLet’s wait for the operator to become ready: $ kubectl wait --for condition=ready pod -l kubevirt. io=virt-operator -n kubevirt --timeout=100spod/virt-operator-5ddb4674b9-6fbrv condition metNow let’s deploy KubeVirt by creating a Custom Resource that will trigger the ‘operator’ and perform the deployment: $ kubectl create -f https://github. com/kubevirt/kubevirt/releases/download/${KUBEVIRT_VERSION}/kubevirt-cr. yamlkubevirt. kubevirt. 
io/kubevirt createdIf you’re running in a virtualized environment, in order to be able to run VMs here we need to pre-configure KubeVirt so it uses software-emulated virtualization instead of trying to use real hardware virtualization. $ kubectl -n kubevirt patch kubevirt kubevirt --type=merge --patch '{ spec :{ configuration :{ developerConfiguration :{ useEmulation :true}}}}'configmap/kubevirt-config createdLet’s check the deployment: $ kubectl get pods -n kubevirtOnce it’s ready, it will show something similar to the information below: $ kubectl get pods -n kubevirtNAME READY STATUS RESTARTS AGEvirt-api-7fc57db6dd-g4s4w 1/1 Running 0 3mvirt-api-7fc57db6dd-zd95q 1/1 Running 0 3mvirt-controller-6849d45bcc-88zd4 1/1 Running 0 3mvirt-controller-6849d45bcc-cmfzk 1/1 Running 0 3mvirt-handler-fvsqw 1/1 Running 0 3mvirt-operator-5649f67475-gmphg 1/1 Running 0 4mvirt-operator-5649f67475-sw78k 1/1 Running 0 4mDeploy a VM: Once all the containers are with the status “Running” you can execute the command below for applying a YAML definition of a virtual machine into our current Kubernetes environment: First, let’s wait for all the pods to be ready like previously provided example: $ kubectl wait --for condition=ready pod -l kubevirt. io=virt-api -n kubevirt --timeout=100spod/virt-api-5ddb4674b9-6fbrv condition met$ kubectl wait --for condition=ready pod -l kubevirt. io=virt-controller -n kubevirt --timeout=100spod/virt-controller-p3d4o-1fvfz condition met$ kubectl wait --for condition=ready pod -l kubevirt. io=virt-handler -n kubevirt --timeout=100spod/virt-handler-1b4n3z4674b9-sf1rl condition metAnd proceed with the VM creation: $ kubectl apply -f https://kubevirt. io/labs/manifests/vm. yamlvirtualmachine. kubevirt. io/testvm createdUsing the command below for checking that the VM is defined: $ kubectl get vmsNAME AGE RUNNING VOLUMEtestvm 22s falseNotice from the output that the VM is not running yet. To start a VM, virtctl~~~` should be used: $ virtctl start testvmVM testvm was scheduled to startNow you can check again the VM status: $ kubectl get vmsNAME AGE RUNNING VOLUMEtestvm 0s falseOnce the VM is running you can inspect its status: kubectl get vmis$ kubectl get vmisNAME AGE PHASE IP NODENAMEtestvm 10s SchedulingOnce it’s ready, the command above will print something like: $ kubectl get vmisNAME AGE PHASE IP NODENAMEtestvm 1m Running 10. 32. 0. 11 masterWhile the PHASE is still Scheduling you can run the same command for checking again: $ kubectl get vmisOnce the PHASE will change to Running, we’re ready for upgrading KubeVirt. Define the next version to upgrade to: KubeVirt starting from v0. 17. 0 onwards, allows to upgrade one version at a time, by using two approaches as defined in the user-guide: Patching the imageTag value in the KubeVirt CR spec Updating the operator if no imageTag is defined (defaulting to upgrade to match the operator version)WARNING: In both cases, the supported scenario is updating from N-1 to N NOTE: Zero downtime rolling updates are supported starting with release v0. 17. 0 onwards. Updating from any release prior to the KubeVirt v0. 17. 0 release is not supported. Performing the upgrade: Updating the KubeVirt operator if no imageTag value is setWhen no imageTag value is set in the KubeVirt CR, the system assumes that the version of KubeVirt is locked to the version of the operator. This means that updating the operator will result in the underlying KubeVirt installation being updated as well. Let’s upgrade to the newer version after the one installed (v0. 56. 
1 -> v0. 57. 0): $ export KUBEVIRT_VERSION=v0. 57. 0$ kubectl apply -f https://github. com/kubevirt/kubevirt/releases/download/${KUBEVIRT_VERSION}/kubevirt-operator. yamlWarning: kubectl apply should be used on resource created by either kubectl create --save-config or kubectl apply. . . deployment. apps/virt-operator configuredNOTE: Compared to the first step of the lab now we are using apply instead of create to deploy the newer version because the operator already exists. In any case, we can check that the VM is still running $ kubectl get vmisNAME AGE PHASE IP NODENAMEtestvm 1m Running 10. 32. 0. 11 masterFinal upgrades: You can keep testing in this lab updating ‘one version at a time’ until reaching the value of KUBEVIRT_LATEST_VERSION: $ export KUBEVIRT_LATEST_VERSION=$(curl -s https://api. github. com/repos/kubevirt/kubevirt/releases/latest | jq -r . tag_name)$ echo -e CURRENT: $KUBEVIRT_VERSION LATEST: $KUBEVIRT_LATEST_VERSION Compare the values between and continue upgrading ‘one release at a time’ by: Choosing the target version: $ export KUBEVIRT_VERSION=vX. XX. XUpdating the operator to that release: $ kubectl apply -f https://github. com/kubevirt/kubevirt/releases/download/${KUBEVIRT_VERSION}/kubevirt-operator. yamlWarning: kubectl apply should be used on resource created by either kubectl create --save-config or kubectl apply. . . deployment. apps/virt-operator configuredThe following command shows how to check the operator version $ echo $(kubectl get deployment. apps virt-operator -n kubevirt -o jsonpath='{. spec. template. spec. containers[0]. env[?(@. name== KUBEVIRT_VERSION )]. value}')Wrap-up: Shutting down a VM works by either using virtctl or editing the VM. $ virtctl stop testvmVM testvm was scheduled to stopFinally, the VM can be deleted using: $ kubectl delete vms testvmvirtualmachine. kubevirt. io testvm deletedWhen updating using the operator, we can see that the ‘AGE’ of containers is similar between them, but when updating only the kubevirt version, the operator ‘AGE’ keeps increasing because it is not ‘recreated’. This concludes this section of the lab. You can watch how the laboratory is done in the following video: Previous Lab " }, { - "id": 199, + "id": 198, "url": "/labs/", "title": "Labs", "author" : "", "tags" : "", "body": " - Check out the Available labs on the side menu " }, , , , { - "id": 200, + "id": 199, "url": "/videos/community/meetings", "title": "Weekly Meetings", "author" : "", "tags" : "", "body": " - KubeVirt Community Weekly Meetings Playlist: A playlist of all of the KubeVirt community weekly meetings. KubeVirt Community SIG - Performance & Scale Weekly Meetings Playlist: A playlist of all of the KubeVirt SIG - Performance & Scale community weekly meetings. " }, { - "id": 201, + "id": 200, "url": "/labs/kubernetes/migration", "title": "Live Migration", "author" : "", "tags" : "laboratory, kubevirt installation, feature-gate, VM, Live Migration, lab", "body": " - Live MigrationLive Migration is a common virtualization featuresupported by KubeVirt where virtual machines running on one cluster node moveto another cluster node without shutting down the guest OS or its applications. To experiment with KubeVirt live migration in a Kubernetes test environment, somesetup is required. Start a Kubernetes cluster with the following requirements: Two or more nodes CNI plugin: Flannel is a good pick for proof on concept environments. 
Nested or emulated virtualization KubeVirtFor a simple test environment using Minikube, refer to the Minikube Quickstart on this site. Check the status of nodes and kubevirt: To check on the nodes and their IP ranges run: kubectl get nodes -o wideThis will return a report like NAME STATUS ROLES AGE VERSION INTERNAL-IP EXTERNAL-IP OS-IMAGE KERNEL-VERSION CONTAINER-RUNTIMEminikube Ready control-plane,master 2m43s v1. 20. 7 192. 168. 39. 240 <none> Buildroot 2020. 02. 12 4. 19. 182 docker://20. 10. 6minikube-m02 Ready <none> 118s v1. 20. 7 192. 168. 39. 245 <none> Buildroot 2020. 02. 12 4. 19. 182 docker://20. 10. 6Check that kubevirt has fully deployed: kubectl -n kubevirt get kubevirtNAME AGE PHASEkubevirt 3m20s DeployedEnable Live Migration: Live migration is, at the time of writing, not a standard feature in KubeVirt. To enable the feature, create a ConfigMap in the “kubevirt” Namespace called “kubevirt-config”. kubectl apply -f - <<EOFapiVersion: v1kind: ConfigMapmetadata: name: kubevirt-config namespace: kubevirt labels: kubevirt. io: data: feature-gates: LiveMigration EOFCreate a Virtual Machine: Next, create a VM. This lab uses the “testvm” from lab1. kubectl apply -f https://kubevirt. io/labs/manifests/vm. yamlvirtctl start testvmIn a multi-node environment, it is helpful to know on which node a pod is running. View its node using -o wide: kubectl get pod -o wideNotice in this example, the pod shows as running on NODE “minikube-m02”: NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATESvirt-launcher-testvm-c8nzz 2/2 Running 0 32s 10. 244. 1. 12 minikube-m02 <none> <none>Start a Service on the Virtual Machine: Using virtctl, expose two ports for testing, ssh and http/8080: virtctl expose vmi testvm --name=testvm-ssh --port=22 --type=NodePortvirtctl expose vmi testvm --name=testvm-http --port=8080 --type=NodePortStart by logging in to the console and running a simple web server using netcat: virtctl console testvmThe default user “cirros” and its password are mentioned on the console loginprompt, use them to log in. Next, run the following while loop to continuouslyrespond to any http connection attempt with a test message: while true; do ( echo HTTP/1. 0 200 Ok ; echo; echo Migration test ) | nc -l -p 8080; doneLeave the loop running, and either break out of the console with CTRL-] or openanother terminal on the same machine. To test the service, several bits of information will need to be coordinated. To collect the minikube node IP address and the NodePort of the http service, run: IP=$(minikube ip)PORT=$(kubectl get svc testvm-http -o jsonpath='{. spec. ports[0]. nodePort}')Now use curl to read data from the simple web service: curl ${IP}:${PORT}This should output Migration test. If all is well, it is time to migrate thevirtual machine to another node. Migrate VM: To migrate the testvm vmi from one node to the other, run: virtctl migrate testvmTo ensure migration happens, watch the pods in “wide” view: kubectl get pods -o wideNAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATESvirt-launcher-testvm-8src7 0/2 Completed 0 5m 10. 244. 1. 14 minikube-m02 <none> <none>virt-launcher-testvm-zxlts 2/2 Running 0 21s 10. 244. 0. 7 minikube <none> <none>Notice the original virt-launcher pod has entered the Completed state and the virtual machine is now running on the minikube node. Test the service previously started is still running: curl ${IP}:${PORT}Again, this should output Migration test. Summary: This lab is now concluded. 
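One optional, hedged aside on the migration step above: the migration triggered by virtctl migrate is represented by a VirtualMachineInstanceMigration object, so its state can also be inspected directly; the resource name and the status field below assume a recent KubeVirt release.
# List migrations created by `virtctl migrate testvm`, then check whether the VMI reports completion.
kubectl get virtualmachineinstancemigrations
kubectl get vmi testvm -o jsonpath='{.status.migrationState.completed}'; echo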
This exercise has demonstrated the ability ofKubeVirt Live Migration to move a running virtual machine from one node toanother without requiring restart of running applications. " }, { - "id": 202, + "id": 201, "url": "/category/news.html", "title": "News", "author" : "", "tags" : "", "body": " - " }, { - "id": 203, + "id": 202, "url": "/blogs/news", "title": "News", "author" : "", "tags" : "", - "body": " - Blogs Categories: News Weekly Updates Releases Uncategorized Additional filters: Grouped by Date Announcing KubeVirt v1. 1: November 07, We are very pleased to announce the release of KubeVirt v1. 1! Read More Running KubeVirt with Cluster Autoscaler: September 6, 2023 This post explains how to set up KubeVirt with Cluster Autoscaler on EKS Read More Managing KubeVirt VMs with Ansible: September 5, 2023 This post explains how to manage KubeVirt VMs with the kubevirt. core Ansible collection. Read More NetworkPolicies for KubeVirt VMs secondary networks using OVN-Kubernetes: July 21, 2023 This post explains how to configure NetworkPolicies for KubeVirt VMs secondary networks. Read More KubeVirt v1. 0 has landed!: July 11, We are very pleased to announce the release of KubeVirt v1. 0! Read More Secondary networks connected to the physical underlay for KubeVirt VMs using OVN-Kubernetes: May 31, 2023 This post explains how to configure secondary networks connected to the physical underlay for KubeVirt virtual machines. Read More Secondary networks for KubeVirt VMs using OVN-Kubernetes: March 06, 2023 This post explains how to configure cluster-wide overlays as secondary networks for KubeVirt virtual machines. Read More KubeVirt Summit 2023!: Mar 03, 2023 Join us for the KubeVirt community's third annual dedicated online event Read More Simplifying KubeVirt's `VirtualMachine` UX with Instancetypes and Preferences: August 12, 2022 An introduction to Instancetypes and preferences in KubeVirt Read More KubeVirt: installing Microsoft Windows 11 from an ISO: August, 2, 2022 This blog post describes how to create a Microsoft Windows 11 virtual machine with KubeVirt Read More KubeVirt at KubeCon EU 2022: June 28, 2022 A short report on the two sessions KubeVirt presented at KubeCon EU 2022 Read More Load-balancer for virtual machines on bare metal Kubernetes clusters: May 03, 2022 This post illustrates setting up a virtual machine with MetalLB LoadBalancer service. Read More Dedicated migration network in KubeVirt: January 25, 2022 KubeVirt now supports using a separate network for live migrations Read More KubeVirt Summit is coming back!: Jan 24, 2022 Join us for the KubeVirt community's second annual dedicated online event Read More Running real-time workloads with improved performance: October 14, 2021 This blog post details the various enhancements made to improve the performance of real-time workloads in KubeVirt Read More Import AWS AMIs as KubeVirt Golden Images: September 21, 2021 This blog post outlines the fundamentals for how to import VMs from AWS into KubeVirt Read More Running virtual machines in Istio service mesh: August 25, 2021 This blog post demonstrates running virtual machines in Istio service mesh. Read More Kubernetes Authentication Options using KubeVirt Client Library: July 16, 2021 This blog post discusses authentication methods that can be used with the KubeVirt client-go library. Read More Using Intel vGPUs with Kubevirt: April 30, 2021 This blog post guides users on how to improve VM graphics performance using Intel Core processors, GPU Virtualization and Kubevirt. 
Read More Automated Windows Installation With Tekton Pipelines: April 21, 2021 This blog shows how KubeVirt Tekton Tasks can be utilized to automatically install and setup Windows VMs from scratch Read More The KubeVirt Summit 2021 is a wrap!: Mar 3, 2021 The KubeVirt community held their first dedicated online event last month. Read More KubeVirt Summit is coming!: Jan 12, 2021 The KubeVirt community will have their first dedicated online event Read More Customizing images for containerized VMs part I: December 10, 2020 A use case is exposed where containerized VMs running on top of Kubernetes ease the deployment of standardized VMs required by software developers. In this first part, we focus on creating standard images using different tools and then containerize them so that they can be stored in a container registry. . . . Read More High Availability -- RunStrategies for Virtual Machines: December 04, 2020 This blog post outlines the various RunStrategies available to VMs Read More Multiple Network Attachments with bridge CNI: October 21, 2020 This post illustrates configuring secondary interfaces at VMs with a L2 linux bridge at nodes using just kube api. Read More Import virtual machine from oVirt: August 07, 2020 This blog post describes how to import virtual machine using vm-import-operator Read More Minikube KubeVirt addon: July 20, 2020 This blog post describes how to use minikube and the KubeVirt addon Read More Common-templates: July 01, 2020 This blog post describe basic factors and usage of common-templates Read More Migrate a sample Windows workload to Kubernetes using KubeVirt and CDI: June 22, 2020 This blog post outlines methods to migrate a sample Windows workload to Kubernetes using KubeVirt and CDI Read More SELinux, from basics to KubeVirt: May 25, 2020 This blog details step by step how SELinux is leveraged in KubeVirt to isolate virtual machines from each other. Read More KubeVirt VM Image Usage Patterns: May 12, 2020 This blog post outlines methods for building and using virtual machine images with KubeVirt Read More KubeVirt Operation Fundamentals: April 30, 2020 This blog post outlines fundamentals around the KubeVirt's approach to installs and updates. Read More KubeVirt Security Fundamentals: April 29, 2020 This blog post outlines fundamentals around the KubeVirt's approach to security. Read More KubeVirt Architecture Fundamentals: April 28, 2020 This blog post outlines the core set of design decisions that shaped KubeVirt into what it is today. Read More Live Migration in KubeVirt: March 22, 2020 KubeVirt leverages Live Migration to support workloads to keep running while nodes can be moved to maintenance, etc Check what is needed to get it working and how it works. Read More Advanced scheduling using affinity and anti-affinity rules: February 25, 2020 KubeVirt can take advantage of Kubernetes inner features to provide an advanced scheduling mechanism to virtual machines (VMs). The same or even more complex affinity and anti-affinity rules can be assigned to VMs or Pods in Kubernetes than in traditional virtualization solutions. Read More KubeVirt: installing Microsoft Windows from an ISO: February, 14, 2020 In this blogpost a Virtual Machine is created to install Microsoft Windows in KubeVirt from an ISO following the traditional way. 
Read More NA KubeCon 2019 - KubeVirt Deep Dive: Virtualized GPU Workloads on KubeVirt - David Vossel, Red Hat & Vishesh Tanksale, NVIDIA: February, 06, 2019 In this blogpost, we talk about the presentation that David Vossel and Vishesh Tanksale did at the KubeCon 2019 in North America. The talk is called KubeVirt Deep Dive: Virtualized GPU Workloads on KubeVirt and they go through from a KubeVirt introduction until a complex architecture with NVIDIA GPU devices. . . Read More NA KubeCon 2019 - KubeVirt introduction by Steve Gordon and Chandrakanth Jakkidi: February, 01, 2019 KubeVirt Intro: Virtual Machine Management on Kubernetes - Steve Gordon & Chandrakanth Jakkidi Read More Managing KubeVirt with OpenShift Web Console: January 24, 2020 This article focuses on running the OKD web console in a native Kubernetes cluster leveraging the deep integrations with KubeVirt. OKD web console will allow us to create, manage and delete virtual machines from a friendly user interface Read More KubeVirt Laboratory 3, upgrades: January, 21, 2020 In this video, we are showing the step by step of the KubeVirt Laboratory 3 how to upgrade KubeVirt Read More KubeVirt user interface options: December, 2019 Overview of different user interface options to manage KubeVirt Read More KubeVirt Laboratory 2, experimenting with CDI: December 10, 2019 In this video, we are showing the step by step of the KubeVirt Laboratory 2 Experimenting with CDI Read More KubeVirt Laboratory 1, use KubeVirt: December 4, 2019 In this video, we are showing the step by step of the KubeVirt Laboratory 1 Use KubeVirt Read More KubeVirt basic operations video: November 28, 2019 KubeVirt basic operations video Read More Jenkins Infra upgrade: November 22, 2019 Jenkins CI server upgrade and jobs for KubeVirt labs and image creation refresh Read More KubeVirt at KubeCon + CloudNativeCon North America: November 12, 2019 A summary of KubeVirt related activities during KubeCon + CloudNativeCon North America 2019 in San Diego Read More Access Virtual Machines' graphic console using noVNC: November 11, 2019 Demonstrate how to access virtual machines' graphic console using noVNC. 
Read More Prow jobs for KubeVirt website and Tutorial repo: October 31, 2019 How prow is used to keep website and tutorials 'up' Read More Jenkins Jobs for KubeVirt lab validation: October 31, 2019 How Jenkins is leveraged for automation at KubeVirt Cloud Image Builder and Lab Validation Read More Persistent storage of your Virtual Machines in KubeVirt with Rook: October, 2019 Persistent storage of your Virtual Machines in KubeVirt with Rook Read More KubeVirt on Kubernetes with CRI-O from scratch - Installing KubeVirt: October 23, 2019 How to setup a home lab environment with Kubernetes, CRI-O and KubeVirt step by step guide - Installing KubeVirt Read More KubeVirt on Kubernetes with CRI-O from scratch - Installing Kubernetes: October 16, 2019 How to setup a home lab environment with Kubernetes, CRI-O and KubeVirt step by step guide - Installing Kubernetes Read More KubeVirt on Kubernetes with CRI-O from scratch: October, 2019 How to setup a home lab environment with Kubernetes, CRI-O and KubeVirt step by step guide Read More KubeVirt is now part of CNCF Sandbox: September, 2019 KubeVirt has been approved as a project in the sandbox Read More KubeVirt Condition Types Renamed: Aug 9, 2019 Condition Types have been RENAMED Read More KubeVirt Condition Types Rename in Custom Resource: Aug 1, 2019 KubeVirt is renaming Condition Types in next release Read More Node Drain in KubeVirt: Jul 30, 2019 Evicting VM's using Node Drain Functionality Read More How to import VM into KubeVirt: Jul 29, 2019 Import a VM into the Kubernetes Platform using CDI Read More Website roadmap: 8 Jul, 2019 List of identified things that might need an improvement Read More KubeVirt with Ansible, part 2: 8 Jul, 2019 A deeper dive into Ansible 2. 8's KubeVirt features Read More KubeVirt vagrant provider: June 4, 2019 The post describes how to use kubevirt vagrant provider Read More KubeVirt with Ansible, part 1 – Introduction: May 21, 2019 With the release of Ansible 2. 8 comes a new set of KubeVirt modules Read More Hyper Converged Operator: May 08, 2019 Hyper Converged Operator on OCP 4 and K8s(HCO) Read More More About Kubevirt Metrics: Mar 14, 2019 A status update about KubeVirt metrics Read More Federated Kubevirt: Feb 22, 2019 Federated KubeVirt Read More An Overview To Kubevirt Metrics: Jan 22, 2019 An overview to KubeVirt metrics Read More Kubevirt Autolatest: Dec 13, 2018 KubeVirt Autodeployer Read More Kubevirt At Kubecon Na: November 26, 2018 KubeVirt at KubeCon North America 2019 Read More Ignition Support: November 20, 2018 Ignition Support Read More New Volume Types: November 16, 2018 New Volume Types - ConfigMap, Secret and ServiceAccount Read More Cdi Datavolumes: October 11, 2018 CDI DataVolumes Read More Containerized Data Importer: October 09, 2018 This post describes how to import, clone and upload a Virtual Machine disk image to kubernetes cluster. Read More Kubevirt Network Rehash: October 11, 2018 Quick rehash of the network deep-dive Read More Attaching To Multiple Networks: September 12, 2018 This post describes how to connect a Virtual Machine to more than one network using the Multus CNI. Read More Kubevirt Memory Overcommit: Sept 11, 2018 KubeVirt Memory Overcommitment Read More Kubevirtci: August 8, 2018 This post tries to give a quick overview of kubevirtci and why we use it to build our testing clusters. Read More Kubevirt V0. 7. 0: July 23, 2018 KubeVirt 0. 7. 
0 Highlights Read More Unit Test Howto: July 3, 2018 This post tries to demystify some of our unit test mechanism, hopefully will make it easier to write more tests and increase our code coverage! Read More Run Istio With Kubevirt: June 21, 2018 Use Istio with KubeVirt Read More Kvm Using Device Plugins: June 20, 2018 KubeVirt Using Device Plugins For KVM Read More Some Notes On Some Highlights Of V020: January 05, 2018 The very first KubeVirt release of KubeVirt in the new year () had a few notable highlights which were brewing over the last few weeks. Read More Comparing KubeVirt to other technologies: July, 18, 2017 In this blogpost, we discuss on the technology provided by KubeVirt and how it stands against other technologies available Read More The Role of LibVirt: July, 18, 2017 In this blogpost, we discuss on libvirt role in KubeVirt Read More " + "body": " - Blogs Categories: News Weekly Updates Releases Uncategorized Additional filters: Grouped by Date Announcing KubeVirt v1. 1: November 07, 2023 We are very pleased to announce the release of KubeVirt v1. 1! Read More Running KubeVirt with Cluster Autoscaler: September 6, 2023 This post explains how to set up KubeVirt with Cluster Autoscaler on EKS Read More Managing KubeVirt VMs with Ansible: September 5, 2023 This post explains how to manage KubeVirt VMs with the kubevirt. core Ansible collection. Read More NetworkPolicies for KubeVirt VMs secondary networks using OVN-Kubernetes: July 21, 2023 This post explains how to configure NetworkPolicies for KubeVirt VMs secondary networks. Read More KubeVirt v1. 0 has landed!: July 11, 2023 We are very pleased to announce the release of KubeVirt v1. 0! Read More Secondary networks connected to the physical underlay for KubeVirt VMs using OVN-Kubernetes: May 31, 2023 This post explains how to configure secondary networks connected to the physical underlay for KubeVirt virtual machines. Read More Secondary networks for KubeVirt VMs using OVN-Kubernetes: March 06, 2023 This post explains how to configure cluster-wide overlays as secondary networks for KubeVirt virtual machines. Read More KubeVirt Summit 2023!: Mar 03, 2023 Join us for the KubeVirt community's third annual dedicated online event Read More Simplifying KubeVirt's `VirtualMachine` UX with Instancetypes and Preferences: August 12, 2022 An introduction to Instancetypes and preferences in KubeVirt Read More KubeVirt: installing Microsoft Windows 11 from an ISO: August, 2, 2022 This blog post describes how to create a Microsoft Windows 11 virtual machine with KubeVirt Read More KubeVirt at KubeCon EU 2022: June 28, 2022 A short report on the two sessions KubeVirt presented at KubeCon EU 2022 Read More Load-balancer for virtual machines on bare metal Kubernetes clusters: May 03, 2022 This post illustrates setting up a virtual machine with MetalLB LoadBalancer service. 
Read More Dedicated migration network in KubeVirt: January 25, 2022 KubeVirt now supports using a separate network for live migrations Read More KubeVirt Summit is coming back!: Jan 24, 2022 Join us for the KubeVirt community's second annual dedicated online event Read More Running real-time workloads with improved performance: October 14, 2021 This blog post details the various enhancements made to improve the performance of real-time workloads in KubeVirt Read More Import AWS AMIs as KubeVirt Golden Images: September 21, 2021 This blog post outlines the fundamentals for how to import VMs from AWS into KubeVirt Read More Running virtual machines in Istio service mesh: August 25, 2021 This blog post demonstrates running virtual machines in Istio service mesh. Read More Kubernetes Authentication Options using KubeVirt Client Library: July 16, 2021 This blog post discusses authentication methods that can be used with the KubeVirt client-go library. Read More Using Intel vGPUs with Kubevirt: April 30, 2021 This blog post guides users on how to improve VM graphics performance using Intel Core processors, GPU Virtualization and Kubevirt. Read More Automated Windows Installation With Tekton Pipelines: April 21, 2021 This blog shows how KubeVirt Tekton Tasks can be utilized to automatically install and setup Windows VMs from scratch Read More The KubeVirt Summit 2021 is a wrap!: Mar 3, 2021 The KubeVirt community held their first dedicated online event last month. Read More KubeVirt Summit is coming!: Jan 12, 2021 The KubeVirt community will have their first dedicated online event Read More Customizing images for containerized VMs part I: December 10, 2020 A use case is exposed where containerized VMs running on top of Kubernetes ease the deployment of standardized VMs required by software developers. In this first part, we focus on creating standard images using different tools and then containerize them so that they can be stored in a container registry. . . . Read More High Availability -- RunStrategies for Virtual Machines: December 04, 2020 This blog post outlines the various RunStrategies available to VMs Read More Multiple Network Attachments with bridge CNI: October 21, 2020 This post illustrates configuring secondary interfaces at VMs with a L2 linux bridge at nodes using just kube api. Read More Import virtual machine from oVirt: August 07, 2020 This blog post describes how to import virtual machine using vm-import-operator Read More Minikube KubeVirt addon: July 20, 2020 This blog post describes how to use minikube and the KubeVirt addon Read More Common-templates: July 01, 2020 This blog post describe basic factors and usage of common-templates Read More Migrate a sample Windows workload to Kubernetes using KubeVirt and CDI: June 22, 2020 This blog post outlines methods to migrate a sample Windows workload to Kubernetes using KubeVirt and CDI Read More SELinux, from basics to KubeVirt: May 25, 2020 This blog details step by step how SELinux is leveraged in KubeVirt to isolate virtual machines from each other. Read More KubeVirt VM Image Usage Patterns: May 12, 2020 This blog post outlines methods for building and using virtual machine images with KubeVirt Read More KubeVirt Operation Fundamentals: April 30, 2020 This blog post outlines fundamentals around the KubeVirt's approach to installs and updates. Read More KubeVirt Security Fundamentals: April 29, 2020 This blog post outlines fundamentals around the KubeVirt's approach to security. 
Read More KubeVirt Architecture Fundamentals: April 28, 2020 This blog post outlines the core set of design decisions that shaped KubeVirt into what it is today. Read More Live Migration in KubeVirt: March 22, 2020 KubeVirt leverages Live Migration to keep workloads running while nodes are moved to maintenance, etc. Check what is needed to get it working and how it works. Read More Advanced scheduling using affinity and anti-affinity rules: February 25, 2020 KubeVirt can take advantage of Kubernetes' built-in features to provide an advanced scheduling mechanism for virtual machines (VMs). Affinity and anti-affinity rules as complex as, or more complex than, those of traditional virtualization solutions can be assigned to VMs or Pods in Kubernetes. Read More KubeVirt: installing Microsoft Windows from an ISO: February, 14, 2020 In this blogpost a Virtual Machine is created to install Microsoft Windows in KubeVirt from an ISO in the traditional way. Read More NA KubeCon 2019 - KubeVirt Deep Dive: Virtualized GPU Workloads on KubeVirt - David Vossel, Red Hat & Vishesh Tanksale, NVIDIA: February, 06, 2020 In this blogpost, we talk about the presentation that David Vossel and Vishesh Tanksale gave at KubeCon 2019 in North America. The talk is called KubeVirt Deep Dive: Virtualized GPU Workloads on KubeVirt and they cover everything from a KubeVirt introduction to a complex architecture with NVIDIA GPU devices. . . Read More NA KubeCon 2019 - KubeVirt introduction by Steve Gordon and Chandrakanth Jakkidi: February, 01, 2020 KubeVirt Intro: Virtual Machine Management on Kubernetes - Steve Gordon & Chandrakanth Jakkidi Read More Managing KubeVirt with OpenShift Web Console: January 24, 2020 This article focuses on running the OKD web console in a native Kubernetes cluster leveraging the deep integrations with KubeVirt. 
OKD web console will allow us to create, manage and delete virtual machines from a friendly user interface Read More KubeVirt Laboratory 3, upgrades: January, 21, 2020 In this video, we are showing the step by step of the KubeVirt Laboratory 3 how to upgrade KubeVirt Read More KubeVirt user interface options: December, 2019 Overview of different user interface options to manage KubeVirt Read More KubeVirt Laboratory 2, experimenting with CDI: December 10, 2019 In this video, we are showing the step by step of the KubeVirt Laboratory 2 Experimenting with CDI Read More KubeVirt Laboratory 1, use KubeVirt: December 4, 2019 In this video, we are showing the step by step of the KubeVirt Laboratory 1 Use KubeVirt Read More KubeVirt basic operations video: November 28, 2019 KubeVirt basic operations video Read More Jenkins Infra upgrade: November 22, 2019 Jenkins CI server upgrade and jobs for KubeVirt labs and image creation refresh Read More KubeVirt at KubeCon + CloudNativeCon North America: November 12, 2019 A summary of KubeVirt related activities during KubeCon + CloudNativeCon North America 2019 in San Diego Read More Prow jobs for KubeVirt website and Tutorial repo: October 31, 2019 How prow is used to keep website and tutorials 'up' Read More Jenkins Jobs for KubeVirt lab validation: October 31, 2019 How Jenkins is leveraged for automation at KubeVirt Cloud Image Builder and Lab Validation Read More Persistent storage of your Virtual Machines in KubeVirt with Rook: October, 2019 Persistent storage of your Virtual Machines in KubeVirt with Rook Read More KubeVirt on Kubernetes with CRI-O from scratch - Installing KubeVirt: October 23, 2019 How to setup a home lab environment with Kubernetes, CRI-O and KubeVirt step by step guide - Installing KubeVirt Read More KubeVirt on Kubernetes with CRI-O from scratch - Installing Kubernetes: October 16, 2019 How to setup a home lab environment with Kubernetes, CRI-O and KubeVirt step by step guide - Installing Kubernetes Read More KubeVirt on Kubernetes with CRI-O from scratch: October, 2019 How to setup a home lab environment with Kubernetes, CRI-O and KubeVirt step by step guide Read More KubeVirt is now part of CNCF Sandbox: September, 2019 KubeVirt has been approved as a project in the sandbox Read More KubeVirt Condition Types Renamed: Aug 9, 2019 Condition Types have been RENAMED Read More KubeVirt Condition Types Rename in Custom Resource: Aug 1, 2019 KubeVirt is renaming Condition Types in next release Read More Node Drain in KubeVirt: Jul 30, 2019 Evicting VM's using Node Drain Functionality Read More How to import VM into KubeVirt: Jul 29, 2019 Import a VM into the Kubernetes Platform using CDI Read More Website roadmap: 8 Jul, 2019 List of identified things that might need an improvement Read More KubeVirt with Ansible, part 2: 8 Jul, 2019 A deeper dive into Ansible 2. 8's KubeVirt features Read More KubeVirt vagrant provider: June 4, 2019 The post describes how to use kubevirt vagrant provider Read More KubeVirt with Ansible, part 1 – Introduction: May 21, 2019 With the release of Ansible 2. 
8 comes a new set of KubeVirt modules Read More Hyper Converged Operator: May 08, 2019 Hyper Converged Operator on OCP 4 and K8s(HCO) Read More More About Kubevirt Metrics: Mar 14, 2019 A status update about KubeVirt metrics Read More Federated Kubevirt: Feb 22, 2019 Federated KubeVirt Read More An Overview To Kubevirt Metrics: Jan 22, 2019 An overview of KubeVirt metrics Read More Kubevirt Autolatest: Dec 13, 2018 KubeVirt Autodeployer Read More Kubevirt At Kubecon Na: November 26, 2018 KubeVirt at KubeCon North America 2018 Read More Ignition Support: November 20, 2018 Ignition Support Read More New Volume Types: November 16, 2018 New Volume Types - ConfigMap, Secret and ServiceAccount Read More Cdi Datavolumes: October 11, 2018 CDI DataVolumes Read More Containerized Data Importer: October 09, 2018 This post describes how to import, clone and upload a Virtual Machine disk image to a Kubernetes cluster. Read More Kubevirt Network Rehash: October 11, 2018 Quick rehash of the network deep-dive Read More Attaching To Multiple Networks: September 12, 2018 This post describes how to connect a Virtual Machine to more than one network using the Multus CNI. Read More Kubevirt Memory Overcommit: Sept 11, 2018 KubeVirt Memory Overcommitment Read More Kubevirtci: August 8, 2018 This post tries to give a quick overview of kubevirtci and why we use it to build our testing clusters. Read More Kubevirt V0. 7. 0: July 23, 2018 KubeVirt 0. 7. 0 Highlights Read More Unit Test Howto: July 3, 2018 This post tries to demystify some of our unit test mechanisms and hopefully make it easier to write more tests and increase our code coverage! Read More Run Istio With Kubevirt: June 21, 2018 Use Istio with KubeVirt Read More Kvm Using Device Plugins: June 20, 2018 KubeVirt Using Device Plugins For KVM Read More Some Notes On Some Highlights Of V020: January 05, 2018 The very first KubeVirt release in the new year () had a few notable highlights which were brewing over the last few weeks. Read More Comparing KubeVirt to other technologies: July, 18, 2017 In this blogpost, we discuss the technology provided by KubeVirt and how it compares with other available technologies Read More The Role of LibVirt: July, 18, 2017 In this blogpost, we discuss libvirt's role in KubeVirt Read More " }, , { - "id": 204, + "id": 203, "url": "/privacy/", "title": "Privacy", "author" : "", "tags" : "privacy, cookies, hosting", "body": " - Privacy Statement for the KubeVirt Project: As KubeVirt is a project of the Cloud Native Computing Foundation, this site falls under the Linux Foundation Privacy Policy. All terms of that privacy policy apply to this site. How to Contact Us: If you have any questions about any of these practices or KubeVirt’s use of your personal information, please feel free to contact us or file an Issue in our GitHub repo. KubeVirt will work with you to resolve any concerns you may have about this Statement. Changes to this Privacy Statement: KubeVirt reserves the right to change this policy from time to time. If we do make changes, the revised Privacy Statement will be posted on this site. A notice will be posted on our blog and/or mailing lists whenever this privacy statement is changed in a material way. This Privacy Statement was last amended on August 17, 2021. 
" }, , { - "id": 205, + "id": 204, "url": "/quickstart_cloud/", "title": "KubeVirt quickstart with cloud providers", "author" : "", "tags" : "AliCloud, Amazon, AWS, Google, GCP, Kubernetes, KubeVirt, quickstart, tutorial, VM, virtual machine", "body": " - Easy install using cloud providers: KubeVirt can be used on cloud computing providers such as AWS, Azure, GCP, AliCloud. Prepare a cloud based Kubernetes cluster: A kubectl client is necessary for operating a Kubernetes cluster. It is important to install a kubectl client version that matches the kubernetes version to avoid issues regarding skew. To install kubectl client please follow the official documentation for your system using the instructions located here. Check the Kubernetes. io Turnkey Cloud Solutions guide for each cloud provider on how to build infrastructure to match your use case. Be aware of the costs of associated with using infrastructure provided by cloud computing providers. Future labs will require at least 30 GiB of disk space. Deploy KubeVirt: KubeVirt can be installed using the KubeVirt operator, which manages the lifecycle of all the KubeVirt core components. Use kubectl to deploy the KubeVirt operator: export VERSION=$(curl -s https://storage. googleapis. com/kubevirt-prow/release/kubevirt/kubevirt/stable. txt)echo $VERSIONkubectl create -f https://github. com/kubevirt/kubevirt/releases/download/${VERSION}/kubevirt-operator. yaml Nested virtualization If the minikube cluster runs on a virtual machine consider enabling nested virtualization. Follow the instructions described here. If for any reason nested virtualization cannot be enabled do enable KubeVirt emulation as follows: kubectl -n kubevirt patch kubevirt kubevirt --type=merge --patch '{ spec :{ configuration :{ developerConfiguration :{ useEmulation :true}}}}' Again use kubectl to deploy the KubeVirt custom resource definitions: kubectl create -f https://github. com/kubevirt/kubevirt/releases/download/${VERSION}/kubevirt-cr. yaml Verify components: By default KubeVirt will deploy 7 pods, 3 services, 1 daemonset, 3 deployment apps, 3 replica sets. Check the deployment: kubectl get kubevirt. kubevirt. io/kubevirt -n kubevirt -o=jsonpath= {. status. phase} Check the components: kubectl get all -n kubevirt Virtctl: KubeVirt provides an additional binary called virtctl for quick access to the serial and graphical ports of a VM and also handle start/stop operations. Install: virtctl can be retrieved from the release page of the KubeVirt github page. Run the following: VERSION=$(kubectl get kubevirt. kubevirt. io/kubevirt -n kubevirt -o=jsonpath= {. status. observedKubeVirtVersion} )ARCH=$(uname -s | tr A-Z a-z)-$(uname -m | sed 's/x86_64/amd64/') || windows-amd64. exeecho ${ARCH}curl -L -o virtctl https://github. com/kubevirt/kubevirt/releases/download/${VERSION}/virtctl-${VERSION}-${ARCH}chmod +x virtctlsudo install virtctl /usr/local/bin Install as Krew plugin: virtctl can be installed as a plugin via the krew plugin manager. Occurrences of virtctl <command>. . . can then be read as kubectl virt <command>. . . . Run the following to install: kubectl krew install virt What’s next: Labs: After you have deployed KubeVirt you can work through the labs to help you get acquainted with KubeVirt and how it can be used to create and deploy VMs with Kubernetes. The first lab is “Use KubeVirt”. This lab walks through the creation of a Virtual Machine Instance (VMI) on Kubernetes and then how virtctl is used to interact with its console. 
The second lab is “Experiment with CDI”. This lab shows how to use the Containerized Data Importer (CDI) to import a VM image into a Persistent Volume Claim (PVC) and then how to attach the PVC to a VM as a block device. The third lab is “KubeVirt upgrades”. This lab shows how easy and safe is to upgrade the KubeVirt installation with zero down-time. Found a bug?: We are interested in hearing about your experience. Please report any problems to the kubevirt. io issue tracker. " }, { - "id": 206, + "id": 205, "url": "/quickstart_kind/", "title": "KubeVirt quickstart with kind", "author" : "", "tags" : "Kubernetes, kind, kubevirt, VM, virtual machine", "body": " - Easy install using kind: Kind quickly sets up a local Kubernetes cluster on macOS, Linux, and Windows allowing software developers to quickly get started working with Kubernetes. Prepare kind Kubernetes environment: A kubectl client is necessary for operating a Kubernetes cluster. It is important to install a kubectl client version that matches the kubernetes version to avoid issues regarding skew. To install kubectl client please follow the official documentation for your system using the instructions located here. To install kind please follow the official documentation for your system using the instructions located here. Starting kind can be as simple as running the following command: kind create cluster See the kind User Guide here for advanced start options and instructions on how to operate kind. Deploy KubeVirt: KubeVirt can be installed using the KubeVirt operator, which manages the lifecycle of all the KubeVirt core components. Use kubectl to deploy the KubeVirt operator: export VERSION=$(curl -s https://storage. googleapis. com/kubevirt-prow/release/kubevirt/kubevirt/stable. txt)echo $VERSIONkubectl create -f https://github. com/kubevirt/kubevirt/releases/download/${VERSION}/kubevirt-operator. yaml Nested virtualization If the kind cluster runs on a virtual machine consider enabling nested virtualization. Follow the instructions described here. If for any reason nested virtualization cannot be enabled do enable KubeVirt emulation as follows: kubectl -n kubevirt patch kubevirt kubevirt --type=merge --patch '{ spec :{ configuration :{ developerConfiguration :{ useEmulation :true}}}}' Again use kubectl to deploy the KubeVirt custom resource definitions: kubectl create -f https://github. com/kubevirt/kubevirt/releases/download/${VERSION}/kubevirt-cr. yaml Verify components: By default KubeVirt will deploy 7 pods, 3 services, 1 daemonset, 3 deployment apps, 3 replica sets. Check the deployment: kubectl get kubevirt. kubevirt. io/kubevirt -n kubevirt -o=jsonpath= {. status. phase} Check the components: kubectl get all -n kubevirt Virtctl: KubeVirt provides an additional binary called virtctl for quick access to the serial and graphical ports of a VM and also handle start/stop operations. Install: virtctl can be retrieved from the release page of the KubeVirt github page. Run the following: VERSION=$(kubectl get kubevirt. kubevirt. io/kubevirt -n kubevirt -o=jsonpath= {. status. observedKubeVirtVersion} )ARCH=$(uname -s | tr A-Z a-z)-$(uname -m | sed 's/x86_64/amd64/') || windows-amd64. exeecho ${ARCH}curl -L -o virtctl https://github. com/kubevirt/kubevirt/releases/download/${VERSION}/virtctl-${VERSION}-${ARCH}chmod +x virtctlsudo install virtctl /usr/local/bin Install as Krew plugin: virtctl can be installed as a plugin via the krew plugin manager. Occurrences of virtctl <command>. . . 
can then be read as kubectl virt <command>. . . . Run the following to install: kubectl krew install virt What’s next: Labs: After you have deployed KubeVirt you can work through the labs to help you get acquainted with KubeVirt and how it can be used to create and deploy VMs with Kubernetes. The first lab is “Use KubeVirt”. This lab walks through the creation of a Virtual Machine Instance (VMI) on Kubernetes and then how virtctl is used to interact with its console. The second lab is “Experiment with CDI”. This lab shows how to use the Containerized Data Importer (CDI) to import a VM image into a Persistent Volume Claim (PVC) and then how to attach the PVC to a VM as a block device. The third lab is “KubeVirt upgrades”. This lab shows how easy and safe is to upgrade the KubeVirt installation with zero down-time. Found a bug?: We are interested in hearing about your experience. Please report any problems to the kubevirt. io issue tracker. " }, { - "id": 207, + "id": 206, "url": "/quickstart_minikube/", "title": "KubeVirt quickstart with Minikube", "author" : "", "tags" : "Kubernetes, minikube, minikube addons, kubevirt, VM, virtual machine", "body": " - Easy install using minikube: Minikube quickly sets up a local Kubernetes cluster on macOS, Linux, and Windows allowing software developers to quickly get started working with Kubernetes. Prepare minikube Kubernetes environment: A kubectl client is necessary for operating a Kubernetes cluster. It is important to install a kubectl client version that matches the kubernetes version to avoid issues regarding skew. To install kubectl client please follow the official documentation for your system using the instructions located here. Minikube ships a kubectl client version that matches the kubernetes version to avoid skew issues. To use the minikube shipped client do one of the following: All normal kubectl commands should be performed as minikube kubectl It can be added to aliases by running the following: alias kubectl='minikube kubectl --' It can be installed directly to the host by running the following: VERSION=$(minikube kubectl version | head -1 | awk -F', ' {'print $3'} | awk -F':' {'print $2'} | sed s/\ //g)sudo install ${HOME}/. minikube/cache/linux/${VERSION}/kubectl /usr/local/bin To install minikube please follow the official documentation for your system using the instructions located here. Starting minikube can be as simple as running the following command: minikube start --cni=flannel CNI: We add the container network interface (CNI) called flannel to make minikube work with VMs that use a masquerade type network interface. If a CNI does not work for you, switch instances of “masquerade” to “bridge” in example VM definitions. See the minikube handbook here for advanced start options and instructions on how to operate minikube. Multi-Node Minikube: Minikube has support for adding additional nodes to a cluster. This can behelpful in experimenting with KubeVirt on minikube as some operations like nodeaffinity or live migration require more than one cluster node to demonstrate. Container Network Interface: By default, minikube sets up a kubernetes cluster using either a virtualmachine appliance or a container. For a single node setup, local networkconnectivity is sufficient. In the case where multiple nodes are involved, evenwhen using containers or VMs on the same host, kubernetes needs to define ashared network to allow pods on one host to communicate with pods on the otherhost. 
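If nested virtualization cannot be enabled on the nodes, all three quickstarts fall back to software emulation with a single patch. Restored here with explicit JSON quoting as a sketch; note that it targets the kubevirt custom resource, so it can only succeed once that resource (created from kubevirt-cr.yaml) exists:
kubectl -n kubevirt patch kubevirt kubevirt --type=merge \
  --patch '{"spec":{"configuration":{"developerConfiguration":{"useEmulation":true}}}}'
Whether virtualization is nested or emulated, a multi-node cluster still needs the shared pod network described above, which is where the choice of CNI plugin comes in.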
To this end, minikube supports a number of Container Network Interface(CNI) pluginsthe simplest of which is flannel. Updating the minikube start command: To have minikube start up with the flannel CNI plugin over two nodes, alter the minikube start command: minikube start --nodes=2 --cni=flannelCore DNS race condition An issue has beenreported where thecoredns pod in multi-node minikube comes up with the wrong IP address. Ifthis happens, kubevirt will fail to install properly. To work around, deletethe coredns pod from the kube-system namespace and disable/enable thekubevirt addon in minikube. Deploy KubeVirt: KubeVirt can be installed using the KubeVirt operator, which manages the lifecycle of all the KubeVirt core components. Below are two examples of how to install KubeVirt using the latest release. The easy way: Addon currently broken An issue has been reported where more recent versions of minikube break the kubevirt addon. Fall back to the “in-depth” section below until this is resolved. Installing KubeVirt can be as simple as the following command: minikube addons enable kubevirt The in-depth way: Use kubectl to deploy the KubeVirt operator: export VERSION=$(curl -s https://storage. googleapis. com/kubevirt-prow/release/kubevirt/kubevirt/stable. txt)echo $VERSIONkubectl create -f https://github. com/kubevirt/kubevirt/releases/download/${VERSION}/kubevirt-operator. yaml Nested virtualization If the minikube cluster runs on a virtual machine consider enabling nested virtualization. Follow the instructions described here. If for any reason nested virtualization cannot be enabled do enable KubeVirt emulation as follows: kubectl -n kubevirt patch kubevirt kubevirt --type=merge --patch '{ spec :{ configuration :{ developerConfiguration :{ useEmulation :true}}}}' Again use kubectl to deploy the KubeVirt custom resource definitions: kubectl create -f https://github. com/kubevirt/kubevirt/releases/download/${VERSION}/kubevirt-cr. yaml Verify components: By default KubeVirt will deploy 7 pods, 3 services, 1 daemonset, 3 deployment apps, 3 replica sets. Check the deployment: kubectl get kubevirt. kubevirt. io/kubevirt -n kubevirt -o=jsonpath= {. status. phase} Check the components: kubectl get all -n kubevirt When using the minikube KubeVirt addon check logs of the kubevirt-install-manager pod: kubectl logs pod/kubevirt-install-manager -n kube-system Virtctl: KubeVirt provides an additional binary called virtctl for quick access to the serial and graphical ports of a VM and also handle start/stop operations. Install: virtctl can be retrieved from the release page of the KubeVirt github page. Run the following: VERSION=$(kubectl get kubevirt. kubevirt. io/kubevirt -n kubevirt -o=jsonpath= {. status. observedKubeVirtVersion} )ARCH=$(uname -s | tr A-Z a-z)-$(uname -m | sed 's/x86_64/amd64/') || windows-amd64. exeecho ${ARCH}curl -L -o virtctl https://github. com/kubevirt/kubevirt/releases/download/${VERSION}/virtctl-${VERSION}-${ARCH}chmod +x virtctlsudo install virtctl /usr/local/bin Install as Krew plugin: virtctl can be installed as a plugin via the krew plugin manager. Occurrences of virtctl <command>. . . can then be read as kubectl virt <command>. . . . Run the following to install: kubectl krew install virt What’s next: Labs: After you have deployed KubeVirt you can work through the labs to help you get acquainted with KubeVirt and how it can be used to create and deploy VMs with Kubernetes. The first lab is “Use KubeVirt”. 
This lab walks through the creation of a Virtual Machine Instance (VMI) on Kubernetes and then how virtctl is used to interact with its console. The second lab is “Experiment with CDI”. This lab shows how to use the Containerized Data Importer (CDI) to import a VM image into a Persistent Volume Claim (PVC) and then how to attach the PVC to a VM as a block device. The third lab is “KubeVirt upgrades”. This lab shows how easy and safe is to upgrade the KubeVirt installation with zero down-time. Found a bug?: We are interested in hearing about your experience. Please report any problems to the kubevirt. io issue tracker. " }, { - "id": 208, + "id": 207, "url": "/category/releases.html", "title": "Releases", "author" : "", "tags" : "", "body": " - " }, { - "id": 209, + "id": 208, "url": "/blogs/releases", "title": "Releases", "author" : "", "tags" : "", - "body": " - Blogs Categories: News Weekly Updates Releases Uncategorized Additional filters: Grouped by Date KubeVirt v1. 1. 0: November 06, 2023 This article provides information about KubeVirt release v1. 1. 0 changes Read More KubeVirt v1. 0. 0: July 06, 2023 This article provides information about KubeVirt release v1. 0. 0 changes Read More KubeVirt v0. 59. 0: March 01, 2023 This article provides information about KubeVirt release v0. 59. 0 changes Read More KubeVirt v0. 58. 0: October 13, 2022 This article provides information about KubeVirt release v0. 58. 0 changes Read More KubeVirt v0. 57. 0: September 12, 2022 This article provides information about KubeVirt release v0. 57. 0 changes Read More KubeVirt v0. 56. 0: August 18, 2022 This article provides information about KubeVirt release v0. 56. 0 changes Read More KubeVirt v0. 55. 0: July 14, 2022 This article provides information about KubeVirt release v0. 55. 0 changes Read More KubeVirt v0. 54. 0: June 08, 2022 This article provides information about KubeVirt release v0. 54. 0 changes Read More KubeVirt v0. 53. 0: May 09, 2022 This article provides information about KubeVirt release v0. 53. 0 changes Read More KubeVirt v0. 52. 0: April 08, 2022 This article provides information about KubeVirt release v0. 52. 0 changes Read More KubeVirt v0. 51. 0: March 08, 2022 This article provides information about KubeVirt release v0. 51. 0 changes Read More KubeVirt v0. 50. 0: February 09, 2022 This article provides information about KubeVirt release v0. 50. 0 changes Read More KubeVirt v0. 49. 0: January 11, 2022 This article provides information about KubeVirt release v0. 49. 0 changes Read More KubeVirt v0. 48. 0: December 06, 2021 This article provides information about KubeVirt release v0. 48. 0 changes Read More KubeVirt v0. 46. 0: October 08, 2021 This article provides information about KubeVirt release v0. 46. 0 changes Read More KubeVirt v0. 45. 0: September 08, 2021 This article provides information about KubeVirt release v0. 45. 0 changes Read More KubeVirt v0. 44. 0: August 09, 2021 This article provides information about KubeVirt release v0. 44. 0 changes Read More KubeVirt v0. 43. 0: July 09, 2021 This article provides information about KubeVirt release v0. 43. 0 changes Read More KubeVirt v0. 42. 0: June 08, 2021 This article provides information about KubeVirt release v0. 42. 0 changes Read More KubeVirt v0. 41. 0: May 12, 2021 This article provides information about KubeVirt release v0. 41. 0 changes Read More KubeVirt v0. 40. 0: April 19, 2021 This article provides information about KubeVirt release v0. 40. 0 changes Read More KubeVirt v0. 39. 
0: March 10, 2021 This article provides information about KubeVirt release v0. 39. 0 changes Read More KubeVirt v0. 38. 0: February 08, 2021 This article provides information about KubeVirt release v0. 38. 0 changes Read More KubeVirt v0. 37. 0: January 18, 2021 This article provides information about KubeVirt release v0. 37. 0 changes Read More KubeVirt v0. 36. 0: December 16, 2020 This article provides information about KubeVirt release v0. 36. 0 changes Read More KubeVirt v0. 35. 0: November 09, 2020 This article provides information about KubeVirt release v0. 35. 0 changes Read More KubeVirt v0. 34. 0: October 07, 2020 This article provides information about KubeVirt release v0. 34. 0 changes Read More KubeVirt v0. 33. 0: September 15, 2020 This article provides information about KubeVirt release v0. 33. 0 changes Read More KubeVirt v0. 32. 0: August 11, 2020 This article provides information about KubeVirt release v0. 32. 0 changes Read More KubeVirt v0. 31. 0: July 09, 2020 This article provides information about KubeVirt release v0. 31. 0 changes Read More KubeVirt v0. 30. 0: June 05, 2020 This article provides information about KubeVirt release v0. 30. 0 changes Read More KubeVirt v0. 29. 0: May 06, 2020 This article provides information about KubeVirt release v0. 29. 0 changes Read More KubeVirt v0. 28. 0: April 09, 2020 This article provides information about KubeVirt release v0. 28. 0 changes Read More KubeVirt v0. 27. 0: March 06, 2020 This article provides information about KubeVirt release v0. 27. 0 changes Read More KubeVirt v0. 26. 0: February 07, 2020 This article provides information about KubeVirt release v0. 26. 0 changes Read More KubeVirt v0. 25. 0: January 13, 2020 This article provides information about KubeVirt release v0. 25. 0 changes Read More KubeVirt v0. 24. 0: December 03, 2019 This article provides information about KubeVirt release v0. 24. 0 changes Read More KubeVirt v0. 23. 0: November 04, 2019 This article provides information about KubeVirt release v0. 23. 0 changes Read More KubeVirt v0. 22. 0: October 10, 2019 This article provides information about KubeVirt release v0. 22. 0 changes Read More KubeVirt v0. 21. 0: September 09, 2019 This article provides information about KubeVirt release v0. 21. 0 changes Read More KubeVirt v0. 20. 0: August 09, 2019 This article provides information about KubeVirt release v0. 20. 0 changes Read More KubeVirt v0. 19. 0: July 05, 2019 This article provides information about KubeVirt release v0. 19. 0 changes Read More KubeVirt v0. 18. 0: June 05, 2019 This article provides information about KubeVirt release v0. 18. 0 changes Read More KubeVirt v0. 17. 0: May 06, 2019 This article provides information about KubeVirt release v0. 17. 0 changes Read More KubeVirt v0. 16. 0: April 05, 2019 This article provides information about KubeVirt release v0. 16. 0 changes Read More KubeVirt v0. 15. 0: March 05, 2019 This article provides information about KubeVirt release v0. 15. 0 changes Read More KubeVirt v0. 14. 0: February 04, 2019 This article provides information about KubeVirt release v0. 14. 0 changes Read More KubeVirt v0. 13. 0: January 15, 2019 This article provides information about KubeVirt release v0. 13. 0 changes Read More KubeVirt v0. 12. 0: January 11, 2019 This article provides information about KubeVirt release v0. 12. 0 changes Read More KubeVirt v0. 11. 0: December 06, 2018 This article provides information about KubeVirt release v0. 11. 0 changes Read More KubeVirt v0. 10. 
0: November 08, 2018 This article provides information about KubeVirt release v0. 10. 0 changes Read More KubeVirt v0. 9. 0: October 04, 2018 This article provides information about KubeVirt release v0. 9. 0 changes Read More KubeVirt v0. 8. 0: September 06, 2018 This article provides information about KubeVirt release v0. 8. 0 changes Read More KubeVirt v0. 7. 0: July 04, 2018 This article provides information about KubeVirt release v0. 7. 0 changes Read More KubeVirt v0. 6. 0: June 11, 2018 This article provides information about KubeVirt release v0. 6. 0 changes Read More KubeVirt v0. 5. 0: May 04, 2018 This article provides information about KubeVirt release v0. 5. 0 changes Read More KubeVirt v0. 4. 0: April 06, 2018 This article provides information about KubeVirt release v0. 4. 0 changes Read More KubeVirt v0. 3. 0: March 08, 2018 This article provides information about KubeVirt release v0. 3. 0 changes Read More Kube Virt v0. 2. 0: January 05, 2018 This release follows v0. 1. 0 and consists of 131 changes, contributed by 6 people, leading to 148 files changed, 9096 insertions(+), 5871 deletions(-). Read More Kube Virt v0. 1. 0: December 08, 2017 This release follows v0. 0. 4 and consists of 115 changes, contributed by 11 people, leading to 121 files changed, 5278 insertions(+), 1916 deletions(-). Read More Kube Virt v0. 0. 4: November 07, 2017 This release follows v0. 0. 3 and consists of 133 changes, contributed by 14 people, leading to 109 files changed, 7093 insertions(+), 2437 deletions(-). Read More " + "body": " - Blogs Categories: News Weekly Updates Releases Uncategorized Additional filters: Grouped by Date KubeVirt v1. 1. 0: November 06, 2023 This article provides information about KubeVirt release v1. 1. 0 changes Read More KubeVirt v1. 0. 0: July 06, 2023 This article provides information about KubeVirt release v1. 0. 0 changes Read More KubeVirt v0. 59. 0: March 01, 2023 This article provides information about KubeVirt release v0. 59. 0 changes Read More KubeVirt v0. 58. 0: October 13, 2022 This article provides information about KubeVirt release v0. 58. 0 changes Read More KubeVirt v0. 57. 0: September 12, 2022 This article provides information about KubeVirt release v0. 57. 0 changes Read More KubeVirt v0. 56. 0: August 18, 2022 This article provides information about KubeVirt release v0. 56. 0 changes Read More KubeVirt v0. 55. 0: July 14, 2022 This article provides information about KubeVirt release v0. 55. 0 changes Read More KubeVirt v0. 54. 0: June 08, 2022 This article provides information about KubeVirt release v0. 54. 0 changes Read More KubeVirt v0. 53. 0: May 09, 2022 This article provides information about KubeVirt release v0. 53. 0 changes Read More KubeVirt v0. 52. 0: April 08, 2022 This article provides information about KubeVirt release v0. 52. 0 changes Read More KubeVirt v0. 51. 0: March 08, 2022 This article provides information about KubeVirt release v0. 51. 0 changes Read More KubeVirt v0. 50. 0: February 09, 2022 This article provides information about KubeVirt release v0. 50. 0 changes Read More KubeVirt v0. 49. 0: January 11, 2022 This article provides information about KubeVirt release v0. 49. 0 changes Read More KubeVirt v0. 48. 0: December 06, 2021 This article provides information about KubeVirt release v0. 48. 0 changes Read More KubeVirt v0. 46. 0: October 08, 2021 This article provides information about KubeVirt release v0. 46. 0 changes Read More KubeVirt v0. 45. 
0: September 08, 2021 This article provides information about KubeVirt release v0. 45. 0 changes Read More KubeVirt v0. 44. 0: August 09, 2021 This article provides information about KubeVirt release v0. 44. 0 changes Read More KubeVirt v0. 43. 0: July 09, 2021 This article provides information about KubeVirt release v0. 43. 0 changes Read More KubeVirt v0. 42. 0: June 08, 2021 This article provides information about KubeVirt release v0. 42. 0 changes Read More KubeVirt v0. 41. 0: May 12, 2021 This article provides information about KubeVirt release v0. 41. 0 changes Read More KubeVirt v0. 40. 0: April 19, 2021 This article provides information about KubeVirt release v0. 40. 0 changes Read More KubeVirt v0. 39. 0: March 10, 2021 This article provides information about KubeVirt release v0. 39. 0 changes Read More KubeVirt v0. 38. 0: February 08, 2021 This article provides information about KubeVirt release v0. 38. 0 changes Read More KubeVirt v0. 37. 0: January 18, 2021 This article provides information about KubeVirt release v0. 37. 0 changes Read More KubeVirt v0. 36. 0: December 16, 2020 This article provides information about KubeVirt release v0. 36. 0 changes Read More KubeVirt v0. 35. 0: November 09, 2020 This article provides information about KubeVirt release v0. 35. 0 changes Read More KubeVirt v0. 34. 0: October 07, 2020 This article provides information about KubeVirt release v0. 34. 0 changes Read More KubeVirt v0. 33. 0: September 15, 2020 This article provides information about KubeVirt release v0. 33. 0 changes Read More KubeVirt v0. 32. 0: August 11, 2020 This article provides information about KubeVirt release v0. 32. 0 changes Read More KubeVirt v0. 31. 0: July 09, 2020 This article provides information about KubeVirt release v0. 31. 0 changes Read More KubeVirt v0. 30. 0: June 05, 2020 This article provides information about KubeVirt release v0. 30. 0 changes Read More KubeVirt v0. 29. 0: May 06, 2020 This article provides information about KubeVirt release v0. 29. 0 changes Read More KubeVirt v0. 28. 0: April 09, 2020 This article provides information about KubeVirt release v0. 28. 0 changes Read More KubeVirt v0. 27. 0: March 06, 2020 This article provides information about KubeVirt release v0. 27. 0 changes Read More KubeVirt v0. 26. 0: February 07, 2020 This article provides information about KubeVirt release v0. 26. 0 changes Read More KubeVirt v0. 25. 0: January 13, 2020 This article provides information about KubeVirt release v0. 25. 0 changes Read More KubeVirt v0. 24. 0: December 03, 2019 This article provides information about KubeVirt release v0. 24. 0 changes Read More KubeVirt v0. 23. 0: November 04, 2019 This article provides information about KubeVirt release v0. 23. 0 changes Read More KubeVirt v0. 22. 0: October 10, 2019 This article provides information about KubeVirt release v0. 22. 0 changes Read More KubeVirt v0. 21. 0: September 09, 2019 This article provides information about KubeVirt release v0. 21. 0 changes Read More KubeVirt v0. 20. 0: August 09, 2019 This article provides information about KubeVirt release v0. 20. 0 changes Read More KubeVirt v0. 19. 0: July 05, 2019 This article provides information about KubeVirt release v0. 19. 0 changes Read More KubeVirt v0. 18. 0: June 05, 2019 This article provides information about KubeVirt release v0. 18. 0 changes Read More KubeVirt v0. 17. 0: May 06, 2019 This article provides information about KubeVirt release v0. 17. 0 changes Read More KubeVirt v0. 16. 
0: April 05, 2019 This article provides information about KubeVirt release v0. 16. 0 changes Read More KubeVirt v0. 15. 0: March 05, 2019 This article provides information about KubeVirt release v0. 15. 0 changes Read More KubeVirt v0. 14. 0: February 04, 2019 This article provides information about KubeVirt release v0. 14. 0 changes Read More KubeVirt v0. 13. 0: January 15, 2019 This article provides information about KubeVirt release v0. 13. 0 changes Read More KubeVirt v0. 12. 0: January 11, 2019 This article provides information about KubeVirt release v0. 12. 0 changes Read More KubeVirt v0. 11. 0: December 06, 2018 This article provides information about KubeVirt release v0. 11. 0 changes Read More KubeVirt v0. 10. 0: November 08, 2018 This article provides information about KubeVirt release v0. 10. 0 changes Read More KubeVirt v0. 9. 0: October 04, 2018 This article provides information about KubeVirt release v0. 9. 0 changes Read More KubeVirt v0. 8. 0: September 06, 2018 This article provides information about KubeVirt release v0. 8. 0 changes Read More KubeVirt v0. 7. 0: July 04, 2018 This article provides information about KubeVirt release v0. 7. 0 changes Read More KubeVirt v0. 6. 0: June 11, 2018 This article provides information about KubeVirt release v0. 6. 0 changes Read More KubeVirt v0. 5. 0: May 04, 2018 This article provides information about KubeVirt release v0. 5. 0 changes Read More KubeVirt v0. 4. 0: April 06, 2018 This article provides information about KubeVirt release v0. 4. 0 changes Read More KubeVirt v0. 3. 0: March 08, 2018 This article provides information about KubeVirt release v0. 3. 0 changes Read More Kube Virt v0. 2. 0: January 05, 2018 This release follows v0. 1. 0 and consists of 131 changes, contributed by 6 people, leading to 148 files changed, 9096 insertions(+), 5871 deletions(-). Read More Kube Virt v0. 1. 0: December 08, 2017 This release follows v0. 0. 4 and consists of 115 changes, contributed by 11 people, leading to 121 files changed, 5278 insertions(+), 1916 deletions(-). Read More Kube Virt v0. 0. 4: November 07, 2017 This release follows v0. 0. 3 and consists of 133 changes, contributed by 14 people, leading to 109 files changed, 7093 insertions(+), 2437 deletions(-). Read More " }, , , { - "id": 210, + "id": 209, "url": "/summit/", "title": "KubeVirt Summit 2023", "author" : "", "tags" : "", "body": " - The third online KubeVirt Summit is coming on March 29-30! The KubeVirt Summit is a 2-day virtual event to discover,discuss, hack and learn about managing virtual machines in Kubernetes usingKubeVirt. This is an opportunity for us to share what we’re working on and to promote ideas and discussion within the community in real-time. How to attend: Register for KubeVirt Summit 2023 on the CNCF Community events page. Attendance is free. Schedule: Both days were orginally scheduled for 14:00 - 19:00 UTC (10:00–15:00 EDT, 16:00–21:00 CEST). Due to technical issues Thursday will start will be 13:00 - 19:00 UTC. March 29: 1400-1425: Opening remarks and update on KubeVirt’s Road to V1Presented by Ryan Hallisey & Fabian Deutsch Welcome to KubeVirt Summit! The KubeVirt community will soon create the 59th release for KubeVirt, so let’s talk about what it will take for the next release to be v1. 0. 0. In this talk we’ll discuss the upcoming changes in the community to get ready for 1. 0 and the timeline. 
14:30-1455: Moving the instance type API towards v1 and streamlining the VM creation processPresented by Lee Yarwood & Felix Matouschek This presentation introduces the current state of the Instance type API (currently v1alpha2) and discusses the future planned improvements as we move towards v1. It will also provide an insight into the latest development advances in KubeVirt aiming to streamline the virtual machine creation process. By introducing virtual machine instance types and preferences, KubeVirt gains abstractions for resource sizing, performance and OS support, which allow users to focus on the parameters relevant to their applications. To make instance types and preferences approachable, the command line tools of KubeVirt were extended to enable a user experience on a par with all major hyperscalers. Attendees of this talk will learn about KubeVirt’s new instance types and preferences, how they considerably improve the user experience and how they reduce the maintenance effort of KubeVirt virtual machines. 1500-15:25: Applying Parallel CI testing on Arm64Presented by Haolin Zhang Currently, we have enabled parallel CI testing on Arm64 server. As the current arm64 server does not support nested virtualization, we use kind platform to run the test. In this section, I will show how we run the CI test in kind environment and what issues we meet when trying to enable the parallel testing. 15:30-15:55: Squash the flakes! - How does the flake process work? What tools do we have? How do we minimize the impact?Presented by Daniel Hiller Flakes aka tests that don’t behave deterministically, i. e. they fail sometimes and pass sometimes, are an ever recurring problem in software development. This is especially the sad reality when running e2e tests where a lot of components are involved. There are various reasons to why a test can be flaky, however the impact can be as fatal as CI being loaded beyond capacity causing overly long feedback cycles or even users losing trust in CI itself. We want to remove flakes as fast as possible to minimize number of retests required. This should lead to shorter time to merge, reduce CI user frustration, improve trust in CI, while at the same time decrease overall load for the CI system. We start by generating a report of tests that have failed at least once inside a merged PR, meaning that in the end all tests have succeeded, thus flaky tests have been run inside CI. We then look at the report to separate flakes from real issues and forward the flakes to dev teams. As a result retest numbers have gone down significantly over the last year. After attending the session the user will have an idea of what our flake process is, how we exercise it and what the actual outcomes are. 16:00-16:25: Scaling KubeVirt reach to legacy virtualization administrators and users by means of KubeVirt-ManagerPresented by Marcelo Feitoza Parisi KubeVirt-Manager is an Open Source initiative that plans to democratize KubeVirt usage and scale KubeVirt’s reach to legacy virtualization administrators and users, by delivering a simple, effective and friendly Web User Interface for KubeVirt, using technologies like AngularJS, Bootstrap and NoVNC embedded. By implementing a simple Web User Interface, KubeVirt-Manager can effectively eliminate the needs of writing and managing complex Kubernetes YAML files. Containerized Data Importer is also used by KubeVirt-Manager as a backend for Data Volume general management tasks, like provisioning, creating and scaling. 
16:30-16:55: How Killercoda works with KubeVirtPresented by Meha Bhalodiya & Adam Gardner By using KubeVirt in conjunction with Killercoda, users can take advantage of the benefits of virtualization while still utilizing the benefits of Kubernetes. This can provide a powerful and flexible platform for running VMs, and can help to simplify the management of VMs and to improve the performance and security of the platform. The integration of virtualization technology with Kubernetes allows customers to easily manage and monitor their VMs while taking advantage of the scalability and self-healing capabilities of Kubernetes. With Killercoda, users can create custom virtual networks, use firewalls and load balancers, and even establish VPN connections between VMs and other resources. 17:00-17:50: DPU Accelerated Networking for KubeVirt PodsPresented by Girish Moodalbail NVIDIA BlueField-2 data processing unit (DPU) delivers a broad set of hardware accelerators to accelerate software-defined networking, storage, and security. In this talk, we are going to focus on SDN and discuss: How have we implemented network virtualization to provide network isolation between KubeVirt Pods How have we pushed the network virtualization control plane to the DPU, “bump-in-the-wire” model, from the Kubernetes Node How have we implemented multi-homed networks for KubeVirt pods How have we leveraged the OVN/OVS SDN managed by OVN Kubernetes CNI to achieve the aforementioned features How have we accelerated the datapath leveraging the DPU’s ASAP2 (Accelerated switching and Packet Processing) technology that has enabled us in achieving high throughput and low latency traffic flows while providing wire speed support for firewall, NATing (SNAT/DNAT), forwarding, QoS, and so on. 18:00-18:25: Case Study: Upgrading KubeVirt in productionPresented by Alay Patel NVIDIA recently upgraded KubeVirt in production from 0. 35 to 0. 50. This talk will discuss the challenges that we faced and the lessons learned. This talk will then cover some on-going work in the community (change in release cadence, discussion about api-stability, etc) in order to make upgrades better. 18:30-1900: Cloud Native Virtual Dev EnvironmentsPresented by Hippie Hacker & Jay Tihema Want to develop in the cloud with your friends? We’ll invite you to walk through a demo of using coder with templates using KubeVirt and CAPI to create on demand shared development environments hosted within their own clusters. Something you can host at home or in the cloud! March 30: 1300-1325: Update on KubeVirt’s Road to V1Presented by Ryan Hallisey & Fabian Deutsch The KubeVirt community will soon create the 59th release for KubeVirt, so let’s talk about what it will take for the next release to be v1. 0. 0. In this talk we’ll discuss the upcoming changes in the community to get ready for 1. 0 and the timeline. 13:30-13:55: Moving the instance type API towards v1 and streamlining the VM creation processPresented by Lee Yarwood & Felix Matouschek This presentation introduces the current state of the Instance type API (currently v1alpha2) and discusses the future planned improvements as we move towards v1. It will also provide an insight into the latest development advances in KubeVirt aiming to streamline the virtual machine creation process. By introducing virtual machine instance types and preferences, KubeVirt gains abstractions for resource sizing, performance and OS support, which allow users to focus on the parameters relevant to their applications. 
To make instance types and preferences approachable, the command line tools of KubeVirt were extended to enable a user experience on a par with all major hyperscalers. Attendees of this talk will learn about KubeVirt’s new instance types and preferences, how they considerably improve the user experience and how they reduce the maintenance effort of KubeVirt virtual machines. 14:00-14:25: The latest in KubeVirt VM exportsPresented by Maya Rashish We’ll talk about the recently introduced feature for easily exporting VMs and use some recent quality of life improvements that have made it in since the feature was introduced 14:30-14:55: High Performance Network Stack for KubeVirt-based Managed KubernetesPresented by Jian Li With the help of cluster-api-provider-kubevirt (capk) project, it is possible to provide a managed Kubernetes service using KubeVirt as the virtualization infrastructure. A managed Kubernetes service is typically implemented by running Kubernetes inside Virtual Machine for the purpose of provisioning flexibility and TCO reduction. However, running Kubernetes on VM will introduce additional networking overhead which in turn dramatically degrades the overall container networking performance. In this presentation, we will introduce a way to maximize the container networking performance of the managed Kubernetes by applying throughout optimization on both management and workload cluster networking stack using SR-IOV and Network Function offloading technologies. With the proposed approach, we achieved line-rate performance on the container networking. 15:00-15:50: Image Logistics and Locality in a Global Multi-Cluster DeploymentPresented by Ryan Beisner & Tomasz Knopik “I need my VMI image and I need it now, everywhere, across multiple clusters, around the world. ” We’ll define our use cases, discuss the challenges that we’ve experienced, and detail the approaches we have taken to tame the topics of image distribution, and image locality across a global multi-cluster KubeVirt deployment. 16:00-16:25: KubeVirt VMs all the way down: a custom-sized networking solution for inceptionist clustersPresented by Miguel Duarte Barroso & Enrique Lleronte Pastora Setting up a Kubernetes cluster is a complex process that relies on several components being configured properly. There are multiple distributions and installers helping with this task, each with different default configurations for clusters and infrastructure providers; while these bootstrap providers reduce installation complexity, they don’t address how to manage a cluster day-to-day or a Kubernetes environment long term. You are still missing important features like automated cluster lifecycle management. The Cluster API project provides declarative, Kubernetes-style APIs to automate cluster creation, configuration, and management. In this presentation, we will focus on CAP-K: a Cluster API provider implemented using KubeVirt VMs. We will discuss some of the challenging requirements that running nested Kubernetes has. Finally, we will offer a comprehensive solution based on OVN, an SDN solution to provide L2/L3 virtual topologies, ACLs, fully distributed DHCP support, and L3 gateways from logical to physical networks. The audience should be familiar with virtualization, and have a basic knowledge of networking. 
The audience will learn the networking requirements of Kubernetes, how to run nested Kubernetes/KubeVirt clusters, understand what are the challenges of isolating traffic on these nested clusters, and finally, how this can be achieved using OVN to implement the networking infrastructure. 16:30-16:55: High Performance KubeVirt workloads at NVIDIAPresented by Piotr Prokop How NVIDIA ensures predictable low latency for applications running inside KubeVirt VMs. In this talk we will discuss how we configure the network and compute resources for our Virtual Machines and how our latest contributions to KubeVirt and Kubernetes helps us achieve best performance. 17:00-17:25: (Tutorial) Don’t Knock the Docs: Contributing Documentation to the KubeVirt ProjectPresented by Chandler Wilkerson Good documentation is one of the key ways for project like KubeVirt to make the leap from sandbox to incubation to graduation. Whether it is filling in the user guide with brand new instructions on a recently merged capability, taking your first steps to becoming a contributor by catching bugs in existing instructions, or submitting a blog post detailing your own KubeVirt end user experience on the main website, the first step is learning how our documentation repositories are laid out, and how to create PRs to merge new or improved content. Tutorial attendees should expect to learn the basic Git repo structure, how to handle a local container to proof changes to the website, and time permitting, see some of the CI/CD process that governs the actual deployment of the KubeVirt. io webpage and user guide. 17:30-17:55: KubeVirt SIG-Scale - Latest performance and scalability changesPresented by Ryan Hallisey & Alay Patel The KubeVirt SIG-Scale group meets weekly to discuss performance and scalability for KubeVirt. This talk will provide updates from the past year, show performance and scale trends observed in the performance CI jobs. 18:00-18:25: My cluster is running… but does it actually work?Presented by Orel Misan For cases where vanilla KubeVirt does not meet your advanced compute, storage, network, or operational use-cases, you may need to extend its functionality with third party extensions. With this great flexibility must also come great complexity. After investing much time and effort in configuring your cluster, how do you know it actually works? In this talk, I will introduce you to checkups: containerized applications that help you verify whether your cluster is working as expected. I will also demo cluster configuration verification using a checkup. This checkup will verify connectivity between two KubeVirt virtual machines and will measure the network latency. The demo will include how this checkup could be remotely deployed on any cluster, and how its users interact with it from its execution to results retrieval. Basic understanding of Kubernetes operation is required. Knowledge of KubeVirt or networking is not required. 18:30-18:55: Lessons learned maintaining KubeVirt - testingPresented by Qian Xiao & Natalie Bandel Upstream KubeVirt has a large set of functional tests that can be leveraged to run in NVIDIA zones to validate KubeVirt condition. NVIDIA extends the upstream community’s test suite to implement different types of tests. Tests are tweaked/customized from various dimensions(latency, running nodes, etc) to ensure they run successfully in production environments. Using KubeVirt’s test suite provides us with a starting point, however the test suite is not fully extensible. 
For example, you have to import the entire KubeVirt code base to use some of the functionality. 18:55-19:00: Closing Remarks " }, { - "id": 211, + "id": 210, "url": "/videos/talks", "title": "Talks", "author" : "", "tags" : "", "body": " - {% for item in site. data. videos-talks. list %} KubeVirt Talks Playlist: A playlist of the various KubeVirt talks. {% endfor %}" }, { - "id": 212, + "id": 211, "url": "/videos/tech-demos", "title": "Tech Demos", "author" : "", "tags" : "", "body": " - KubeVirt Basic Operations demo: Basic operations to run KubeVirt from a beginner point of view. KubeVirt Tech Demos Playlist: A playlist of KubeVirt tech demos. " }, { - "id": 213, + "id": 212, "url": "/category/uncategorized.html", "title": "Uncategorized", "author" : "", "tags" : "", "body": " - " }, { - "id": 214, + "id": 213, "url": "/blogs/uncategorized", "title": "Uncategorized", "author" : "", "tags" : "", "body": " - {{ page. navbar_active }} {% include sidebar-blogs. html %} {% for post in site. posts %} {% if post. categories contains uncategorized %} {{ post. title }}: {{ post. pub-date }}, {{ post. pub-year }} {{ post. description | strip_html | truncatewords:50 }} {% capture readmorelink %} {{ post. url | prepend: site. baseurl }} {% endcapture %} {% include readmore. html href_link= readmorelink %} {% endif %} {% endfor %} " }, { - "id": 215, + "id": 214, "url": "/blogs/updates", "title": "Weekly Updates", "author" : "", "tags" : "", "body": " - {{ page. navbar_active }} {% include sidebar-blogs. html %} {% for post in site. posts %} {% if post. categories contains updates %} {{ post. title }}: {{ post. pub-date }}, {{ post. pub-year }} {{ post. description | strip_html | truncatewords:50 }} {% capture readmorelink %} {{ post. url | prepend: site. baseurl }} {% endcapture %} {% include readmore. html href_link= readmorelink %} {% endif %} {% endfor %} " }, { - "id": 216, + "id": 215, "url": "/videos/", "title": "Videos", "author" : "", "tags" : "", "body": " - " }, { - "id": 217, + "id": 216, "url": "/category/weekly-updates.html", "title": "Weekly Updates", "author" : "", "tags" : "", "body": " - " - }, , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , { - "id": 218, + }, , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , { + "id": 217, "url": "/blogs/page2/", "title": "Blogs", "author" : "", "tags" : "", "body": " - {{ page. title }} {% if site. data. blogs_toc. toc[0] %} {% for item in site. data. blogs_toc. toc %} {% if item. subfolderitems[0] %} {{ item. title }}: {% for entry in item. subfolderitems %} <a href= {{ entry. url }} {% if page. title == entry. 
page %} class= blogs-navigation--item_link active {% else %} class= blogs-navigation--item_link {% endif %}> {{ entry. page }} </a> {% if entry. subsubfolderitems[0] %} {% for subentry in entry. subsubfolderitems %} <a href= {{ subentry. url }} {% if page. title == entry. page %} class= blogs-navigation--item_link active {% else %} class= blogs-navigation--item_link {% endif %}> {{ subentry. page }} </a> {% endfor %} {% endif %} {% endfor %} {% endif %} {% endfor %} {% endif %} {% for post in paginator. posts %} {{ post. title }}: {{ post. pub-date }}, {{ post. pub-year }} {{ post. description | strip_html | truncatewords:50 }} {% capture readmorelink %} {{ post. url | prepend: site. baseurl }} {% endcapture %} {% include readmore. html %} {% endfor %} {% include paging. html %} " }, { - "id": 219, + "id": 218, "url": "/blogs/page3/", "title": "Blogs", "author" : "", "tags" : "", "body": " - {{ page. title }} {% if site. data. blogs_toc. toc[0] %} {% for item in site. data. blogs_toc. toc %} {% if item. subfolderitems[0] %} {{ item. title }}: {% for entry in item. subfolderitems %} <a href= {{ entry. url }} {% if page. title == entry. page %} class= blogs-navigation--item_link active {% else %} class= blogs-navigation--item_link {% endif %}> {{ entry. page }} </a> {% if entry. subsubfolderitems[0] %} {% for subentry in entry. subsubfolderitems %} <a href= {{ subentry. url }} {% if page. title == entry. page %} class= blogs-navigation--item_link active {% else %} class= blogs-navigation--item_link {% endif %}> {{ subentry. page }} </a> {% endfor %} {% endif %} {% endfor %} {% endif %} {% endfor %} {% endif %} {% for post in paginator. posts %} {{ post. title }}: {{ post. pub-date }}, {{ post. pub-year }} {{ post. description | strip_html | truncatewords:50 }} {% capture readmorelink %} {{ post. url | prepend: site. baseurl }} {% endcapture %} {% include readmore. html %} {% endfor %} {% include paging. html %} " }, { - "id": 220, + "id": 219, "url": "/blogs/page4/", "title": "Blogs", "author" : "", "tags" : "", "body": " - {{ page. title }} {% if site. data. blogs_toc. toc[0] %} {% for item in site. data. blogs_toc. toc %} {% if item. subfolderitems[0] %} {{ item. title }}: {% for entry in item. subfolderitems %} <a href= {{ entry. url }} {% if page. title == entry. page %} class= blogs-navigation--item_link active {% else %} class= blogs-navigation--item_link {% endif %}> {{ entry. page }} </a> {% if entry. subsubfolderitems[0] %} {% for subentry in entry. subsubfolderitems %} <a href= {{ subentry. url }} {% if page. title == entry. page %} class= blogs-navigation--item_link active {% else %} class= blogs-navigation--item_link {% endif %}> {{ subentry. page }} </a> {% endfor %} {% endif %} {% endfor %} {% endif %} {% endfor %} {% endif %} {% for post in paginator. posts %} {{ post. title }}: {{ post. pub-date }}, {{ post. pub-year }} {{ post. description | strip_html | truncatewords:50 }} {% capture readmorelink %} {{ post. url | prepend: site. baseurl }} {% endcapture %} {% include readmore. html %} {% endfor %} {% include paging. html %} " }, { - "id": 221, + "id": 220, "url": "/blogs/page5/", "title": "Blogs", "author" : "", "tags" : "", "body": " - {{ page. title }} {% if site. data. blogs_toc. toc[0] %} {% for item in site. data. blogs_toc. toc %} {% if item. subfolderitems[0] %} {{ item. title }}: {% for entry in item. subfolderitems %} <a href= {{ entry. url }} {% if page. title == entry. 
page %} class= blogs-navigation--item_link active {% else %} class= blogs-navigation--item_link {% endif %}> {{ entry. page }} </a> {% if entry. subsubfolderitems[0] %} {% for subentry in entry. subsubfolderitems %} <a href= {{ subentry. url }} {% if page. title == entry. page %} class= blogs-navigation--item_link active {% else %} class= blogs-navigation--item_link {% endif %}> {{ subentry. page }} </a> {% endfor %} {% endif %} {% endfor %} {% endif %} {% endfor %} {% endif %} {% for post in paginator. posts %} {{ post. title }}: {{ post. pub-date }}, {{ post. pub-year }} {{ post. description | strip_html | truncatewords:50 }} {% capture readmorelink %} {{ post. url | prepend: site. baseurl }} {% endcapture %} {% include readmore. html %} {% endfor %} {% include paging. html %} " }, { - "id": 222, + "id": 221, "url": "/blogs/page6/", "title": "Blogs", "author" : "", "tags" : "", "body": " - {{ page. title }} {% if site. data. blogs_toc. toc[0] %} {% for item in site. data. blogs_toc. toc %} {% if item. subfolderitems[0] %} {{ item. title }}: {% for entry in item. subfolderitems %} <a href= {{ entry. url }} {% if page. title == entry. page %} class= blogs-navigation--item_link active {% else %} class= blogs-navigation--item_link {% endif %}> {{ entry. page }} </a> {% if entry. subsubfolderitems[0] %} {% for subentry in entry. subsubfolderitems %} <a href= {{ subentry. url }} {% if page. title == entry. page %} class= blogs-navigation--item_link active {% else %} class= blogs-navigation--item_link {% endif %}> {{ subentry. page }} </a> {% endfor %} {% endif %} {% endfor %} {% endif %} {% endfor %} {% endif %} {% for post in paginator. posts %} {{ post. title }}: {{ post. pub-date }}, {{ post. pub-year }} {{ post. description | strip_html | truncatewords:50 }} {% capture readmorelink %} {{ post. url | prepend: site. baseurl }} {% endcapture %} {% include readmore. html %} {% endfor %} {% include paging. html %} " }, { - "id": 223, + "id": 222, "url": "/blogs/page7/", "title": "Blogs", "author" : "", "tags" : "", "body": " - {{ page. title }} {% if site. data. blogs_toc. toc[0] %} {% for item in site. data. blogs_toc. toc %} {% if item. subfolderitems[0] %} {{ item. title }}: {% for entry in item. subfolderitems %} <a href= {{ entry. url }} {% if page. title == entry. page %} class= blogs-navigation--item_link active {% else %} class= blogs-navigation--item_link {% endif %}> {{ entry. page }} </a> {% if entry. subsubfolderitems[0] %} {% for subentry in entry. subsubfolderitems %} <a href= {{ subentry. url }} {% if page. title == entry. page %} class= blogs-navigation--item_link active {% else %} class= blogs-navigation--item_link {% endif %}> {{ subentry. page }} </a> {% endfor %} {% endif %} {% endfor %} {% endif %} {% endfor %} {% endif %} {% for post in paginator. posts %} {{ post. title }}: {{ post. pub-date }}, {{ post. pub-year }} {{ post. description | strip_html | truncatewords:50 }} {% capture readmorelink %} {{ post. url | prepend: site. baseurl }} {% endcapture %} {% include readmore. html %} {% endfor %} {% include paging. html %} " }, { - "id": 224, + "id": 223, "url": "/blogs/page8/", "title": "Blogs", "author" : "", "tags" : "", "body": " - {{ page. title }} {% if site. data. blogs_toc. toc[0] %} {% for item in site. data. blogs_toc. toc %} {% if item. subfolderitems[0] %} {{ item. title }}: {% for entry in item. subfolderitems %} <a href= {{ entry. url }} {% if page. title == entry. 
page %} class= blogs-navigation--item_link active {% else %} class= blogs-navigation--item_link {% endif %}> {{ entry. page }} </a> {% if entry. subsubfolderitems[0] %} {% for subentry in entry. subsubfolderitems %} <a href= {{ subentry. url }} {% if page. title == entry. page %} class= blogs-navigation--item_link active {% else %} class= blogs-navigation--item_link {% endif %}> {{ subentry. page }} </a> {% endfor %} {% endif %} {% endfor %} {% endif %} {% endfor %} {% endif %} {% for post in paginator. posts %} {{ post. title }}: {{ post. pub-date }}, {{ post. pub-year }} {{ post. description | strip_html | truncatewords:50 }} {% capture readmorelink %} {{ post. url | prepend: site. baseurl }} {% endcapture %} {% include readmore. html %} {% endfor %} {% include paging. html %} " }, { - "id": 225, + "id": 224, "url": "/blogs/page9/", "title": "Blogs", "author" : "", "tags" : "", "body": " - {{ page. title }} {% if site. data. blogs_toc. toc[0] %} {% for item in site. data. blogs_toc. toc %} {% if item. subfolderitems[0] %} {{ item. title }}: {% for entry in item. subfolderitems %} <a href= {{ entry. url }} {% if page. title == entry. page %} class= blogs-navigation--item_link active {% else %} class= blogs-navigation--item_link {% endif %}> {{ entry. page }} </a> {% if entry. subsubfolderitems[0] %} {% for subentry in entry. subsubfolderitems %} <a href= {{ subentry. url }} {% if page. title == entry. page %} class= blogs-navigation--item_link active {% else %} class= blogs-navigation--item_link {% endif %}> {{ subentry. page }} </a> {% endfor %} {% endif %} {% endfor %} {% endif %} {% endfor %} {% endif %} {% for post in paginator. posts %} {{ post. title }}: {{ post. pub-date }}, {{ post. pub-year }} {{ post. description | strip_html | truncatewords:50 }} {% capture readmorelink %} {{ post. url | prepend: site. baseurl }} {% endcapture %} {% include readmore. html %} {% endfor %} {% include paging. html %} " }, { - "id": 226, + "id": 225, "url": "/blogs/page10/", "title": "Blogs", "author" : "", "tags" : "", "body": " - {{ page. title }} {% if site. data. blogs_toc. toc[0] %} {% for item in site. data. blogs_toc. toc %} {% if item. subfolderitems[0] %} {{ item. title }}: {% for entry in item. subfolderitems %} <a href= {{ entry. url }} {% if page. title == entry. page %} class= blogs-navigation--item_link active {% else %} class= blogs-navigation--item_link {% endif %}> {{ entry. page }} </a> {% if entry. subsubfolderitems[0] %} {% for subentry in entry. subsubfolderitems %} <a href= {{ subentry. url }} {% if page. title == entry. page %} class= blogs-navigation--item_link active {% else %} class= blogs-navigation--item_link {% endif %}> {{ subentry. page }} </a> {% endfor %} {% endif %} {% endfor %} {% endif %} {% endfor %} {% endif %} {% for post in paginator. posts %} {{ post. title }}: {{ post. pub-date }}, {{ post. pub-year }} {{ post. description | strip_html | truncatewords:50 }} {% capture readmorelink %} {{ post. url | prepend: site. baseurl }} {% endcapture %} {% include readmore. html %} {% endfor %} {% include paging. html %} " }, { - "id": 227, + "id": 226, "url": "/blogs/page11/", "title": "Blogs", "author" : "", "tags" : "", "body": " - {{ page. title }} {% if site. data. blogs_toc. toc[0] %} {% for item in site. data. blogs_toc. toc %} {% if item. subfolderitems[0] %} {{ item. title }}: {% for entry in item. subfolderitems %} <a href= {{ entry. url }} {% if page. title == entry. 
page %} class= blogs-navigation--item_link active {% else %} class= blogs-navigation--item_link {% endif %}> {{ entry. page }} </a> {% if entry. subsubfolderitems[0] %} {% for subentry in entry. subsubfolderitems %} <a href= {{ subentry. url }} {% if page. title == entry. page %} class= blogs-navigation--item_link active {% else %} class= blogs-navigation--item_link {% endif %}> {{ subentry. page }} </a> {% endfor %} {% endif %} {% endfor %} {% endif %} {% endfor %} {% endif %} {% for post in paginator. posts %} {{ post. title }}: {{ post. pub-date }}, {{ post. pub-year }} {{ post. description | strip_html | truncatewords:50 }} {% capture readmorelink %} {{ post. url | prepend: site. baseurl }} {% endcapture %} {% include readmore. html %} {% endfor %} {% include paging. html %} " }, { - "id": 228, + "id": 227, "url": "/blogs/page12/", "title": "Blogs", "author" : "", "tags" : "", "body": " - {{ page. title }} {% if site. data. blogs_toc. toc[0] %} {% for item in site. data. blogs_toc. toc %} {% if item. subfolderitems[0] %} {{ item. title }}: {% for entry in item. subfolderitems %} <a href= {{ entry. url }} {% if page. title == entry. page %} class= blogs-navigation--item_link active {% else %} class= blogs-navigation--item_link {% endif %}> {{ entry. page }} </a> {% if entry. subsubfolderitems[0] %} {% for subentry in entry. subsubfolderitems %} <a href= {{ subentry. url }} {% if page. title == entry. page %} class= blogs-navigation--item_link active {% else %} class= blogs-navigation--item_link {% endif %}> {{ subentry. page }} </a> {% endfor %} {% endif %} {% endfor %} {% endif %} {% endfor %} {% endif %} {% for post in paginator. posts %} {{ post. title }}: {{ post. pub-date }}, {{ post. pub-year }} {{ post. description | strip_html | truncatewords:50 }} {% capture readmorelink %} {{ post. url | prepend: site. baseurl }} {% endcapture %} {% include readmore. html %} {% endfor %} {% include paging. html %} " }, { - "id": 229, + "id": 228, "url": "/blogs/page13/", "title": "Blogs", "author" : "", "tags" : "", "body": " - {{ page. title }} {% if site. data. blogs_toc. toc[0] %} {% for item in site. data. blogs_toc. toc %} {% if item. subfolderitems[0] %} {{ item. title }}: {% for entry in item. subfolderitems %} <a href= {{ entry. url }} {% if page. title == entry. page %} class= blogs-navigation--item_link active {% else %} class= blogs-navigation--item_link {% endif %}> {{ entry. page }} </a> {% if entry. subsubfolderitems[0] %} {% for subentry in entry. subsubfolderitems %} <a href= {{ subentry. url }} {% if page. title == entry. page %} class= blogs-navigation--item_link active {% else %} class= blogs-navigation--item_link {% endif %}> {{ subentry. page }} </a> {% endfor %} {% endif %} {% endfor %} {% endif %} {% endfor %} {% endif %} {% for post in paginator. posts %} {{ post. title }}: {{ post. pub-date }}, {{ post. pub-year }} {{ post. description | strip_html | truncatewords:50 }} {% capture readmorelink %} {{ post. url | prepend: site. baseurl }} {% endcapture %} {% include readmore. html %} {% endfor %} {% include paging. html %} " }, { - "id": 230, + "id": 229, "url": "/blogs/page14/", "title": "Blogs", "author" : "", "tags" : "", "body": " - {{ page. title }} {% if site. data. blogs_toc. toc[0] %} {% for item in site. data. blogs_toc. toc %} {% if item. subfolderitems[0] %} {{ item. title }}: {% for entry in item. subfolderitems %} <a href= {{ entry. url }} {% if page. title == entry. 
page %} class= blogs-navigation--item_link active {% else %} class= blogs-navigation--item_link {% endif %}> {{ entry. page }} </a> {% if entry. subsubfolderitems[0] %} {% for subentry in entry. subsubfolderitems %} <a href= {{ subentry. url }} {% if page. title == entry. page %} class= blogs-navigation--item_link active {% else %} class= blogs-navigation--item_link {% endif %}> {{ subentry. page }} </a> {% endfor %} {% endif %} {% endfor %} {% endif %} {% endfor %} {% endif %} {% for post in paginator. posts %} {{ post. title }}: {{ post. pub-date }}, {{ post. pub-year }} {{ post. description | strip_html | truncatewords:50 }} {% capture readmorelink %} {{ post. url | prepend: site. baseurl }} {% endcapture %} {% include readmore. html %} {% endfor %} {% include paging. html %} " }, { - "id": 231, + "id": 230, "url": "/blogs/page15/", "title": "Blogs", "author" : "", "tags" : "", "body": " - {{ page. title }} {% if site. data. blogs_toc. toc[0] %} {% for item in site. data. blogs_toc. toc %} {% if item. subfolderitems[0] %} {{ item. title }}: {% for entry in item. subfolderitems %} <a href= {{ entry. url }} {% if page. title == entry. page %} class= blogs-navigation--item_link active {% else %} class= blogs-navigation--item_link {% endif %}> {{ entry. page }} </a> {% if entry. subsubfolderitems[0] %} {% for subentry in entry. subsubfolderitems %} <a href= {{ subentry. url }} {% if page. title == entry. page %} class= blogs-navigation--item_link active {% else %} class= blogs-navigation--item_link {% endif %}> {{ subentry. page }} </a> {% endfor %} {% endif %} {% endfor %} {% endif %} {% endfor %} {% endif %} {% for post in paginator. posts %} {{ post. title }}: {{ post. pub-date }}, {{ post. pub-year }} {{ post. description | strip_html | truncatewords:50 }} {% capture readmorelink %} {{ post. url | prepend: site. baseurl }} {% endcapture %} {% include readmore. html %} {% endfor %} {% include paging. html %} " }, { - "id": 232, + "id": 231, "url": "/blogs/page16/", "title": "Blogs", "author" : "", "tags" : "", "body": " - {{ page. title }} {% if site. data. blogs_toc. toc[0] %} {% for item in site. data. blogs_toc. toc %} {% if item. subfolderitems[0] %} {{ item. title }}: {% for entry in item. subfolderitems %} <a href= {{ entry. url }} {% if page. title == entry. page %} class= blogs-navigation--item_link active {% else %} class= blogs-navigation--item_link {% endif %}> {{ entry. page }} </a> {% if entry. subsubfolderitems[0] %} {% for subentry in entry. subsubfolderitems %} <a href= {{ subentry. url }} {% if page. title == entry. page %} class= blogs-navigation--item_link active {% else %} class= blogs-navigation--item_link {% endif %}> {{ subentry. page }} </a> {% endfor %} {% endif %} {% endfor %} {% endif %} {% endfor %} {% endif %} {% for post in paginator. posts %} {{ post. title }}: {{ post. pub-date }}, {{ post. pub-year }} {{ post. description | strip_html | truncatewords:50 }} {% capture readmorelink %} {{ post. url | prepend: site. baseurl }} {% endcapture %} {% include readmore. html %} {% endfor %} {% include paging. html %} " }, { - "id": 233, + "id": 232, "url": "/blogs/page17/", "title": "Blogs", "author" : "", "tags" : "", "body": " - {{ page. title }} {% if site. data. blogs_toc. toc[0] %} {% for item in site. data. blogs_toc. toc %} {% if item. subfolderitems[0] %} {{ item. title }}: {% for entry in item. subfolderitems %} <a href= {{ entry. url }} {% if page. title == entry. 
page %} class= blogs-navigation--item_link active {% else %} class= blogs-navigation--item_link {% endif %}> {{ entry. page }} </a> {% if entry. subsubfolderitems[0] %} {% for subentry in entry. subsubfolderitems %} <a href= {{ subentry. url }} {% if page. title == entry. page %} class= blogs-navigation--item_link active {% else %} class= blogs-navigation--item_link {% endif %}> {{ subentry. page }} </a> {% endfor %} {% endif %} {% endfor %} {% endif %} {% endfor %} {% endif %} {% for post in paginator. posts %} {{ post. title }}: {{ post. pub-date }}, {{ post. pub-year }} {{ post. description | strip_html | truncatewords:50 }} {% capture readmorelink %} {{ post. url | prepend: site. baseurl }} {% endcapture %} {% include readmore. html %} {% endfor %} {% include paging. html %} " }, { - "id": 234, + "id": 233, "url": "/blogs/page18/", "title": "Blogs", "author" : "", "tags" : "", "body": " - {{ page. title }} {% if site. data. blogs_toc. toc[0] %} {% for item in site. data. blogs_toc. toc %} {% if item. subfolderitems[0] %} {{ item. title }}: {% for entry in item. subfolderitems %} <a href= {{ entry. url }} {% if page. title == entry. page %} class= blogs-navigation--item_link active {% else %} class= blogs-navigation--item_link {% endif %}> {{ entry. page }} </a> {% if entry. subsubfolderitems[0] %} {% for subentry in entry. subsubfolderitems %} <a href= {{ subentry. url }} {% if page. title == entry. page %} class= blogs-navigation--item_link active {% else %} class= blogs-navigation--item_link {% endif %}> {{ subentry. page }} </a> {% endfor %} {% endif %} {% endfor %} {% endif %} {% endfor %} {% endif %} {% for post in paginator. posts %} {{ post. title }}: {{ post. pub-date }}, {{ post. pub-year }} {{ post. description | strip_html | truncatewords:50 }} {% capture readmorelink %} {{ post. url | prepend: site. baseurl }} {% endcapture %} {% include readmore. html %} {% endfor %} {% include paging. html %} " }, { - "id": 235, + "id": 234, "url": "/blogs/page19/", "title": "Blogs", "author" : "", "tags" : "", "body": " - {{ page. title }} {% if site. data. blogs_toc. toc[0] %} {% for item in site. data. blogs_toc. toc %} {% if item. subfolderitems[0] %} {{ item. title }}: {% for entry in item. subfolderitems %} <a href= {{ entry. url }} {% if page. title == entry. page %} class= blogs-navigation--item_link active {% else %} class= blogs-navigation--item_link {% endif %}> {{ entry. page }} </a> {% if entry. subsubfolderitems[0] %} {% for subentry in entry. subsubfolderitems %} <a href= {{ subentry. url }} {% if page. title == entry. page %} class= blogs-navigation--item_link active {% else %} class= blogs-navigation--item_link {% endif %}> {{ subentry. page }} </a> {% endfor %} {% endif %} {% endfor %} {% endif %} {% endfor %} {% endif %} {% for post in paginator. posts %} {{ post. title }}: {{ post. pub-date }}, {{ post. pub-year }} {{ post. description | strip_html | truncatewords:50 }} {% capture readmorelink %} {{ post. url | prepend: site. baseurl }} {% endcapture %} {% include readmore. html %} {% endfor %} {% include paging. html %} " }, { - "id": 236, + "id": 235, "url": "/blogs/page20/", "title": "Blogs", "author" : "", "tags" : "", "body": " - {{ page. title }} {% if site. data. blogs_toc. toc[0] %} {% for item in site. data. blogs_toc. toc %} {% if item. subfolderitems[0] %} {{ item. title }}: {% for entry in item. subfolderitems %} <a href= {{ entry. url }} {% if page. title == entry. 
page %} class= blogs-navigation--item_link active {% else %} class= blogs-navigation--item_link {% endif %}> {{ entry. page }} </a> {% if entry. subsubfolderitems[0] %} {% for subentry in entry. subsubfolderitems %} <a href= {{ subentry. url }} {% if page. title == entry. page %} class= blogs-navigation--item_link active {% else %} class= blogs-navigation--item_link {% endif %}> {{ subentry. page }} </a> {% endfor %} {% endif %} {% endfor %} {% endif %} {% endfor %} {% endif %} {% for post in paginator. posts %} {{ post. title }}: {{ post. pub-date }}, {{ post. pub-year }} {{ post. description | strip_html | truncatewords:50 }} {% capture readmorelink %} {{ post. url | prepend: site. baseurl }} {% endcapture %} {% include readmore. html %} {% endfor %} {% include paging. html %} " }, { - "id": 237, + "id": 236, "url": "/blogs/page21/", "title": "Blogs", "author" : "", "tags" : "", "body": " - {{ page. title }} {% if site. data. blogs_toc. toc[0] %} {% for item in site. data. blogs_toc. toc %} {% if item. subfolderitems[0] %} {{ item. title }}: {% for entry in item. subfolderitems %} <a href= {{ entry. url }} {% if page. title == entry. page %} class= blogs-navigation--item_link active {% else %} class= blogs-navigation--item_link {% endif %}> {{ entry. page }} </a> {% if entry. subsubfolderitems[0] %} {% for subentry in entry. subsubfolderitems %} <a href= {{ subentry. url }} {% if page. title == entry. page %} class= blogs-navigation--item_link active {% else %} class= blogs-navigation--item_link {% endif %}> {{ subentry. page }} </a> {% endfor %} {% endif %} {% endfor %} {% endif %} {% endfor %} {% endif %} {% for post in paginator. posts %} {{ post. title }}: {{ post. pub-date }}, {{ post. pub-year }} {{ post. description | strip_html | truncatewords:50 }} {% capture readmorelink %} {{ post. url | prepend: site. baseurl }} {% endcapture %} {% include readmore. html %} {% endfor %} {% include paging. html %} " }, { - "id": 238, + "id": 237, "url": "/blogs/page22/", "title": "Blogs", "author" : "", "tags" : "", "body": " - {{ page. title }} {% if site. data. blogs_toc. toc[0] %} {% for item in site. data. blogs_toc. toc %} {% if item. subfolderitems[0] %} {{ item. title }}: {% for entry in item. subfolderitems %} <a href= {{ entry. url }} {% if page. title == entry. page %} class= blogs-navigation--item_link active {% else %} class= blogs-navigation--item_link {% endif %}> {{ entry. page }} </a> {% if entry. subsubfolderitems[0] %} {% for subentry in entry. subsubfolderitems %} <a href= {{ subentry. url }} {% if page. title == entry. page %} class= blogs-navigation--item_link active {% else %} class= blogs-navigation--item_link {% endif %}> {{ subentry. page }} </a> {% endfor %} {% endif %} {% endfor %} {% endif %} {% endfor %} {% endif %} {% for post in paginator. posts %} {{ post. title }}: {{ post. pub-date }}, {{ post. pub-year }} {{ post. description | strip_html | truncatewords:50 }} {% capture readmorelink %} {{ post. url | prepend: site. baseurl }} {% endcapture %} {% include readmore. html %} {% endfor %} {% include paging. html %} " }, { - "id": 239, + "id": 238, "url": "/blogs/page23/", "title": "Blogs", "author" : "", "tags" : "", "body": " - {{ page. title }} {% if site. data. blogs_toc. toc[0] %} {% for item in site. data. blogs_toc. toc %} {% if item. subfolderitems[0] %} {{ item. title }}: {% for entry in item. subfolderitems %} <a href= {{ entry. url }} {% if page. title == entry. 
page %} class= blogs-navigation--item_link active {% else %} class= blogs-navigation--item_link {% endif %}> {{ entry. page }} </a> {% if entry. subsubfolderitems[0] %} {% for subentry in entry. subsubfolderitems %} <a href= {{ subentry. url }} {% if page. title == entry. page %} class= blogs-navigation--item_link active {% else %} class= blogs-navigation--item_link {% endif %}> {{ subentry. page }} </a> {% endfor %} {% endif %} {% endfor %} {% endif %} {% endfor %} {% endif %} {% for post in paginator. posts %} {{ post. title }}: {{ post. pub-date }}, {{ post. pub-year }} {{ post. description | strip_html | truncatewords:50 }} {% capture readmorelink %} {{ post. url | prepend: site. baseurl }} {% endcapture %} {% include readmore. html %} {% endfor %} {% include paging. html %} " }, { - "id": 240, + "id": 239, "url": "/blogs/page24/", "title": "Blogs", "author" : "", "tags" : "", "body": " - {{ page. title }} {% if site. data. blogs_toc. toc[0] %} {% for item in site. data. blogs_toc. toc %} {% if item. subfolderitems[0] %} {{ item. title }}: {% for entry in item. subfolderitems %} <a href= {{ entry. url }} {% if page. title == entry. page %} class= blogs-navigation--item_link active {% else %} class= blogs-navigation--item_link {% endif %}> {{ entry. page }} </a> {% if entry. subsubfolderitems[0] %} {% for subentry in entry. subsubfolderitems %} <a href= {{ subentry. url }} {% if page. title == entry. page %} class= blogs-navigation--item_link active {% else %} class= blogs-navigation--item_link {% endif %}> {{ subentry. page }} </a> {% endfor %} {% endif %} {% endfor %} {% endif %} {% endfor %} {% endif %} {% for post in paginator. posts %} {{ post. title }}: {{ post. pub-date }}, {{ post. pub-year }} {{ post. description | strip_html | truncatewords:50 }} {% capture readmorelink %} {{ post. url | prepend: site. baseurl }} {% endcapture %} {% include readmore. html %} {% endfor %} {% include paging. html %} " }, { - "id": 241, + "id": 240, "url": "/blogs/page25/", "title": "Blogs", "author" : "", "tags" : "", "body": " - {{ page. title }} {% if site. data. blogs_toc. toc[0] %} {% for item in site. data. blogs_toc. toc %} {% if item. subfolderitems[0] %} {{ item. title }}: {% for entry in item. subfolderitems %} <a href= {{ entry. url }} {% if page. title == entry. page %} class= blogs-navigation--item_link active {% else %} class= blogs-navigation--item_link {% endif %}> {{ entry. page }} </a> {% if entry. subsubfolderitems[0] %} {% for subentry in entry. subsubfolderitems %} <a href= {{ subentry. url }} {% if page. title == entry. page %} class= blogs-navigation--item_link active {% else %} class= blogs-navigation--item_link {% endif %}> {{ subentry. page }} </a> {% endfor %} {% endif %} {% endfor %} {% endif %} {% endfor %} {% endif %} {% for post in paginator. posts %} {{ post. title }}: {{ post. pub-date }}, {{ post. pub-year }} {{ post. description | strip_html | truncatewords:50 }} {% capture readmorelink %} {{ post. url | prepend: site. baseurl }} {% endcapture %} {% include readmore. html %} {% endfor %} {% include paging. html %} " }, { - "id": 242, + "id": 241, "url": "/blogs/page26/", "title": "Blogs", "author" : "", "tags" : "", "body": " - {{ page. title }} {% if site. data. blogs_toc. toc[0] %} {% for item in site. data. blogs_toc. toc %} {% if item. subfolderitems[0] %} {{ item. title }}: {% for entry in item. subfolderitems %} <a href= {{ entry. url }} {% if page. title == entry. 
page %} class= blogs-navigation--item_link active {% else %} class= blogs-navigation--item_link {% endif %}> {{ entry. page }} </a> {% if entry. subsubfolderitems[0] %} {% for subentry in entry. subsubfolderitems %} <a href= {{ subentry. url }} {% if page. title == entry. page %} class= blogs-navigation--item_link active {% else %} class= blogs-navigation--item_link {% endif %}> {{ subentry. page }} </a> {% endfor %} {% endif %} {% endfor %} {% endif %} {% endfor %} {% endif %} {% for post in paginator. posts %} {{ post. title }}: {{ post. pub-date }}, {{ post. pub-year }} {{ post. description | strip_html | truncatewords:50 }} {% capture readmorelink %} {{ post. url | prepend: site. baseurl }} {% endcapture %} {% include readmore. html %} {% endfor %} {% include paging. html %} " }, { - "id": 243, + "id": 242, "url": "/blogs/page27/", "title": "Blogs", "author" : "", "tags" : "", "body": " - {{ page. title }} {% if site. data. blogs_toc. toc[0] %} {% for item in site. data. blogs_toc. toc %} {% if item. subfolderitems[0] %} {{ item. title }}: {% for entry in item. subfolderitems %} <a href= {{ entry. url }} {% if page. title == entry. page %} class= blogs-navigation--item_link active {% else %} class= blogs-navigation--item_link {% endif %}> {{ entry. page }} </a> {% if entry. subsubfolderitems[0] %} {% for subentry in entry. subsubfolderitems %} <a href= {{ subentry. url }} {% if page. title == entry. page %} class= blogs-navigation--item_link active {% else %} class= blogs-navigation--item_link {% endif %}> {{ subentry. page }} </a> {% endfor %} {% endif %} {% endfor %} {% endif %} {% endfor %} {% endif %} {% for post in paginator. posts %} {{ post. title }}: {{ post. pub-date }}, {{ post. pub-year }} {{ post. description | strip_html | truncatewords:50 }} {% capture readmorelink %} {{ post. url | prepend: site. baseurl }} {% endcapture %} {% include readmore. html %} {% endfor %} {% include paging. html %} " }, { - "id": 244, + "id": 243, "url": "/blogs/page28/", "title": "Blogs", "author" : "", "tags" : "", "body": " - {{ page. title }} {% if site. data. blogs_toc. toc[0] %} {% for item in site. data. blogs_toc. toc %} {% if item. subfolderitems[0] %} {{ item. title }}: {% for entry in item. subfolderitems %} <a href= {{ entry. url }} {% if page. title == entry. page %} class= blogs-navigation--item_link active {% else %} class= blogs-navigation--item_link {% endif %}> {{ entry. page }} </a> {% if entry. subsubfolderitems[0] %} {% for subentry in entry. subsubfolderitems %} <a href= {{ subentry. url }} {% if page. title == entry. page %} class= blogs-navigation--item_link active {% else %} class= blogs-navigation--item_link {% endif %}> {{ subentry. page }} </a> {% endfor %} {% endif %} {% endfor %} {% endif %} {% endfor %} {% endif %} {% for post in paginator. posts %} {{ post. title }}: {{ post. pub-date }}, {{ post. pub-year }} {{ post. description | strip_html | truncatewords:50 }} {% capture readmorelink %} {{ post. url | prepend: site. baseurl }} {% endcapture %} {% include readmore. html %} {% endfor %} {% include paging. html %} " }, { - "id": 245, + "id": 244, "url": "/blogs/page29/", "title": "Blogs", "author" : "", "tags" : "", "body": " - {{ page. title }} {% if site. data. blogs_toc. toc[0] %} {% for item in site. data. blogs_toc. toc %} {% if item. subfolderitems[0] %} {{ item. title }}: {% for entry in item. subfolderitems %} <a href= {{ entry. url }} {% if page. title == entry. 
page %} class= blogs-navigation--item_link active {% else %} class= blogs-navigation--item_link {% endif %}> {{ entry. page }} </a> {% if entry. subsubfolderitems[0] %} {% for subentry in entry. subsubfolderitems %} <a href= {{ subentry. url }} {% if page. title == entry. page %} class= blogs-navigation--item_link active {% else %} class= blogs-navigation--item_link {% endif %}> {{ subentry. page }} </a> {% endfor %} {% endif %} {% endfor %} {% endif %} {% endfor %} {% endif %} {% for post in paginator. posts %} {{ post. title }}: {{ post. pub-date }}, {{ post. pub-year }} {{ post. description | strip_html | truncatewords:50 }} {% capture readmorelink %} {{ post. url | prepend: site. baseurl }} {% endcapture %} {% include readmore. html %} {% endfor %} {% include paging. html %} " }, { - "id": 246, + "id": 245, "url": "/blogs/page30/", "title": "Blogs", "author" : "", "tags" : "", "body": " - {{ page. title }} {% if site. data. blogs_toc. toc[0] %} {% for item in site. data. blogs_toc. toc %} {% if item. subfolderitems[0] %} {{ item. title }}: {% for entry in item. subfolderitems %} <a href= {{ entry. url }} {% if page. title == entry. page %} class= blogs-navigation--item_link active {% else %} class= blogs-navigation--item_link {% endif %}> {{ entry. page }} </a> {% if entry. subsubfolderitems[0] %} {% for subentry in entry. subsubfolderitems %} <a href= {{ subentry. url }} {% if page. title == entry. page %} class= blogs-navigation--item_link active {% else %} class= blogs-navigation--item_link {% endif %}> {{ subentry. page }} </a> {% endfor %} {% endif %} {% endfor %} {% endif %} {% endfor %} {% endif %} {% for post in paginator. posts %} {{ post. title }}: {{ post. pub-date }}, {{ post. pub-year }} {{ post. description | strip_html | truncatewords:50 }} {% capture readmorelink %} {{ post. url | prepend: site. baseurl }} {% endcapture %} {% include readmore. html %} {% endfor %} {% include paging. html %} " }, { - "id": 247, + "id": 246, "url": "/blogs/page31/", "title": "Blogs", "author" : "", "tags" : "", "body": " - {{ page. title }} {% if site. data. blogs_toc. toc[0] %} {% for item in site. data. blogs_toc. toc %} {% if item. subfolderitems[0] %} {{ item. title }}: {% for entry in item. subfolderitems %} <a href= {{ entry. url }} {% if page. title == entry. page %} class= blogs-navigation--item_link active {% else %} class= blogs-navigation--item_link {% endif %}> {{ entry. page }} </a> {% if entry. subsubfolderitems[0] %} {% for subentry in entry. subsubfolderitems %} <a href= {{ subentry. url }} {% if page. title == entry. page %} class= blogs-navigation--item_link active {% else %} class= blogs-navigation--item_link {% endif %}> {{ subentry. page }} </a> {% endfor %} {% endif %} {% endfor %} {% endif %} {% endfor %} {% endif %} {% for post in paginator. posts %} {{ post. title }}: {{ post. pub-date }}, {{ post. pub-year }} {{ post. description | strip_html | truncatewords:50 }} {% capture readmorelink %} {{ post. url | prepend: site. baseurl }} {% endcapture %} {% include readmore. html %} {% endfor %} {% include paging. html %} " }, { - "id": 248, + "id": 247, "url": "/blogs/page32/", "title": "Blogs", "author" : "", "tags" : "", "body": " - {{ page. title }} {% if site. data. blogs_toc. toc[0] %} {% for item in site. data. blogs_toc. toc %} {% if item. subfolderitems[0] %} {{ item. title }}: {% for entry in item. subfolderitems %} <a href= {{ entry. url }} {% if page. title == entry. 
page %} class= blogs-navigation--item_link active {% else %} class= blogs-navigation--item_link {% endif %}> {{ entry. page }} </a> {% if entry. subsubfolderitems[0] %} {% for subentry in entry. subsubfolderitems %} <a href= {{ subentry. url }} {% if page. title == entry. page %} class= blogs-navigation--item_link active {% else %} class= blogs-navigation--item_link {% endif %}> {{ subentry. page }} </a> {% endfor %} {% endif %} {% endfor %} {% endif %} {% endfor %} {% endif %} {% for post in paginator. posts %} {{ post. title }}: {{ post. pub-date }}, {{ post. pub-year }} {{ post. description | strip_html | truncatewords:50 }} {% capture readmorelink %} {{ post. url | prepend: site. baseurl }} {% endcapture %} {% include readmore. html %} {% endfor %} {% include paging. html %} " }, { - "id": 249, + "id": 248, "url": "/blogs/page33/", "title": "Blogs", "author" : "", "tags" : "", "body": " - {{ page. title }} {% if site. data. blogs_toc. toc[0] %} {% for item in site. data. blogs_toc. toc %} {% if item. subfolderitems[0] %} {{ item. title }}: {% for entry in item. subfolderitems %} <a href= {{ entry. url }} {% if page. title == entry. page %} class= blogs-navigation--item_link active {% else %} class= blogs-navigation--item_link {% endif %}> {{ entry. page }} </a> {% if entry. subsubfolderitems[0] %} {% for subentry in entry. subsubfolderitems %} <a href= {{ subentry. url }} {% if page. title == entry. page %} class= blogs-navigation--item_link active {% else %} class= blogs-navigation--item_link {% endif %}> {{ subentry. page }} </a> {% endfor %} {% endif %} {% endfor %} {% endif %} {% endfor %} {% endif %} {% for post in paginator. posts %} {{ post. title }}: {{ post. pub-date }}, {{ post. pub-year }} {{ post. description | strip_html | truncatewords:50 }} {% capture readmorelink %} {{ post. url | prepend: site. baseurl }} {% endcapture %} {% include readmore. html %} {% endfor %} {% include paging. html %} " }, { - "id": 250, + "id": 249, "url": "/blogs/page34/", "title": "Blogs", "author" : "", "tags" : "", "body": " - {{ page. title }} {% if site. data. blogs_toc. toc[0] %} {% for item in site. data. blogs_toc. toc %} {% if item. subfolderitems[0] %} {{ item. title }}: {% for entry in item. subfolderitems %} <a href= {{ entry. url }} {% if page. title == entry. page %} class= blogs-navigation--item_link active {% else %} class= blogs-navigation--item_link {% endif %}> {{ entry. page }} </a> {% if entry. subsubfolderitems[0] %} {% for subentry in entry. subsubfolderitems %} <a href= {{ subentry. url }} {% if page. title == entry. page %} class= blogs-navigation--item_link active {% else %} class= blogs-navigation--item_link {% endif %}> {{ subentry. page }} </a> {% endfor %} {% endif %} {% endfor %} {% endif %} {% endfor %} {% endif %} {% for post in paginator. posts %} {{ post. title }}: {{ post. pub-date }}, {{ post. pub-year }} {{ post. description | strip_html | truncatewords:50 }} {% capture readmorelink %} {{ post. url | prepend: site. baseurl }} {% endcapture %} {% include readmore. html %} {% endfor %} {% include paging. html %} " }, { - "id": 251, + "id": 250, "url": "/blogs/page35/", "title": "Blogs", "author" : "", "tags" : "", "body": " - {{ page. title }} {% if site. data. blogs_toc. toc[0] %} {% for item in site. data. blogs_toc. toc %} {% if item. subfolderitems[0] %} {{ item. title }}: {% for entry in item. subfolderitems %} <a href= {{ entry. url }} {% if page. title == entry. 
page %} class= blogs-navigation--item_link active {% else %} class= blogs-navigation--item_link {% endif %}> {{ entry. page }} </a> {% if entry. subsubfolderitems[0] %} {% for subentry in entry. subsubfolderitems %} <a href= {{ subentry. url }} {% if page. title == entry. page %} class= blogs-navigation--item_link active {% else %} class= blogs-navigation--item_link {% endif %}> {{ subentry. page }} </a> {% endfor %} {% endif %} {% endfor %} {% endif %} {% endfor %} {% endif %} {% for post in paginator. posts %} {{ post. title }}: {{ post. pub-date }}, {{ post. pub-year }} {{ post. description | strip_html | truncatewords:50 }} {% capture readmorelink %} {{ post. url | prepend: site. baseurl }} {% endcapture %} {% include readmore. html %} {% endfor %} {% include paging. html %} " }, { - "id": 252, + "id": 251, "url": "/blogs/page36/", "title": "Blogs", "author" : "", "tags" : "", "body": " - {{ page. title }} {% if site. data. blogs_toc. toc[0] %} {% for item in site. data. blogs_toc. toc %} {% if item. subfolderitems[0] %} {{ item. title }}: {% for entry in item. subfolderitems %} <a href= {{ entry. url }} {% if page. title == entry. page %} class= blogs-navigation--item_link active {% else %} class= blogs-navigation--item_link {% endif %}> {{ entry. page }} </a> {% if entry. subsubfolderitems[0] %} {% for subentry in entry. subsubfolderitems %} <a href= {{ subentry. url }} {% if page. title == entry. page %} class= blogs-navigation--item_link active {% else %} class= blogs-navigation--item_link {% endif %}> {{ subentry. page }} </a> {% endfor %} {% endif %} {% endfor %} {% endif %} {% endfor %} {% endif %} {% for post in paginator. posts %} {{ post. title }}: {{ post. pub-date }}, {{ post. pub-year }} {{ post. description | strip_html | truncatewords:50 }} {% capture readmorelink %} {{ post. url | prepend: site. baseurl }} {% endcapture %} {% include readmore. html %} {% endfor %} {% include paging. html %} " - }, { - "id": 253, - "url": "/blogs/page37/", - "title": "Blogs", - "author" : "", - "tags" : "", - "body": " - {{ page. title }} {% if site. data. blogs_toc. toc[0] %} {% for item in site. data. blogs_toc. toc %} {% if item. subfolderitems[0] %} {{ item. title }}: {% for entry in item. subfolderitems %} <a href= {{ entry. url }} {% if page. title == entry. page %} class= blogs-navigation--item_link active {% else %} class= blogs-navigation--item_link {% endif %}> {{ entry. page }} </a> {% if entry. subsubfolderitems[0] %} {% for subentry in entry. subsubfolderitems %} <a href= {{ subentry. url }} {% if page. title == entry. page %} class= blogs-navigation--item_link active {% else %} class= blogs-navigation--item_link {% endif %}> {{ subentry. page }} </a> {% endfor %} {% endif %} {% endfor %} {% endif %} {% endfor %} {% endif %} {% for post in paginator. posts %} {{ post. title }}: {{ post. pub-date }}, {{ post. pub-year }} {{ post. description | strip_html | truncatewords:50 }} {% capture readmorelink %} {{ post. url | prepend: site. baseurl }} {% endcapture %} {% include readmore. html %} {% endfor %} {% include paging. 
html %} " }, , , , , , , , , ]; var idx = lunr(function () { diff --git a/sitemap.xml b/sitemap.xml index 887c22ab87..00e438e5ba 100644 --- a/sitemap.xml +++ b/sitemap.xml @@ -389,10 +389,6 @@ 2019-11-04T00:00:00+00:00 -https://kubevirt.io//2019/Access-Virtual-Machines-graphic-console-using-noVNC.html -2019-11-11T00:00:00+00:00 - - https://kubevirt.io//2019/kubecon-na-2019.html 2019-11-12T00:00:00+00:00 @@ -1210,12 +1206,6 @@ https://kubevirt.io//tag/jenkins.html -https://kubevirt.io//tag/novnc.html - - -https://kubevirt.io//tag/console.html - - https://kubevirt.io//tag/kubecon.html @@ -1294,6 +1284,9 @@ https://kubevirt.io//tag/cockpit.html +https://kubevirt.io//tag/novnc.html + + https://kubevirt.io//tag/user-interface.html @@ -1674,7 +1667,4 @@ https://kubevirt.io//blogs/page36/ - -https://kubevirt.io//blogs/page37/ - diff --git a/ssp-operator/index.html b/ssp-operator/index.html index fc9428ad2a..fce99e4395 100644 --- a/ssp-operator/index.html +++ b/ssp-operator/index.html @@ -50,7 +50,7 @@ - + @@ -225,7 +225,7 @@ diff --git a/summit/index.html b/summit/index.html index df8467b4ff..91e75b58e1 100644 --- a/summit/index.html +++ b/summit/index.html @@ -50,7 +50,7 @@ - + @@ -225,7 +225,7 @@ diff --git a/tag/addons.html b/tag/addons.html index 465db716db..46c1524306 100644 --- a/tag/addons.html +++ b/tag/addons.html @@ -50,7 +50,7 @@ - + @@ -225,7 +225,7 @@ @@ -330,7 +330,6 @@

    Other tags to explore

    - @@ -408,7 +407,7 @@

    Other tags to explore

    - + diff --git a/tag/admin-operations.html b/tag/admin-operations.html index 93a9db82e3..e8b51d5cce 100644 --- a/tag/admin-operations.html +++ b/tag/admin-operations.html @@ -50,7 +50,7 @@ - + @@ -225,7 +225,7 @@ @@ -330,7 +330,6 @@

    Other tags to explore

    - @@ -408,7 +407,7 @@

    Other tags to explore

    - + diff --git a/tag/admin.html b/tag/admin.html index 9f18eb10c8..70f8b6a571 100644 --- a/tag/admin.html +++ b/tag/admin.html @@ -50,7 +50,7 @@ - + @@ -225,7 +225,7 @@ @@ -330,7 +330,6 @@

    Other tags to explore

    - @@ -408,7 +407,7 @@

    Other tags to explore

    - + diff --git a/tag/advanced-vm-scheduling.html b/tag/advanced-vm-scheduling.html index 1d24bdfc98..e3e8f9f60d 100644 --- a/tag/advanced-vm-scheduling.html +++ b/tag/advanced-vm-scheduling.html @@ -50,7 +50,7 @@ - + @@ -225,7 +225,7 @@ @@ -330,7 +330,6 @@

    Other tags to explore

    - @@ -408,7 +407,7 @@

    Other tags to explore

    - + diff --git a/tag/affinity.html b/tag/affinity.html index 4b8bba8576..0125a67b3c 100644 --- a/tag/affinity.html +++ b/tag/affinity.html @@ -50,7 +50,7 @@ - + @@ -225,7 +225,7 @@ @@ -330,7 +330,6 @@

    Other tags to explore

    - @@ -408,7 +407,7 @@

    Other tags to explore

    - + diff --git a/tag/america.html b/tag/america.html index 38aa8aaf7b..98f20c71ea 100644 --- a/tag/america.html +++ b/tag/america.html @@ -50,7 +50,7 @@ - + @@ -225,7 +225,7 @@ @@ -330,7 +330,6 @@

    Other tags to explore

    - @@ -408,7 +407,7 @@

    Other tags to explore

    - + diff --git a/tag/ami.html b/tag/ami.html index 48b9e890ca..ce2b089d15 100644 --- a/tag/ami.html +++ b/tag/ami.html @@ -50,7 +50,7 @@ - + @@ -225,7 +225,7 @@ @@ -330,7 +330,6 @@

    Other tags to explore

    - @@ -408,7 +407,7 @@

    Other tags to explore

    - + diff --git a/tag/ansible-collection.html b/tag/ansible-collection.html index a18356f99d..a1185e40e8 100644 --- a/tag/ansible-collection.html +++ b/tag/ansible-collection.html @@ -50,7 +50,7 @@ - + @@ -225,7 +225,7 @@ @@ -330,7 +330,6 @@

    Other tags to explore

    - @@ -408,7 +407,7 @@

    Other tags to explore

    - + diff --git a/tag/ansible.html b/tag/ansible.html index 4c13ab8004..b42c3e6984 100644 --- a/tag/ansible.html +++ b/tag/ansible.html @@ -50,7 +50,7 @@ - + @@ -225,7 +225,7 @@ @@ -330,7 +330,6 @@

    Other tags to explore

    - @@ -408,7 +407,7 @@

    Other tags to explore

    - + diff --git a/tag/api.html b/tag/api.html index 3f4beb5542..3876666bf5 100644 --- a/tag/api.html +++ b/tag/api.html @@ -50,7 +50,7 @@ - + @@ -225,7 +225,7 @@ @@ -332,7 +332,6 @@

    Other tags to explore

    - @@ -410,7 +409,7 @@

    Other tags to explore

    - + diff --git a/tag/architecture.html b/tag/architecture.html index 4c4e21ee59..ba3a3be835 100644 --- a/tag/architecture.html +++ b/tag/architecture.html @@ -50,7 +50,7 @@ - + @@ -225,7 +225,7 @@ @@ -336,7 +336,6 @@

    Other tags to explore

    - @@ -414,7 +413,7 @@

    Other tags to explore

    - + diff --git a/tag/authentication.html b/tag/authentication.html index 44c0a41188..0145d24814 100644 --- a/tag/authentication.html +++ b/tag/authentication.html @@ -50,7 +50,7 @@ - + @@ -225,7 +225,7 @@ @@ -330,7 +330,6 @@

    Other tags to explore

    - @@ -408,7 +407,7 @@

    Other tags to explore

    - + diff --git a/tag/autodeployer.html b/tag/autodeployer.html index 8b5d3eea2c..640628b9cd 100644 --- a/tag/autodeployer.html +++ b/tag/autodeployer.html @@ -50,7 +50,7 @@ - + @@ -225,7 +225,7 @@ @@ -330,7 +330,6 @@

    Other tags to explore

    - @@ -408,7 +407,7 @@

    Other tags to explore

    - + diff --git a/tag/aws.html b/tag/aws.html index 3cc627b1e2..b2c92acded 100644 --- a/tag/aws.html +++ b/tag/aws.html @@ -50,7 +50,7 @@ - + @@ -225,7 +225,7 @@ @@ -332,7 +332,6 @@

    Other tags to explore

    - @@ -410,7 +409,7 @@

    Other tags to explore

    - + diff --git a/tag/basic-operations.html b/tag/basic-operations.html index f4ced79da3..eeb459beef 100644 --- a/tag/basic-operations.html +++ b/tag/basic-operations.html @@ -50,7 +50,7 @@ - + @@ -225,7 +225,7 @@ @@ -330,7 +330,6 @@

    Other tags to explore

    - @@ -408,7 +407,7 @@

    Other tags to explore

    - + diff --git a/tag/bridge.html b/tag/bridge.html index f2727c3982..89d8462b92 100644 --- a/tag/bridge.html +++ b/tag/bridge.html @@ -50,7 +50,7 @@ - + @@ -225,7 +225,7 @@ @@ -330,7 +330,6 @@

    Other tags to explore

    - @@ -408,7 +407,7 @@

    Other tags to explore

    - + diff --git a/tag/build.html b/tag/build.html index 05a92f6884..539e2a5089 100644 --- a/tag/build.html +++ b/tag/build.html @@ -50,7 +50,7 @@ - + @@ -225,7 +225,7 @@ @@ -330,7 +330,6 @@

    Other tags to explore

    - @@ -408,7 +407,7 @@

    Other tags to explore

    - + diff --git a/tag/builder-tool.html b/tag/builder-tool.html index b08e27c777..5bc64449c4 100644 --- a/tag/builder-tool.html +++ b/tag/builder-tool.html @@ -50,7 +50,7 @@ - + @@ -225,7 +225,7 @@ @@ -330,7 +330,6 @@

    Other tags to explore

    - @@ -408,7 +407,7 @@

    Other tags to explore

    - + diff --git a/tag/cdi.html b/tag/cdi.html index a03fb451f8..0c838d547d 100644 --- a/tag/cdi.html +++ b/tag/cdi.html @@ -50,7 +50,7 @@ - + @@ -225,7 +225,7 @@ @@ -330,7 +330,6 @@

    Other tags to explore

    - @@ -408,7 +407,7 @@

    Other tags to explore

    - + diff --git a/tag/ceph.html b/tag/ceph.html index d1fe6f28af..3b03d1aed2 100644 --- a/tag/ceph.html +++ b/tag/ceph.html @@ -50,7 +50,7 @@ - + @@ -225,7 +225,7 @@ @@ -330,7 +330,6 @@

    Other tags to explore

    - @@ -408,7 +407,7 @@

    Other tags to explore

    - + diff --git a/tag/changelog.html b/tag/changelog.html index 622aae19f8..e799aeb6de 100644 --- a/tag/changelog.html +++ b/tag/changelog.html @@ -50,7 +50,7 @@ - + @@ -225,7 +225,7 @@ @@ -498,7 +498,6 @@

    Other tags to explore

    - @@ -576,7 +575,7 @@

    Other tags to explore

    - + diff --git a/tag/chronyd.html b/tag/chronyd.html index eb7285709b..f0d3643f57 100644 --- a/tag/chronyd.html +++ b/tag/chronyd.html @@ -50,7 +50,7 @@ - + @@ -225,7 +225,7 @@ @@ -330,7 +330,6 @@

    Other tags to explore

    - @@ -408,7 +407,7 @@

    Other tags to explore

    - + diff --git a/tag/ci-cd.html b/tag/ci-cd.html index 03efbe9b0f..46c8570368 100644 --- a/tag/ci-cd.html +++ b/tag/ci-cd.html @@ -50,7 +50,7 @@ - + @@ -225,7 +225,7 @@ @@ -332,7 +332,6 @@

    Other tags to explore

    - @@ -410,7 +409,7 @@

    Other tags to explore

    - + diff --git a/tag/cicd.html b/tag/cicd.html index 430b9d3aee..4312cf4dfc 100644 --- a/tag/cicd.html +++ b/tag/cicd.html @@ -50,7 +50,7 @@ - + @@ -225,7 +225,7 @@ @@ -330,7 +330,6 @@

    Other tags to explore

    - @@ -408,7 +407,7 @@

    Other tags to explore

    - + diff --git a/tag/clearcontainers.html b/tag/clearcontainers.html index 3d6108582c..ab7055a7db 100644 --- a/tag/clearcontainers.html +++ b/tag/clearcontainers.html @@ -50,7 +50,7 @@ - + @@ -225,7 +225,7 @@ @@ -330,7 +330,6 @@

    Other tags to explore

    - @@ -408,7 +407,7 @@

    Other tags to explore

    - + diff --git a/tag/clone.html b/tag/clone.html index df03f69c1b..7c56644e05 100644 --- a/tag/clone.html +++ b/tag/clone.html @@ -50,7 +50,7 @@ - + @@ -225,7 +225,7 @@ @@ -330,7 +330,6 @@

    Other tags to explore

    - @@ -408,7 +407,7 @@

    Other tags to explore

    - + diff --git a/tag/cloudnativecon.html b/tag/cloudnativecon.html index 1f310d4066..6fbcad34e1 100644 --- a/tag/cloudnativecon.html +++ b/tag/cloudnativecon.html @@ -50,7 +50,7 @@ - + @@ -225,7 +225,7 @@ @@ -330,7 +330,6 @@

    Other tags to explore

    - @@ -408,7 +407,7 @@

    Other tags to explore

    - + diff --git a/tag/cluster-autoscaler.html b/tag/cluster-autoscaler.html index f41a60464f..6b0e4407da 100644 --- a/tag/cluster-autoscaler.html +++ b/tag/cluster-autoscaler.html @@ -50,7 +50,7 @@ - + @@ -225,7 +225,7 @@ @@ -330,7 +330,6 @@

    Other tags to explore

    - @@ -408,7 +407,7 @@

    Other tags to explore

    - + diff --git a/tag/cluster-network-addons-operator.html b/tag/cluster-network-addons-operator.html index df0030abe6..e3086b302b 100644 --- a/tag/cluster-network-addons-operator.html +++ b/tag/cluster-network-addons-operator.html @@ -50,7 +50,7 @@ - + @@ -225,7 +225,7 @@ @@ -330,7 +330,6 @@

    Other tags to explore

    - @@ -408,7 +407,7 @@

    Other tags to explore

    - + diff --git a/tag/cnao.html b/tag/cnao.html index e1135c6dc9..0830c53fe2 100644 --- a/tag/cnao.html +++ b/tag/cnao.html @@ -50,7 +50,7 @@ - + @@ -225,7 +225,7 @@ @@ -330,7 +330,6 @@

    Other tags to explore

    - @@ -408,7 +407,7 @@

    Other tags to explore

    - + diff --git a/tag/cncf.html b/tag/cncf.html index 9528fe4579..b48eb197a2 100644 --- a/tag/cncf.html +++ b/tag/cncf.html @@ -50,7 +50,7 @@ - + @@ -225,7 +225,7 @@ @@ -332,7 +332,6 @@

    Other tags to explore

    - @@ -410,7 +409,7 @@

    Other tags to explore

    - + diff --git a/tag/cni.html b/tag/cni.html index 987cc9c511..5549f93d8a 100644 --- a/tag/cni.html +++ b/tag/cni.html @@ -50,7 +50,7 @@ - + @@ -225,7 +225,7 @@ @@ -332,7 +332,6 @@

    Other tags to explore

    - @@ -410,7 +409,7 @@

    Other tags to explore

    - + diff --git a/tag/cockpit.html b/tag/cockpit.html index 00d34cf398..35337c0cf9 100644 --- a/tag/cockpit.html +++ b/tag/cockpit.html @@ -50,7 +50,7 @@ - + @@ -225,7 +225,7 @@ @@ -330,7 +330,6 @@

    Other tags to explore

    - @@ -408,7 +407,7 @@

    Other tags to explore

    - + diff --git a/tag/common-templates.html b/tag/common-templates.html index be45c68f03..688acd0dc0 100644 --- a/tag/common-templates.html +++ b/tag/common-templates.html @@ -50,7 +50,7 @@ - + @@ -225,7 +225,7 @@ @@ -330,7 +330,6 @@

    Other tags to explore

    - @@ -408,7 +407,7 @@

    Other tags to explore

    - + diff --git a/tag/community.html b/tag/community.html index 30d2803096..76af9ed886 100644 --- a/tag/community.html +++ b/tag/community.html @@ -50,7 +50,7 @@ - + @@ -225,7 +225,7 @@ @@ -348,7 +348,6 @@

    Other tags to explore

    - @@ -426,7 +425,7 @@

    Other tags to explore

    - + diff --git a/tag/composer-cli.html b/tag/composer-cli.html index 97784c57d9..b2fa612730 100644 --- a/tag/composer-cli.html +++ b/tag/composer-cli.html @@ -50,7 +50,7 @@ - + @@ -225,7 +225,7 @@ @@ -330,7 +330,6 @@

    Other tags to explore

    - @@ -408,7 +407,7 @@

    Other tags to explore

    - + diff --git a/tag/condition-types.html b/tag/condition-types.html index 892a7040c0..7ae6036c02 100644 --- a/tag/condition-types.html +++ b/tag/condition-types.html @@ -50,7 +50,7 @@ - + @@ -225,7 +225,7 @@ @@ -330,7 +330,6 @@

    Other tags to explore

    - @@ -408,7 +407,7 @@

    Other tags to explore

    - + diff --git a/tag/conference.html b/tag/conference.html index 2ce4fdef75..dcd14e3c43 100644 --- a/tag/conference.html +++ b/tag/conference.html @@ -50,7 +50,7 @@ - + @@ -225,7 +225,7 @@ @@ -332,7 +332,6 @@

    Other tags to explore

    - @@ -410,7 +409,7 @@

    Other tags to explore

    - + diff --git a/tag/connect-to-console.html b/tag/connect-to-console.html index dcb09ced0e..3f87a95cdf 100644 --- a/tag/connect-to-console.html +++ b/tag/connect-to-console.html @@ -50,7 +50,7 @@ - + @@ -225,7 +225,7 @@ @@ -330,7 +330,6 @@

    Other tags to explore

    - @@ -408,7 +407,7 @@

    Other tags to explore

    - + diff --git a/tag/connect-to-ssh.html b/tag/connect-to-ssh.html index 630e74cb35..267124bea2 100644 --- a/tag/connect-to-ssh.html +++ b/tag/connect-to-ssh.html @@ -50,7 +50,7 @@ - + @@ -225,7 +225,7 @@ @@ -330,7 +330,6 @@

    Other tags to explore

    - @@ -408,7 +407,7 @@

    Other tags to explore

    - + diff --git a/tag/console.html b/tag/console.html deleted file mode 100644 index a156fd39fc..0000000000 --- a/tag/console.html +++ /dev/null @@ -1,560 +0,0 @@ - KubeVirt.io

    - Articles tagged with console

    - Other tags to explore

    diff --git a/tag/container.html b/tag/container.html index 45efe98090..8c89de5981 100644 --- a/tag/container.html +++ b/tag/container.html @@ -50,7 +50,7 @@ - + @@ -225,7 +225,7 @@ @@ -332,7 +332,6 @@

    Other tags to explore

    - @@ -410,7 +409,7 @@

    Other tags to explore

    - + diff --git a/tag/containerdisk.html b/tag/containerdisk.html index 901f9c2e22..d7262077ef 100644 --- a/tag/containerdisk.html +++ b/tag/containerdisk.html @@ -50,7 +50,7 @@ - + @@ -225,7 +225,7 @@ @@ -330,7 +330,6 @@

    Other tags to explore

    - @@ -408,7 +407,7 @@

    Other tags to explore

    - + diff --git a/tag/containerized-data-importer.html b/tag/containerized-data-importer.html index 3eaa12f004..d1367cc11b 100644 --- a/tag/containerized-data-importer.html +++ b/tag/containerized-data-importer.html @@ -50,7 +50,7 @@ - + @@ -225,7 +225,7 @@ @@ -330,7 +330,6 @@

    Other tags to explore

    - @@ -408,7 +407,7 @@

    Other tags to explore

    - + diff --git a/tag/continuous-integration.html b/tag/continuous-integration.html index 8f376c4685..77a56d8f87 100644 --- a/tag/continuous-integration.html +++ b/tag/continuous-integration.html @@ -50,7 +50,7 @@ - + @@ -225,7 +225,7 @@ @@ -332,7 +332,6 @@

    Other tags to explore

    - @@ -410,7 +409,7 @@

    Other tags to explore

    - + diff --git a/tag/contra-lib.html b/tag/contra-lib.html index 8666d71cf3..2f2461a67a 100644 --- a/tag/contra-lib.html +++ b/tag/contra-lib.html @@ -50,7 +50,7 @@ - + @@ -225,7 +225,7 @@ @@ -330,7 +330,6 @@

    Other tags to explore

    - @@ -408,7 +407,7 @@

    Other tags to explore

    - + diff --git a/tag/coreos.html b/tag/coreos.html index 8ea37298db..9003402b85 100644 --- a/tag/coreos.html +++ b/tag/coreos.html @@ -50,7 +50,7 @@ - + @@ -225,7 +225,7 @@ @@ -330,7 +330,6 @@

    Other tags to explore

    - @@ -408,7 +407,7 @@

    Other tags to explore

    - + diff --git a/tag/cpu-pinning.html b/tag/cpu-pinning.html index 369a1c0996..4dc154c4b1 100644 --- a/tag/cpu-pinning.html +++ b/tag/cpu-pinning.html @@ -50,7 +50,7 @@ - + @@ -225,7 +225,7 @@ @@ -330,7 +330,6 @@

    Other tags to explore

    - @@ -408,7 +407,7 @@

    Other tags to explore

    - + diff --git a/tag/cpumanager.html b/tag/cpumanager.html index c8147382ae..58eb548a7c 100644 --- a/tag/cpumanager.html +++ b/tag/cpumanager.html @@ -50,7 +50,7 @@ - + @@ -225,7 +225,7 @@ @@ -330,7 +330,6 @@

    Other tags to explore

    - @@ -408,7 +407,7 @@

    Other tags to explore

    - + diff --git a/tag/create-vm.html b/tag/create-vm.html index 7418697e8a..a2c23ec3aa 100644 --- a/tag/create-vm.html +++ b/tag/create-vm.html @@ -50,7 +50,7 @@ - + @@ -225,7 +225,7 @@ @@ -330,7 +330,6 @@

    Other tags to explore

    - @@ -408,7 +407,7 @@

    Other tags to explore

    - + diff --git a/tag/cri-o.html b/tag/cri-o.html index 1e0438ec12..f768ea9242 100644 --- a/tag/cri-o.html +++ b/tag/cri-o.html @@ -50,7 +50,7 @@ - + @@ -225,7 +225,7 @@ @@ -334,7 +334,6 @@

    Other tags to explore

    - @@ -412,7 +411,7 @@

    Other tags to explore

    - + diff --git a/tag/cri.html b/tag/cri.html index 4e656dfc5b..df986fa1cc 100644 --- a/tag/cri.html +++ b/tag/cri.html @@ -50,7 +50,7 @@ - + @@ -225,7 +225,7 @@ @@ -330,7 +330,6 @@

    Other tags to explore

    - @@ -408,7 +407,7 @@

    Other tags to explore

    - + diff --git a/tag/custom-resources.html b/tag/custom-resources.html index 4f0cc26138..aba93e7059 100644 --- a/tag/custom-resources.html +++ b/tag/custom-resources.html @@ -50,7 +50,7 @@ - + @@ -225,7 +225,7 @@ @@ -330,7 +330,6 @@

    Other tags to explore

    - @@ -408,7 +407,7 @@

    Other tags to explore

    - + diff --git a/tag/datavolumes.html b/tag/datavolumes.html index 5b4fdf1b3a..dc399f406d 100644 --- a/tag/datavolumes.html +++ b/tag/datavolumes.html @@ -50,7 +50,7 @@ - + @@ -225,7 +225,7 @@ @@ -330,7 +330,6 @@

    Other tags to explore

    - @@ -408,7 +407,7 @@

    Other tags to explore

    - + diff --git a/tag/debug.html b/tag/debug.html index 6b235b3bcd..bd7ab1c10a 100644 --- a/tag/debug.html +++ b/tag/debug.html @@ -50,7 +50,7 @@ - + @@ -225,7 +225,7 @@ @@ -330,7 +330,6 @@

    Other tags to explore

    - @@ -408,7 +407,7 @@

    Other tags to explore

    - + diff --git a/tag/dedicated-network.html b/tag/dedicated-network.html index 7a64b4c382..efd214c41f 100644 --- a/tag/dedicated-network.html +++ b/tag/dedicated-network.html @@ -50,7 +50,7 @@ - + @@ -225,7 +225,7 @@ @@ -330,7 +330,6 @@

    Other tags to explore

    - @@ -408,7 +407,7 @@

    Other tags to explore

    - + diff --git a/tag/design.html b/tag/design.html index 506c09cdac..577ec5be43 100644 --- a/tag/design.html +++ b/tag/design.html @@ -50,7 +50,7 @@ - + @@ -225,7 +225,7 @@ @@ -336,7 +336,6 @@

    Other tags to explore

    - @@ -414,7 +413,7 @@

    Other tags to explore

    - + diff --git a/tag/development.html b/tag/development.html index e098303ec7..a5f451f5b0 100644 --- a/tag/development.html +++ b/tag/development.html @@ -50,7 +50,7 @@ - + @@ -225,7 +225,7 @@ @@ -330,7 +330,6 @@

    Other tags to explore

    - @@ -408,7 +407,7 @@

    Other tags to explore

    - + diff --git a/tag/device-plugins.html b/tag/device-plugins.html index 24ce30be70..085349f16d 100644 --- a/tag/device-plugins.html +++ b/tag/device-plugins.html @@ -50,7 +50,7 @@ - + @@ -225,7 +225,7 @@ @@ -330,7 +330,6 @@

    Other tags to explore

    - @@ -408,7 +407,7 @@

    Other tags to explore

    - + diff --git a/tag/disk-image.html b/tag/disk-image.html index 6c3c012573..312a5a2ba0 100644 --- a/tag/disk-image.html +++ b/tag/disk-image.html @@ -50,7 +50,7 @@ - + @@ -225,7 +225,7 @@ @@ -330,7 +330,6 @@

    Other tags to explore

    - @@ -408,7 +407,7 @@

    Other tags to explore

    - + diff --git a/tag/docker.html b/tag/docker.html index 3df7e42b23..bcec4dedd0 100644 --- a/tag/docker.html +++ b/tag/docker.html @@ -50,7 +50,7 @@ - + @@ -225,7 +225,7 @@ @@ -330,7 +330,6 @@

    Other tags to explore

    - @@ -408,7 +407,7 @@

    Other tags to explore

    - + diff --git a/tag/ebtables.html b/tag/ebtables.html index 03667e3f80..07c7a5ba43 100644 --- a/tag/ebtables.html +++ b/tag/ebtables.html @@ -50,7 +50,7 @@ - + @@ -225,7 +225,7 @@ @@ -330,7 +330,6 @@

    Other tags to explore

    - @@ -408,7 +407,7 @@

    Other tags to explore

    - + diff --git a/tag/ec2.html b/tag/ec2.html index fd62ce9973..fe814c9c6b 100644 --- a/tag/ec2.html +++ b/tag/ec2.html @@ -50,7 +50,7 @@ - + @@ -225,7 +225,7 @@ @@ -330,7 +330,6 @@

    Other tags to explore

    - @@ -408,7 +407,7 @@

    Other tags to explore

    - + diff --git a/tag/eks.html b/tag/eks.html index 9f46b78d1e..65241cf1cb 100644 --- a/tag/eks.html +++ b/tag/eks.html @@ -50,7 +50,7 @@ - + @@ -225,7 +225,7 @@ @@ -330,7 +330,6 @@

    Other tags to explore

    - @@ -408,7 +407,7 @@

    Other tags to explore

    - + diff --git a/tag/event.html b/tag/event.html index 6e9dc83c03..c79dec06a3 100644 --- a/tag/event.html +++ b/tag/event.html @@ -50,7 +50,7 @@ - + @@ -225,7 +225,7 @@ @@ -338,7 +338,6 @@

    Other tags to explore

    - @@ -416,7 +415,7 @@

    Other tags to explore

    - + diff --git a/tag/eviction.html b/tag/eviction.html index ec008f0fd5..752ca3d154 100644 --- a/tag/eviction.html +++ b/tag/eviction.html @@ -50,7 +50,7 @@ - + @@ -225,7 +225,7 @@ @@ -330,7 +330,6 @@

    Other tags to explore

    - @@ -408,7 +407,7 @@

    Other tags to explore

    - + diff --git a/tag/federation.html b/tag/federation.html index aa59c96784..f01e9167ae 100644 --- a/tag/federation.html +++ b/tag/federation.html @@ -50,7 +50,7 @@ - + @@ -225,7 +225,7 @@ @@ -330,7 +330,6 @@

    Other tags to explore

    - @@ -408,7 +407,7 @@

    Other tags to explore

    - + diff --git a/tag/fedora.html b/tag/fedora.html index 0377fcb1ca..114e527659 100644 --- a/tag/fedora.html +++ b/tag/fedora.html @@ -50,7 +50,7 @@ - + @@ -225,7 +225,7 @@ @@ -330,7 +330,6 @@

    Other tags to explore

    - @@ -408,7 +407,7 @@

    Other tags to explore

    - + diff --git a/tag/flannel.html b/tag/flannel.html index 73480e43f6..08cbae2ab5 100644 --- a/tag/flannel.html +++ b/tag/flannel.html @@ -50,7 +50,7 @@ - + @@ -225,7 +225,7 @@ @@ -330,7 +330,6 @@

    Other tags to explore

    - @@ -408,7 +407,7 @@

    Other tags to explore

    - + diff --git a/tag/gathering.html b/tag/gathering.html index 029677c578..e9df81b43c 100644 --- a/tag/gathering.html +++ b/tag/gathering.html @@ -50,7 +50,7 @@ - + @@ -225,7 +225,7 @@ @@ -330,7 +330,6 @@

    Other tags to explore

    - @@ -408,7 +407,7 @@

    Other tags to explore

    - + diff --git a/tag/gcp.html b/tag/gcp.html index ed858d4a46..bd01ac2193 100644 --- a/tag/gcp.html +++ b/tag/gcp.html @@ -50,7 +50,7 @@ - + @@ -225,7 +225,7 @@ @@ -330,7 +330,6 @@

    Other tags to explore

    - @@ -408,7 +407,7 @@

    Other tags to explore

    - + diff --git a/tag/glusterfs.html b/tag/glusterfs.html index ccde57f61b..96c61492cc 100644 --- a/tag/glusterfs.html +++ b/tag/glusterfs.html @@ -50,7 +50,7 @@ - + @@ -225,7 +225,7 @@ @@ -332,7 +332,6 @@

    Other tags to explore

    - @@ -410,7 +409,7 @@

    Other tags to explore

    - + diff --git a/tag/go.html b/tag/go.html index 6a78d49bd2..55c8e94fc0 100644 --- a/tag/go.html +++ b/tag/go.html @@ -50,7 +50,7 @@ - + @@ -225,7 +225,7 @@ @@ -330,7 +330,6 @@

    Other tags to explore

    - @@ -408,7 +407,7 @@

    Other tags to explore

    - + diff --git a/tag/gpu-workloads.html b/tag/gpu-workloads.html index ed5395e039..71a6d1ce84 100644 --- a/tag/gpu-workloads.html +++ b/tag/gpu-workloads.html @@ -50,7 +50,7 @@ - + @@ -225,7 +225,7 @@ @@ -330,7 +330,6 @@

    Other tags to explore

    - @@ -408,7 +407,7 @@

    Other tags to explore

    - + diff --git a/tag/gpu.html b/tag/gpu.html index f013c9acd0..e60f1be086 100644 --- a/tag/gpu.html +++ b/tag/gpu.html @@ -50,7 +50,7 @@ - + @@ -225,7 +225,7 @@ @@ -332,7 +332,6 @@

    Other tags to explore

    - @@ -410,7 +409,7 @@

    Other tags to explore

    - + diff --git a/tag/grafana.html b/tag/grafana.html index 2fcd3c2eb9..37efe47624 100644 --- a/tag/grafana.html +++ b/tag/grafana.html @@ -50,7 +50,7 @@ - + @@ -225,7 +225,7 @@ @@ -330,7 +330,6 @@

    Other tags to explore

    - @@ -408,7 +407,7 @@

    Other tags to explore

    - + diff --git a/tag/hco.html b/tag/hco.html index b0858ee89b..7d0686faab 100644 --- a/tag/hco.html +++ b/tag/hco.html @@ -50,7 +50,7 @@ - + @@ -225,7 +225,7 @@ @@ -330,7 +330,6 @@

    Other tags to explore

    - @@ -408,7 +407,7 @@

    Other tags to explore

    - + diff --git a/tag/heketi.html b/tag/heketi.html index a3b4478250..2d54f29d8a 100644 --- a/tag/heketi.html +++ b/tag/heketi.html @@ -50,7 +50,7 @@ - + @@ -225,7 +225,7 @@ @@ -330,7 +330,6 @@

    Other tags to explore

    - @@ -408,7 +407,7 @@

    Other tags to explore

    - + diff --git a/tag/hilights.html b/tag/hilights.html index 18dc60a156..fac60585d5 100644 --- a/tag/hilights.html +++ b/tag/hilights.html @@ -50,7 +50,7 @@ - + @@ -225,7 +225,7 @@ @@ -332,7 +332,6 @@

    Other tags to explore

    - @@ -410,7 +409,7 @@

    Other tags to explore

    - + diff --git a/tag/homelab.html b/tag/homelab.html index 391aab3c88..72512183e8 100644 --- a/tag/homelab.html +++ b/tag/homelab.html @@ -50,7 +50,7 @@ - + @@ -225,7 +225,7 @@ @@ -330,7 +330,6 @@

    Other tags to explore

    - @@ -408,7 +407,7 @@

    Other tags to explore

    - + diff --git a/tag/hugepages.html b/tag/hugepages.html index 1c8d57f721..3f9128b909 100644 --- a/tag/hugepages.html +++ b/tag/hugepages.html @@ -50,7 +50,7 @@ - + @@ -225,7 +225,7 @@ @@ -330,7 +330,6 @@

    Other tags to explore

    - @@ -408,7 +407,7 @@

    Other tags to explore

    - + diff --git a/tag/hyperconverged-operator.html b/tag/hyperconverged-operator.html index f35942e212..3565b9f47f 100644 --- a/tag/hyperconverged-operator.html +++ b/tag/hyperconverged-operator.html @@ -50,7 +50,7 @@ - + @@ -225,7 +225,7 @@ @@ -330,7 +330,6 @@

    Other tags to explore

    - @@ -408,7 +407,7 @@

    Other tags to explore

    - + diff --git a/tag/iac.html b/tag/iac.html index 2afa38c701..6e102e288b 100644 --- a/tag/iac.html +++ b/tag/iac.html @@ -50,7 +50,7 @@ - + @@ -225,7 +225,7 @@ @@ -330,7 +330,6 @@

    Other tags to explore

    - @@ -408,7 +407,7 @@

    Other tags to explore

    - + diff --git a/tag/ignition.html b/tag/ignition.html index 1204174a9e..ace8b91775 100644 --- a/tag/ignition.html +++ b/tag/ignition.html @@ -50,7 +50,7 @@ - + @@ -225,7 +225,7 @@ @@ -330,7 +330,6 @@

    Other tags to explore

    - @@ -408,7 +407,7 @@

    Other tags to explore

    - + diff --git a/tag/images.html b/tag/images.html index d5a6bd773e..f96e7988a0 100644 --- a/tag/images.html +++ b/tag/images.html @@ -50,7 +50,7 @@ - + @@ -225,7 +225,7 @@ @@ -332,7 +332,6 @@

    Other tags to explore

    - @@ -410,7 +409,7 @@

    Other tags to explore

    - + diff --git a/tag/import.html b/tag/import.html index ea321112fe..5378d281cf 100644 --- a/tag/import.html +++ b/tag/import.html @@ -50,7 +50,7 @@ - + @@ -225,7 +225,7 @@ @@ -332,7 +332,6 @@

    Other tags to explore

    - @@ -410,7 +409,7 @@

    Other tags to explore

    - + diff --git a/tag/infrastructure.html b/tag/infrastructure.html index 6fa33fd4fe..0146db9036 100644 --- a/tag/infrastructure.html +++ b/tag/infrastructure.html @@ -50,7 +50,7 @@ - + @@ -225,7 +225,7 @@ @@ -334,7 +334,6 @@

    Other tags to explore

    - @@ -412,7 +411,7 @@

    Other tags to explore

    - + diff --git a/tag/installing-kubevirt.html b/tag/installing-kubevirt.html index 450170ce0b..218833b93e 100644 --- a/tag/installing-kubevirt.html +++ b/tag/installing-kubevirt.html @@ -50,7 +50,7 @@ - + @@ -225,7 +225,7 @@ @@ -330,7 +330,6 @@

    Other tags to explore

    - @@ -408,7 +407,7 @@

    Other tags to explore

    - + diff --git a/tag/instancetypes.html b/tag/instancetypes.html index c530397f95..69e149e61b 100644 --- a/tag/instancetypes.html +++ b/tag/instancetypes.html @@ -50,7 +50,7 @@ - + @@ -225,7 +225,7 @@ @@ -330,7 +330,6 @@

    Other tags to explore

    - @@ -408,7 +407,7 @@

    Other tags to explore

    - + diff --git a/tag/intel.html b/tag/intel.html index f5c0c54aed..0812f17fb3 100644 --- a/tag/intel.html +++ b/tag/intel.html @@ -50,7 +50,7 @@ - + @@ -225,7 +225,7 @@ @@ -330,7 +330,6 @@

    Other tags to explore

    - @@ -408,7 +407,7 @@

    Other tags to explore

    - + diff --git a/tag/iptables.html b/tag/iptables.html index c6915d4b9f..4212364be8 100644 --- a/tag/iptables.html +++ b/tag/iptables.html @@ -50,7 +50,7 @@ - + @@ -225,7 +225,7 @@ @@ -332,7 +332,6 @@

    Other tags to explore

    - @@ -410,7 +409,7 @@

    Other tags to explore

    - + diff --git a/tag/istio.html b/tag/istio.html index 59b7dbcfa5..bc1180cf1f 100644 --- a/tag/istio.html +++ b/tag/istio.html @@ -50,7 +50,7 @@ - + @@ -225,7 +225,7 @@ @@ -336,7 +336,6 @@

    Other tags to explore

    - @@ -414,7 +413,7 @@

    Other tags to explore

    - + diff --git a/tag/jenkins.html b/tag/jenkins.html index 1e221a1463..9164d64035 100644 --- a/tag/jenkins.html +++ b/tag/jenkins.html @@ -50,7 +50,7 @@ - + @@ -225,7 +225,7 @@ @@ -332,7 +332,6 @@

    Other tags to explore

    - @@ -410,7 +409,7 @@

    Other tags to explore

    - + diff --git a/tag/kubecon.html b/tag/kubecon.html index 071473c87f..ac3177a663 100644 --- a/tag/kubecon.html +++ b/tag/kubecon.html @@ -50,7 +50,7 @@ - + @@ -225,7 +225,7 @@ @@ -336,7 +336,6 @@

    Other tags to explore

    - @@ -414,7 +413,7 @@

    Other tags to explore

    - + diff --git a/tag/kubefed.html b/tag/kubefed.html index 2b36f90d45..fff001080e 100644 --- a/tag/kubefed.html +++ b/tag/kubefed.html @@ -50,7 +50,7 @@ - + @@ -225,7 +225,7 @@ @@ -330,7 +330,6 @@

    Other tags to explore

    - @@ -408,7 +407,7 @@

    Other tags to explore

    - + diff --git a/tag/kubernetes-nmstate.html b/tag/kubernetes-nmstate.html index bce943ca4c..b98ffa49ed 100644 --- a/tag/kubernetes-nmstate.html +++ b/tag/kubernetes-nmstate.html @@ -50,7 +50,7 @@ - + @@ -225,7 +225,7 @@ @@ -330,7 +330,6 @@

    Other tags to explore

    - @@ -408,7 +407,7 @@

    Other tags to explore

    - + diff --git a/tag/kubernetes.html b/tag/kubernetes.html index fa32c311d0..acd4265d70 100644 --- a/tag/kubernetes.html +++ b/tag/kubernetes.html @@ -50,7 +50,7 @@ - + @@ -225,7 +225,7 @@ @@ -342,7 +342,6 @@

    Other tags to explore

    - @@ -420,7 +419,7 @@

    Other tags to explore

    - + diff --git a/tag/kubetron.html b/tag/kubetron.html index 28a111e1bf..0fe0c192d3 100644 --- a/tag/kubetron.html +++ b/tag/kubetron.html @@ -50,7 +50,7 @@ - + @@ -225,7 +225,7 @@ @@ -330,7 +330,6 @@

    Other tags to explore

    - @@ -408,7 +407,7 @@

    Other tags to explore

    - + diff --git a/tag/kubevirt-ansible.html b/tag/kubevirt-ansible.html index a2d3ec3d00..c5703a40bc 100644 --- a/tag/kubevirt-ansible.html +++ b/tag/kubevirt-ansible.html @@ -50,7 +50,7 @@ - + @@ -225,7 +225,7 @@ @@ -330,7 +330,6 @@

    Other tags to explore

    - @@ -408,7 +407,7 @@

    Other tags to explore

    - + diff --git a/tag/kubevirt-hyperconverged.html b/tag/kubevirt-hyperconverged.html index 0e5bb83c33..23f34653c3 100644 --- a/tag/kubevirt-hyperconverged.html +++ b/tag/kubevirt-hyperconverged.html @@ -50,7 +50,7 @@ - + @@ -225,7 +225,7 @@ @@ -330,7 +330,6 @@

    Other tags to explore

    - @@ -408,7 +407,7 @@

    Other tags to explore

    - + diff --git a/tag/kubevirt-installation.html b/tag/kubevirt-installation.html index 26b3b74928..87fa3f86e0 100644 --- a/tag/kubevirt-installation.html +++ b/tag/kubevirt-installation.html @@ -50,7 +50,7 @@ - + @@ -225,7 +225,7 @@ @@ -330,7 +330,6 @@

    Other tags to explore

    - @@ -408,7 +407,7 @@

    Other tags to explore

    - + diff --git a/tag/kubevirt-objects.html b/tag/kubevirt-objects.html index 6b5409e5be..2c66dcde35 100644 --- a/tag/kubevirt-objects.html +++ b/tag/kubevirt-objects.html @@ -50,7 +50,7 @@ - + @@ -225,7 +225,7 @@ @@ -330,7 +330,6 @@

    Other tags to explore

    - @@ -408,7 +407,7 @@

    Other tags to explore

    - + diff --git a/tag/kubevirt-tekton-tasks.html b/tag/kubevirt-tekton-tasks.html index df6d024421..e35e603227 100644 --- a/tag/kubevirt-tekton-tasks.html +++ b/tag/kubevirt-tekton-tasks.html @@ -50,7 +50,7 @@ - + @@ -225,7 +225,7 @@ @@ -330,7 +330,6 @@

    Other tags to explore

    - @@ -408,7 +407,7 @@

    Other tags to explore

    - + diff --git a/tag/kubevirt-tutorial.html b/tag/kubevirt-tutorial.html index fe9eda132e..615fe838d0 100644 --- a/tag/kubevirt-tutorial.html +++ b/tag/kubevirt-tutorial.html @@ -50,7 +50,7 @@ - + @@ -225,7 +225,7 @@ @@ -332,7 +332,6 @@

    Other tags to explore

    - @@ -410,7 +409,7 @@

    Other tags to explore

    - + diff --git a/tag/kubevirt-upgrade.html b/tag/kubevirt-upgrade.html index 518966bae2..c41627fcf2 100644 --- a/tag/kubevirt-upgrade.html +++ b/tag/kubevirt-upgrade.html @@ -50,7 +50,7 @@ - + @@ -225,7 +225,7 @@ @@ -330,7 +330,6 @@

    Other tags to explore

    - @@ -408,7 +407,7 @@

    Other tags to explore

    - + diff --git a/tag/kubevirt.core.html b/tag/kubevirt.core.html index 1333fb25ac..1aedef926d 100644 --- a/tag/kubevirt.core.html +++ b/tag/kubevirt.core.html @@ -50,7 +50,7 @@ - + @@ -225,7 +225,7 @@ @@ -330,7 +330,6 @@

    Other tags to explore

    - @@ -408,7 +407,7 @@

    Other tags to explore

    - + diff --git a/tag/kubevirt.html b/tag/kubevirt.html index ff0a22f565..58e7ac33a4 100644 --- a/tag/kubevirt.html +++ b/tag/kubevirt.html @@ -50,7 +50,7 @@ - + @@ -225,7 +225,7 @@ @@ -340,7 +340,6 @@

    Other tags to explore

    - @@ -418,7 +417,7 @@

    Other tags to explore

    - + diff --git a/tag/kubevirtci.html b/tag/kubevirtci.html index 81649cb0ad..27979ba8d5 100644 --- a/tag/kubevirtci.html +++ b/tag/kubevirtci.html @@ -50,7 +50,7 @@ - + @@ -225,7 +225,7 @@ @@ -330,7 +330,6 @@

    Other tags to explore

    - @@ -408,7 +407,7 @@

    Other tags to explore

    - + diff --git a/tag/kvm.html b/tag/kvm.html index a936f16926..ddf4a758cd 100644 --- a/tag/kvm.html +++ b/tag/kvm.html @@ -50,7 +50,7 @@ - + @@ -225,7 +225,7 @@ @@ -332,7 +332,6 @@

    Other tags to explore

    - @@ -410,7 +409,7 @@

    Other tags to explore

    - + diff --git a/tag/lab.html b/tag/lab.html index 491ebddda5..6c260854a2 100644 --- a/tag/lab.html +++ b/tag/lab.html @@ -50,7 +50,7 @@ - + @@ -225,7 +225,7 @@ @@ -336,7 +336,6 @@

    Other tags to explore

    - @@ -414,7 +413,7 @@

    Other tags to explore

    - + diff --git a/tag/laboratory.html b/tag/laboratory.html index aabc402e20..45e03566bc 100644 --- a/tag/laboratory.html +++ b/tag/laboratory.html @@ -50,7 +50,7 @@ - + @@ -225,7 +225,7 @@ @@ -330,7 +330,6 @@

    Other tags to explore

    - @@ -408,7 +407,7 @@

    Other tags to explore

    - + diff --git a/tag/libvirt.html b/tag/libvirt.html index 8461ce30c3..cba36187ca 100644 --- a/tag/libvirt.html +++ b/tag/libvirt.html @@ -50,7 +50,7 @@ - + @@ -225,7 +225,7 @@ @@ -334,7 +334,6 @@

    Other tags to explore

    - @@ -412,7 +411,7 @@

    Other tags to explore

    - + diff --git a/tag/lifecycle.html b/tag/lifecycle.html index 99c616c05b..af0c5ce7cf 100644 --- a/tag/lifecycle.html +++ b/tag/lifecycle.html @@ -50,7 +50,7 @@ - + @@ -225,7 +225,7 @@ @@ -332,7 +332,6 @@

    Other tags to explore

    - @@ -410,7 +409,7 @@

    Other tags to explore

    - + diff --git a/tag/live-migration.html b/tag/live-migration.html index ad0b8f243f..410c004aa4 100644 --- a/tag/live-migration.html +++ b/tag/live-migration.html @@ -50,7 +50,7 @@ - + @@ -225,7 +225,7 @@ @@ -330,7 +330,6 @@

    Other tags to explore

    - @@ -408,7 +407,7 @@

    Other tags to explore

    - + diff --git a/tag/load-balancer.html b/tag/load-balancer.html index 47fcba5ca6..fbeaa4a4dc 100644 --- a/tag/load-balancer.html +++ b/tag/load-balancer.html @@ -50,7 +50,7 @@ - + @@ -225,7 +225,7 @@ @@ -330,7 +330,6 @@

    Other tags to explore

    - @@ -408,7 +407,7 @@

    Other tags to explore

    - + diff --git a/tag/memory.html b/tag/memory.html index 0661c3d32a..ce809716bf 100644 --- a/tag/memory.html +++ b/tag/memory.html @@ -50,7 +50,7 @@ - + @@ -225,7 +225,7 @@ @@ -330,7 +330,6 @@

    Other tags to explore

    - @@ -408,7 +407,7 @@

    Other tags to explore

    - + diff --git a/tag/mesh.html b/tag/mesh.html index e7429d604b..af9efbd6b2 100644 --- a/tag/mesh.html +++ b/tag/mesh.html @@ -50,7 +50,7 @@ - + @@ -225,7 +225,7 @@ @@ -330,7 +330,6 @@

    Other tags to explore

    - @@ -408,7 +407,7 @@

    Other tags to explore

    - + diff --git a/tag/metallb.html b/tag/metallb.html index 2c8c87d7c4..d11fd126ea 100644 --- a/tag/metallb.html +++ b/tag/metallb.html @@ -50,7 +50,7 @@ - + @@ -225,7 +225,7 @@ @@ -330,7 +330,6 @@

    Other tags to explore

    - @@ -408,7 +407,7 @@

    Other tags to explore

    - + diff --git a/tag/metrics.html b/tag/metrics.html index 0a120a6ceb..27ea743c78 100644 --- a/tag/metrics.html +++ b/tag/metrics.html @@ -50,7 +50,7 @@ - + @@ -225,7 +225,7 @@ @@ -332,7 +332,6 @@

    Other tags to explore

    - @@ -410,7 +409,7 @@

    Other tags to explore

    - + diff --git a/tag/microsoft-windows-container.html b/tag/microsoft-windows-container.html index 34c491d4d7..53c5600aa5 100644 --- a/tag/microsoft-windows-container.html +++ b/tag/microsoft-windows-container.html @@ -50,7 +50,7 @@ - + @@ -225,7 +225,7 @@ @@ -332,7 +332,6 @@

    Other tags to explore

    - @@ -410,7 +409,7 @@

    Other tags to explore

    - + diff --git a/tag/microsoft-windows-kubernetes.html b/tag/microsoft-windows-kubernetes.html index 9cb84a643c..c42875e569 100644 --- a/tag/microsoft-windows-kubernetes.html +++ b/tag/microsoft-windows-kubernetes.html @@ -50,7 +50,7 @@ - + @@ -225,7 +225,7 @@ @@ -332,7 +332,6 @@

    Other tags to explore

    - @@ -410,7 +409,7 @@

    Other tags to explore

    - + diff --git a/tag/milestone.html b/tag/milestone.html index ed52bc38ec..66b415e5f8 100644 --- a/tag/milestone.html +++ b/tag/milestone.html @@ -50,7 +50,7 @@ - + @@ -225,7 +225,7 @@ @@ -330,7 +330,6 @@

    Other tags to explore

    - @@ -408,7 +407,7 @@

    Other tags to explore

    - + diff --git a/tag/minikube.html b/tag/minikube.html index d3c37aa768..db17de2b14 100644 --- a/tag/minikube.html +++ b/tag/minikube.html @@ -50,7 +50,7 @@ - + @@ -225,7 +225,7 @@ @@ -332,7 +332,6 @@

    Other tags to explore

    - @@ -410,7 +409,7 @@

    Other tags to explore

    - + diff --git a/tag/monitoring.html b/tag/monitoring.html index 2899f3e7e6..98064ec41e 100644 --- a/tag/monitoring.html +++ b/tag/monitoring.html @@ -50,7 +50,7 @@ - + @@ -225,7 +225,7 @@ @@ -330,7 +330,6 @@

    Other tags to explore

    - @@ -408,7 +407,7 @@

    Other tags to explore

    - + diff --git a/tag/multicluster.html b/tag/multicluster.html index d98e592734..483720b306 100644 --- a/tag/multicluster.html +++ b/tag/multicluster.html @@ -50,7 +50,7 @@ - + @@ -225,7 +225,7 @@ @@ -330,7 +330,6 @@

    Other tags to explore

    - @@ -408,7 +407,7 @@

    Other tags to explore

    - + diff --git a/tag/multiple-networks.html b/tag/multiple-networks.html index eab1402623..5cdac3e1d3 100644 --- a/tag/multiple-networks.html +++ b/tag/multiple-networks.html @@ -50,7 +50,7 @@ - + @@ -225,7 +225,7 @@ @@ -332,7 +332,6 @@

    Other tags to explore

    - @@ -410,7 +409,7 @@

    Other tags to explore

    - + diff --git a/tag/multus.html b/tag/multus.html index bc6ccefba2..bacdea896b 100644 --- a/tag/multus.html +++ b/tag/multus.html @@ -50,7 +50,7 @@ - + @@ -225,7 +225,7 @@ @@ -336,7 +336,6 @@

    Other tags to explore

    - @@ -414,7 +413,7 @@

    Other tags to explore

    - + diff --git a/tag/network.html b/tag/network.html index 97c1342045..5fd4bdbbfa 100644 --- a/tag/network.html +++ b/tag/network.html @@ -50,7 +50,7 @@ - + @@ -225,7 +225,7 @@ @@ -332,7 +332,6 @@

    Other tags to explore

    - @@ -410,7 +409,7 @@

    Other tags to explore

    - + diff --git a/tag/networking.html b/tag/networking.html index 80b6f41770..33828a4cbc 100644 --- a/tag/networking.html +++ b/tag/networking.html @@ -50,7 +50,7 @@ - + @@ -225,7 +225,7 @@ @@ -334,7 +334,6 @@

    Other tags to explore

    - @@ -412,7 +411,7 @@

    Other tags to explore

    - + diff --git a/tag/networkpolicy.html b/tag/networkpolicy.html index d01161a2b3..11431cbce9 100644 --- a/tag/networkpolicy.html +++ b/tag/networkpolicy.html @@ -50,7 +50,7 @@ - + @@ -225,7 +225,7 @@ @@ -330,7 +330,6 @@

    Other tags to explore

    - @@ -408,7 +407,7 @@

    Other tags to explore

    - + diff --git a/tag/neutron.html b/tag/neutron.html index b8a38d9e6b..ade7fd232f 100644 --- a/tag/neutron.html +++ b/tag/neutron.html @@ -50,7 +50,7 @@ - + @@ -225,7 +225,7 @@ @@ -330,7 +330,6 @@

    Other tags to explore

    - @@ -408,7 +407,7 @@

    Other tags to explore

    - + diff --git a/tag/nmo.html b/tag/nmo.html index 65c0a6f38c..406a7c0c09 100644 --- a/tag/nmo.html +++ b/tag/nmo.html @@ -50,7 +50,7 @@ - + @@ -225,7 +225,7 @@ @@ -330,7 +330,6 @@

    Other tags to explore

    - @@ -408,7 +407,7 @@

    Other tags to explore

    - + diff --git a/tag/nmstate.html b/tag/nmstate.html index 1a0a11fcf9..a93e79497e 100644 --- a/tag/nmstate.html +++ b/tag/nmstate.html @@ -50,7 +50,7 @@ - + @@ -225,7 +225,7 @@ @@ -330,7 +330,6 @@

    Other tags to explore

    - @@ -408,7 +407,7 @@

    Other tags to explore

    - + diff --git a/tag/node-drain.html b/tag/node-drain.html index 90e6b1b808..e483b387ad 100644 --- a/tag/node-drain.html +++ b/tag/node-drain.html @@ -50,7 +50,7 @@ - + @@ -225,7 +225,7 @@ @@ -332,7 +332,6 @@

    Other tags to explore

    - @@ -410,7 +409,7 @@

    Other tags to explore

    - + diff --git a/tag/node-exporter.html b/tag/node-exporter.html index 018f582c12..311bbd2b50 100644 --- a/tag/node-exporter.html +++ b/tag/node-exporter.html @@ -50,7 +50,7 @@ - + @@ -225,7 +225,7 @@ @@ -330,7 +330,6 @@

    Other tags to explore

    - @@ -408,7 +407,7 @@

    Other tags to explore

    - + diff --git a/tag/novnc.html b/tag/novnc.html index c8ca988673..710fbdc5fc 100644 --- a/tag/novnc.html +++ b/tag/novnc.html @@ -50,7 +50,7 @@ - + @@ -225,7 +225,7 @@ @@ -242,8 +242,6 @@

    Articles tagged with noVNC

  • KubeVirt user interface options (17 Dec 2019)
  • -
  • Access Virtual Machines' graphic console using noVNC (11 Nov 2019)
  • -

    Other tags to explore

    @@ -332,7 +330,6 @@

    Other tags to explore

    - @@ -410,7 +407,7 @@

    Other tags to explore

    - + diff --git a/tag/ntp.html b/tag/ntp.html index c71d972bc5..971959e7f0 100644 --- a/tag/ntp.html +++ b/tag/ntp.html @@ -50,7 +50,7 @@ - + @@ -225,7 +225,7 @@ @@ -330,7 +330,6 @@

    Other tags to explore

    - @@ -408,7 +407,7 @@

    Other tags to explore

    - + diff --git a/tag/numa.html b/tag/numa.html index a90563b842..aaaf4bf27f 100644 --- a/tag/numa.html +++ b/tag/numa.html @@ -50,7 +50,7 @@ - + @@ -225,7 +225,7 @@ @@ -332,7 +332,6 @@

    Other tags to explore

    - @@ -410,7 +409,7 @@

    Other tags to explore

    - + diff --git a/tag/nvidia.html b/tag/nvidia.html index d035438e0e..c3d37fcd13 100644 --- a/tag/nvidia.html +++ b/tag/nvidia.html @@ -50,7 +50,7 @@ - + @@ -225,7 +225,7 @@ @@ -330,7 +330,6 @@

    Other tags to explore

    - @@ -408,7 +407,7 @@

    Other tags to explore

    - + diff --git a/tag/objects.html b/tag/objects.html index 4a3511186b..64c0d90d1c 100644 --- a/tag/objects.html +++ b/tag/objects.html @@ -50,7 +50,7 @@ - + @@ -225,7 +225,7 @@ @@ -330,7 +330,6 @@

    Other tags to explore

    - @@ -408,7 +407,7 @@

    Other tags to explore

    - + diff --git a/tag/octant.html b/tag/octant.html index 08c1d384e3..044175f130 100644 --- a/tag/octant.html +++ b/tag/octant.html @@ -50,7 +50,7 @@ - + @@ -225,7 +225,7 @@ @@ -330,7 +330,6 @@

    Other tags to explore

    - @@ -408,7 +407,7 @@

    Other tags to explore

    - + diff --git a/tag/okd-console.html b/tag/okd-console.html index 93c45fbdbc..d50a3abf61 100644 --- a/tag/okd-console.html +++ b/tag/okd-console.html @@ -50,7 +50,7 @@ - + @@ -225,7 +225,7 @@ @@ -330,7 +330,6 @@

    Other tags to explore

    - @@ -408,7 +407,7 @@

    Other tags to explore

    - + diff --git a/tag/okd.html b/tag/okd.html index 1040fd1047..820d16d548 100644 --- a/tag/okd.html +++ b/tag/okd.html @@ -50,7 +50,7 @@ - + @@ -225,7 +225,7 @@ @@ -330,7 +330,6 @@

    Other tags to explore

    - @@ -408,7 +407,7 @@

    Other tags to explore

    - + diff --git a/tag/openshift-console.html b/tag/openshift-console.html index f8a5719e16..46c9fb95bd 100644 --- a/tag/openshift-console.html +++ b/tag/openshift-console.html @@ -50,7 +50,7 @@ - + @@ -225,7 +225,7 @@ @@ -330,7 +330,6 @@

    Other tags to explore

    - @@ -408,7 +407,7 @@

    Other tags to explore

    - + diff --git a/tag/openshift-web-console.html b/tag/openshift-web-console.html index 9974b40bb3..26fc4327d8 100644 --- a/tag/openshift-web-console.html +++ b/tag/openshift-web-console.html @@ -50,7 +50,7 @@ - + @@ -225,7 +225,7 @@ @@ -330,7 +330,6 @@

    Other tags to explore

    - @@ -408,7 +407,7 @@

    Other tags to explore

    - + diff --git a/tag/openshift.html b/tag/openshift.html index 8d4e1cf4cd..a5479fed27 100644 --- a/tag/openshift.html +++ b/tag/openshift.html @@ -50,7 +50,7 @@ - + @@ -225,7 +225,7 @@ @@ -330,7 +330,6 @@

    Other tags to explore

    - @@ -408,7 +407,7 @@

    Other tags to explore

    - + diff --git a/tag/openstack.html b/tag/openstack.html index e2dc7ac9bd..f35221fbf3 100644 --- a/tag/openstack.html +++ b/tag/openstack.html @@ -50,7 +50,7 @@ - + @@ -225,7 +225,7 @@ @@ -330,7 +330,6 @@

    Other tags to explore

    - @@ -408,7 +407,7 @@

    Other tags to explore

    - + diff --git a/tag/operation.html b/tag/operation.html index 2dd318501c..4ccfb2c96a 100644 --- a/tag/operation.html +++ b/tag/operation.html @@ -50,7 +50,7 @@ - + @@ -225,7 +225,7 @@ @@ -330,7 +330,6 @@

    Other tags to explore

    - @@ -408,7 +407,7 @@

    Other tags to explore

    - + diff --git a/tag/operations.html b/tag/operations.html index a5ee215ff6..53a4de97be 100644 --- a/tag/operations.html +++ b/tag/operations.html @@ -50,7 +50,7 @@ - + @@ -225,7 +225,7 @@ @@ -330,7 +330,6 @@

    Other tags to explore

    - @@ -408,7 +407,7 @@

    Other tags to explore

    - + diff --git a/tag/operator-manual.html b/tag/operator-manual.html index bc4a8c6eb5..438d43a2ee 100644 --- a/tag/operator-manual.html +++ b/tag/operator-manual.html @@ -50,7 +50,7 @@ - + @@ -225,7 +225,7 @@ @@ -330,7 +330,6 @@

    Other tags to explore

    - @@ -408,7 +407,7 @@

    Other tags to explore

    - + diff --git a/tag/overcommitment.html b/tag/overcommitment.html index fe9acd5e3a..b597d5b07f 100644 --- a/tag/overcommitment.html +++ b/tag/overcommitment.html @@ -50,7 +50,7 @@ - + @@ -225,7 +225,7 @@ @@ -330,7 +330,6 @@

    Other tags to explore

    - @@ -408,7 +407,7 @@

    Other tags to explore

    - + diff --git a/tag/ovirt.html b/tag/ovirt.html index 0dec064eb9..f8a73e6bb5 100644 --- a/tag/ovirt.html +++ b/tag/ovirt.html @@ -50,7 +50,7 @@ - + @@ -225,7 +225,7 @@ @@ -330,7 +330,6 @@

    Other tags to explore

    - @@ -408,7 +407,7 @@

    Other tags to explore

    - + diff --git a/tag/ovn.html b/tag/ovn.html index 9758ccb261..3b56fc38a3 100644 --- a/tag/ovn.html +++ b/tag/ovn.html @@ -50,7 +50,7 @@ - + @@ -225,7 +225,7 @@ @@ -334,7 +334,6 @@

    Other tags to explore

    - @@ -412,7 +411,7 @@

    Other tags to explore

    - + diff --git a/tag/ovs-cni.html b/tag/ovs-cni.html index a0887dba3c..29987e161d 100644 --- a/tag/ovs-cni.html +++ b/tag/ovs-cni.html @@ -50,7 +50,7 @@ - + @@ -225,7 +225,7 @@ @@ -330,7 +330,6 @@

    Other tags to explore

    - @@ -408,7 +407,7 @@

    Other tags to explore

    - + diff --git a/tag/party-time.html b/tag/party-time.html index 03f64735b5..f9e7c78986 100644 --- a/tag/party-time.html +++ b/tag/party-time.html @@ -50,7 +50,7 @@ - + @@ -225,7 +225,7 @@ @@ -330,7 +330,6 @@

    Other tags to explore

    - @@ -408,7 +407,7 @@

    Other tags to explore

    - + diff --git a/tag/pass-through.html b/tag/pass-through.html index 7d5b77db60..8880e17567 100644 --- a/tag/pass-through.html +++ b/tag/pass-through.html @@ -50,7 +50,7 @@ - + @@ -225,7 +225,7 @@ @@ -330,7 +330,6 @@

    Other tags to explore

    - @@ -408,7 +407,7 @@

    Other tags to explore

    - + diff --git a/tag/passthrough.html b/tag/passthrough.html index 8d4ee57a5f..03679e86dd 100644 --- a/tag/passthrough.html +++ b/tag/passthrough.html @@ -50,7 +50,7 @@ - + @@ -225,7 +225,7 @@ @@ -330,7 +330,6 @@

    Other tags to explore

    - @@ -408,7 +407,7 @@

    Other tags to explore

    - + diff --git a/tag/preferences.html b/tag/preferences.html index f3587554a9..c17cfe9956 100644 --- a/tag/preferences.html +++ b/tag/preferences.html @@ -50,7 +50,7 @@ - + @@ -225,7 +225,7 @@ @@ -330,7 +330,6 @@

    Other tags to explore

    - @@ -408,7 +407,7 @@

    Other tags to explore

    - + diff --git a/tag/prometheus-operator.html b/tag/prometheus-operator.html index 312a9f6edd..d8620caa61 100644 --- a/tag/prometheus-operator.html +++ b/tag/prometheus-operator.html @@ -50,7 +50,7 @@ - + @@ -225,7 +225,7 @@ @@ -330,7 +330,6 @@

    Other tags to explore

    - @@ -408,7 +407,7 @@

    Other tags to explore

    - + diff --git a/tag/prometheus.html b/tag/prometheus.html index 76feb3e04a..6ac578e922 100644 --- a/tag/prometheus.html +++ b/tag/prometheus.html @@ -50,7 +50,7 @@ - + @@ -225,7 +225,7 @@ @@ -334,7 +334,6 @@

    Other tags to explore

    - @@ -412,7 +411,7 @@

    Other tags to explore

    - + diff --git a/tag/prow.html b/tag/prow.html index 3bc7a36350..37d8d4979d 100644 --- a/tag/prow.html +++ b/tag/prow.html @@ -50,7 +50,7 @@ - + @@ -225,7 +225,7 @@ @@ -332,7 +332,6 @@

    Other tags to explore

    - @@ -410,7 +409,7 @@

    Other tags to explore

    - + diff --git a/tag/qemu.html b/tag/qemu.html index 8f17eb06fe..f05c818bad 100644 --- a/tag/qemu.html +++ b/tag/qemu.html @@ -50,7 +50,7 @@ - + @@ -225,7 +225,7 @@ @@ -332,7 +332,6 @@

    Other tags to explore

    - @@ -410,7 +409,7 @@

    Other tags to explore

    - + diff --git a/tag/quickstart.html b/tag/quickstart.html index 00713a0bdd..5cdca85894 100644 --- a/tag/quickstart.html +++ b/tag/quickstart.html @@ -50,7 +50,7 @@ - + @@ -225,7 +225,7 @@ @@ -330,7 +330,6 @@

    Other tags to explore

    - @@ -408,7 +407,7 @@

    Other tags to explore

    - + diff --git a/tag/rbac.html b/tag/rbac.html index 34fea6a11f..71ff41be5d 100644 --- a/tag/rbac.html +++ b/tag/rbac.html @@ -50,7 +50,7 @@ - + @@ -225,7 +225,7 @@ @@ -330,7 +330,6 @@

    Other tags to explore

    - @@ -408,7 +407,7 @@

    Other tags to explore

    - + diff --git a/tag/real-time.html b/tag/real-time.html index 1f4c2883a1..6cc5d8f12d 100644 --- a/tag/real-time.html +++ b/tag/real-time.html @@ -50,7 +50,7 @@ - + @@ -225,7 +225,7 @@ @@ -330,7 +330,6 @@

    Other tags to explore

    - @@ -408,7 +407,7 @@

    Other tags to explore

    - + diff --git a/tag/registry.html b/tag/registry.html index 0a1a161e67..ca90b705d3 100644 --- a/tag/registry.html +++ b/tag/registry.html @@ -50,7 +50,7 @@ - + @@ -225,7 +225,7 @@ @@ -330,7 +330,6 @@

    Other tags to explore

    - @@ -408,7 +407,7 @@

    Other tags to explore

    - + diff --git a/tag/release-notes.html b/tag/release-notes.html index 64535ea642..01df6ce7fa 100644 --- a/tag/release-notes.html +++ b/tag/release-notes.html @@ -50,7 +50,7 @@ - + @@ -225,7 +225,7 @@ @@ -502,7 +502,6 @@

    Other tags to explore

    - @@ -580,7 +579,7 @@

    Other tags to explore

    - + diff --git a/tag/release.html b/tag/release.html index 7f8b99452c..3664ff66b3 100644 --- a/tag/release.html +++ b/tag/release.html @@ -50,7 +50,7 @@ - + @@ -225,7 +225,7 @@ @@ -332,7 +332,6 @@

    Other tags to explore

    - @@ -410,7 +409,7 @@

    Other tags to explore

    - + diff --git a/tag/remove-vm.html b/tag/remove-vm.html index 0d436c395d..4b6b99d5f7 100644 --- a/tag/remove-vm.html +++ b/tag/remove-vm.html @@ -50,7 +50,7 @@ - + @@ -225,7 +225,7 @@ @@ -330,7 +330,6 @@

    Other tags to explore

    - @@ -408,7 +407,7 @@

    Other tags to explore

    - + diff --git a/tag/review.html b/tag/review.html index 2c96582512..16c3c4027d 100644 --- a/tag/review.html +++ b/tag/review.html @@ -50,7 +50,7 @@ - + @@ -225,7 +225,7 @@ @@ -330,7 +330,6 @@

    Other tags to explore

    - @@ -408,7 +407,7 @@

    Other tags to explore

    - + diff --git a/tag/rhcos.html b/tag/rhcos.html index 65c882a0d6..50c34c9c93 100644 --- a/tag/rhcos.html +++ b/tag/rhcos.html @@ -50,7 +50,7 @@ - + @@ -225,7 +225,7 @@ @@ -330,7 +330,6 @@

    Other tags to explore

    - @@ -408,7 +407,7 @@

    Other tags to explore

    - + diff --git a/tag/roadmap.html b/tag/roadmap.html index 880f44117b..903485a355 100644 --- a/tag/roadmap.html +++ b/tag/roadmap.html @@ -50,7 +50,7 @@ - + @@ -225,7 +225,7 @@ @@ -332,7 +332,6 @@

    Other tags to explore

    - @@ -410,7 +409,7 @@

    Other tags to explore

    - + diff --git a/tag/roles.html b/tag/roles.html index 92403a3760..36da115b59 100644 --- a/tag/roles.html +++ b/tag/roles.html @@ -50,7 +50,7 @@ - + @@ -225,7 +225,7 @@ @@ -330,7 +330,6 @@

    Other tags to explore

    - @@ -408,7 +407,7 @@

    Other tags to explore

    - + diff --git a/tag/rook.html b/tag/rook.html index 9216b6a3b2..9964d28792 100644 --- a/tag/rook.html +++ b/tag/rook.html @@ -50,7 +50,7 @@ - + @@ -225,7 +225,7 @@ @@ -330,7 +330,6 @@

    Other tags to explore

    - @@ -408,7 +407,7 @@

    Other tags to explore

    - + diff --git a/tag/sandbox.html b/tag/sandbox.html index 836d7d1114..990fd81c66 100644 --- a/tag/sandbox.html +++ b/tag/sandbox.html @@ -50,7 +50,7 @@ - + @@ -225,7 +225,7 @@ @@ -330,7 +330,6 @@

    Other tags to explore

    - @@ -408,7 +407,7 @@

    Other tags to explore

    - + diff --git a/tag/scheduling.html b/tag/scheduling.html index 77a01b721d..4fded09705 100644 --- a/tag/scheduling.html +++ b/tag/scheduling.html @@ -50,7 +50,7 @@ - + @@ -225,7 +225,7 @@ @@ -330,7 +330,6 @@

    Other tags to explore

    - @@ -408,7 +407,7 @@

    Other tags to explore

    - + diff --git a/tag/sdn.html b/tag/sdn.html index e27871cd7b..ac15ef62c7 100644 --- a/tag/sdn.html +++ b/tag/sdn.html @@ -50,7 +50,7 @@ - + @@ -225,7 +225,7 @@ @@ -334,7 +334,6 @@

    Other tags to explore

    - @@ -412,7 +411,7 @@

    Other tags to explore

    - + diff --git a/tag/security.html b/tag/security.html index c185d1375d..dd1e22a1df 100644 --- a/tag/security.html +++ b/tag/security.html @@ -50,7 +50,7 @@ - + @@ -225,7 +225,7 @@ @@ -332,7 +332,6 @@

    Other tags to explore

    - @@ -410,7 +409,7 @@

    Other tags to explore

    - + diff --git a/tag/service-mesh.html b/tag/service-mesh.html index 7e0ed34456..184d7915e9 100644 --- a/tag/service-mesh.html +++ b/tag/service-mesh.html @@ -50,7 +50,7 @@ - + @@ -225,7 +225,7 @@ @@ -334,7 +334,6 @@

    Other tags to explore

    - @@ -412,7 +411,7 @@

    Other tags to explore

    - + diff --git a/tag/serviceaccount.html b/tag/serviceaccount.html index 61ae0e3887..b6689d10d3 100644 --- a/tag/serviceaccount.html +++ b/tag/serviceaccount.html @@ -50,7 +50,7 @@ - + @@ -225,7 +225,7 @@ @@ -330,7 +330,6 @@

    Other tags to explore

    - @@ -408,7 +407,7 @@

    Other tags to explore

    - + diff --git a/tag/skydive.html b/tag/skydive.html index 2b5dac6be6..36948f8005 100644 --- a/tag/skydive.html +++ b/tag/skydive.html @@ -50,7 +50,7 @@ - + @@ -225,7 +225,7 @@ @@ -330,7 +330,6 @@

    Other tags to explore

    - @@ -408,7 +407,7 @@

    Other tags to explore

    - + diff --git a/tag/start-vm.html b/tag/start-vm.html index e4aec63bc1..bab8998d6a 100644 --- a/tag/start-vm.html +++ b/tag/start-vm.html @@ -50,7 +50,7 @@ - + @@ -225,7 +225,7 @@ @@ -330,7 +330,6 @@

    Other tags to explore

    - @@ -408,7 +407,7 @@

    Other tags to explore

    - + diff --git a/tag/stop-vm.html b/tag/stop-vm.html index 228efb75ac..710d6a9e66 100644 --- a/tag/stop-vm.html +++ b/tag/stop-vm.html @@ -50,7 +50,7 @@ - + @@ -225,7 +225,7 @@ @@ -330,7 +330,6 @@

    Other tags to explore

    - @@ -408,7 +407,7 @@

    Other tags to explore

    - + diff --git a/tag/storage.html b/tag/storage.html index 6213fa7f70..7e37c4a3cf 100644 --- a/tag/storage.html +++ b/tag/storage.html @@ -50,7 +50,7 @@ - + @@ -225,7 +225,7 @@ @@ -334,7 +334,6 @@

    Other tags to explore

    - @@ -412,7 +411,7 @@

    Other tags to explore

    - + diff --git a/tag/talk.html b/tag/talk.html index 6bcab9bfe9..a082a86faa 100644 --- a/tag/talk.html +++ b/tag/talk.html @@ -50,7 +50,7 @@ - + @@ -225,7 +225,7 @@ @@ -330,7 +330,6 @@

    Other tags to explore

    - @@ -408,7 +407,7 @@

    Other tags to explore

    - + diff --git a/tag/tekton-pipelines.html b/tag/tekton-pipelines.html index 543abc8594..006e725bce 100644 --- a/tag/tekton-pipelines.html +++ b/tag/tekton-pipelines.html @@ -50,7 +50,7 @@ - + @@ -225,7 +225,7 @@ @@ -330,7 +330,6 @@

    Other tags to explore

    - @@ -408,7 +407,7 @@

    Other tags to explore

    - + diff --git a/tag/topologykeys.html b/tag/topologykeys.html index 2aacd6ec61..a935557ad7 100644 --- a/tag/topologykeys.html +++ b/tag/topologykeys.html @@ -50,7 +50,7 @@ - + @@ -225,7 +225,7 @@ @@ -330,7 +330,6 @@

    Other tags to explore

    - @@ -408,7 +407,7 @@

    Other tags to explore

    - + diff --git a/tag/tproxy.html b/tag/tproxy.html index 40d934f662..96227dc96e 100644 --- a/tag/tproxy.html +++ b/tag/tproxy.html @@ -50,7 +50,7 @@ - + @@ -225,7 +225,7 @@ @@ -330,7 +330,6 @@

    Other tags to explore

    - @@ -408,7 +407,7 @@

    Other tags to explore

    - + diff --git a/tag/unit-testing.html b/tag/unit-testing.html index 2e2d15f380..8a1905cf72 100644 --- a/tag/unit-testing.html +++ b/tag/unit-testing.html @@ -50,7 +50,7 @@ - + @@ -225,7 +225,7 @@ @@ -330,7 +330,6 @@

    Other tags to explore

    - @@ -408,7 +407,7 @@

    Other tags to explore

    - + diff --git a/tag/upgrading.html b/tag/upgrading.html index 7b160b8b8c..f357b9b169 100644 --- a/tag/upgrading.html +++ b/tag/upgrading.html @@ -50,7 +50,7 @@ - + @@ -225,7 +225,7 @@ @@ -330,7 +330,6 @@

    Other tags to explore

    - @@ -408,7 +407,7 @@

    Other tags to explore

    - + diff --git a/tag/upload.html b/tag/upload.html index fb6c96d7d2..1e0f4ad4a4 100644 --- a/tag/upload.html +++ b/tag/upload.html @@ -50,7 +50,7 @@ - + @@ -225,7 +225,7 @@ @@ -330,7 +330,6 @@

    Other tags to explore

    - @@ -408,7 +407,7 @@

    Other tags to explore

    - + diff --git a/tag/use-kubevirt.html b/tag/use-kubevirt.html index 594c40bce7..aad076c4b2 100644 --- a/tag/use-kubevirt.html +++ b/tag/use-kubevirt.html @@ -50,7 +50,7 @@ - + @@ -225,7 +225,7 @@ @@ -330,7 +330,6 @@

    Other tags to explore

    - @@ -408,7 +407,7 @@

    Other tags to explore

    - + diff --git a/tag/user-interface.html b/tag/user-interface.html index 5f389a34e4..bdc1f08ad3 100644 --- a/tag/user-interface.html +++ b/tag/user-interface.html @@ -50,7 +50,7 @@ - + @@ -225,7 +225,7 @@ @@ -330,7 +330,6 @@

    Other tags to explore

    - @@ -408,7 +407,7 @@

    Other tags to explore

    - + diff --git a/tag/v1.0.html b/tag/v1.0.html index f29bd564ab..cade222d6d 100644 --- a/tag/v1.0.html +++ b/tag/v1.0.html @@ -50,7 +50,7 @@ - + @@ -225,7 +225,7 @@ @@ -330,7 +330,6 @@

    Other tags to explore

    - @@ -408,7 +407,7 @@

    Other tags to explore

    - + diff --git a/tag/v1.1.0.html b/tag/v1.1.0.html index ccbb787761..50d0172464 100644 --- a/tag/v1.1.0.html +++ b/tag/v1.1.0.html @@ -50,7 +50,7 @@ - + @@ -225,7 +225,7 @@ @@ -330,7 +330,6 @@

    Other tags to explore

    - @@ -408,7 +407,7 @@

    Other tags to explore

    - + diff --git a/tag/vagrant.html b/tag/vagrant.html index 4b14bc717a..2788becff9 100644 --- a/tag/vagrant.html +++ b/tag/vagrant.html @@ -50,7 +50,7 @@ - + @@ -225,7 +225,7 @@ @@ -330,7 +330,6 @@

    Other tags to explore

    - @@ -408,7 +407,7 @@

    Other tags to explore

    - + diff --git a/tag/vgpu.html b/tag/vgpu.html index 879e0859cd..7cd05e8404 100644 --- a/tag/vgpu.html +++ b/tag/vgpu.html @@ -50,7 +50,7 @@ - + @@ -225,7 +225,7 @@ @@ -330,7 +330,6 @@

    Other tags to explore

    - @@ -408,7 +407,7 @@

    Other tags to explore

    - + diff --git a/tag/video.html b/tag/video.html index c969fb0b15..51309a06f5 100644 --- a/tag/video.html +++ b/tag/video.html @@ -50,7 +50,7 @@ - + @@ -225,7 +225,7 @@ @@ -330,7 +330,6 @@

    Other tags to explore

    - @@ -408,7 +407,7 @@

    Other tags to explore

    - + diff --git a/tag/virt-customize.html b/tag/virt-customize.html index d3616082e8..39f8f56cef 100644 --- a/tag/virt-customize.html +++ b/tag/virt-customize.html @@ -50,7 +50,7 @@ - + @@ -225,7 +225,7 @@ @@ -330,7 +330,6 @@

    Other tags to explore

    - @@ -408,7 +407,7 @@

    Other tags to explore

    - + diff --git a/tag/virtlet.html b/tag/virtlet.html index 125df65bae..82eab8a84c 100644 --- a/tag/virtlet.html +++ b/tag/virtlet.html @@ -50,7 +50,7 @@ - + @@ -225,7 +225,7 @@ @@ -330,7 +330,6 @@

    Other tags to explore

    - @@ -408,7 +407,7 @@

    Other tags to explore

    - + diff --git a/tag/virtual-machine-management.html b/tag/virtual-machine-management.html index 330d743215..835f75e3ef 100644 --- a/tag/virtual-machine-management.html +++ b/tag/virtual-machine-management.html @@ -50,7 +50,7 @@ - + @@ -225,7 +225,7 @@ @@ -330,7 +330,6 @@

    Other tags to explore

    - @@ -408,7 +407,7 @@

    Other tags to explore

    - + diff --git a/tag/virtual-machine.html b/tag/virtual-machine.html index c497412437..49f8f6f29b 100644 --- a/tag/virtual-machine.html +++ b/tag/virtual-machine.html @@ -50,7 +50,7 @@ - + @@ -225,7 +225,7 @@ @@ -392,7 +392,6 @@

    Other tags to explore

    - @@ -470,7 +469,7 @@

    Other tags to explore

    - + diff --git a/tag/virtual-machines.html b/tag/virtual-machines.html index dfd51be26b..199fa916e5 100644 --- a/tag/virtual-machines.html +++ b/tag/virtual-machines.html @@ -50,7 +50,7 @@ - + @@ -225,7 +225,7 @@ @@ -330,7 +330,6 @@

    Other tags to explore

    - @@ -408,7 +407,7 @@

    Other tags to explore

    - + diff --git a/tag/virtualmachine.html b/tag/virtualmachine.html index b616c762d1..a37479cb3f 100644 --- a/tag/virtualmachine.html +++ b/tag/virtualmachine.html @@ -50,7 +50,7 @@ - + @@ -225,7 +225,7 @@ @@ -332,7 +332,6 @@

    Other tags to explore

    - @@ -410,7 +409,7 @@

    Other tags to explore

    - + diff --git a/tag/virtualmachineinstancetype.html b/tag/virtualmachineinstancetype.html index 34e65e7732..c81f94fa7b 100644 --- a/tag/virtualmachineinstancetype.html +++ b/tag/virtualmachineinstancetype.html @@ -50,7 +50,7 @@ - + @@ -225,7 +225,7 @@ @@ -330,7 +330,6 @@

    Other tags to explore

    - @@ -408,7 +407,7 @@

    Other tags to explore

    - + diff --git a/tag/virtualmachinepreference.html b/tag/virtualmachinepreference.html index 4aa0ad015a..b87eb6ff2b 100644 --- a/tag/virtualmachinepreference.html +++ b/tag/virtualmachinepreference.html @@ -50,7 +50,7 @@ - + @@ -225,7 +225,7 @@ @@ -330,7 +330,6 @@

    Other tags to explore

    - @@ -408,7 +407,7 @@

    Other tags to explore

    - + diff --git a/tag/virtvnc.html b/tag/virtvnc.html index c76948d5ee..36ffaa7a3c 100644 --- a/tag/virtvnc.html +++ b/tag/virtvnc.html @@ -50,7 +50,7 @@ - + @@ -225,7 +225,7 @@ @@ -330,7 +330,6 @@

    Other tags to explore

    - @@ -408,7 +407,7 @@

    Other tags to explore

    - + diff --git a/tag/vm-import.html b/tag/vm-import.html index 6600a2d79a..e068822d29 100644 --- a/tag/vm-import.html +++ b/tag/vm-import.html @@ -50,7 +50,7 @@ - + @@ -225,7 +225,7 @@ @@ -332,7 +332,6 @@

    Other tags to explore

    - @@ -410,7 +409,7 @@

    Other tags to explore

    - + diff --git a/tag/vm.html b/tag/vm.html index 4ff95b8805..49c80d1afb 100644 --- a/tag/vm.html +++ b/tag/vm.html @@ -50,7 +50,7 @@ - + @@ -225,7 +225,7 @@ @@ -378,7 +378,6 @@

    Other tags to explore

    - @@ -456,7 +455,7 @@

    Other tags to explore

    - + diff --git a/tag/volume-types.html b/tag/volume-types.html index ae05367780..080e9f710e 100644 --- a/tag/volume-types.html +++ b/tag/volume-types.html @@ -50,7 +50,7 @@ - + @@ -225,7 +225,7 @@ @@ -330,7 +330,6 @@

    Other tags to explore

    - @@ -408,7 +407,7 @@

    Other tags to explore

    - + diff --git a/tag/vscode.html b/tag/vscode.html index 303790f895..b368719da2 100644 --- a/tag/vscode.html +++ b/tag/vscode.html @@ -50,7 +50,7 @@ - + @@ -225,7 +225,7 @@ @@ -330,7 +330,6 @@

    Other tags to explore

    - @@ -408,7 +407,7 @@

    Other tags to explore

    - + diff --git a/tag/weavenet.html b/tag/weavenet.html index 042bc93979..003a17b235 100644 --- a/tag/weavenet.html +++ b/tag/weavenet.html @@ -50,7 +50,7 @@ - + @@ -225,7 +225,7 @@ @@ -330,7 +330,6 @@

    Other tags to explore

    - @@ -408,7 +407,7 @@

    Other tags to explore

    - + diff --git a/tag/web-interface.html b/tag/web-interface.html index 86568cc1d1..d13eeeb642 100644 --- a/tag/web-interface.html +++ b/tag/web-interface.html @@ -50,7 +50,7 @@ - + @@ -225,7 +225,7 @@ @@ -332,7 +332,6 @@

    Other tags to explore

    - @@ -410,7 +409,7 @@

    Other tags to explore

    - + diff --git a/tag/website.html b/tag/website.html index af84daeae3..73009a5e7b 100644 --- a/tag/website.html +++ b/tag/website.html @@ -50,7 +50,7 @@ - + @@ -225,7 +225,7 @@ @@ -330,7 +330,6 @@

    Other tags to explore

    - @@ -408,7 +407,7 @@

    Other tags to explore

    - + diff --git a/tag/windows.html b/tag/windows.html index 8df874290e..f97aad32c1 100644 --- a/tag/windows.html +++ b/tag/windows.html @@ -50,7 +50,7 @@ - + @@ -225,7 +225,7 @@ @@ -330,7 +330,6 @@

    Other tags to explore

    - @@ -408,7 +407,7 @@

    Other tags to explore

    - + diff --git a/videos/community/meetings.html b/videos/community/meetings.html index 1e402809ac..f2467b860b 100644 --- a/videos/community/meetings.html +++ b/videos/community/meetings.html @@ -50,7 +50,7 @@ - + @@ -225,7 +225,7 @@ diff --git a/videos/demos.html b/videos/demos.html index db633ae89a..b9b7abed22 100644 --- a/videos/demos.html +++ b/videos/demos.html @@ -50,7 +50,7 @@ - + @@ -225,7 +225,7 @@ diff --git a/videos/index.html b/videos/index.html index 8e650208a3..33390dd246 100644 --- a/videos/index.html +++ b/videos/index.html @@ -50,7 +50,7 @@ - + @@ -225,7 +225,7 @@ diff --git a/videos/kubevirt-summit.html b/videos/kubevirt-summit.html index 6168792ee0..e19a8306d9 100644 --- a/videos/kubevirt-summit.html +++ b/videos/kubevirt-summit.html @@ -50,7 +50,7 @@ - + @@ -225,7 +225,7 @@ diff --git a/videos/talks.html b/videos/talks.html index 46ec70a633..99da947016 100644 --- a/videos/talks.html +++ b/videos/talks.html @@ -50,7 +50,7 @@ - + @@ -225,7 +225,7 @@ diff --git a/videos/tech-demos.html b/videos/tech-demos.html index 49b409093f..fbf97aeb42 100644 --- a/videos/tech-demos.html +++ b/videos/tech-demos.html @@ -50,7 +50,7 @@ - + @@ -225,7 +225,7 @@