diff --git a/.nojekyll b/.nojekyll new file mode 100644 index 0000000..e69de29 diff --git a/404.html b/404.html new file mode 100644 index 0000000..d4e746a --- /dev/null +++ b/404.html @@ -0,0 +1,894 @@ + + + + + + + + + + + + + + + + + + + + + + Actuated + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+
+ +
+ + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ +

404 - Not found

+ +
+
+ + +
+ +
+ + + +
+
+
+
+ + + + + + + + + \ No newline at end of file diff --git a/CNAME b/CNAME new file mode 100644 index 0000000..67e900c --- /dev/null +++ b/CNAME @@ -0,0 +1 @@ +docs.actuated.dev diff --git a/assets/images/favicon.png b/assets/images/favicon.png new file mode 100644 index 0000000..1cf13b9 Binary files /dev/null and b/assets/images/favicon.png differ diff --git a/assets/javascripts/bundle.51d95adb.min.js b/assets/javascripts/bundle.51d95adb.min.js new file mode 100644 index 0000000..b20ec68 --- /dev/null +++ b/assets/javascripts/bundle.51d95adb.min.js @@ -0,0 +1,29 @@ +"use strict";(()=>{var Hi=Object.create;var xr=Object.defineProperty;var Pi=Object.getOwnPropertyDescriptor;var $i=Object.getOwnPropertyNames,kt=Object.getOwnPropertySymbols,Ii=Object.getPrototypeOf,Er=Object.prototype.hasOwnProperty,an=Object.prototype.propertyIsEnumerable;var on=(e,t,r)=>t in e?xr(e,t,{enumerable:!0,configurable:!0,writable:!0,value:r}):e[t]=r,P=(e,t)=>{for(var r in t||(t={}))Er.call(t,r)&&on(e,r,t[r]);if(kt)for(var r of kt(t))an.call(t,r)&&on(e,r,t[r]);return e};var sn=(e,t)=>{var r={};for(var n in e)Er.call(e,n)&&t.indexOf(n)<0&&(r[n]=e[n]);if(e!=null&&kt)for(var n of kt(e))t.indexOf(n)<0&&an.call(e,n)&&(r[n]=e[n]);return r};var Ht=(e,t)=>()=>(t||e((t={exports:{}}).exports,t),t.exports);var Fi=(e,t,r,n)=>{if(t&&typeof t=="object"||typeof t=="function")for(let o of $i(t))!Er.call(e,o)&&o!==r&&xr(e,o,{get:()=>t[o],enumerable:!(n=Pi(t,o))||n.enumerable});return e};var yt=(e,t,r)=>(r=e!=null?Hi(Ii(e)):{},Fi(t||!e||!e.__esModule?xr(r,"default",{value:e,enumerable:!0}):r,e));var fn=Ht((wr,cn)=>{(function(e,t){typeof wr=="object"&&typeof cn!="undefined"?t():typeof define=="function"&&define.amd?define(t):t()})(wr,function(){"use strict";function e(r){var n=!0,o=!1,i=null,a={text:!0,search:!0,url:!0,tel:!0,email:!0,password:!0,number:!0,date:!0,month:!0,week:!0,time:!0,datetime:!0,"datetime-local":!0};function s(T){return!!(T&&T!==document&&T.nodeName!=="HTML"&&T.nodeName!=="BODY"&&"classList"in T&&"contains"in T.classList)}function f(T){var Ke=T.type,We=T.tagName;return!!(We==="INPUT"&&a[Ke]&&!T.readOnly||We==="TEXTAREA"&&!T.readOnly||T.isContentEditable)}function c(T){T.classList.contains("focus-visible")||(T.classList.add("focus-visible"),T.setAttribute("data-focus-visible-added",""))}function u(T){T.hasAttribute("data-focus-visible-added")&&(T.classList.remove("focus-visible"),T.removeAttribute("data-focus-visible-added"))}function p(T){T.metaKey||T.altKey||T.ctrlKey||(s(r.activeElement)&&c(r.activeElement),n=!0)}function m(T){n=!1}function d(T){s(T.target)&&(n||f(T.target))&&c(T.target)}function h(T){s(T.target)&&(T.target.classList.contains("focus-visible")||T.target.hasAttribute("data-focus-visible-added"))&&(o=!0,window.clearTimeout(i),i=window.setTimeout(function(){o=!1},100),u(T.target))}function v(T){document.visibilityState==="hidden"&&(o&&(n=!0),B())}function B(){document.addEventListener("mousemove",z),document.addEventListener("mousedown",z),document.addEventListener("mouseup",z),document.addEventListener("pointermove",z),document.addEventListener("pointerdown",z),document.addEventListener("pointerup",z),document.addEventListener("touchmove",z),document.addEventListener("touchstart",z),document.addEventListener("touchend",z)}function 
re(){document.removeEventListener("mousemove",z),document.removeEventListener("mousedown",z),document.removeEventListener("mouseup",z),document.removeEventListener("pointermove",z),document.removeEventListener("pointerdown",z),document.removeEventListener("pointerup",z),document.removeEventListener("touchmove",z),document.removeEventListener("touchstart",z),document.removeEventListener("touchend",z)}function z(T){T.target.nodeName&&T.target.nodeName.toLowerCase()==="html"||(n=!1,re())}document.addEventListener("keydown",p,!0),document.addEventListener("mousedown",m,!0),document.addEventListener("pointerdown",m,!0),document.addEventListener("touchstart",m,!0),document.addEventListener("visibilitychange",v,!0),B(),r.addEventListener("focus",d,!0),r.addEventListener("blur",h,!0),r.nodeType===Node.DOCUMENT_FRAGMENT_NODE&&r.host?r.host.setAttribute("data-js-focus-visible",""):r.nodeType===Node.DOCUMENT_NODE&&(document.documentElement.classList.add("js-focus-visible"),document.documentElement.setAttribute("data-js-focus-visible",""))}if(typeof window!="undefined"&&typeof document!="undefined"){window.applyFocusVisiblePolyfill=e;var t;try{t=new CustomEvent("focus-visible-polyfill-ready")}catch(r){t=document.createEvent("CustomEvent"),t.initCustomEvent("focus-visible-polyfill-ready",!1,!1,{})}window.dispatchEvent(t)}typeof document!="undefined"&&e(document)})});var un=Ht(Sr=>{(function(e){var t=function(){try{return!!Symbol.iterator}catch(c){return!1}},r=t(),n=function(c){var u={next:function(){var p=c.shift();return{done:p===void 0,value:p}}};return r&&(u[Symbol.iterator]=function(){return u}),u},o=function(c){return encodeURIComponent(c).replace(/%20/g,"+")},i=function(c){return decodeURIComponent(String(c).replace(/\+/g," "))},a=function(){var c=function(p){Object.defineProperty(this,"_entries",{writable:!0,value:{}});var m=typeof p;if(m!=="undefined")if(m==="string")p!==""&&this._fromString(p);else if(p instanceof c){var d=this;p.forEach(function(re,z){d.append(z,re)})}else if(p!==null&&m==="object")if(Object.prototype.toString.call(p)==="[object Array]")for(var h=0;hd[0]?1:0}),c._entries&&(c._entries={});for(var p=0;p1?i(d[1]):"")}})})(typeof global!="undefined"?global:typeof window!="undefined"?window:typeof self!="undefined"?self:Sr);(function(e){var t=function(){try{var o=new e.URL("b","http://a");return o.pathname="c d",o.href==="http://a/c%20d"&&o.searchParams}catch(i){return!1}},r=function(){var o=e.URL,i=function(f,c){typeof f!="string"&&(f=String(f)),c&&typeof c!="string"&&(c=String(c));var u=document,p;if(c&&(e.location===void 0||c!==e.location.href)){c=c.toLowerCase(),u=document.implementation.createHTMLDocument(""),p=u.createElement("base"),p.href=c,u.head.appendChild(p);try{if(p.href.indexOf(c)!==0)throw new Error(p.href)}catch(T){throw new Error("URL unable to set base "+c+" due to "+T)}}var m=u.createElement("a");m.href=f,p&&(u.body.appendChild(m),m.href=m.href);var d=u.createElement("input");if(d.type="url",d.value=f,m.protocol===":"||!/:/.test(m.href)||!d.checkValidity()&&!c)throw new TypeError("Invalid URL");Object.defineProperty(this,"_anchorElement",{value:m});var h=new e.URLSearchParams(this.search),v=!0,B=!0,re=this;["append","delete","set"].forEach(function(T){var Ke=h[T];h[T]=function(){Ke.apply(h,arguments),v&&(B=!1,re.search=h.toString(),B=!0)}}),Object.defineProperty(this,"searchParams",{value:h,enumerable:!0});var z=void 
0;Object.defineProperty(this,"_updateSearchParams",{enumerable:!1,configurable:!1,writable:!1,value:function(){this.search!==z&&(z=this.search,B&&(v=!1,this.searchParams._fromString(this.search),v=!0))}})},a=i.prototype,s=function(f){Object.defineProperty(a,f,{get:function(){return this._anchorElement[f]},set:function(c){this._anchorElement[f]=c},enumerable:!0})};["hash","host","hostname","port","protocol"].forEach(function(f){s(f)}),Object.defineProperty(a,"search",{get:function(){return this._anchorElement.search},set:function(f){this._anchorElement.search=f,this._updateSearchParams()},enumerable:!0}),Object.defineProperties(a,{toString:{get:function(){var f=this;return function(){return f.href}}},href:{get:function(){return this._anchorElement.href.replace(/\?$/,"")},set:function(f){this._anchorElement.href=f,this._updateSearchParams()},enumerable:!0},pathname:{get:function(){return this._anchorElement.pathname.replace(/(^\/?)/,"/")},set:function(f){this._anchorElement.pathname=f},enumerable:!0},origin:{get:function(){var f={"http:":80,"https:":443,"ftp:":21}[this._anchorElement.protocol],c=this._anchorElement.port!=f&&this._anchorElement.port!=="";return this._anchorElement.protocol+"//"+this._anchorElement.hostname+(c?":"+this._anchorElement.port:"")},enumerable:!0},password:{get:function(){return""},set:function(f){},enumerable:!0},username:{get:function(){return""},set:function(f){},enumerable:!0}}),i.createObjectURL=function(f){return o.createObjectURL.apply(o,arguments)},i.revokeObjectURL=function(f){return o.revokeObjectURL.apply(o,arguments)},e.URL=i};if(t()||r(),e.location!==void 0&&!("origin"in e.location)){var n=function(){return e.location.protocol+"//"+e.location.hostname+(e.location.port?":"+e.location.port:"")};try{Object.defineProperty(e.location,"origin",{get:n,enumerable:!0})}catch(o){setInterval(function(){e.location.origin=n()},100)}}})(typeof global!="undefined"?global:typeof window!="undefined"?window:typeof self!="undefined"?self:Sr)});var Qr=Ht((Lt,Kr)=>{/*! 
+ * clipboard.js v2.0.11 + * https://clipboardjs.com/ + * + * Licensed MIT © Zeno Rocha + */(function(t,r){typeof Lt=="object"&&typeof Kr=="object"?Kr.exports=r():typeof define=="function"&&define.amd?define([],r):typeof Lt=="object"?Lt.ClipboardJS=r():t.ClipboardJS=r()})(Lt,function(){return function(){var e={686:function(n,o,i){"use strict";i.d(o,{default:function(){return ki}});var a=i(279),s=i.n(a),f=i(370),c=i.n(f),u=i(817),p=i.n(u);function m(j){try{return document.execCommand(j)}catch(O){return!1}}var d=function(O){var w=p()(O);return m("cut"),w},h=d;function v(j){var O=document.documentElement.getAttribute("dir")==="rtl",w=document.createElement("textarea");w.style.fontSize="12pt",w.style.border="0",w.style.padding="0",w.style.margin="0",w.style.position="absolute",w.style[O?"right":"left"]="-9999px";var k=window.pageYOffset||document.documentElement.scrollTop;return w.style.top="".concat(k,"px"),w.setAttribute("readonly",""),w.value=j,w}var B=function(O,w){var k=v(O);w.container.appendChild(k);var F=p()(k);return m("copy"),k.remove(),F},re=function(O){var w=arguments.length>1&&arguments[1]!==void 0?arguments[1]:{container:document.body},k="";return typeof O=="string"?k=B(O,w):O instanceof HTMLInputElement&&!["text","search","url","tel","password"].includes(O==null?void 0:O.type)?k=B(O.value,w):(k=p()(O),m("copy")),k},z=re;function T(j){return typeof Symbol=="function"&&typeof Symbol.iterator=="symbol"?T=function(w){return typeof w}:T=function(w){return w&&typeof Symbol=="function"&&w.constructor===Symbol&&w!==Symbol.prototype?"symbol":typeof w},T(j)}var Ke=function(){var O=arguments.length>0&&arguments[0]!==void 0?arguments[0]:{},w=O.action,k=w===void 0?"copy":w,F=O.container,q=O.target,Le=O.text;if(k!=="copy"&&k!=="cut")throw new Error('Invalid "action" value, use either "copy" or "cut"');if(q!==void 0)if(q&&T(q)==="object"&&q.nodeType===1){if(k==="copy"&&q.hasAttribute("disabled"))throw new Error('Invalid "target" attribute. Please use "readonly" instead of "disabled" attribute');if(k==="cut"&&(q.hasAttribute("readonly")||q.hasAttribute("disabled")))throw new Error(`Invalid "target" attribute. 
You can't cut text from elements with "readonly" or "disabled" attributes`)}else throw new Error('Invalid "target" value, use a valid Element');if(Le)return z(Le,{container:F});if(q)return k==="cut"?h(q):z(q,{container:F})},We=Ke;function Ie(j){return typeof Symbol=="function"&&typeof Symbol.iterator=="symbol"?Ie=function(w){return typeof w}:Ie=function(w){return w&&typeof Symbol=="function"&&w.constructor===Symbol&&w!==Symbol.prototype?"symbol":typeof w},Ie(j)}function Ti(j,O){if(!(j instanceof O))throw new TypeError("Cannot call a class as a function")}function nn(j,O){for(var w=0;w0&&arguments[0]!==void 0?arguments[0]:{};this.action=typeof F.action=="function"?F.action:this.defaultAction,this.target=typeof F.target=="function"?F.target:this.defaultTarget,this.text=typeof F.text=="function"?F.text:this.defaultText,this.container=Ie(F.container)==="object"?F.container:document.body}},{key:"listenClick",value:function(F){var q=this;this.listener=c()(F,"click",function(Le){return q.onClick(Le)})}},{key:"onClick",value:function(F){var q=F.delegateTarget||F.currentTarget,Le=this.action(q)||"copy",Rt=We({action:Le,container:this.container,target:this.target(q),text:this.text(q)});this.emit(Rt?"success":"error",{action:Le,text:Rt,trigger:q,clearSelection:function(){q&&q.focus(),window.getSelection().removeAllRanges()}})}},{key:"defaultAction",value:function(F){return yr("action",F)}},{key:"defaultTarget",value:function(F){var q=yr("target",F);if(q)return document.querySelector(q)}},{key:"defaultText",value:function(F){return yr("text",F)}},{key:"destroy",value:function(){this.listener.destroy()}}],[{key:"copy",value:function(F){var q=arguments.length>1&&arguments[1]!==void 0?arguments[1]:{container:document.body};return z(F,q)}},{key:"cut",value:function(F){return h(F)}},{key:"isSupported",value:function(){var F=arguments.length>0&&arguments[0]!==void 0?arguments[0]:["copy","cut"],q=typeof F=="string"?[F]:F,Le=!!document.queryCommandSupported;return q.forEach(function(Rt){Le=Le&&!!document.queryCommandSupported(Rt)}),Le}}]),w}(s()),ki=Ri},828:function(n){var o=9;if(typeof Element!="undefined"&&!Element.prototype.matches){var i=Element.prototype;i.matches=i.matchesSelector||i.mozMatchesSelector||i.msMatchesSelector||i.oMatchesSelector||i.webkitMatchesSelector}function a(s,f){for(;s&&s.nodeType!==o;){if(typeof s.matches=="function"&&s.matches(f))return s;s=s.parentNode}}n.exports=a},438:function(n,o,i){var a=i(828);function s(u,p,m,d,h){var v=c.apply(this,arguments);return u.addEventListener(m,v,h),{destroy:function(){u.removeEventListener(m,v,h)}}}function f(u,p,m,d,h){return typeof u.addEventListener=="function"?s.apply(null,arguments):typeof m=="function"?s.bind(null,document).apply(null,arguments):(typeof u=="string"&&(u=document.querySelectorAll(u)),Array.prototype.map.call(u,function(v){return s(v,p,m,d,h)}))}function c(u,p,m,d){return function(h){h.delegateTarget=a(h.target,p),h.delegateTarget&&d.call(u,h)}}n.exports=f},879:function(n,o){o.node=function(i){return i!==void 0&&i instanceof HTMLElement&&i.nodeType===1},o.nodeList=function(i){var a=Object.prototype.toString.call(i);return i!==void 0&&(a==="[object NodeList]"||a==="[object HTMLCollection]")&&"length"in i&&(i.length===0||o.node(i[0]))},o.string=function(i){return typeof i=="string"||i instanceof String},o.fn=function(i){var a=Object.prototype.toString.call(i);return a==="[object Function]"}},370:function(n,o,i){var a=i(879),s=i(438);function f(m,d,h){if(!m&&!d&&!h)throw new Error("Missing required 
arguments");if(!a.string(d))throw new TypeError("Second argument must be a String");if(!a.fn(h))throw new TypeError("Third argument must be a Function");if(a.node(m))return c(m,d,h);if(a.nodeList(m))return u(m,d,h);if(a.string(m))return p(m,d,h);throw new TypeError("First argument must be a String, HTMLElement, HTMLCollection, or NodeList")}function c(m,d,h){return m.addEventListener(d,h),{destroy:function(){m.removeEventListener(d,h)}}}function u(m,d,h){return Array.prototype.forEach.call(m,function(v){v.addEventListener(d,h)}),{destroy:function(){Array.prototype.forEach.call(m,function(v){v.removeEventListener(d,h)})}}}function p(m,d,h){return s(document.body,m,d,h)}n.exports=f},817:function(n){function o(i){var a;if(i.nodeName==="SELECT")i.focus(),a=i.value;else if(i.nodeName==="INPUT"||i.nodeName==="TEXTAREA"){var s=i.hasAttribute("readonly");s||i.setAttribute("readonly",""),i.select(),i.setSelectionRange(0,i.value.length),s||i.removeAttribute("readonly"),a=i.value}else{i.hasAttribute("contenteditable")&&i.focus();var f=window.getSelection(),c=document.createRange();c.selectNodeContents(i),f.removeAllRanges(),f.addRange(c),a=f.toString()}return a}n.exports=o},279:function(n){function o(){}o.prototype={on:function(i,a,s){var f=this.e||(this.e={});return(f[i]||(f[i]=[])).push({fn:a,ctx:s}),this},once:function(i,a,s){var f=this;function c(){f.off(i,c),a.apply(s,arguments)}return c._=a,this.on(i,c,s)},emit:function(i){var a=[].slice.call(arguments,1),s=((this.e||(this.e={}))[i]||[]).slice(),f=0,c=s.length;for(f;f{"use strict";/*! + * escape-html + * Copyright(c) 2012-2013 TJ Holowaychuk + * Copyright(c) 2015 Andreas Lubbe + * Copyright(c) 2015 Tiancheng "Timothy" Gu + * MIT Licensed + */var is=/["'&<>]/;Jo.exports=as;function as(e){var t=""+e,r=is.exec(t);if(!r)return t;var n,o="",i=0,a=0;for(i=r.index;i0&&i[i.length-1])&&(c[0]===6||c[0]===2)){r=0;continue}if(c[0]===3&&(!i||c[1]>i[0]&&c[1]=e.length&&(e=void 0),{value:e&&e[n++],done:!e}}};throw new TypeError(t?"Object is not iterable.":"Symbol.iterator is not defined.")}function W(e,t){var r=typeof Symbol=="function"&&e[Symbol.iterator];if(!r)return e;var n=r.call(e),o,i=[],a;try{for(;(t===void 0||t-- >0)&&!(o=n.next()).done;)i.push(o.value)}catch(s){a={error:s}}finally{try{o&&!o.done&&(r=n.return)&&r.call(n)}finally{if(a)throw a.error}}return i}function D(e,t,r){if(r||arguments.length===2)for(var n=0,o=t.length,i;n1||s(m,d)})})}function s(m,d){try{f(n[m](d))}catch(h){p(i[0][3],h)}}function f(m){m.value instanceof Xe?Promise.resolve(m.value.v).then(c,u):p(i[0][2],m)}function c(m){s("next",m)}function u(m){s("throw",m)}function p(m,d){m(d),i.shift(),i.length&&s(i[0][0],i[0][1])}}function mn(e){if(!Symbol.asyncIterator)throw new TypeError("Symbol.asyncIterator is not defined.");var t=e[Symbol.asyncIterator],r;return t?t.call(e):(e=typeof xe=="function"?xe(e):e[Symbol.iterator](),r={},n("next"),n("throw"),n("return"),r[Symbol.asyncIterator]=function(){return this},r);function n(i){r[i]=e[i]&&function(a){return new Promise(function(s,f){a=e[i](a),o(s,f,a.done,a.value)})}}function o(i,a,s,f){Promise.resolve(f).then(function(c){i({value:c,done:s})},a)}}function A(e){return typeof e=="function"}function at(e){var t=function(n){Error.call(n),n.stack=new Error().stack},r=e(t);return r.prototype=Object.create(Error.prototype),r.prototype.constructor=r,r}var $t=at(function(e){return function(r){e(this),this.message=r?r.length+` errors occurred during unsubscription: +`+r.map(function(n,o){return o+1+") "+n.toString()}).join(` + 
`):"",this.name="UnsubscriptionError",this.errors=r}});function De(e,t){if(e){var r=e.indexOf(t);0<=r&&e.splice(r,1)}}var Fe=function(){function e(t){this.initialTeardown=t,this.closed=!1,this._parentage=null,this._finalizers=null}return e.prototype.unsubscribe=function(){var t,r,n,o,i;if(!this.closed){this.closed=!0;var a=this._parentage;if(a)if(this._parentage=null,Array.isArray(a))try{for(var s=xe(a),f=s.next();!f.done;f=s.next()){var c=f.value;c.remove(this)}}catch(v){t={error:v}}finally{try{f&&!f.done&&(r=s.return)&&r.call(s)}finally{if(t)throw t.error}}else a.remove(this);var u=this.initialTeardown;if(A(u))try{u()}catch(v){i=v instanceof $t?v.errors:[v]}var p=this._finalizers;if(p){this._finalizers=null;try{for(var m=xe(p),d=m.next();!d.done;d=m.next()){var h=d.value;try{dn(h)}catch(v){i=i!=null?i:[],v instanceof $t?i=D(D([],W(i)),W(v.errors)):i.push(v)}}}catch(v){n={error:v}}finally{try{d&&!d.done&&(o=m.return)&&o.call(m)}finally{if(n)throw n.error}}}if(i)throw new $t(i)}},e.prototype.add=function(t){var r;if(t&&t!==this)if(this.closed)dn(t);else{if(t instanceof e){if(t.closed||t._hasParent(this))return;t._addParent(this)}(this._finalizers=(r=this._finalizers)!==null&&r!==void 0?r:[]).push(t)}},e.prototype._hasParent=function(t){var r=this._parentage;return r===t||Array.isArray(r)&&r.includes(t)},e.prototype._addParent=function(t){var r=this._parentage;this._parentage=Array.isArray(r)?(r.push(t),r):r?[r,t]:t},e.prototype._removeParent=function(t){var r=this._parentage;r===t?this._parentage=null:Array.isArray(r)&&De(r,t)},e.prototype.remove=function(t){var r=this._finalizers;r&&De(r,t),t instanceof e&&t._removeParent(this)},e.EMPTY=function(){var t=new e;return t.closed=!0,t}(),e}();var Or=Fe.EMPTY;function It(e){return e instanceof Fe||e&&"closed"in e&&A(e.remove)&&A(e.add)&&A(e.unsubscribe)}function dn(e){A(e)?e():e.unsubscribe()}var Ae={onUnhandledError:null,onStoppedNotification:null,Promise:void 0,useDeprecatedSynchronousErrorHandling:!1,useDeprecatedNextContext:!1};var st={setTimeout:function(e,t){for(var r=[],n=2;n0},enumerable:!1,configurable:!0}),t.prototype._trySubscribe=function(r){return this._throwIfClosed(),e.prototype._trySubscribe.call(this,r)},t.prototype._subscribe=function(r){return this._throwIfClosed(),this._checkFinalizedStatuses(r),this._innerSubscribe(r)},t.prototype._innerSubscribe=function(r){var n=this,o=this,i=o.hasError,a=o.isStopped,s=o.observers;return i||a?Or:(this.currentObservers=null,s.push(r),new Fe(function(){n.currentObservers=null,De(s,r)}))},t.prototype._checkFinalizedStatuses=function(r){var n=this,o=n.hasError,i=n.thrownError,a=n.isStopped;o?r.error(i):a&&r.complete()},t.prototype.asObservable=function(){var r=new U;return r.source=this,r},t.create=function(r,n){return new wn(r,n)},t}(U);var wn=function(e){ne(t,e);function t(r,n){var o=e.call(this)||this;return o.destination=r,o.source=n,o}return t.prototype.next=function(r){var n,o;(o=(n=this.destination)===null||n===void 0?void 0:n.next)===null||o===void 0||o.call(n,r)},t.prototype.error=function(r){var n,o;(o=(n=this.destination)===null||n===void 0?void 0:n.error)===null||o===void 0||o.call(n,r)},t.prototype.complete=function(){var r,n;(n=(r=this.destination)===null||r===void 0?void 0:r.complete)===null||n===void 0||n.call(r)},t.prototype._subscribe=function(r){var n,o;return(o=(n=this.source)===null||n===void 0?void 0:n.subscribe(r))!==null&&o!==void 0?o:Or},t}(E);var Et={now:function(){return(Et.delegate||Date).now()},delegate:void 0};var wt=function(e){ne(t,e);function t(r,n,o){r===void 
0&&(r=1/0),n===void 0&&(n=1/0),o===void 0&&(o=Et);var i=e.call(this)||this;return i._bufferSize=r,i._windowTime=n,i._timestampProvider=o,i._buffer=[],i._infiniteTimeWindow=!0,i._infiniteTimeWindow=n===1/0,i._bufferSize=Math.max(1,r),i._windowTime=Math.max(1,n),i}return t.prototype.next=function(r){var n=this,o=n.isStopped,i=n._buffer,a=n._infiniteTimeWindow,s=n._timestampProvider,f=n._windowTime;o||(i.push(r),!a&&i.push(s.now()+f)),this._trimBuffer(),e.prototype.next.call(this,r)},t.prototype._subscribe=function(r){this._throwIfClosed(),this._trimBuffer();for(var n=this._innerSubscribe(r),o=this,i=o._infiniteTimeWindow,a=o._buffer,s=a.slice(),f=0;f0?e.prototype.requestAsyncId.call(this,r,n,o):(r.actions.push(this),r._scheduled||(r._scheduled=ut.requestAnimationFrame(function(){return r.flush(void 0)})))},t.prototype.recycleAsyncId=function(r,n,o){var i;if(o===void 0&&(o=0),o!=null?o>0:this.delay>0)return e.prototype.recycleAsyncId.call(this,r,n,o);var a=r.actions;n!=null&&((i=a[a.length-1])===null||i===void 0?void 0:i.id)!==n&&(ut.cancelAnimationFrame(n),r._scheduled=void 0)},t}(Ut);var On=function(e){ne(t,e);function t(){return e!==null&&e.apply(this,arguments)||this}return t.prototype.flush=function(r){this._active=!0;var n=this._scheduled;this._scheduled=void 0;var o=this.actions,i;r=r||o.shift();do if(i=r.execute(r.state,r.delay))break;while((r=o[0])&&r.id===n&&o.shift());if(this._active=!1,i){for(;(r=o[0])&&r.id===n&&o.shift();)r.unsubscribe();throw i}},t}(Wt);var we=new On(Tn);var R=new U(function(e){return e.complete()});function Dt(e){return e&&A(e.schedule)}function kr(e){return e[e.length-1]}function Qe(e){return A(kr(e))?e.pop():void 0}function Se(e){return Dt(kr(e))?e.pop():void 0}function Vt(e,t){return typeof kr(e)=="number"?e.pop():t}var pt=function(e){return e&&typeof e.length=="number"&&typeof e!="function"};function zt(e){return A(e==null?void 0:e.then)}function Nt(e){return A(e[ft])}function qt(e){return Symbol.asyncIterator&&A(e==null?void 0:e[Symbol.asyncIterator])}function Kt(e){return new TypeError("You provided "+(e!==null&&typeof e=="object"?"an invalid object":"'"+e+"'")+" where a stream was expected. 
You can provide an Observable, Promise, ReadableStream, Array, AsyncIterable, or Iterable.")}function Ki(){return typeof Symbol!="function"||!Symbol.iterator?"@@iterator":Symbol.iterator}var Qt=Ki();function Yt(e){return A(e==null?void 0:e[Qt])}function Gt(e){return ln(this,arguments,function(){var r,n,o,i;return Pt(this,function(a){switch(a.label){case 0:r=e.getReader(),a.label=1;case 1:a.trys.push([1,,9,10]),a.label=2;case 2:return[4,Xe(r.read())];case 3:return n=a.sent(),o=n.value,i=n.done,i?[4,Xe(void 0)]:[3,5];case 4:return[2,a.sent()];case 5:return[4,Xe(o)];case 6:return[4,a.sent()];case 7:return a.sent(),[3,2];case 8:return[3,10];case 9:return r.releaseLock(),[7];case 10:return[2]}})})}function Bt(e){return A(e==null?void 0:e.getReader)}function $(e){if(e instanceof U)return e;if(e!=null){if(Nt(e))return Qi(e);if(pt(e))return Yi(e);if(zt(e))return Gi(e);if(qt(e))return _n(e);if(Yt(e))return Bi(e);if(Bt(e))return Ji(e)}throw Kt(e)}function Qi(e){return new U(function(t){var r=e[ft]();if(A(r.subscribe))return r.subscribe(t);throw new TypeError("Provided object does not correctly implement Symbol.observable")})}function Yi(e){return new U(function(t){for(var r=0;r=2;return function(n){return n.pipe(e?_(function(o,i){return e(o,i,n)}):me,Oe(1),r?He(t):zn(function(){return new Xt}))}}function Nn(){for(var e=[],t=0;t=2,!0))}function fe(e){e===void 0&&(e={});var t=e.connector,r=t===void 0?function(){return new E}:t,n=e.resetOnError,o=n===void 0?!0:n,i=e.resetOnComplete,a=i===void 0?!0:i,s=e.resetOnRefCountZero,f=s===void 0?!0:s;return function(c){var u,p,m,d=0,h=!1,v=!1,B=function(){p==null||p.unsubscribe(),p=void 0},re=function(){B(),u=m=void 0,h=v=!1},z=function(){var T=u;re(),T==null||T.unsubscribe()};return g(function(T,Ke){d++,!v&&!h&&B();var We=m=m!=null?m:r();Ke.add(function(){d--,d===0&&!v&&!h&&(p=jr(z,f))}),We.subscribe(Ke),!u&&d>0&&(u=new et({next:function(Ie){return We.next(Ie)},error:function(Ie){v=!0,B(),p=jr(re,o,Ie),We.error(Ie)},complete:function(){h=!0,B(),p=jr(re,a),We.complete()}}),$(T).subscribe(u))})(c)}}function jr(e,t){for(var r=[],n=2;ne.next(document)),e}function K(e,t=document){return Array.from(t.querySelectorAll(e))}function V(e,t=document){let r=se(e,t);if(typeof r=="undefined")throw new ReferenceError(`Missing element: expected "${e}" to be present`);return r}function se(e,t=document){return t.querySelector(e)||void 0}function _e(){return document.activeElement instanceof HTMLElement&&document.activeElement||void 0}function tr(e){return L(b(document.body,"focusin"),b(document.body,"focusout")).pipe(ke(1),l(()=>{let t=_e();return typeof t!="undefined"?e.contains(t):!1}),N(e===_e()),Y())}function Be(e){return{x:e.offsetLeft,y:e.offsetTop}}function Yn(e){return L(b(window,"load"),b(window,"resize")).pipe(Ce(0,we),l(()=>Be(e)),N(Be(e)))}function rr(e){return{x:e.scrollLeft,y:e.scrollTop}}function dt(e){return L(b(e,"scroll"),b(window,"resize")).pipe(Ce(0,we),l(()=>rr(e)),N(rr(e)))}var Bn=function(){if(typeof Map!="undefined")return Map;function e(t,r){var n=-1;return t.some(function(o,i){return o[0]===r?(n=i,!0):!1}),n}return function(){function t(){this.__entries__=[]}return Object.defineProperty(t.prototype,"size",{get:function(){return this.__entries__.length},enumerable:!0,configurable:!0}),t.prototype.get=function(r){var n=e(this.__entries__,r),o=this.__entries__[n];return o&&o[1]},t.prototype.set=function(r,n){var o=e(this.__entries__,r);~o?this.__entries__[o][1]=n:this.__entries__.push([r,n])},t.prototype.delete=function(r){var 
n=this.__entries__,o=e(n,r);~o&&n.splice(o,1)},t.prototype.has=function(r){return!!~e(this.__entries__,r)},t.prototype.clear=function(){this.__entries__.splice(0)},t.prototype.forEach=function(r,n){n===void 0&&(n=null);for(var o=0,i=this.__entries__;o0},e.prototype.connect_=function(){!zr||this.connected_||(document.addEventListener("transitionend",this.onTransitionEnd_),window.addEventListener("resize",this.refresh),xa?(this.mutationsObserver_=new MutationObserver(this.refresh),this.mutationsObserver_.observe(document,{attributes:!0,childList:!0,characterData:!0,subtree:!0})):(document.addEventListener("DOMSubtreeModified",this.refresh),this.mutationEventsAdded_=!0),this.connected_=!0)},e.prototype.disconnect_=function(){!zr||!this.connected_||(document.removeEventListener("transitionend",this.onTransitionEnd_),window.removeEventListener("resize",this.refresh),this.mutationsObserver_&&this.mutationsObserver_.disconnect(),this.mutationEventsAdded_&&document.removeEventListener("DOMSubtreeModified",this.refresh),this.mutationsObserver_=null,this.mutationEventsAdded_=!1,this.connected_=!1)},e.prototype.onTransitionEnd_=function(t){var r=t.propertyName,n=r===void 0?"":r,o=ya.some(function(i){return!!~n.indexOf(i)});o&&this.refresh()},e.getInstance=function(){return this.instance_||(this.instance_=new e),this.instance_},e.instance_=null,e}(),Jn=function(e,t){for(var r=0,n=Object.keys(t);r0},e}(),Zn=typeof WeakMap!="undefined"?new WeakMap:new Bn,eo=function(){function e(t){if(!(this instanceof e))throw new TypeError("Cannot call a class as a function.");if(!arguments.length)throw new TypeError("1 argument required, but only 0 present.");var r=Ea.getInstance(),n=new Ra(t,r,this);Zn.set(this,n)}return e}();["observe","unobserve","disconnect"].forEach(function(e){eo.prototype[e]=function(){var t;return(t=Zn.get(this))[e].apply(t,arguments)}});var ka=function(){return typeof nr.ResizeObserver!="undefined"?nr.ResizeObserver:eo}(),to=ka;var ro=new E,Ha=I(()=>H(new to(e=>{for(let t of e)ro.next(t)}))).pipe(x(e=>L(Te,H(e)).pipe(C(()=>e.disconnect()))),J(1));function de(e){return{width:e.offsetWidth,height:e.offsetHeight}}function ge(e){return Ha.pipe(S(t=>t.observe(e)),x(t=>ro.pipe(_(({target:r})=>r===e),C(()=>t.unobserve(e)),l(()=>de(e)))),N(de(e)))}function bt(e){return{width:e.scrollWidth,height:e.scrollHeight}}function ar(e){let t=e.parentElement;for(;t&&(e.scrollWidth<=t.scrollWidth&&e.scrollHeight<=t.scrollHeight);)t=(e=t).parentElement;return t?e:void 0}var no=new E,Pa=I(()=>H(new IntersectionObserver(e=>{for(let t of e)no.next(t)},{threshold:0}))).pipe(x(e=>L(Te,H(e)).pipe(C(()=>e.disconnect()))),J(1));function sr(e){return Pa.pipe(S(t=>t.observe(e)),x(t=>no.pipe(_(({target:r})=>r===e),C(()=>t.unobserve(e)),l(({isIntersecting:r})=>r))))}function oo(e,t=16){return dt(e).pipe(l(({y:r})=>{let n=de(e),o=bt(e);return r>=o.height-n.height-t}),Y())}var cr={drawer:V("[data-md-toggle=drawer]"),search:V("[data-md-toggle=search]")};function io(e){return cr[e].checked}function qe(e,t){cr[e].checked!==t&&cr[e].click()}function je(e){let t=cr[e];return b(t,"change").pipe(l(()=>t.checked),N(t.checked))}function $a(e,t){switch(e.constructor){case HTMLInputElement:return e.type==="radio"?/^Arrow/.test(t):!0;case HTMLSelectElement:case HTMLTextAreaElement:return!0;default:return e.isContentEditable}}function Ia(){return L(b(window,"compositionstart").pipe(l(()=>!0)),b(window,"compositionend").pipe(l(()=>!1))).pipe(N(!1))}function ao(){let 
e=b(window,"keydown").pipe(_(t=>!(t.metaKey||t.ctrlKey)),l(t=>({mode:io("search")?"search":"global",type:t.key,claim(){t.preventDefault(),t.stopPropagation()}})),_(({mode:t,type:r})=>{if(t==="global"){let n=_e();if(typeof n!="undefined")return!$a(n,r)}return!0}),fe());return Ia().pipe(x(t=>t?R:e))}function Me(){return new URL(location.href)}function ot(e){location.href=e.href}function so(){return new E}function co(e,t){if(typeof t=="string"||typeof t=="number")e.innerHTML+=t.toString();else if(t instanceof Node)e.appendChild(t);else if(Array.isArray(t))for(let r of t)co(e,r)}function M(e,t,...r){let n=document.createElement(e);if(t)for(let o of Object.keys(t))typeof t[o]!="undefined"&&(typeof t[o]!="boolean"?n.setAttribute(o,t[o]):n.setAttribute(o,""));for(let o of r)co(n,o);return n}function fr(e){if(e>999){let t=+((e-950)%1e3>99);return`${((e+1e-6)/1e3).toFixed(t)}k`}else return e.toString()}function fo(){return location.hash.substring(1)}function uo(e){let t=M("a",{href:e});t.addEventListener("click",r=>r.stopPropagation()),t.click()}function Fa(){return b(window,"hashchange").pipe(l(fo),N(fo()),_(e=>e.length>0),J(1))}function po(){return Fa().pipe(l(e=>se(`[id="${e}"]`)),_(e=>typeof e!="undefined"))}function Nr(e){let t=matchMedia(e);return Zt(r=>t.addListener(()=>r(t.matches))).pipe(N(t.matches))}function lo(){let e=matchMedia("print");return L(b(window,"beforeprint").pipe(l(()=>!0)),b(window,"afterprint").pipe(l(()=>!1))).pipe(N(e.matches))}function qr(e,t){return e.pipe(x(r=>r?t():R))}function ur(e,t={credentials:"same-origin"}){return ve(fetch(`${e}`,t)).pipe(ce(()=>R),x(r=>r.status!==200?Tt(()=>new Error(r.statusText)):H(r)))}function Ue(e,t){return ur(e,t).pipe(x(r=>r.json()),J(1))}function mo(e,t){let r=new DOMParser;return ur(e,t).pipe(x(n=>n.text()),l(n=>r.parseFromString(n,"text/xml")),J(1))}function pr(e){let t=M("script",{src:e});return I(()=>(document.head.appendChild(t),L(b(t,"load"),b(t,"error").pipe(x(()=>Tt(()=>new ReferenceError(`Invalid script: ${e}`))))).pipe(l(()=>{}),C(()=>document.head.removeChild(t)),Oe(1))))}function ho(){return{x:Math.max(0,scrollX),y:Math.max(0,scrollY)}}function bo(){return L(b(window,"scroll",{passive:!0}),b(window,"resize",{passive:!0})).pipe(l(ho),N(ho()))}function vo(){return{width:innerWidth,height:innerHeight}}function go(){return b(window,"resize",{passive:!0}).pipe(l(vo),N(vo()))}function yo(){return Q([bo(),go()]).pipe(l(([e,t])=>({offset:e,size:t})),J(1))}function lr(e,{viewport$:t,header$:r}){let n=t.pipe(X("size")),o=Q([n,r]).pipe(l(()=>Be(e)));return Q([r,t,o]).pipe(l(([{height:i},{offset:a,size:s},{x:f,y:c}])=>({offset:{x:a.x-f,y:a.y-c+i},size:s})))}(()=>{function e(n,o){parent.postMessage(n,o||"*")}function t(...n){return n.reduce((o,i)=>o.then(()=>new Promise(a=>{let s=document.createElement("script");s.src=i,s.onload=a,document.body.appendChild(s)})),Promise.resolve())}var r=class{constructor(n){this.url=n,this.onerror=null,this.onmessage=null,this.onmessageerror=null,this.m=a=>{a.source===this.w&&(a.stopImmediatePropagation(),this.dispatchEvent(new MessageEvent("message",{data:a.data})),this.onmessage&&this.onmessage(a))},this.e=(a,s,f,c,u)=>{if(s===this.url.toString()){let p=new ErrorEvent("error",{message:a,filename:s,lineno:f,colno:c,error:u});this.dispatchEvent(p),this.onerror&&this.onerror(p)}};let o=new EventTarget;this.addEventListener=o.addEventListener.bind(o),this.removeEventListener=o.removeEventListener.bind(o),this.dispatchEvent=o.dispatchEvent.bind(o);let 
i=document.createElement("iframe");i.width=i.height=i.frameBorder="0",document.body.appendChild(this.iframe=i),this.w.document.open(),this.w.document.write(` + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + +

Contact us

+ +

Contact us

+

Would you like to contact us about actuated for your team or organisation?

+

Fill out this form, and we'll get in touch shortly after with next steps.

+

Actuated ™ is a trademark of OpenFaaS Ltd.

+

Keeping in touch

+ +

Anything else?

+

Looking for technical details about actuated? Try the FAQ.

+

Are you running into a problem? Try the troubleshooting guide.

+ + + + + + + + +
+
+ + +
+ +
+ + + +
+
+
+
+ + + + + + + + + \ No newline at end of file diff --git a/dashboard/index.html b/dashboard/index.html new file mode 100644 index 0000000..119d39a --- /dev/null +++ b/dashboard/index.html @@ -0,0 +1,1179 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + Dashboard - Actuated + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + +

Actuated Dashboard

+

The actuated dashboard is available to customers for their enrolled organisations.

+

For each organisation, you can see:

+
    +
  • Today's builds so far - a quick picture of today's activity across all enrolled organisations
  • +
  • Runners - Your build servers and their status
  • +
  • Build queue - All builds queued for processing and their status
  • +
  • Insights - full build history and usage by organisation, repo and user
  • +
  • Job Increases - a list of jobs whose duration has increased by over 5 minutes, plus a list of the affected jobs for that week
  • +
+

Plus:

+
    +
  • CLI - install the CLI for management via command line
  • +
  • SSH Sessions - connect to a runner via SSH to debug issues or to explore - works on hosted and actuated runners
  • +
+

Today's builds so far

+

On this page, you'll get today's total builds, total build minutes and a breakdown of the statuses - to see whether you have a majority of successful or unsuccessful builds.

+

Today at a glance

+
+

Today's activity at a glance

+
+

Underneath this section, there are a number of tips for enabling a container cache, adjusting your subscription plan, and joining the actuated Slack.

+

More detailed reports are available on the insights page.

+

Runners

+

Here you'll see whether any of your servers are offline, draining due to a restart or update, or online and ready to process builds.

+

Runner view

+

The Ping time is how long it takes for the control-plane to check the agent's capacity.

+

Build queue

+

Only builds that are queued (not yet in progress) or already in progress will be shown on this page.

+

Build queue

+

Find out how many builds are pending or running across your organisation and on which servers.

+

Insights

+

Three sets of insights are offered - all at the organisation level, so every repository is taken into account.

+

You can also switch the time window between 28 days, 14 days, 7 days or today.

+

The data is contrasted to the previous period to help you identify spikes and potential issues.

+

The data for reports always starts from the last complete day of data, so the last 7 days will start from the previous day.

+

Build history and usage by organisation

+

Understand when your builds are running at a higher level - across all of your organisations - in one place.

+

Total organisation usage

+

You can click on Minutes to switch to total time instead of total builds, to see if the demand on your servers is increasing or decreasing over time.

+

Build history by repository

+

Repository based usage history

+

When viewing usage at a repository level, you can easily identify anomalies and hot spots - like mounting build times, high failure rates or lots of cancelled jobs - which may imply a faulty interaction or trigger.

+

Build history per user

+

Build history per user of your GitHub organisation

+

This is where you learn who triggers the most builds, who may be a little less active for this period, and where folks may benefit from additional training due to a high failure rate of builds.

+

Job Increases

+

For up to 120 days of history, you can find jobs that have increased by over 5 minutes in duration, week by week. This feature was requested by a team whose builds were roughly 60 minutes each on GitHub's hosted runners, and 20 minutes each on actuated. They didn't want those times to creep up without it being noticed and rectified.

+

Insights on outliers

+
+

Insights on outliers showing the time that the job increased by, and a button to drill down into the affected jobs that week.

+
+

When you click "Inspect", a plot will be drawn with the maximum build time recorded on the days of the affected week. You can then click "View Job" to see what commit, Pull Request, or configuration change may have caused the increase.

+

A plot with the longest job run on each day of the affected week

+
+

A plot with the longest job run on each day of the affected week

+
+

SSH Sessions

+

Once you configure an action to pause at a set point by introducing our custom GitHub action step, you'll be able to copy and paste an SSH command and run it in your terminal.

+

Your SSH keys will be pre-installed and no password is required.

+

SSH sessions in the dashboard

+
+

Viewing an SSH session to a hosted runner

+
+

See also: Example: Debug a job with SSH

+
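As a rough sketch, pausing a job for an SSH session might look like the following (assuming the self-actuated/connect-ssh action used in the example linked above; see that page for the canonical usage):

name: ssh-debug

on: push
jobs:
  debug:
    runs-on: actuated-4cpu-16gb
    steps:
      - uses: actions/checkout@v3
      # Pause here and print an SSH command for connecting to the runner
      - uses: self-actuated/connect-ssh@master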

CLI

+

The CLI page has download instructions; you can find the downloads for Linux, macOS and Windows here:

+

self-actuated/actuated-cli
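As a sketch of one install route (an assumption on our part; the repository's README is the authoritative source), the CLI can be fetched via arkade or downloaded as a release binary:

# Fetch the actuated CLI via arkade's tool catalog
arkade get actuated-cli

# Or download a binary directly from the releases page:
# https://github.com/self-actuated/actuated-cli/releases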

+ + + + + + + + +
+
+ + +
+ +
+ + + +
+
+
+
+ + + + + + + + + \ No newline at end of file diff --git a/examples/custom-vm-size/index.html b/examples/custom-vm-size/index.html new file mode 100644 index 0000000..951424e --- /dev/null +++ b/examples/custom-vm-size/index.html @@ -0,0 +1,1009 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + Custom VM sizes - Actuated + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + +

Example: Custom VM sizes

+

Our team will have configured your servers so that they always launch a pre-defined VM size. This keeps the user experience simple and predictable.

+

However, you can also request a specific VM size with up to 32 vCPU and as much RAM as is available on the server. vCPU can be over-committed safely; however, over-committing RAM is not advised, because if all of the RAM is required, one of the running VMs may exit or be terminated.

+

Certified for:

+
    +
  • x86_64
  • +
  • arm64 including Raspberry Pi 4
  • +
+

Request a custom VM size

+

For a custom size, append the vCPU count (-cpu) and the amount of RAM (-gb) to the standard labels, for example:

+

x86_64 example:

+
    +
  • actuated-1cpu-2gb
  • +
  • actuated-4cpu-16gb
  • +
+

64-bit Arm example:

+
    +
  • actuated-arm64-4cpu-16gb
  • +
  • actuated-arm64-32cpu-64gb
  • +
+

You can change vCPU and RAM independently; there are no set combinations, so you can customise both as you like.

+

The upper limit for vCPU is 32.

+

Create a new file at: .github/workflows/build.yml and commit it to the repository.

+
name: specs
+
+on: push
+jobs:
+  specs:
+    runs-on: actuated-1cpu-2gb
+    steps:
+      - name: Print specs
+        run: |
+            nproc
+            free -h
+
+

This will allocate 1 vCPU and 2GB of RAM to the VM. To run this same configuration for arm64, change runs-on to actuated-arm64-1cpu-2gb.
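For instance, the equivalent arm64 job differs only in its runs-on line:

jobs:
  specs:
    # Same 1 vCPU / 2GB configuration, scheduled onto an arm64 host
    runs-on: actuated-arm64-1cpu-2gb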

+ + + + + + + + +
+
+ + +
+ +
+ + + +
+
+
+
+ + + + + + + + + \ No newline at end of file diff --git a/examples/docker/index.html b/examples/docker/index.html new file mode 100644 index 0000000..f933e0d --- /dev/null +++ b/examples/docker/index.html @@ -0,0 +1,1020 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + Docker run/build - Actuated + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + +

Example: Docker run/build

+

Docker CE is preinstalled in the actuated VM image, and will start upon boot-up.

+

Certified for:

+
    +
  • x86_64
  • +
  • arm64 including Raspberry Pi 4
  • +
+
+

Use a private repository if you're not using actuated yet

+

GitHub recommends using a private repository with self-hosted runners because changes can be left over from a previous run, even when using Actions Runner Controller. Actuated uses an ephemeral VM with an immutable image, so it can be used on both public and private repos. Learn why in the FAQ.

+
+

Try out the action on your agent

+

Create a new file at: .github/workflows/build.yml and commit it to the repository.

+

Try running a container that pings Google three times:

+
name: build
+
+on: push
+jobs:
+  ping-google:
+    runs-on: actuated-4cpu-16gb
+    steps:
+      - uses: actions/checkout@master
+        with:
+          fetch-depth: 1
+      - name: Run a ping to Google with Docker
+        run: |
+          docker run --rm -i alpine:latest ping -c 3 google.com
+
+

Build a container with Docker:

+
name: build
+
+on: push
+jobs:
+  build-in-docker:
+    runs-on: actuated-4cpu-16gb
+    steps:
+      - uses: actions/checkout@master
+        with:
+          fetch-depth: 1
+      - name: Build inlets-connect using Docker
+        run: |
+          git clone --depth=1 https://github.com/alexellis/inlets-connect
+          cd inlets-connect
+          docker build -t inlets-connect .
+          docker images
+
+

To run this on ARM64, just change the actuated prefix from actuated- to actuated-arm64-.

+ + + + + + + + +
+
+ + +
+ +
+ + + +
+
+
+
+ + + + + + + + + \ No newline at end of file diff --git a/examples/github-actions-cache/index.html b/examples/github-actions-cache/index.html new file mode 100644 index 0000000..cb6e27a --- /dev/null +++ b/examples/github-actions-cache/index.html @@ -0,0 +1,1099 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + Using caching in builds - Actuated + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + +

Example: GitHub Actions cache

+

Jobs on Actuated runners start in a clean VM each time. This means dependencies need to be downloaded and build artifacts or caches rebuilt on every run. Caching these files in the actions cache can improve workflow execution time.

+

A lot of the setup actions for package managers have support for caching built-in. See: setup-node, setup-python, etc. They require minimal configuration and will create and restore dependency caches for you.

+
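For instance, a minimal sketch using setup-node's built-in cache input, assuming an npm project with a package-lock.json at the repository root:

steps:
  - uses: actions/checkout@v3
  - uses: actions/setup-node@v3
    with:
      node-version: 18
      # Caches the npm download cache, keyed on the lockfile
      cache: npm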

If you have custom workflows that could benefit from caching, the cache can be configured manually using the actions/cache action.

+

Using the actions cache is not limited to GitHub-hosted runners; it can be used with self-hosted runners too. Workflows using the cache action can be converted to run on Actuated runners. You only need to change runs-on: ubuntu-latest to runs-on: actuated, as shown in diff form below.

+
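In diff form, converting an existing workflow is a one-line change:

 jobs:
   build:
-    runs-on: ubuntu-latest
+    runs-on: actuated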

Use the GitHub Actions cache

+

In this short example we will build alexellis/registry-creds. This is a Kubernetes operator that can be used to replicate Kubernetes ImagePullSecrets to all namespaces.

+

Enable caching on a supported action

+

Create a new file at: .github/workflows/build.yaml and commit it to the repository.

+
name: build
+
+on: push
+
+jobs:
+  build:
+    runs-on: actuated-4cpu-12gb
+    steps:
+      - uses: actions/checkout@v3
+        with:
+          repository: "alexellis/registry-creds"
+      - name: Setup Golang
+        uses: actions/setup-go@v3
+        with:
+          go-version: ~1.19
+          cache: true
+      - name: Build
+        run: |
+          CGO_ENABLED=0 GO111MODULE=on \
+          go build -ldflags "-s -w -X main.Release=dev -X main.SHA=dev" -o controller
+
+

To configure caching with the setup-go action, you only need to set the cache input parameter to true.

+

The cache is populated the first time this workflow runs. Running the workflow after this should be significantly faster because dependency files and build outputs are restored from the cache.

+

Manually configure caching

+

If there is no setup action for your language that supports caching, it can be configured manually.

+

Create a new file at: .github/workflows/build.yaml and commit it to the repository.

+
name: build
+
+on: push
+
+jobs:
+  build:
+    runs-on: actuated-4cpu-12gb
+    steps:
+      - uses: actions/checkout@v3
+        with:
+          repository: "alexellis/registry-creds"
+      - name: Setup Golang
+        uses: actions/setup-go@v3
+        with:
+          go-version: ~1.19
+          cache: true
+      - name: Setup Golang caches
+        uses: actions/cache@v3
+        with:
+          path: |
+            ~/.cache/go-build
+            ~/go/pkg/mod
+          key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }}
+          restore-keys: |
+            ${{ runner.os }}-go-
+      - name: Build
+        run: |
+          CGO_ENABLED=0 GO111MODULE=on \
+          go build -ldflags "-s -w -X main.Release=dev -X main.SHA=dev" -o controller
+
+

The step named Setup Golang caches uses the cache action to configure caching.

+

The path parameter is used to set the paths on the runner to cache or restore. The key parameter sets the key used when saving the cache. A hash of the go.sum file is used as part of the cache key.

+
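The same pattern carries over to other ecosystems. As an illustrative sketch (not from the original page), a manual npm cache would hash the lockfile instead:

- name: Setup npm cache
  uses: actions/cache@v3
  with:
    path: ~/.npm
    # restore-keys lets a partial match restore the closest older cache
    key: ${{ runner.os }}-node-${{ hashFiles('**/package-lock.json') }}
    restore-keys: |
      ${{ runner.os }}-node-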

Further reading

+ + + + + + + + + +
+
+ + +
+ +
+ + + +
+
+
+
+ + + + + + + + + \ No newline at end of file diff --git a/examples/k3s/index.html b/examples/k3s/index.html new file mode 100644 index 0000000..8d18212 --- /dev/null +++ b/examples/k3s/index.html @@ -0,0 +1,1017 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + Kubernetes with K3s - Actuated + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + +

Example: Kubernetes with k3s

+

You may need to access Kubernetes within your build. K3s is a production-ready, lightweight distribution of Kubernetes that uses fewer resources than upstream. k3sup is a popular tool for installing k3s.

+

Certified for:

+
    +
  • x86_64
  • +
  • arm64 including Raspberry Pi 4
  • +
+
+

Use a private repository if you're not using actuated yet

+

GitHub recommends using a private repository with self-hosted runners because changes can be left over from a previous run, even when using Actions Runner Controller. Actuated uses an ephemeral VM with an immutable image, so it can be used on both public and private repos. Learn why in the FAQ.

+
+

Try out the action on your agent

+

Create a new file at: .github/workflows/build.yml and commit it to the repository.

+

Note that it's important to make sure Kubernetes is responsive before running any commands, like creating a Pod or installing a helm chart.

+
name: k3sup-tester
+
+on: push
+jobs:
+  k3sup-tester:
+    runs-on: actuated-4cpu-16gb
+    steps:
+      - name: get arkade
+        uses: alexellis/setup-arkade@v1
+      - name: get k3sup and kubectl
+        uses: alexellis/arkade-get@master
+        with:
+          kubectl: latest
+          k3sup: latest
+      - name: Install K3s with k3sup
+        run: |
+          mkdir -p $HOME/.kube/
+          k3sup install --local --local-path $HOME/.kube/config
+      - name: Wait until nodes ready
+        run: |
+          k3sup ready --quiet --kubeconfig $HOME/.kube/config --context default
+      - name: Wait until CoreDNS is ready
+        run: |
+          kubectl rollout status deploy/coredns -n kube-system --timeout=300s
+      - name: Explore nodes
+        run: kubectl get nodes -o wide
+      - name: Explore pods
+        run: kubectl get pod -A -o wide
+
+

To run this on ARM64, just change the actuated prefix from actuated- to actuated-arm64-.

+ + + + + + + + +
+
+ + +
+ +
+ + + +
+
+
+
+ + + + + + + + + \ No newline at end of file diff --git a/examples/kernel/index.html b/examples/kernel/index.html new file mode 100644 index 0000000..bed06bc --- /dev/null +++ b/examples/kernel/index.html @@ -0,0 +1,1030 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + Compile a Kernel - Actuated + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + +

Example: Test raw compute speed by compiling a Kernel

+

Use this sample to test the raw compute speed of your hosts by building a Kernel.

+

Certified for:

+
    +
  • x86_64
  • +
+
+

Use a private repository if you're not using actuated yet

+

GitHub recommends using a private repository with self-hosted runners because changes can be left over from a previous run, even when using Actions Runner Controller. Actuated uses an ephemeral VM with an immutable image, so it can be used on both public and private repos. Learn why in the FAQ.

+
+

Try out the action on your agent

+

Create a new file at: .github/workflows/build.yml and commit it to the repository.

+
name: microvm-kernel
+
+on: push
+jobs:
+  microvm-kernel:
+    runs-on: actuated
+    steps:
+      - name: free RAM
+        run: free -h
+      - name: List CPUs
+        run: nproc
+      - name: get build toolchain
+        run: |
+          sudo apt update -qy
+          sudo apt-get install -qy \
+            git \
+            build-essential \
+            kernel-package \
+            fakeroot \
+            libncurses5-dev \
+            libssl-dev \
+            ccache \
+            bison \
+            flex \
+            libelf-dev \
+            dwarves
+      - name: clone linux
+        run: |
+          time git clone https://github.com/torvalds/linux.git linux.git --depth=1 --branch v5.10
+          cd linux.git
+          curl -o .config -s -f https://raw.githubusercontent.com/firecracker-microvm/firecracker/main/resources/guest_configs/microvm-kernel-x86_64-5.10.config
+          echo "# CONFIG_KASAN is not set" >> .config
+      - name: make config
+        run: |
+          cd linux.git 
+          make oldconfig
+      - name: Make vmlinux
+        run: |
+          cd linux.git
+          time make vmlinux -j$(nproc)
+          du -h ./vmlinux
+
+

When you have a build time, why not change runs-on: actuated to runs-on: ubuntu-latest to compare it to a hosted runner from GitHub?

+
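One way to run both side by side is a build matrix, sketched here with standard GitHub Actions syntax:

jobs:
  microvm-kernel:
    strategy:
      matrix:
        runner: [actuated, ubuntu-latest]
    # The same kernel build runs once per runner type
    runs-on: ${{ matrix.runner }}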

Here's our test, where our own machine built the Kernel 4x faster than a hosted runner:

+

Faster Kernel builds

+ + + + + + + + +
+
+ + +
+ +
+ + + +
+
+
+
+ + + + + + + + + \ No newline at end of file diff --git a/examples/kind/index.html b/examples/kind/index.html new file mode 100644 index 0000000..bcd350f --- /dev/null +++ b/examples/kind/index.html @@ -0,0 +1,1073 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + Kubernetes with KinD - Actuated + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + +

Example: Kubernetes with KinD

+

You may need to access Kubernetes within your build. KinD is a popular option, and easy to run in an action.

+

Certified for:

+
    +
  • x86_64
  • +
  • arm64 including Raspberry Pi 4
  • +
+
+

Use a private repository if you're not using actuated yet

+

GitHub recommends using a private repository with self-hosted runners because changes can be left over from a previous run, even when using Actions Runner Controller. Actuated uses an ephemeral VM with an immutable image, so it can be used on both public and private repos. Learn why in the FAQ.

+
+

Try out the action on your agent

+

Create a new file at: .github/workflows/build.yml and commit it to the repository.

+

Note that it's important to make sure Kubernetes is responsive before running any commands, like creating a Pod or installing a helm chart.

+
name: build
+
+on: push
+jobs:
+  start-kind:
+    runs-on: actuated-4cpu-16gb
+    steps:
+      - uses: actions/checkout@master
+        with:
+          fetch-depth: 1
+      - name: get arkade
+        uses: alexellis/setup-arkade@v1
+      - name: get kubectl and kind
+        uses: alexellis/arkade-get@master
+        with:
+          kubectl: latest
+          kind: latest
+      - name: Create a KinD cluster
+        run: |
+          mkdir -p $HOME/.kube/
+          kind create cluster --wait 300s
+      - name: Wait until CoreDNS is ready
+        run: |
+          kubectl rollout status deploy/coredns -n kube-system --timeout=300s
+      - name: Explore nodes
+        run: kubectl get nodes -o wide
+      - name: Explore pods
+        run: kubectl get pod -A -o wide
+      - name: Show kubelet logs
+        run: docker exec kind-control-plane journalctl -u kubelet
+
+

To run this on ARM64, just change the actuated prefix from actuated- to actuated-arm64-.

+

Using a registry mirror for KinD

+

Whilst the instructions for a registry mirror work for Docker and for buildkit, KinD uses its own containerd configuration, so it needs to be configured separately, as required.

+

When using KinD, if you're deploying images hosted on the Docker Hub, you'll probably need to either authenticate to the Docker Hub or configure the registry mirror running on your server.

+

Here's an example of how to create a KinD cluster, using a registry mirror for the Docker Hub:

+
#!/bin/bash
+
+kind create cluster --wait 300s --config /dev/stdin <<EOF
+kind: Cluster
+apiVersion: kind.x-k8s.io/v1alpha4
+containerdConfigPatches:
+- |-
+    [plugins."io.containerd.grpc.v1.cri".registry.mirrors."docker.io"]
+    endpoint = ["http://192.168.128.1:5000"]
+EOF
+
+

With open source projects, you may need to run the build on GitHub's hosted runners some of the time, in which case you can add a check to see whether the mirror is available:

+
curl -f --connect-timeout 0.1 -s http://192.168.128.1:5000/v2/_catalog &> /dev/null
+
+if [ "$?" == "0" ]
+then
+  echo "Mirror found, configure KinD for the mirror"
+else
+  echo "Mirror not found, use defaults"
+fi
+
+
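For instance, the check can be combined with the cluster creation, falling back to defaults when the mirror is absent. A minimal sketch, assuming a file named kind-mirror.yaml containing the containerdConfigPatches shown above:

#!/bin/bash

# Probe the registry mirror; the endpoint matches the config above
if curl -f --connect-timeout 0.1 -s http://192.168.128.1:5000/v2/_catalog &> /dev/null
then
  echo "Mirror found, configure KinD for the mirror"
  kind create cluster --wait 300s --config kind-mirror.yaml
else
  echo "Mirror not found, use defaults"
  kind create cluster --wait 300s
fi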

To use authentication instead, create a Kubernetes secret of type docker-registry and then attach it to the default service account of each namespace within your cluster.

+
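A minimal sketch of the authentication route; the secret name dockerhub-creds is illustrative, and the two commands would be repeated for each namespace that pulls from the Docker Hub:

# Create a docker-registry secret from your Docker Hub credentials
kubectl create secret docker-registry dockerhub-creds \
  --docker-username=<docker-hub-username> \
  --docker-password=<docker-hub-token> \
  --namespace default

# Attach the secret to the namespace's default service account
kubectl patch serviceaccount default -n default \
  -p '{"imagePullSecrets": [{"name": "dockerhub-creds"}]}'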

The OpenFaaS docs show how to do this for private registries, but the same applies for authenticating to the Docker Hub to raise rate-limits.

+

You may also like Alex's alexellis/registry-creds project which will replicate your Docker Hub credentials into each namespace within a cluster, to make sure images are pulled with the correct credentials.


Example: Run a KVM guest

+

It is possible to launch a Virtual Machine (VM) within a GitHub Action. Support for virtualization is not enabled by default for Actuated. The Agent has to be configured to use a custom kernel.

+

There are some prerequisites to enable KVM support:

+
  • aarch64 runners are not supported at the moment.
  • A bare-metal host for the Agent is required.
+
+

Nested virtualisation is a premium feature

+

This feature requires a plan size of 15 concurrent builds or greater; however, you can get a 14-day free trial by contacting our team directly through the actuated Slack.

+
+

Configure the Agent

+
  1. Make sure nested virtualization is enabled on the Agent host.

  2. Edit /etc/default/actuated on the Actuated Agent and add the kvm suffix to the AGENT_KERNEL_REF variable:

     - AGENT_KERNEL_REF="ghcr.io/openfaasltd/actuated-kernel:x86_64-latest"
     + AGENT_KERNEL_REF="ghcr.io/openfaasltd/actuated-kernel:x86_64-kvm-latest"

  3. Also add it to the AGENT_IMAGE_REF line:

     - AGENT_IMAGE_REF="ghcr.io/openfaasltd/actuated-ubuntu22.04:x86_64-latest"
     + AGENT_IMAGE_REF="ghcr.io/openfaasltd/actuated-ubuntu22.04:x86_64-kvm-latest"

  4. Restart the Agent to use the new kernel:

     sudo systemctl daemon-reload && \
         sudo systemctl restart actuated

  5. Run a test build to verify KVM support is enabled in the runner. The specs script from the test build will report whether /dev/kvm is available - see the check below.
+
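A minimal bash check that can be added as a workflow step to confirm the feature is active; it mirrors the test performed by the specs script:

if [ -e /dev/kvm ]; then
  echo "/dev/kvm exists, KVM support is enabled"
else
  echo "/dev/kvm does not exist"
fi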

Run a Firecracker microVM

+

This example is an adaptation of the Firecracker quickstart guide that we run from within a GitHub Actions workflow.

+

The workflow installs Firecracker, configures and boots a guest VM, then waits 20 seconds before shutting down the VM and exiting the workflow.

+
  1. Create a new repository and add a workflow file.

     The workflow file: ./.github/workflows/vm-run.yaml:

     name: vm-run

     on: push
     jobs:
       vm-run:
         runs-on: actuated-4cpu-8gb
         steps:
         - uses: actions/checkout@master
           with:
             fetch-depth: 1
         - name: Install arkade
           uses: alexellis/setup-arkade@v2
         - name: Install firecracker
           run: sudo arkade system install firecracker
         - name: Run microVM
           run: sudo ./run-vm.sh

  2. Add the run-vm.sh script to the root of the repository.

     Running the script will:

     • Get the kernel and rootfs for the microVM
     • Start firecracker and configure the guest kernel and rootfs
     • Start the guest machine
     • Wait for 20 seconds, then kill the firecracker process so the workflow finishes

     The run-vm.sh script:

     #!/bin/bash

     # Clone the example repo
     git clone https://github.com/skatolo/nested-firecracker.git

     # Run the VM script
     ./nested-firecracker/run-vm.sh

  3. Hit commit and check the run logs of the workflow. You should find the login prompt of the running microVM in the logs.
+

The full example is available on GitHub

+

For more examples and use-cases see:


Example: Regression test against various Kubernetes versions

+

This example launches multiple Kubernetes clusters in parallel for regression and end-to-end testing.

+

In the example, we're testing the CRD for the inlets-operator on versions v1.16 through to v1.25. You could also switch out k3s for KinD, if you prefer.

+

See also: Actuated with KinD

+

Launching 10 Kubernetes clusters in parallel

+
+

Launching 10 Kubernetes clusters in parallel across your fleet of Actuated Servers.

+
+

Certified for:

+
  • x86_64
  • arm64 including Raspberry Pi 4
+
+

Use a private repository if you're not using actuated yet

+

GitHub recommends using a private repository with self-hosted runners because changes can be left over from a previous run, even when using Actions Runtime Controller. Actuated uses an ephemeral VM with an immutable image, so it can be used on both public and private repos. Learn why in the FAQ.

+
+

Try out the action on your agent

+

Create a new file at: .github/workflows/build.yml and commit it to the repository.

+

Customise the "k3s" array with the versions you need to test, and replace the "Test crds" step with whatever you need to install, such as Helm charts.

+
name: k3s-test-matrix
+
+on:
+  pull_request:
+    branches:
+      - '*'
+  push:
+    branches:
+      - master
+      - main
+
+jobs:
+  kubernetes:
+    name: k3s-test-${{ matrix.k3s }}
+    runs-on: actuated-4cpu-12gb
+    strategy:
+      matrix:
+        k3s: [v1.16, v1.17, v1.18, v1.19, v1.20, v1.21, v1.22, v1.23, v1.24, v1.25]
+
+    steps:
+      - uses: actions/checkout@v1
+      - uses: alexellis/setup-arkade@v2
+      - uses: alexellis/arkade-get@master
+        with:
+          kubectl: latest
+          k3sup: latest
+
+      - name: Create Kubernetes ${{ matrix.k3s }} cluster
+        run: |
+          mkdir -p $HOME/.kube/
+          k3sup install \
+            --local \
+            --k3s-channel ${{ matrix.k3s }} \
+            --local-path $HOME/.kube/config \
+            --merge \
+            --context default
+          cat $HOME/.kube/config
+
+          k3sup ready --context default
+          kubectl config use-context default
+
+          # Just an extra test on top.
+          echo "Waiting for nodes to be ready ..."
+          kubectl wait --for=condition=Ready nodes --all --timeout=5m
+          kubectl get nodes -o wide
+
+      - name: Test crds
+        run: |
+          echo "Applying CRD"
+          kubectl apply -f https://raw.githubusercontent.com/inlets/inlets-operator/master/artifacts/crds/inlets.inlets.dev_tunnels.yaml
+
+

The matrix will cause a new VM to be launched for each item in the "k3s" array.


Example: matrix-build - run a VM for each job in a matrix

+

Use this sample to test launching multiple VMs in parallel.

+

Certified for:

+
  • x86_64
  • arm64 including Raspberry Pi 4
+
+

Use a private repository if you're not using actuated yet

+

GitHub recommends using a private repository with self-hosted runners because changes can be left over from a previous run, even when using Actions Runtime Controller. Actuated uses an ephemeral VM with an immutable image, so it can be used on both public and private repos. Learn why in the FAQ.

+
+

Try out the action on your agent

+

Create a new file at: .github/workflows/build.yml and commit it to the repository.

+
name: CI
+
+on:
+  pull_request:
+    branches:
+      - '*'
+  push:
+    branches:
+      - master
+      - main
+
+jobs:
+  arkade-e2e:
+    name: arkade-e2e
+    runs-on: actuated-4cpu-12gb
+    strategy:
+      matrix:
+        apps: [run-job,k3sup,arkade,kubectl,faas-cli]
+    steps:
+      - name: Get arkade
+        run: |
+          curl -sLS https://get.arkade.dev | sudo sh
+      - name: Download app
+        run: |
+          echo ${{ matrix.apps }}
+          arkade get ${{ matrix.apps }}
+          file /home/runner/.arkade/bin/${{ matrix.apps }}
+
+

The matrix will cause a new VM to be launched for each item in the "apps" array.


Example: Multi-arch with buildx

+

A multi-arch or multi-platform container image is one where you build the same container image for multiple Operating Systems or CPU architectures, and link the variants together under a single name.

+

So you may publish an image named ghcr.io/inlets/inlets-operator:latest, but when this image is fetched by a user, a manifest file is downloaded which directs the user to the appropriate image for their architecture.

+

If you'd like to see what these look like, run the following with arkade:

+
arkade get crane
+
+crane manifest ghcr.io/inlets/inlets-operator:latest
+
+

You'll see a manifests array, with a platform section for each image:

+
{
+  "mediaType": "application/vnd.docker.distribution.manifest.list.v2+json",
+  "manifests": [
+    {
+      "mediaType": "application/vnd.docker.distribution.manifest.v2+json",
+      "digest": "sha256:bae8025e080d05f1db0e337daae54016ada179152e44613bf3f8c4243ad939df",
+      "platform": {
+        "architecture": "amd64",
+        "os": "linux"
+      }
+    },
+    {
+      "mediaType": "application/vnd.docker.distribution.manifest.v2+json",
+      "digest": "sha256:3ddc045e2655f06653fc36ac88d1d85e0f077c111a3d1abf01d05e6bbc79c89f",
+      "platform": {
+        "architecture": "arm64",
+        "os": "linux"
+      }
+    }
+  ]
+}
+
+

Try an example

+

This example is taken from the Open Source inlets-operator.

+

It builds a container image containing a Go binary and uses a Dockerfile in the root of the repository. All of the images and corresponding manifest are published to GitHub's Container Registry (GHCR). The action itself is able to authenticate to GHCR using a built-in, short-lived token. This is dependent on the "permissions" section and "packages: write" being set.

+

View publish.yaml, adapted for actuated:

+
name: publish
+
+on:
+  push:
+    tags:
+      - '*'
+
+jobs:
+  publish:
++    permissions:
++      packages: write
+
+-   runs-on: ubuntu-latest
++   runs-on: actuated-4cpu-12gb
+    steps:
+      - uses: actions/checkout@master
+        with:
+          fetch-depth: 1
+
++     - name: Setup mirror
++       uses: self-actuated/hub-mirror@master
+      - name: Get TAG
+        id: get_tag
+        run: echo TAG=${GITHUB_REF#refs/tags/} >> $GITHUB_ENV
+      - name: Get Repo Owner
+        id: get_repo_owner
+        run: echo "REPO_OWNER=$(echo ${{ github.repository_owner }} | tr '[:upper:]' '[:lower:]')" > $GITHUB_ENV
+
+      - name: Set up QEMU
+        uses: docker/setup-qemu-action@v3
+      - name: Set up Docker Buildx
+        uses: docker/setup-buildx-action@v3
+      - name: Login to container Registry
+        uses: docker/login-action@v3
+        with:
+          username: ${{ github.repository_owner }}
+          password: ${{ secrets.GITHUB_TOKEN }}
+          registry: ghcr.io
+
+      - name: Release build
+        id: release_build
+        uses: docker/build-push-action@v5
+        with:
+          outputs: "type=registry,push=true"
+          platforms: linux/amd64,linux/arm/v6,linux/arm64
+          build-args: |
+            Version=${{ env.TAG }}
+            GitCommit=${{ github.sha }}
+          tags: |
+            ghcr.io/${{ env.REPO_OWNER }}/inlets-operator:${{ github.sha }}
+            ghcr.io/${{ env.REPO_OWNER }}/inlets-operator:${{ env.TAG }}
+            ghcr.io/${{ env.REPO_OWNER }}/inlets-operator:latest
+
+

You'll see that we added a Setup mirror step; this is explained in the Registry Mirror example.

+

The docker/setup-qemu-action@v3 step is responsible for setting up QEMU, which is used to emulate the different CPU architectures.

+

The docker/build-push-action@v5 step is responsible for passing in a number of platform combinations such as: linux/amd64 for cloud, linux/arm64 for Arm servers and linux/arm/v6 for Raspberry Pi.

+

Within the Dockerfile, we needed to make a couple of changes.

+

You can choose to run each build stage on either the BUILDPLATFORM or the TARGETPLATFORM. The BUILDPLATFORM is the native architecture and platform of the machine performing the build, usually amd64. The TARGETPLATFORM matters for the final stage of the build, and will be injected once for each of the platforms you have specified in the step.

+
- FROM golang:1.22 as builder
++ FROM --platform=${BUILDPLATFORM:-linux/amd64} golang:1.22 as builder
+
+

For Go specifically, we also updated the go build command to tell Go to use cross-compilation based upon the TARGETOS and TARGETARCH environment variables, which are populated by Docker.

+
GOOS=${TARGETOS} GOARCH=${TARGETARCH} go build -o inlets-operator
+
+
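A sketch of how that fits into the final build stage; with BuildKit, TARGETOS and TARGETARCH are predefined build arguments, but they must be declared in the stage before use:

# Declare the BuildKit-provided build arguments for this stage
ARG TARGETOS
ARG TARGETARCH

# Cross-compile for the platform currently being built
RUN GOOS=${TARGETOS} GOARCH=${TARGETARCH} go build -o inlets-operator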

Learn more in the Docker Documentation: Multi-platform images

+

Is it slow to build for Arm?

+

Using QEMU can be slow at times, especially when building an image for Arm using a hosted GitHub Runner.

+

We found that we could speed up an Open Source project's build by 22x - from ~ 36 minutes to 1 minute 26 seconds.

+

See also How to make GitHub Actions 22x faster with bare-metal Arm

+

To build a separate image for Arm on an Arm runner, and one for amd64, you could use a matrix build.

+
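A minimal sketch of that approach, reusing the runner label pattern from this page; each architecture then builds natively, without QEMU (the image tag scheme is illustrative):

jobs:
  publish:
    strategy:
      matrix:
        include:
          - arch: amd64
            runner: actuated-4cpu-12gb
          - arch: arm64
            runner: actuated-arm64-4cpu-12gb
    # Each matrix entry runs on a runner with the matching native architecture
    runs-on: ${{ matrix.runner }}
    steps:
      - uses: actions/checkout@master
      - name: Build for ${{ matrix.arch }}
        run: docker build -t inlets-operator:${{ matrix.arch }} .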

Need a hand with GitHub Actions?

+

Check your plan to see if access to Slack is included, if so, you can contact us on Slack for help and guidance.


Example: Deploy a Helm chart

+

This example will create a Kubernetes cluster using KinD, deploy OpenFaaS using Helm, deploy a function, then invoke the function. There are some additional checks for readiness for Kubernetes and the OpenFaaS gateway.

+

You can adapt this example for any other Helm charts you may have for E2E testing.

+

We also recommend considering arkade for installing CLIs and common Helm charts for testing.

+

Docker CE is preinstalled in the actuated VM image, and will start upon boot-up.

+

Certified for:

+
  • x86_64
  • arm64
+
+

Use a private repository if you're not using actuated yet

+

GitHub recommends using a private repository with self-hosted runners because changes can be left over from a previous run, even when using Actions Runtime Controller. Actuated uses an ephemeral VM with an immutable image, so it can be used on both public and private repos. Learn why in the FAQ.

+
+

Try out the action on your agent

+

Create a new GitHub repository in your organisation.

+

Add: .github/workflows/e2e.yaml

+
name: e2e
+
+on:
+  push:
+    branches:
+      - '*'
+  pull_request:
+    branches:
+      - '*'
+
+permissions:
+  actions: read
+  contents: read
+
+jobs:
+  e2e:
+    runs-on: actuated-4cpu-12gb
+    steps:
+      - uses: actions/checkout@master
+        with:
+          fetch-depth: 1
+      - name: get arkade
+        uses: alexellis/setup-arkade@v1
+      - name: get kubectl, kind and faas-cli
+        uses: alexellis/arkade-get@master
+        with:
+          kubectl: latest
+          kind: latest
+          faas-cli: latest
+      - name: Install Kubernetes KinD
+        run: |
+          mkdir -p $HOME/.kube/
+          kind create cluster --wait 300s
+      - name: Add Helm chart, update repos and apply namespaces
+        run: |
+            kubectl apply -f https://raw.githubusercontent.com/openfaas/faas-netes/master/namespaces.yml
+            helm repo add openfaas https://openfaas.github.io/faas-netes/
+            helm repo update
+      - name: Install the Community Edition (CE)
+        run: |
+            helm repo update \
+            && helm upgrade openfaas --install openfaas/openfaas \
+                --namespace openfaas  \
+                --set functionNamespace=openfaas-fn \
+                --set generateBasicAuth=true
+      - name: Wait until OpenFaaS is ready
+        run: |
+            kubectl rollout status -n openfaas deploy/prometheus --timeout 5m
+            kubectl rollout status -n openfaas deploy/gateway --timeout 5m
+      - name: Port forward the gateway
+        run: |
+            kubectl port-forward -n openfaas svc/gateway 8080:8080 &
+
+            attempts=0
+            max=10
+
+            until $(curl --output /dev/null --silent --fail http://127.0.0.1:8080/healthz ); do
+                if [ ${attempts} -eq ${max} ]; then
+                echo "Max attempts reached $max waiting for gateway's health endpoint"
+                exit 1
+                fi
+
+                printf '.'
+                attempts=$(($attempts+1))
+                sleep 1
+            done
+      - name: Login to OpenFaaS gateway and deploy a function
+        run: |
+           PASSWORD=$(kubectl get secret -n openfaas basic-auth -o jsonpath="{.data.basic-auth-password}" | base64 --decode; echo)
+           echo -n $PASSWORD | faas-cli login --username admin --password-stdin 
+
+           faas-cli store deploy env
+
+           faas-cli invoke env <<< ""
+
+           curl -s -f -i http://127.0.0.1:8080/function/env
+
+           faas-cli invoke --async env <<< ""
+
+           kubectl logs -n openfaas deploy/queue-worker
+
+           faas-cli describe env
+
+

If you'd like to deploy the function, check out a more comprehensive example of how to log in and deploy in Serverless For Everyone Else


Example: Publish an OpenFaaS function

+

This example will publish an OpenFaaS function to GitHub's Container Registry (GHCR).

+
  • The example uses Docker's buildx and QEMU for a multi-arch build
  • Dynamic variables to inject the SHA and OWNER name from the repo
  • Uses the token that GitHub assigns to the action to publish the containers.
+

You can also run this example on GitHub's own hosted runners.

+

Docker CE is preinstalled in the actuated VM image, and will start upon boot-up.

+

Certified for:

+
  • x86_64
+
+

Use a private repository if you're not using actuated yet

+

GitHub recommends using a private repository with self-hosted runners because changes can be left over from a previous run, even when using Actions Runtime Controller. Actuated uses an ephemeral VM with an immutable image, so it can be used on both public and private repos. Learn why in the FAQ.

+
+

Try out the action on your agent

+

For alexellis' repository alexellis/autoscaling-functions, check out the .github/workflows/publish.yml file:

+
  • The "Setup QEMU" and "Set up Docker Buildx" steps configure the builder to produce a multi-arch image.
  • The "OWNER" variable means this action can be run on any organisation without having to hard-code a username for GHCR.
  • Only the bcrypt function is being built, with the --filter command added; remove it to build all functions in the stack.yml.
  • --platforms linux/amd64,linux/arm64,linux/arm/v7 will build for regular Intel/AMD machines, 64-bit Arm and 32-bit Arm, i.e. Raspberry Pi. Most users can reduce this list to just "linux/amd64" for a speed improvement.
+

Make sure you edit runs-on: and set it to runs-on: actuated-4cpu-12gb

+
name: publish
+
+on:
+  push:
+    branches:
+      - '*'
+  pull_request:
+    branches:
+      - '*'
+
+permissions:
+  actions: read
+  checks: write
+  contents: read
+  packages: write
+
+jobs:
+  publish:
+    runs-on: actuated-4cpu-12gb
+    steps:
+      - uses: actions/checkout@master
+        with:
+          fetch-depth: 1
+      - name: Get faas-cli
+        run: curl -sLSf https://cli.openfaas.com | sudo sh
+      - name: Pull custom templates from stack.yml
+        run: faas-cli template pull stack
+      - name: Set up QEMU
+        uses: docker/setup-qemu-action@v3
+      - name: Set up Docker Buildx
+        uses: docker/setup-buildx-action@v3
+      - name: Get TAG
+        id: get_tag
+        run: echo ::set-output name=TAG::latest-dev
+      - name: Get Repo Owner
+        id: get_repo_owner
+        run: >
+          echo ::set-output name=repo_owner::$(echo ${{ github.repository_owner }} |
+          tr '[:upper:]' '[:lower:]')
+      - name: Docker Login
+        run: > 
+          echo ${{secrets.GITHUB_TOKEN}} | 
+          docker login ghcr.io --username 
+          ${{ steps.get_repo_owner.outputs.repo_owner }} 
+          --password-stdin
+      - name: Publish functions
+        run: >
+          OWNER="${{ steps.get_repo_owner.outputs.repo_owner }}" 
+          TAG="latest"
+          faas-cli publish
+          --extra-tag ${{ github.sha }}
+          --build-arg GO111MODULE=on
+          --platforms linux/amd64,linux/arm64,linux/arm/v7
+          --filter bcrypt
+
+

If you'd like to deploy the function, check out a more comprehensive example of how to log in and deploy in Serverless For Everyone Else


Example: Get system information about your microVM

+

This sample reveals system information about your runner.

+

Certified for:

+
  • x86_64
  • arm64
+
+

Use a private repository if you're not using actuated yet

+

GitHub recommends using a private repository with self-hosted runners because changes can be left over from a previous run, even when using Actions Runtime Controller. Actuated uses an ephemeral VM with an immutable image, so it can be used on both public and private repos. Learn why in the FAQ.

+
+

Try out the action on your agent

+

Create a specs.sh file:

+
#!/bin/bash
+
+echo Hostname: $(hostname)
+
+echo whoami: $(whoami)
+
+echo Information on main disk
+df -h /
+
+echo Memory info
+free -h
+
+echo Total CPUs:
+echo CPUs: $(nproc)
+
+echo CPU Model
+cat /proc/cpuinfo |grep "model name"
+
+echo Kernel and OS info
+uname -a
+
+if ! [ -e /dev/kvm ]; then
+  echo "/dev/kvm does not exist"
+else
+  echo "/dev/kvm exists"
+fi
+
+echo OS info: $(cat /etc/os-release)
+
+echo PATH: ${PATH}
+
+echo Egress IP:
+curl -s -L -S https://checkip.amazonaws.com
+
+

Create a new file at: .github/workflows/build.yml and commit it to the repository.

+
name: CI
+
+on:
+  pull_request:
+    branches:
+      - '*'
+  push:
+    branches:
+      - master
+
+jobs:
+  specs:
+    name: specs
+    runs-on: actuated-4cpu-12gb
+    steps:
+      - uses: actions/checkout@v1
+      - name: Check specs
+        run: |
+          ./specs.sh
+
+

Note how the hostname changes every time the job is run.

+

Perform a basic benchmark

+

Update the specs.sh file to include benchmarks for disk and network connection:

+
echo Installing hdparm
+
+sudo apt update -qqqqy && sudo apt install -qqqqy hdparm
+
+echo Read speed
+
+sudo hdparm -t $(mount |grep "/ "|cut -d " " -f1)
+
+echo Write speed
+
+sync; dd if=/dev/zero of=./tempfile bs=1M count=1024; sync
+
+echo Where is this runner?
+
+curl -s http://ip-api.com/json|jq
+
+echo Information on main disk
+
+df -h /
+
+echo Public IP:
+
+curl -s -L -S https://checkip.amazonaws.com
+
+echo Checking speed
+sudo pip install speedtest-cli
+speedtest-cli
+
+

For the fastest servers backed by NVMes, with VMs running on a dedicated drive, we tend to see:

+
  • Read speeds of 1000+ MB/s.
  • Write speeds of 1000+ MB/s.
+

The Internet speed test will give you a good idea of how quickly large artifacts can be uploaded or downloaded during jobs.

+

The instructions for a Docker registry cache on the server can make using container images from public registries much quicker.


Expose agent

+

Expose the agent's API over HTTPS

+

The actuated agent serves HTTP, and is accessed by the Actuated control plane.

+

We expect most of our customers to be using hosts with public IP addresses, and the combination of an API token plus TLS is a battle tested combination.

+

For anyone running with private hosts within a firewall, a private peering option is available for enterprise companies, or our inlets network tunnel can be used with an IP allow list.

+

For a host on a public cloud

+

If you're running the agent on a host with a public IP, you can use the built-in TLS mechanism in the actuated agent to receive a certificate from Let's Encrypt, valid for 90 days. The certificate will be renewed by the actuated agent, so there are no additional administration tasks required.

+

The installation will automatically configure the below settings. They are included just for reference, so you can understand what's involved or tweak the settings if necessary.

+

Accessing the agent's endpoint with built-in TLS and Let's Encrypt

+
+

Pictured: Accessing the agent's endpoint with built-in TLS and Let's Encrypt

+
+

Determine the public IP of your instance:

+
# curl -s http://checkip.amazonaws.com
+
+141.73.80.100
+
+

Now imagine that your sub-domain is agent.example.com, you need to create a DNS A or DNS CNAME record of agent.example.com=141.73.80.100, changing both the sub-domain and IP to your own.

+
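You can confirm that the record resolves before carrying on, using the example values from above:

# Query the A/CNAME record you just created
dig +short agent.example.com

# Expected output:
# 141.73.80.100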

Once the agent is installed, edit /etc/default/actuated on the agent and set the following two variables:

+
AGENT_LETSENCRYPT_DOMAIN="agent.example.com"
+AGENT_LETSENCRYPT_EMAIL="webmaster@agent.example.com"
+
+

Restart the agent:

+
sudo systemctl daemon-reload
+sudo systemctl restart actuated
+
+

Your agent's endpoint URL is going to be: https://agent.example.com on port 443

+

Private hosts - private peering for enterprises

+

For enterprise customers, we can offer private peering of the actuated agent for when your servers are behind a corporate firewall, or have no inbound Internet access.

+

Peering example

+
+

Peering example for an enterprise with two agents within their own private firewalls.

+
+

This option is built into the actuated agent, and requires no additional setup, firewall or routing rules. It's similar to how the GitHub Actions agent works, by creating an outbound connection, without relying on any inbound data path.

+
  • The client makes an outbound connection to the Actuated control-plane.
  • If for any reason the connection gets closed or severed, it will reconnect automatically.
  • All traffic is encrypted with HTTPS.
  • Only the Actuated control-plane will be able to communicate with the agent, privately.
+

Private hosts - behind NAT or at the office

+

The default way to configure a server for actuated is to have its HTTPS endpoint available on the public Internet. A quick and easy way to do that is with our inlets network tunnel tool. This works by creating a VM with a public IP address, then connecting a client from your private network to the VM. The port on the private machine then becomes available on the public VM for the Actuated control-plane to access as required.

+

An IP allow-list can also be configured with the egress IP address of the Actuated control-plane. We will provide the egress IP address upon request to customers.

+

Accessing the agent's private endpoint using an inlets-pro tunnel

+
+

Pictured: Accessing the agent's private endpoint using an inlets-pro tunnel

+
+

Reach out to us if you'd like us to host a tunnel server for you, alternatively, you can follow the instructions below to set up your own.

+

The inletsctl tool will create a HTTPS tunnel server for you on your favourite cloud, with a HTTPS certificate obtained from Let's Encrypt.

+

If you have just the one Actuated Agent:

+
export AGENT_DOMAIN=agent1.example.com
+export LE_EMAIL=webmaster@agent1.example.com
+
+arkade get inletsctl
+sudo mv $HOME/.arkade/bin/inletsctl /usr/local/bin/
+
+inletsctl create \
+    --provider digitalocean \
+    --region lon1 \
+    --token-file $HOME/do-token \
+    --letsencrypt-email $LE_EMAIL \
+    --letsencrypt-domain $AGENT_DOMAIN
+
+

Then note down the tunnel's wss:// URL and token.

+

If you wish to configure an IP allow list, log into the VM with SSH and then edit the systemd unit file for inlets-pro. Add the actuated controller egress IP as per these instructions.

+

Then run a HTTPS client to expose your agent:

+
inlets-pro http client \
+    --url $WSS_URL \
+    --token $TOKEN \
+    --upstream http://127.0.0.1:8081
+
+

For two or more Actuated Servers:

+
export AGENT_DOMAIN1=agent1.example.com
+export AGENT_DOMAIN2=agent2.example.com
+export LE_EMAIL=webmaster@agent1.example.com
+
+arkade get inletsctl
+sudo mv $HOME/.arkade/bin/inletsctl /usr/local/bin/
+
+inletsctl create \
+    --provider digitalocean \
+    --region lon1 \
+    --token-file $HOME/do-token \
+    --letsencrypt-email $LE_EMAIL \
+    --letsencrypt-domain $AGENT_DOMAIN1 \
+    --letsencrypt-domain $AGENT_DOMAIN2
+
+

Then note down the tunnel's wss:// URL and token.

+

Then run a HTTPS client to expose each agent. Using the unique agent domain, run the inlets-pro client on each of the Actuated Servers:

+
export AGENT_DOMAIN1=agent1.example.com
+inlets-pro http client \
+    --url $WSS_URL \
+    --token $TOKEN \
+    --upstream $AGENT_DOMAIN1=http://127.0.0.1:8081
+
+
export AGENT_DOMAIN2=agent2.example.com
+inlets-pro http client \
+    --url $WSS_URL \
+    --token $TOKEN \
+    --upstream $AGENT_DOMAIN2=http://127.0.0.1:8081
+
+

You can generate a systemd service (so that inlets restarts upon disconnection, and on reboot) by adding --generate=systemd > inlets.service to the client command, and then running:

+
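For example, taking the first client command from above and appending the flag (a sketch; only the final line changes):

inlets-pro http client \
    --url $WSS_URL \
    --token $TOKEN \
    --upstream $AGENT_DOMAIN1=http://127.0.0.1:8081 \
    --generate=systemd > inlets.service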
sudo cp inlets.service /etc/systemd/system/
+sudo systemctl daemon-reload
+sudo systemctl enable inlets.service
+sudo systemctl start inlets
+
+# Check status with:
+sudo systemctl status inlets
+
+

Your agent's endpoint URL is going to be: https://$AGENT_DOMAIN.

+

Preventing the runner from accessing your local network

+
+

Network segmentation

+

Proper network segmentation of hosts running the actuated agent is required. This is to prevent runners from making outbound connections to other hosts on your local network. We will not accept any responsibility for your configuration.

+
+

If hardware isolation is not available, iptables rules may provide an alternative for isolating the runners from your network.

+

Imagine you were using a LAN range of 192.168.0.0/24, with a router at 192.168.0.1. The following probes and tests show that the runner cannot access the host 192.168.0.101, and that nmap's scan will come up dry.

+

We add a rule to allow access to the router, but reject packets going via TCP or UDP to any other hosts on the network.

+
sudo iptables --insert CNI-ADMIN \
+    --destination  192.168.0.1 --jump ACCEPT
+sudo iptables --insert CNI-ADMIN \
+    --destination  192.168.0.0/24 --jump REJECT -p tcp  --reject-with tcp-reset
+sudo iptables --insert CNI-ADMIN \
+    --destination  192.168.0.0/24 --jump REJECT -p udp --reject-with icmp-port-unreachable
+
+

You can test the efficacy of these rules by running nmap, mtr, ping and any other probing utilities within a GitHub workflow.

+
name: CI
+
+on:
+  pull_request:
+    branches:
+      - '*'
+  push:
+    branches:
+      - master
+      - main
+
+jobs:
+  specs:
+    name: specs
+    runs-on: actuated-4cpu-12gb
+    steps:
+      - uses: actions/checkout@v1
+      - name: addr
+        run: ip addr
+      - name: route
+        run: ip route
+      - name: pkgs
+        run: |
+             sudo apt-get update && \
+              sudo apt-get install traceroute mtr nmap netcat -qy
+      - name: traceroute
+        run: traceroute  192.168.0.101
+      - name: Connect to ssh
+        run: echo | nc  192.168.0.101 22
+      - name: mtr
+        run: mtr -rw  -c 1  192.168.0.101
+      - name: nmap for SSH
+        run: nmap -p 22  192.168.0.0/24
+      - name: Ping router
+        run: |
+          ping -c 1  192.168.0.1
+      - name: Ping 101
+        run: |
+          ping -c 1  192.168.0.101
+

Frequently Asked Questions (FAQ)

+

How does it work?

+

Actuated has three main parts:

+
  1. an agent which knows how to run VMs, you install this on your hosts
  2. a VM image and Kernel that we build which has everything required for Docker, KinD and K3s
  3. a multi-tenant control plane that we host, which tells your agents to start VMs and register a runner on your GitHub organisation
+

The multi-tenant control plane is run and operated by OpenFaaS Ltd as a SaaS.

+

Conceptual flow of starting up a new ephemeral runner

+
+

The conceptual overview showing how a MicroVM is requested by the control plane.

+
+

MicroVMs are only started when needed, and are registered with GitHub by the official GitHub Actions runner, using a short-lived registration token. The token is encrypted with the public key of the agent. This ensures no other agent could use the token to register a runner against the wrong organisation.

+

Learn more: Self-hosted GitHub Actions API

+

Glossary

+
  • MicroVM - a lightweight, single-use VM that is created by the Actuated Agent, and is destroyed after the build is complete. Common examples include firecracker by AWS and Cloud Hypervisor
  • Guest Kernel - a Linux kernel that is used together with a Root filesystem to boot a MicroVM and run your CI workloads
  • Root filesystem - an immutable image maintained by the actuated team containing all necessary software to perform a build
  • Actuated ('Control Plane') - a multi-tenant SaaS run by the actuated team responsible for scheduling MicroVMs to the Actuated Agent
  • Actuated Agent - the software component installed on your Server which runs a MicroVM when instructed by Actuated
  • Actuated Server ('Server') - a server on which the Actuated Agent has been installed, where your builds will execute.
+

How does actuated compare to a self-hosted runner?

+

A self-hosted runner is a machine on which you've installed and registered a GitHub runner.

+

Quite often these machines suffer from some, if not all of the following issues:

+
  • They require several hours to get all the required packages correctly installed to mirror a hosted runner
  • You never update them out of fear of wasting time or breaking something which is working, meaning your supply chain is at risk
  • Builds clash: if you're building a container image or running a KinD cluster, names will clash and dirty state will be left over
+

We've heard in user interviews that the final point of dirty state can cause engineers to waste several days of effort chasing down problems.

+

Actuated uses a one-shot VM that is destroyed immediately after a build is completed.

+

Who is actuated for?

+

actuated is primarily for software engineering teams who are currently using GitHub Actions or GitLab CI.

+
  • You can outsource your CI infrastructure to the actuated team
  • You'll get VM-level isolation, with no risks of side-effects between builds
  • You can run on much faster hardware
  • You'll get insights on how to fine-tune the performance of your builds
  • And save a significant amount of money vs. larger hosted runners if you use 10s or 100s of thousands of minutes per month
+

For GitHub users, a GitHub organisation is required for installation, and runners are attached to individual repositories as required to execute builds.

+

Is there a sponsored subscription for Open Source projects?

+

We have a sponsored program with the CNCF and Ampere for various Open Source projects, you can find out more here: Announcing managed Arm CI for CNCF projects.

+

Sponsored projects are required to add our GitHub badge to the top of their README file for each repository where actuated is being used, along with any other GitHub badges such as build status, code coverage, etc.

+
[![Arm CI sponsored by Actuated](https://img.shields.io/badge/SA_actuated.dev-004BDD)](https://actuated.dev/)
+
+

or

+
<a href="https://actuated.dev/"><img alt="Arm CI sponsored by Actuated" src="https://docs.actuated.dev/images/actuated-badge.png" width="120px"></img></a>
+
+

For an example of what this would look like, see the inletsctl project README.

+

What kind of machines do I need for the agent?

+

You'll need either: a bare-metal host (your own machine, Hetzner Dedicated, Equinix Metal, etc), or a VM that supports nested virtualisation such as those provided by OpenStack, GCP, DigitalOcean, Azure, or VMware.

+

See also: Provision a Server section

+

When will Jenkins, GitLab CI, BitBucket Pipeline Runners, Drone or Azure DevOps be supported?

+

Support for GitHub Actions and GitLab CI is available.

+

Unfortunately, other CI systems tend to expect runners to be available indefinitely, which is an anti-pattern. Why? They gather side-effects and often rely on the insecure use of Docker in Docker, privileged containers, or mounting the Docker socket.

+

If you'd like to migrate to GitHub Actions, or GitLab CI, feel free to reach out to us for help.

+

Is GitHub Enterprise supported?

+

GitHub.com's Pro, Team and Enterprise Cloud plans are supported.

+

GitHub Enterprise Server (GHES) is a self-hosted version of GitHub and requires additional onboarding steps. Please reach out to us if you're interested in using actuated with your installation of GHES.

+

What kind of access is required to my GitHub Organisation?

+

GitHub Apps provide fine-grained privileges, access control, and event data.

+

Actuated integrates with GitHub using a GitHub App.

+

The actuated GitHub App will request:

+
  • Administrative access to add/remove GitHub Actions Runners to individual repositories
  • Events via webhook for Workflow Runs and Workflow Jobs
+

Did you know? The actuated service does not have any access to your code or private or public repositories.

+

Can GitHub's self-hosted runner be used on public repos?

+

Actuated VMs can be used with public repositories, however the standard self-hosted runner when used stand-alone, with Docker, or with Kubernetes cannot.

+

The GitHub team recommends only running their self-hosted runners on private repositories.

+

Why?

+

I took some time to ask one of the engineers on the GitHub Actions team.

+
+

With the standard self-hosted runner, a bad actor could compromise the system or install malware leaving side-effects for future builds.

+
+

He replied that it's difficult for maintainers to secure their repos and workflows, and that bad actors could compromise a runner host due to the way they run multiple jobs, and are not a fresh environment for each build. It may even be because a bad actor could scan the local network of the runner and attempt to gain access to other systems.

+

If you're wondering whether containers and Pods are a suitable isolation level, we would recommend against this since it usually involves one of either: mounting a docker socket (which can lead to escalation to root on the host) or running Docker In Docker (DIND) which requires a privileged container (which can lead to escalation to root on the host).

+

So, can you use actuated on a public repo?

+

Our contact at GitHub stated that through VM-level isolation and an immutable VM image, the primary concern is resolved, because there is no way to have state left over or side effects from previous builds.

+

Actuated fixes the isolation problem, and prevents side-effects between builds. We also have specific iptables rules in the troubleshooting guide which will isolate your runners from the rest of the network.

+

Can I use the containers feature of GitHub Actions?

+

Yes, it is supported, however it is not required, and may make it harder to debug your builds. We prefer and recommend running on the host directly, which gives better performance and a simpler experience. Common software and packages are already within the root filesystem, and can be added with setup-X actions, arkade get, or arkade system install.

+
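For example (the tool names are illustrative):

# Download CLIs into the runner's PATH
arkade get kubectl helm

# Install a system-level package
sudo arkade system install go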

GitHub Action's Running jobs in a container feature is supported, as is Docker, Buildx, Kubernetes, KinD, K3s, eBPF, etc.

+

Example of running commands with the docker.io/node:latest image.

+
jobs:
+  specs:
+    name: test
+    runs-on: actuated-4cpu-12gb
+    container:
+      image: docker.io/node:latest
+      env:
+        NODE_ENV: development
+      ports:
+        - 3000
+      options: --cpus 1
+    steps:
+      - name: Check node version
+        run: node --version
+
+

How many builds does a single actuated VM run?

+

When a VM starts up, it runs the GitHub Actions Runner in ephemeral (aka one-shot) mode, so it can run at most one build. After that, the VM will be destroyed.

+

See also: GitHub: ephemeral runners

+

How are VMs scheduled?

+

VMs are placed efficiently across your Actuated Servers using a scheduling algorithm based upon the amount of RAM reserved for the VM.

+

Autoscaling of VMs is automatic. Let's say that you had 10 jobs pending, but given the RAM configuration, only enough capacity to run 8 of them. The remaining two would be queued until one or more of those 8 jobs completed and freed up capacity.

+

If you find yourself regularly getting into a queued state, there are three potential changes to consider:

+
  1. Using Actuated Servers with more RAM
  2. Allocating less RAM to each job
  3. Adding more Actuated Servers
+

The plan you select will determine how many Actuated Servers you can run, so consider 1. and 2. before 3.

+

Do I need to auto-scale the Actuated Servers?

+

Please read the section "How are VMs scheduled".

+

Auto-scaling Pods or VMs is a quick, painless operation that makes sense for customer traffic, which is generally unpredictable and can be very bursty.

+

GitHub Actions tends to be driven by your internal development team, with a predictable pattern of activity. It's unlikely to vary massively day by day, which means autoscaling is less important than with a user-facing website.

+

In addition to that, bare-metal servers can take 5-10 minutes to provision and may even include a setup fee or monthly commitment, meaning that what you're used to seeing with Kubernetes or AWS Autoscaling Groups may not translate well, or even be required for CI.

+

If you are cost sensitive, you should review the options under Provision a Server section.

+

Depending on your provider, you may also be able to hibernate or suspend servers on a cron schedule to save a few dollars. Actuated will hold jobs in a queue until a server is ready to take them again.

+

What do I need to change in my workflows to use actuated?

+

The changes to your workflow YAML file are minimal.

+

Just set runs-on to the actuated label plus the amount of CPUs and RAM you'd like. The order is fixed, but the values for vCPU/RAM are flexible and can be set as required.

+

You can set something like: runs-on: actuated-4cpu-16gb or runs-on: actuated-arm64-8cpu-32gb.

+

Is 64-bit Arm supported?

+

Yes, actuated is built to run on both Intel/AMD and 64-bit Arm hosts, check your subscription plan to see if 64-bit Arm is included. This includes a Raspberry Pi 4B, AWS Graviton, Oracle Cloud Arm instances and potentially any other 64-bit Arm instances which support virtualisation.

+

What's in the VM image and how is it built?

+

The VM image contains similar software to the hosted runner image: ubuntu-latest offered by GitHub. Unfortunately, GitHub does not publish this image, so we've done our best through user-testing to reconstruct it, including all the Kernel modules required to run Kubernetes and Docker.

+

The image is built automatically using GitHub Actions and is available on a container registry.

+

The primary guest OS version is Ubuntu 22.04. Ubuntu 20.04 is available on request.

+

What Kernel version is being used?

+

The Firecracker team provides guest configurations. These may not be LTS, or the latest version available, however they are fully functional for CI/CD use-cases and are known to work with Firecracker.

+

Stable Kernel version:

+
  • x86_64 - Linux Kernel 5.10.201
  • aarch64 - Linux Kernel 5.10.201
+

Experimental Kernel version:

+
  • aarch64 - Linux Kernel 6.1.90
+

Where are the Kernel headers / includes?

+
+

Warning

+

The following command is only designed for off-the-shelf cloud image builds of Ubuntu Server, and will not work on actuated.

+
apt-get install linux-headers-$(uname -r) 
+
+
+

For actuated, you'll need to take a different approach to build a DKMS or kmod module for your Kernel.

+

Add self-actuated/get-kernel-sources to your workflow and run it before your build step.

+
      - name: Install Kernel headers
+        uses: self-actuated/get-kernel-sources@v1
+
+

An if statement can be added to the block, if you also run the same job on various other types of runners outside of actuated.

+
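A minimal sketch of such a guard, assuming the job uses a matrix variable named runner to select the runner type (the variable name is illustrative):

      - name: Install Kernel headers
        # Only fetch the kernel sources when running on an actuated runner
        if: ${{ startsWith(matrix.runner, 'actuated') }}
        uses: self-actuated/get-kernel-sources@v1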

Where is the Kernel configuration?

+

You can run a job to print out or dump the configuration from proc, or from /boot/.

+

Just create a new job, or an SSH debug session and run:

+
sudo modprobe configs
+cat /proc/config.gz | gunzip > /tmp/config
+
+# Look for a specific config option
+cat /tmp/config | grep "CONFIG_DEBUG_INFO_BTF"
+
+

How easy is it to debug a runner?

+

OpenSSH is pre-installed, but it will be inaccessible from your workstation by default.

+

To connect, you can use an inlets tunnel, Wireguard VPN or Tailscale ephemeral token (remember: Tailscale is not free for your commercial use) to log into any agent.

+

We also offer a SSH gateway in some of our tiers, tell us if this is important to you in your initial contact, or reach out to us via email if you're already a customer.

+

See also: Debug a GitHub Action with SSH

+

How can an actuated runner get IAM permissions for AWS?

+

If you need to publish images to Amazon Elastic Container Registry (ECR), you can either assign a role to any EC2 bare-metal instances that you're using with actuated, or use GitHub's built-in OpenID Connect support.

+

Web Identity Federation means that a job can assume a role within AWS using Secure Token Service (STS) without needing any long-lived credentials.

+
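A sketch of the OIDC route using the aws-actions/configure-aws-credentials action; the role ARN and region below are placeholders for your own values:

permissions:
  id-token: write
  contents: read

jobs:
  publish:
    runs-on: actuated-4cpu-12gb
    steps:
      - uses: aws-actions/configure-aws-credentials@v4
        with:
          role-to-assume: arn:aws:iam::123456789012:role/actuated-ci # placeholder
          aws-region: eu-west-1 # placeholder
      - name: Confirm the assumed identity
        run: aws sts get-caller-identity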

Read more: Configuring OpenID Connect in Amazon Web Services

+

Comparison to other solutions

+

Feel free to book a call with us if you'd like to understand this comparison in more detail.

Solution | Isolated VM | Speed | Efficient spread of jobs | Safely build public repos? | 64-bit Arm support | Maintenance required | Cost
Hosted runners | Yes | Poor | Yes | Yes | None | None | Free minutes in plan ², then per build minute
actuated | Yes | Bare-metal | Yes | Yes | Yes | Very little | Fixed monthly cost
Standard self-hosted runners | No | Good | No | No | DIY | Manual setup and updates | OSS plus management costs
actions-runtime-controller | No | Varies ¹ | No | No | DIY | Very involved | OSS plus management costs
+
+

1 actions-runtime-controller requires use of separate build tools such as Kaniko, which break the developer experience of using docker or docker-compose. If Docker in Docker (DinD) is used, then there is a severe performance penalty and security risk.

+

2 Builds on public GitHub repositories are free with the standard hosted runners, however private repositories require billing information, after the initial included minutes are consumed.

+
+

You can only get VM-level isolation from either GitHub hosted runners or Actuated. Standard self-hosted runners have no isolation between builds and actions-runtime-controller requires either a Docker socket to be mounted or Docker In Docker (a privileged container) to build and run containers.

+

How does actuated compare to a actions-runtime-controller (ARC)?

+

actions-runtime-controller (ARC) describes itself as "still in its early stage of development". It was created by an individual developer called Yusuke Kuoka, and now receives updates from GitHub's team, after having been adopted into the actions GitHub Organisation.

+

Its primary use-case is to scale GitHub's self-hosted actions runner using Pods in a Kubernetes cluster. ARC is self-hosted software, which means its setup and operation are complex, requiring you to create and properly configure a GitHub App along with its keys. For actuated, you only need to run a single binary on each of your runner hosts and send us an encrypted bootstrap token.

+

If you're running npm install or maven, then this may be a suitable isolation boundary for you.

+

The default mode for ARC is a reusable runner, which can run many jobs, and each job could leave side-effects or poison the runner for future job runs.

+

If you need to build a container, doing so in a container on a Kubernetes node offers little isolation or security boundary.

+

What if ARC is configured to use "rootless" containers? With a rootless container, you lose access to "root" and sudo, both of which are essential in any kind of CI job. Actuated users get full access to root, and can run docker build without any tricks or losing access to sudo. That's the same experience you get from a hosted runner by GitHub, but it's faster because it's on your own hardware.

+

You can even run minikube, KinD, K3s and OpenShift with actuated without any changes.

+

ARC runs a container, which should work on any machine with a modern Kernel; actuated runs a VM, in order to provide proper isolation.

+

That means ARC runners can run pretty much anywhere, but actuated runners need to be on a bare-metal machine, or a VM that supports nested virtualisation.

+

See also: Where can I run my agents?

+

Doesn't Kaniko fix all this for ARC?

+

Kaniko, by Google is an open source project for building containers. It's usually run as a container itself, and usually will require root privileges in order to mount the various filesystems layers required.

+

See also: Root user inside a container is root on the host

+

If you're an ARC user and for various reasons, cannot migrate away to a more secure solution like actuated, Kaniko may be a step in the right direction. Google Cloud users could also create a dedicated node pool with gVisor enabled, for some additional isolation.

+

However, it can only build containers, and still requires root, and itself is often run in Docker, so we're getting back to the same problems that actuated set out to solve.

+

In addition, Kaniko cannot and will not help you to run the container that you've just built to validate it with end-to-end tests; neither can it run a KinD cluster, or a Minikube cluster.

+

Do I need to run my Actuated Servers 24/7?

+

Let's say that you wanted to access a single 64-bit Arm runner to speed up your Arm builds from 33 minutes to < 2 minutes like in this example.

+

The two cheapest options for 64-bit Arm hardware would be:

+
  • Buy a Mac Mini M1, host it in your office or a co-lo with Asahi Linux installed. That's a one-time cost and will last for several years.
  • Or rent an a1.metal by the hour from AWS, with very little up front cost, and pay for the time you use it.
+

In both cases, we're not talking about a significant amount of money, however we are sometimes asked about whether Actuated Servers need to be running 24/7.

+

The answer is that it's a trade-off between cost and convenience. We recommend running them continually, however you can turn them off when you're not using them, if you think it is worth your time to do so.

+

If you only need to run Arm builds from 9am-5pm, you could absolutely delete the VM and re-create it with a cron job; just make sure you restore the required files from the original registration of the agent. You may also be able to "suspend" or "hibernate" the host at a reduced cost, depending on the hosting provider. Feel free to reach out to us if you need help with this.
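
For illustration, a pair of cron entries could handle this on a weekday schedule. The create-runner.sh and delete-runner.sh scripts below are hypothetical placeholders for your hosting provider's API or CLI calls:

    # m h dom mon dow command
    0 8 * * 1-5 /usr/local/bin/create-runner.sh   # hypothetical script to provision the VM each morning
    0 18 * * 1-5 /usr/local/bin/delete-runner.sh  # hypothetical script to tear it down in the evening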

+

Is there GPU support?

+

Yes, both for GitHub and GitLab CI.

+

See also: Accelerate GitHub Actions with dedicated GPUs

+

Can Virtual Machines be launched within a GitHub Action?

+

It is possible to launch a Virtual Machine (VM) with KVM from within a Firecracker MicroVM.

+

Use-cases may include: building and snapshotting VM images, running Packer, launching VirtualBox and Vagrant, accelerating the Android emulator, building packages for NixOS and other testing which requires KVM.

+

It's disabled by default, but you can opt-in to the feature by following the steps in this article:

+

How to run a KVM guest in your GitHub Actions
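
Once the feature is enabled, a quick way to verify it from within a job step is to check for the /dev/kvm device (a minimal sketch, not taken from the article above):

    if [ -e /dev/kvm ]; then
      echo "KVM is available - a guest VM can be launched"
    else
      echo "KVM is not available on this runner"
    fi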

+

At time of writing, only Intel and AMD CPUs support nested virtualisation.

+

What about Arm? According to our contacts at Ampere, the latest versions of Arm hardware have some support for nested virtualisation, but the patches for the Linux Kernel are not ready.

+

Can I use a VM for an actuated server instead of bare-metal?

+

If /dev/kvm is available within the VM, or the VM can be configured so that nested virtualisation is available, then you can use a VM as an actuated server. Any VMs that are launched for CI jobs will be launched with nested virtualisation, and will have some additional overheads compared to a bare-metal server.
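
To check whether a candidate VM qualifies, look for /dev/kvm. On Ubuntu, the cpu-checker package provides kvm-ok for this (assuming an Ubuntu guest):

    sudo apt-get install -y cpu-checker
    sudo kvm-ok
    # Expect: "KVM acceleration can be used"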

+

See also: Provision a server

+

Is Windows or macOS supported?

+

Linux is the only supported platform for actuated at this time, on AMD64 or 64-bit Arm architectures. We may consider other operating systems in the future; feel free to reach out to us.

+

Is Actuated free and open-source?

+

Actuated currently uses the Firecracker project to launch MicroVMs to isolate jobs during CI. Firecracker is an open source Virtual Machine Manager used by Amazon Web Services (AWS) to run serverless-style workloads for AWS Lambda.

+

Actuated is a commercial B2B product and service created and operated by OpenFaaS Ltd.

+

Read the End User License Agreement (EULA)

+

The website and documentation are available on GitHub and we plan to release some open source tools in the future to improve customer experience.

+

Is there a risk that we could get "locked-in" to actuated?

+

No, you can move back to either hosted runners (pay per minute from GitHub) or self-managed self-hosted runners at any time. Bear in mind that actuated solves painful issues with both hosted runners and self-managed self-hosted runners.

+

Why is the brand called "actuated" and "selfactuated"?

+

The name of the software, product and brand is: "actuated". In some places "actuated" is not available, and we liked "selfactuated" more than "actuatedhq" or "actuatedio" because it refers to the hybrid experience of self-hosted runners.

+

Privacy policy & data security

+

Actuated is a managed service operated by OpenFaaS Ltd, registered company number: 11076587.

+

It has both a Software as a Service (SaaS) component ("control plane") aka ("Actuated") and an agent ("Actuated Agent"), which runs on a Server supplied by the customer ("Customer Server").

+

Data storage

+

The control-plane of actuated collects and stores:

+
    +
  • Job events for the organisation where a label of "actuated*" is found, including:
      +
    • Organisation name
    • +
    • Repository name
    • +
    • Actor name for each job
    • +
    • Build name
    • +
    • Build start / stop time
    • +
    • Build status
    • +
    +
  • +
+

The following is collected from agents:

+
    +
  • Agent version
  • +
  • Hostname & uptime
  • +
  • Platform information - Operating System and architecture
  • +
  • System capacity - total and available RAM & CPU
  • +
+

In addition, for support requests, we may need to collect the logs of the actuated agent process remotely from:

+
    +
  • VMs launched for jobs, stored at /var/log/actuated/
  • +
+

This information is required to operate the control plane including scheduling of VMs and for technical support.

+

Upon cancelling a subscription, a customer may request that their data be deleted. In addition, they can uninstall the GitHub App from their organisation, and deactivate the GitHub OAuth application used to authenticate to the Actuated Dashboard.

+

Data security & encryption

+

TLS is enabled on the actuated control plane, the dashboard and on each agent. The TLS certificates have not expired and have no known issues.

+

Each customer is responsible for hosting their own Servers and installing appropriate firewalls or access control.

+

Each Customer Server requires a unique token which is encrypted using public key cryptography, before being shared with OpenFaaS Ltd. This token is used to authenticate the agent to the control plane.

+

Traffic between the control plane and Customer Server is only made over HTTPS, using TLS encryption and API tokens. In addition, the token required for GitHub Actions is double encrypted with an RSA key pair, so that only the intended agent can decrypt and use it. These tokens are short-lived and expire after 59 minutes.

+

Event data recorded from GitHub Actions is stored and used to deliver quality of service and scheduling. This data is stored on a server managed by DigitalOcean LLC in the United Kingdom. The control plane is hosted with Linode LLC in the United Kingdom.

+

No data is shared with third parties.

+

Software Development Life Cycle

+
    +
  • A Version Control System (VCS) is being Used - GitHub is used by all employees to store code
  • +
  • Only Authorized Employees Access Version Control - multi-factor authentication (MFA) is required for all employees
  • +
  • Only Authorized Employees Change Code - no changes can be pushed to production without having a pull request approval from senior management
  • +
  • Production Code Changes Restricted - Only authorized employees can push or make changes to production code
  • +
  • All changes are documented through pull requests, tickets and commit messages
  • +
  • Vulnerability management - vulnerability management is provided by GitHub.com. Critical vulnerabilities are remediated in a timely manner
  • +
+

Terminated Employee Access Revoked Within One Business Day - all access to source control management and production systems is revoked within one business day of an employee leaving the company.

+

Access to corporate network, production machines, network devices, and support tools requires a unique ID. This ID is only issued to employees and is revoked upon termination.

+

Policies Cover Employee Confidentiality - OpenFaaS Ltd policies require employees to keep confidential any information they learn while handling customer data.

+

Contact Information Available to Customers

+

OpenFaaS Ltd has provided an email address in customer-accessible support documentation, where support contact information is readily available. Users are encouraged to contact OpenFaaS Ltd if they become aware of items such as operational or security failures, incidents, system problems, concerns, or other issues/complaints.

+

Reliability and uptime

+

Authorized users have access to centralised logging endpoints, to query the logs of the Actuated agent installed on Customer Servers on an ad-hoc basis, for the purpose of support and troubleshooting.

+

Authorized users have access to alerts, dashboards and may use this data to improve the service, or to proactively contact customers when there is a suspected issue.

+

Centralised monitoring and metrics gathered from the control plane have a 14-day retention period, after which data is automatically deleted.

+ + + + + + + + +
+
+ + +
+ +
+ + + +
+
+
+
+ + + + + + + + + \ No newline at end of file diff --git a/images/actuated-badge.png b/images/actuated-badge.png new file mode 100644 index 0000000..ce79923 Binary files /dev/null and b/images/actuated-badge.png differ diff --git a/images/builtin-tls.png b/images/builtin-tls.png new file mode 100644 index 0000000..f0029fb Binary files /dev/null and b/images/builtin-tls.png differ diff --git a/images/conceptual-high-level.png b/images/conceptual-high-level.png new file mode 100644 index 0000000..e8cc738 Binary files /dev/null and b/images/conceptual-high-level.png differ diff --git a/images/conceptual.png b/images/conceptual.png new file mode 100644 index 0000000..73b7941 Binary files /dev/null and b/images/conceptual.png differ diff --git a/images/dashboard/build-queue.png b/images/dashboard/build-queue.png new file mode 100644 index 0000000..a8de804 Binary files /dev/null and b/images/dashboard/build-queue.png differ diff --git a/images/dashboard/org-usage.png b/images/dashboard/org-usage.png new file mode 100644 index 0000000..aea3fe6 Binary files /dev/null and b/images/dashboard/org-usage.png differ diff --git a/images/dashboard/repo-usage.png b/images/dashboard/repo-usage.png new file mode 100644 index 0000000..ccaf8b2 Binary files /dev/null and b/images/dashboard/repo-usage.png differ diff --git a/images/dashboard/runners.png b/images/dashboard/runners.png new file mode 100644 index 0000000..6d52b09 Binary files /dev/null and b/images/dashboard/runners.png differ diff --git a/images/dashboard/user-usage.png b/images/dashboard/user-usage.png new file mode 100644 index 0000000..584db50 Binary files /dev/null and b/images/dashboard/user-usage.png differ diff --git a/images/install_github_app.png b/images/install_github_app.png new file mode 100644 index 0000000..76106c8 Binary files /dev/null and b/images/install_github_app.png differ diff --git a/images/k3s-matrix.png b/images/k3s-matrix.png new file mode 100644 index 0000000..fa88d7f Binary files /dev/null and b/images/k3s-matrix.png differ diff --git a/images/logo.png b/images/logo.png new file mode 100644 index 0000000..fc08849 Binary files /dev/null and b/images/logo.png differ diff --git a/images/oauth-dashboard-access.png b/images/oauth-dashboard-access.png new file mode 100644 index 0000000..27a176f Binary files /dev/null and b/images/oauth-dashboard-access.png differ diff --git a/images/onboarding-steps.png b/images/onboarding-steps.png new file mode 100644 index 0000000..2aed623 Binary files /dev/null and b/images/onboarding-steps.png differ diff --git a/images/peering.png b/images/peering.png new file mode 100644 index 0000000..a5b78a8 Binary files /dev/null and b/images/peering.png differ diff --git a/images/read-only-public-token.png b/images/read-only-public-token.png new file mode 100644 index 0000000..2c38406 Binary files /dev/null and b/images/read-only-public-token.png differ diff --git a/images/ssh-sessions.jpg b/images/ssh-sessions.jpg new file mode 100644 index 0000000..8f69acc Binary files /dev/null and b/images/ssh-sessions.jpg differ diff --git a/images/today-glance.png b/images/today-glance.png new file mode 100644 index 0000000..00cc7b7 Binary files /dev/null and b/images/today-glance.png differ diff --git a/images/tunnel-server.png b/images/tunnel-server.png new file mode 100644 index 0000000..8606479 Binary files /dev/null and b/images/tunnel-server.png differ diff --git a/index.html b/index.html new file mode 100644 index 0000000..ab59f94 --- /dev/null +++ b/index.html @@ -0,0 +1,1255 @@ + + + + + + + + + + + 
+ + + + + + + + + + + + + + + Introduction - Actuated + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + +
+
+
+ + + + + + + +
+
+ + + + + +

actuated

+

Actuated brings blazingly fast, secure builds to self-hosted CI runners.

+

Building containers on self-hosted runners is slow and insecure

+

Most solutions that use containers for running Docker or Kubernetes in CI have very poor security boundaries. They require either privileged containers (root on the host), a shared Docker socket (root on the host), third-party tools which don't integrate well and still require root to mount folders, or user namespaces which come with their own limitations. The chances are, if you use Docker or K8s in your CI, and run with: actions-runner-controller, Jenkins, or GitLab, then you may be compromising on security or user experience.

+

Management is a nightmare

+

Self-hosted CI runners are continually out of date, and require fine-tuning to get all the right packages in place and Kernel modules to build containers and cloud-native software. You'll also have to spend extra time making sure builds don't conflict, and that they can't cause side effects to system-level packages. What if you need two different versions of some software?

+

If you haven't felt this pain yet, then perhaps you're blissfully unaware or are not updating your packages?

+
+

Are you running privileged containers for CI in your organisation? Are you sharing a Docker Socket (just as bad!)? Are you running Docker in Docker (DIND)? 🙈

+
+

Self-managed runners are inefficient and overprovisioned

+

Self-hosted runners are typically over-provisioned meaning you're spending too much money.

+

Why are they over-provisioned? Because you never know how many jobs you'll have to run, you have to make them bigger, or have too many hosts available.

+

Why are they inefficient?

+

By default, the self-hosted runner will only schedule one job per host at a time, because GitHub has no knowledge of the capacity of your machines. So each and every build you run could consume all the resources on the host. The second reason is that builds often conflict with one another causing side effects that only happen in CI and are really difficult to track down and reproduce.

+

Actuated uses VMs to slice up the whole machine, and can run many builds in parallel. The net effect is that your build queue will get cleared down much more quickly.

+

Hands-free, VM-level isolation

+

Actuated provides a fast-booting microVM which can run Docker, Kubernetes and anything else you need, with full root on the VM, and no access to the host. Each environment is created just in time to take a build, and is removed immediately after.

+

Boot time is usually ~1-2 seconds for the VM; the extra second is because we start Docker as part of the boot-up process.

+
+

What does "actuated" mean?

+

Something that activates or impels itself; specifically (a machine, device, etc.) that causes itself to begin operating automatically, self-activating.

+
+

We maintain a VM image that is updated regularly through an automated build, so you don't have to install SDKs, runtimes or language packs on your build machines.

+

Just enable automated updates on your server then install the actuated agent. We'll do the rest including managing efficient allocation across your fleet of servers, and updating the CI image.
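
On Ubuntu, one common way to enable automated updates is the unattended-upgrades package (this assumes Ubuntu as the host OS; adapt for other distributions):

    sudo apt-get install -y unattended-upgrades
    sudo dpkg-reconfigure -plow unattended-upgrades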

+

And actuated will run your jobs efficiently across a fleet of hosts, or a single machine. They each need to be either bare-metal hosts (think: AWS Metal / Graviton, Equinix Metal, etc), or support nested virtualisation (a feature available on GCP and DigitalOcean).

+

What people are saying

+ +

Watch a live demo

+

Alex shows you how actuated uses an isolated, immutable microVM to run K3s inside of a GitHub Action, followed by a matrix build that causes 5 VMs to be launched. You'll see how quick and easy it is to enable actuated, and how it can buffer and queue up jobs, when there's no remaining capacity in your fleet of agents.

+ + +

You can also watch a webinar that Alex recorded with Richard Case from Weaveworks on how microVMs compare to containers and legacy VMs, you'll see Alex's demo at: 1:13:19.

+

Conceptual overview

+

Conceptual flow of starting up a new ephemeral runner

+
+

Actuated will schedule builds across your fleet of agents, packing them in densely, without overloading the host. Each microVM will run just one build before being destroyed to ensure a clean, isolated build.

+
+

Learn more in the FAQ

+

Get started

+ +

Comparison

+

Feel free to book a call with us if you'd like to understand this comparison in more detail.

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
| Solution | Isolated VM | Speed | Efficient spread of jobs | Safely build public repos? | ARM64 support | Maintenance required | Cost |
| --- | --- | --- | --- | --- | --- | --- | --- |
| Hosted runners | Yes | Poor | - | Yes | None | - | Free minutes in plan 2, then per build minute |
| actuated | Yes | Bare-metal | Yes | Yes | Yes | Very little | Fixed monthly cost |
| Standard self-hosted runners | No | Good | No | No | DIY | Manual setup and updates | OSS plus management costs |
| actions-runner-controller | No | Varies 1 | - | - | DIY | Very involved | OSS plus management costs |
+
+

1 actions-runner-controller requires use of separate build tools such as Kaniko, which break the developer experience of using docker or docker-compose. If Docker in Docker (DinD) is used, then there is a severe performance penalty and security risk.

+

2 Builds on public GitHub repositories are free with the standard hosted runners, however private repositories require billing information, after the initial included minutes are consumed.

+
+

You can only get VM-level isolation from either GitHub hosted runners or Actuated. Standard self-hosted runners have no isolation between builds, and actions-runner-controller requires either a Docker socket to be mounted or Docker In Docker (a privileged container) to build and run containers.

+

Got questions, comments or suggestions?

+

actuated is a trademark of OpenFaaS Ltd.

+

You can contact the team working on actuated via email at: contact@openfaas.com

+

Follow @selfactuated on Twitter for updates and announcements

+ + + + + + + + +
+
+ + +
+ +
+ + + +
+
+
+
+ + + + + + + + + \ No newline at end of file diff --git a/install-agent/index.html b/install-agent/index.html new file mode 100644 index 0000000..1cd5bcc --- /dev/null +++ b/install-agent/index.html @@ -0,0 +1,1206 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + Install the Agent - Actuated + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + +

Add your first agent to actuated

+

actuated is split into three parts:

+
    +
  1. An Actuated Agent (agent) that you run on your own machines or VMs (server), which can launch a VM with a single-use GitHub Actions runner.
  2. +
  3. A VM image launched by the agent, with all the preinstalled software found on a hosted GitHub Actions runner.
  4. +
  5. Our own control plane that talks to GitHub on your behalf, and schedules builds across your fleet of agents.
  6. +
+

All we need you to do is to install our agent on one or more servers, then we take care of the rest. We'll even be able to tell you if your server goes offline for any reason.

+
+

Have you registered your organisation yet?

+

Before you can add an agent, you or your GitHub organisation admin will need to install the: Actuated GitHub App.

+
+

Pick your Actuated Servers

+

Pick your Actuated Servers carefully using our guide: Pick a host for actuated

+

Review the End User License Agreement (EULA)

+

Make sure you've read the Actuated EULA before registering your organisation with the actuated GitHub App, or starting the agent binary on one of your hosts.

+

If you missed it in the "Provision a Server" page, we recommend you use Ubuntu 22.04 as the host operating system on your Server.

+

Install the Actuated Agent

+
+

Do you want a free, expert installation?

+

Our team can install the agent and configure the server for you. Just request our public SSH key, add it to .ssh/authorized_keys, create a DNS A or CNAME record for your server, then send the details over to us on the Actuated Slack.
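
For example, if our key arrived as a file named actuated-support.pub (a hypothetical filename), you could add it like this:

    cat actuated-support.pub >> ~/.ssh/authorized_keys
    chmod 600 ~/.ssh/authorized_keys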

+

Alternatively, you can run through the semi-automatic installation with the details below.

+
+
    +
  1. +

    Install your license for actuated

    +

    The license is available in the email you received when you purchased your subscription. If someone else bought the subscription, they should be able to forward it to you.

    +

    Run the following, then paste in your license, hit enter once, then Control + D to save the file.

    +
    mkdir -p ~/.actuated
    +cat > ~/.actuated/LICENSE
    +
    +
  2. +
  3. +

    Download the Actuated Agent and installation script to the server

    +
    +

    Setting up an ARM64 agent? Wherever you see agent in a command, change it to: agent-arm64. So instead of agent keygen you'd run agent-arm64 keygen.

    +
    +

    Install arkade using the command below, or download it from the releases page.

    +

    Download the latest agent and install the binary to /usr/local/bin/:

    +
    (
    +# Install arkade
    +curl -sLS https://get.arkade.dev | sudo sh
    +
    +# Use arkade to extract the agent from its OCI container image
    +arkade oci install ghcr.io/openfaasltd/actuated-agent:latest --path ./agent
    +chmod +x ./agent/agent*
    +sudo mv ./agent/agent* /usr/local/bin/
    +)
    +
    +

    Run the setup.sh script which will install all the required dependencies like containerd, CNI and Firecracker.

    +

    For best performance, a dedicated drive, volume or partition is required to store the filesystems for running VMs. If you do not have a volume or extra drive attached, then you can shrink the root partition, and use the resulting free space.

    +
    (
    +cd agent
    +VM_DEV=/dev/nvme0n2 sudo -E ./install.sh
    +)
    +
    +

    If you do not have additional storage available at this time, the installer will generate a loopback filesystem for you.

    +
    (
    +cd agent
    +sudo -E ./install.sh
    +)
    +
    +
  4. +
  5. +

    Generate your enrollment file

    +

    You'll need to create a DNS A or CNAME record for each server you add to actuated, this could be something like server1.example.com for instance.
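
You can verify that the record resolves before continuing, using server1.example.com as the example hostname from above:

    dig +short server1.example.com
    # Should print your server's public IP address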

    +

    Run the following to create an enrollment file at $HOME/.actuated/agent.yaml:

    +
    +

    For an Arm server run agent-arm64 instead of agent

    +
    +
    agent enroll --url https://server1.example.com
    +
    +

    The enrollment file contains:

    +
      +
    • The hostname of the server
    • +
    • The public key of the agent which we use to encrypt tokens sent to the agent to bootstrap runners to GitHub Actions
    • +
    • A unique API token encrypted with our public key, which is used by the control plane to authenticate each message sent to the agent
    • +
    +
  6. +
  7. +

    Configure and start the agent

    +

    Use the install-service command to configure and install a systemd service to start the actuated agent.

    +

The actuated control plane will only communicate with agents exposed over HTTPS to ensure proper encryption is in place. An API token is used in addition to the TLS connection for all requests.

    +

    Any bootstrap tokens sent to the agent are further encrypted with the agent's public key.

    +

    For hosts with public IPs, you will need to use the built-in TLS provisioning with Let's Encrypt. For hosts behind a firewall, NAT or in a private datacenter, you can use inlets to create a secure tunnel to the agent.

    +

    We're considering other models for after the pilot, for instance GitHub's own API has the runner make an outbound connection and uses long-polling.

    +

    These steps are for hosts with public IP addresses, if you want to use inlets, jump to the end of this step.

    +

    The easiest way to configure everything is to run as root. The --user flag can be used to run under a custom user account, however sudo access is still required for actuated.

    +

    For an x86_64 server, run:

    +
    DOMAIN=agent1.example.com
    +
    +sudo -E agent install-service \
    +  --letsencrypt-domain $DOMAIN \
    +  --letsencrypt-email webmaster@$DOMAIN
    +
    +

    For an Arm server, run:

    +
    DOMAIN=agent1.example.com
    +
    +sudo -E agent-arm64 install-service \
    +  --letsencrypt-domain $DOMAIN \
    +  --letsencrypt-email webmaster@$DOMAIN
    +
    +
    +

    Note the different binary name: agent-arm64

    +
    +

    If you need to make changes you can run the command again, or edit /etc/default/actuated.

    +

    Check the service's status with:

    +
    sudo systemctl status actuated
    +sudo journalctl -u actuated --since today -f
    +
    +

For an Actuated Agent behind a firewall, or on a private network, do not include the --letsencrypt-* flags, and instead add --listen-addr "127.0.0.1:". Then read expose the agent with HTTPS for details on our private peering option or how to set up an inlets tunnel.

    +

    For example (with inlets):

    +
    sudo -E agent install-service \
    +    --listen-addr "127.0.0.1:"
    +
    +
  8. +
  9. +

    Check that the control-plane is accessible

    +
    curl -i https://server1.example.com
    +
    +

    A correct response is a 403.
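
To check just the status code, a small variation on the command above:

    curl -s -o /dev/null -w "%{http_code}\n" https://server1.example.com
    # Expect: 403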

    +
  10. +
  11. +

    Send us your agent's connection info

    +

    Share the $HOME/.actuated/agent.yaml file with us so we can add your agent to the actuated control plane.

    +

    We'll let you know once we've added your agent to actuated and then it's over to you to start running your builds.

    +

    Once you've run our test build, you need to run the steps for systemd mentioned above.

    +
  12. +
+

Next steps

+

You can now start your first build and see it run on your actuated agent.

+

Start a build on your agent

+
name: ci
+
+on: push
+
+jobs:
+  build-golang:
+-    runs-on: ubuntu-latest
++    runs-on: actuated-4cpu-16gb
+
+

The amount of RAM and CPU can be picked independently.

+

For Arm servers change the prefix from actuated- to actuated-arm64:

+
name: ci
+
+on: push
+
+jobs:
+  build-golang:
+-    runs-on: ubuntu-latest
++    runs-on: actuated-arm64-8cpu-32gb
+
+
+

You can also specify actuated-any-4cpu-8gb if you don't mind whether the job runs on one of your amd64 or arm64 servers.

+
+

Other considerations

+

If you'd like to install a firewall, ufw should be relatively quick to configure - see the sketch after the port list below.

+

You will need the following ports open:

+
    +
  • 443 - the encrypted control plane for actuated
  • +
  • 80 - used with Let's Encrypt to obtain a TLS certificate during the HTTP01 challenge
  • +
  • 22 - we recommend leaving port 22 open so that you can log into the machine with SSH, if needed. You could also change this to a high or random port
  • +
+
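
A minimal sketch with ufw, assuming Ubuntu and the default SSH port (adjust if you've moved SSH to another port):

    sudo ufw allow 22/tcp    # SSH access
    sudo ufw allow 80/tcp    # Let's Encrypt HTTP01 challenge
    sudo ufw allow 443/tcp   # encrypted control plane
    sudo ufw enable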

We do not recommend restricting outgoing traffic on the server as this will probably cause you issues with your builds.

+

See also: Troubleshooting your agent

+ + + + + + + + +
+
+ + +
+ +
+ + + +
+
+
+
+ + + + + + + + + \ No newline at end of file diff --git a/provision-server/index.html b/provision-server/index.html new file mode 100644 index 0000000..f68a0c7 --- /dev/null +++ b/provision-server/index.html @@ -0,0 +1,1208 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + Provision a Server - Actuated + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + +

Provision a Server

+ +

Provision a Server for actuated

+

You'll need to provision a Server which is capable of virtualisation with Linux KVM. Each of your builds will run in an isolated microVM, with its own networking, Kernel and immutable filesystem.

+

We have done extensive research and testing both independently and with our customers. The recommendations on this page are updated regularly. We recommend bare-metal for the best performance, but cloud VMs which support nested virtualisation are also an option.

+

Did you know? Bare-metal servers from European providers are available from 50-150 EUR / mo. Using your own hardware can also be really cost effective.

+

So what makes one server quicker than another?

+
    +
  • CPU Clock speed - the base and turbo speeds affect how some builds perform like Go and Rust
  • +
  • Core count - The amount of vCPU allocated to a build affects multi-processing
  • +
  • RAM and disk space - tune these to your needs to prevent builds slowing down
  • +
  • Generation of hardware - hosted runners may use obsolete hardware, you can use the latest generation
  • +
  • Network bandwidth - how quickly images, artifacts and caches will be transferred
  • +
  • Storage - NVMe is the only viable option for high performance builds
  • +
  • Multi-tenancy - are other customers contending for the same resources, or is the server dedicated to your team?
  • +
+
+

What Operating System (OS) should I use?

+

The certified Operating System for an Actuated server is: Ubuntu Server 22.04.

+
+

How many VMs or jobs can a server run?

+

Depending on the level of concurrency in your plan, each server will be able to run a set number of jobs, so we suggest dividing the RAM and CPU threads between them. For instance, if your server has 32 threads and 128GB of RAM, you could allocate 6 vCPU and 25 GB of RAM to each job for 5x jobs in parallel, or 4x vCPU and 12GB of RAM for 8x jobs in parallel.
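
As a back-of-the-envelope check, divide the host's capacity by the concurrency you're planning for. The numbers below are illustrative:

    THREADS=32
    RAM_GB=128
    JOBS=5

    echo "vCPU per job: $((THREADS / JOBS))"   # 6
    echo "RAM per job: $((RAM_GB / JOBS)) GB"  # 25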

+

In addition, you can also specify vCPU and RAM requirements on a per-job basis by changing the runs-on: actuated label to: runs-on: actuated-2cpu-8gb and so forth. This is useful when you have particular jobs which need a lot of resources, like building Kernels, Kubernetes/Docker E2E tests and browser testing.

+

Just tell me what I need

+

For the absolute best value in terms of performance and cost, we recommend the following options from Hetzner's Dedicated range:

+ +

Servers on Hetzner arrive with a "rescue" system; use it to install Ubuntu 22.04, and make sure you disable software RAID, so that the two NVMe drives are presented as separate devices. One will run the system, the other will be used for filesystems for all the VMs.

+

Our research on servers for actuated

+
+

Want us to recommend a server?

+

There's a lot of options when it comes to picking a server. On the onboarding call, we can help you find a match for your performance requirements, budget, and vendor preferences.

+
+

Intel/AMD

+
+

1000 USD free credit for bare-metal

+

Equinix Metal have partnered with us to offer 1000 USD of credit for new customers to use on actuated. This will cover your usage for one month using an AMD Epyc server. You can request the discount code after purchasing your actuated subscription.

+
+

Intel and AMD CPUs can be used interchangeably and are known as amd64 or x86_64.

+
    +
  1. +

    Bare-metal on the cloud (higher cost, convenient, high performance)

    +

    Bare-metal doesn't have to mean managing hardware in your own physical datacenter. You can deploy machines by API, pay-as-you-go and get the highest performance available.

    +

    Bear in mind that whilst the cost of bare-metal is higher than VMs, you will be able to pack more builds into them and get better throughput since actuated can schedule builds much more efficiently than GitHub's self-hosted runner.

    +

We have seen the best performance from hosts with high clock speeds, like the recent generation of AMD processors, combined with local NVMe storage. Rotational drives and SATA SSDs are significantly slower. At the lower end of bare-metal providers, you'll pay 40-50 EUR / mo per host, moving up to 80-150 EUR / mo for NVMe and AMD processors; when you go up to enterprise-grade bare-metal with 10Gbit uplinks, you'll be more in the range of 500-1500 USD / mo.

    +

    Some providers have a setup fee, a one-month commitment, or they don't have an API/automated way to order machines. This coupled with the low costs and capacity of bare-metal means autoscaling servers is usually unnecessary.

    +

    There are at least a dozen options for hosted bare-metal servers:

    + +

    You can see a separate list here.

    +
    +

A note on Scaleway: Having tested several of Scaleway's bare-metal offerings, we do not recommend their current generation of bare-metal due to slow I/O and CPU speeds.

    +
    +

    Equinix Metal have partnered with us to offer 500 USD of credit for new customers to use on actuated. You'll get the discount code after signing up with us. We've tested their c3.small.x86 and c2.small.x86 machines, and they are very fast, with enterprise-grade networking and support included, with many different regions available.

    +

Are you on a budget or looking to cut costs? Both Ionos (UK) and Hetzner (Germany) offer excellent value, with NVMe storage and very fast AMD CPUs available.

    +

    Hetzner have a minimum commitment of one month, and most of the time will also charge a one-time setup fee. We recommend their AX-Line with NVMe and ECC RAM - for instance the AX41-NVME, AX52, or AX102. The best machine on offer is the AX161 which also has a fast delivery time.

    +
  2. +
  3. +

    Cloud Virtual Machines (VMs) with nested virtualization (lowest cost, convenient, mid-level performance)

    +

    This option may not have the raw speed and throughput of a dedicated, bare-metal host, but keeps costs low and is convenient for getting started.

    +

We know of at least three providers which offer nested virtualisation on their Virtual Machines (VMs): DigitalOcean, Google Cloud Platform (GCP) (new customers get 300 USD of free credit), and Azure.

    +
  4. +
  5. +

    Bare-metal on-premises (cheap, convenient, high performance)

    +

    Running bare-metal on-premises is a cost-effective and convenient way to re-use existing hardware investment.

    +

    The machine could be racked in your server room, under your desk, or in a co-location datacenter.

    +

    You can use inlets to expose your agent to actuated.

    +

Make sure you segment or isolate the agent into its own subnet, VLAN, DMZ, or VPC so that it cannot access the rest of your network. If you are thinking of running an actuated runner at home, we have suggested iptables rules that worked well for our own testing.
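
As an illustration only (these are not the suggested rules mentioned above), outbound traffic from the runner host to private LAN ranges could be dropped with iptables. Adjust or remove any ranges the host legitimately needs to reach:

    sudo iptables -A OUTPUT -d 192.168.0.0/16 -j DROP
    sudo iptables -A OUTPUT -d 172.16.0.0/12 -j DROP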

    +
  6. +
+

Arm

+

64-bit Arm is also known as both aarch64 and arm64.

+

Arm CPUs are highly efficient when it comes to power consumption and pack in many more cores than the typical x86_64 CPU. This makes them ideal for running many builds in parallel. In typical testing, we've seen Arm builds running under emulation taking 35-45 minutes being reduced to 1-3 minutes total.

+

For Fluent Bit, a build that was failing after 6 hours using QEMU completed in just 4 minutes using actuated and an Ampere Altra server.

+
    +
  1. +

    Arm on-demand, in the cloud

    +

    For ARM64, Hetzner provides outstanding value in their RX-Line with 128GB / 256GB RAM coupled with NVMe and 80 cores for around 200 EUR / mo. These are Ampere Altra Servers. There is a minimum commitment of one month, and an initial setup cost per server.

    +

    We have several customers running Intel/AMD and Arm builds on Hetzner who have been very happy. Stock can take anywhere between hours, days or weeks to be delivered, and could run out, so check their status page before ordering.

    +

    Glesys have the Ampere Altra Q80-26 available for roughly €239 / mo. They are a very similar price to Hetzner and are based in Sweden.

    +

PhoenixNAP just started to stock the Ampere Altra Q80-30 as of June 2023. These can be bought on an hourly, monthly or annual commitment with a varying discount. The range was between 600-700 USD / mo.

    +

Following on from that, you have the a1.metal instance on AWS with 16 cores and 30GB of RAM for roughly 0.4 USD / hour, and roughly half that cost with a 1-year reservation. The a1.metal is the first generation of Graviton and, in our testing with customers, came up quite a bit slower than Ampere or Graviton 3. On the plus side, these machines are cheap, and if you're already on AWS, they may be an easier place to start. GP3 volumes or provisioned IOPS may increase performance over the default GP2 volumes. Reach out to us for more information.

    +

    For responsive support, faster uplinks, API-provisioning, per-minute billing and enterprise-grade networking, take a look at the c3.large.arm64 (Ampere Altra) from Equinix Metal. These machines come in at around 2.5 USD / hour, but are packed out with many cores and other benefits. You can usually provision these servers in the Washington DC and Dallas metros. Cloud Native Computing Foundation (CNCF) projects may be able to apply for free credits from Equinix Metal.

    +
  2. +
  3. +

    Arm for on-premises

    +

    For on-premises ARM64 builds, we recommend the Mac Mini M1 (2020) with 16GB RAM and 512GB storage with Asahi Linux. The M2 is unable to run Linux at this time.

    +

Ampere and their partners also offer 1U and 2U servers, along with desktop-form workstations which can be racked or installed in your office.

    +

    The Raspberry Pi 4 also works when used with an external NVMe, and in one instance was much faster than using emulation with a Hosted GitHub Runner.

    +
  4. +
  5. +

    Arm VMs with nested virtualisation

    +

    The current generations of Arm CPUs available from cloud providers do not support KVM, or nested virtualisation, which means you need to pick from the previous two options.

    +

    There are Arm VMs available on Azure, GCP, and Oracle OCI. We have tested each and since they are based on the same generation of Ampere Altra hardware, we can confirm that they do not have KVM available and will not work for running actuated builds.

    +
  6. +
+

Want to talk to us?

+

Still not sure which option is right for your team? Get in touch with us on the Actuated Slack and we'll help you decide.

+

Next steps

+

Now that you've created a Server or VM with the recommended Operating System, you'll need to install the actuated agent and get in touch with us, to register it.

+ + + + + + + + + +
+
+ + +
+ +
+ + + +
+
+
+
+ + + + + + + + + \ No newline at end of file diff --git a/register/index.html b/register/index.html new file mode 100644 index 0000000..ac299df --- /dev/null +++ b/register/index.html @@ -0,0 +1,1076 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + Register your GitHub Organisation - Actuated + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + +

Register your GitHub Organisation

+

Actuated is a managed service, where you plug in your own servers, and we do everything else needed to create and manage self-hosted runners for your GitHub Actions.

+

Plans are paid monthly, without any minimum commitment.

+

What you'll need

+
    +
  • A GitHub organisation
  • +
  • One or more public or private repositories hosted in the organisation
  • +
  • Administrative access to install the actuated GitHub App
  • +
  • A company credit-card to pay for your subscription
  • +
  • One or more bare-metal servers (we'll recommend the best fit for you during onboarding)
  • +
+

We'll guide you through the process

+

Onboarding steps

+
+

We'll walk you through the onboarding process and answer all your questions, so that you can be up and running straight away.

+
+

We've now run over 245,000 VMs for commercial teams, and there's very little for you to do to get started, in most cases, we've seen a 2-3x speed up for x86_64 builds by switching one line in a workflow: runs-on: actuated-4cpu-16gb. For Arm builds, native hardware makes a night and day difference.

+

Book a call with us

+

Some engineers hate talking to sales people. You're not alone, and this is not a sales call, nor will you be speaking to sales people. Our pricing is public, and paid month to month by corporate card.

+

The purpose of a call is to understand your goals, and help you pick the best server hardware, hosting company, and subscription plan for your usage.

+

Before the call, generate a usage report with our open-source actions-usage tool for at least 7 days, and either send it over via email or share it with us on the call. It'll help us make a better recommendation.
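
For example, assuming a GitHub Personal Access Token saved to ~/pat.txt (check the actions-usage README for the exact flags):

    actions-usage --org your-org --token-file ~/pat.txt > report.txt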

+

Talk to us

+

Install the GitHub App

+

An administrator from your GitHub organisation will need to install the actuated GitHub App. GitHub Apps provide fine-grained access controls for third-parties integrating with GitHub.

+

Learn more in the FAQ.

+
+

End User License Agreement (EULA)

+

Make sure you've read the Actuated EULA before registering your organisation with the actuated GitHub App.

+
+
    +
  1. Click on the Actuated GitHub App
  2. +
  3. Click Install app
  4. +
  5. Select the organisation you want to install the Actuated app to
  6. +
  7. +

    Install the app on all repositories or select repositories

    +

    Install GitHub app

    +
  8. +
  9. +

Once installed, you will see the permissions and other configuration options for the Actuated GitHub App on your selected account. If you have multiple organisations, you will need to authorise each one.

    +
  10. +
+

To remove or update the Actuated GitHub App, navigate to "Settings" for your organisation and click on "GitHub Apps" in the left sidebar. Select Actuated from the list and click "Configure".

+

Next steps

+

Now that you've installed the GitHub App, and picked a subscription plan:

+ + + + + + + + + +
+
+ + +
+ +
+ + + +
+
+
+
+ + + + + + + + + \ No newline at end of file diff --git a/roadmap/index.html b/roadmap/index.html new file mode 100644 index 0000000..6111c8b --- /dev/null +++ b/roadmap/index.html @@ -0,0 +1,1062 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + Roadmap - Actuated + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + +

Roadmap

+

Actuated is in a pilot phase, running builds for participating customers. The core experience is functioning and we are dogfooding it for actuated itself, OpenFaaS Pro and inlets.

+

Our goal with the pilot is to prove that there's market fit for a solution like this, and if so, we'll invest more time in automation, user experience, agent autoscaling, dashboards and other polish.

+

The technology being used is run at huge scale in production by AWS (on Lambda and Fargate) and GitHub (self-hosted runners use the same runner software and control plane).

+

We believe actuated solves a real problem making CI fast, efficient, and multi-arch.

+

If you'd like to try it out for your team, Register interest for the pilot.

+

Shipped

+
    +
  • Firecracker MicroVM support for runners
  • +
  • Secure builds for both public and private repos
  • +
  • Fat VM image to match tooling installed by GitHub Actions
  • +
  • KinD support for runner's Kernel
  • +
  • K3s support for runner's Kernel
  • +
  • ARM64 support, including Raspberry Pi 4B
  • +
  • Efficient scheduling of jobs across fleet of agents
  • +
  • Samples for K3s/KinD/Matrix builds and OpenFaaS functions
  • +
  • Subscription plans delivered by Gumroad
  • +
  • API for reviewing connected agents and queue depth
  • +
  • Job event auditing for review via API
  • +
  • Documentation site with detailed GitHub Actions examples
  • +
  • Customer dashboard UI to show connected agents and build queue
  • +
  • Official website actuated.dev
  • +
  • Remote / automated update of agents via control plane
  • +
  • Blog feature on actuated.dev with news, tutorials and updates from our team
  • +
  • Performance testing for Ionos & Scaleway for cost effective AMD bare-metal
  • +
  • Daily build statistics on your dashboard
  • +
  • Docker cache directly on the Actuated Hosts (servers) for much faster builds and avoiding rate-limiting
  • +
  • Subscriptions: migration to LemonSqueezy for lower fees, and more payment options
  • +
  • Dashboard - animation on all data pages for better feedback when refreshing data
  • +
  • Detailed insights across your organisation on usage
  • +
  • Detailed insights across your repos
  • +
  • Detailed insights by committer
  • +
  • Integrated SSH debug for runners within dashboard and CLI
  • +
  • At a glance insights for the day's activity so far
  • +
  • CLI/API for remote logs of VMs and the actuated agent
  • +
  • CLI/API for restarting the agent and rebooting a server
  • +
  • Examples for using S3/Minio running on the server as an actions cache, instead of the default hosted cache within Azure
  • +
  • Specify a custom runner size for an individual workflow - i.e. actuated-8cpu-12gb
  • +
  • Specify actuated-any to run jobs on any available server whether amd64 or arm64, for architecture-agnostic workflows such as npm or for browser testing.
  • +
  • GPU pass-through for ML and AI workloads - Accelerate GitHub Actions with dedicated GPUs - Run AI models with ollama in CI with GitHub Actions
  • +
  • Linux Kernel 6.1 for 64-bit Arm
  • +
  • Burst above subscription concurrency for busy periods - Introducing burst billing and capacity for GitHub Actions
  • +
+

In progress:

+
    +
  • Actuated for self-hosted GitLab. (see below section)
  • +
+

Coming next:

+
    +
  • Linux Kernel 6.1 for x86_64
  • +
  • Support for private, self-hosted GitHub Enterprise Server (GHES) installations
  • +
+

Open for customer contributions:

+
    +
  • Examples for setting up an apt/yum mirror for faster builds
  • +
  • Example for configuring two different Docker pull through registries instead of just one.
  • +
+

Under consideration:

+
    +
  • Custom CA for self-hosted S3, Minio, Docker Registries, apt/yum mirrors, etc.
  • +
  • Summary of CPU/RAM/disk consumption of builds
  • +
  • Right-sizing of build VMs based upon prior build history
  • +
  • Automated agent installation and bootstrap
  • +
  • Actuated for Jenkins
  • +
+

Items marked under consideration are awaiting customer interest. Reach out to us if you'd like to see these features sooner.

+

Is there something else you need? If you're already a customer, contact us via the actuated Slack or Register for interest.

+

Actuated for GitLab

+

Learn about the tech preview

+

Ready for use by customers:

+
    +
  • Actuated integration with self-hosted GitLab CI either on-premises or on the cloud
  • +
  • Ephemeral one-time runners with their own dedicated Docker Daemon
  • +
  • Immutable VM image for each build, built with automation
  • +
  • Schedule jobs across multiple bare-metal hosts or VMs with KVM available
  • +
  • Custom VM size scheduling
  • +
  • Manual enrollment of projects as required
  • +
+

Coming soon:

+
    +
  • Automatic enrollment of projects as required, when the actuated tag has been added
  • +
  • actuated-cli integration
  • +
  • actuated dashboard - daily glance, runners and build queue
  • +
+

Coming later:

+
    +
  • actuated dashboard - SSH debug, insights / reports
  • +
+

Register your interest if you'd like to talk to our team.

+ + + + + + + + +
+
+ + +
+ +
+ + + +
+
+
+
+ + + + + + + + + \ No newline at end of file diff --git a/search/search_index.json b/search/search_index.json new file mode 100644 index 0000000..c2f301c --- /dev/null +++ b/search/search_index.json @@ -0,0 +1 @@ +{"config":{"lang":["en"],"separator":"[\\s\\-]+","pipeline":["stopWordFilter"]},"docs":[{"location":"","title":"actuated","text":"

Actuated brings blazingly fast, secure builds to self-hosted CI runners.

"},{"location":"#building-containers-on-self-hosted-runners-is-slow-and-insecure","title":"Building containers on self-hosted runners is slow and insecure","text":"

Most solutions that use containers for running Docker or Kubernetes in CI have very poor security boundaries. They require either privileged containers (root on the host), a shared Docker socket (root on the host), third-party tools which don't integrate well and still require root to mount folders, or user namespaces which come with their own limitations. The chances are, if you use Docker or K8s in your CI, and run with: actions-runner-controller, Jenkins, or GitLab, then you may be compromising on security or user experience.

"},{"location":"#management-is-a-nightmare","title":"Management is a nightmare","text":"

Self-hosted CI runners are continually out of date, and require fine-tuning to get all the right packages in place and Kernel modules to build containers and cloud-native software. You'll also have to spend extra time making sure builds don't conflict, and that they can't cause side effects to system-level packages. What if you need two different versions of some software?

If you haven't felt this pain yet, then perhaps you're blissfully unaware or are not updating your packages?

Are you running privileged containers for CI in your organisation? Are you sharing a Docker Socket (just as bad!)? Are you running Docker in Docker (DIND)? \ud83d\ude48

"},{"location":"#self-managed-runners-are-inefficient-and-overprovisioned","title":"Self-managed runners are inefficient and overprovisioned","text":"

Self-hosted runners are typically over-provisioned meaning you're spending too much money.

Why are they over-provisioned? Because you never know how many jobs you'll have to run, you have to make them bigger, or have too many hosts available.

Why are they inefficient?

By default, the self-hosted runner will only schedule one job per host at a time, because GitHub has no knowledge of the capacity of your machines. So each and every build you run could consume all the resources on the host. The second reason is that builds often conflict with one another causing side effects that only happen in CI and are really difficult to track down and reproduce.

Actuated uses VMs to slice up the whole machine, and can run many builds in parallel. The net effect is that your build queue will get cleared down much more quickly.

"},{"location":"#hands-free-vm-level-isolation","title":"Hands-free, VM-level isolation","text":"

Actuated provides a fast-booting microVM which can run Docker, Kubernetes and anything else you need, with full root on the VM, and no access to the host. Each environment is created just in time to take a build, and is removed immediately after.

Boot time is usually ~1-2 seconds for the VM; the extra second is because we start Docker as part of the boot-up process.

What does \"actuated\" mean?

Something that activates or impels itself; specifically (a machine, device, etc.) that causes itself to begin operating automatically, self-activating.

We maintain a VM image that is updated regularly through an automated build, so you don't have to install SDKs, runtimes or language packs on your build machines.

Just enable automated updates on your server then install the actuated agent. We'll do the rest including managing efficient allocation across your fleet of servers, and updating the CI image.

And actuated will run your jobs efficiently across a fleet of hosts, or a single machine. They each need to be either bare-metal hosts (think: AWS Metal / Graviton, Equinix Metal, etc), or support nested virtualization (a feature available on GCP and DigitalOcean)

"},{"location":"#what-people-are-saying","title":"What people are saying","text":"
  • \"We've been piloting Actuated recently. It only took 30s create 5x isolated VMs, run the jobs and tear them down again inside our on-prem environment (no Docker socket mounting shenanigans)! Pretty impressive stuff.\"

    Addison van den Hoeven - DevOps Lead, Riskfuel

  • \"This is great, perfect for jobs that take forever on normal GitHub runners. I love what Alex is doing here.\"

    Richard Case, Principal Engineer, SUSE

  • \"Thank you. I think actuated is amazing.\"

    Alan Sill, NSF Cloud and Autonomic Computing (CAC) Industry-University Cooperative Research Center

  • \"Nice work, security aspects alone with shared/stale envs on self-hosted runners.\"

    Matt Johnson, Palo Alto Networks

  • \"Is there a way to pay github for runners that suck less?\"

    Darren Shepherd, Acorn Labs

  • \"Excited to try out actuated! We use custom actions runners and I think there's something here \ud83d\udd25\"

    Nick Gerace, System Initiative

  • \"It is awesome to see the work of Alex Ellis with Firecracker VMs. They are provisioning and running GitHub Actions in isolated VMs in seconds (vs minutes).\"

    Rinat Abdullin, ML & Innovation at Trustbit

  • \"This is awesome!\" (After reducing Parca build time from 33.5 minutes to 1 minute 26s)

    Frederic Branczyk, Co-founder, Polar Signals

"},{"location":"#watch-a-live-demo","title":"Watch a live demo","text":"

Alex shows you how actuated uses an isolated, immutable microVM to run K3s inside of a GitHub Action, followed by a matrix build that causes 5 VMs to be launched. You'll see how quick and easy it is to enable actuated, and how it can buffer and queue up jobs, when there's no remaining capacity in your fleet of agents.

You can also watch a webinar that Alex recorded with Richard Case from Weaveworks on how microVMs compare to containers and legacy VMs, you'll see Alex's demo at: 1:13:19.

"},{"location":"#conceptual-overview","title":"Conceptual overview","text":"

Actuated will schedule builds across your fleet of agents, packing them in densely, without overloading the host. Each microVM will run just one build before being destroyed to ensure a clean, isolated build.

Learn more in the FAQ

"},{"location":"#get-started","title":"Get started","text":"
  • Start a subscription or book a call to find out more
  • Read the FAQ
  • Enable actuated for an existing repository
  • Read more in the announcement: Blazing fast CI with MicroVMs
"},{"location":"#comparison","title":"Comparison","text":"

Feel free to book a call with us if you'd like to understand this comparison in more detail.

Solution Isolated VM Speed Efficient spread of jobs Safely build public repos? ARM64 support Maintenance required Cost Hosted runners Poor None Free minutes in plan 2 Per build minute actuated Bare-metal Yes Very little Fixed monthly cost Standard self-hosted runners Good DIY Manual setup and updates OSS plus management costs actions-runner-controller Varies 1 DIY Very involved OSS plus management costs

1 actions-runner-controller requires use of separate build tools such as Kaniko, which break the developer experience of using docker or docker-compose. If Docker in Docker (DinD) is used, then there is a severe performance penalty and security risk.

2 Builds on public GitHub repositories are free with the standard hosted runners, however private repositories require billing information, after the initial included minutes are consumed.

You can only get VM-level isolation from either GitHub hosted runners or Actuated. Standard self-hosted runners have no isolation between builds, and actions-runner-controller requires either a Docker socket to be mounted or Docker In Docker (a privileged container) to build and run containers.

"},{"location":"#got-questions-comments-or-suggestions","title":"Got questions, comments or suggestions?","text":"

actuated is a trademark of OpenFaaS Ltd.

You can contact the team working on actuated via email at: contact@openfaas.com

Follow @selfactuated on Twitter for updates and announcements

"},{"location":"contact/","title":"Contact us","text":""},{"location":"contact/#contact-us","title":"Contact us","text":"

Would you like to contact us about actuated for your team or organisation?

Fill out this form, and we'll get in touch shortly after with next steps.

Actuated \u2122 is a trademark of OpenFaaS Ltd.

"},{"location":"contact/#keeping-in-touch","title":"Keeping in touch","text":"
  • Follow us on Twitter - @selfactuated
  • GitHub - github.com/self-actuated
  • Customer Slack - self-actuated.slack.com
"},{"location":"contact/#anything-else","title":"Anything else?","text":"

Looking for technical details about actuated? Try the FAQ.

Are you running into a problem? Try the troubleshooting guide

"},{"location":"dashboard/","title":"Actuated Dashboard","text":"

The actuated dashboard is available to customers for their enrolled organisations.

For each organisation, you can see:

  • Today's builds so far - a quick picture of today's activity across all enrolled organisations
  • Runners - Your build servers and their status
  • Build queue - All builds queued for processing and their status
  • Insights - full build history and usage by organisation, repo and user
  • Job Increases - a list of jobs that have increased in duration by more than 5 minutes, plus a list of the affected jobs for that week

Plus:

  • CLI - install the CLI for management via command line
  • SSH Sessions - connect to a runner via SSH to debug issues or to explore - works on hosted and actuated runners
"},{"location":"dashboard/#todays-builds-so-far","title":"Today's builds so far","text":"

On this page, you'll get today's total builds, total build minutes and a breakdown of the statuses - to see whether you have a majority of successful or unsuccessful builds.

Today's activity at a glance

Underneath this section, there are a number of tips for enabling a container cache, adjusting your subscription plan and for joining the actuated Slack.

More detailed reports are available on the insights page.

"},{"location":"dashboard/#runners","title":"Runners","text":"

Here you'll see whether any of your servers are offline, in a draining status due to a restart or update, or online and ready to process builds.

The Ping time is how long it takes for the control-plane to check the agent's capacity.

"},{"location":"dashboard/#build-queue","title":"Build queue","text":"

Only builds that are queued (not yet in progress) or already in progress will be shown on this page.

Find out how many builds are pending or running across your organisation and on which servers.

"},{"location":"dashboard/#insights","title":"Insights","text":"

Three sets of insights are offered - all at the organisation level, so every repository is taken into account.

You can also switch the time window between 28 days, 14 days, 7 days or today.

The data is contrasted to the previous period to help you identify spikes and potential issues.

The data for reports always starts from the last complete day of data, so the last 7 days will start from the previous day.

"},{"location":"dashboard/#build-history-and-usage-by-organisation","title":"Build history and usage by organisation.","text":"

Understand when your builds are running at a higher level - across all of your organisations - in one place.

You can click on Minutes to switch to total time instead of total builds, to see if the demand on your servers is increasing or decreasing over time.

"},{"location":"dashboard/#build-history-by-repository","title":"Build history by repository","text":"

When viewing usage at a repository-level, you can easily identify anomalies and hot spots - like mounting build times, high failure rates or lots of cancelled jobs - implying a potential faulty interaction or trigger.

"},{"location":"dashboard/#build-history-per-user","title":"Build history per user","text":"

This is where you get to learn who is triggering the most builds, who may be a little less active for the period, and where folks may benefit from additional training due to a high failure rate of builds.

"},{"location":"dashboard/#job-increases","title":"Job Increases","text":"

For up to 120 days of history, you can find jobs that have increased in duration by more than 5 minutes, week-by-week. This feature was requested by a team whose builds were roughly 60 minutes each on GitHub's hosted runners, and 20 minutes each on actuated. They didn't want those times to creep up without it being noticed and rectified.

Insights on outliers showing the time that the job increased by, and a button to drill down into the affected jobs that week.

When you click \"Inspect\", a plot will be drawn with the maximum build time recorded on the days of the affected week. You can then click \"View Job\" to see what commit, Pull Request, or configuration change may have caused the increase.

A plot with the longest job run on each day of the affected week

"},{"location":"dashboard/#ssh-sessions","title":"SSH Sessions","text":"

Once you configure an action to pause at a set point by introducing our custom GitHub action step, you'll be able to copy and paste an SSH command and run it in your terminal.

Your SSH keys will be pre-installed and no password is required.

Viewing an SSH session to a hosted runner

See also: Example: Debug a job with SSH

"},{"location":"dashboard/#cli","title":"CLI","text":"

The CLI page has download instructions; you can find the downloads for Linux, macOS and Windows here:

self-actuated/actuated-cli

"},{"location":"expose-agent/","title":"Expose agent","text":""},{"location":"expose-agent/#expose-the-agents-api-over-https","title":"Expose the agent's API over HTTPS","text":"

The actuated agent serves HTTP, and is accessed by the Actuated control plane.

We expect most of our customers to be using hosts with public IP addresses, and an API token plus TLS is a battle-tested combination.

For anyone running with private hosts within a firewall, a private peering option is available for enterprise companies, or our inlets network tunnel can be used with an IP allow list.

"},{"location":"expose-agent/#for-a-host-on-a-public-cloud","title":"For a host on a public cloud","text":"

If you're running the agent on a host with a public IP, you can use the built-in TLS mechanism in the actuated agent to receive a certificate from Let's Encrypt, valid for 90 days. The certificate will be renewed by the actuated agent, so there are no additional administration tasks required.

The installation will automatically configure the below settings. They are included just for reference, so you can understand what's involved or tweak the settings if necessary.

Pictured: Accessing the agent's endpoint with built-in TLS and Let's Encrypt

Determine the public IP of your instance:

# curl -s http://checkip.amazonaws.com\n\n141.73.80.100\n

Now imagine that your sub-domain is agent.example.com: you need to create a DNS A or CNAME record pointing agent.example.com to 141.73.80.100, changing both the sub-domain and IP to your own.
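You can verify the record before going any further, for instance with dig; substitute your own sub-domain, and expect your own IP in the output:

# dig +short agent.example.com\n141.73.80.100\n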

Once the agent is installed, edit /etc/default/actuated on the agent and set the following two variables:

AGENT_LETSENCRYPT_DOMAIN=\"agent.example.com\"\nAGENT_LETSENCRYPT_EMAIL=\"webmaster@agent.example.com\"\n

Restart the agent:

sudo systemctl daemon-reload\nsudo systemctl restart actuated\n

Your agent's endpoint URL is going to be: https://agent.example.com on port 443

"},{"location":"expose-agent/#private-hosts-private-peering-for-enterprises","title":"Private hosts - private peering for enterprises","text":"

For enterprise customers, we can offer private peering of the actuated agent for when your servers are behind a corporate firewall, or have no inbound Internet access.

Peering example for an enterprise with two agents within their own private firewalls.

This option is built into the actuated agent, and requires no additional setup, firewall or routing rules. It's similar to how the GitHub Actions agent works, creating an outbound connection without relying on any inbound data path.

  • The client makes an outbound connection to the Actuated control-plane.
  • If, for any reason, the connection gets closed or severed, it will reconnect automatically.
  • All traffic is encrypted with HTTPS.
  • Only the Actuated control-plane will be able to communicate with the agent, privately.
"},{"location":"expose-agent/#private-hosts-behind-nat-or-at-the-office","title":"Private hosts - behind NAT or at the office","text":"

The default way to configure a server for actuated is to have its HTTPS endpoint available on the public Internet. A quick and easy way to do that is with our inlets network tunnel tool. This works by creating a VM with a public IP address, then connecting a client from your private network to the VM. Then the port on the private machine becomes available on the public VM for the Actuated control-plane to access as required.

An IP allow-list can also be configured with the egress IP address of the Actuated control-plane. We will provide the egress IP address upon request to customers.

Pictured: Accessing the agent's private endpoint using an inlets-pro tunnel

Reach out to us if you'd like us to host a tunnel server for you; alternatively, you can follow the instructions below to set up your own.

The inletsctl tool will create an HTTPS tunnel server for you on your favourite cloud, with a TLS certificate obtained from Let's Encrypt.

If you have just the one Actuated Agent:

export AGENT_DOMAIN=agent1.example.com\nexport LE_EMAIL=webmaster@agent1.example.com\n\narkade get inletsctl\nsudo mv $HOME/.arkade/bin/inletsctl /usr/local/bin/\n\ninletsctl create \\\n--provider digitalocean \\\n--region lon1 \\\n--token-file $HOME/do-token \\\n--letsencrypt-email $LE_EMAIL \\\n--letsencrypt-domain $AGENT_DOMAIN\n

Then note down the tunnel's wss:// URL and token.

If you wish to configure an IP allow list, log into the VM with SSH and then edit the systemd unit file for inlets-pro. Add the actuated controller egress IP as per these instructions.

Then run a HTTPS client to expose your agent:

inlets-pro http client \\\n--url $WSS_URL \\\n--token $TOKEN \\\n--upstream http://127.0.0.1:8081\n

For two or more Actuated Servers:

export AGENT_DOMAIN1=agent1.example.com\nexport AGENT_DOMAIN2=agent2.example.com\nexport LE_EMAIL=webmaster@agent1.example.com\n\narkade get inletsctl\nsudo mv $HOME/.arkade/bin/inletsctl /usr/local/bin/\n\ninletsctl create \\\n--provider digitalocean \\\n--region lon1 \\\n--token-file $HOME/do-token \\\n--letsencrypt-email $LE_EMAIL \\\n--letsencrypt-domain $AGENT_DOMAIN1 \\\n--letsencrypt-domain $AGENT_DOMAIN2\n

Then note down the tunnel's wss:// URL and token.

Then, to expose each agent, run the inlets-pro HTTPS client on each of the Actuated Servers, using that server's unique agent domain:

export AGENT_DOMAIN1=agent1.example.com\ninlets-pro http client \\\n--url $WSS_URL \\\n--token $TOKEN \\\n--upstream $AGENT_DOMAIN1=http://127.0.0.1:8081\n
export AGENT_DOMAIN2=agent2.example.com\ninlets-pro http client \\\n--url $WSS_URL \\\n--token $TOKEN \\\n--upstream $AGENT_DOMAIN2=http://127.0.0.1:8081\n

You can generate a systemd service (so that inlets restarts after a disconnection, or a reboot) by adding --generate=systemd > inlets.service to the client command, as sketched below.
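A minimal sketch, reusing the WSS_URL, TOKEN and AGENT_DOMAIN1 variables from the client command above; the extra flag simply writes the unit file out to inlets.service:

inlets-pro http client \\\n--url $WSS_URL \\\n--token $TOKEN \\\n--upstream $AGENT_DOMAIN1=http://127.0.0.1:8081 \\\n--generate=systemd > inlets.service\n

Then install and start the unit: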

sudo cp inlets.service /etc/systemd/system/\nsudo systemctl daemon-reload\nsudo systemctl enable inlets.service\nsudo systemctl start inlets\n\n# Check status with:\nsudo systemctl status inlets\n

Your agent's endpoint URL is going to be: https://$AGENT_DOMAIN.

"},{"location":"expose-agent/#preventing-the-runner-from-accessing-your-local-network","title":"Preventing the runner from accessing your local network","text":"

Network segmentation

Proper network segmentation of hosts running the actuated agent is required. This is to prevent runners from making outbound connections to other hosts on your local network. We will not accept any responsibility for your configuration.

If hardware isolation is not available, iptables rules may provide an alternative for isolating the runners from your network.

Imagine you were using a LAN range of 192.168.0.0/24, with a router of 192.168.0.1, then the following probes and tests show that the runner cannot access the host 192.168.0.101, and that nmap's scan will come up dry.

We add a rule to allow access to the router, but reject packets going via TCP or UDP to any other hosts on the network.

sudo iptables --insert CNI-ADMIN \\\n--destination  192.168.0.1 --jump ACCEPT\nsudo iptables --insert CNI-ADMIN \\\n--destination  192.168.0.0/24 --jump REJECT -p tcp  --reject-with tcp-reset\nsudo iptables --insert CNI-ADMIN \\\n--destination  192.168.0.0/24 --jump REJECT -p udp --reject-with icmp-port-unreachable\n

You can test the efficacy of these rules by running nmap, mtr, ping and any other probing utilities within a GitHub workflow.

name: CI\n\non:\npull_request:\nbranches:\n- '*'\npush:\nbranches:\n- master\n- main\n\njobs:\nspecs:\nname: specs\nruns-on: actuated-4cpu-12gb\nsteps:\n- uses: actions/checkout@v1\n- name: addr\nrun: ip addr\n- name: route\nrun: ip route\n- name: pkgs\nrun: |\nsudo apt-get update && \\\nsudo apt-get install traceroute mtr nmap netcat -qy\n- name: traceroute\nrun: traceroute  192.168.0.101\n- name: Connect to ssh\nrun: echo | nc  192.168.0.101 22\n- name: mtr\nrun: mtr -rw  -c 1  192.168.0.101\n- name: nmap for SSH\nrun: nmap -p 22  192.168.0.0/24\n- name: Ping router\nrun: |\nping -c 1  192.168.0.1\n- name: Ping 101\nrun: |\nping -c 1  192.168.0.101\n
"},{"location":"faq/","title":"Frequently Asked Questions (FAQ)","text":""},{"location":"faq/#how-does-it-work","title":"How does it work?","text":"

Actuated has three main parts:

  1. an agent, which knows how to run VMs; you install this on your hosts
  2. a VM image and Kernel that we build which has everything required for Docker, KinD and K3s
  3. a multi-tenant control plane that we host, which tells your agents to start VMs and register a runner on your GitHub organisation

The multi-tenant control plane is run and operated by OpenFaaS Ltd as a SaaS.

The conceptual overview showing how a MicroVM is requested by the control plane.

MicroVMs are only started when needed, and are registered with GitHub by the official GitHub Actions runner, using a short-lived registration token. The token is encrypted with the public key of the agent. This ensures no other agent could use the token to bootstrap a runner to the wrong organisation.

Learn more: Self-hosted GitHub Actions API

"},{"location":"faq/#glossary","title":"Glossary","text":"
  • MicroVM - a lightweight, single-use VM that is created by the Actuated Agent, and is destroyed after the build is complete. Common examples include firecracker by AWS and Cloud Hypervisor
  • Guest Kernel - a Linux kernel that is used together with a Root filesystem to boot a MicroVM and run your CI workloads
  • Root filesystem - an immutable image maintained by the actuated team containing all necessary software to perform a build
  • Actuated ('Control Plane') - a multi-tenant SaaS run by the actuated team responsible for scheduling MicroVMs to the Actuated Agent
  • Actuated Agent - the software component installed on your Server which runs a MicroVM when instructed by Actuated
  • Actuated Server ('Server') - a server on which the Actuated Agent has been installed, where your builds will execute.
"},{"location":"faq/#how-does-actuated-compare-to-a-self-hosted-runner","title":"How does actuated compare to a self-hosted runner?","text":"

A self-hosted runner is a machine on which you've installed and registered a GitHub runner.

Quite often, these machines suffer from some, if not all, of the following issues:

  • They require several hours to get all the required packages correctly installed to mirror a hosted runner
  • You never update them out of fear of wasting time or breaking something which is working, meaning your supply chain is at risk
  • Builds clash: if you're building a container image or running a KinD cluster, names will clash and dirty state will be left over

We've heard in user interviews that the final point of dirty state can cause engineers to waste several days of effort chasing down problems.

Actuated uses a one-shot VM that is destroyed immediately after a build is completed.

"},{"location":"faq/#who-is-actuated-for","title":"Who is actuated for?","text":"

actuated is primarily for software engineering teams who are currently using GitHub Actions or GitLab CI.

  • You can outsource your CI infrastructure to the actuated team
  • You'll get VM-level isolation, with no risks of side-effects between builds
  • You can run on much faster hardware
  • You'll get insights on how to fine-tune the performance of your builds
  • And save a significant amount of money vs. larger hosted runners if you use 10s or 100s of thousands of minutes per month

For GitHub users, a GitHub organisation is required for installation, and runners are attached to individual repositories as required to execute builds.

"},{"location":"faq/#is-there-a-sponsored-subscription-for-open-source-projects","title":"Is there a sponsored subscription for Open Source projects?","text":"

We have a sponsored program with the CNCF and Ampere for various Open Source projects, you can find out more here: Announcing managed Arm CI for CNCF projects.

Sponsored projects are required to add our GitHub badge to the top of their README file for each repository where actuated is being used, along with any other GitHub badges such as build status, code coverage, etc.

[![Arm CI sponsored by Actuated](https://img.shields.io/badge/SA_actuated.dev-004BDD)](https://actuated.dev/)\n

or

<a href=\"https://actuated.dev/\"><img alt=\"Arm CI sponsored by Actuated\" src=\"https://docs.actuated.dev/images/actuated-badge.png\" width=\"120px\"></img></a>\n

For an example of what this would look like, see the inletsctl project README.

"},{"location":"faq/#what-kind-of-machines-do-i-need-for-the-agent","title":"What kind of machines do I need for the agent?","text":"

You'll need either: a bare-metal host (your own machine, Hetzner Dedicated, Equinix Metal, etc), or a VM that supports nested virtualisation such as those provided by OpenStack, GCP, DigitalOcean, Azure, or VMware.

See also: Provision a Server section

"},{"location":"faq/#when-will-jenkins-gitlab-ci-bitbucket-pipeline-runners-drone-or-azure-devops-be-supported","title":"When will Jenkins, GitLab CI, BitBucket Pipeline Runners, Drone or Azure DevOps be supported?","text":"

Support for GitHub Actions and GitLab CI is available.

Unfortunately, other CI systems tend to expect runners to be available indefinitely, which is an anti-pattern. Why? They gather side-effects and often rely on the insecure use of Docker in Docker, privileged containers, or mounting the Docker socket.

If you'd like to migrate to GitHub Actions, or GitLab CI, feel free to reach out to us for help.

"},{"location":"faq/#is-github-enterprise-supported","title":"Is GitHub Enterprise supported?","text":"

GitHub.com's Pro, Team and Enterprise Cloud plans are supported.

GitHub Enterprise Server (GHES) is a self-hosted version of GitHub and requires additional onboarding steps. Please reach out to us if you're interested in using actuated with your installation of GHES.

"},{"location":"faq/#what-kind-of-access-is-required-to-my-github-organisation","title":"What kind of access is required to my GitHub Organisation?","text":"

GitHub Apps provide fine-grained privileges, access control, and event data.

Actuated integrates with GitHub using a GitHub App.

The actuated GitHub App will request:

  • Administrative access to add/remove GitHub Actions Runners to individual repositories
  • Events via webhook for Workflow Runs and Workflow Jobs

Did you know? The actuated service does not have any access to your code, or to your private or public repositories.

"},{"location":"faq/#can-githubs-self-hosted-runner-be-used-on-public-repos","title":"Can GitHub's self-hosted runner be used on public repos?","text":"

Actuated VMs can be used with public repositories, however the standard self-hosted runner when used stand-alone, with Docker, or with Kubernetes cannot.

The GitHub team recommends only running their self-hosted runners on private repositories.

Why?

I took some time to ask one of the engineers on the GitHub Actions team.

With the standard self-hosted runner, a bad actor could compromise the system or install malware, leaving side-effects for future builds.

He replied that it's difficult for maintainers to secure their repos and workflows, and that bad actors could compromise a runner host because runners execute multiple jobs and do not provide a fresh environment for each build. It may even be that a bad actor could scan the local network of the runner and attempt to gain access to other systems.

If you're wondering whether containers and Pods are a suitable isolation level, we would recommend against this, since it usually involves either mounting a docker socket or running Docker In Docker (DinD) with a privileged container; both can lead to escalation to root on the host.

So, can you use actuated on a public repo?

Our contact at GitHub stated that through VM-level isolation and an immutable VM image, the primary concern is resolved, because there is no way for state or side-effects from previous builds to be left over.

Actuated fixes the isolation problem, and prevents side-effects between builds. We also have specific iptables rules in the troubleshooting guide which will isolate your runners from the rest of the network.

"},{"location":"faq/#can-i-use-the-containers-feature-of-github-actions","title":"Can I use the containers feature of GitHub Actions?","text":"

Yes, it is supported, however it is not required, and may make it harder to debug your builds. We prefer and recommend running on the host directly, which gives better performance and a simpler experience. Common software and packages are already within the root filesystem; anything extra can be added with setup-X actions, arkade get, or arkade system install.

GitHub Action's Running jobs in a container feature is supported, as is Docker, Buildx, Kubernetes, KinD, K3s, eBPF, etc.

Example of running commands with the docker.io/node:latest image.

jobs:\nspecs:\nname: test\nruns-on: actuated-4cpu-12gb\ncontainer:\nimage: docker.io/node:latest\nenv:\nNODE_ENV: development\nports:\n- 3000\noptions: --cpus 1\nsteps:\n- name: Check Node.js version\nrun: node --version\n
"},{"location":"faq/#how-many-builds-does-a-single-actuated-vm-run","title":"How many builds does a single actuated VM run?","text":"

When a VM starts up, it runs the GitHub Actions Runner in ephemeral (aka one-shot) mode, so it can run at most one build. After that, the VM will be destroyed.

See also: GitHub: ephemeral runners

"},{"location":"faq/#how-are-vms-scheduled","title":"How are VMs scheduled?","text":"

VMs are placed efficiently across your Actuated Servers using a scheduling algorithm based upon the amount of RAM reserved for the VM.

Autoscaling of VMs is automatic. Let's say that you had 10 jobs pending but, given the RAM configuration, only enough capacity to run 8 of them. The remaining two would be queued until one or more of those 8 jobs completed and capacity was freed up.

If you find yourself regularly getting into a queued state, there are three potential changes to consider:

  1. Using Actuated Servers with more RAM
  2. Allocating less RAM to each job
  3. Adding more Actuated Servers

The plan you select will determine how many Actuated Servers you can run, so consider 1. and 2. before 3.

"},{"location":"faq/#do-i-need-to-auto-scale-the-actuated-servers","title":"Do I need to auto-scale the Actuated Servers?","text":"

Please read the section \"How are VMs scheduled\".

Auto-scaling Pods or VMs is a quick, painless operation that makes sense for customer traffic, which is generally unpredictable and can be very bursty.

GitHub Actions tends to be driven by your internal development team, with a predictable pattern of activity. It's unlikely to vary massively day by day, which means autoscaling is less important than with a user-facing website.

In addition to that, bare-metal servers can take 5-10 minutes to provision and may even include a setup fee or monthly commitment, meaning that what you're used to seeing with Kubernetes or AWS Autoscaling Groups may not translate well, or even be required for CI.

If you are cost sensitive, you should review the options under Provision a Server section.

Depending on your provider, you may also be able to hibernate or suspend servers on a cron schedule to save a few dollars. Actuated will hold jobs in a queue until a server is ready to take them again.
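As an illustrative sketch only: on hardware you control directly, a root crontab entry using rtcwake could power the machine down outside working hours, assuming the machine's RTC supports wake alarms; for cloud servers, use your provider's stop/start API instead:

# Power off at 20:00 on weekdays, wake again 12 hours later\n0 20 * * 1-5 /usr/sbin/rtcwake -m off -s 43200\n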

"},{"location":"faq/#what-do-i-need-to-change-in-my-workflows-to-use-actuated","title":"What do I need to change in my workflows to use actuated?","text":"

The changes to your workflow YAML file are minimal.

Just set runs-on to the actuated label plus the amount of CPUs and RAM you'd like. The order is fixed, but the values for vCPU/RAM are flexible and can be set as required.

You can set something like: runs-on: actuated-4cpu-16gb or runs-on: actuated-arm64-8cpu-32gb.
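As a minimal sketch, the whole change for an existing workflow is one line; the label below is an example, and the make test step stands in for your own build:

name: ci\n\non: push\n\njobs:\n  build:\n    runs-on: actuated-4cpu-16gb\n    steps:\n      - uses: actions/checkout@v4\n      - run: make test\n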

"},{"location":"faq/#is-64-bit-arm-supported","title":"Is 64-bit Arm supported?","text":"

Yes, actuated is built to run on both Intel/AMD and 64-bit Arm hosts, check your subscription plan to see if 64-bit Arm is included. This includes a Raspberry Pi 4B, AWS Graviton, Oracle Cloud Arm instances and potentially any other 64-bit Arm instances which support virtualisation.

"},{"location":"faq/#whats-in-the-vm-image-and-how-is-it-built","title":"What's in the VM image and how is it built?","text":"

The VM image contains similar software to the hosted runner image: ubuntu-latest offered by GitHub. Unfortunately, GitHub does not publish this image, so we've done our best through user-testing to reconstruct it, including all the Kernel modules required to run Kubernetes and Docker.

The image is built automatically using GitHub Actions and is available on a container registry.

The primary guest OS version is Ubuntu 22.04. Ubuntu 20.04 is available on request.

"},{"location":"faq/#what-kernel-version-is-being-used","title":"What Kernel version is being used?","text":"

The Firecracker team provides guest Kernel configurations. These may not be LTS, or the latest version available; however, they are fully functional for CI/CD use-cases and are known to work with Firecracker.

Stable Kernel version:

  • x86_64 - Linux Kernel 5.10.201
  • aarch64 - Linux Kernel 5.10.201

Experimental Kernel version:

  • aarch64 - Linux Kernel 6.1.90
"},{"location":"faq/#where-are-the-kernel-headers-includes","title":"Where are the Kernel headers / includes?","text":"

Warning

The following command is only designed for off-the-shelf cloud images of Ubuntu Server, and will not work on actuated.

apt-get install linux-headers-$(uname -r) 

For actuated, you'll need to take a different approach to build a DKMS or kmod module for your Kernel.

Add self-actuated/get-kernel-sources to your workflow and run it before your build step.

      - name: Install Kernel headers\nuses: self-actuated/get-kernel-sources@v1\n

An if statement can be added to the step if you also run the same job on other types of runners outside of actuated, as sketched below.
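For example, a sketch assuming the runner type is selected through a matrix variable; the startsWith() expression guards the step so it only runs on actuated runners:

jobs:\n  build:\n    strategy:\n      matrix:\n        runner: [actuated-4cpu-8gb, ubuntu-latest]\n    runs-on: ${{ matrix.runner }}\n    steps:\n      - name: Install Kernel headers\n        if: startsWith(matrix.runner, 'actuated')\n        uses: self-actuated/get-kernel-sources@v1\n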

"},{"location":"faq/#where-is-the-kernel-configuration","title":"Where is the Kernel configuration?","text":"

You can run a job to print out or dump the configuration from proc, or from /boot/.

Just create a new job, or an SSH debug session and run:

sudo modprobe configs\ncat /proc/config.gz | gunzip > /tmp/config\n\n# Look for a specific config option\ncat /tmp/config | grep \"CONFIG_DEBUG_INFO_BTF\"\n
"},{"location":"faq/#how-easy-is-it-to-debug-a-runner","title":"How easy is it to debug a runner?","text":"

OpenSSH is pre-installed, but it will be inaccessible from your workstation by default.

To connect, you can use an inlets tunnel, a Wireguard VPN, or a Tailscale ephemeral token (remember: Tailscale is not free for commercial use) to log into any agent.

We also offer an SSH gateway in some of our tiers; tell us if this is important to you in your initial contact, or reach out to us via email if you're already a customer.

See also: Debug a GitHub Action with SSH

"},{"location":"faq/#how-can-an-actuated-runner-get-iam-permissions-for-aws","title":"How can an actuated runner get IAM permissions for AWS?","text":"

If you need to publish images to Amazon Elastic Container Registry (ECR), you can either assign a role to any EC2 bare-metal instances that you're using with actuated, or use GitHub's built-in OpenID Connect support.

Web Identity Federation means that a job can assume a role within AWS using Secure Token Service (STS) without needing any long-lived credentials.

Read more: Configuring OpenID Connect in Amazon Web Services
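A minimal sketch using the aws-actions/configure-aws-credentials action with GitHub's OIDC support; the role ARN, region and runner label are placeholders for your own values, and the final step assumes the AWS CLI is present in the image:

permissions:\n  id-token: write\n  contents: read\n\njobs:\n  publish:\n    runs-on: actuated-4cpu-12gb\n    steps:\n      - uses: aws-actions/configure-aws-credentials@v4\n        with:\n          role-to-assume: arn:aws:iam::123456789012:role/github-actions-ecr\n          aws-region: eu-west-1\n      - name: Verify the assumed identity\n        run: aws sts get-caller-identity\n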

"},{"location":"faq/#comparison-to-other-solutions","title":"Comparison to other solutions","text":"

Feel free to book a call with us if you'd like to understand this comparison in more detail.

  • Hosted runners: Speed: Poor; Maintenance required: None; Cost: Free minutes in plan 1, then per build minute
  • actuated: Speed: Bare-metal; 64-bit Arm support: Yes; Maintenance required: Very little; Cost: Fixed monthly cost
  • Standard self-hosted runners: Speed: Good; 64-bit Arm support: DIY; Maintenance required: Manual setup and updates; Cost: OSS plus management costs
  • actions-runtime-controller: Speed: Varies 2; 64-bit Arm support: DIY; Maintenance required: Very involved; Cost: OSS plus management costs

1 Builds on public GitHub repositories are free with the standard hosted runners; however, private repositories require billing details once the included minutes are consumed.

2 actions-runtime-controller requires the use of separate build tools such as Kaniko, which break the developer experience of using docker or docker-compose. If Docker in Docker (DinD) is used, there is a severe performance penalty and a security risk.

You can only get VM-level isolation from either GitHub hosted runners or Actuated. Standard self-hosted runners have no isolation between builds and actions-runtime-controller requires either a Docker socket to be mounted or Docker In Docker (a privileged container) to build and run containers.

"},{"location":"faq/#how-does-actuated-compare-to-a-actions-runtime-controller-arc","title":"How does actuated compare to a actions-runtime-controller (ARC)?","text":"

actions-runtime-controller (ARC) describes itself as \"still in its early stage of development\". It was created by an individual developer called Yusuke Kuoka, and now receives updates from GitHub's team, after having been adopted into the actions GitHub Organisation.

Its primary use-case is to scale GitHub's self-hosted actions runner using Pods in a Kubernetes cluster. ARC is self-hosted software, which means its setup and operation are complex, requiring you to create and properly configure a GitHub App along with its keys. For actuated, you only need to run a single binary on each of your runner hosts and send us an encrypted bootstrap token.

If you're running npm install or maven, then this may be a suitable isolation boundary for you.

The default mode for ARC is a reusable runner, which can run many jobs, and each job could leave side-effects or poison the runner for future job runs.

If you need to build a container, doing so in a container on a Kubernetes node offers little in the way of an isolation or security boundary.

What if ARC is configured to use \"rootless\" containers? With a rootless container, you lose access to \"root\" and sudo, both of which are essential in any kind of CI job. Actuated users get full access to root, and can run docker build without any tricks or losing access to sudo. That's the same experience you get from a hosted runner by GitHub, but it's faster because it's on your own hardware.

You can even run minikube, KinD, K3s and OpenShift with actuated without any changes.

ARC runs a container, so that should work on any machine with a modern Kernel, however actuated runs a VM, in order to provide proper isolation.

That means ARC runners can run pretty much anywhere, but actuated runners need to be on a bare-metal machine, or a VM that supports nested virtualisation.

See also: Where can I run my agents?

"},{"location":"faq/#doesnt-kaniko-fix-all-this-for-arc","title":"Doesn't Kaniko fix all this for ARC?","text":"

Kaniko, by Google, is an open source project for building containers. It's usually run as a container itself, and will usually require root privileges in order to mount the various filesystem layers required.

See also: Root user inside a container is root on the host

If you're an ARC user and, for various reasons, cannot migrate to a more secure solution like actuated, Kaniko may be a step in the right direction. Google Cloud users could also create a dedicated node pool with gVisor enabled, for some additional isolation.

However, it can only build containers, still requires root, and is itself often run in Docker, so we're back to the same problems that actuated set out to solve.

In addition, Kaniko cannot and will not help you run the container you've just built to validate it with end-to-end tests; neither can it run a KinD cluster or a Minikube cluster.

"},{"location":"faq/#do-we-need-to-run-my-actuated-servers-247","title":"Do we need to run my Actuated Servers 24/7?","text":"

Let's say that you wanted to access a single 64-bit Arm runner to speed up your Arm builds from 33 minutes to < 2 minutes like in this example.

The two cheapest options for 64-bit Arm hardware would be:

  • Buy a Mac Mini M1, host it in your office or a co-lo with Asahi Linux installed. That's a one-time cost and will last for several years.
  • Or you could rent an a1.metal from AWS by the hour, with very little up-front cost, and pay only for the time you use it.

In both cases, we're not talking about a significant amount of money; however, we are sometimes asked whether Actuated Servers need to be running 24/7.

The answer is that it's a trade-off between cost and convenience. We recommend running them continually, however you can turn them off when you're not using them if you think it is worth your time to do so.

If you only needed to run Arm builds from 9am-5pm, you could absolutely delete the VM and re-create it with a cron job; just make sure you restore the required files from the original registration of the agent. You may also be able to \"suspend\" or \"hibernate\" the host at a reduced cost, depending on the hosting provider. Feel free to reach out to us if you need help with this.

"},{"location":"faq/#is-there-gpu-support","title":"Is there GPU support?","text":"

Yes, both for GitHub and GitLab CI.

See also: Accelerate GitHub Actions with dedicated GPUs

"},{"location":"faq/#can-virtual-machines-be-launched-within-a-github-action","title":"Can Virtual Machines be launched within a GitHub Action?","text":"

It is possible to launch a Virtual Machine (VM) with KVM from within a Firecracker MicroVM.

Use-cases may include: building and snapshotting VM images, running Packer, launching VirtualBox and Vagrant, accelerating the Android emulator, building packages for NixOS and other testing which requires KVM.

It's disabled by default, but you can opt-in to the feature by following the steps in this article:

How to run a KVM guest in your GitHub Actions
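Once enabled, a quick way to confirm that KVM is usable inside a job is a step along these lines (the runner label is an example; kvm-ok comes from the cpu-checker package):

jobs:\n  check-kvm:\n    runs-on: actuated-4cpu-8gb\n    steps:\n      - name: Check for KVM\n        run: |\n          ls -l /dev/kvm\n          sudo apt-get update && sudo apt-get install -y cpu-checker\n          sudo kvm-ok\n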

At the time of writing, only Intel and AMD CPUs support nested virtualisation.

What about Arm? According to our contacts at Ampere, the latest versions of Arm hardware have some support for nested virtualisation, but the patches for the Linux Kernel are not ready.

"},{"location":"faq/#can-i-use-a-vm-for-an-actuated-server-instead-of-bare-metal","title":"Can I use a VM for an actuated server instead of bare-metal?","text":"

If /dev/kvm is available within the VM, or the VM can be configured so that nested virtualisation is available, then you can use a VM as an actuated server. Any VMs that are launched for CI jobs will be launched with nested virtualisation, and will have some additional overheads compared to a bare-metal server.
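A quick check from a shell on the VM before installing the agent; the device should exist, and a non-zero count from the second command means the virtualisation extensions are exposed to the guest:

ls -l /dev/kvm\negrep -c '(vmx|svm)' /proc/cpuinfo\n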

See also: Provision a server

"},{"location":"faq/#is-windows-or-macos-supported","title":"Is Windows or MacOS supported?","text":"

Linux is the only supported platform for actuated at this time, on AMD64 or 64-bit Arm architectures. We may consider other operating systems in the future; feel free to reach out to us.

"},{"location":"faq/#is-actuated-free-and-open-source","title":"Is Actuated free and open-source?","text":"

Actuated currently uses the Firecracker project to launch MicroVMs to isolate jobs during CI. Firecracker is an open source Virtual Machine Manager used by Amazon Web Services (AWS) to run serverless-style workloads for AWS Lambda.

Actuated is a commercial B2B product and service created and operated by OpenFaaS Ltd.

Read the End User License Agreement (EULA)

The website and documentation are available on GitHub and we plan to release some open source tools in the future to improve customer experience.

"},{"location":"faq/#is-there-a-risk-that-we-could-get-locked-in-to-actuated","title":"Is there a risk that we could get \"locked-in\" to actuated?","text":"

No, you can move back to either hosted runners (pay per minute from GitHub) or self-managed self-hosted runners at any time. Bear in mind that actuated solves painful issues with both hosted runners and self-managed self-hosted runners.

"},{"location":"faq/#why-is-the-brand-called-actuated-and-selfactuated","title":"Why is the brand called \"actuated\" and \"selfactuated\"?","text":"

The name of the software, product and brand is: \"actuated\". In some places \"actuated\" is not available, and we liked \"selfactuated\" more than \"actuatedhq\" or \"actuatedio\" because it refers to the hybrid experience of self-hosted runners.

"},{"location":"faq/#privacy-policy-data-security","title":"Privacy policy & data security","text":"

Actuated is a managed service operated by OpenFaaS Ltd, registered company number: 11076587.

It has both a Software as a Service (SaaS) component (\"control plane\") aka (\"Actuated\") and an agent (\"Actuated Agent\"), which runs on a Server supplied by the customer (\"Customer Server\").

"},{"location":"faq/#data-storage","title":"Data storage","text":"

The control-plane of actuated collects and stores:

  • Job events for the organisation where a label of \"actuated*\" is found, including:
    • Organisation name
    • Repository name
    • Actor name for each job
    • Build name
    • Build start / stop time
    • Build status

The following is collected from agents:

  • Agent version
  • Hostname & uptime
  • Platform information - Operating System and architecture
  • System capacity - total and available RAM & CPU

In addition, for support requests, we may need to collect the logs of the actuated agent process remotely from:

  • VMs launched for jobs, stored at /var/log/actuated/

This information is required to operate the control plane including scheduling of VMs and for technical support.

Upon cancelling a subscription, a customer may request that their data is deleted. In addition, they can uninstall the GitHub App from their organisation, and deactivate the GitHub OAuth application used to authenticate to the Actuated Dashboard.

"},{"location":"faq/#data-security-encryption","title":"Data security & encryption","text":"

TLS is enabled on the actuated control plane, the dashboard and on each agent. The TLS certificates have not expired and have no known issues.

Each customer is responsible for hosting their own Servers and installing appropriate firewalls or access control.

Each Customer Server requires a unique token which is encrypted using public key cryptography, before being shared with OpenFaaS Ltd. This token is used to authenticate the agent to the control plane.

Traffic between the control plane and Customer Server is only made over HTTPS, using TLS encryption and API tokens. In addition, the token required for GitHub Actions is double encrypted with an RSA key pair, so that only the intended agent can decrypt and use it. These tokens are short-lived and expire after 59 minutes.

Event data recorded from GitHub Actions is stored and used to deliver quality of service and scheduling. This data is stored on a server managed by DigitalOcean LLC in the United Kingdom. The control plane is hosted with Linode LLC in the United Kingdom.

No data is shared with third parties.

"},{"location":"faq/#software-development-life-cycle","title":"Software Development Life Cycle","text":"
  • A Version Control System (VCS) is being used - GitHub is used by all employees to store code
  • Only Authorized Employees Access Version Control - multi-factor authentication (MFA) is required for all employees
  • Only Authorized Employees Change Code - no changes can be pushed to production without a pull request approval from senior management
  • Production Code Changes Restricted - only authorized employees can push or make changes to production code
  • All changes are documented through pull requests, tickets and commit messages
  • Vulnerability management - vulnerability management is provided by GitHub.com. Critical vulnerabilities are remediated in a timely manner

Terminated Employee Access Revoked Within One Business Day - all access to source control management and production systems is revoked within one business day of an employee leaving the company.

Access to corporate network, production machines, network devices, and support tools requires a unique ID. This ID is only issued to employees and is revoked upon termination.

Policies Cover Employee Confidentiality - OpenFaaS Ltd policies require employees to keep confidential any information they learn while handling customer data.

"},{"location":"faq/#contact-information-available-to-customers","title":"Contact Information Available to Customers","text":"

OpenFaaS Ltd provides an email address in customer-accessible support documentation, so that support contact information is readily available. Users are encouraged to contact OpenFaaS Ltd if they become aware of items such as operational or security failures, incidents, system problems, concerns, or other issues/complaints.

"},{"location":"faq/#reliability-and-uptime","title":"Reliability and uptime","text":"

Authorized users have access to centralised logging endpoints, to query the logs of the Actuated agent installed on Customer Servers, ad-hoc, for the purpose of support and troubleshooting.

Authorized users have access to alerts, dashboards and may use this data to improve the service, or to proactively contact customers when there is a suspected issue.

Centralised monitoring and metrics gathered from the control plane have a 14-day retention period, after which data is automatically deleted.

"},{"location":"install-agent/","title":"Add your first agent to actuated","text":"

actuated is split into three parts:

  1. An Actuated Agent (agent) that you run on your own machines or VMs (server), which can launch a VM with a single-use GitHub Actions runner.
  2. A VM image launched by the agent, with all the preinstalled software found on a hosted GitHub Actions runner.
  3. Our own control plane that talks to GitHub on your behalf, and schedules builds across your fleet of agents.

All we need you to do is to install our agent on one or more servers, then we take care of the rest. We'll even be able to tell you if your server goes offline for any reason.

Have you registered your organisation yet?

Before you can add an agent, you or your GitHub organisation admin will need to install the: Actuated GitHub App.

"},{"location":"install-agent/#pick-your-actuated-servers","title":"Pick your Actuated Servers","text":"

Pick your Actuated Servers carefully using our guide: Pick a host for actuated

"},{"location":"install-agent/#review-the-end-user-license-agreement-eula","title":"Review the End User License Agreement (EULA)","text":"

Make sure you've read the Actuated EULA before registering your organisation with the actuated GitHub App, or starting the agent binary on one of your hosts.

If you missed it in the \"Provision a Server\" page, we recommend you use Ubuntu 22.04 as the host operating system on your Server.

"},{"location":"install-agent/#install-the-actuated-agent","title":"Install the Actuated Agent","text":"

Do you want a free, expert installation?

Our team can install the agent and configure the server for you. Just request our public SSH key, add it to .ssh/authorized_keys, create a DNS A or CNAME record for your server, and send the details over to us on the Actuated Slack.

Alternatively, you can run through the semi-automatic installation with the details below.

  1. Install your license for actuated

    The license is available in the email you received when you purchased your subscription. If someone else bought the subscription, they should be able to forward it to you.

    Run the following, then paste in your license, hit enter once, then Control + D to save the file.

    mkdir -p ~/.actuated\ncat > ~/.actuated/LICENSE\n
  2. Download the Actuated Agent and installation script to the server

    Setting up an ARM64 agent? Wherever you see agent in a command, change it to: agent-arm64. So instead of agent keygen you'd run agent-arm64 keygen.

    Install arkade using the command below, or download it from the releases page.

    Download the latest agent and install the binary to /usr/local/bin/:

    (\n# Install arkade\ncurl -sLS https://get.arkade.dev | sudo sh\n\n# Use arkade to extract the agent from its OCI container image\narkade oci install ghcr.io/openfaasltd/actuated-agent:latest --path ./agent\nchmod +x ./agent/agent*\nsudo mv ./agent/agent* /usr/local/bin/\n)\n

Run the install.sh script, which will install all the required dependencies like containerd, CNI and Firecracker.

    For best performance, a dedicated drive, volume or partition is required to store the filesystems for running VMs. If you do not have a volume or extra drive attached, then you can shrink the root partition, and use the resulting free space.
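To identify a spare device or partition to pass in as VM_DEV, you can list the block devices first (device names vary between machines):

lsblk -o NAME,SIZE,TYPE,MOUNTPOINT\n

Then run the installer, passing in the device: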

    (\ncd agent\nVM_DEV=/dev/nvme0n2 sudo -E ./install.sh\n)\n

    If you do not have additional storage available at this time, the installer will generate a loopback filesystem for you.

    (\ncd agent\nsudo -E ./install.sh\n)\n
  3. Generate your enrollment file

    You'll need to create a DNS A or CNAME record for each server you add to actuated, this could be something like server1.example.com for instance.

    Run the following to create an enrollment file at $HOME/.actuated/agent.yaml:

    For an Arm server run agent-arm64 instead of agent

    agent enroll --url https://server1.example.com\n

    The enrollment file contains:

    • The hostname of the server
    • The public key of the agent which we use to encrypt tokens sent to the agent to bootstrap runners to GitHub Actions
    • A unique API token encrypted with our public key, which is used by the control plane to authenticate each message sent to the agent
  4. Configure and start the agent

    Use the install-service command to configure and install a systemd service to start the actuated agent.

    The actuated control plane will only communicate with agents exposed over HTTPS to ensure proper encryption is in place. An API token is used in addition with the TLS connection for all requests.

    Any bootstrap tokens sent to the agent are further encrypted with the agent's public key.

    For hosts with public IPs, you will need to use the built-in TLS provisioning with Let's Encrypt. For hosts behind a firewall, NAT or in a private datacenter, you can use inlets to create a secure tunnel to the agent.

    We're considering other models for after the pilot, for instance GitHub's own API has the runner make an outbound connection and uses long-polling.

    These steps are for hosts with public IP addresses, if you want to use inlets, jump to the end of this step.

    The easiest way to configure everything is to run as root. The --user flag can be used to run under a custom user account, however sudo access is still required for actuated.

    For an x86_64 server, run:

    DOMAIN=agent1.example.com\n\nsudo -E agent install-service \\\n--letsencrypt-domain $DOMAIN \\\n--letsencrypt-email webmaster@$DOMAIN\n

    For an Arm server, run:

    DOMAIN=agent1.example.com\n\nsudo -E agent-arm64 install-service \\\n--letsencrypt-domain $DOMAIN \\\n--letsencrypt-email webmaster@$DOMAIN\n

    Note the different binary name: agent-arm64

    If you need to make changes you can run the command again, or edit /etc/default/actuated.

    Check the service's status with:

    sudo systemctl status actuated\nsudo journalctl -u actuated --since today -f\n

    For an Actuated Agent behind a firewall, or on a private network, do not include the --letsencrypt-* flags, and instead add --listen-addr \"127.0.0.1:\". Then read expose the agent with HTTPS for details on our private peering option or how to setup an inlets tunnel.

    For example (with inlets):

    sudo -E agent install-service \\\n--listen-addr \"127.0.0.1:\"\n
  5. Check that the control-plane is accessible

    curl -i https://server1.example.com\n

    A correct response is a 403.

  6. Send us your agent's connection info

    Share the $HOME/.actuated/agent.yaml file with us so we can add your agent to the actuated control plane.

    We'll let you know once we've added your agent to actuated and then it's over to you to start running your builds.

    Once you've run our test build, you need to run the steps for systemd mentioned above.

"},{"location":"install-agent/#next-steps","title":"Next steps","text":"

You can now start your first build and see it run on your actuated agent.

Start a build on your agent

name: ci\n\non: push\n\njobs:\n build-golang:\n-    runs-on: ubuntu-latest\n+    runs-on: actuated-4cpu-16gb\n

The amount of RAM and CPU can be picked independently.

For Arm servers change the prefix from actuated- to actuated-arm64:

name: ci\n\non: push\n\njobs:\n build-golang:\n-    runs-on: ubuntu-latest\n+    runs-on: actuated-arm64-8cpu-32gb\n

You can also specify actuated-any-4cpu-8gb if you don't mind whether the job runs on one of your amd64 or arm64 servers.

"},{"location":"install-agent/#other-considerations","title":"Other considerations","text":"

If you'd like to install a firewall, ufw should be relatively quick to configure; see the sketch after the list of ports below.

You will need the following ports open:

  • 443 - the encrypted control plane for actuated
  • 80 - used with Let's Encrypt to obtain a TLS certificate during the HTTP01 challenge
  • 22 - we recommend leaving port 22 open so that you can log into the machine with SSH, if needed. You could also change this to a high or random port
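A minimal ufw sketch matching the list above, assuming a default-deny inbound policy and SSH left on port 22:

sudo ufw default deny incoming\nsudo ufw default allow outgoing\nsudo ufw allow 22/tcp\nsudo ufw allow 80/tcp\nsudo ufw allow 443/tcp\nsudo ufw enable\n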

We do not recommend restricting outgoing traffic on the server as this will probably cause you issues with your builds.

See also: Troubleshooting your agent

"},{"location":"provision-server/","title":"Provision a Server","text":""},{"location":"provision-server/#provision-a-server-for-actuated","title":"Provision a Server for actuated","text":"

You'll need to provision a Server which is capable of virtualisation with Linux KVM. Each of your builds will run in an isolated microVM, with its own networking, Kernel and immutable filesystem.

We have done extensive research and testing both independently and with our customers. The recommendations on this page are updated regularly. We recommend bare-metal for the best performance, but cloud VMs which support nested virtualisation are also an option.

Did you know? Bare-metal servers from European providers are available from 50-150 EUR / mo. Using your own hardware can also be really cost effective.

So what makes one server quicker than another?

  • CPU Clock speed - the base and turbo speeds affect how some builds perform, such as Go and Rust
  • Core count - the amount of vCPU allocated to a build affects multi-processing
  • RAM and disk space - tune these to your needs to prevent builds slowing down
  • Generation of hardware - hosted runners may use obsolete hardware, you can use the latest generation
  • Network bandwidth - how quickly images, artifacts and caches will be transferred
  • Storage - NVMe is the only viable option for high performance builds
  • Multi-tenancy - are other customers contending for the same resources, or is the server dedicated to your team?

What Operating System (OS) should I use?

The certified Operating System for an Actuated server is: Ubuntu Server 22.04.

"},{"location":"provision-server/#how-many-vms-or-jobs-can-a-server-run","title":"How many VMs or jobs can a server run?","text":"

Depending on the level of concurrency in your plan, each server will be able to run a set number of jobs. So we suggest dividing the RAM and CPU threads between them. For instance, if your server has 32 threads and 128GB of RAM, you could allocate 6 vCPU and 25 GB of RAM to each job for 5x jobs in parallel, or 4x vCPU and 12GB of RAM for 10x jobs in parallel.

In addition, you can also specify vCPU and RAM requirements on a per-job basis by changing the runs-on: actuated label to: runs-on: actuated-2cpu-8gb and so forth. This is useful when you have particular jobs which need a lot of resources, like building Kernels, Kubernetes/Docker E2E tests, and browser testing.
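For example, a sketch pinning a heavy end-to-end job to a larger VM while unit tests run on a smaller one; the labels and make targets are illustrative:

jobs:\n  unit-tests:\n    runs-on: actuated-2cpu-8gb\n    steps:\n      - uses: actions/checkout@v4\n      - run: make test\n  e2e-kind:\n    runs-on: actuated-8cpu-32gb\n    steps:\n      - uses: actions/checkout@v4\n      - run: make e2e\n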

"},{"location":"provision-server/#just-tell-me-what-i-need","title":"Just tell me what I need","text":"

For the absolute best value in terms of performance and cost, we recommend the following options from Hetzner's Dedicated range:

  • x86_64 - Hetzner's A102
  • Arm64 - Hetzner's RX170

Servers on Hetzner arrive with a \"rescue\" system; use it to install Ubuntu 22.04, and make sure you disable software RAID, so that the two NVMe drives are presented as separate devices. One will run the system, the other will be used for filesystems for all the VMs.

"},{"location":"provision-server/#our-research-on-servers-for-actuated","title":"Our research on servers for actuated","text":"

Want us to recommend a server?

There's a lot of options when it comes to picking a server. On the onboarding call, we can help you find a match for your performance requirements, budget, and vendor preferences.

"},{"location":"provision-server/#intelamd","title":"Intel/AMD","text":"

1000 USD free credit for bare-metal

Equinix Metal have partnered with us to offer 1000 USD of credit for new customers to use on actuated. This will cover your usage for one month using an AMD Epyc server. You can request the discount code after purchasing your actuated subscription.

Intel and AMD CPUs can be used interchangeably, and are known as amd64 or x86_64.

  1. Bare-metal on the cloud (higher cost, convenient, high performance)

    Bare-metal doesn't have to mean managing hardware in your own physical datacenter. You can deploy machines by API, pay-as-you-go and get the highest performance available.

    Bear in mind that whilst the cost of bare-metal is higher than VMs, you will be able to pack more builds into them and get better throughput since actuated can schedule builds much more efficiently than GitHub's self-hosted runner.

    We have seen the best performance from hosts with high clock speeds like the recent generation of AMD processors, combined with local NVMe storage. Rotational drives and SATA SSDs are significantly slower. At the lower end of bare-metal providers, you'll pay 40-50 EUR / mo per host, moving up to 80-150 EUR / mo for NVMe and AMD processors, when you go up to enterprise-grade bare-metal with 10Gbit uplinks, you'll be more in the range of 500-1500 USD / mo.

    Some providers have a setup fee, a one-month commitment, or they don't have an API/automated way to order machines. This coupled with the low costs and capacity of bare-metal means autoscaling servers is usually unnecessary.

    There are at least a dozen options for hosted bare-metal servers:

    • Alibaba Cloud
    • AWS - untenable pricing for bare-metal servers
    • Berry Byte - US region available
    • Cherry Servers
    • Equinix Metal - 500 USD free credit
    • fasthosts
    • Glesys
    • Hetzner - Region: Germany or Finland
    • Ionos - UK based
    • latitude.sh - EU and US region available
    • OVHcloud - EU and US regions available
    • PhoenixNAP - US and EU regions available
    • Scaleway - France region
    • Vultr

    You can see a separate list here.

A note on Scaleway: Having tested several of Scaleway's bare-metal offerings, we do not recommend their current generation of bare-metal due to slow I/O and CPU speeds.

    Equinix Metal have partnered with us to offer 500 USD of credit for new customers to use on actuated. You'll get the discount code after signing up with us. We've tested their c3.small.x86 and c2.small.x86 machines, and they are very fast, with enterprise-grade networking and support included, with many different regions available.

Are you on a budget or looking to cut costs? Both Ionos (UK) and Hetzner (Germany) offer excellent value, with NVMe storage and very fast AMD CPUs available.

    Hetzner have a minimum commitment of one month, and most of the time will also charge a one-time setup fee. We recommend their AX-Line with NVMe and ECC RAM - for instance the AX41-NVME, AX52, or AX102. The best machine on offer is the AX161 which also has a fast delivery time.

  2. Cloud Virtual Machines (VMs) with nested virtualization (lowest cost, convenient, mid-level performance)

    This option may not have the raw speed and throughput of a dedicated, bare-metal host, but keeps costs low and is convenient for getting started.

    We know of at least three providers with options for nested virtualisation on their Virtual Machines (VMs): DigitalOcean, Google Cloud Platform (GCP), and Azure. New GCP customers get 300 USD of free credit.

  3. Bare-metal on-premises (cheap, convenient, high performance)

    Running bare-metal on-premises is a cost-effective and convenient way to re-use existing hardware investment.

    The machine could be racked in your server room, under your desk, or in a co-location datacenter.

    You can use inlets to expose your agent to actuated.

    Make sure you segment or isolate the agent into its own subnet, VLAN, DMZ, or VPC so that it cannot access the rest of your network. If you are thinking of running an actuated runner at home, we have suggested iptables rules that worked well for our own testing.
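
As a rough sketch of the intent - the subnet below is an assumption based on our examples, so adapt it to your own layout - rules along these lines stop the runner subnet from reaching private LAN ranges, whilst leaving Internet egress intact. Ask us for the tested set during onboarding.

#!/bin/bash\n# Assumption: 192.168.128.0/24 is the subnet used by the microVMs\n# Block routed traffic from the runners to private LAN ranges\nsudo iptables -A FORWARD -s 192.168.128.0/24 -d 10.0.0.0/8 -j DROP\nsudo iptables -A FORWARD -s 192.168.128.0/24 -d 172.16.0.0/12 -j DROP\nsudo iptables -A FORWARD -s 192.168.128.0/24 -d 192.168.0.0/16 -j DROP\n# Allow everything else, i.e. Internet egress\nsudo iptables -A FORWARD -s 192.168.128.0/24 -j ACCEPT\n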

"},{"location":"provision-server/#arm","title":"Arm","text":"

64-bit Arm is also known as both aarch64 and arm64.

Arm CPUs are highly efficient when it comes to power consumption, and pack in many more cores than the typical x86_64 CPU. This makes them ideal for running many builds in parallel. In typical testing, we've seen Arm builds that took 35-45 minutes under emulation reduced to 1-3 minutes total.

For Fluent Bit, a build that was failing after 6 hours using QEMU completed in just 4 minutes using actuated and an Ampere Altra server.

  1. Arm on-demand, in the cloud

    For ARM64, Hetzner provides outstanding value in their RX-Line, with 128GB / 256GB RAM coupled with NVMe storage and 80 cores, for around 200 EUR / mo. These are Ampere Altra servers. There is a minimum commitment of one month, and an initial setup cost per server.

    We have several customers running Intel/AMD and Arm builds on Hetzner who have been very happy. Stock can take anywhere from hours to days or even weeks to be delivered, and could run out, so check their status page before ordering.

    Glesys have the Ampere Altra Q80-26 available for roughly \u20ac239 / mo. They are a very similar price to Hetzner and are based in Sweden.

    PhoenixNAP just started to stock the Ampere Altra Q80-30 as of June 2023. These can be bought on a commitment of hourly, monthly or annually with a varying discount. The range was between 600-700 USD / mo.

    Following on from that, you have the a1.metal instance on AWS with 16 cores and 30GB RAM for roughly 0.4 USD / hour, and roughly half that cost with a 1-year reservation. The a1.metal is the first generation of Graviton, and in our testing with customers it came up quite a bit slower than Ampere or Graviton 3. On the plus side, these machines are cheap, and if you're already on AWS, they may be the easiest way to start. GP3 volumes or provisioned IOPS may increase performance over the default GP2 volumes. Reach out to us for more information.

    For responsive support, faster uplinks, API-provisioning, per-minute billing and enterprise-grade networking, take a look at the c3.large.arm64 (Ampere Altra) from Equinix Metal. These machines come in at around 2.5 USD / hour, but are packed out with many cores and other benefits. You can usually provision these servers in the Washington DC and Dallas metros. Cloud Native Computing Foundation (CNCF) projects may be able to apply for free credits from Equinix Metal.

  2. Arm for on-premises

    For on-premises ARM64 builds, we recommend the Mac Mini M1 (2020) with 16GB RAM and 512GB storage, running Asahi Linux. The M2 is unable to run Linux at this time.

    Ampere and their partners also offer 1U and 2U servers, along with desktop-form workstations, which can be racked or installed in your office.

    The Raspberry Pi 4 also works when used with an external NVMe, and in one instance was much faster than using emulation with a Hosted GitHub Runner.

  3. Arm VMs with nested virtualisation

    The current generation of Arm CPUs available as VMs from cloud providers does not support KVM or nested virtualisation, which means you need to pick from one of the previous two options.

    There are Arm VMs available on Azure, GCP, and Oracle OCI. We have tested each and since they are based on the same generation of Ampere Altra hardware, we can confirm that they do not have KVM available and will not work for running actuated builds.
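
You can verify whether KVM is exposed on any server or VM you're evaluating with the same check used by the specs script in the test build:

#!/bin/bash\nif [ -e /dev/kvm ]; then\necho \"/dev/kvm exists - this host can run actuated builds\"\nelse\necho \"/dev/kvm does not exist - pick a different host\"\nfi\n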

"},{"location":"provision-server/#want-to-talk-to-us","title":"Want to talk to us?","text":"

Still not sure which option is right for your team? Get in touch with us on the Actuated Slack and we'll help you decide.

"},{"location":"provision-server/#next-steps","title":"Next steps","text":"

Now that you've created a Server or VM with the recommended Operating System, you'll need to install the actuated agent and get in touch with us to register it.

  • Install the Actuated Agent
"},{"location":"register/","title":"Register your GitHub Organisation","text":"

Actuated is a managed service, where you plug in your own servers, and we do everything else needed to create and manage self-hosted runners for your GitHub Actions.

Plans are paid monthly, without any minimum commitment.

"},{"location":"register/#what-youll-need","title":"What you'll need","text":"
  • A GitHub organisation
  • One or more public or private repositories hosted in the organisation
  • Administrative access to install the actuated GitHub App
  • A company credit-card to pay for your subscription
  • One or more bare-metal servers (we'll recommend the best fit for you during onboarding)
"},{"location":"register/#well-guide-you-through-the-process","title":"We'll guide you through the process","text":"

We'll walk you through the onboarding process and answer all your questions, so that you can be up and running straight away.

We've now run over 245,000 VMs for commercial teams, and there's very little for you to do to get started. In most cases, we've seen a 2-3x speed-up for x86_64 builds by switching one line in a workflow: runs-on: actuated-4cpu-16gb. For Arm builds, native hardware makes a night-and-day difference.

"},{"location":"register/#book-a-call-with-us","title":"Book a call with us","text":"

Some engineers hate talking to salespeople. You're not alone, and this is not a sales call, nor is it with salespeople. Our pricing is public, and paid month to month by corporate card.

The purpose of a call is to understand your goals, and help you pick the best server hardware, hosting company, and subscription plan for your usage.

Before the call, generate a usage report with our open-source actions-usage tool for at least 7 days, and either send it over via email or share it with us on the call. It'll help us make a better recommendation.
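
For illustration, a run might look like the below - check the actions-usage README for the exact flags, and note that the token file path here is just an assumption:

# ~/pat.txt is assumed to contain a GitHub Personal Access Token\nactions-usage --org your-org --token-file ~/pat.txt > usage-report.txt\n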

Talk to us

"},{"location":"register/#install-the-github-app","title":"Install the GitHub App","text":"

An administrator from your GitHub organisation will need to install the actuated GitHub App. GitHub Apps provide fine-grained access controls for third-parties integrating with GitHub.

Learn more in the FAQ.

End User License Agreement (EULA)

Make sure you've read the Actuated EULA before registering your organisation with the actuated GitHub App.

  1. Click on the Actuated GitHub App
  2. Click Install app
  3. Select the organisation you want to install the Actuated app to
  4. Install the app on all repositories or select repositories

  5. Once installed, you will see the permissions and other configuration options for the Actuated GitHub App on your selected account. If you have multiple organisations, you will need to authorise each one.

To remove or update the Actuated GitHub App, navigate to \"Settings\" for your organisation and click on \"GitHub Apps\" in the left sidebar. Select Actuated from the list and click \"Configure\".

"},{"location":"register/#next-steps","title":"Next steps","text":"

Now that you've installed the GitHub App, and picked a subscription plan:

  • Provision a server
"},{"location":"roadmap/","title":"Roadmap","text":"

Actuated is in a pilot phase, running builds for participating customers. The core experience is functioning and we are dogfooding it for actuated itself, OpenFaaS Pro and inlets.

Our goal with the pilot is to prove that there's market fit for a solution like this, and if so, we'll invest more time in automation, user experience, agent autoscaling, dashboards and other polish.

The technology being used is run at huge scale in production by AWS (on Lambda and Fargate) and GitHub (self-hosted runners use the same runner software and control plane).

We believe actuated solves a real problem: making CI fast, efficient, and multi-arch.

If you'd like to try it out for your team, Register interest for the pilot.

Shipped

  • Firecracker MicroVM support for runners
  • Secure builds for both public and private repos
  • Fat VM image to match tooling installed by GitHub Actions
  • KinD support for runner's Kernel
  • K3s support for runner's Kernel
  • ARM64 support, including Raspberry Pi 4B
  • Efficient scheduling of jobs across fleet of agents
  • Samples for K3s/KinD/Matrix builds and OpenFaaS functions
  • Subscription plans delivered by Gumroad
  • API for reviewing connected agents and queue depth
  • Job event auditing for review via API
  • Documentation site with detailed GitHub Actions examples
  • Customer dashboard UI to show connected agents and build queue
  • Official website actuated.dev
  • Remote / automated update of agents via control plane
  • Blog feature on actuated.dev with news, tutorials and updates from our team
  • Performance testing for Ionos & Scaleway for cost effective AMD bare-metal
  • Daily build statistics on your dashboard
  • Docker cache directly on the Actuated Hosts (servers) for much faster builds and avoiding rate-limiting
  • Subscriptions: migration to LemonSqueezy for lower fees, and more payment options
  • Dashboard - animation on all data pages for better feedback when refreshing data
  • Detailed insights across your organisation on usage
  • Detailed insights across your repos
  • Detailed insights by committer
  • Integrated SSH debug for runners within dashboard and CLI
  • At a glance insights for the day's activity so far
  • CLI/API for remote logs of VMs and the actuated agent
  • CLI/API for restarting the agent and rebooting a server
  • Examples for using S3/Minio running on the server as an actions cache, instead of the default hosted cache within Azure
  • Specify a custom runner size for an individual workflow - i.e. actuated-8cpu-12gb
  • Specify actuated-any to run jobs on any available server whether amd64 or arm64, for architecture-agnostic workflows such as npm or for browser testing.
  • GPU pass-through for ML and AI workloads - Accelerate GitHub Actions with dedicated GPUs - Run AI models with ollama in CI with GitHub Actions
  • Linux Kernel 6.1 for 64-bit Arm
  • Burst above subscription concurrency for busy periods - Introducing burst billing and capacity for GitHub Actions

In progress:

  • Actuated for self-hosted GitLab. (see below section)

Coming next:

  • Linux Kernel 6.1 for x86_64
  • Support for private, self-hosted GitHub Enterprise Server (GHES) installations

Open for customer contributions:

  • Examples for setting up an apt/yum mirror for faster builds
  • Example for configuring two different Docker pull-through registries instead of just one.

Under consideration:

  • Custom CA for self-hosted S3, Minio, Docker Registries, apt/yum mirrors, etc.
  • Summary of CPU/RAM/disk consumption of builds
  • Right-sizing of build VMs based upon prior build history
  • Automated agent installation and bootstrap
  • Actuated for Jenkins

Items marked under consideration are awaiting customer interest. Reach out to us if you'd like to see these features sooner.

Is there something else you need? If you're already a customer, contact us via the actuated Slack or Register for interest.

"},{"location":"roadmap/#actuated-for-gitlab","title":"Actuated for GitLab","text":"

Learn about the tech preview

Ready for use by customers:

  • Actuated integration with self-hosted GitLab CI either on-premises or on the cloud
  • Ephemeral one-time runners with their own dedicated Docker Daemon
  • Immutable VM image for each build, built with automation
  • Schedule jobs across multiple bare-metal hosts or VMs with KVM available
  • Custom VM size scheduling
  • Manual enrollment of projects as required

Coming soon:

  • Automatic enrollment of projects as required, when the actuated tag has been added
  • actuated-cli integration
  • actuated dashboard - daily glance, runners and build queue

Coming later:

  • actuated dashboard - SSH debug, insights / reports

Register your interest if you'd like to talk to our team.

"},{"location":"test-build/","title":"Run a test build","text":"

Now that you've registered your GitHub organisation, created a server, and configured the agent, you're ready for a test build.

We recommend you run the following build without changes to confirm that everything is working as expected. After that, you can modify an existing build and start using actuated for your team.

The below steps should take less than 10 minutes.

"},{"location":"test-build/#create-a-repository-and-workflow","title":"Create a repository and workflow","text":"

This build will show you the specs, OS and Kernel name reported by the MicroVM.

Note that if you're running on an Arm server, you'll change the prefix from runs-on: actuated- to runs-on: actuated-arm64- instead.

  1. Create a test repository and a GitHub Action

    Create ./.github/workflows/ci.yaml:

    name: CI\n\non:\npull_request:\nbranches:\n- '*'\npush:\nbranches:\n- master\n- main\nworkflow_dispatch:\n\njobs:\nspecs:\nname: specs\n# For Arm servers use: actuated-arm64-2cpu-8gb\nruns-on: actuated-2cpu-8gb\nsteps:\n- uses: actions/checkout@v1\n- name: Check specs\nrun: |\n./specs.sh\n

    Note that the runs-on: field says actuated- and not ubuntu-latest. This is how the actuated control plane knows to send this job to your agent. There are no fixed sets of vCPU and RAM; you can make up your own combinations.

    Then add specs.sh to the root of the repository, and remember that you must run chmod +x specs.sh afterwards to make it executable.

    #!/bin/bash\n\necho Information on main disk\ndf -h /\n\necho Memory info\nfree -h\n\necho Total CPUs:\necho CPUs: $(nproc)\n\necho CPU Model\ncat /proc/cpuinfo |grep -i \"Model\"|head -n 2\n\necho Kernel and OS info\nuname -a\n\necho Generally, KVM should not be available unless specifically enabled\nif ! [ -e /dev/kvm ]; then\necho \"/dev/kvm does not exist\"\nelse\necho \"/dev/kvm exists\"\nfi\n\necho OS\ncat /etc/os-release\n\necho Egress IP:\ncurl -s -L -S https://checkip.amazonaws.com\n\necho Speed test of Internet\nsudo pip install speedtest-cli\nspeedtest-cli\n\necho Checking Docker\ndocker run alpine:latest cat /etc/os-release\n

    Don't leave out this step!

    chmod +x ./specs.sh\n
  2. Hit commit, and watch the VM boot up.

    You'll be able to see the runners registered for your organisation on the Actuated Dashboard along with the build queue and stats for the current day's builds.

  3. If you're curious

    You can view the logs of the agent by logging into one of the Actuated Servers with SSH and running the following commands:

    sudo journalctl -u actuated -f -o cat\n\n# Just today's logs:\nsudo journalctl -u actuated --since today -o cat\n

    And each VM writes the logs from its console and the GitHub Actions Runner to /var/log/actuated/.

    sudo cat /var/log/actuated/*\n

Do you have any questions or comments? Feel free to reach out to us over Slack in the #onboarding channel.

"},{"location":"test-build/#enable-actuated-for-an-existing-repository","title":"Enable actuated for an existing repository","text":"

To add actuated to an existing repository, simply edit the workflow YAML file and change runs-on: to runs-on: actuated-4cpu-16gb, or for Arm builds, to runs-on: actuated-arm64-4cpu-16gb. The values for CPU and RAM can be customised to your needs; there are no hard-coded or fixed combinations, to allow for flexibility.
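
For example, the change for an x86_64 build is a one-line diff:

-runs-on: ubuntu-latest\n+runs-on: actuated-4cpu-16gb\n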

Learn more in Custom VM Sizes

"},{"location":"test-build/#recommended-enable-a-docker-hub-mirror","title":"Recommended: Enable a Docker Hub mirror","text":"

Do you use the Docker Hub in your builds? Any Dockerfile with a FROM that doesn't include a server name will be pulled from docker.io, and there are strict rate-limits for unauthenticated users.
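
For example - the second image name below is hypothetical, but shows the difference:

# Pulled from the Docker Hub, since docker.io is implied\nFROM alpine:3.19\n\n# Pulled from GHCR, so no Docker Hub rate limits apply\nFROM ghcr.io/your-org/alpine:3.19\n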

  1. Option 1 - authenticate

    Run docker login or use the Docker Login Action just before you run docker build, or before you pull down any images with tooling like KinD - see the sketch after this list.

  2. Option 2 - use a cache/mirror

    Use our guide to Set up a registry cache and mirror - this uses less bandwidth and increases the speed of builds where images are already present in the cache.
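
For Option 1, here's a minimal sketch of logging in from within a workflow step, assuming you've created DOCKERHUB_USERNAME and DOCKERHUB_TOKEN as repository secrets:

# DOCKERHUB_USERNAME and DOCKERHUB_TOKEN are assumed repository secrets\necho \"${{ secrets.DOCKERHUB_TOKEN }}\" | \\\ndocker login --username \"${{ secrets.DOCKERHUB_USERNAME }}\" --password-stdin\n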

"},{"location":"troubleshooting/","title":"Troubleshooting","text":""},{"location":"troubleshooting/#getting-support","title":"Getting support","text":"

All customers have access to a public Slack channel for support and collaboration.

Enterprise customers may also have an upgraded SLA for support tickets via email and access to a private Slack channel.

"},{"location":"troubleshooting/#billing-and-your-plan","title":"Billing and your plan","text":""},{"location":"troubleshooting/#change-your-credit-card","title":"Change your credit card","text":"

Sometimes credit card limits or virtual cards are used on a subscription. To change the credit card used for your subscription, click here: My Orders.

"},{"location":"troubleshooting/#upgrade-your-plan","title":"Upgrade your plan","text":"

If you'd like to upgrade your plan for more concurrent builds, a higher level of support or anything else, you can do so via the Lemon Squeezy dashboard, the additional amount will be applied pro-rata.

Update or review your plan

"},{"location":"troubleshooting/#the-actuated-dashboard","title":"The Actuated Dashboard","text":"

The first port of call should be the Actuated Dashboard where you can check the status of your agents and see the current queue of jobs.

For security reasons, an administrator for your GitHub Organisation will need to approve the Actuated Dashboard for access to your organisation before team members will be able to see any data. Send them the link for the dashboard, and have them specifically tick the checkbox for your organisation when logging in for the first time.

If you missed this step, have them head over to their Applications Settings page, click \"Authorized OAuth Apps\" and then \"Actuated Dashboard\". On this page, under \"Organization access\" they can click \"Grant\" for each of your organisations registered for actuated.

How to \"Grant\" access to the Dashboard.

Try a direct link here: Actuated Dashboard OAuth App

"},{"location":"troubleshooting/#a-job-is-stuck-as-queued","title":"A job is stuck as \"queued\"","text":"

If you're using a private repo and the job is queued, let us know on Slack and we'll check the audit database to try and find out why.

To remotely check the logs of the actuated service on a host, run the following:

actuated-cli agent-logs --owner ORG --host HOST [--age 20m]\n

You can also check /var/log/actuated/ for log files; tail -n 20 /var/log/actuated/*.txt should show you any errors that may have occurred on any of the VM boot-ups or runner registrations. Or check sudo journalctl -u actuated to see what's happening within the actuated service.

Since 2022, the main GitHub service and/or Actions has had a high number of partial or full outages.

Check the GitHub Status Page to make sure GitHub is fully operational, and bear in mind that there could be an issue, even if it hasn't been shown on that page yet.

You can schedule additional VMs to launch, one per queued job with the CLI:

actuated-cli repair --org OWNER\n

This command should not be run multiple times without contacting support first.

"},{"location":"troubleshooting/#you-pull-a-lot-of-large-images-from-the-docker-hub","title":"You pull a lot of large images from the Docker Hub","text":"

As much as we like to make our images as small as possible, sometimes we just have to pull down either large artifacts or many smaller ones. It just can't be helped.

Since a MicroVM is a completely immutable environment, the pull needs to happen on each build, which is actually a good thing.

The pull speed can be dramatically improved by using a registry mirror on each agent:

  • Example: Set up a registry mirror
"},{"location":"troubleshooting/#you-are-running-into-rate-limits-when-using-container-images-from-the-docker-hub","title":"You are running into rate limits when using container images from the Docker Hub","text":"

The Docker Hub implements stringent rate limits of 100 pulls per 6 hours, and 200 pulls per 6 hours if you log in. Pro accounts get an increased limit of 5000 pulls per 6 hours.

We've created simple instructions on how to set up a registry mirror to cache images on your Actuated Servers.

  • Example: Set up a registry mirror
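
Once the mirror is in place, enabling it within a workflow is a single step, as used in the multi-arch buildx example later in these docs:

- name: Setup mirror\nuses: self-actuated/hub-mirror@master\n
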
"},{"location":"troubleshooting/#a-job-is-running-out-of-ram-or-needs-more-cores","title":"A job is running out of RAM or needs more cores","text":"

If you suspect a job is running out of RAM or would benefit from more vCPU, you can increase the allocation by changing the runs-on label, as follows:

-runs-on: actuated-8cpu-8gb\n+runs-on: actuated-8cpu-16gb\n

You must set both RAM and vCPU at the same time, in the order of CPU (given as a whole number) followed by RAM (specified in GB).

For arm64 builds, the format follows the same convention: actuated-arm64-8cpu-16gb.

Bear in mind that if you set the RAM higher than the default, this may result in fewer concurrent VMs being scheduled on a single server.

The maximum amount of vCPU that can be set for a single job is 32 vCPU, this is an implementation detail of Firecracker and may change in the future.

To find out exactly how many resources are required, see our blog post on right sizing with the vmmeter tool.

"},{"location":"troubleshooting/#disk-space-is-running-out-for-a-job","title":"Disk space is running out for a job","text":"

The disk space allocated for jobs is 30GB by default, but this value can be increased. Contact the actuated team for instructions on how to do this.

A dedicated disk or partition should be allocated for your VMs, if that's not the case, contact us and we'll help you reconfigure the server.

"},{"location":"troubleshooting/#your-agent-has-been-offline-or-unavailable-for-a-significant-period-of-time","title":"Your agent has been offline or unavailable for a significant period of time","text":"

If your agent has been offline for a significant period of time, then our control plane will have disconnected it from its pool of available agents.

Contact us via Slack to have it reinstated.

"},{"location":"troubleshooting/#you-need-to-rotate-the-authentication-token-used-for-your-agent","title":"You need to rotate the authentication token used for your agent","text":"

There should not be many reasons to rotate this token, however, if something's happened and it's been leaked or an employee has left the company, contact us via email for the update procedure.

"},{"location":"troubleshooting/#you-need-to-rotate-your-privatepublic-keypair","title":"You need to rotate your private/public keypair","text":"

Your private/public keypair is comparable to an SSH key, although it cannot be used to gain access to your agent via SSH.

If you need to rotate it for some reason, please contact us by email as soon as you can.

"},{"location":"troubleshooting/#your-builds-are-slower-than-expected","title":"Your builds are slower than expected","text":"
  • Check free disk space (df -h)
  • Check for unattended updates/upgrades (ps -ef | grep unattended-upgrades) and (ps -ef | grep apt)

If you're using spinning disks, then consider switching to SSDs. If you're already using SSDs, consider using PCIe/NVMe SSDs.

Finally, we do have another way to speed up microVMs by attaching another drive or partition to your host. Contact us for more information.

"},{"location":"examples/custom-vm-size/","title":"Example: Custom VM sizes","text":"

Our team will have configured your servers so that they always launch a pre-defined VM size; this keeps the user experience simple and predictable.

However, you can also request a specific VM size with up to 32 vCPU and as much RAM as is available on the server. vCPU can be over-committed safely; however, over-committing RAM is not advised, because if all of the RAM is required, one of the running VMs may exit or be terminated.

Certified for:

  • x86_64
  • arm64 including Raspberry Pi 4
"},{"location":"examples/custom-vm-size/#request-a-custom-vm-size","title":"Request a custom VM size","text":"

For a custom size, just combine the vCPU count and the amount of RAM in GB into the runner label, for example:

x86_64 example:

  • actuated-1cpu-2gb
  • actuated-4cpu-16gb

64-bit Arm example:

  • actuated-arm64-4cpu-16gb
  • actuated-arm64-32cpu-64gb

You can change vCPU and RAM independently; there are no set combinations, so you can customise both to whatever you like.

The upper limit for vCPU is 32.

Create a new file at: .github/workflows/build.yml and commit it to the repository.

name: specs\n\non: push\njobs:\nspecs:\nruns-on: actuated-1cpu-2gb\nsteps:\n- name: Print specs\nrun: |\nnproc\nfree -h\n

This will allocate 1x vCPU and 2GB of RAM to the VM. To run this same configuration for arm64, change runs-on to actuated-arm64-1cpu-2gb.

"},{"location":"examples/docker/","title":"Example: Kubernetes with KinD","text":"

Docker CE is preinstalled in the actuated VM image, and will start upon boot-up.

Certified for:

  • x86_64
  • arm64 including Raspberry Pi 4

Use a private repository if you're not using actuated yet

GitHub recommends using a private repository with self-hosted runners because changes can be left over from a previous run, even when using Actions Runtime Controller. Actuated uses an ephemeral VM with an immutable image, so can be used on both public and private repos. Learn why in the FAQ.

"},{"location":"examples/docker/#try-out-the-action-on-your-agent","title":"Try out the action on your agent","text":"

Create a new file at: .github/workflows/build.yml and commit it to the repository.

Try running a container to ping Google 3 times:

name: build\n\non: push\njobs:\nping-google:\nruns-on: actuated-4cpu-16gb\nsteps:\n- uses: actions/checkout@master\nwith:\nfetch-depth: 1\n- name: Run a ping to Google with Docker\nrun: |\ndocker run --rm -i alpine:latest ping -c 3 google.com\n

Build a container with Docker:

name: build\n\non: push\njobs:\nbuild-in-docker:\nruns-on: actuated-4cpu-16gb\nsteps:\n- uses: actions/checkout@master\nwith:\nfetch-depth: 1\n- name: Build inlets-connect using Docker\nrun: |\ngit clone --depth=1 https://github.com/alexellis/inlets-connect\ncd inlets-connect\ndocker build -t inlets-connect .\ndocker images\n

To run this on ARM64, just change the actuated prefix from actuated- to actuated-arm64-.

"},{"location":"examples/github-actions-cache/","title":"Example: GitHub Actions cache","text":"

Jobs on Actuated runners start in a clean VM each time. This means dependencies need to be downloaded and build artifacts or caches rebuilt each time. Caching these files in the actions cache can improve workflow execution time.

A lot of the setup actions for package managers have support for caching built-in. See: setup-node, setup-python, etc. They require minimal configuration and will create and restore dependency caches for you.

If you have custom workflows that could benefit from caching, the cache can be configured manually using the actions/cache action.

The actions cache is not limited to GitHub-hosted runners; it can also be used with self-hosted runners. Workflows using the cache action can be converted to run on Actuated runners. You only need to change runs-on: ubuntu-latest to runs-on: actuated.

"},{"location":"examples/github-actions-cache/#use-the-github-actions-cache","title":"Use the GitHub Actions cache","text":"

In this short example we will build alexellis/registry-creds. This is a Kubernetes operator that can be used to replicate Kubernetes ImagePullSecrets to all namespaces.

"},{"location":"examples/github-actions-cache/#enable-caching-on-a-supported-action","title":"Enable caching on a supported action","text":"

Create a new file at: .github/workflows/build.yaml and commit it to the repository.

name: build\n\non: push\n\njobs:\nbuild:\nruns-on: actuated-4cpu-12gb\nsteps:\n- uses: actions/checkout@v3\nwith:\nrepository: \"alexellis/registry-creds\"\n- name: Setup Golang\nuses: actions/setup-go@v3\nwith:\ngo-version: ~1.19\ncache: true\n- name: Build\nrun: |\nCGO_ENABLED=0 GO111MODULE=on \\\ngo build -ldflags \"-s -w -X main.Release=dev -X main.SHA=dev\" -o controller\n

To configure caching with the setup-go action you only need to set the cache input parameter to true.

The cache is populated the first time this workflow runs. Running the workflow after this should be significantly faster because dependency files and build outputs are restored from the cache.

"},{"location":"examples/github-actions-cache/#manually-configure-caching","title":"Manually configure caching","text":"

If there is no setup action for your language that supports caching, it can be configured manually.

Create a new file at: .github/workflows/build.yaml and commit it to the repository.

name: build\n\non: push\n\njobs:\nbuild:\nruns-on: actuated-4cpu-12gb\nsteps:\n- uses: actions/checkout@v3\nwith:\nrepository: \"alexellis/registry-creds\"\n- name: Setup Golang\nuses: actions/setup-go@v3\nwith:\ngo-version: ~1.19\ncache: true\n- name: Setup Golang caches\nuses: actions/cache@v3\nwith:\npath: |\n~/.cache/go-build\n~/go/pkg/mod\nkey: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }}\nrestore-keys: |\n${{ runner.os }}-go-\n- name: Build\nrun: |\nCGO_ENABLED=0 GO111MODULE=on \\\ngo build -ldflags \"-s -w -X main.Release=dev -X main.SHA=dev\" -o controller\n

The step named Setup Golang caches uses the cache action to configure caching.

The path parameter is used to set the paths on the runner to cache or restore. The key parameter sets the key used when saving the cache. A hash of the go.sum file is used as part of the cache key.

"},{"location":"examples/github-actions-cache/#further-reading","title":"Further reading","text":"
  • Check out the list of actions/cache examples to configure caching for different languages and frameworks.
  • See our blog: Make your builds run faster with Caching for GitHub Actions
"},{"location":"examples/k3s/","title":"Example: Kubernetes with k3s","text":"

You may need to access Kubernetes within your build. K3s is a lightweight, production-grade distribution of Kubernetes that uses fewer resources than upstream. k3sup is a popular tool for installing k3s.

Certified for:

  • x86_64
  • arm64 including Raspberry Pi 4

Use a private repository if you're not using actuated yet

GitHub recommends using a private repository with self-hosted runners because changes can be left over from a previous run, even when using Actions Runtime Controller. Actuated uses an ephemeral VM with an immutable image, so can be used on both public and private repos. Learn why in the FAQ.

"},{"location":"examples/k3s/#try-out-the-action-on-your-agent","title":"Try out the action on your agent","text":"

Create a new file at: .github/workflows/build.yml and commit it to the repository.

Note that it's important to make sure Kubernetes is responsive before performing any commands like running a Pod or installing a helm chart.

name: k3sup-tester\n\non: push\njobs:\nk3sup-tester:\nruns-on: actuated-4cpu-16gb\nsteps:\n- name: get arkade\nuses: alexellis/setup-arkade@v1\n- name: get k3sup and kubectl\nuses: alexellis/arkade-get@master\nwith:\nkubectl: latest\nk3sup: latest\n- name: Install K3s with k3sup\nrun: |\nmkdir -p $HOME/.kube/\nk3sup install --local --local-path $HOME/.kube/config\n- name: Wait until nodes ready\nrun: |\nk3sup ready --quiet --kubeconfig $HOME/.kube/config --context default\n- name: Wait until CoreDNS is ready\nrun: |\nkubectl rollout status deploy/coredns -n kube-system --timeout=300s\n- name: Explore nodes\nrun: kubectl get nodes -o wide\n- name: Explore pods\nrun: kubectl get pod -A -o wide\n

To run this on ARM64, just change the actuated prefix from actuated- to actuated-arm64-.

"},{"location":"examples/kernel/","title":"Example: Test that compute time by compiling a Kernel","text":"

Use this sample to test the raw compute speed of your hosts by building a Kernel.

Certified for:

  • x86_64

Use a private repository if you're not using actuated yet

GitHub recommends using a private repository with self-hosted runners because changes can be left over from a previous run, even when using Actions Runtime Controller. Actuated uses an ephemeral VM with an immutable image, so can be used on both public and private repos. Learn why in the FAQ.

"},{"location":"examples/kernel/#try-out-the-action-on-your-agent","title":"Try out the action on your agent","text":"

Create a new file at: .github/workflows/build.yml and commit it to the repository.

name: microvm-kernel\n\non: push\njobs:\nmicrovm-kernel:\nruns-on: actuated\nsteps:\n- name: free RAM\nrun: free -h\n- name: List CPUs\nrun: nproc\n- name: get build toolchain\nrun: |\nsudo apt update -qy\nsudo apt-get install -qy \\\ngit \\\nbuild-essential \\\nkernel-package \\\nfakeroot \\\nlibncurses5-dev \\\nlibssl-dev \\\nccache \\\nbison \\\nflex \\\nlibelf-dev \\\ndwarves\n- name: clone linux\nrun: |\ntime git clone https://github.com/torvalds/linux.git linux.git --depth=1 --branch v5.10\ncd linux.git\ncurl -o .config -s -f https://raw.githubusercontent.com/firecracker-microvm/firecracker/main/resources/guest_configs/microvm-kernel-x86_64-5.10.config\necho \"# CONFIG_KASAN is not set\" >> .config\n- name: make config\nrun: |\ncd linux.git \nmake oldconfig\n- name: Make vmlinux\nrun: |\ncd linux.git\ntime make vmlinux -j$(nproc)\ndu -h ./vmlinux\n

When you have a build time, why not change runs-on: actuated to runs-on: ubuntu-latest to compare it to a hosted runner from GitHub?

Here's our test, where our own machine built the Kernel 4x faster than a hosted runner:

"},{"location":"examples/kind/","title":"Example: Kubernetes with KinD","text":"

You may need to access Kubernetes within your build. KinD is a popular option, and easy to run in an action.

Certified for:

  • x86_64
  • arm64 including Raspberry Pi 4

Use a private repository if you're not using actuated yet

GitHub recommends using a private repository with self-hosted runners because changes can be left over from a previous run, even when using Actions Runtime Controller. Actuated uses an ephemeral VM with an immutable image, so can be used on both public and private repos. Learn why in the FAQ.

"},{"location":"examples/kind/#try-out-the-action-on-your-agent","title":"Try out the action on your agent","text":"

Create a new file at: .github/workflows/build.yml and commit it to the repository.

Note that it's important to make sure Kubernetes is responsive before performing any commands like running a Pod or installing a helm chart.

name: build\n\non: push\njobs:\nstart-kind:\nruns-on: actuated-4cpu-16gb\nsteps:\n- uses: actions/checkout@master\nwith:\nfetch-depth: 1\n- name: get arkade\nuses: alexellis/setup-arkade@v1\n- name: get kubectl and kind\nuses: alexellis/arkade-get@master\nwith:\nkubectl: latest\nkind: latest\n- name: Create a KinD cluster\nrun: |\nmkdir -p $HOME/.kube/\nkind create cluster --wait 300s\n- name: Wait until CoreDNS is ready\nrun: |\nkubectl rollout status deploy/coredns -n kube-system --timeout=300s\n- name: Explore nodes\nrun: kubectl get nodes -o wide\n- name: Explore pods\nrun: kubectl get pod -A -o wide\n- name: Show kubelet logs\nrun: docker exec kind-control-plane journalctl -u kubelet\n

To run this on ARM64, just change the actuated prefix from actuated- to actuated-arm64-.

"},{"location":"examples/kind/#using-a-registry-mirror-for-kind","title":"Using a registry mirror for KinD","text":"

Whilst the instructions for a registry mirror work for Docker and for buildkit, KinD uses its own containerd configuration, so it needs to be configured separately, as required.

When using KinD, if you're deploying images which are hosted on the Docker Hub, then you'll probably need to either authenticate to the Docker Hub, or configure the registry mirror running on your server.

Here's an example of how to create a KinD cluster, using a registry mirror for the Docker Hub:

#!/bin/bash\n\nkind create cluster --wait 300s --config /dev/stdin <<EOF\nkind: Cluster\napiVersion: kind.x-k8s.io/v1alpha4\ncontainerdConfigPatches:\n- |-\n    [plugins.\"io.containerd.grpc.v1.cri\".registry.mirrors.\"docker.io\"]\n    endpoint = [\"http://192.168.128.1:5000\"]\nEOF\n

With open source projects, you may need to run the build on GitHub's hosted runners some of the time, in which case, you can use a check whether the mirror is available:

curl -f --connect-timeout 0.1 -s http://192.168.128.1:5000/v2/_catalog &> /dev/null\n\nif [ \"$?\" == \"0\" ]\nthen\necho \"Mirror found, configure KinD for the mirror\"\nelse\necho \"Mirror not found, use defaults\"\nfi\n

To use authentication instead, create a Kubernetes secret of type docker-registry and then attach it to the default service account of each namespace within your cluster.
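
Here's a minimal sketch of that approach, assuming your Docker Hub credentials are held in the DOCKER_USERNAME and DOCKER_PASSWORD environment variables:

#!/bin/bash\n# Create a docker-registry secret from your Docker Hub credentials\nkubectl create secret docker-registry dockerhub \\\n--docker-server=https://index.docker.io/v1/ \\\n--docker-username=$DOCKER_USERNAME \\\n--docker-password=$DOCKER_PASSWORD \\\n--namespace default\n\n# Attach it to the default service account, so that image pulls in\n# the default namespace use the credentials\nkubectl patch serviceaccount default -n default \\\n-p '{\"imagePullSecrets\": [{\"name\": \"dockerhub\"}]}'\n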

The OpenFaaS docs show how to do this for private registries, but the same applies for authenticating to the Docker Hub to raise rate-limits.

You may also like Alex's alexellis/registry-creds project which will replicate your Docker Hub credentials into each namespace within a cluster, to make sure images are pulled with the correct credentials.

"},{"location":"examples/kvm-guest/","title":"Example: Run a KVM guest","text":"

It is possible to launch a Virtual Machine (VM) within a GitHub Action. Support for virtualization is not enabled by default for Actuated. The Agent has to be configured to use a custom kernel.

There are some prerequisites to enable KVM support:

  • aarch64 runners are not supported at the moment.
  • A bare-metal host for the Agent is required.

Nested virtualisation is a premium feature

This feature requires a plan size of 15 concurrent builds or greater; however, you can get a 14-day free trial by contacting our team directly through the actuated Slack.

"},{"location":"examples/kvm-guest/#configure-the-agent","title":"Configure the Agent","text":"
  1. Make sure nested virtualization is enabled on the Agent host.

  2. Edit /etc/default/actuated on the Actuated Agent and add the kvm suffix to the AGENT_KERNEL_REF variable:

    - AGENT_KERNEL_REF=\"ghcr.io/openfaasltd/actuated-kernel:x86_64-latest\"\n+ AGENT_KERNEL_REF=\"ghcr.io/openfaasltd/actuated-kernel:x86_64-kvm-latest\"\n
  3. Also add it to the AGENT_IMAGE_REF line:

    - AGENT_IMAGE_REF=\"ghcr.io/openfaasltd/actuated-ubuntu22.04:x86_64-latest\"\n+ AGENT_IMAGE_REF=\"ghcr.io/openfaasltd/actuated-ubuntu22.04:x86_64-kvm-latest\"\n
  4. Restart the Agent to use the new kernel.

    sudo systemctl daemon-reload && \\\nsudo systemctl restart actuated\n
  5. Run a test build to verify KVM support is enabled in the runner. The specs script from the test build will report whether /dev/kvm is available.

"},{"location":"examples/kvm-guest/#run-a-firecracker-microvm","title":"Run a Firecracker microVM","text":"

This example is an adaptation of the Firecracker quickstart guide that we run from within a GitHub Actions workflow.

The workflow installs Firecracker, configures and boots a guest VM, then waits 20 seconds before shutting down the VM and exiting the workflow.

  1. Create a new repository and add a workflow file.

    The workflow file: ./.github/workflows/vm-run.yaml:

    name: vm-run\n\non: push\njobs:\nvm-run:\nruns-on: actuated-4cpu-8gb\nsteps:\n- uses: actions/checkout@master\nwith:\nfetch-depth: 1\n- name: Install arkade\nuses: alexellis/setup-arkade@v2\n- name: Install firecracker\nrun: sudo arkade system install firecracker\n- name: Run microVM\nrun: sudo ./run-vm.sh\n
  2. Add the run-vm.sh script to the root of the repository.

    Running the script will:

    • Get the kernel and rootfs for the microVM
    • Start firecracker and configure the guest kernel and rootfs
    • Start the guest machine
    • Wait for 20 seconds, then kill the firecracker process so the workflow finishes.

    The run-vm.sh script:

    #!/bin/bash\n\n# Clone the example repo\ngit clone https://github.com/skatolo/nested-firecracker.git\n\n# Run the VM script\n./nested-firecracker/run-vm.sh 
  3. Hit commit and check the run logs of the workflow. You should find the login prompt of the running microVM in the logs.

The full example is available on GitHub

For more examples and use-cases see:

  • How to run a KVM guest in your GitHub Actions
"},{"location":"examples/matrix-k8s/","title":"Example: Regression test against various Kubernetes versions","text":"

This example launches multiple Kubernetes clusters in parallel for regression and end to end testing.

In the example, we're testing the CRD for the inlets-operator on versions v1.16 through to v1.25. You could also switch out k3s for KinD, if you prefer.

See also: Actuated with KinD

Launching 10 Kubernetes clusters in parallel across your fleet of Actuated Servers.

Certified for:

  • x86_64
  • arm64 including Raspberry Pi 4

Use a private repository if you're not using actuated yet

GitHub recommends using a private repository with self-hosted runners because changes can be left over from a previous run, even when using Actions Runtime Controller. Actuated uses an ephemeral VM with an immutable image, so can be used on both public and private repos. Learn why in the FAQ.

"},{"location":"examples/matrix-k8s/#try-out-the-action-on-your-agent","title":"Try out the action on your agent","text":"

Create a new file at: .github/workflows/build.yml and commit it to the repository.

Customise the \"k3s\" array with the versions you need to test, and replace the \"Test crds\" step with whatever you need to install, such as helm charts.

name: k3s-test-matrix\n\non:\npull_request:\nbranches:\n- '*'\npush:\nbranches:\n- master\n- main\n\njobs:\nkubernetes:\nname: k3s-test-${{ matrix.k3s }}\nruns-on: actuated-4cpu-12gb\nstrategy:\nmatrix:\nk3s: [v1.16, v1.17, v1.18, v1.19, v1.20, v1.21, v1.22, v1.23, v1.24, v1.25]\n\nsteps:\n- uses: actions/checkout@v1\n- uses: alexellis/setup-arkade@v2\n- uses: alexellis/arkade-get@master\nwith:\nkubectl: latest\nk3sup: latest\n\n- name: Create Kubernetes ${{ matrix.k3s }} cluster\nrun: |\nmkdir -p $HOME/.kube/\nk3sup install \\\n--local \\\n--k3s-channel ${{ matrix.k3s }} \\\n--local-path $HOME/.kube/config \\\n--merge \\\n--context default\ncat $HOME/.kube/config\n\nk3sup ready --context default\nkubectl config use-context default\n\n# Just an extra test on top.\necho \"Waiting for nodes to be ready ...\"\nkubectl wait --for=condition=Ready nodes --all --timeout=5m\nkubectl get nodes -o wide\n\n- name: Test crds\nrun: |\necho \"Applying CRD\"\nkubectl apply -f https://raw.githubusercontent.com/inlets/inlets-operator/master/artifacts/crds/inlets.inlets.dev_tunnels.yaml\n

The matrix will cause a new VM to be launched for each item in the \"k3s\" array.

"},{"location":"examples/matrix/","title":"Example: matrix-build - run a VM per each job in a matrix","text":"

Use this sample to test launching multiple VMs in parallel.

Certified for:

  • x86_64
  • arm64 including Raspberry Pi 4

Use a private repository if you're not using actuated yet

GitHub recommends using a private repository with self-hosted runners because changes can be left over from a previous run, even when using Actions Runtime Controller. Actuated uses an ephemeral VM with an immutable image, so can be used on both public and private repos. Learn why in the FAQ.

"},{"location":"examples/matrix/#try-out-the-action-on-your-agent","title":"Try out the action on your agent","text":"

Create a new file at: .github/workflows/build.yml and commit it to the repository.

name: CI\n\non:\npull_request:\nbranches:\n- '*'\npush:\nbranches:\n- master\n- main\n\njobs:\narkade-e2e:\nname: arkade-e2e\nruns-on: actuated-4cpu-12gb\nstrategy:\nmatrix:\napps: [run-job,k3sup,arkade,kubectl,faas-cli]\nsteps:\n- name: Get arkade\nrun: |\ncurl -sLS https://get.arkade.dev | sudo sh\n- name: Download app\nrun: |\necho ${{ matrix.apps }}\narkade get ${{ matrix.apps }}\nfile /home/runner/.arkade/bin/${{ matrix.apps }}\n

The matrix will cause a new VM to be launched for each item in the \"apps\" array.

"},{"location":"examples/multiarch-buildx/","title":"Example: Multi-arch with buildx","text":"

A multi-arch or multi-platform container is effectively where you build the same container image for multiple Operating Systems or CPU architectures, and link them together under a single name.

So you may publish an image named: ghcr.io/inlets-operator/latest, but when this image is fetched by a user, a manifest file is downloaded, which directs the user to the appropriate image for their architecture.

If you'd like to see what these look like, run the following with arkade:

arkade get crane\n\ncrane manifest ghcr.io/inlets/inlets-operator:latest\n

You'll see a manifests array, with a platform section for each image:

{\n\"mediaType\": \"application/vnd.docker.distribution.manifest.list.v2+json\",\n\"manifests\": [\n{\n\"mediaType\": \"application/vnd.docker.distribution.manifest.v2+json\",\n\"digest\": \"sha256:bae8025e080d05f1db0e337daae54016ada179152e44613bf3f8c4243ad939df\",\n\"platform\": {\n\"architecture\": \"amd64\",\n\"os\": \"linux\"\n}\n},\n{\n\"mediaType\": \"application/vnd.docker.distribution.manifest.v2+json\",\n\"digest\": \"sha256:3ddc045e2655f06653fc36ac88d1d85e0f077c111a3d1abf01d05e6bbc79c89f\",\n\"platform\": {\n\"architecture\": \"arm64\",\n\"os\": \"linux\"\n}\n}\n]\n}\n
"},{"location":"examples/multiarch-buildx/#try-an-example","title":"Try an example","text":"

This example is taken from the Open Source inlets-operator.

It builds a container image containing a Go binary and uses a Dockerfile in the root of the repository. All of the images and corresponding manifest are published to GitHub's Container Registry (GHCR). The action itself is able to authenticate to GHCR using a built-in, short-lived token. This is dependent on the \"permissions\" section and \"packages: write\" being set.

View publish.yaml, adapted for actuated:

name: publish\n\non:\n push:\n    tags:\n      - '*'\n\njobs:\n publish:\n+    permissions:\n+      packages: write\n\n-   runs-on: ubuntu-latest\n+   runs-on: actuated-4cpu-12gb\n   steps:\n      - uses: actions/checkout@master\n        with:\n          fetch-depth: 1\n\n+     - name: Setup mirror\n+       uses: self-actuated/hub-mirror@master\n     - name: Get TAG\n        id: get_tag\n        run: echo TAG=${GITHUB_REF#refs/tags/} >> $GITHUB_ENV\n      - name: Get Repo Owner\n        id: get_repo_owner\n        run: echo \"REPO_OWNER=$(echo ${{ github.repository_owner }} | tr '[:upper:]' '[:lower:]')\" > $GITHUB_ENV\n\n      - name: Set up QEMU\n        uses: docker/setup-qemu-action@v3\n      - name: Set up Docker Buildx\n        uses: docker/setup-buildx-action@v3\n      - name: Login to container Registry\n        uses: docker/login-action@v3\n        with:\n          username: ${{ github.repository_owner }}\n          password: ${{ secrets.GITHUB_TOKEN }}\n          registry: ghcr.io\n\n      - name: Release build\n        id: release_build\n        uses: docker/build-push-action@v5\n        with:\n          outputs: \"type=registry,push=true\"\n          platforms: linux/amd64,linux/arm/v6,linux/arm64\n          build-args: |\n            Version=${{  env.TAG }}\n            GitCommit=${{ github.sha }}\n          tags: |\n            ghcr.io/${{ env.REPO_OWNER }}/inlets-operator:${{ github.sha }}\n            ghcr.io/${{ env.REPO_OWNER }}/inlets-operator:${{ env.TAG }}\n            ghcr.io/${{ env.REPO_OWNER }}/inlets-operator:latest\n

You'll see that we added a Setup mirror step; this is explained in the Registry Mirror example.

The docker/setup-qemu-action@v3 step is responsible for setting up QEMU, which is used to emulate the different CPU architectures.

The docker/build-push-action@v5 step is responsible for passing in a number of platform combinations such as: linux/amd64 for cloud, linux/arm64 for Arm servers and linux/arm/v6 for Raspberry Pi.

Within the Dockerfile, we needed to make a couple of changes.

You can choose to run a step on either the BUILDPLATFORM or the TARGETPLATFORM. The BUILDPLATFORM is the native architecture and platform of the machine performing the build, which is usually amd64. The TARGETPLATFORM is important for the final step of the build, and will be injected for each of the platforms you have specified in the step.

- FROM golang:1.22 as builder\n+ FROM --platform=${BUILDPLATFORM:-linux/amd64} golang:1.22 as builder\n

For Go specifically, we also updated the go build command to tell Go to use cross-compilation based upon the TARGETOS and TARGETARCH environment variables, which are populated by Docker.

GOOS=${TARGETOS} GOARCH=${TARGETARCH} go build -o inlets-operator\n
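
Putting the two changes together, a minimal sketch of the builder stage might look like this - the working directory is an assumption, and the binary name follows the inlets-operator example above:

FROM --platform=${BUILDPLATFORM:-linux/amd64} golang:1.22 as builder\n\n# TARGETOS and TARGETARCH are pre-defined by buildx, but must be\n# declared with ARG before this stage can read them\nARG TARGETOS\nARG TARGETARCH\n\n# Assumed working directory for the example\nWORKDIR /go/src/app\nCOPY . .\n\n# Cross-compile for the platform requested via --platforms\nRUN CGO_ENABLED=0 GOOS=${TARGETOS} GOARCH=${TARGETARCH} \\\ngo build -o inlets-operator\n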

Learn more in the Docker Documentation: Multi-platform images

"},{"location":"examples/multiarch-buildx/#is-it-slow-to-build-for-arm","title":"Is it slow to build for Arm?","text":"

Using QEMU can be slow at times, especially when building an image for Arm using a hosted GitHub Runner.

We found that we could speed up an Open Source project's build by 22x - from ~36 minutes to 1 minute 26 seconds.

See also How to make GitHub Actions 22x faster with bare-metal Arm

To build a separate image for Arm on an Arm runner, and one for amd64, you could use a matrix build.
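
Here's a minimal sketch of that approach - the image name example/app is hypothetical, and you'd still need to push the per-architecture tags and stitch them into a single manifest, or publish them separately:

name: build\n\non: push\njobs:\nbuild:\nstrategy:\nmatrix:\ninclude:\n- arch: amd64\nrunner: actuated-4cpu-12gb\n- arch: arm64\nrunner: actuated-arm64-4cpu-12gb\nruns-on: ${{ matrix.runner }}\nsteps:\n- uses: actions/checkout@master\nwith:\nfetch-depth: 1\n# Build natively on each runner, with no QEMU emulation required\n- name: Build for ${{ matrix.arch }}\nrun: |\ndocker build -t example/app:${{ matrix.arch }} .\n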

"},{"location":"examples/multiarch-buildx/#need-a-hand-with-github-actions","title":"Need a hand with GitHub Actions?","text":"

Check your plan to see if access to Slack is included, if so, you can contact us on Slack for help and guidance.

"},{"location":"examples/openfaas-helm/","title":"Example: Publish an OpenFaaS function","text":"

This example will create a Kubernetes cluster using KinD, deploy OpenFaaS using Helm, deploy a function, then invoke the function. There are some additional checks for readiness for Kubernetes and the OpenFaaS gateway.

You can adapt this example for any other Helm charts you may have for E2E testing.

We also recommend considering arkade for installing CLIs and common Helm charts for testing.

Docker CE is preinstalled in the actuated VM image, and will start upon boot-up.

Certified for:

  • x86_64
  • arm64

Use a private repository if you're not using actuated yet

GitHub recommends using a private repository with self-hosted runners because changes can be left over from a previous run, even when using Actions Runtime Controller. Actuated uses an ephemeral VM with an immutable image, so can be used on both public and private repos. Learn why in the FAQ.

"},{"location":"examples/openfaas-helm/#try-out-the-action-on-your-agent","title":"Try out the action on your agent","text":"

Create a new GitHub repository in your organisation.

Add: .github/workflows/e2e.yaml

name: e2e\n\non:\npush:\nbranches:\n- '*'\npull_request:\nbranches:\n- '*'\n\npermissions:\nactions: read\ncontents: read\n\njobs:\ne2e:\nruns-on: actuated-4cpu-12gb\nsteps:\n- uses: actions/checkout@master\nwith:\nfetch-depth: 1\n- name: get arkade\nuses: alexellis/setup-arkade@v1\n- name: get kubectl, kind and faas-cli\nuses: alexellis/arkade-get@master\nwith:\nkubectl: latest\nkind: latest\nfaas-cli: latest\n- name: Install Kubernetes KinD\nrun: |\nmkdir -p $HOME/.kube/\nkind create cluster --wait 300s\n- name: Add Helm chart, update repos and apply namespaces\nrun: |\nkubectl apply -f https://raw.githubusercontent.com/openfaas/faas-netes/master/namespaces.yml\nhelm repo add openfaas https://openfaas.github.io/faas-netes/\nhelm repo update\n- name: Install the Community Edition (CE)\nrun: |\nhelm repo update \\\n&& helm upgrade openfaas --install openfaas/openfaas \\\n--namespace openfaas  \\\n--set functionNamespace=openfaas-fn \\\n--set generateBasicAuth=true\n- name: Wait until OpenFaaS is ready\nrun: |\nkubectl rollout status -n openfaas deploy/prometheus --timeout 5m\nkubectl rollout status -n openfaas deploy/gateway --timeout 5m\n- name: Port forward the gateway\nrun: |\nkubectl port-forward -n openfaas svc/gateway 8080:8080 &\n\nattempts=0\nmax=10\n\nuntil $(curl --output /dev/null --silent --fail http://127.0.0.1:8080/healthz ); do\nif [ ${attempts} -eq ${max} ]; then\necho \"Max attempts reached $max waiting for gateway's health endpoint\"\nexit 1\nfi\n\nprintf '.'\nattempts=$(($attempts+1))\nsleep 1\ndone\n- name: Login to OpenFaaS gateway and deploy a function\nrun: |\nPASSWORD=$(kubectl get secret -n openfaas basic-auth -o jsonpath=\"{.data.basic-auth-password}\" | base64 --decode; echo)\necho -n $PASSWORD | faas-cli login --username admin --password-stdin \n\nfaas-cli store deploy env\n\nfaas-cli invoke env <<< \"\"\n\ncurl -s -f -i http://127.0.0.1:8080/function/env\n\nfaas-cli invoke --async env <<< \"\"\n\nkubectl logs -n openfaas deploy/queue-worker\n\nfaas-cli describe env\n

If you'd like to deploy the function, check out a more comprehensive example of how to log in and deploy in Serverless For Everyone Else

"},{"location":"examples/openfaas-publish/","title":"Example: Publish an OpenFaaS function","text":"

This example will publish an OpenFaaS function to GitHub's Container Registry (GHCR).

  • The example uses Docker's buildx and QEMU for a multi-arch build
  • Dynamic variables to inject the SHA and OWNER name from the repo
  • Uses the token that GitHub assigns to the action to publish the containers.

You can also run this example on GitHub's own hosted runners.

Docker CE is preinstalled in the actuated VM image, and will start upon boot-up.

Certified for:

  • x86_64

Use a private repository if you're not using actuated yet

GitHub recommends using a private repository with self-hosted runners because changes can be left over from a previous run, even when using Actions Runtime Controller. Actuated uses an ephemeral VM with an immutable image, so can be used on both public and private repos. Learn why in the FAQ.

"},{"location":"examples/openfaas-publish/#try-out-the-action-on-your-agent","title":"Try out the action on your agent","text":"

For alexellis' repository called alexellis/autoscaling-functions, check out the .github/workflows/publish.yml file:

  • The \"Setup QEMU\" and \"Set up Docker Buildx\" steps configure the builder to produce a multi-arch image.
  • The \"OWNER\" variable means this action can be run on any organisation without having to hard-code a username for GHCR.
  • Only the bcrypt function is being built with the --filter command added; remove it to build all functions in the stack.yml.
  • --platforms linux/amd64,linux/arm64,linux/arm/v7 will build for regular Intel/AMD machines, 64-bit Arm and 32-bit Arm i.e. Raspberry Pi, most users can reduce this list to just \"linux/amd64\" for a speed improvement

Make sure you edit runs-on: and set it to runs-on: actuated-4cpu-12gb

name: publish\n\non:\npush:\nbranches:\n- '*'\npull_request:\nbranches:\n- '*'\n\npermissions:\nactions: read\nchecks: write\ncontents: read\npackages: write\n\njobs:\npublish:\nruns-on: actuated-4cpu-12gb\nsteps:\n- uses: actions/checkout@master\nwith:\nfetch-depth: 1\n- name: Get faas-cli\nrun: curl -sLSf https://cli.openfaas.com | sudo sh\n- name: Pull custom templates from stack.yml\nrun: faas-cli template pull stack\n- name: Set up QEMU\nuses: docker/setup-qemu-action@v3\n- name: Set up Docker Buildx\nuses: docker/setup-buildx-action@v3\n- name: Get TAG\nid: get_tag\nrun: echo ::set-output name=TAG::latest-dev\n- name: Get Repo Owner\nid: get_repo_owner\nrun: >\necho ::set-output name=repo_owner::$(echo ${{ github.repository_owner }} |\ntr '[:upper:]' '[:lower:]')\n- name: Docker Login\nrun: > echo ${{secrets.GITHUB_TOKEN}} | \ndocker login ghcr.io --username \n${{ steps.get_repo_owner.outputs.repo_owner }} \n--password-stdin\n- name: Publish functions\nrun: >\nOWNER=\"${{ steps.get_repo_owner.outputs.repo_owner }}\" \nTAG=\"latest\"\nfaas-cli publish\n--extra-tag ${{ github.sha }}\n--build-arg GO111MODULE=on\n--platforms linux/amd64,linux/arm64,linux/arm/v7\n--filter bcrypt\n

If you'd like to deploy the function, check out a more comprehensive example of how to log in and deploy in Serverless For Everyone Else

"},{"location":"examples/system-info/","title":"Example: Get system information about your microVM","text":"

This sample reveals system information about your runner.

Certified for:

  • x86_64
  • arm64

Use a private repository if you're not using actuated yet

GitHub recommends using a private repository with self-hosted runners because changes can be left over from a previous run, even when using Actions Runtime Controller. Actuated uses an ephemeral VM with an immutable image, so can be used on both public and private repos. Learn why in the FAQ.

"},{"location":"examples/system-info/#try-out-the-action-on-your-agent","title":"Try out the action on your agent","text":"

Create a specs.sh file:

#!/bin/bash

echo Hostname: $(hostname)

echo whoami: $(whoami)

echo Information on main disk
df -h /

echo Memory info
free -h

echo Total CPUs:
echo CPUs: $(nproc)

echo CPU Model
cat /proc/cpuinfo | grep "model name"

echo Kernel and OS info
uname -a

if ! [ -e /dev/kvm ]; then
    echo "/dev/kvm does not exist"
else
    echo "/dev/kvm exists"
fi

echo OS info: $(cat /etc/os-release)

echo PATH: ${PATH}

echo Egress IP:
curl -s -L -S https://checkip.amazonaws.com

Create a new file at: .github/workflows/build.yml and commit it to the repository.

name: CI

on:
    pull_request:
        branches:
        - '*'
    push:
        branches:
        - master

jobs:
    specs:
        name: specs
        runs-on: actuated-4cpu-12gb
        steps:
        - uses: actions/checkout@v1
        - name: Check specs
          run: |
            ./specs.sh

Note how the hostname changes every time the job is run.

"},{"location":"examples/system-info/#perform-a-basic-benchmark","title":"Perform a basic benchmark","text":"

Update the specs.sh file to include benchmarks for disk and network connection:

echo Installing hdparm

sudo apt update -qqqqy && sudo apt install -qqqqy hdparm

echo Read speed

sudo hdparm -t $(mount | grep "/ " | cut -d " " -f1)

echo Write speed

sync; dd if=/dev/zero of=./tempfile bs=1M count=1024; sync

echo Where is this runner?

curl -s http://ip-api.com/json | jq

echo Information on main disk

df -h /

echo Public IP:

curl -s -L -S https://checkip.amazonaws.com

echo Checking speed
sudo pip install speedtest-cli
speedtest-cli

For the fastest servers backed by NVMes, with VMs running on a dedicated drive, we tend to see:

  • Read speeds of 1000+ MB/s.
  • Write speeds of 1000+ MB/s.

The Internet speed test will give you a good idea of how quickly large artifacts can be uploaded or downloaded during jobs.

Following the instructions for a Docker registry cache on the server can make pulling container images from public registries much quicker.

"},{"location":"tasks/cli/","title":"Actuated CLI","text":"

Monitor Actuated runners and jobs from the command line.

"},{"location":"tasks/cli/#installation","title":"Installation","text":"

Download and installation instructions are available via the actuated-dashboard.

You'll need to run actuated-cli auth first, so that you can get a Personal Access Token with the appropriate scopes from GitHub.

"},{"location":"tasks/cli/#view-queued-jobs","title":"View queued jobs","text":"
actuated-cli jobs \\\nactuated-samples\n
"},{"location":"tasks/cli/#view-runners-for-organization","title":"View runners for organization","text":"
actuated-cli runners \\\nactuated-samples\n
"},{"location":"tasks/cli/#view-ssh-sessions-available","title":"View SSH sessions available:","text":"
actuated-cli ssh ls\n

Hosts are ordered by the connected time.

| NO  |   ACTOR   |                 HOSTNAME                 |  RX   |  TX   | CONNECTED |
|-----|-----------|------------------------------------------|-------|-------|-----------|
|   1 | alexellis | 6aafd53144e2f00ef5cd2c16681eeab4712561a6 | 13679 | 10371 | 6m4s      |
|   2 | alexellis | fv-az268-245                             | 23124 | 13828 | 12m2s     |

Connect to an SSH session

Connect to the first available session from your account:

actuated-cli ssh connect

Connect to the second session in the list:

actuated-cli ssh connect 2

Connect to a specific session by hostname:

actuated-cli ssh connect runner1

Connect to a specific session with a host prefix:

actuated-cli ssh connect 6aafd

Check the logs of VMs

View the serial console and systemd output of the VMs launched on a specific server.

  • Check for timeouts with GitHub's control-plane
  • View output from the GitHub runner binary
  • See boot-up messages
  • Check for errors if the GitHub Runner binary is out of date
actuated-cli logs \
    --owner actuated-samples \
    --age 15m \
    runner1

The age is specified as a Go duration, e.g. 60m or 24h.

You can also get the logs for a specific runner by using the --id flag.

actuated-cli logs \
    --owner actuated-samples \
    --id ea5c285282620927689d90af3cfa3be2d5e2d004 \
    runner1

Check the logs of the actuated agent service

Show the logs of the actuated agent binary running on your server.

View VM launch times, etc.

actuated-cli agent-logs \
    --owner actuated-samples \
    --age 60m \
    runner1

Schedule a repair to re-queue jobs

If a job has been retried for 30 minutes without a runner to take it, it'll be taken off the queue. This command will re-queue all jobs that are in a "queued" state.

Run sparingly because it will launch one VM per job queued.

actuated-cli repair \
    actuated-samples

Rescue a remote server

Restart the agent by sending a kill -9 signal:

actuated-cli restart \
    --owner actuated-samples \
    runner1

Any inflight VMs will be killed, see also: actuated-cli update --force

Reboot the machine, if in an unrecoverable position:

actuated-cli restart \
    --owner actuated-samples \
    --reboot \
    runner1

Use with caution, since this may not perform a safe and clean shutdown.

JSON mode

Add --json to any command to get JSON output for scripting.
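For instance, a rough sketch of scripting against the output with jq - the exact shape of the JSON returned by the CLI isn't documented here, so treat the query as illustrative:

# Count the entries returned for an organisation (assumes the output is a JSON array)
actuated-cli jobs actuated-samples --json | jq 'length'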

API rate limits apply, so do not run the CLI within a loop or watch command.

"},{"location":"tasks/cli/#help-support","title":"Help & support","text":"

Reach out to our team on Slack

"},{"location":"tasks/debug-ssh/","title":"Example: Debug a job with SSH","text":"

If it's included within your actuated plan, then you can get a shell into any self-hosted runner - including GitHub's own hosted runners.

Certified for:

  • x86_64
  • arm64

Use a private repository if you're not using actuated yet

GitHub recommends using a private repository with self-hosted runners because changes can be left over from a previous run, even when using Actions Runtime Controller. Actuated uses an ephemeral VM with an immutable image, so can be used on both public and private repos. Learn why in the FAQ.

"},{"location":"tasks/debug-ssh/#try-out-the-action-on-your-agent","title":"Try out the action on your agent","text":"

You'll need to add the id_token: write permission to your workflow to use this action. It allows the action to authenticate with the SSH gateway using a GitHub Actions OIDC token.

Create a .github/workflows/workflow.yaml file

name: connect

on:
    push:
        branches:
        - master
        - main
    workflow_dispatch:

permissions:
    id-token: write
    contents: read
    actions: read

jobs:
    connect:
        name: connect
        runs-on: actuated-4cpu-12gb
        steps:
        - uses: self-actuated/connect-ssh@master

Next, trigger a build via the workflow_dispatch event or a git push to the master branch.

Open https://$SSH_GATEWAY/list in your browser and look for your session; you can log in using the SSH command printed for you.

Alternatively, you can view your own SSH sessions from the actuated dashboard.

Whenever you have a build that you just can't figure out, or you want to explore the runner and tune it up to your needs, simply add - uses: self-actuated/connect-ssh@master where you want to pause the build.

To release the session, run unblock or sudo reboot from the SSH session.

Watch a demo:

"},{"location":"tasks/local-github-cache/","title":"Run a local GitHub Cache","text":"

The cache for GitHub Actions can speed up CI/CD pipelines. Hosted runners are placed close to the cache, which means the latency is very low. Self-hosted runners can also make good use of this cache. Just like caching container images on the host in a registry mirror, you can also get a speed boost over the hosted cache by running your own cache directly on the host.

To improve cache speeds with Actuated runners, you can run a self-hosted S3 server and switch out the official actions/cache@v3 with tespkg/actions-cache@v1. The tespkg/actions-cache@v1 action can target S3 instead of the proprietary GitHub cache.

You can run the cache on every actuated server for the speed of communicating over a loopback network, or you can run it on a single dedicated server that's placed in the same region as the actuated servers, which will still be very quick.

Note that if you have multiple actuated hosts, consider running a single dedicated server for the cache. Subsequent jobs can be scheduled to different hosts, so there is no guarantee that the cache is populated when running a cache on every actuated server.

"},{"location":"tasks/local-github-cache/#set-up-an-s3-cache","title":"Set up an S3 cache","text":"

There are a couple of options to run a self-hosted S3 server, most notably Seaweedfs and Minio.

This guide will cover the setup of Seaweedfs but any S3 compatible service will work in a very similar way.

"},{"location":"tasks/local-github-cache/#install-seaweedfs","title":"Install Seaweedfs","text":"

Seaweedfs is distributed as a static Go binary, so it can be installed with arkade, or from the GitHub releases page.

arkade get seaweedfs
sudo mv ~/.arkade/bin/seaweedfs /usr/local/bin

Define a secret key and access key to be used from the CI jobs in the /etc/seaweedfs/s3.conf file.

Generate a secret key: openssl rand -hex 16 > secret_key

export ACCESS_KEY="" # Replace with your access key
export SECRET_KEY="$(cat ~/secret_key)"

cat >> /tmp/s3.conf <<EOF
{
  "identities": [
    {
      "name": "actuated",
      "credentials": [
        {
          "accessKey": "$ACCESS_KEY",
          "secretKey": "$SECRET_KEY"
        }
      ],
      "actions": [
        "Admin",
        "Read",
        "List",
        "Tagging",
        "Write"
      ]
    }
  ]
}
EOF

sudo mkdir -p /etc/seaweedfs
sudo mv /tmp/s3.conf /etc/seaweedfs/s3.conf

Install and start Seaweedfs with a systemd unit file:

(
cat >> /tmp/seaweedfs.service <<EOF
[Unit]
Description=SeaweedFS
After=network.target

[Service]
User=root
ExecStart=/usr/local/bin/seaweedfs server -ip=192.168.128.1 -volume.max=0 -volume.fileSizeLimitMB=2048 -dir=/home/runner-cache -s3 -s3.config=/etc/seaweedfs/s3.conf
Restart=on-failure

[Install]
WantedBy=multi-user.target
EOF

sudo mkdir -p /home/runner-cache
sudo mv /tmp/seaweedfs.service /etc/systemd/system/seaweedfs.service
sudo systemctl daemon-reload
sudo systemctl enable seaweedfs --now
)

We have set -volume.max=0 -volume.fileSizeLimitMB=2048 to minimize the amount of space used and to allow large zip files of up to 2GB, but you can change this to suit your needs. See seaweedfs server --help for more options.

The -ip flag only needs to be set to 192.168.128.1 if you are running the cache directly on the agent host. If you set up the cache to be accessible by multiple Actuated runner hosts, use the appropriate interface IP address.

Check the status with:

sudo journalctl -u seaweedfs -f
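To confirm that the S3 endpoint itself is reachable from the host, a quick anonymous request can be made - 8333 is the S3 port used in the workflow examples below. Expect an HTTP response (likely an AccessDenied error, since the request is unsigned) rather than a connection error:

curl -i http://192.168.128.1:8333/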
"},{"location":"tasks/local-github-cache/#use-the-self-hosted-cache","title":"Use the self-hosted cache","text":"

To start using the local cache you will need to replace actions/cache@v3 with tespkg/actions-cache@v1 and add tespkg/actions-cache specific properties in addition to the actions/cache properties in your cache steps.

Some actions like setup-node, setup-python, etc. come with built-in support for the GitHub Actions cache. They are not directly compatible with the self-hosted S3 cache, so you will need to configure caching manually.

Here is an example of manually configuring caching for Go:

name: build

on: push

jobs:
  build:
    runs-on: actuated-4cpu-8gb
    steps:
    - name: Setup Golang
      uses: actions/setup-go@v3
      with:
        go-version: ~1.21
        cache: false
    - name: Setup Golang caches
      uses: tespkg/actions-cache@v1
      with:
        endpoint: "192.168.128.1"
        port: 8333
        insecure: true
        accessKey: ${{ secrets.ACTIONS_CACHE_ACCESS_KEY }}
        secretKey: ${{ secrets.ACTIONS_CACHE_SECRET_KEY }}
        bucket: actuated-runners
        region: local
        use-fallback: true

        # actions/cache compatible properties: https://github.com/actions/cache
        path: |
            ~/.cache/go-build
            ~/go/pkg/mod
        key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }}
        restore-keys: |
            ${{ runner.os }}-go-

tespkg/actions-cache specific properties:

  • use-fallback - if Seaweedfs is not installed on the host, or is inaccessible, the action will fall back to using the GitHub cache.
  • bucket - the name of the bucket to use in Seaweedfs
  • region - the bucket region - use local when running your own S3 cache locally.
  • accessKey and secretKey - the credentials to use to access the bucket - we'd recommend using an organisation-level secret for this.
  • insecure - use http instead of https. You may want to create a self-signed certificate for the S3 service (see the sketch after this list) and set insecure: false to ensure that the connection is encrypted. If you're running builds within private repositories, tampering is unlikely.
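As a minimal sketch of that last point, a self-signed certificate for the cache endpoint could be generated with openssl (requires OpenSSL 1.1.1 or newer for -addext); the file names and validity period are illustrative, and you'd still need to configure Seaweedfs to serve TLS with it:

# Generate a self-signed certificate for the cache endpoint (illustrative values)
openssl req -x509 -newkey rsa:4096 -sha256 -days 365 -nodes \
  -keyout s3.key -out s3.crt \
  -subj "/CN=192.168.128.1" \
  -addext "subjectAltName=IP:192.168.128.1"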

Check out the list of actions/cache examples to configure caching for different languages and frameworks. Remember to replace actions/cache@v3 with tespkg/actions-cache@v1 and add the additional properties mentioned above.

"},{"location":"tasks/local-github-cache/#caching-the-git-checkout","title":"Caching the git checkout","text":"

Caching the git checkout can save a lot of time, especially for large repos.

jobs:
  build:
    runs-on: actuated-4cpu-8gb
    steps:
    - name: "Set current date as env variable"
      shell: bash
      run: |
        echo "CHECKOUT_DATE=$(date +'%V-%Y')" >> $GITHUB_ENV
      id: date
    - uses: tespkg/actions-cache@v1
      with:
        endpoint: "192.168.128.1"
        port: 8333
        insecure: true
        accessKey: ${{ secrets.ACTIONS_CACHE_ACCESS_KEY }}
        secretKey: ${{ secrets.ACTIONS_CACHE_SECRET_KEY }}
        bucket: actuated-runners
        region: local
        use-fallback: true
        path: ./.git
        key: ${{ runner.os }}-checkout-${{ env.CHECKOUT_DATE }}
        restore-keys: |
          ${{ runner.os }}-checkout-

The cache key uses a week-year format, rather than a SHA. Why? Because a SHA would change on every build, meaning that a save and load would be performed on every build, using up more space and slowing things down. In this example, there are only 52 cache entries per year.
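You can preview the key suffix locally with the same date command used in the workflow - %V is the ISO week number and %Y the year, so mid-May 2024 would print something like 20-2024:

date +'%V-%Y'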

"},{"location":"tasks/local-github-cache/#caching-node_modules-with-pnpm","title":"Caching node_modules with pnpm","text":"

For Node.js projects, the node_modules folder and yarn cache can become huge and take a long time to download. Switching to a local S3 cache can help bring that time down.

This example uses pnpm, a fast, disk space efficient replacement for npm and yarn.

jobs:
  build:
    runs-on: actuated-4cpu-8gb
    steps:
    - name: Install PNPM
      uses: pnpm/action-setup@v2
      with:
        run_install: |
          - args: [--global, node-gyp]

    - name: Get pnpm store directory
      id: pnpm-cache
      shell: bash
      run: |
        echo "STORE_PATH=$(pnpm store path)" >> $GITHUB_OUTPUT

    - uses: tespkg/actions-cache@v1
      with:
        endpoint: "192.168.128.1"
        port: 8333
        insecure: true
        accessKey: ${{ secrets.ACTIONS_CACHE_ACCESS_KEY }}
        secretKey: ${{ secrets.ACTIONS_CACHE_SECRET_KEY }}
        bucket: actuated-runners
        region: local
        use-fallback: true
        path: |
          ${{ steps.pnpm-cache.outputs.STORE_PATH }}
          ~/.cache
          .cache
        key: ${{ runner.os }}-pnpm-store-${{ hashFiles('**/pnpm-lock.yaml') }}
        restore-keys: |
          ${{ runner.os }}-pnpm-store-

    - name: Install dependencies
      shell: bash
      run: |
        pnpm install --frozen-lockfile --prefer-offline
"},{"location":"tasks/local-github-cache/#further-reading","title":"Further reading","text":"
  • From our blog: Fixing the cache latency for self-hosted GitHub Actions
  • A primer on using the GitHub Actions cache: Using caching in builds
"},{"location":"tasks/monitoring/","title":"Monitoring","text":"

Our team monitors actuated around the clock, on your behalf

The actuated team proactively monitors your servers and build queue for issues. We remediate them on your behalf, and for anything that cannot be fixed remotely, we'll be in touch via Slack or email.

The actuated CLI should be used for support, to query the agent's logs, or the logs of individual VMs.

If you would also like to do your own monitoring, you can purchase a monitoring add-on, which will expose metrics for your own Prometheus instance. You can then set up a Grafana dashboard to view the metrics.

The monitoring add-on provides:

  • Control-plane metrics such as queue-depth, delayed VM launches, and failed jobs.
  • Server metrics such as VMs running, load averages, and network I/O.

To opt-in, follow the instructions in the dashboard.

"},{"location":"tasks/monitoring/#scrape-the-metrics","title":"Scrape the metrics","text":"

Metrics are currently made available through Prometheus federation. Prometheus can be run with Docker, as a Kubernetes deployment, or as a standalone binary.

You can add a scrape target to your own Prometheus instance, or you can use the Grafana Agent to do that and ship off the metrics to Grafana Cloud.

Here is a sample scrape config for Prometheus:

scrape_configs:
  - job_name: "actuated"

    bearer_token: "xyz"
    scheme: https
    metrics_path: /api/v1/metrics/federate

    # Note: this value cannot be changed lower than 60s due to rate-limiting
    scrape_interval: 60s
    scrape_timeout: 5s
    static_configs:
    - targets: ["tbc:443"]

The bearer_token is a secret, and unique per customer. Only a bcrypt hash is stored in the control-plane, along with a mapping between GitHub organisations and the token.

The scrape_interval must be 60s or higher, to avoid rate-limiting.

Contact the support team on Slack for the value for the targets field.

"},{"location":"tasks/monitoring/#control-plane-metrics","title":"Control-plane metrics","text":"

Check the pending build queue depth:

actuated_controller_db_queued_status > 0

Check for delayed VM launches (due to insufficient capacity):

sum by(owner) ( rate(actuated_controller_vm_launch_delayed_total[$__rate_interval]) ) > 0

Failed jobs:

sum by (owner) (actuated_controller_job_failed_total{}) > 0
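If you'd rather be alerted than watch a dashboard, the same metric can be wrapped in a Prometheus alerting rule - a minimal sketch, assuming you already route alerts somewhere; the rule name, window and severity label are illustrative:

groups:
  - name: actuated
    rules:
      - alert: ActuatedJobsFailing
        # rate() is used so the alert clears once failures stop,
        # since the underlying counter only ever increases
        expr: sum by (owner) (rate(actuated_controller_job_failed_total[10m])) > 0
        for: 10m
        labels:
          severity: warning
        annotations:
          summary: "Failed actuated jobs for {{ $labels.owner }}"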

Rate of jobs queued:

sum by(owner) ( rate(actuated_controller_job_queued_total[$__rate_interval]) ) > 0

Rate of VMs launched:

sum by(owner) ( rate(actuated_controller_vm_launch_total[$__rate_interval]) ) > 0

Server metrics

VMs running by host:

sum by(job) (actuated_vm_running_gauge) > 0

Free RAM by host:

actuated_system_memory_available_bytes{}

VM launch total:

sum by(job) ( actuated_vm_launch_total )

Load averages by host:

actuated_system_load_avg_1

actuated_system_load_avg_5

actuated_system_load_avg_15

Net I/O from egress adapter:

sum by(job) ( actuated_system_egress_rx )

sum by(job) ( actuated_system_egress_tx )

Example: Set up a registry mirror

Use-cases:

  • Increase speed of pulls and builds by caching images on Actuated Servers
  • Reduce failed builds due to rate-limiting

If you use Docker in your self-hosted builds, there is a chance that you'll run into the rather conservative rate-limits.

See also: Docker.com: Download rate limit

The Docker Hub allows for 100 image pulls within a 6-hour period, but this can be extended to 200 by logging in, or to 5000 by paying for a Pro license.

A registry mirror / pull-through cache running on an actuated agent is significantly faster than pulling from a remote server.

We will create a mirror that:

  • Has no authentication, to keep the changes to your build to a minimum
  • Is read-only - for pulling images only
  • Only has access to pull images from the Docker Hub
  • Is not exposed to the Internet, but only to Actuated VMs
  • When unavailable for any reason, the build continues without error
  • Works on both Intel/AMD and ARM64 hosts

This tutorial shows you how to set up what was previously known as "Docker's Open Source Registry" and is now a CNCF project called distribution.

If you'd like to mirror another registry like gcr.io, ecr.io, quay.io, or your own registry, then you can use the same approach, but run each registry on a different port. The configuration may need to be set up manually, since the current action we have built is only designed for one mirror.

Certified for:

  • x86_64
  • arm64
"},{"location":"tasks/registry-mirror/#create-a-docker-hub-access-token","title":"Create a Docker Hub Access token","text":"

Create a Docker Hub Access token with "Public repos only" scope, and save it as ~/hub.txt on the Actuated Server.

Settings for an authorization token, with read-only permissions to public repositories

"},{"location":"tasks/registry-mirror/#set-up-the-registry-on-an-actuated-agent","title":"Set up the registry on an actuated agent","text":"
(\ncurl -sLS https://get.arkade.dev | sudo sh\n\n  sudo arkade system install registry\n\n  sudo mkdir -p /etc/registry\n  sudo mkdir -p /var/lib/registry\n)\n

Create a config file to make the registry only available on the Linux bridge for Actuated VMs.

Before doing so, you'll need to:

  1. Create a file named hub.txt in your home directory.
  2. Set the USERNAME variable to your Docker Hub username.
export USERNAME=""
export TOKEN=$(cat ~/hub.txt)

cat >> /tmp/registry.yml <<EOF
version: 0.1
log:
  accesslog:
    disabled: true
  level: warn
  formatter: text

storage:
  filesystem:
    rootdirectory: /var/lib/registry

proxy:
  remoteurl: https://registry-1.docker.io
  username: $USERNAME

  # A Docker Hub Personal Access token created with "Public repos only" scope
  password: $TOKEN

http:
  addr: 192.168.128.1:5000
  relativeurls: false
  draintimeout: 60s
EOF

sudo mv /tmp/registry.yml /etc/registry/config.yml

Install and start the registry with a systemd unit file:

(
cat >> /tmp/registry.service <<EOF
[Unit]
Description=Registry
After=network.target actuated.service

[Service]
Type=simple
Restart=always
RestartSec=5s
ExecStart=/usr/local/bin/registry serve /etc/registry/config.yml

[Install]
WantedBy=multi-user.target
EOF

sudo mv /tmp/registry.service /etc/systemd/system/registry.service
sudo systemctl daemon-reload
sudo systemctl enable registry --now
)

Check the status with:

sudo journalctl -u registry -f
"},{"location":"tasks/registry-mirror/#use-the-registry-within-a-workflow","title":"Use the registry within a workflow","text":"

Create a new repository in your organisation, along with a .github/workflows/build.yml file, and commit it to the repository.

name: CI

on:
    pull_request:
        branches:
        - '*'
    push:
        branches:
        - master
        - main

jobs:
    build:
        runs-on: [actuated]
        steps:

        - name: Setup mirror
          uses: self-actuated/hub-mirror@master

        - name: Checkout
          uses: actions/checkout@v2

        - name: Pull image using cache
          run: |
            docker pull alpine:latest

Note

The self-actuated/hub-mirror action already runs the docker/setup-buildx action, so if you have that in your builds already, you can remove it; otherwise, it will overwrite the settings for the mirror. Alternatively, move the self-actuated/hub-mirror action to after the docker/setup-buildx action.

"},{"location":"tasks/registry-mirror/#checking-if-it-worked","title":"Checking if it worked","text":"

You'll see the build run, and cached artifacts will appear in /var/lib/registry/.

find /var/lib/registry/ -name "alpine"

/var/lib/registry/docker/registry/v2/repositories/library/alpine

You can also use the registry's API to query which images are available:

curl -i http://192.168.128.1:5000/v2/_catalog

HTTP/1.1 200 OK
Content-Type: application/json; charset=utf-8
Docker-Distribution-Api-Version: registry/2.0
Date: Wed, 16 Nov 2022 09:41:18 GMT
Content-Length: 52

{"repositories":["library/alpine","moby/buildkit"]}
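The same standard registry API can list which tags have been cached for a given image - for example, for alpine (the output depends on what has been pulled through the mirror):

curl -s http://192.168.128.1:5000/v2/library/alpine/tags/list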

You can check the status of the mirror at any time with:

sudo journalctl -u registry --since today

If you're not sure if the registry is working, or want to troubleshoot it, you can enable verbose logging by editing the log section of the config file.

log:
  accesslog:
    disabled: false
  level: debug
  formatter: text

Then restart the service, and check the logs again. We do not recommend keeping this setting live as it will fill up the logs and disk quickly.

"},{"location":"tasks/registry-mirror/#a-note-on-kind","title":"A note on KinD","text":"

The self-actuated/hub-mirror action will configure both the Docker Daemon and Buildkit; however, KinD uses its own instance of containerd, and so must be configured separately.

See notes on KinD with actuated for more information.
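As a rough sketch of what that separate configuration looks like, KinD clusters accept containerdConfigPatches in their config file, which can point docker.io pulls at the mirror from this guide - assuming the mirror address of 192.168.128.1:5000 used above:

# kind-config.yaml - use with: kind create cluster --config kind-config.yaml
kind: Cluster
apiVersion: kind.x-k8s.io/v1alpha4
containerdConfigPatches:
  - |-
    [plugins."io.containerd.grpc.v1.cri".registry.mirrors."docker.io"]
      endpoint = ["http://192.168.128.1:5000"]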

"},{"location":"tasks/registry-mirror/#further-reading","title":"Further reading","text":"
  • Docker: Configuration for the registry
  • GitHub: View the project on GitHub
"}]} \ No newline at end of file diff --git a/sitemap.xml b/sitemap.xml new file mode 100644 index 0000000..53885c3 --- /dev/null +++ b/sitemap.xml @@ -0,0 +1,148 @@ + + + + https://docs.actuated.dev/ + 2024-05-17 + daily + + + https://docs.actuated.dev/contact/ + 2024-05-17 + daily + + + https://docs.actuated.dev/dashboard/ + 2024-05-17 + daily + + + https://docs.actuated.dev/expose-agent/ + 2024-05-17 + daily + + + https://docs.actuated.dev/faq/ + 2024-05-17 + daily + + + https://docs.actuated.dev/install-agent/ + 2024-05-17 + daily + + + https://docs.actuated.dev/provision-server/ + 2024-05-17 + daily + + + https://docs.actuated.dev/register/ + 2024-05-17 + daily + + + https://docs.actuated.dev/roadmap/ + 2024-05-17 + daily + + + https://docs.actuated.dev/test-build/ + 2024-05-17 + daily + + + https://docs.actuated.dev/troubleshooting/ + 2024-05-17 + daily + + + https://docs.actuated.dev/examples/custom-vm-size/ + 2024-05-17 + daily + + + https://docs.actuated.dev/examples/docker/ + 2024-05-17 + daily + + + https://docs.actuated.dev/examples/github-actions-cache/ + 2024-05-17 + daily + + + https://docs.actuated.dev/examples/k3s/ + 2024-05-17 + daily + + + https://docs.actuated.dev/examples/kernel/ + 2024-05-17 + daily + + + https://docs.actuated.dev/examples/kind/ + 2024-05-17 + daily + + + https://docs.actuated.dev/examples/kvm-guest/ + 2024-05-17 + daily + + + https://docs.actuated.dev/examples/matrix-k8s/ + 2024-05-17 + daily + + + https://docs.actuated.dev/examples/matrix/ + 2024-05-17 + daily + + + https://docs.actuated.dev/examples/multiarch-buildx/ + 2024-05-17 + daily + + + https://docs.actuated.dev/examples/openfaas-helm/ + 2024-05-17 + daily + + + https://docs.actuated.dev/examples/openfaas-publish/ + 2024-05-17 + daily + + + https://docs.actuated.dev/examples/system-info/ + 2024-05-17 + daily + + + https://docs.actuated.dev/tasks/cli/ + 2024-05-17 + daily + + + https://docs.actuated.dev/tasks/debug-ssh/ + 2024-05-17 + daily + + + https://docs.actuated.dev/tasks/local-github-cache/ + 2024-05-17 + daily + + + https://docs.actuated.dev/tasks/monitoring/ + 2024-05-17 + daily + + + https://docs.actuated.dev/tasks/registry-mirror/ + 2024-05-17 + daily + + \ No newline at end of file diff --git a/sitemap.xml.gz b/sitemap.xml.gz new file mode 100644 index 0000000..d1aefcd Binary files /dev/null and b/sitemap.xml.gz differ diff --git a/tasks/cli/index.html b/tasks/cli/index.html new file mode 100644 index 0000000..8fd60a1 --- /dev/null +++ b/tasks/cli/index.html @@ -0,0 +1,1201 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + Set-up the CLI - Actuated + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + +
+
+
+ + + + + + + +
+
+ + + + + +

Actuated CLI

+

Monitor Actuated runners and jobs from the command line.

+

Installation

+

Download and installation instruction are available via the actuated-dashboard

+

You'll need to run actuated-cli auth first, so that you can get a Personal Access Token with the appropriate scopes from GitHub.

+

View queued jobs

+
actuated-cli jobs \
+    actuated-samples
+
+

View runners for organization

+
actuated-cli runners \
+    actuated-samples
+
+

View SSH sessions available:

+
actuated-cli ssh ls
+
+

Hosts are ordered by the connected time.

+
| NO  |   ACTOR   |                 HOSTNAME                 |  RX   |  TX   | CONNECTED |
+|-----|-----------|------------------------------------------|-------|-------|-----------|
+|   1 | alexellis | 6aafd53144e2f00ef5cd2c16681eeab4712561a6 | 13679 | 10371 | 6m4s      |
+|   2 | alexellis | fv-az268-245                             | 23124 | 13828 | 12m2s     |
+
+

Connect to an SSH session

+

Connect to the first available session from your account:

+
actuated-cli ssh connect
+
+

Connected to the second session in the list:

+
actuated-cli ssh connect 2
+
+

Connect to a specific session by hostname:

+
actuated-cli ssh connect runner1
+
+

Connect to a specific session with a host prefix:

+
actuated-cli ssh connect 6aafd
+
+

Check the logs of VMs

+

View the serial console and systemd output of the VMs launched on a specific server.

+
    +
  • Check for timeouts with GitHub's control-plane
  • +
  • View output from the GitHub runner binary
  • +
  • See boot-up messages
  • +
  • Check for errors if the GitHub Runner binary is out of date
  • +
+
actuated-cli logs \
+    --owner actuated-samples \
+    --age 15m \
+    runner1
+
+

The age is specified as a Go duration i.e. 60m or 24h.

+

You can also get the logs for a specific runner by using the --id flag.

+
actuated-cli logs \
+    --owner actuated-samples \
+    --id ea5c285282620927689d90af3cfa3be2d5e2d004
+    runner1
+
+

Check the logs of the actuated agent service

+

Show the logs of the actuated agent binary running on your server.

+

View VM launch times, etc.

+
actuated-cli agent-logs \
+    --owner actuated-samples \
+    --age 60m \
+    runner1
+
+

Schedule a repair to re-queue jobs

+

If a job has been retried for 30 minutes, without a runner to take it, it'll be taken off the queue. This command will re-queue all jobs that are in a "queued" state.

+

Run sparingly because it will launch one VM per job queued.

+
actuated-cli repair \
+    actuated-samples
+
+

Rescue a remote server

+

Restart the agent by sending a kill -9 signal:

+
actuated-cli restart \
+    --owner actuated-samples \
+    runner1
+
+

Any inflight VMs will be killed, see also: actuated-cli update --force

+

Reboot the machine, if in an unrecoverable position:

+
actuated-cli restart \
+    --owner actuated-samples \
+    --reboot
+    runner1
+
+

Use with caution, since this may not perform a safe and clean shutdown.

+

JSON mode

+

Add --json to any command to get JSON output for scripting.

+

API rate limits apply, so do not run the CLI within a loop or watch command.

+

Help & support

+

Reach out to our team on Slack

+ + + + + + + + +
+
+ + +
+ +
+ + + +
+
+
+
+ + + + + + + + + \ No newline at end of file diff --git a/tasks/debug-ssh/index.html b/tasks/debug-ssh/index.html new file mode 100644 index 0000000..4c63d90 --- /dev/null +++ b/tasks/debug-ssh/index.html @@ -0,0 +1,1015 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + Debug a job with SSH - Actuated + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + +

Example: Debug a job with SSH

+

If your it's included within your actuated plan, then you can get a shell into any self-hosted runner - including GitHub's own hosted runners.

+

Certified for:

+
    +
  • x86_64
  • +
  • arm64
  • +
+
+

Use a private repository if you're not using actuated yet

+

GitHub recommends using a private repository with self-hosted runners because changes can be left over from a previous run, even when using Actions Runtime Controller. Actuated uses an ephemeral VM with an immutable image, so can be used on both public and private repos. Learn why in the FAQ.

+
+

Try out the action on your agent

+

You'll need to add the id_token: write permission to your workflow to use this action. It allows the action to authenticate with the SSH gateway using an GitHub Actions OIDC token.

+

Create a .github/workflows/workflow.yaml file

+
name: connect
+
+on:
+  push:
+    branches:
+      - master
+      - main
+  workflow_dispatch:
+
+permissions:
+  id-token: write
+  contents: read
+  actions: read
+
+jobs:
+  connect:
+    name: connect
+    runs-on: actuated-4cpu-12gb
+    steps:
+    - uses: self-actuated/connect-ssh@master
+
+

Next, trigger a build via the workflow_dispatch event or a git push to the master branch.

+

Open https://$SSH_GATEWAY/list in your browser and look for your session, you can log in using the SSH command outputted for you.

+

Alternatively, you can view your own SSH sessions from the actuated dashboard.

+

Whenever you have a build that you just can't figure out - or if you want to explore the runner and tune it up to your needs, then you can simply add - uses: self-actuated/connect-ssh@master where you want to pause the build.

+

To release the session run unblock or sudo reboot from the SSH session.

+

Watch a demo:

+ + + + + + + + + +
+
+ + +
+ +
+ + + +
+
+
+
+ + + + + + + + + \ No newline at end of file diff --git a/tasks/local-github-cache/index.html b/tasks/local-github-cache/index.html new file mode 100644 index 0000000..c2a653d --- /dev/null +++ b/tasks/local-github-cache/index.html @@ -0,0 +1,1260 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + Setup self-hosted cache - Actuated + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + +

Run a local GitHub Cache

+

The cache for GitHub Actions can speed up CI/CD pipelines. Hosted runners are placed close to the cache which means the latency is very low. Self-hosted runners can also make good use of this cache. Just like caching container images on the host in a registry mirror, you can also get a speed boost over the hosted cache by running your own cache directly on the host.

+

To improve cache speeds with Actuated runners you can run a self-hosted S3 server and switch out the official actions/cache@v3 with tespkg/actions-cache@v1. The tespkg/actions-cache@v1 can target S3 instead of the proprietary GitHub cache.

+

You can run the cache on every actuated server for the speed of communicating over a loopback network, or you can run it on a single dedicated server that's placed in the same region as the actuated servers, which will still be very quick.

+
+

Note that if you have multiple actuated hosts consider running a single dedicated server for the cache. Subsequent jobs can be scheduled to different hosts so there is no guarantee the cache is populated when running a cache on every actuated server.

+
+

Set up an S3 cache

+

There are a couple of options to run a self-hosted S3 server, most notably Seaweedfs and Minio.

+

This guide will cover the setup of Seaweedfs but any S3 compatible service will work in a very similar way.

+

Install Seaweedfs

+

Seaweedfs is distributed as a static Go binary, so it can be installed with arkade, or from the GitHub releases page.

+
arkade get seaweedfs
+sudo mv ~/.arkade/bin/seaweedfs /usr/local/bin
+
+

Define a secret key and access key to be used from the CI jobs in the /etc/seaweedfs/s3.conf file.

+

Generate a secret key: openssl rand -hex 16 > secret_key

+
export ACCESS_KEY="" # Replace with your access key
+export SECRET_KEY="$(cat ~/secret_key)"
+
+cat >> /tmp/s3.conf <<EOF
+{
+  "identities": [
+    {
+      "name": "actuated",
+      "credentials": [
+        {
+          "accessKey": "$ACCESS_KEY",
+          "secretKey": "$SECRET_KEY"
+        }
+      ],
+      "actions": [
+        "Admin",
+        "Read",
+        "List",
+        "Tagging",
+        "Write"
+      ]
+    }
+  ]
+}
+EOF
+
+mkdir -p /etc/seaweedfs
+sudo mv /tmp/s3.conf /etc/seaweedfs/s3.conf
+
+

Install and start Seaweedfs with a systemd unit file:

+
(
+cat >> /tmp/seaweedfs.service <<EOF
+[Unit]
+Description=SeaweedFS
+After=network.target
+
+[Service]
+User=root
+ExecStart=/usr/local/bin/seaweedfs server -ip=192.168.128.1 -volume.max=0 -volume.fileSizeLimitMB=2048 -dir=/home/runner-cache -s3 -s3.config=/etc/seaweedfs/s3.conf
+Restart=on-failure
+
+[Install]
+WantedBy=multi-user.target
+EOF
+
+mkdir -p /home/runner-cache
+sudo mv /tmp/seaweedfs.service /etc/systemd/system/seaweedfs.service
+sudo systemctl daemon-reload
+sudo systemctl enable seaweedfs --now
+)
+
+

We have set -volume.max=0 -volume.fileSizeLimitMB=2048 to minimize the amount of space used and to allow large zip files of up to 2GB, but you can change this to suit your needs. See seaweedfs server --help for more options.

+

The ip only needs to be set to 192.168.128.1 if you are running the cache directly on the agent host. If you set up the cache to be accessible by multiple Actuated runner hosts use the appropriate interface IP address.

+

Check the status with:

+
sudo journalctl -u seaweedfs -f
+
+

Use the self-hosted cache

+

To start using the local cache you will need to replace actions/cache@v3 with tespkg/actions-cache@v1 and add tespkg/actions-cache specific properties in addition to the actions/cache properties in your cache steps.

+

Some actions like setup-node, setup-python, etc come with build-in support for the GitHub actions cache. They are not directly compatible with the self-hosted S3 cache and you will need to configure caching manually.

+

This is an example to manually configure caching for go:

+
name: build
+
+on: push
+
+jobs:
+  build:
+    runs-on: actuated-4cpu-8gb
+    steps:
+    - name: Setup Golang
+      uses: actions/setup-go@v3
+      with:
+        go-version: ~1.21
+        cache: false
+    - name: Setup Golang caches
+      uses: tespkg/actions-cache@v1
+      with:
+        endpoint: "192.168.128.1"
+        port: 8333
+        insecure: true
+        accessKey: ${{ secrets.ACTIONS_CACHE_ACCESS_KEY }}
+        secretKey: ${{ secrets.ACTIONS_CACHE_SECRET_KEY }}
+        bucket: actuated-runners
+        region: local
+        use-fallback: true
+
+        # actions/cache compatible properties: https://github.com/actions/cache
+        path: |
+            ~/.cache/go-build
+            ~/go/pkg/mod
+        key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }}
+        restore-keys: |
+            ${{ runner.os }}-go-
+
+

tespkg/actions-cache specific properties:

+
    +
  • use-fallback - option means that if Seaweedfs is not installed on the host, or is inaccessible, the action will fall back to using the GitHub cache.
  • +
  • bucket - the name of the bucket to use in Seaweedfs
  • +
  • region - the bucket region - use local when running your own S3 cache locally.
  • +
  • accessKey and secretKey - the credentials to use to access the bucket - we'd recommend using an organisation-level secret for this.
  • +
  • insecure - use http instead of https. You may want to create a self-signed certificate for the S3 service and set insecure: false to ensure that the connection is encrypted. If you're running builds within private repositories, tampering is unlikely.
  • +
+

Checkout the list of actions/cache examples to configure caching for different languages and frameworks. Remember to replace actions/cache@v3 with tespkg/actions-cache@v1 and add the additional properties mentioned above.

+

Caching the git checkout

+

Caching the git checkout can save a lot of time especially for large repos.

+
jobs:
+  build:
+    runs-on: actuated-4cpu-8gb
+    steps:
+    - name: "Set current date as env variable"
+      shell: bash
+      run: |
+        echo "CHECKOUT_DATE=$(date +'%V-%Y')" >> $GITHUB_ENV
+      id: date
+    - uses: tespkg/actions-cache@v1
+      with:
+        endpoint: "192.168.128.1"
+        port: 8333
+        insecure: true
+        accessKey: ${{ secrets.ACTIONS_CACHE_ACCESS_KEY }}
+        secretKey: ${{ secrets.ACTIONS_CACHE_SECRET_KEY }}
+        bucket: actuated-runners
+        region: local
+        use-fallback: true
+        path: ./.git
+        key: ${{ runner.os }}-checkout-${{ env.CHECKOUT_DATE }}
+        restore-keys: |
+          ${{ runner.os }}-checkout-
+
+

The cache key uses a week-year format, rather than a SHA. Why? Because a SHA would change on every build, meaning that a save and load would be performed on every build, using up more space and slowing things down. In this example, there's only 52 cache entries per year.

+

Caching node_modules with pnpm

+

For Node.js projects, the node_modules folder and yarn cache can become huge and take a long time to download. Switching to a local S3 cache can help bring that time down.

+

This example uses pnpm, a fast, disk space efficient replacement for npm and yarn.

+
jobs:
+  build:
+    runs-on: actuated-4cpu-8gb
+    steps:
+    - name: Install PNPM
+      uses: pnpm/action-setup@v2
+      with:
+        run_install: |
+          - args: [--global, node-gyp]
+
+    - name: Get pnpm store directory
+      id: pnpm-cache
+      shell: bash
+      run: |
+        echo "STORE_PATH=$(pnpm store path)" >> $GITHUB_OUTPUT
+
+    - uses: tespkg/actions-cache@v1
+      with:
+        endpoint: "192.168.128.1"
+        port: 8333
+        insecure: true
+        accessKey: ${{ secrets.ACTIONS_CACHE_ACCESS_KEY }}
+        secretKey: ${{ secrets.ACTIONS_CACHE_SECRET_KEY }}
+        bucket: actuated-runners
+        region: local
+        use-fallback: true
+        path:
+          ${{ steps.pnpm-cache.outputs.STORE_PATH }}
+          ~/.cache
+          .cache
+        key: ${{ runner.os }}-pnpm-store-${{ hashFiles('**/pnpm-lock.yaml') }}
+        restore-keys: |
+          ${{ runner.os }}-pnpm-store-
+
+    - name: Install dependencies
+      shell: bash
+      run: |
+        pnpm install --frozen-lockfile --prefer-offline
+
+

Further reading

+ + + + + + + + + +
+
+ + +
+ +
+ + + +
+
+
+
+ + + + + + + + + \ No newline at end of file diff --git a/tasks/monitoring/index.html b/tasks/monitoring/index.html new file mode 100644 index 0000000..69b38cf --- /dev/null +++ b/tasks/monitoring/index.html @@ -0,0 +1,1085 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + Monitoring - Actuated + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + +

Monitoring

+
+

Our team monitors actuated around the clock, on your behalf

+

The actuated team proactively monitors your servers and build queue for issues. We remediate them on your behalf and for anything cannot be fixed remotely, we'll be in touch via Slack or email.

+
+

The actuated CLI should be used for support, to query the agent's logs, or the logs of individual VMs.

+

If you would also like to do your own monitoring, you can purchase a monitoring add-on, which will expose metrics for your own Prometheus instance. You can then set up a Grafana dashboard to view the metrics.

+

The monitoring add-on provides:

+
    +
  • Control-plane metrics such as queue-depth, delayed VM launches, and failed jobs.
  • +
  • Server metrics such as VMs running, load averages, and network I/O.
  • +
+

To opt-in, follow the instructions in the dashboard.

+

Scrape the metrics

+

Metrics are currently made available through Prometheus federation. Prometheus can be run with Docker, as a Kubernetes deployment, or as a standalone binary.

+

You can add a scrape target to your own Prometheus instance, or you can use the Grafana Agent to do that and ship off the metrics to Grafana Cloud.

+

Here is a sample scrape config for Prometheus:

+
scrape_configs:
+  - job_name: "actuated"
+
+    bearer_token: "xyz"
+    scheme: https
+    metrics_path: /api/v1/metrics/federate
+
+    # Note: this value cannot be changed lower than 60s due to rate-limiting
+    scrape_interval: 60s
+    scrape_timeout: 5s
+    static_configs:
+    - targets: ["tbc:443"]
+
+

The bearer_token is a secret, and unique per customer. Only a bcrypt hash is stored in the control-plane, along with a mapping between GitHub organisations and the token.

+

The scrape_interval must be 60s, or higher to avoid rate-limiting.

+

Contact the support team on Slack for the value for the targets field.

+

Control-plane metrics

+

Check the pending build queue depth:

+
actuated_controller_db_queued_status>0
+
+

Check for delayed VM launches (due to insufficient capacity):

+
sum by(owner) ( rate(actuated_controller_vm_launch_delayed_total
+[$__rate_interval]) )>0
+
+

Failed jobs:

+
sum by (owner) (actuated_controller_job_failed_total{}) > 0
+
+

Rate of jobs queued:

+
sum by(owner) ( rate(actuated_controller_job_queued_total[$__rate_interval]) ) > 0   
+
+

Rate of VMs launched:

+
sum by(owner) (rate(actuated_controller_vm_launch_total [$__rate_interval]))>0
+
+

Server metrics

+

VMs running by host:

+
sum by(job) (actuated_vm_running_gauge) > 0 
+
+

Free RAM by host:

+
actuated_system_memory_available_bytes{}
+
+

VM launch total:

+
sum by(job) ( actuated_vm_launch_total )
+
+

Load averages by host:

+
actuated_system_load_avg_1
+
+actuated_system_load_avg_5
+
+actuated_system_load_avg_15
+
+

Net I/O from egress adapter:

+
sum by( job) (  actuated_system_egress_rx )
+
+sum by( job) (  actuated_system_egress_tx )
+
+ + + + + + + + +
+
+ + +
+ +
+ + + +
+
+
+
+ + + + + + + + + \ No newline at end of file diff --git a/tasks/registry-mirror/index.html b/tasks/registry-mirror/index.html new file mode 100644 index 0000000..83d108c --- /dev/null +++ b/tasks/registry-mirror/index.html @@ -0,0 +1,1214 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + Setup a Registry Mirror - Actuated + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + +
+
+
+ + + + + + + +
+
+ + + + + +

Example: Set up a registry mirror

+

Use-cases:

+
    +
  • Increase speed of pulls and builds by caching images on Actuated Servers
  • +
  • Reduce failed builds due to rate-limiting
  • +
+

If you use Docker in your self-hosted builds, there is a chance that you'll run into the rather conservative rate-limits.

+

See also: Docker.com: Download rate limit

+

The Docker Hub allows for 100 image pulls within a 6 hour period, but this can be extended to 200 by logging in, or to 5000 by paying for a Pro license.

+

A registry mirror / pull-through cache running on an actuated agent is significantly faster than pulling from a remote server.

+

We will create a mirror that:

+
    +
  • Has no authentication, to keep the changes to your build to a minimum
  • +
  • Is read-only - for pulling images only
  • +
  • Only has access to pull images from the Docker Hub
  • +
  • Is not exposed to the Internet, but only to Actuated VMs
  • +
  • When unavailable for any reason, the build continues without error
  • +
  • Works on both Intel/AMD and ARM64 hosts
  • +
+

This tutorial shows you how to set up what was previously known as "Docker's Open Source Registry" and is now a CNCF project called distribution.

+

If you'd like to mirror another registry like gcr.io, ecr.io, quay.io, or your own registry, then you can use the same approach, but run each registry on a different port. The configuration may need to be set up manually, since the current action we have built is only designed for one mirror.

+

Certified for:

+
    +
  • x86_64
  • +
  • arm64
  • +
+

Create a Docker Hub Access token

+

Create a Docker Hub Access token with "Public repos only" scope, and save it as ~/hub.txt on the Actuated Server.

+

Settings for a public token

+
+

Settings for an authorization token, with read-only permissions to public repositories

+
+

Set up the registry on an actuated agent

+
(
+  curl -sLS https://get.arkade.dev | sudo sh
+
+  sudo arkade system install registry
+
+  sudo mkdir -p /etc/registry
+  sudo mkdir -p /var/lib/registry
+)
+
+

Create a config file to make the registry only available on the Linux bridge for Actuated VMs.

+

Before doing so, you'll need to:

+
    +
  1. Create a file named hub.txt in your home directory.
  2. +
  3. Set the USERNAME variable to your Docker Hub username.
  4. +
+
export USERNAME=""
+export TOKEN=$(cat ~/hub.txt)
+
+cat >> /tmp/registry.yml <<EOF
+version: 0.1
+log:
+  accesslog:
+    disabled: true
+  level: warn
+  formatter: text
+
+storage:
+  filesystem:
+    rootdirectory: /var/lib/registry
+
+proxy:
+  remoteurl: https://registry-1.docker.io
+  username: $USERNAME
+
+  # A Docker Hub Personal Access token created with "Public repos only" scope
+  password: $TOKEN
+
+http:
+  addr: 192.168.128.1:5000
+  relativeurls: false
+  draintimeout: 60s
+EOF
+
+sudo mv /tmp/registry.yml /etc/registry/config.yml
+
+

Install and start the registry with a systemd unit file:

+
(
+cat >> /tmp/registry.service <<EOF
+[Unit]
+Description=Registry
+After=network.target actuated.service
+
+[Service]
+Type=simple
+Restart=always
+RestartSec=5s
+ExecStart=/usr/local/bin/registry serve /etc/registry/config.yml
+
+[Install]
+WantedBy=multi-user.target
+EOF
+
+sudo mv /tmp/registry.service /etc/systemd/system/registry.service
+sudo systemctl daemon-reload
+sudo systemctl enable registry --now
+)
+
+

Check the status with:

+
sudo journalctl -u registry -f
+
+

Use the registry within a workflow

+

Create a new registry in your organisation, along with a: .github/workflows/build.yml file and commit it to the repository.

+
name: CI
+
+on:
+  pull_request:
+    branches:
+      - '*'
+  push:
+    branches:
+      - master
+      - main
+
+jobs:
+    build:
+        runs-on: [actuated]
+        steps:
+
+        - name: Setup mirror
+          uses: self-actuated/hub-mirror@master
+
+        - name: Checkout
+            uses: actions/checkout@v2
+
+        - name: Pull image using cache
+            run: |
+            docker pull alpine:latest
+
+
+

Note

+

The self-actuated/hub-mirror action already runs the docker/setup-buildx action, so if you have that in your builds already, you can remove it, otherwise it will overwrite the settings for the mirror. Alternatively, move the self-actuated/hub-mirror action to after the docker/setup-buildx action.

+
+

Checking if it worked

+

You'll see the build run, and cached artifacts appearing in: /var/lib/registry/.

+
find /var/lib/registry/ -name "alpine"
+
+/var/lib/registry/docker/registry/v2/repositories/library/alpine
+
+

You can also use the registry's API to query which images are available:

+
curl -i http://192.168.128.1:5000/v2/_catalog
+
+HTTP/1.1 200 OK
+Content-Type: application/json; charset=utf-8
+Docker-Distribution-Api-Version: registry/2.0
+Date: Wed, 16 Nov 2022 09:41:18 GMT
+Content-Length: 52
+
+{"repositories":["library/alpine","moby/buildkit"]}
+
+

You can check the status of the mirror at any time with:

+
sudo journalctl -u registry --since today
+
+

If you're not sure if the registry is working, or want to troubleshoot it, you can enable verbose logging, by editing the log section of the service file.

+
log:
+  accesslog:
+    disabled: false
+  level: debug
+  formatter: text
+
+

Then restart the service, and check the logs again. We do not recommend keeping this setting live as it will fill up the logs and disk quickly.

+

A note on KinD

+

The self-actuated/hub-mirror action will configure both the Docker Daemon, and buildkit, however KinD uses its own instance of containerd and so must be configured separately.

+

See notes on KinD with actuated for more information.

+

Further reading

+ + + + + + + + + +
+
+ + +
+ +
+ + + +
+
+
+
+ + + + + + + + + \ No newline at end of file diff --git a/test-build/index.html b/test-build/index.html new file mode 100644 index 0000000..7938d35 --- /dev/null +++ b/test-build/index.html @@ -0,0 +1,1111 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + Run a test build - Actuated + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + +
+
+
+ + + +
+
+ +
+
+ + + +
+
+ + + + + +

Run a test build

Now that you've registered your GitHub organisation, created a server, and configured the agent, you're ready for a test build.

We recommend you run the following build without changes to confirm that everything is working as expected. After that, you can modify an existing build and start using actuated for your team.

The steps below should take less than 10 minutes.

Create a repository and workflow

This build will show you the specs, OS and kernel name reported by the microVM.

Note that if you're running on an Arm server, you'll need to adapt the prefix of runs-on: from actuated- to actuated-arm64-.

1. Create a test repository and a GitHub Action

    Create ./.github/workflows/ci.yaml:

    name: CI

    on:
        pull_request:
            branches:
            - '*'
        push:
            branches:
            - master
            - main
        workflow_dispatch:

    jobs:
        specs:
            name: specs
            # runs-on: actuated-arm64-2cpu-8gb
            runs-on: actuated-2cpu-8gb
            steps:
            - uses: actions/checkout@v1
            - name: Check specs
              run: |
                ./specs.sh

    Note that the runs-on: field says actuated- and not ubuntu-latest. This is how the actuated control plane knows to send this job to your agent. There are no fixed sets of vCPU and RAM; you can make up your own combinations.

    Then add specs.sh to the root of the repository, and remember that you must run chmod +x specs.sh afterwards to make it executable.

    #!/bin/bash

    echo Information on main disk
    df -h /

    echo Memory info
    free -h

    echo Total CPUs:
    echo CPUs: $(nproc)

    echo CPU Model
    cat /proc/cpuinfo | grep -i "Model" | head -n 2

    echo Kernel and OS info
    uname -a

    echo Generally, KVM should not be available unless specifically enabled
    if ! [ -e /dev/kvm ]; then
        echo "/dev/kvm does not exist"
    else
        echo "/dev/kvm exists"
    fi

    echo OS
    cat /etc/os-release

    echo Egress IP:
    curl -s -L -S https://checkip.amazonaws.com

    echo Speed test of Internet
    sudo pip install speedtest-cli
    speedtest-cli

    echo Checking Docker
    docker run alpine:latest cat /etc/os-release

    Don't leave out this step!

    chmod +x ./specs.sh
2. Hit commit, and watch the VM boot up.

    You'll be able to see the runners registered for your organisation on the Actuated Dashboard, along with the build queue and stats for the current day's builds.

3. If you're curious

    You can view the logs of the agent by logging into one of the Actuated Servers with SSH and running the following commands:

    sudo journalctl -u actuated -f -o cat

    # Just today's logs:
    sudo journalctl -u actuated --since today -o cat

    And each VM writes the logs from its console and the GitHub Actions Runner to /var/log/actuated/:

    sudo cat /var/log/actuated/*

Do you have any questions or comments? Feel free to reach out to us over Slack in the #onboarding channel.

+

Enable actuated for an existing repository

To add actuated to an existing repository, edit the workflow YAML file and change runs-on: to runs-on: actuated-4cpu-16gb, or for Arm builds, to runs-on: actuated-arm64-4cpu-16gb. The values for CPU and RAM can be customised to your needs; there are no hard-coded or fixed combinations, which allows for flexibility.
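
For example, a minimal sketch of the edit, assuming your workflow currently uses ubuntu-latest:

-runs-on: ubuntu-latest
+runs-on: actuated-4cpu-16gb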


Learn more in Custom VM Sizes


Do you use the Docker Hub in your builds? Any Dockerfile with a FROM line that doesn't include a registry hostname will be pulled from docker.io, where there are strict rate limits for unauthenticated users.
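
For instance (alpine:latest and the ghcr.io path below are purely illustrative):

# An unqualified name resolves to docker.io and counts against its rate limits
docker pull alpine:latest

# A name with an explicit registry hostname is pulled from that registry instead
docker pull ghcr.io/OWNER/alpine:latest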

  1. Option 1 - authenticate

    Run docker login, or use the Docker Login Action, just before you run docker build or pull down any images with tooling like KinD. See the sketch after this list.

  2. Option 2 - use a cache/mirror

    Use our guide to Set up a registry cache and mirror - this uses less bandwidth and increases the speed of builds where images are already present in the cache.
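
A minimal sketch of Option 1, assuming your Docker Hub credentials are stored as the illustrative secrets DOCKERHUB_USERNAME and DOCKERHUB_TOKEN:

# Log in non-interactively before building or pulling images
# (the variable names are illustrative, not required by actuated)
echo "$DOCKERHUB_TOKEN" | docker login --username "$DOCKERHUB_USERNAME" --password-stdin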

diff --git a/troubleshooting/index.html b/troubleshooting/index.html
new file mode 100644
index 0000000..c6813bf
--- /dev/null
+++ b/troubleshooting/index.html
@@ -0,0 +1,1239 @@

Troubleshooting

Getting support

All customers have access to a public Slack channel for support and collaboration.

Enterprise customers may also have an upgraded SLA for support tickets via email and access to a private Slack channel.

Billing and your plan

Change your credit card

Sometimes a credit card reaches its limit, or a virtual card is used for a subscription. To change the credit card used for your subscription, click here: My Orders.

Upgrade your plan

If you'd like to upgrade your plan for more concurrent builds, a higher level of support or anything else, you can do so via the Lemon Squeezy dashboard; the additional amount will be applied pro rata.

Update or review your plan

The Actuated Dashboard

The first port of call should be the Actuated Dashboard where you can check the status of your agents and see the current queue of jobs.

For security reasons, an administrator for your GitHub Organisation will need to approve the Actuated Dashboard for access to your organisation before team members will be able to see any data. Send them the link for the dashboard, and have them specifically tick the checkbox for your organisation when logging in for the first time.

If you missed this step, have them head over to their Applications Settings page, click "Authorized OAuth Apps" and then "Actuated Dashboard". On this page, under "Organization access" they can click "Grant" for each of your organisations registered for actuated.

OAuth Access Page

How to "Grant" access to the Dashboard.

Try a direct link here: Actuated Dashboard OAuth App

A job is stuck as "queued"

If you're using a private repo and the job is queued, let us know on Slack and we'll check the audit database to try and find out why.

To remotely check the logs of the actuated service on a host, run the following:

actuated-cli agent-logs --owner ORG --host HOST [--age 20m]

You can also check /var/log/actuated/ for log files; tail -n 20 /var/log/actuated/*.txt should show you any errors that occurred during VM boot-ups or runner registrations. Or check sudo journalctl -u actuated to see what's happening within the actuated service.
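
For example:

# Show the last lines of each VM's console and runner logs:
sudo tail -n 20 /var/log/actuated/*.txt

# Follow the actuated service itself:
sudo journalctl -u actuated -f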

Since 2022, the main GitHub service and GitHub Actions have had a high number of partial or full outages.

Check the GitHub Status Page to make sure GitHub is fully operational, and bear in mind that there could be an issue, even if it hasn't been shown on that page yet.

You can schedule additional VMs to launch, one per queued job, with the CLI:

actuated-cli repair --org OWNER

This command should not be run multiple times without contacting support first.

You pull a lot of large images from the Docker Hub

As much as we like to make our images as small as possible, sometimes we just have to pull down either large artifacts or many smaller ones. It just can't be helped.

Since a microVM is a completely immutable environment, the pull needs to happen on each build, which is actually a good thing.

The pull speed can be dramatically improved by using a registry mirror on each agent.
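
As a rough sanity check, once a mirror has been set up per our guide, you can ask the Docker daemon within a build whether a mirror is configured:

# Prints the "Registry Mirrors" section of docker info, if any is set
docker info | grep -i -A1 "registry mirrors"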

You are running into rate limits when using container images from the Docker Hub

The Docker Hub implements stringent rate limits: 100 pulls per 6 hours for anonymous users, and 200 pulls per 6 hours if you log in. Pro accounts get an increased limit of 5000 pulls per 6 hours.

We've created simple instructions on how to set up a registry mirror to cache images on your Actuated Servers.

A job is running out of RAM or needs more cores

If you suspect a job is running out of RAM or would benefit from more vCPU, you can increase the allocation by changing the runs-on label, as follows:

-runs-on: actuated-8cpu-8gb
+runs-on: actuated-8cpu-16gb

You must set both RAM and vCPU at the same time, in the order of CPU (given as a whole number) followed by RAM (specified in GB).

For arm64 builds, the format follows the same convention: actuated-arm64-8cpu-16gb.
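
As an illustration of the convention, each of the following would be a valid label (the exact combinations here are examples, not a fixed list):

runs-on: actuated-2cpu-8gb
runs-on: actuated-16cpu-32gb
runs-on: actuated-arm64-8cpu-16gb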

Bear in mind that if you set the RAM higher than the default, this may result in fewer concurrent VMs being scheduled on a single server. For example, a server with 64GB of RAM can fit eight 8GB VMs, but only four 16GB VMs.

The maximum number of vCPUs that can be set for a single job is 32; this is an implementation detail of Firecracker and may change in the future.

To find out exactly how many resources are required, see our blog post on right sizing with the vmmeter tool.

Disk space is running out for a job

The disk space allocated for jobs is 30GB by default, but this value can be increased. Contact the actuated team for instructions on how to do this.

A dedicated disk or partition should be allocated for your VMs; if that's not the case, contact us and we'll help you reconfigure the server.

Your agent has been offline or unavailable for a significant period of time

If your agent has been offline for a significant period of time, then our control plane will have disconnected it from its pool of available agents.

Contact us via Slack to have it reinstated.

You need to rotate the authentication token used for your agent

There should not be many reasons to rotate this token. However, if it has been leaked, or an employee has left the company, contact us via email for the update procedure.

You need to rotate your private/public keypair

Your private/public keypair is comparable to an SSH key, although it cannot be used to gain access to your agent via SSH.

If you need to rotate it for some reason, please contact us by email as soon as you can.

Your builds are slower than expected

  • Check free disk space (df -h)
  • Check for unattended updates/upgrades (ps -ef | grep unattended-upgrades and ps -ef | grep apt)
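
A quick sketch of both checks, run on the server over SSH:

# How much space is left on the root filesystem?
df -h

# Are background package updates competing for CPU and disk I/O?
ps -ef | grep unattended-upgrades
ps -ef | grep apt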

If you're using spinning disks, then consider switching to SSDs. If you're already using SSDs, consider using PCIe/NVMe SSDs.


Finally, we do have another way to speed up microVMs by attaching another drive or partition to your host. Contact us for more information.

\ No newline at end of file