diff --git a/.nojekyll b/.nojekyll new file mode 100644 index 000000000..e69de29bb diff --git a/404.html b/404.html new file mode 100644 index 000000000..8b700358d --- /dev/null +++ b/404.html @@ -0,0 +1,746 @@ + + + + + + + + + + + + + + + + + + + + + + DIB + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+
+ +
+ + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ +

404 - Not found

+ +
+
+ + +
+ + + +
+ + + +
+
+
+
+ + + + + + + + + \ No newline at end of file diff --git a/assets/images/favicon.png b/assets/images/favicon.png new file mode 100644 index 000000000..1cf13b9f9 Binary files /dev/null and b/assets/images/favicon.png differ diff --git a/assets/javascripts/bundle.51d95adb.min.js b/assets/javascripts/bundle.51d95adb.min.js new file mode 100644 index 000000000..b20ec6835 --- /dev/null +++ b/assets/javascripts/bundle.51d95adb.min.js @@ -0,0 +1,29 @@ +"use strict";(()=>{var Hi=Object.create;var xr=Object.defineProperty;var Pi=Object.getOwnPropertyDescriptor;var $i=Object.getOwnPropertyNames,kt=Object.getOwnPropertySymbols,Ii=Object.getPrototypeOf,Er=Object.prototype.hasOwnProperty,an=Object.prototype.propertyIsEnumerable;var on=(e,t,r)=>t in e?xr(e,t,{enumerable:!0,configurable:!0,writable:!0,value:r}):e[t]=r,P=(e,t)=>{for(var r in t||(t={}))Er.call(t,r)&&on(e,r,t[r]);if(kt)for(var r of kt(t))an.call(t,r)&&on(e,r,t[r]);return e};var sn=(e,t)=>{var r={};for(var n in e)Er.call(e,n)&&t.indexOf(n)<0&&(r[n]=e[n]);if(e!=null&&kt)for(var n of kt(e))t.indexOf(n)<0&&an.call(e,n)&&(r[n]=e[n]);return r};var Ht=(e,t)=>()=>(t||e((t={exports:{}}).exports,t),t.exports);var Fi=(e,t,r,n)=>{if(t&&typeof t=="object"||typeof t=="function")for(let o of $i(t))!Er.call(e,o)&&o!==r&&xr(e,o,{get:()=>t[o],enumerable:!(n=Pi(t,o))||n.enumerable});return e};var yt=(e,t,r)=>(r=e!=null?Hi(Ii(e)):{},Fi(t||!e||!e.__esModule?xr(r,"default",{value:e,enumerable:!0}):r,e));var fn=Ht((wr,cn)=>{(function(e,t){typeof wr=="object"&&typeof cn!="undefined"?t():typeof define=="function"&&define.amd?define(t):t()})(wr,function(){"use strict";function e(r){var n=!0,o=!1,i=null,a={text:!0,search:!0,url:!0,tel:!0,email:!0,password:!0,number:!0,date:!0,month:!0,week:!0,time:!0,datetime:!0,"datetime-local":!0};function s(T){return!!(T&&T!==document&&T.nodeName!=="HTML"&&T.nodeName!=="BODY"&&"classList"in T&&"contains"in T.classList)}function f(T){var 
Ke=T.type,We=T.tagName;return!!(We==="INPUT"&&a[Ke]&&!T.readOnly||We==="TEXTAREA"&&!T.readOnly||T.isContentEditable)}function c(T){T.classList.contains("focus-visible")||(T.classList.add("focus-visible"),T.setAttribute("data-focus-visible-added",""))}function u(T){T.hasAttribute("data-focus-visible-added")&&(T.classList.remove("focus-visible"),T.removeAttribute("data-focus-visible-added"))}function p(T){T.metaKey||T.altKey||T.ctrlKey||(s(r.activeElement)&&c(r.activeElement),n=!0)}function m(T){n=!1}function d(T){s(T.target)&&(n||f(T.target))&&c(T.target)}function h(T){s(T.target)&&(T.target.classList.contains("focus-visible")||T.target.hasAttribute("data-focus-visible-added"))&&(o=!0,window.clearTimeout(i),i=window.setTimeout(function(){o=!1},100),u(T.target))}function v(T){document.visibilityState==="hidden"&&(o&&(n=!0),B())}function B(){document.addEventListener("mousemove",z),document.addEventListener("mousedown",z),document.addEventListener("mouseup",z),document.addEventListener("pointermove",z),document.addEventListener("pointerdown",z),document.addEventListener("pointerup",z),document.addEventListener("touchmove",z),document.addEventListener("touchstart",z),document.addEventListener("touchend",z)}function re(){document.removeEventListener("mousemove",z),document.removeEventListener("mousedown",z),document.removeEventListener("mouseup",z),document.removeEventListener("pointermove",z),document.removeEventListener("pointerdown",z),document.removeEventListener("pointerup",z),document.removeEventListener("touchmove",z),document.removeEventListener("touchstart",z),document.removeEventListener("touchend",z)}function 
z(T){T.target.nodeName&&T.target.nodeName.toLowerCase()==="html"||(n=!1,re())}document.addEventListener("keydown",p,!0),document.addEventListener("mousedown",m,!0),document.addEventListener("pointerdown",m,!0),document.addEventListener("touchstart",m,!0),document.addEventListener("visibilitychange",v,!0),B(),r.addEventListener("focus",d,!0),r.addEventListener("blur",h,!0),r.nodeType===Node.DOCUMENT_FRAGMENT_NODE&&r.host?r.host.setAttribute("data-js-focus-visible",""):r.nodeType===Node.DOCUMENT_NODE&&(document.documentElement.classList.add("js-focus-visible"),document.documentElement.setAttribute("data-js-focus-visible",""))}if(typeof window!="undefined"&&typeof document!="undefined"){window.applyFocusVisiblePolyfill=e;var t;try{t=new CustomEvent("focus-visible-polyfill-ready")}catch(r){t=document.createEvent("CustomEvent"),t.initCustomEvent("focus-visible-polyfill-ready",!1,!1,{})}window.dispatchEvent(t)}typeof document!="undefined"&&e(document)})});var un=Ht(Sr=>{(function(e){var t=function(){try{return!!Symbol.iterator}catch(c){return!1}},r=t(),n=function(c){var u={next:function(){var p=c.shift();return{done:p===void 0,value:p}}};return r&&(u[Symbol.iterator]=function(){return u}),u},o=function(c){return encodeURIComponent(c).replace(/%20/g,"+")},i=function(c){return decodeURIComponent(String(c).replace(/\+/g," "))},a=function(){var c=function(p){Object.defineProperty(this,"_entries",{writable:!0,value:{}});var m=typeof p;if(m!=="undefined")if(m==="string")p!==""&&this._fromString(p);else if(p instanceof c){var d=this;p.forEach(function(re,z){d.append(z,re)})}else if(p!==null&&m==="object")if(Object.prototype.toString.call(p)==="[object Array]")for(var h=0;hd[0]?1:0}),c._entries&&(c._entries={});for(var p=0;p1?i(d[1]):"")}})})(typeof global!="undefined"?global:typeof window!="undefined"?window:typeof self!="undefined"?self:Sr);(function(e){var t=function(){try{var o=new e.URL("b","http://a");return o.pathname="c 
d",o.href==="http://a/c%20d"&&o.searchParams}catch(i){return!1}},r=function(){var o=e.URL,i=function(f,c){typeof f!="string"&&(f=String(f)),c&&typeof c!="string"&&(c=String(c));var u=document,p;if(c&&(e.location===void 0||c!==e.location.href)){c=c.toLowerCase(),u=document.implementation.createHTMLDocument(""),p=u.createElement("base"),p.href=c,u.head.appendChild(p);try{if(p.href.indexOf(c)!==0)throw new Error(p.href)}catch(T){throw new Error("URL unable to set base "+c+" due to "+T)}}var m=u.createElement("a");m.href=f,p&&(u.body.appendChild(m),m.href=m.href);var d=u.createElement("input");if(d.type="url",d.value=f,m.protocol===":"||!/:/.test(m.href)||!d.checkValidity()&&!c)throw new TypeError("Invalid URL");Object.defineProperty(this,"_anchorElement",{value:m});var h=new e.URLSearchParams(this.search),v=!0,B=!0,re=this;["append","delete","set"].forEach(function(T){var Ke=h[T];h[T]=function(){Ke.apply(h,arguments),v&&(B=!1,re.search=h.toString(),B=!0)}}),Object.defineProperty(this,"searchParams",{value:h,enumerable:!0});var z=void 0;Object.defineProperty(this,"_updateSearchParams",{enumerable:!1,configurable:!1,writable:!1,value:function(){this.search!==z&&(z=this.search,B&&(v=!1,this.searchParams._fromString(this.search),v=!0))}})},a=i.prototype,s=function(f){Object.defineProperty(a,f,{get:function(){return this._anchorElement[f]},set:function(c){this._anchorElement[f]=c},enumerable:!0})};["hash","host","hostname","port","protocol"].forEach(function(f){s(f)}),Object.defineProperty(a,"search",{get:function(){return this._anchorElement.search},set:function(f){this._anchorElement.search=f,this._updateSearchParams()},enumerable:!0}),Object.defineProperties(a,{toString:{get:function(){var f=this;return function(){return f.href}}},href:{get:function(){return this._anchorElement.href.replace(/\?$/,"")},set:function(f){this._anchorElement.href=f,this._updateSearchParams()},enumerable:!0},pathname:{get:function(){return 
this._anchorElement.pathname.replace(/(^\/?)/,"/")},set:function(f){this._anchorElement.pathname=f},enumerable:!0},origin:{get:function(){var f={"http:":80,"https:":443,"ftp:":21}[this._anchorElement.protocol],c=this._anchorElement.port!=f&&this._anchorElement.port!=="";return this._anchorElement.protocol+"//"+this._anchorElement.hostname+(c?":"+this._anchorElement.port:"")},enumerable:!0},password:{get:function(){return""},set:function(f){},enumerable:!0},username:{get:function(){return""},set:function(f){},enumerable:!0}}),i.createObjectURL=function(f){return o.createObjectURL.apply(o,arguments)},i.revokeObjectURL=function(f){return o.revokeObjectURL.apply(o,arguments)},e.URL=i};if(t()||r(),e.location!==void 0&&!("origin"in e.location)){var n=function(){return e.location.protocol+"//"+e.location.hostname+(e.location.port?":"+e.location.port:"")};try{Object.defineProperty(e.location,"origin",{get:n,enumerable:!0})}catch(o){setInterval(function(){e.location.origin=n()},100)}}})(typeof global!="undefined"?global:typeof window!="undefined"?window:typeof self!="undefined"?self:Sr)});var Qr=Ht((Lt,Kr)=>{/*! 
+ * clipboard.js v2.0.11 + * https://clipboardjs.com/ + * + * Licensed MIT © Zeno Rocha + */(function(t,r){typeof Lt=="object"&&typeof Kr=="object"?Kr.exports=r():typeof define=="function"&&define.amd?define([],r):typeof Lt=="object"?Lt.ClipboardJS=r():t.ClipboardJS=r()})(Lt,function(){return function(){var e={686:function(n,o,i){"use strict";i.d(o,{default:function(){return ki}});var a=i(279),s=i.n(a),f=i(370),c=i.n(f),u=i(817),p=i.n(u);function m(j){try{return document.execCommand(j)}catch(O){return!1}}var d=function(O){var w=p()(O);return m("cut"),w},h=d;function v(j){var O=document.documentElement.getAttribute("dir")==="rtl",w=document.createElement("textarea");w.style.fontSize="12pt",w.style.border="0",w.style.padding="0",w.style.margin="0",w.style.position="absolute",w.style[O?"right":"left"]="-9999px";var k=window.pageYOffset||document.documentElement.scrollTop;return w.style.top="".concat(k,"px"),w.setAttribute("readonly",""),w.value=j,w}var B=function(O,w){var k=v(O);w.container.appendChild(k);var F=p()(k);return m("copy"),k.remove(),F},re=function(O){var w=arguments.length>1&&arguments[1]!==void 0?arguments[1]:{container:document.body},k="";return typeof O=="string"?k=B(O,w):O instanceof HTMLInputElement&&!["text","search","url","tel","password"].includes(O==null?void 0:O.type)?k=B(O.value,w):(k=p()(O),m("copy")),k},z=re;function T(j){return typeof Symbol=="function"&&typeof Symbol.iterator=="symbol"?T=function(w){return typeof w}:T=function(w){return w&&typeof Symbol=="function"&&w.constructor===Symbol&&w!==Symbol.prototype?"symbol":typeof w},T(j)}var Ke=function(){var O=arguments.length>0&&arguments[0]!==void 0?arguments[0]:{},w=O.action,k=w===void 0?"copy":w,F=O.container,q=O.target,Le=O.text;if(k!=="copy"&&k!=="cut")throw new Error('Invalid "action" value, use either "copy" or "cut"');if(q!==void 0)if(q&&T(q)==="object"&&q.nodeType===1){if(k==="copy"&&q.hasAttribute("disabled"))throw new Error('Invalid "target" attribute. 
Please use "readonly" instead of "disabled" attribute');if(k==="cut"&&(q.hasAttribute("readonly")||q.hasAttribute("disabled")))throw new Error(`Invalid "target" attribute. You can't cut text from elements with "readonly" or "disabled" attributes`)}else throw new Error('Invalid "target" value, use a valid Element');if(Le)return z(Le,{container:F});if(q)return k==="cut"?h(q):z(q,{container:F})},We=Ke;function Ie(j){return typeof Symbol=="function"&&typeof Symbol.iterator=="symbol"?Ie=function(w){return typeof w}:Ie=function(w){return w&&typeof Symbol=="function"&&w.constructor===Symbol&&w!==Symbol.prototype?"symbol":typeof w},Ie(j)}function Ti(j,O){if(!(j instanceof O))throw new TypeError("Cannot call a class as a function")}function nn(j,O){for(var w=0;w0&&arguments[0]!==void 0?arguments[0]:{};this.action=typeof F.action=="function"?F.action:this.defaultAction,this.target=typeof F.target=="function"?F.target:this.defaultTarget,this.text=typeof F.text=="function"?F.text:this.defaultText,this.container=Ie(F.container)==="object"?F.container:document.body}},{key:"listenClick",value:function(F){var q=this;this.listener=c()(F,"click",function(Le){return q.onClick(Le)})}},{key:"onClick",value:function(F){var q=F.delegateTarget||F.currentTarget,Le=this.action(q)||"copy",Rt=We({action:Le,container:this.container,target:this.target(q),text:this.text(q)});this.emit(Rt?"success":"error",{action:Le,text:Rt,trigger:q,clearSelection:function(){q&&q.focus(),window.getSelection().removeAllRanges()}})}},{key:"defaultAction",value:function(F){return yr("action",F)}},{key:"defaultTarget",value:function(F){var q=yr("target",F);if(q)return document.querySelector(q)}},{key:"defaultText",value:function(F){return yr("text",F)}},{key:"destroy",value:function(){this.listener.destroy()}}],[{key:"copy",value:function(F){var q=arguments.length>1&&arguments[1]!==void 0?arguments[1]:{container:document.body};return z(F,q)}},{key:"cut",value:function(F){return 
h(F)}},{key:"isSupported",value:function(){var F=arguments.length>0&&arguments[0]!==void 0?arguments[0]:["copy","cut"],q=typeof F=="string"?[F]:F,Le=!!document.queryCommandSupported;return q.forEach(function(Rt){Le=Le&&!!document.queryCommandSupported(Rt)}),Le}}]),w}(s()),ki=Ri},828:function(n){var o=9;if(typeof Element!="undefined"&&!Element.prototype.matches){var i=Element.prototype;i.matches=i.matchesSelector||i.mozMatchesSelector||i.msMatchesSelector||i.oMatchesSelector||i.webkitMatchesSelector}function a(s,f){for(;s&&s.nodeType!==o;){if(typeof s.matches=="function"&&s.matches(f))return s;s=s.parentNode}}n.exports=a},438:function(n,o,i){var a=i(828);function s(u,p,m,d,h){var v=c.apply(this,arguments);return u.addEventListener(m,v,h),{destroy:function(){u.removeEventListener(m,v,h)}}}function f(u,p,m,d,h){return typeof u.addEventListener=="function"?s.apply(null,arguments):typeof m=="function"?s.bind(null,document).apply(null,arguments):(typeof u=="string"&&(u=document.querySelectorAll(u)),Array.prototype.map.call(u,function(v){return s(v,p,m,d,h)}))}function c(u,p,m,d){return function(h){h.delegateTarget=a(h.target,p),h.delegateTarget&&d.call(u,h)}}n.exports=f},879:function(n,o){o.node=function(i){return i!==void 0&&i instanceof HTMLElement&&i.nodeType===1},o.nodeList=function(i){var a=Object.prototype.toString.call(i);return i!==void 0&&(a==="[object NodeList]"||a==="[object HTMLCollection]")&&"length"in i&&(i.length===0||o.node(i[0]))},o.string=function(i){return typeof i=="string"||i instanceof String},o.fn=function(i){var a=Object.prototype.toString.call(i);return a==="[object Function]"}},370:function(n,o,i){var a=i(879),s=i(438);function f(m,d,h){if(!m&&!d&&!h)throw new Error("Missing required arguments");if(!a.string(d))throw new TypeError("Second argument must be a String");if(!a.fn(h))throw new TypeError("Third argument must be a Function");if(a.node(m))return c(m,d,h);if(a.nodeList(m))return u(m,d,h);if(a.string(m))return p(m,d,h);throw new 
TypeError("First argument must be a String, HTMLElement, HTMLCollection, or NodeList")}function c(m,d,h){return m.addEventListener(d,h),{destroy:function(){m.removeEventListener(d,h)}}}function u(m,d,h){return Array.prototype.forEach.call(m,function(v){v.addEventListener(d,h)}),{destroy:function(){Array.prototype.forEach.call(m,function(v){v.removeEventListener(d,h)})}}}function p(m,d,h){return s(document.body,m,d,h)}n.exports=f},817:function(n){function o(i){var a;if(i.nodeName==="SELECT")i.focus(),a=i.value;else if(i.nodeName==="INPUT"||i.nodeName==="TEXTAREA"){var s=i.hasAttribute("readonly");s||i.setAttribute("readonly",""),i.select(),i.setSelectionRange(0,i.value.length),s||i.removeAttribute("readonly"),a=i.value}else{i.hasAttribute("contenteditable")&&i.focus();var f=window.getSelection(),c=document.createRange();c.selectNodeContents(i),f.removeAllRanges(),f.addRange(c),a=f.toString()}return a}n.exports=o},279:function(n){function o(){}o.prototype={on:function(i,a,s){var f=this.e||(this.e={});return(f[i]||(f[i]=[])).push({fn:a,ctx:s}),this},once:function(i,a,s){var f=this;function c(){f.off(i,c),a.apply(s,arguments)}return c._=a,this.on(i,c,s)},emit:function(i){var a=[].slice.call(arguments,1),s=((this.e||(this.e={}))[i]||[]).slice(),f=0,c=s.length;for(f;f{"use strict";/*! 
+ * escape-html + * Copyright(c) 2012-2013 TJ Holowaychuk + * Copyright(c) 2015 Andreas Lubbe + * Copyright(c) 2015 Tiancheng "Timothy" Gu + * MIT Licensed + */var is=/["'&<>]/;Jo.exports=as;function as(e){var t=""+e,r=is.exec(t);if(!r)return t;var n,o="",i=0,a=0;for(i=r.index;i0&&i[i.length-1])&&(c[0]===6||c[0]===2)){r=0;continue}if(c[0]===3&&(!i||c[1]>i[0]&&c[1]=e.length&&(e=void 0),{value:e&&e[n++],done:!e}}};throw new TypeError(t?"Object is not iterable.":"Symbol.iterator is not defined.")}function W(e,t){var r=typeof Symbol=="function"&&e[Symbol.iterator];if(!r)return e;var n=r.call(e),o,i=[],a;try{for(;(t===void 0||t-- >0)&&!(o=n.next()).done;)i.push(o.value)}catch(s){a={error:s}}finally{try{o&&!o.done&&(r=n.return)&&r.call(n)}finally{if(a)throw a.error}}return i}function D(e,t,r){if(r||arguments.length===2)for(var n=0,o=t.length,i;n1||s(m,d)})})}function s(m,d){try{f(n[m](d))}catch(h){p(i[0][3],h)}}function f(m){m.value instanceof Xe?Promise.resolve(m.value.v).then(c,u):p(i[0][2],m)}function c(m){s("next",m)}function u(m){s("throw",m)}function p(m,d){m(d),i.shift(),i.length&&s(i[0][0],i[0][1])}}function mn(e){if(!Symbol.asyncIterator)throw new TypeError("Symbol.asyncIterator is not defined.");var t=e[Symbol.asyncIterator],r;return t?t.call(e):(e=typeof xe=="function"?xe(e):e[Symbol.iterator](),r={},n("next"),n("throw"),n("return"),r[Symbol.asyncIterator]=function(){return this},r);function n(i){r[i]=e[i]&&function(a){return new Promise(function(s,f){a=e[i](a),o(s,f,a.done,a.value)})}}function o(i,a,s,f){Promise.resolve(f).then(function(c){i({value:c,done:s})},a)}}function A(e){return typeof e=="function"}function at(e){var t=function(n){Error.call(n),n.stack=new Error().stack},r=e(t);return r.prototype=Object.create(Error.prototype),r.prototype.constructor=r,r}var $t=at(function(e){return function(r){e(this),this.message=r?r.length+` errors occurred during unsubscription: +`+r.map(function(n,o){return o+1+") "+n.toString()}).join(` + 
`):"",this.name="UnsubscriptionError",this.errors=r}});function De(e,t){if(e){var r=e.indexOf(t);0<=r&&e.splice(r,1)}}var Fe=function(){function e(t){this.initialTeardown=t,this.closed=!1,this._parentage=null,this._finalizers=null}return e.prototype.unsubscribe=function(){var t,r,n,o,i;if(!this.closed){this.closed=!0;var a=this._parentage;if(a)if(this._parentage=null,Array.isArray(a))try{for(var s=xe(a),f=s.next();!f.done;f=s.next()){var c=f.value;c.remove(this)}}catch(v){t={error:v}}finally{try{f&&!f.done&&(r=s.return)&&r.call(s)}finally{if(t)throw t.error}}else a.remove(this);var u=this.initialTeardown;if(A(u))try{u()}catch(v){i=v instanceof $t?v.errors:[v]}var p=this._finalizers;if(p){this._finalizers=null;try{for(var m=xe(p),d=m.next();!d.done;d=m.next()){var h=d.value;try{dn(h)}catch(v){i=i!=null?i:[],v instanceof $t?i=D(D([],W(i)),W(v.errors)):i.push(v)}}}catch(v){n={error:v}}finally{try{d&&!d.done&&(o=m.return)&&o.call(m)}finally{if(n)throw n.error}}}if(i)throw new $t(i)}},e.prototype.add=function(t){var r;if(t&&t!==this)if(this.closed)dn(t);else{if(t instanceof e){if(t.closed||t._hasParent(this))return;t._addParent(this)}(this._finalizers=(r=this._finalizers)!==null&&r!==void 0?r:[]).push(t)}},e.prototype._hasParent=function(t){var r=this._parentage;return r===t||Array.isArray(r)&&r.includes(t)},e.prototype._addParent=function(t){var r=this._parentage;this._parentage=Array.isArray(r)?(r.push(t),r):r?[r,t]:t},e.prototype._removeParent=function(t){var r=this._parentage;r===t?this._parentage=null:Array.isArray(r)&&De(r,t)},e.prototype.remove=function(t){var r=this._finalizers;r&&De(r,t),t instanceof e&&t._removeParent(this)},e.EMPTY=function(){var t=new e;return t.closed=!0,t}(),e}();var Or=Fe.EMPTY;function It(e){return e instanceof Fe||e&&"closed"in e&&A(e.remove)&&A(e.add)&&A(e.unsubscribe)}function dn(e){A(e)?e():e.unsubscribe()}var Ae={onUnhandledError:null,onStoppedNotification:null,Promise:void 
0,useDeprecatedSynchronousErrorHandling:!1,useDeprecatedNextContext:!1};var st={setTimeout:function(e,t){for(var r=[],n=2;n0},enumerable:!1,configurable:!0}),t.prototype._trySubscribe=function(r){return this._throwIfClosed(),e.prototype._trySubscribe.call(this,r)},t.prototype._subscribe=function(r){return this._throwIfClosed(),this._checkFinalizedStatuses(r),this._innerSubscribe(r)},t.prototype._innerSubscribe=function(r){var n=this,o=this,i=o.hasError,a=o.isStopped,s=o.observers;return i||a?Or:(this.currentObservers=null,s.push(r),new Fe(function(){n.currentObservers=null,De(s,r)}))},t.prototype._checkFinalizedStatuses=function(r){var n=this,o=n.hasError,i=n.thrownError,a=n.isStopped;o?r.error(i):a&&r.complete()},t.prototype.asObservable=function(){var r=new U;return r.source=this,r},t.create=function(r,n){return new wn(r,n)},t}(U);var wn=function(e){ne(t,e);function t(r,n){var o=e.call(this)||this;return o.destination=r,o.source=n,o}return t.prototype.next=function(r){var n,o;(o=(n=this.destination)===null||n===void 0?void 0:n.next)===null||o===void 0||o.call(n,r)},t.prototype.error=function(r){var n,o;(o=(n=this.destination)===null||n===void 0?void 0:n.error)===null||o===void 0||o.call(n,r)},t.prototype.complete=function(){var r,n;(n=(r=this.destination)===null||r===void 0?void 0:r.complete)===null||n===void 0||n.call(r)},t.prototype._subscribe=function(r){var n,o;return(o=(n=this.source)===null||n===void 0?void 0:n.subscribe(r))!==null&&o!==void 0?o:Or},t}(E);var Et={now:function(){return(Et.delegate||Date).now()},delegate:void 0};var wt=function(e){ne(t,e);function t(r,n,o){r===void 0&&(r=1/0),n===void 0&&(n=1/0),o===void 0&&(o=Et);var i=e.call(this)||this;return i._bufferSize=r,i._windowTime=n,i._timestampProvider=o,i._buffer=[],i._infiniteTimeWindow=!0,i._infiniteTimeWindow=n===1/0,i._bufferSize=Math.max(1,r),i._windowTime=Math.max(1,n),i}return t.prototype.next=function(r){var 
n=this,o=n.isStopped,i=n._buffer,a=n._infiniteTimeWindow,s=n._timestampProvider,f=n._windowTime;o||(i.push(r),!a&&i.push(s.now()+f)),this._trimBuffer(),e.prototype.next.call(this,r)},t.prototype._subscribe=function(r){this._throwIfClosed(),this._trimBuffer();for(var n=this._innerSubscribe(r),o=this,i=o._infiniteTimeWindow,a=o._buffer,s=a.slice(),f=0;f0?e.prototype.requestAsyncId.call(this,r,n,o):(r.actions.push(this),r._scheduled||(r._scheduled=ut.requestAnimationFrame(function(){return r.flush(void 0)})))},t.prototype.recycleAsyncId=function(r,n,o){var i;if(o===void 0&&(o=0),o!=null?o>0:this.delay>0)return e.prototype.recycleAsyncId.call(this,r,n,o);var a=r.actions;n!=null&&((i=a[a.length-1])===null||i===void 0?void 0:i.id)!==n&&(ut.cancelAnimationFrame(n),r._scheduled=void 0)},t}(Ut);var On=function(e){ne(t,e);function t(){return e!==null&&e.apply(this,arguments)||this}return t.prototype.flush=function(r){this._active=!0;var n=this._scheduled;this._scheduled=void 0;var o=this.actions,i;r=r||o.shift();do if(i=r.execute(r.state,r.delay))break;while((r=o[0])&&r.id===n&&o.shift());if(this._active=!1,i){for(;(r=o[0])&&r.id===n&&o.shift();)r.unsubscribe();throw i}},t}(Wt);var we=new On(Tn);var R=new U(function(e){return e.complete()});function Dt(e){return e&&A(e.schedule)}function kr(e){return e[e.length-1]}function Qe(e){return A(kr(e))?e.pop():void 0}function Se(e){return Dt(kr(e))?e.pop():void 0}function Vt(e,t){return typeof kr(e)=="number"?e.pop():t}var pt=function(e){return e&&typeof e.length=="number"&&typeof e!="function"};function zt(e){return A(e==null?void 0:e.then)}function Nt(e){return A(e[ft])}function qt(e){return Symbol.asyncIterator&&A(e==null?void 0:e[Symbol.asyncIterator])}function Kt(e){return new TypeError("You provided "+(e!==null&&typeof e=="object"?"an invalid object":"'"+e+"'")+" where a stream was expected. 
You can provide an Observable, Promise, ReadableStream, Array, AsyncIterable, or Iterable.")}function Ki(){return typeof Symbol!="function"||!Symbol.iterator?"@@iterator":Symbol.iterator}var Qt=Ki();function Yt(e){return A(e==null?void 0:e[Qt])}function Gt(e){return ln(this,arguments,function(){var r,n,o,i;return Pt(this,function(a){switch(a.label){case 0:r=e.getReader(),a.label=1;case 1:a.trys.push([1,,9,10]),a.label=2;case 2:return[4,Xe(r.read())];case 3:return n=a.sent(),o=n.value,i=n.done,i?[4,Xe(void 0)]:[3,5];case 4:return[2,a.sent()];case 5:return[4,Xe(o)];case 6:return[4,a.sent()];case 7:return a.sent(),[3,2];case 8:return[3,10];case 9:return r.releaseLock(),[7];case 10:return[2]}})})}function Bt(e){return A(e==null?void 0:e.getReader)}function $(e){if(e instanceof U)return e;if(e!=null){if(Nt(e))return Qi(e);if(pt(e))return Yi(e);if(zt(e))return Gi(e);if(qt(e))return _n(e);if(Yt(e))return Bi(e);if(Bt(e))return Ji(e)}throw Kt(e)}function Qi(e){return new U(function(t){var r=e[ft]();if(A(r.subscribe))return r.subscribe(t);throw new TypeError("Provided object does not correctly implement Symbol.observable")})}function Yi(e){return new U(function(t){for(var r=0;r=2;return function(n){return n.pipe(e?_(function(o,i){return e(o,i,n)}):me,Oe(1),r?He(t):zn(function(){return new Xt}))}}function Nn(){for(var e=[],t=0;t=2,!0))}function fe(e){e===void 0&&(e={});var t=e.connector,r=t===void 0?function(){return new E}:t,n=e.resetOnError,o=n===void 0?!0:n,i=e.resetOnComplete,a=i===void 0?!0:i,s=e.resetOnRefCountZero,f=s===void 0?!0:s;return function(c){var u,p,m,d=0,h=!1,v=!1,B=function(){p==null||p.unsubscribe(),p=void 0},re=function(){B(),u=m=void 0,h=v=!1},z=function(){var T=u;re(),T==null||T.unsubscribe()};return g(function(T,Ke){d++,!v&&!h&&B();var We=m=m!=null?m:r();Ke.add(function(){d--,d===0&&!v&&!h&&(p=jr(z,f))}),We.subscribe(Ke),!u&&d>0&&(u=new et({next:function(Ie){return 
We.next(Ie)},error:function(Ie){v=!0,B(),p=jr(re,o,Ie),We.error(Ie)},complete:function(){h=!0,B(),p=jr(re,a),We.complete()}}),$(T).subscribe(u))})(c)}}function jr(e,t){for(var r=[],n=2;ne.next(document)),e}function K(e,t=document){return Array.from(t.querySelectorAll(e))}function V(e,t=document){let r=se(e,t);if(typeof r=="undefined")throw new ReferenceError(`Missing element: expected "${e}" to be present`);return r}function se(e,t=document){return t.querySelector(e)||void 0}function _e(){return document.activeElement instanceof HTMLElement&&document.activeElement||void 0}function tr(e){return L(b(document.body,"focusin"),b(document.body,"focusout")).pipe(ke(1),l(()=>{let t=_e();return typeof t!="undefined"?e.contains(t):!1}),N(e===_e()),Y())}function Be(e){return{x:e.offsetLeft,y:e.offsetTop}}function Yn(e){return L(b(window,"load"),b(window,"resize")).pipe(Ce(0,we),l(()=>Be(e)),N(Be(e)))}function rr(e){return{x:e.scrollLeft,y:e.scrollTop}}function dt(e){return L(b(e,"scroll"),b(window,"resize")).pipe(Ce(0,we),l(()=>rr(e)),N(rr(e)))}var Bn=function(){if(typeof Map!="undefined")return Map;function e(t,r){var n=-1;return t.some(function(o,i){return o[0]===r?(n=i,!0):!1}),n}return function(){function t(){this.__entries__=[]}return Object.defineProperty(t.prototype,"size",{get:function(){return this.__entries__.length},enumerable:!0,configurable:!0}),t.prototype.get=function(r){var n=e(this.__entries__,r),o=this.__entries__[n];return o&&o[1]},t.prototype.set=function(r,n){var o=e(this.__entries__,r);~o?this.__entries__[o][1]=n:this.__entries__.push([r,n])},t.prototype.delete=function(r){var n=this.__entries__,o=e(n,r);~o&&n.splice(o,1)},t.prototype.has=function(r){return!!~e(this.__entries__,r)},t.prototype.clear=function(){this.__entries__.splice(0)},t.prototype.forEach=function(r,n){n===void 0&&(n=null);for(var 
o=0,i=this.__entries__;o0},e.prototype.connect_=function(){!zr||this.connected_||(document.addEventListener("transitionend",this.onTransitionEnd_),window.addEventListener("resize",this.refresh),xa?(this.mutationsObserver_=new MutationObserver(this.refresh),this.mutationsObserver_.observe(document,{attributes:!0,childList:!0,characterData:!0,subtree:!0})):(document.addEventListener("DOMSubtreeModified",this.refresh),this.mutationEventsAdded_=!0),this.connected_=!0)},e.prototype.disconnect_=function(){!zr||!this.connected_||(document.removeEventListener("transitionend",this.onTransitionEnd_),window.removeEventListener("resize",this.refresh),this.mutationsObserver_&&this.mutationsObserver_.disconnect(),this.mutationEventsAdded_&&document.removeEventListener("DOMSubtreeModified",this.refresh),this.mutationsObserver_=null,this.mutationEventsAdded_=!1,this.connected_=!1)},e.prototype.onTransitionEnd_=function(t){var r=t.propertyName,n=r===void 0?"":r,o=ya.some(function(i){return!!~n.indexOf(i)});o&&this.refresh()},e.getInstance=function(){return this.instance_||(this.instance_=new e),this.instance_},e.instance_=null,e}(),Jn=function(e,t){for(var r=0,n=Object.keys(t);r0},e}(),Zn=typeof WeakMap!="undefined"?new WeakMap:new Bn,eo=function(){function e(t){if(!(this instanceof e))throw new TypeError("Cannot call a class as a function.");if(!arguments.length)throw new TypeError("1 argument required, but only 0 present.");var r=Ea.getInstance(),n=new Ra(t,r,this);Zn.set(this,n)}return e}();["observe","unobserve","disconnect"].forEach(function(e){eo.prototype[e]=function(){var t;return(t=Zn.get(this))[e].apply(t,arguments)}});var ka=function(){return typeof nr.ResizeObserver!="undefined"?nr.ResizeObserver:eo}(),to=ka;var ro=new E,Ha=I(()=>H(new to(e=>{for(let t of e)ro.next(t)}))).pipe(x(e=>L(Te,H(e)).pipe(C(()=>e.disconnect()))),J(1));function de(e){return{width:e.offsetWidth,height:e.offsetHeight}}function ge(e){return 
Ha.pipe(S(t=>t.observe(e)),x(t=>ro.pipe(_(({target:r})=>r===e),C(()=>t.unobserve(e)),l(()=>de(e)))),N(de(e)))}function bt(e){return{width:e.scrollWidth,height:e.scrollHeight}}function ar(e){let t=e.parentElement;for(;t&&(e.scrollWidth<=t.scrollWidth&&e.scrollHeight<=t.scrollHeight);)t=(e=t).parentElement;return t?e:void 0}var no=new E,Pa=I(()=>H(new IntersectionObserver(e=>{for(let t of e)no.next(t)},{threshold:0}))).pipe(x(e=>L(Te,H(e)).pipe(C(()=>e.disconnect()))),J(1));function sr(e){return Pa.pipe(S(t=>t.observe(e)),x(t=>no.pipe(_(({target:r})=>r===e),C(()=>t.unobserve(e)),l(({isIntersecting:r})=>r))))}function oo(e,t=16){return dt(e).pipe(l(({y:r})=>{let n=de(e),o=bt(e);return r>=o.height-n.height-t}),Y())}var cr={drawer:V("[data-md-toggle=drawer]"),search:V("[data-md-toggle=search]")};function io(e){return cr[e].checked}function qe(e,t){cr[e].checked!==t&&cr[e].click()}function je(e){let t=cr[e];return b(t,"change").pipe(l(()=>t.checked),N(t.checked))}function $a(e,t){switch(e.constructor){case HTMLInputElement:return e.type==="radio"?/^Arrow/.test(t):!0;case HTMLSelectElement:case HTMLTextAreaElement:return!0;default:return e.isContentEditable}}function Ia(){return L(b(window,"compositionstart").pipe(l(()=>!0)),b(window,"compositionend").pipe(l(()=>!1))).pipe(N(!1))}function ao(){let e=b(window,"keydown").pipe(_(t=>!(t.metaKey||t.ctrlKey)),l(t=>({mode:io("search")?"search":"global",type:t.key,claim(){t.preventDefault(),t.stopPropagation()}})),_(({mode:t,type:r})=>{if(t==="global"){let n=_e();if(typeof n!="undefined")return!$a(n,r)}return!0}),fe());return Ia().pipe(x(t=>t?R:e))}function Me(){return new URL(location.href)}function ot(e){location.href=e.href}function so(){return new E}function co(e,t){if(typeof t=="string"||typeof t=="number")e.innerHTML+=t.toString();else if(t instanceof Node)e.appendChild(t);else if(Array.isArray(t))for(let r of t)co(e,r)}function M(e,t,...r){let n=document.createElement(e);if(t)for(let o of Object.keys(t))typeof 
t[o]!="undefined"&&(typeof t[o]!="boolean"?n.setAttribute(o,t[o]):n.setAttribute(o,""));for(let o of r)co(n,o);return n}function fr(e){if(e>999){let t=+((e-950)%1e3>99);return`${((e+1e-6)/1e3).toFixed(t)}k`}else return e.toString()}function fo(){return location.hash.substring(1)}function uo(e){let t=M("a",{href:e});t.addEventListener("click",r=>r.stopPropagation()),t.click()}function Fa(){return b(window,"hashchange").pipe(l(fo),N(fo()),_(e=>e.length>0),J(1))}function po(){return Fa().pipe(l(e=>se(`[id="${e}"]`)),_(e=>typeof e!="undefined"))}function Nr(e){let t=matchMedia(e);return Zt(r=>t.addListener(()=>r(t.matches))).pipe(N(t.matches))}function lo(){let e=matchMedia("print");return L(b(window,"beforeprint").pipe(l(()=>!0)),b(window,"afterprint").pipe(l(()=>!1))).pipe(N(e.matches))}function qr(e,t){return e.pipe(x(r=>r?t():R))}function ur(e,t={credentials:"same-origin"}){return ve(fetch(`${e}`,t)).pipe(ce(()=>R),x(r=>r.status!==200?Tt(()=>new Error(r.statusText)):H(r)))}function Ue(e,t){return ur(e,t).pipe(x(r=>r.json()),J(1))}function mo(e,t){let r=new DOMParser;return ur(e,t).pipe(x(n=>n.text()),l(n=>r.parseFromString(n,"text/xml")),J(1))}function pr(e){let t=M("script",{src:e});return I(()=>(document.head.appendChild(t),L(b(t,"load"),b(t,"error").pipe(x(()=>Tt(()=>new ReferenceError(`Invalid script: ${e}`))))).pipe(l(()=>{}),C(()=>document.head.removeChild(t)),Oe(1))))}function ho(){return{x:Math.max(0,scrollX),y:Math.max(0,scrollY)}}function bo(){return L(b(window,"scroll",{passive:!0}),b(window,"resize",{passive:!0})).pipe(l(ho),N(ho()))}function vo(){return{width:innerWidth,height:innerHeight}}function go(){return b(window,"resize",{passive:!0}).pipe(l(vo),N(vo()))}function yo(){return Q([bo(),go()]).pipe(l(([e,t])=>({offset:e,size:t})),J(1))}function lr(e,{viewport$:t,header$:r}){let n=t.pipe(X("size")),o=Q([n,r]).pipe(l(()=>Be(e)));return Q([r,t,o]).pipe(l(([{height:i},{offset:a,size:s},{x:f,y:c}])=>({offset:{x:a.x-f,y:a.y-c+i},size:s})))}(()=>{function 
e(n,o){parent.postMessage(n,o||"*")}function t(...n){return n.reduce((o,i)=>o.then(()=>new Promise(a=>{let s=document.createElement("script");s.src=i,s.onload=a,document.body.appendChild(s)})),Promise.resolve())}var r=class{constructor(n){this.url=n,this.onerror=null,this.onmessage=null,this.onmessageerror=null,this.m=a=>{a.source===this.w&&(a.stopImmediatePropagation(),this.dispatchEvent(new MessageEvent("message",{data:a.data})),this.onmessage&&this.onmessage(a))},this.e=(a,s,f,c,u)=>{if(s===this.url.toString()){let p=new ErrorEvent("error",{message:a,filename:s,lineno:f,colno:c,error:u});this.dispatchEvent(p),this.onerror&&this.onerror(p)}};let o=new EventTarget;this.addEventListener=o.addEventListener.bind(o),this.removeEventListener=o.removeEventListener.bind(o),this.dispatchEvent=o.dispatchEvent.bind(o);let i=document.createElement("iframe");i.width=i.height=i.frameBorder="0",document.body.appendChild(this.iframe=i),this.w.document.open(),this.w.document.write(` + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + +

Build Backends

+

The build backend is a software or service responsible for actually building the images. DIB itself is not capable of +building images, it delegates this part to the build backend.

+

DIB supports multiple build backends. Currently, available backends are docker and kaniko. You can select the +backend to use with the --backend option.

+

Executor compatibility matrix

+ + + + + + + + + + + + + + + + + + + + + + + +
BackendLocalDockerKubernetes
Docker
Kaniko
+

Docker

+

The docker backend uses Docker behind the scenes, and runs docker build You need to have +the Docker CLI installed locally to use this backend.

+

Authentication

+

The Docker Daemon requires authentication to pull and push images from private registries. Run the +docker login command to authenticate.

+

Authentication settings are stored in a config.json file located by default in $HOME/.docker/. +If you need to provide a different configuration, you can set the DOCKER_CONFIG variable to the path to another +directory, which should contain a config.json file.

+

Remote Daemon

+

If you want to set a custom docker daemon host, you can set the DOCKER_HOST environment variable. The builds will then +run on the remote host instead of using the local Docker daemon.

+

BuildKit

+

If available, DIB will try to use the BuildKit engine to build images, which is faster than the default Docker +build engine.

+

Kaniko

+

Kaniko offers a way to build container images inside a container +or Kubernetes cluster, without the security tradeoff of running a docker daemon container with host privileges.

+
+

BuildKit

+

As Kaniko must run in a container, it requires Docker when running local builds as it uses the docker executor.

+
+

See the kaniko section in the configuration reference.

+ + + + + + +
+
+ + +
+ + + +
+ + + +
+
+
+
+ + + + + + + + + \ No newline at end of file diff --git a/best-practices/index.html b/best-practices/index.html new file mode 100644 index 000000000..24d7e6e79 --- /dev/null +++ b/best-practices/index.html @@ -0,0 +1,795 @@ + + + + + + + + + + + + + + + + + + + + + + + + Best practices - DIB + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + +

DIB Best Practices

+

Pin dependencies versions in Dockerfiles

+

As DIB only rebuilds images when something changes in the build context (including the Dockerfile), external +dependencies should always be pinned to a specific version, so upgrading the dependency triggers a rebuild.

+

Example: +

RUN apt-get install package@1.0.0
+

+

Use .dockerignore

+

The .dockerignore lists file patterns that should not be included in the build context. DIB also ignores those files +when it computes the checksum, so no rebuild is triggered when they are modified.

+ + + + + + +
+
+ + +
+ + + +
+ + + +
+
+
+
+ + + + + + + + + \ No newline at end of file diff --git a/cmd/dib/index.html b/cmd/dib/index.html new file mode 100644 index 000000000..17ac86d03 --- /dev/null +++ b/cmd/dib/index.html @@ -0,0 +1,844 @@ + + + + + + + + + + + + + + + + + + + + + + + + dib - DIB + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + +

dib

+ +

dib

+

An Opinionated Docker Image Builder

+

Synopsis

+

Docker Image Builder helps building a complex image dependency graph

+

Run dib --help for more information

+

Options

+
      --build-path string            Path to the directory containing all Dockerfiles to be built by dib. Every Dockerfile will be recursively 
+                                     found and added to the build graph. You can provide any subdirectory if you want to focus on a reduced set of images, 
+                                     as long as it has at least one Dockerfile in it. (default "docker")
+      --config string                config file (default is $HOME/.config/.dib.yaml)
+      --hash-list-file-path string   Path to custom hash list file that will be used to humanize hash
+  -h, --help                         help for dib
+  -l, --log-level string             Log level. Can be any level supported by logrus ("info", "debug", etc...) (default "info")
+      --placeholder-tag string       Tag used as placeholder in Dockerfile "from" statements, and replaced internally by dib during builds 
+                                     to use the latest tags from parent images. In release mode, all images will be tagged with the placeholder tag, so 
+                                     Dockerfiles are always valid (images can still be built even without using dib). (default "latest")
+      --registry-url string          Docker registry URL where images are stored. (default "eu.gcr.io/my-test-repository")
+
+

SEE ALSO

+ +
Auto generated by spf13/cobra on 18-Jan-2024
+ + + + + + +
+
+ + +
+ + + +
+ + + +
+
+
+
+ + + + + + + + + \ No newline at end of file diff --git a/cmd/dib_build/index.html b/cmd/dib_build/index.html new file mode 100644 index 000000000..5c572ca07 --- /dev/null +++ b/cmd/dib_build/index.html @@ -0,0 +1,956 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + dib build - DIB + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + +

Build

+ +

dib build

+

Run docker images builds

+

Synopsis

+

dib build will compute the graph of images, and compare it to the last built state

+

For each image, if any file part of its docker context has changed, the image will be rebuilt. +Otherwise, dib will create a new tag based on the previous tag

+
dib build [flags]
+
+

Options

+
  -b, --backend string           Build Backend used to run image builds. Supported backends: [docker kaniko] (default "docker")
+      --dry-run                  Simulate what would happen without actually doing anything dangerous.
+      --force-rebuild            Forces rebuilding the entire image graph, without regarding if the target version already exists.
+  -h, --help                     help for build
+      --include-tests strings    List of test runners to exclude during the test phase.
+      --local-only               Build docker images locally, do not push on remote registry
+      --no-graph                 Disable generation of graph during the build process.
+      --no-retag                 Disable re-tagging images after build. Note that temporary tags with the "dev-" prefix may still be pushed to the registry.
+      --no-tests                 Disable execution of tests (unit tests, scans, etc...) after the build.
+      --rate-limit int           Concurrent number of builds that can run simultaneously (default 1)
+      --release dib.extra-tags   Enable release mode to tag all images with extra tags found in the dib.extra-tags Dockerfile labels.
+      --reports-dir string       Path to the directory where the reports are generated. (default "reports")
+
+

Options inherited from parent commands

+
      --build-path string            Path to the directory containing all Dockerfiles to be built by dib. Every Dockerfile will be recursively 
+                                     found and added to the build graph. You can provide any subdirectory if you want to focus on a reduced set of images, 
+                                     as long as it has at least one Dockerfile in it. (default "docker")
+      --config string                config file (default is $HOME/.config/.dib.yaml)
+      --hash-list-file-path string   Path to custom hash list file that will be used to humanize hash
+  -l, --log-level string             Log level. Can be any level supported by logrus ("info", "debug", etc...) (default "info")
+      --placeholder-tag string       Tag used as placeholder in Dockerfile "from" statements, and replaced internally by dib during builds 
+                                     to use the latest tags from parent images. In release mode, all images will be tagged with the placeholder tag, so 
+                                     Dockerfiles are always valid (images can still be built even without using dib). (default "latest")
+      --registry-url string          Docker registry URL where images are stored. (default "eu.gcr.io/my-test-repository")
+
+

SEE ALSO

+
    +
  • dib - An Opinionated Docker Image Builder
  • +
+
Auto generated by spf13/cobra on 18-Jan-2024
+ + + + + + +
+
+ + +
+ + + +
+ + + +
+
+
+
+ + + + + + + + + \ No newline at end of file diff --git a/cmd/dib_completion/index.html b/cmd/dib_completion/index.html new file mode 100644 index 000000000..d3d68f4ba --- /dev/null +++ b/cmd/dib_completion/index.html @@ -0,0 +1,854 @@ + + + + + + + + + + + + + + + + + + + + + + + + dib completion - DIB + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + +

dib completion

+ +

dib completion

+

Generate the autocompletion script for the specified shell

+

Synopsis

+

Generate the autocompletion script for dib for the specified shell. +See each sub-command's help for details on how to use the generated script.

+

Options

+
  -h, --help   help for completion
+
+

Options inherited from parent commands

+
      --build-path string            Path to the directory containing all Dockerfiles to be built by dib. Every Dockerfile will be recursively 
+                                     found and added to the build graph. You can provide any subdirectory if you want to focus on a reduced set of images, 
+                                     as long as it has at least one Dockerfile in it. (default "docker")
+      --config string                config file (default is $HOME/.config/.dib.yaml)
+      --hash-list-file-path string   Path to custom hash list file that will be used to humanize hash
+  -l, --log-level string             Log level. Can be any level supported by logrus ("info", "debug", etc...) (default "info")
+      --placeholder-tag string       Tag used as placeholder in Dockerfile "from" statements, and replaced internally by dib during builds 
+                                     to use the latest tags from parent images. In release mode, all images will be tagged with the placeholder tag, so 
+                                     Dockerfiles are always valid (images can still be built even without using dib). (default "latest")
+      --registry-url string          Docker registry URL where images are stored. (default "eu.gcr.io/my-test-repository")
+
+

SEE ALSO

+ +
Auto generated by spf13/cobra on 18-Jan-2024
+ + + + + + +
+
+ + +
+ + + +
+ + + +
+
+
+
+ + + + + + + + + \ No newline at end of file diff --git a/cmd/dib_completion_bash/index.html b/cmd/dib_completion_bash/index.html new file mode 100644 index 000000000..6f0aa8034 --- /dev/null +++ b/cmd/dib_completion_bash/index.html @@ -0,0 +1,1002 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + dib completion bash - DIB + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + +

Bash

+ +

dib completion bash

+

Generate the autocompletion script for bash

+

Synopsis

+

Generate the autocompletion script for the bash shell.

+

This script depends on the 'bash-completion' package. +If it is not installed already, you can install it via your OS's package manager.

+

To load completions in your current shell session:

+
source <(dib completion bash)
+
+ +

To load completions for every new session, execute once:

+

Linux:

+
dib completion bash > /etc/bash_completion.d/dib
+
+ +

macOS:

+
dib completion bash > $(brew --prefix)/etc/bash_completion.d/dib
+
+ +

You will need to start a new shell for this setup to take effect.

+
dib completion bash
+
+

Options

+
  -h, --help              help for bash
+      --no-descriptions   disable completion descriptions
+
+

Options inherited from parent commands

+
      --build-path string            Path to the directory containing all Dockerfiles to be built by dib. Every Dockerfile will be recursively 
+                                     found and added to the build graph. You can provide any subdirectory if you want to focus on a reduced set of images, 
+                                     as long as it has at least one Dockerfile in it. (default "docker")
+      --config string                config file (default is $HOME/.config/.dib.yaml)
+      --hash-list-file-path string   Path to custom hash list file that will be used to humanize hash
+  -l, --log-level string             Log level. Can be any level supported by logrus ("info", "debug", etc...) (default "info")
+      --placeholder-tag string       Tag used as placeholder in Dockerfile "from" statements, and replaced internally by dib during builds 
+                                     to use the latest tags from parent images. In release mode, all images will be tagged with the placeholder tag, so 
+                                     Dockerfiles are always valid (images can still be built even without using dib). (default "latest")
+      --registry-url string          Docker registry URL where images are stored. (default "eu.gcr.io/my-test-repository")
+
+

SEE ALSO

+
    +
  • dib completion - Generate the autocompletion script for the specified shell
  • +
+
Auto generated by spf13/cobra on 18-Jan-2024
+ + + + + + +
+
+ + +
+ + + +
+ + + +
+
+
+
+ + + + + + + + + \ No newline at end of file diff --git a/cmd/dib_completion_fish/index.html b/cmd/dib_completion_fish/index.html new file mode 100644 index 000000000..da7d9f43e --- /dev/null +++ b/cmd/dib_completion_fish/index.html @@ -0,0 +1,955 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + dib completion fish - DIB + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + +

Fish

+ +

dib completion fish

+

Generate the autocompletion script for fish

+

Synopsis

+

Generate the autocompletion script for the fish shell.

+

To load completions in your current shell session:

+
dib completion fish | source
+
+ +

To load completions for every new session, execute once:

+
dib completion fish > ~/.config/fish/completions/dib.fish
+
+ +

You will need to start a new shell for this setup to take effect.

+
dib completion fish [flags]
+
+

Options

+
  -h, --help              help for fish
+      --no-descriptions   disable completion descriptions
+
+

Options inherited from parent commands

+
      --build-path string            Path to the directory containing all Dockerfiles to be built by dib. Every Dockerfile will be recursively 
+                                     found and added to the build graph. You can provide any subdirectory if you want to focus on a reduced set of images, 
+                                     as long as it has at least one Dockerfile in it. (default "docker")
+      --config string                config file (default is $HOME/.config/.dib.yaml)
+      --hash-list-file-path string   Path to custom hash list file that will be used to humanize hash
+  -l, --log-level string             Log level. Can be any level supported by logrus ("info", "debug", etc...) (default "info")
+      --placeholder-tag string       Tag used as placeholder in Dockerfile "from" statements, and replaced internally by dib during builds 
+                                     to use the latest tags from parent images. In release mode, all images will be tagged with the placeholder tag, so 
+                                     Dockerfiles are always valid (images can still be built even without using dib). (default "latest")
+      --registry-url string          Docker registry URL where images are stored. (default "eu.gcr.io/my-test-repository")
+
+

SEE ALSO

+
    +
  • dib completion - Generate the autocompletion script for the specified shell
  • +
+
Auto generated by spf13/cobra on 18-Jan-2024
+ + + + + + +
+
+ + +
+ + + +
+ + + +
+
+
+
+ + + + + + + + + \ No newline at end of file diff --git a/cmd/dib_completion_powershell/index.html b/cmd/dib_completion_powershell/index.html new file mode 100644 index 000000000..dacc2f246 --- /dev/null +++ b/cmd/dib_completion_powershell/index.html @@ -0,0 +1,952 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + dib completion powershell - DIB + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + +

Powershell

+ +

dib completion powershell

+

Generate the autocompletion script for powershell

+

Synopsis

+

Generate the autocompletion script for powershell.

+

To load completions in your current shell session:

+
dib completion powershell | Out-String | Invoke-Expression
+
+ +

To load completions for every new session, add the output of the above command +to your powershell profile.

+
dib completion powershell [flags]
+
+

Options

+
  -h, --help              help for powershell
+      --no-descriptions   disable completion descriptions
+
+

Options inherited from parent commands

+
      --build-path string            Path to the directory containing all Dockerfiles to be built by dib. Every Dockerfile will be recursively 
+                                     found and added to the build graph. You can provide any subdirectory if you want to focus on a reduced set of images, 
+                                     as long as it has at least one Dockerfile in it. (default "docker")
+      --config string                config file (default is $HOME/.config/.dib.yaml)
+      --hash-list-file-path string   Path to custom hash list file that will be used to humanize hash
+  -l, --log-level string             Log level. Can be any level supported by logrus ("info", "debug", etc...) (default "info")
+      --placeholder-tag string       Tag used as placeholder in Dockerfile "from" statements, and replaced internally by dib during builds 
+                                     to use the latest tags from parent images. In release mode, all images will be tagged with the placeholder tag, so 
+                                     Dockerfiles are always valid (images can still be built even without using dib). (default "latest")
+      --registry-url string          Docker registry URL where images are stored. (default "eu.gcr.io/my-test-repository")
+
+

SEE ALSO

+
    +
  • dib completion - Generate the autocompletion script for the specified shell
  • +
+
Auto generated by spf13/cobra on 18-Jan-2024
+ + + + + + +
+
+ + +
+ + + +
+ + + +
+
+
+
+ + + + + + + + + \ No newline at end of file diff --git a/cmd/dib_completion_zsh/index.html b/cmd/dib_completion_zsh/index.html new file mode 100644 index 000000000..39af531d2 --- /dev/null +++ b/cmd/dib_completion_zsh/index.html @@ -0,0 +1,1005 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + dib completion zsh - DIB + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + +

ZSH

+ +

dib completion zsh

+

Generate the autocompletion script for zsh

+

Synopsis

+

Generate the autocompletion script for the zsh shell.

+

If shell completion is not already enabled in your environment you will need +to enable it. You can execute the following once:

+
echo "autoload -U compinit; compinit" >> ~/.zshrc
+
+ +

To load completions in your current shell session:

+
source <(dib completion zsh)
+
+ +

To load completions for every new session, execute once:

+

Linux:

+
dib completion zsh > "${fpath[1]}/_dib"
+
+ +

macOS:

+
dib completion zsh > $(brew --prefix)/share/zsh/site-functions/_dib
+
+ +

You will need to start a new shell for this setup to take effect.

+
dib completion zsh [flags]
+
+

Options

+
  -h, --help              help for zsh
+      --no-descriptions   disable completion descriptions
+
+

Options inherited from parent commands

+
      --build-path string            Path to the directory containing all Dockerfiles to be built by dib. Every Dockerfile will be recursively 
+                                     found and added to the build graph. You can provide any subdirectory if you want to focus on a reduced set of images, 
+                                     as long as it has at least one Dockerfile in it. (default "docker")
+      --config string                config file (default is $HOME/.config/.dib.yaml)
+      --hash-list-file-path string   Path to custom hash list file that will be used to humanize hash
+  -l, --log-level string             Log level. Can be any level supported by logrus ("info", "debug", etc...) (default "info")
+      --placeholder-tag string       Tag used as placeholder in Dockerfile "from" statements, and replaced internally by dib during builds 
+                                     to use the latest tags from parent images. In release mode, all images will be tagged with the placeholder tag, so 
+                                     Dockerfiles are always valid (images can still be built even without using dib). (default "latest")
+      --registry-url string          Docker registry URL where images are stored. (default "eu.gcr.io/my-test-repository")
+
+

SEE ALSO

+
    +
  • dib completion - Generate the autocompletion script for the specified shell
  • +
+
Auto generated by spf13/cobra on 18-Jan-2024
+ + + + + + +
+
+ + +
+ + + +
+ + + +
+
+
+
+ + + + + + + + + \ No newline at end of file diff --git a/cmd/dib_list/index.html b/cmd/dib_list/index.html new file mode 100644 index 000000000..5dc76fa66 --- /dev/null +++ b/cmd/dib_list/index.html @@ -0,0 +1,945 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + dib list - DIB + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + +

List

+ +

dib list

+

Print list of images managed by DIB

+

Synopsis

+

dib list will print a list of all Docker images managed by DIB

+
dib list [flags]
+
+

Options

+
  -h, --help            help for list
+  -o, --output string   Output format (console|go-template-file)
+                        You can provide a custom format using go-template: like this: "-o go-template-file=...".
+
+

Options inherited from parent commands

+
      --build-path string            Path to the directory containing all Dockerfiles to be built by dib. Every Dockerfile will be recursively 
+                                     found and added to the build graph. You can provide any subdirectory if you want to focus on a reduced set of images, 
+                                     as long as it has at least one Dockerfile in it. (default "docker")
+      --config string                config file (default is $HOME/.config/.dib.yaml)
+      --hash-list-file-path string   Path to custom hash list file that will be used to humanize hash
+  -l, --log-level string             Log level. Can be any level supported by logrus ("info", "debug", etc...) (default "info")
+      --placeholder-tag string       Tag used as placeholder in Dockerfile "from" statements, and replaced internally by dib during builds 
+                                     to use the latest tags from parent images. In release mode, all images will be tagged with the placeholder tag, so 
+                                     Dockerfiles are always valid (images can still be built even without using dib). (default "latest")
+      --registry-url string          Docker registry URL where images are stored. (default "eu.gcr.io/my-test-repository")
+
+

SEE ALSO

+
    +
  • dib - An Opinionated Docker Image Builder
  • +
+
Auto generated by spf13/cobra on 18-Jan-2024
+ + + + + + +
+
+ + +
+ + + +
+ + + +
+
+
+
+ + + + + + + + + \ No newline at end of file diff --git a/cmd/dib_version/index.html b/cmd/dib_version/index.html new file mode 100644 index 000000000..1fcddc82d --- /dev/null +++ b/cmd/dib_version/index.html @@ -0,0 +1,927 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + dib version - DIB + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + +

Version

+ +

dib version

+

print current dib version

+
dib version [flags]
+
+

Options

+
  -h, --help   help for version
+
+

Options inherited from parent commands

+
      --build-path string            Path to the directory containing all Dockerfiles to be built by dib. Every Dockerfile will be recursively 
+                                     found and added to the build graph. You can provide any subdirectory if you want to focus on a reduced set of images, 
+                                     as long as it has at least one Dockerfile in it. (default "docker")
+      --config string                config file (default is $HOME/.config/.dib.yaml)
+      --hash-list-file-path string   Path to custom hash list file that will be used to humanize hash
+  -l, --log-level string             Log level. Can be any level supported by logrus ("info", "debug", etc...) (default "info")
+      --placeholder-tag string       Tag used as placeholder in Dockerfile "from" statements, and replaced internally by dib during builds 
+                                     to use the latest tags from parent images. In release mode, all images will be tagged with the placeholder tag, so 
+                                     Dockerfiles are always valid (images can still be built even without using dib). (default "latest")
+      --registry-url string          Docker registry URL where images are stored. (default "eu.gcr.io/my-test-repository")
+
+

SEE ALSO

+
    +
  • dib - An Opinionated Docker Image Builder
  • +
+
Auto generated by spf13/cobra on 18-Jan-2024
+ + + + + + +
+
+ + +
+ + + +
+ + + +
+
+
+
+ + + + + + + + + \ No newline at end of file diff --git a/configuration-reference/index.html b/configuration-reference/index.html new file mode 100644 index 000000000..013b4a831 --- /dev/null +++ b/configuration-reference/index.html @@ -0,0 +1,919 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + Configuration - DIB + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + +

Configuration Reference

+
---
+# Log level: "trace", "debug", "info", "warning", "error", "fatal", "panic". Defaults to "info".
+log_level: info
+
+# URL of the registry where the images should be stored.
+#
+# DIB will use the local docker configuration to fetch metadata about existing images. You may use the DOCKER_CONFIG
+# environment variable to set a custom docker config path.
+# See the official Docker documentation (https://docs.docker.com/engine/reference/commandline/cli/#configuration-files).
+# The build backend must also be authenticated to have permission to push images.
+registry_url: registry.example.org
+
+# The placeholder tag DIB uses to mark which images are the reference. Defaults to "latest".
+# Change this value if you don't want to use "latest" tags, or if images may be tagged "latest" by other sources.
+placeholder_tag: latest
+
+# The rate limit can be increased to allow parallel builds. This dramatically reduces the build times
+# when using the Kubernetes executor as build pods are scheduled across multiple nodes.
+rate_limit: 1
+
+# Path to the directory where the reports are generated. The directory will be created if it doesn't exist.
+reports_dir: reports
+
+# The build backend. Can either be set to "docker" or "kaniko".
+#
+# Note: the kaniko backend must be run in a containerized environment such as Docker or Kubernetes.
+# See the "executor" section below.
+backend: docker
+
+# Kaniko settings. Required only if using the Kaniko build backend.
+kaniko:
+  # The build context directory has to be uploaded somewhere in order for the Kaniko pod to retrieve it,
+  # when using remote executor (Kuberentes or remote docker host). Currently, only AWS S3 is supported.
+  context:
+    # Store the build context in an AWS S3 bucket.
+    s3:
+      bucket: my-bucket
+      region: eu-west-3
+  # Executor configuration. It is only necessary to provide valid configurations for all of them,
+  # just pick one up according to your needs.
+  executor:
+    # Configuration for the "docker" executor.
+    docker:
+      image: eu.gcr.io/radio-france-k8s/kaniko:latest
+    # Configuration for the "kubernetes" executor.
+    kubernetes:
+      namespace: kaniko
+      image: eu.gcr.io/radio-france-k8s/kaniko:latest
+      # References a secret containing the Docker configuration file used to authenticate to the registry.
+      docker_config_secret: docker-config-prod
+      env_secrets:
+        # Additional Secret mounted as environment variables.
+        # Used for instance to download the build context from AWS S3.
+        - aws-s3-secret
+      container_override: |
+        resources:
+          limits:
+            cpu: 2
+            memory: 8Gi
+          requests:
+            cpu: 1
+            memory: 2Gi
+      pod_template_override: |
+        spec:
+          affinity:
+            nodeAffinity:
+              requiredDuringSchedulingIgnoredDuringExecution:
+                nodeSelectorTerms:
+                - matchExpressions:
+                  - key: kops.k8s.io/instancegroup
+                    operator: In
+                    values:
+                    - spot-instances
+
+# Enable test suites execution after each image build.
+include_tests:
+  # Enable Goss tests. See the "goss" configuration section below.
+  # To test an image, place a goss.yml file in its build context.
+  # Learn more about Goss: https://github.com/goss-org/goss
+  - goss
+  # Enable trivy vulnerability scans. See the "trivy" configuration section below.
+  # Learn more about Trivy: https://aquasecurity.github.io/trivy
+  - trivy
+
+goss:
+  executor:
+    # Kubernetes executor configuration. Required when using the kubernetes build executor.
+    kubernetes:
+      enabled: true
+      namespace: goss
+      image: aelsabbahy/goss:latest
+      image_pull_secrets:
+      # - private-container-registry
+
+trivy:
+  executor:
+    # Kubernetes executor configuration. Required when using the kubernetes build executor.
+    kubernetes:
+      enabled: true
+      namespace: trivy
+      image: ghcr.io/aquasecurity/trivy:latest
+      # References a secret containing the Docker configuration file used to authenticate to the registry.
+      docker_config_secret: docker-config-ci
+      image_pull_secrets:
+      # - private-container-registry
+      container_override: |
+        resources:
+          limits:
+            cpu: 2
+            memory: 3Gi
+          requests:
+            cpu: 2
+            memory: 1Gi
+        env:
+          - name: GOOGLE_APPLICATION_CREDENTIALS
+            value: /credentials/gcr_service_account.json
+          - name: TRIVY_TIMEOUT
+            value: "30m0s"
+        volumeMounts:
+          - mountPath: /credentials
+            name: private-registry-credentials
+            readOnly: true
+      pod_template_override: |
+        spec:
+          volumes:
+          - name: private-registry-credentials
+            secret:
+              defaultMode: 420
+              secretName: private-registry-credentials
+
+# Easter egg: A path to a file containing a custom wordlist that will be used to
+# generate the humanized hashes for image tags. The list must contain exactly 256 words.
+# You can enable the usage of this list in each Dockerfile with a custom label :
+#   LABEL dib.use-custom-hash-list="true"
+# Please keep in mind each time you change this list the images using the
+# use-custom-hash-list label may see their hashes regenerated.
+humanized_hash_list: ""
+# humanized_hash_list: "custom_wordlist.txt"
+
+ + + + + + +
+
+ + +
+ + + +
+ + + +
+
+
+
+ + + + + + + + + \ No newline at end of file diff --git a/configuration/index.html b/configuration/index.html new file mode 100644 index 000000000..ab6e09796 --- /dev/null +++ b/configuration/index.html @@ -0,0 +1,879 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + Configuration - DIB + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + +

Configuration

+

DIB can be configured either by command-line flags, environment variables or configuration file.

+

The command-line flags have the highest priority, then environment variables, then config file. You can set some +default values in the configuration file, and then override with environment variables of command-line flags.

+

Command-line flags

+

Example: +

dib build --registry-url=gcr.io/project
+

+

Environment variables

+

DIB auto-discovers configuration from environment variables prefixed with DIB_, followed by the capitalized, +snake_cased flag name.

+

Example: +

export DIB_REGISTRY_URL=gcr.io/project
+dib build
+

+

Configuration file

+

DIB uses a YAML configuration file in addition to command-line arguments. It will look for a file named .dib.yaml +in the current working directory. You can change the file location by setting the --config (-c) flag.

+

The YAML keys are equivalent to the flag names, in snake_case.

+

Example: +

# .dib.yaml
+registryUrl: gcr.io/project
+...
+

+

You can find more examples here. See also the +reference configuration file.

+ + + + + + +
+
+ + +
+ + + +
+ + + +
+
+
+
+ + + + + + + + + \ No newline at end of file diff --git a/documentation/index.html b/documentation/index.html new file mode 100644 index 000000000..34fc05f97 --- /dev/null +++ b/documentation/index.html @@ -0,0 +1,857 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + Documentation - DIB + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + +

Documentation

+

The documentation is generated with mkdocs. It generates a static website in plain HTML +from the Markdown files present in the docs/ directory.

+

We also use the Cobra built-in documentation generator for DIB commands.

+

Local Setup

+

Let's set up a local Python environment and run the documentation server with live-reload.

+
    +
  1. +

    Create a virtual env: +

    python -m venv venv
    +source venv/bin/activate
    +

    +
  2. +
  3. +

    Install dependencies: +

    pip install -r requirements.txt
    +

    +
  4. +
  5. +

    Generate docs of dib commands: +

    make docs
    +

    +
  6. +
  7. +

    Run the mkdocs server: +

    mkdocs serve
    +

    +
  8. +
  9. +

    Go to http://localhost:8000

    +
  10. +
+ + + + + + +
+
+ + +
+ + + +
+ + + +
+
+
+
+ + + + + + + + + \ No newline at end of file diff --git a/examples/config/reference.yaml b/examples/config/reference.yaml new file mode 100644 index 000000000..86e32e368 --- /dev/null +++ b/examples/config/reference.yaml @@ -0,0 +1,138 @@ +--- +# Log level: "trace", "debug", "info", "warning", "error", "fatal", "panic". Defaults to "info". +log_level: info + +# URL of the registry where the images should be stored. +# +# DIB will use the local docker configuration to fetch metadata about existing images. You may use the DOCKER_CONFIG +# environment variable to set a custom docker config path. +# See the official Docker documentation (https://docs.docker.com/engine/reference/commandline/cli/#configuration-files). +# The build backend must also be authenticated to have permission to push images. +registry_url: registry.example.org + +# The placeholder tag DIB uses to mark which images are the reference. Defaults to "latest". +# Change this value if you don't want to use "latest" tags, or if images may be tagged "latest" by other sources. +placeholder_tag: latest + +# The rate limit can be increased to allow parallel builds. This dramatically reduces the build times +# when using the Kubernetes executor as build pods are scheduled across multiple nodes. +rate_limit: 1 + +# Path to the directory where the reports are generated. The directory will be created if it doesn't exist. +reports_dir: reports + +# The build backend. Can either be set to "docker" or "kaniko". +# +# Note: the kaniko backend must be run in a containerized environment such as Docker or Kubernetes. +# See the "executor" section below. +backend: docker + +# Kaniko settings. Required only if using the Kaniko build backend. +kaniko: + # The build context directory has to be uploaded somewhere in order for the Kaniko pod to retrieve it, + # when using remote executor (Kuberentes or remote docker host). Currently, only AWS S3 is supported. + context: + # Store the build context in an AWS S3 bucket. 
+ s3: + bucket: my-bucket + region: eu-west-3 + # Executor configuration. It is only necessary to provide valid configurations for all of them, + # just pick one up according to your needs. + executor: + # Configuration for the "docker" executor. + docker: + image: eu.gcr.io/radio-france-k8s/kaniko:latest + # Configuration for the "kubernetes" executor. + kubernetes: + namespace: kaniko + image: eu.gcr.io/radio-france-k8s/kaniko:latest + # References a secret containing the Docker configuration file used to authenticate to the registry. + docker_config_secret: docker-config-prod + env_secrets: + # Additional Secret mounted as environment variables. + # Used for instance to download the build context from AWS S3. + - aws-s3-secret + container_override: | + resources: + limits: + cpu: 2 + memory: 8Gi + requests: + cpu: 1 + memory: 2Gi + pod_template_override: | + spec: + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: kops.k8s.io/instancegroup + operator: In + values: + - spot-instances + +# Enable test suites execution after each image build. +include_tests: + # Enable Goss tests. See the "goss" configuration section below. + # To test an image, place a goss.yml file in its build context. + # Learn more about Goss: https://github.com/goss-org/goss + - goss + # Enable trivy vulnerability scans. See the "trivy" configuration section below. + # Learn more about Trivy: https://aquasecurity.github.io/trivy + - trivy + +goss: + executor: + # Kubernetes executor configuration. Required when using the kubernetes build executor. + kubernetes: + enabled: true + namespace: goss + image: aelsabbahy/goss:latest + image_pull_secrets: + # - private-container-registry + +trivy: + executor: + # Kubernetes executor configuration. Required when using the kubernetes build executor. 
+ kubernetes: + enabled: true + namespace: trivy + image: ghcr.io/aquasecurity/trivy:latest + # References a secret containing the Docker configuration file used to authenticate to the registry. + docker_config_secret: docker-config-ci + image_pull_secrets: + # - private-container-registry + container_override: | + resources: + limits: + cpu: 2 + memory: 3Gi + requests: + cpu: 2 + memory: 1Gi + env: + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /credentials/gcr_service_account.json + - name: TRIVY_TIMEOUT + value: "30m0s" + volumeMounts: + - mountPath: /credentials + name: private-registry-credentials + readOnly: true + pod_template_override: | + spec: + volumes: + - name: private-registry-credentials + secret: + defaultMode: 420 + secretName: private-registry-credentials + +# Easter egg: A path to a file containing a custom wordlist that will be used to +# generate the humanized hashes for image tags. The list must contain exactly 256 words. +# You can enable the usage of this list in each Dockerfile with a custom label : +# LABEL dib.use-custom-hash-list="true" +# Please keep in mind each time you change this list the images using the +# use-custom-hash-list label may see their hashes regenerated. 
+humanized_hash_list: "" +# humanized_hash_list: "custom_wordlist.txt" diff --git a/examples/quickstart/docker/base/Dockerfile b/examples/quickstart/docker/base/Dockerfile new file mode 100644 index 000000000..3732d678f --- /dev/null +++ b/examples/quickstart/docker/base/Dockerfile @@ -0,0 +1,3 @@ +FROM alpine:latest + +LABEL name="base" diff --git a/examples/quickstart/docker/child/Dockerfile b/examples/quickstart/docker/child/Dockerfile new file mode 100644 index 000000000..8d2abaa20 --- /dev/null +++ b/examples/quickstart/docker/child/Dockerfile @@ -0,0 +1,3 @@ +FROM registry.example.com/base:latest + +LABEL name="child" diff --git a/executors/index.html b/executors/index.html new file mode 100644 index 000000000..8161f2b34 --- /dev/null +++ b/executors/index.html @@ -0,0 +1,893 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + Executors - DIB + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + +

Executors

+

DIB supports multiple build executors. An executor is a platform able to run image builds and tests. +Unlike the build backends which can be explicitly chosen, the executor is automatically selected depending on the type +of operation (build, test), and the executors configured in the configuration file.

+

Build backend compatibility matrix

+ + + + + + + + + + + + + + + + + + + + + + + + + +
ExecutorDockerKaniko
Local
Docker
Kubernetes
+

Local

+

Runs commands using the local exec system call. Use the --local-only flag to force the local executor.

+

Docker

+

Runs commands in a docker container, using the docker run command.

+

Kubernetes

+

Creates pods in a kubernetes cluster, using the kubernetes API. +DIB uses the current kube context, please make sure it is set to the cluster where you intend to run the builds.

+

See an example configuration in the +configuration reference section.

+ + + + + + +
+
+ + +
+ + + +
+ + + +
+
+
+
+ + + + + + + + + \ No newline at end of file diff --git a/extra-tags/index.html b/extra-tags/index.html new file mode 100644 index 000000000..f7cc2b233 --- /dev/null +++ b/extra-tags/index.html @@ -0,0 +1,787 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + Extra Tags - DIB + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + +

Extra Tags

+

Images managed by DIB will get tagged with the human-readable version of the computed hash. This is not very convenient +in some cases, for instance if we want to tag an image with the explicit version of the contained software.

+

DIB allows additional tags to be defined using a label in the Dockerfile: +

LABEL dib.extra-tags="v1.0.0,v1.0,v1"
+

+

The label may contain a comma-separated list of tags to be created when the image +gets promoted with the --release flag.

+ + + + + + +
+
+ + +
+ + + +
+ + + +
+
+
+
+ + + + + + + + + \ No newline at end of file diff --git a/images/dib_logo.png b/images/dib_logo.png new file mode 100644 index 000000000..7262fb469 Binary files /dev/null and b/images/dib_logo.png differ diff --git a/images/dib_report.png b/images/dib_report.png new file mode 100644 index 000000000..fcd17a19d Binary files /dev/null and b/images/dib_report.png differ diff --git a/images/favicon.ico b/images/favicon.ico new file mode 100644 index 000000000..4428523ef Binary files /dev/null and b/images/favicon.ico differ diff --git a/index.html b/index.html new file mode 100644 index 000000000..a4ad63372 --- /dev/null +++ b/index.html @@ -0,0 +1,1036 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + DIB + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + +

Introduction

+

DIB is a tool designed to help build multiple Docker images defined within a directory, possibly having dependencies +with one another, in a single command.

+
+

Warning

+

DIB is still at an early stage, development is still ongoing and new minor releases may bring some breaking changes. +This may occur until we release the v1.

+
+

Purpose

+

As containers have become the standard software packaging technology, we have to deal with an ever-increasing number of +image definitions. In DevOps teams especially, we need to manage dozens of Dockerfiles, and the monorepo is often the +solution of choice to store and version them.

+

We use CI/CD pipelines to help by automatically building and pushing the images to a registry, but it's often +inefficient as all the images are rebuilt at every commit/pull request. +There are possible solutions to optimize this, like changesets detection or build cache persistence to increase +efficiency, but it's not an easy task.

+

Also, being able to test and validate the produced images was also something that we were looking forward to.

+

DIB was created to solve these issues, and manage a large number of images in the most efficient way possible.

+

Concepts

+

Before using DIB, there are important basic concepts to know about, to understand how it works internally.

+

Build Directory

+

DIB needs a path to a root directory containing all the images it should manage. The structure of this directory is not +important, DIB will auto-discover all the Dockerfiles within it recursively.

+

Example with a simple directory structure: +

images/
+├── alpine
+|   └── Dockerfile
+└── debian
+    ├── bookworm
+    |   └── Dockerfile
+    └── bullseye
+        └── Dockerfile
+

+

In order to be discovered, the Dockerfile must contain the name label: +

LABEL name="alpine"
+

+

If the name label is missing, the image will be ignored and DIB won't manage it.

+

Dependency Graph

+

Because some images may depend on other images (when a FROM statement references an image also defined within the +build directory), DIB internally builds a graph of dependencies (DAG). During the build process, DIB waits until all +parent images finish to build before building the children.

+

Example dependency graph: +

graph LR
+  A[alpine] --> B[nodejs];
+  B --> C[foo];
+  D[debian] --> E[bar];
+  B --> E;

+

In this example, DIB will wait for the alpine image to be built before proceeding to nodejs, and then both +alpine and debian can be built in parallel (see the --rate-limit build option).

+

Once debian is completed, the build of bar begins, and as soon as nodejs is completed, foo follows.

+

Image Version Tag

+

DIB only builds an image when something has changed in its build context since the last build. To track the changes, +DIB computes a checksum of all the files in the context, and generates a human-readable tag out of it. If any file +changes in the build context (or in the build context of any parent image), the computed human-readable tag changes as +well.

+

DIB knows it needs to rebuild an image if the target tag is not present in the registry.

+

Placeholder Tag

+

When updating images having children, DIB needs to update the tags in FROM statements in all child images +before running the build, to match the newly computed tag.

+

Example:

+

Given a parent image named "parent": +

LABEL name="parent"
+

+

And a child image referencing the parent: +

FROM registry.example.com/parent:REPLACE_ME
+LABEL name="child"
+

+

When we build using the same placeholder tag: +

dib build \
+  --registry-url=registry.example.com \
+  --placeholder-tag=REPLACE_ME
+

+

Then any change to the parent image will be inherited by the child. +By default, the placeholder tag is latest.

+

In some cases, we want to be able to freeze the version of the parent image to a specific tag. To do so, just change the +tag in the FROM statement to be anything else than the placeholder tag: +

FROM registry.example.com/parent:some-specific-tag
+LABEL name="child"
+

+

Then any change to the parent image will not be inherited by the child.

+

Tag promotion

+

DIB always tries to build and push images when it detects some changes, but it doesn't move the reference tag +(latest by default) to the latest version. This allows DIB to run on feature branches without interfering with +one another. Once the changes are satisfying, just re-run DIB with the --release flag to promote the current +version with the reference tag.

+

Example workflow

+

Let's assume we have a simple GitFlow setup, with CI/CD pipelines running on each commit to build docker images with DIB.

+

When one creates a branch from the main branch and commits some changes to an image, DIB builds and pushes the +cat-south tag, but latest still references the same tag (beacon-two):

+
gitGraph
+       commit id: "autumn-golf"
+       commit id: "beacon-two" tag: "latest"
+       branch feature
+       commit id: "cat-south"
+

Once the feature branch gets merged, the cat-south tag is promoted to latest: +

gitGraph
+       commit id: "autumn-golf"
+       commit id: "beacon-two"
+       branch feature
+       commit id: "cat-south"
+       checkout main
+       merge feature
+       commit id: "cat-south " tag: "latest"

+

License

+

DIB is licensed under the CeCILL V2.1 License

+ + + + + + +
+
+ + +
+ + + +
+ + + +
+
+
+
+ + + + + + + + + \ No newline at end of file diff --git a/install/index.html b/install/index.html new file mode 100644 index 000000000..a42da846b --- /dev/null +++ b/install/index.html @@ -0,0 +1,892 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + Installation - DIB + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + +

Installation Guide

+
+
+
+

Install the latest release on macOS or Linux with:

+
go install github.com/radiofrance/dib@latest
+
+
+
+

Binaries are available to download from the GitHub releases page.

+
+
+
+

Shell autocompletion

+

Configure your shell to load DIB completions:

+
+
+
+

To load completion run:

+
. <(dib completion bash)
+
+

To configure your bash shell to load completions for each session add to your bashrc:

+
# ~/.bashrc or ~/.bash_profile
+command -v dib >/dev/null && . <(dib completion bash)
+
+

If you have an alias for dib, you can extend shell completion to work with that alias:

+
# ~/.bashrc or ~/.bash_profile
+alias tm=dib
+complete -F __start_dib tm
+
+
+
+

To configure your fish shell to load completions +for each session write this script to your completions dir:

+
dib completion fish > ~/.config/fish/completions/dib.fish
+
+
+
+

To load completion run:

+
. <(dib completion powershell)
+
+

To configure your powershell shell to load completions for each session add to your powershell profile:

+

Windows:

+

cd "$env:USERPROFILE\Documents\WindowsPowerShell\Modules"
+dib completion >> dib-completion.ps1
+
+Linux:

+
cd "${XDG_CONFIG_HOME:-"$HOME/.config/"}/powershell/modules"
+dib completion >> dib-completions.ps1
+
+
+
+

To load completion run:

+
. <(dib completion zsh) && compdef _dib dib
+
+

To configure your zsh shell to load completions for each session add to your zshrc:

+
# ~/.zshrc or ~/.profile
+command -v dib >/dev/null && . <(dib completion zsh) && compdef _dib dib
+
+

or write a cached file in one of the completion directories in your ${fpath}:

+
echo "${fpath// /\n}" | grep -i completion
+dib completion zsh > _dib
+
+mv _dib ~/.oh-my-zsh/completions  # oh-my-zsh
+mv _dib ~/.zprezto/modules/completion/external/src/  # zprezto
+
+
+
+
+ + + + + + +
+
+ + +
+ + + +
+ + + +
+
+
+
+ + + + + + + + + \ No newline at end of file diff --git a/quickstart/index.html b/quickstart/index.html new file mode 100644 index 000000000..df1c9d13e --- /dev/null +++ b/quickstart/index.html @@ -0,0 +1,937 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + Quickstart - DIB + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + +

Quickstart Guide

+

This guide will show you the basics of DIB. You will build a set of images locally using the local docker daemon.

+

Prerequisites

+

Before using DIB, ensure you have the following dependencies installed:

+
    +
  • Docker for building images on your local computer.
  • +
  • Graphviz for generating visual representation of the dependency graph (optional)
  • +
  • Goss for testing images after build (optional)
  • +
  • Trivy for scanning images for vulnerabilities (optional)
  • +
+

Then, you need to install the DIB command-line by following the installation guide.

+

Make sure you have authenticated access to an OCI registry, in this guide we'll assume it is registry.example.com.

+

Directory structure

+

Let's create a root directory containing 2 Dockerfiles in their own subdirectories. +The structure will look like: +

docker/
+├── base
+|   └── Dockerfile
+└── child
+    └── Dockerfile
+

+

Now create the dockerfile for the base image: +

# docker/base/Dockerfile
+FROM alpine:latest
+
+LABEL name="base"
+

+

The "name" label is mandatory, it is used by DIB to name the current image, by appending the value of the label to the +registry URL. In this case, the image name is registry.example.com/base.

+

Then, create the dockerfile for the child image, which extends the base image: +

# docker/child/Dockerfile
+FROM registry.example.com/base:latest
+
+LABEL name="child"
+

+
+

Tip

+

The directory structure does not matter to DIB. It builds the graph of dependencies based on the FROM statements. +You can have either flat directory structure like shown above, or embed child images context directories +in the parent context.

+
+

Configuration

+

See the configuration section

+

For this guide, we'll use a configuration file as it is the more convenient way for day-to-day usage.

+

Let's create a .dib.yaml next to the docker build directory: +

docker/
+├── base/
+├── child/
+└── .dib.yaml
+

+

Edit the file to set the registry name, used to pull and push DIB-managed images. +

registry_url: registry.example.com
+

+

You can check everything is correct by running dib list: +

$ dib list
+Using config file: docs/examples/.dib.yaml
+  NAME   HASH
+  base   august-berlin-blossom-magnesium
+  child  gee-minnesota-maryland-robin
+

+

You should get the output containing the list of images that DIB has discovered.

+

Building the images

+

When you have all your images definitions in the build directory and configuration set up, you can proceed to building +the images: +

$ dib build
+...
+

+

When it's done, you can run the build command again, and you'll see that DIB does nothing as long as the Dockerfiles +remain unchanged.

+

When you are ready to promote the images to latest, run: +

$ dib build --release
+

+ + + + + + +
+
+ + +
+ + + +
+ + + +
+
+
+
+ + + + + + + + + \ No newline at end of file diff --git a/reports/index.html b/reports/index.html new file mode 100644 index 000000000..fe0207394 --- /dev/null +++ b/reports/index.html @@ -0,0 +1,859 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + Reporting - DIB + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + +

Reporting

+

DIB generates reports after each build. +By default, the reports are generated in the reports directory. You can change it by setting the +--reports-dir option to another location.

+

HTML Report

+

The HTML report is the one you are going to use the most. +Just click on the link displayed on the DIB output to browse the report.

+

In the report you'll find:

+
    +
  • An overview of all images managed by DIB
  • +
  • The build output
  • +
  • The graph of dependencies
  • +
  • Test results and logs
  • +
  • Vulnerability scan results
  • +
+

Preview:

+

HTML Report

+

jUnit Reports

+

Test executors generate reports in jUnit format. +They can then be parsed in a CI pipeline and displayed in a user-friendly fashion.

+ + + + + + +
+
+ + +
+ + + +
+ + + +
+
+
+
+ + + + + + + + + \ No newline at end of file diff --git a/roadmap/index.html b/roadmap/index.html new file mode 100644 index 000000000..8cc5ac54d --- /dev/null +++ b/roadmap/index.html @@ -0,0 +1,849 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + Roadmap - DIB + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + +

Roadmap

+

Road to v1

+

DIB is still a work in progress, but we plan to release a stable version (v1.0.0) after we have added the +following features:

+
    +
  • Per-image configuration: Some images may require additional build args, or have their own tagging scheme. Being + able to configure those settings for each image is necessary.
  • +
+

Future additions

+
    +
  • Multiplatform builds: Ability to build images for different platforms, and generate a manifest-list.
  • +
  • Image signing: Sign images to improve supply chain security.
  • +
+

And more...

+ + + + + + +
+
+ + +
+ + + +
+ + + +
+
+
+
+ + + + + + + + + \ No newline at end of file diff --git a/search/search_index.json b/search/search_index.json new file mode 100644 index 000000000..3660a2624 --- /dev/null +++ b/search/search_index.json @@ -0,0 +1 @@ +{"config":{"lang":["en"],"separator":"[\\s\\-]+","pipeline":["stopWordFilter"]},"docs":[{"location":"","title":"Introduction","text":"

DIB is a tool designed to help build multiple Docker images defined within a directory, possibly having dependencies with one another, in a single command.

Warning

DIB is still at an early stage, development is still ongoing and new minor releases may bring some breaking changes. This may occur until we release the v1.

"},{"location":"#purpose","title":"Purpose","text":"

As containers have become the standard software packaging technology, we have to deal with an ever-increasing number of image definitions. In DevOps teams especially, we need to manage dozens of Dockerfiles, and the monorepo is often the solution of choice to store and version them.

We use CI/CD pipelines to help by automatically building and pushing the images to a registry, but it's often inefficient as all the images are rebuilt at every commit/pull request. There are possible solutions to optimize this, like changesets detection or build cache persistence to increase efficiency, but it's not an easy task.

Also, being able to test and validate the produced images was also something that we were looking forward to.

DIB was created to solve these issues, and manage a large number of images in the most efficient way possible.

"},{"location":"#concepts","title":"Concepts","text":"

Before using DIB, there are important basic concepts to know about, to understand how it works internally.

"},{"location":"#build-directory","title":"Build Directory","text":"

DIB needs a path to a root directory containing all the images it should manage. The structure of this directory is not important, DIB will auto-discover all the Dockerfiles within it recursively.

Example with a simple directory structure:

images/\n\u251c\u2500\u2500 alpine\n|   \u2514\u2500\u2500 Dockerfile\n\u2514\u2500\u2500 debian\n    \u251c\u2500\u2500 bookworm\n    |   \u2514\u2500\u2500 Dockerfile\n    \u2514\u2500\u2500 bullseye\n        \u2514\u2500\u2500 Dockerfile\n

In order to be discovered, the Dockerfile must contain the name label:

LABEL name=\"alpine\"\n

If the name label is missing, the image will be ignored and DIB won't manage it.

"},{"location":"#dependency-graph","title":"Dependency Graph","text":"

Because some images may depend on other images (when a FROM statement references an image also defined within the build directory), DIB internally builds a graph of dependencies (DAG). During the build process, DIB waits until all parent images finish to build before building the children.

Example dependency graph:

graph LR\n  A[alpine] --> B[nodejs];\n  B --> C[foo];\n  D[debian] --> E[bar];\n  B --> E;

In this example, DIB will wait for the alpine image to be built before proceeding to nodejs, and then both alpine and debian can be built in parallel (see the --rate-limit build option).

Once debian is completed, the build of bar begins, and as soon as nodejs is completed, foo follows.

"},{"location":"#image-version-tag","title":"Image Version Tag","text":"

DIB only builds an image when something has changed in its build context since the last build. To track the changes, DIB computes a checksum of all the files in the context, and generates a human-readable tag out of it. If any file changes in the build context (or in the build context of any parent image), the computed human-readable tag changes as well.

DIB knows it needs to rebuild an image if the target tag is not present in the registry.

"},{"location":"#placeholder-tag","title":"Placeholder Tag","text":"

When updating images having children, DIB needs to update the tags in FROM statements in all child images before running the build, to match the newly computed tag.

Example:

Given a parent image named \"parent\":

LABEL name=\"parent\"\n

And a child image referencing the parent:

FROM registry.example.com/parent:REPLACE_ME\nLABEL name=\"child\"\n

When we build using the same placeholder tag:

dib build \\\n--registry-url=registry.example.com \\\n--placeholder-tag=REPLACE_ME\n

Then any change to the parent image will be inherited by the child. By default, the placeholder tag is latest.

In some cases, we want to be able to freeze the version of the parent image to a specific tag. To do so, just change the tag in the FROM statement to be anything else than the placeholder tag:

FROM registry.example.com/parent:some-specific-tag\nLABEL name=\"child\"\n

Then any change to the parent image will not be inherited by the child.

"},{"location":"#tag-promotion","title":"Tag promotion","text":"

DIB always tries to build and push images when it detects some changes, but it doesn't move the reference tag (latest by default) to the latest version. This allows DIB to run on feature branches without interfering with one another. Once the changes are satisfying, just re-run DIB with the --release flag to promote the current version with the reference tag.

Example workflow

Let's assume we have a simple GitFlow setup, with CI/CD pipelines running on each commit to build docker images with DIB.

When one creates a branch from the main branch and commits some changes to an image, DIB builds and pushes the cat-south tag, but latest still references the same tag (beacon-two):

gitGraph\n       commit id: \"autumn-golf\"\n       commit id: \"beacon-two\" tag: \"latest\"\n       branch feature\n       commit id: \"cat-south\"

Once the feature branch gets merged, the cat-south tag is promoted to latest:

gitGraph\n       commit id: \"autumn-golf\"\n       commit id: \"beacon-two\"\n       branch feature\n       commit id: \"cat-south\"\n       checkout main\n       merge feature\n       commit id: \"cat-south \" tag: \"latest\"

"},{"location":"#license","title":"License","text":"

DIB is licensed under the CeCILL V2.1 License

"},{"location":"backends/","title":"Build Backends","text":"

The build backend is a software or service responsible for actually building the images. DIB itself is not capable of building images, it delegates this part to the build backend.

DIB supports multiple build backends. Currently, available backends are docker and kaniko. You can select the backend to use with the --backend option.

Executor compatibility matrix

Backend Local Docker Kubernetes Docker \u2714 \u2717 \u2717 Kaniko \u2717 \u2714 \u2714"},{"location":"backends/#docker","title":"Docker","text":"

The docker backend uses Docker behind the scenes, and runs docker build You need to have the Docker CLI installed locally to use this backend.

Authentication

The Docker Daemon requires authentication to pull and push images from private registries. Run the docker login command to authenticate.

Authentication settings are stored in a config.json file located by default in $HOME/.docker/. If you need to provide a different configuration, you can set the DOCKER_CONFIG variable to the path to another directory, which should contain a config.json file.

Remote Daemon

If you want to set a custom docker daemon host, you can set the DOCKER_HOST environment variable. The builds will then run on the remote host instead of using the local Docker daemon.

BuildKit

If available, DIB will try to use the BuildKit engine to build images, which is faster than the default Docker build engine.

"},{"location":"backends/#kaniko","title":"Kaniko","text":"

Kaniko offers a way to build container images inside a container or Kubernetes cluster, without the security tradeoff of running a docker daemon container with host privileges.

BuildKit

As Kaniko must run in a container, it requires Docker when running local builds as it uses the docker executor.

See the kaniko section in the configuration reference.

"},{"location":"best-practices/","title":"DIB Best Practices","text":""},{"location":"best-practices/#pin-dependencies-versions-in-dockerfiles","title":"Pin dependencies versions in Dockerfiles","text":"

As DIB only rebuilds images when something changes in the build context (including the Dockerfile), external dependencies should always be pinned to a specific version, so upgrading the dependency triggers a rebuild.

Example:

RUN apt-get install package@1.0.0\n

"},{"location":"best-practices/#use-dockerignore","title":"Use .dockerignore","text":"

The .dockerignore lists file patterns that should not be included in the build context. DIB also ignores those files when it computes the checksum, so no rebuild is triggered when they are modified.

"},{"location":"configuration-reference/","title":"Configuration Reference","text":"
---\n# Log level: \"trace\", \"debug\", \"info\", \"warning\", \"error\", \"fatal\", \"panic\". Defaults to \"info\".\nlog_level: info\n\n# URL of the registry where the images should be stored.\n#\n# DIB will use the local docker configuration to fetch metadata about existing images. You may use the DOCKER_CONFIG\n# environment variable to set a custom docker config path.\n# See the official Docker documentation (https://docs.docker.com/engine/reference/commandline/cli/#configuration-files).\n# The build backend must also be authenticated to have permission to push images.\nregistry_url: registry.example.org\n\n# The placeholder tag DIB uses to mark which images are the reference. Defaults to \"latest\".\n# Change this value if you don't want to use \"latest\" tags, or if images may be tagged \"latest\" by other sources.\nplaceholder_tag: latest\n\n# The rate limit can be increased to allow parallel builds. This dramatically reduces the build times\n# when using the Kubernetes executor as build pods are scheduled across multiple nodes.\nrate_limit: 1\n\n# Path to the directory where the reports are generated. The directory will be created if it doesn't exist.\nreports_dir: reports\n\n# The build backend. Can either be set to \"docker\" or \"kaniko\".\n#\n# Note: the kaniko backend must be run in a containerized environment such as Docker or Kubernetes.\n# See the \"executor\" section below.\nbackend: docker\n\n# Kaniko settings. Required only if using the Kaniko build backend.\nkaniko:\n# The build context directory has to be uploaded somewhere in order for the Kaniko pod to retrieve it,\n# when using remote executor (Kubernetes or remote docker host). Currently, only AWS S3 is supported.\ncontext:\n# Store the build context in an AWS S3 bucket.\ns3:\nbucket: my-bucket\nregion: eu-west-3\n# Executor configuration. 
It is not necessary to provide valid configurations for all of them,\n# just pick one up according to your needs.\nexecutor:\n# Configuration for the \"docker\" executor.\ndocker:\nimage: eu.gcr.io/radio-france-k8s/kaniko:latest\n# Configuration for the \"kubernetes\" executor.\nkubernetes:\nnamespace: kaniko\nimage: eu.gcr.io/radio-france-k8s/kaniko:latest\n# References a secret containing the Docker configuration file used to authenticate to the registry.\ndocker_config_secret: docker-config-prod\nenv_secrets:\n# Additional Secret mounted as environment variables.\n# Used for instance to download the build context from AWS S3.\n- aws-s3-secret\ncontainer_override: |\nresources:\nlimits:\ncpu: 2\nmemory: 8Gi\nrequests:\ncpu: 1\nmemory: 2Gi\npod_template_override: |\nspec:\naffinity:\nnodeAffinity:\nrequiredDuringSchedulingIgnoredDuringExecution:\nnodeSelectorTerms:\n- matchExpressions:\n- key: kops.k8s.io/instancegroup\noperator: In\nvalues:\n- spot-instances\n\n# Enable test suites execution after each image build.\ninclude_tests:\n# Enable Goss tests. See the \"goss\" configuration section below.\n# To test an image, place a goss.yml file in its build context.\n# Learn more about Goss: https://github.com/goss-org/goss\n- goss\n# Enable trivy vulnerability scans. See the \"trivy\" configuration section below.\n# Learn more about Trivy: https://aquasecurity.github.io/trivy\n- trivy\n\ngoss:\nexecutor:\n# Kubernetes executor configuration. Required when using the kubernetes build executor.\nkubernetes:\nenabled: true\nnamespace: goss\nimage: aelsabbahy/goss:latest\nimage_pull_secrets:\n# - private-container-registry\n\ntrivy:\nexecutor:\n# Kubernetes executor configuration. 
Required when using the kubernetes build executor.\nkubernetes:\nenabled: true\nnamespace: trivy\nimage: ghcr.io/aquasecurity/trivy:latest\n# References a secret containing the Docker configuration file used to authenticate to the registry.\ndocker_config_secret: docker-config-ci\nimage_pull_secrets:\n# - private-container-registry\ncontainer_override: |\nresources:\nlimits:\ncpu: 2\nmemory: 3Gi\nrequests:\ncpu: 2\nmemory: 1Gi\nenv:\n- name: GOOGLE_APPLICATION_CREDENTIALS\nvalue: /credentials/gcr_service_account.json\n- name: TRIVY_TIMEOUT\nvalue: \"30m0s\"\nvolumeMounts:\n- mountPath: /credentials\nname: private-registry-credentials\nreadOnly: true\npod_template_override: |\nspec:\nvolumes:\n- name: private-registry-credentials\nsecret:\ndefaultMode: 420\nsecretName: private-registry-credentials\n\n# Easter egg: A path to a file containing a custom wordlist that will be used to\n# generate the humanized hashes for image tags. The list must contain exactly 256 words.\n# You can enable the usage of this list in each Dockerfile with a custom label :\n#   LABEL dib.use-custom-hash-list=\"true\"\n# Please keep in mind each time you change this list the images using the\n# use-custom-hash-list label may see their hashes regenerated.\nhumanized_hash_list: \"\"\n# humanized_hash_list: \"custom_wordlist.txt\"\n
"},{"location":"configuration/","title":"Configuration","text":"

DIB can be configured either by command-line flags, environment variables or configuration file.

The command-line flags have the highest priority, then environment variables, then config file. You can set some default values in the configuration file, and then override with environment variables or command-line flags.

"},{"location":"configuration/#command-line-flags","title":"Command-line flags","text":"

Example:

dib build --registry-url=gcr.io/project\n

"},{"location":"configuration/#environment-variables","title":"Environment variables","text":"

DIB auto-discovers configuration from environment variables prefixed with DIB_, followed by the capitalized, snake_cased flag name.

Example:

export DIB_REGISTRY_URL=gcr.io/project\ndib build\n

"},{"location":"configuration/#configuration-file","title":"Configuration file","text":"

DIB uses a YAML configuration file in addition to command-line arguments. It will look for a file named .dib.yaml in the current working directory. You can change the file location by setting the --config (-c) flag.

The YAML keys are equivalent to the flag names, in snake_case.

Example:

# .dib.yaml\nregistryUrl: gcr.io/project\n...\n

You can find more examples here. See also the reference configuration file.

"},{"location":"documentation/","title":"Documentation","text":"

The documentation is generated with mkdocs. It generates a static website in plain HTML from the Markdown files present in the docs/ directory.

We also use the Cobra built-in documentation generator for DIB commands.

"},{"location":"documentation/#local-setup","title":"Local Setup","text":"

Let's set up a local Python environment and run the documentation server with live-reload.

  1. Create a virtual env:

    python -m venv venv\nsource venv/bin/activate\n

  2. Install dependencies:

    pip install -r requirements.txt\n

  3. Generate docs of dib commands:

    make docs\n

  4. Run the mkdocs server:

    mkdocs serve\n

  5. Go to http://localhost:8000

"},{"location":"executors/","title":"Executors","text":"

DIB supports multiple build executors. An executor is a platform able to run image builds and tests. Unlike the build backends which can be explicitly chosen, the executor is automatically selected depending on the type of operation (build, test), and the executors configured in the configuration file.

Build backend compatibility matrix

Executor Docker Kaniko Local \u2714 \u2717 Docker \u2717 \u2714 Kubernetes \u2717 \u2714"},{"location":"executors/#local","title":"Local","text":"

Runs commands using the local exec system call. Use the --local-only flag to force the local executor.

"},{"location":"executors/#docker","title":"Docker","text":"

Runs commands in a docker container, using the docker run command.

"},{"location":"executors/#kubernetes","title":"Kubernetes","text":"

Creates pods in a kubernetes cluster, using the kubernetes API. DIB uses the current kube context, please make sure your current context points to the cluster you intend to use.

See an example configuration in the configuration reference section.

"},{"location":"extra-tags/","title":"Extra Tags","text":"

Images managed by DIB will get tagged with the human-readable version of the computed hash. This is not very convenient in some cases, for instance if we want to tag an image with the explicit version of the contained software.

DIB allows additional tags to be defined using a label in the Dockerfile:

LABEL dib.extra-tags=\"v1.0.0,v1.0,v1\"\n

The label may contain a comma-separated list of tags to be created when the image gets promoted with the --release flag.

"},{"location":"install/","title":"Installation Guide","text":"Install with goFrom binary

Install the latest release on macOS or Linux with:

go install github.com/radiofrance/dib@latest\n

Binaries are available to download from the GitHub releases page.

"},{"location":"install/#shell-autocompletion","title":"Shell autocompletion","text":"

Configure your shell to load DIB completions:

BashFishPowershellZsh

To load completion run:

. <(dib completion bash)\n

To configure your bash shell to load completions for each session add to your bashrc:

# ~/.bashrc or ~/.bash_profile\ncommand -v dib >/dev/null && . <(dib completion bash)\n

If you have an alias for dib, you can extend shell completion to work with that alias:

# ~/.bashrc or ~/.bash_profile\nalias tm=dib\ncomplete -F __start_dib tm\n

To configure your fish shell to load completions for each session write this script to your completions dir:

dib completion fish > ~/.config/fish/completions/dib.fish\n

To load completion run:

. <(dib completion powershell)\n

To configure your powershell shell to load completions for each session add to your powershell profile:

Windows:

cd \"$env:USERPROFILE\\Documents\\WindowsPowerShell\\Modules\"\ndib completion >> dib-completion.ps1\n
Linux:

cd \"${XDG_CONFIG_HOME:-\"$HOME/.config/\"}/powershell/modules\"\ndib completion >> dib-completions.ps1\n

To load completion run:

. <(dib completion zsh) && compdef _dib dib\n

To configure your zsh shell to load completions for each session add to your zshrc:

# ~/.zshrc or ~/.profile\ncommand -v dib >/dev/null && . <(dib completion zsh) && compdef _dib dib\n

or write a cached file in one of the completion directories in your ${fpath}:

echo \"${fpath// /\\n}\" | grep -i completion\ndib completion zsh > _dib\n\nmv _dib ~/.oh-my-zsh/completions  # oh-my-zsh\nmv _dib ~/.zprezto/modules/completion/external/src/  # zprezto\n
"},{"location":"quickstart/","title":"Quickstart Guide","text":"

This guide will show you the basics of DIB. You will build a set of images locally using the local docker daemon.

"},{"location":"quickstart/#prerequisites","title":"Prerequisites","text":"

Before using DIB, ensure you have the following dependencies installed:

  • Docker for building images on your local computer.
  • Graphviz for generating visual representation of the dependency graph (optional)
  • Goss for testing images after build (optional)
  • Trivy for scanning images for vulnerabilities (optional)

Then, you need to install the DIB command-line by following the installation guide.

Make sure you have authenticated access to an OCI registry, in this guide we'll assume it is registry.example.com.

"},{"location":"quickstart/#directory-structure","title":"Directory structure","text":"

Let's create a root directory containing 2 Dockerfiles in their own subdirectories. The structure will look like:

docker/\n\u251c\u2500\u2500 base\n|   \u2514\u2500\u2500 Dockerfile\n\u2514\u2500\u2500 child\n    \u2514\u2500\u2500 Dockerfile\n

Now create the dockerfile for the base image:

# docker/base/Dockerfile\nFROM alpine:latest\n\nLABEL name=\"base\"\n

The \"name\" label is mandatory, it is used by DIB to name the current image, by appending the value of the label to the registry URL. In this case, the image name is registry.example.com/base.

Then, create the dockerfile for the child image, which extends the base image:

# docker/child/Dockerfile\nFROM registry.example.com/base:latest\n\nLABEL name=\"child\"\n

Tip

The directory structure does not matter to DIB. It builds the graph of dependencies based on the FROM statements. You can have either flat directory structure like shown above, or embed child images context directories in the parent context.

"},{"location":"quickstart/#configuration","title":"Configuration","text":"

See the configuration section

For this guide, we'll use a configuration file as it is the more convenient way for day-to-day usage.

Let's create a .dib.yaml next to the docker build directory:

docker/\n\u251c\u2500\u2500 base/\n\u251c\u2500\u2500 child/\n\u2514\u2500\u2500 .dib.yaml\n

Edit the file to set the registry name, used to pull and push DIB-managed images.

registry_url: registry.example.com\n

You can check everything is correct by running dib list:

$ dib list\nUsing config file: docs/examples/.dib.yaml\n  NAME   HASH\n  base   august-berlin-blossom-magnesium\n  child  gee-minnesota-maryland-robin\n

You should get the output containing the list of images that DIB has discovered.

"},{"location":"quickstart/#building-the-images","title":"Building the images","text":"

When you have all your images definitions in the build directory and configuration set up, you can proceed to building the images:

$ dib build\n...\n

When it's done, you can run the build command again, and you'll see that DIB does nothing as long as the Dockerfiles remain unchanged.

When you are ready to promote the images to latest, run:

$ dib build --release\n

"},{"location":"reports/","title":"Reporting","text":"

DIB generates reports after each build. By default, the reports are generated in the reports directory. You can change it by setting the --reports-dir option to another location.

"},{"location":"reports/#html-report","title":"HTML Report","text":"

The HTML report is the one you are going to use the most. Just click on the link displayed on the DIB output to browse the report.

In the report you'll find:

  • An overview of all images managed by DIB
  • The build output
  • The graph of dependencies
  • Test results and logs
  • Vulnerability scan results

Preview:

"},{"location":"reports/#junit-reports","title":"jUnit Reports","text":"

Test executors generate reports in jUnit format. They can then be parsed in a CI pipeline and displayed in a user-friendly fashion.

"},{"location":"roadmap/","title":"Roadmap","text":""},{"location":"roadmap/#road-to-v1","title":"Road to v1","text":"

DIB is still a work in progress, but we plan to release a stable version (v1.0.0) after we have added the following features:

  • Per-image configuration: Some images may require additional build args, or have their own tagging scheme. Being able to configure those settings for each image is necessary.
"},{"location":"roadmap/#future-additions","title":"Future additions","text":"
  • Multiplatform builds: Ability to build images for different platforms, and generate a manifest-list.
  • Image signing: Sign images to improve supply chain security.

And more...

"},{"location":"tests/","title":"Tests","text":"

DIB can execute test suites to make assertions on images that it just built. This is useful to prevent regressions, and ensure everything works as expected at runtime.

"},{"location":"tests/#goss","title":"Goss","text":"

Goss is a YAML-based serverspec alternative tool for validating a server\u2019s configuration. DIB runs a container from the image to test, and injects the goss binary and configuration, then executes the test itself.

To get started with goss tests, follow the steps below:

  1. Install goss locally (for local builds only)

    Follow the procedure from the official docs

  2. Ensure the goss tests are enabled in configuration:

    # .dib.yaml\ninclude_tests:\n- goss\n

  3. Create a goss.yml file next to the Dockerfile of the image to test

    debian/\n\u251c\u2500\u2500 Dockerfile\n\u2514\u2500\u2500 goss.yml\n

  4. Add some assertions in the goss.yml Basic Example:

    command:\n'echo \"Hello World !\"':\nexit-status: 0\nstdout:\n- 'Hello World !'\n

Read the Goss documentation to learn all possible assertions.

"},{"location":"cmd/dib/","title":"dib","text":""},{"location":"cmd/dib/#dib","title":"dib","text":"

An Opinionated Docker Image Builder

"},{"location":"cmd/dib/#synopsis","title":"Synopsis","text":"

Docker Image Builder helps building a complex image dependency graph

Run dib --help for more information

"},{"location":"cmd/dib/#options","title":"Options","text":"
      --build-path string            Path to the directory containing all Dockerfiles to be built by dib. Every Dockerfile will be recursively \n                                     found and added to the build graph. You can provide any subdirectory if you want to focus on a reduced set of images, \n                                     as long as it has at least one Dockerfile in it. (default \"docker\")\n      --config string                config file (default is $HOME/.config/.dib.yaml)\n      --hash-list-file-path string   Path to custom hash list file that will be used to humanize hash\n  -h, --help                         help for dib\n  -l, --log-level string             Log level. Can be any level supported by logrus (\"info\", \"debug\", etc...) (default \"info\")\n      --placeholder-tag string       Tag used as placeholder in Dockerfile \"from\" statements, and replaced internally by dib during builds \n                                     to use the latest tags from parent images. In release mode, all images will be tagged with the placeholder tag, so \n                                     Dockerfiles are always valid (images can still be built even without using dib). (default \"latest\")\n      --registry-url string          Docker registry URL where images are stored. (default \"eu.gcr.io/my-test-repository\")\n
"},{"location":"cmd/dib/#see-also","title":"SEE ALSO","text":"
  • dib build - Run docker images builds
  • dib completion - Generate the autocompletion script for the specified shell
  • dib list - Print list of images managed by DIB
  • dib version - print current dib version
"},{"location":"cmd/dib/#auto-generated-by-spf13cobra-on-18-jan-2024","title":"Auto generated by spf13/cobra on 18-Jan-2024","text":""},{"location":"cmd/dib_build/","title":"dib build","text":""},{"location":"cmd/dib_build/#dib-build","title":"dib build","text":"

Run docker images builds

"},{"location":"cmd/dib_build/#synopsis","title":"Synopsis","text":"

dib build will compute the graph of images, and compare it to the last built state

For each image, if any file part of its docker context has changed, the image will be rebuilt. Otherwise, dib will create a new tag based on the previous tag

dib build [flags]\n
"},{"location":"cmd/dib_build/#options","title":"Options","text":"
  -b, --backend string           Build Backend used to run image builds. Supported backends: [docker kaniko] (default \"docker\")\n      --dry-run                  Simulate what would happen without actually doing anything dangerous.\n      --force-rebuild            Forces rebuilding the entire image graph, without regarding if the target version already exists.\n  -h, --help                     help for build\n      --include-tests strings    List of test runners to exclude during the test phase.\n      --local-only               Build docker images locally, do not push on remote registry\n      --no-graph                 Disable generation of graph during the build process.\n      --no-retag                 Disable re-tagging images after build. Note that temporary tags with the \"dev-\" prefix may still be pushed to the registry.\n      --no-tests                 Disable execution of tests (unit tests, scans, etc...) after the build.\n      --rate-limit int           Concurrent number of builds that can run simultaneously (default 1)\n      --release dib.extra-tags   Enable release mode to tag all images with extra tags found in the dib.extra-tags Dockerfile labels.\n      --reports-dir string       Path to the directory where the reports are generated. (default \"reports\")\n
"},{"location":"cmd/dib_build/#options-inherited-from-parent-commands","title":"Options inherited from parent commands","text":"
      --build-path string            Path to the directory containing all Dockerfiles to be built by dib. Every Dockerfile will be recursively \n                                     found and added to the build graph. You can provide any subdirectory if you want to focus on a reduced set of images, \n                                     as long as it has at least one Dockerfile in it. (default \"docker\")\n      --config string                config file (default is $HOME/.config/.dib.yaml)\n      --hash-list-file-path string   Path to custom hash list file that will be used to humanize hash\n  -l, --log-level string             Log level. Can be any level supported by logrus (\"info\", \"debug\", etc...) (default \"info\")\n      --placeholder-tag string       Tag used as placeholder in Dockerfile \"from\" statements, and replaced internally by dib during builds \n                                     to use the latest tags from parent images. In release mode, all images will be tagged with the placeholder tag, so \n                                     Dockerfiles are always valid (images can still be built even without using dib). (default \"latest\")\n      --registry-url string          Docker registry URL where images are stored. (default \"eu.gcr.io/my-test-repository\")\n
"},{"location":"cmd/dib_build/#see-also","title":"SEE ALSO","text":"
  • dib - An Opinionated Docker Image Builder
"},{"location":"cmd/dib_build/#auto-generated-by-spf13cobra-on-18-jan-2024","title":"Auto generated by spf13/cobra on 18-Jan-2024","text":""},{"location":"cmd/dib_completion/","title":"dib completion","text":""},{"location":"cmd/dib_completion/#dib-completion","title":"dib completion","text":"

Generate the autocompletion script for the specified shell

"},{"location":"cmd/dib_completion/#synopsis","title":"Synopsis","text":"

Generate the autocompletion script for dib for the specified shell. See each sub-command's help for details on how to use the generated script.

"},{"location":"cmd/dib_completion/#options","title":"Options","text":"
  -h, --help   help for completion\n
"},{"location":"cmd/dib_completion/#options-inherited-from-parent-commands","title":"Options inherited from parent commands","text":"
      --build-path string            Path to the directory containing all Dockerfiles to be built by dib. Every Dockerfile will be recursively \n                                     found and added to the build graph. You can provide any subdirectory if you want to focus on a reduced set of images, \n                                     as long as it has at least one Dockerfile in it. (default \"docker\")\n      --config string                config file (default is $HOME/.config/.dib.yaml)\n      --hash-list-file-path string   Path to custom hash list file that will be used to humanize hash\n  -l, --log-level string             Log level. Can be any level supported by logrus (\"info\", \"debug\", etc...) (default \"info\")\n      --placeholder-tag string       Tag used as placeholder in Dockerfile \"from\" statements, and replaced internally by dib during builds \n                                     to use the latest tags from parent images. In release mode, all images will be tagged with the placeholder tag, so \n                                     Dockerfiles are always valid (images can still be built even without using dib). (default \"latest\")\n      --registry-url string          Docker registry URL where images are stored. (default \"eu.gcr.io/my-test-repository\")\n
"},{"location":"cmd/dib_completion/#see-also","title":"SEE ALSO","text":"
  • dib - An Opinionated Docker Image Builder
  • dib completion bash - Generate the autocompletion script for bash
  • dib completion fish - Generate the autocompletion script for fish
  • dib completion powershell - Generate the autocompletion script for powershell
  • dib completion zsh - Generate the autocompletion script for zsh
"},{"location":"cmd/dib_completion/#auto-generated-by-spf13cobra-on-18-jan-2024","title":"Auto generated by spf13/cobra on 18-Jan-2024","text":""},{"location":"cmd/dib_completion_bash/","title":"dib completion bash","text":""},{"location":"cmd/dib_completion_bash/#dib-completion-bash","title":"dib completion bash","text":"

Generate the autocompletion script for bash

"},{"location":"cmd/dib_completion_bash/#synopsis","title":"Synopsis","text":"

Generate the autocompletion script for the bash shell.

This script depends on the 'bash-completion' package. If it is not installed already, you can install it via your OS's package manager.

To load completions in your current shell session:

source <(dib completion bash)\n

To load completions for every new session, execute once:

"},{"location":"cmd/dib_completion_bash/#linux","title":"Linux:","text":"
dib completion bash > /etc/bash_completion.d/dib\n
"},{"location":"cmd/dib_completion_bash/#macos","title":"macOS:","text":"
dib completion bash > $(brew --prefix)/etc/bash_completion.d/dib\n

You will need to start a new shell for this setup to take effect.

dib completion bash\n
"},{"location":"cmd/dib_completion_bash/#options","title":"Options","text":"
  -h, --help              help for bash\n      --no-descriptions   disable completion descriptions\n
"},{"location":"cmd/dib_completion_bash/#options-inherited-from-parent-commands","title":"Options inherited from parent commands","text":"
      --build-path string            Path to the directory containing all Dockerfiles to be built by dib. Every Dockerfile will be recursively \n                                     found and added to the build graph. You can provide any subdirectory if you want to focus on a reduced set of images, \n                                     as long as it has at least one Dockerfile in it. (default \"docker\")\n      --config string                config file (default is $HOME/.config/.dib.yaml)\n      --hash-list-file-path string   Path to custom hash list file that will be used to humanize hash\n  -l, --log-level string             Log level. Can be any level supported by logrus (\"info\", \"debug\", etc...) (default \"info\")\n      --placeholder-tag string       Tag used as placeholder in Dockerfile \"from\" statements, and replaced internally by dib during builds \n                                     to use the latest tags from parent images. In release mode, all images will be tagged with the placeholder tag, so \n                                     Dockerfiles are always valid (images can still be built even without using dib). (default \"latest\")\n      --registry-url string          Docker registry URL where images are stored. (default \"eu.gcr.io/my-test-repository\")\n
"},{"location":"cmd/dib_completion_bash/#see-also","title":"SEE ALSO","text":"
  • dib completion - Generate the autocompletion script for the specified shell
"},{"location":"cmd/dib_completion_bash/#auto-generated-by-spf13cobra-on-18-jan-2024","title":"Auto generated by spf13/cobra on 18-Jan-2024","text":""},{"location":"cmd/dib_completion_fish/","title":"dib completion fish","text":""},{"location":"cmd/dib_completion_fish/#dib-completion-fish","title":"dib completion fish","text":"

Generate the autocompletion script for fish

"},{"location":"cmd/dib_completion_fish/#synopsis","title":"Synopsis","text":"

Generate the autocompletion script for the fish shell.

To load completions in your current shell session:

dib completion fish | source\n

To load completions for every new session, execute once:

dib completion fish > ~/.config/fish/completions/dib.fish\n

You will need to start a new shell for this setup to take effect.

dib completion fish [flags]\n
"},{"location":"cmd/dib_completion_fish/#options","title":"Options","text":"
  -h, --help              help for fish\n      --no-descriptions   disable completion descriptions\n
"},{"location":"cmd/dib_completion_fish/#options-inherited-from-parent-commands","title":"Options inherited from parent commands","text":"
      --build-path string            Path to the directory containing all Dockerfiles to be built by dib. Every Dockerfile will be recursively \n                                     found and added to the build graph. You can provide any subdirectory if you want to focus on a reduced set of images, \n                                     as long as it has at least one Dockerfile in it. (default \"docker\")\n      --config string                config file (default is $HOME/.config/.dib.yaml)\n      --hash-list-file-path string   Path to custom hash list file that will be used to humanize hash\n  -l, --log-level string             Log level. Can be any level supported by logrus (\"info\", \"debug\", etc...) (default \"info\")\n      --placeholder-tag string       Tag used as placeholder in Dockerfile \"from\" statements, and replaced internally by dib during builds \n                                     to use the latest tags from parent images. In release mode, all images will be tagged with the placeholder tag, so \n                                     Dockerfiles are always valid (images can still be built even without using dib). (default \"latest\")\n      --registry-url string          Docker registry URL where images are stored. (default \"eu.gcr.io/my-test-repository\")\n
"},{"location":"cmd/dib_completion_fish/#see-also","title":"SEE ALSO","text":"
  • dib completion - Generate the autocompletion script for the specified shell
"},{"location":"cmd/dib_completion_fish/#auto-generated-by-spf13cobra-on-18-jan-2024","title":"Auto generated by spf13/cobra on 18-Jan-2024","text":""},{"location":"cmd/dib_completion_powershell/","title":"dib completion powershell","text":""},{"location":"cmd/dib_completion_powershell/#dib-completion-powershell","title":"dib completion powershell","text":"

Generate the autocompletion script for powershell

"},{"location":"cmd/dib_completion_powershell/#synopsis","title":"Synopsis","text":"

Generate the autocompletion script for powershell.

To load completions in your current shell session:

dib completion powershell | Out-String | Invoke-Expression\n

To load completions for every new session, add the output of the above command to your powershell profile.

dib completion powershell [flags]\n
"},{"location":"cmd/dib_completion_powershell/#options","title":"Options","text":"
  -h, --help              help for powershell\n      --no-descriptions   disable completion descriptions\n
"},{"location":"cmd/dib_completion_powershell/#options-inherited-from-parent-commands","title":"Options inherited from parent commands","text":"
      --build-path string            Path to the directory containing all Dockerfiles to be built by dib. Every Dockerfile will be recursively \n                                     found and added to the build graph. You can provide any subdirectory if you want to focus on a reduced set of images, \n                                     as long as it has at least one Dockerfile in it. (default \"docker\")\n      --config string                config file (default is $HOME/.config/.dib.yaml)\n      --hash-list-file-path string   Path to custom hash list file that will be used to humanize hash\n  -l, --log-level string             Log level. Can be any level supported by logrus (\"info\", \"debug\", etc...) (default \"info\")\n      --placeholder-tag string       Tag used as placeholder in Dockerfile \"from\" statements, and replaced internally by dib during builds \n                                     to use the latest tags from parent images. In release mode, all images will be tagged with the placeholder tag, so \n                                     Dockerfiles are always valid (images can still be built even without using dib). (default \"latest\")\n      --registry-url string          Docker registry URL where images are stored. (default \"eu.gcr.io/my-test-repository\")\n
"},{"location":"cmd/dib_completion_powershell/#see-also","title":"SEE ALSO","text":"
  • dib completion - Generate the autocompletion script for the specified shell
"},{"location":"cmd/dib_completion_powershell/#auto-generated-by-spf13cobra-on-18-jan-2024","title":"Auto generated by spf13/cobra on 18-Jan-2024","text":""},{"location":"cmd/dib_completion_zsh/","title":"dib completion zsh","text":""},{"location":"cmd/dib_completion_zsh/#dib-completion-zsh","title":"dib completion zsh","text":"

Generate the autocompletion script for zsh

"},{"location":"cmd/dib_completion_zsh/#synopsis","title":"Synopsis","text":"

Generate the autocompletion script for the zsh shell.

If shell completion is not already enabled in your environment you will need to enable it. You can execute the following once:

echo \"autoload -U compinit; compinit\" >> ~/.zshrc\n

To load completions in your current shell session:

source <(dib completion zsh)\n

To load completions for every new session, execute once:

"},{"location":"cmd/dib_completion_zsh/#linux","title":"Linux:","text":"
dib completion zsh > \"${fpath[1]}/_dib\"\n
"},{"location":"cmd/dib_completion_zsh/#macos","title":"macOS:","text":"
dib completion zsh > $(brew --prefix)/share/zsh/site-functions/_dib\n

You will need to start a new shell for this setup to take effect.

dib completion zsh [flags]\n
"},{"location":"cmd/dib_completion_zsh/#options","title":"Options","text":"
  -h, --help              help for zsh\n      --no-descriptions   disable completion descriptions\n
"},{"location":"cmd/dib_completion_zsh/#options-inherited-from-parent-commands","title":"Options inherited from parent commands","text":"
      --build-path string            Path to the directory containing all Dockerfiles to be built by dib. Every Dockerfile will be recursively \n                                     found and added to the build graph. You can provide any subdirectory if you want to focus on a reduced set of images, \n                                     as long as it has at least one Dockerfile in it. (default \"docker\")\n      --config string                config file (default is $HOME/.config/.dib.yaml)\n      --hash-list-file-path string   Path to custom hash list file that will be used to humanize hash\n  -l, --log-level string             Log level. Can be any level supported by logrus (\"info\", \"debug\", etc...) (default \"info\")\n      --placeholder-tag string       Tag used as placeholder in Dockerfile \"from\" statements, and replaced internally by dib during builds \n                                     to use the latest tags from parent images. In release mode, all images will be tagged with the placeholder tag, so \n                                     Dockerfiles are always valid (images can still be built even without using dib). (default \"latest\")\n      --registry-url string          Docker registry URL where images are stored. (default \"eu.gcr.io/my-test-repository\")\n
"},{"location":"cmd/dib_completion_zsh/#see-also","title":"SEE ALSO","text":"
  • dib completion - Generate the autocompletion script for the specified shell
"},{"location":"cmd/dib_completion_zsh/#auto-generated-by-spf13cobra-on-18-jan-2024","title":"Auto generated by spf13/cobra on 18-Jan-2024","text":""},{"location":"cmd/dib_list/","title":"dib list","text":""},{"location":"cmd/dib_list/#dib-list","title":"dib list","text":"

Print list of images managed by DIB

"},{"location":"cmd/dib_list/#synopsis","title":"Synopsis","text":"

dib list will print a list of all Docker images managed by DIB

dib list [flags]\n
"},{"location":"cmd/dib_list/#options","title":"Options","text":"
  -h, --help            help for list\n  -o, --output string   Output format (console|go-template-file)\n                        You can provide a custom format using go-template: like this: \"-o go-template-file=...\".\n
"},{"location":"cmd/dib_list/#options-inherited-from-parent-commands","title":"Options inherited from parent commands","text":"
      --build-path string            Path to the directory containing all Dockerfiles to be built by dib. Every Dockerfile will be recursively \n                                     found and added to the build graph. You can provide any subdirectory if you want to focus on a reduced set of images, \n                                     as long as it has at least one Dockerfile in it. (default \"docker\")\n      --config string                config file (default is $HOME/.config/.dib.yaml)\n      --hash-list-file-path string   Path to custom hash list file that will be used to humanize hash\n  -l, --log-level string             Log level. Can be any level supported by logrus (\"info\", \"debug\", etc...) (default \"info\")\n      --placeholder-tag string       Tag used as placeholder in Dockerfile \"from\" statements, and replaced internally by dib during builds \n                                     to use the latest tags from parent images. In release mode, all images will be tagged with the placeholder tag, so \n                                     Dockerfiles are always valid (images can still be built even without using dib). (default \"latest\")\n      --registry-url string          Docker registry URL where images are stored. (default \"eu.gcr.io/my-test-repository\")\n
"},{"location":"cmd/dib_list/#see-also","title":"SEE ALSO","text":"
  • dib - An Opinionated Docker Image Builder
"},{"location":"cmd/dib_list/#auto-generated-by-spf13cobra-on-18-jan-2024","title":"Auto generated by spf13/cobra on 18-Jan-2024","text":""},{"location":"cmd/dib_version/","title":"dib version","text":""},{"location":"cmd/dib_version/#dib-version","title":"dib version","text":"

print current dib version

dib version [flags]\n
"},{"location":"cmd/dib_version/#options","title":"Options","text":"
  -h, --help   help for version\n
"},{"location":"cmd/dib_version/#options-inherited-from-parent-commands","title":"Options inherited from parent commands","text":"
      --build-path string            Path to the directory containing all Dockerfiles to be built by dib. Every Dockerfile will be recursively \n                                     found and added to the build graph. You can provide any subdirectory if you want to focus on a reduced set of images, \n                                     as long as it has at least one Dockerfile in it. (default \"docker\")\n      --config string                config file (default is $HOME/.config/.dib.yaml)\n      --hash-list-file-path string   Path to custom hash list file that will be used to humanize hash\n  -l, --log-level string             Log level. Can be any level supported by logrus (\"info\", \"debug\", etc...) (default \"info\")\n      --placeholder-tag string       Tag used as placeholder in Dockerfile \"from\" statements, and replaced internally by dib during builds \n                                     to use the latest tags from parent images. In release mode, all images will be tagged with the placeholder tag, so \n                                     Dockerfiles are always valid (images can still be built even without using dib). (default \"latest\")\n      --registry-url string          Docker registry URL where images are stored. (default \"eu.gcr.io/my-test-repository\")\n
"},{"location":"cmd/dib_version/#see-also","title":"SEE ALSO","text":"
  • dib - An Opinionated Docker Image Builder
"},{"location":"cmd/dib_version/#auto-generated-by-spf13cobra-on-18-jan-2024","title":"Auto generated by spf13/cobra on 18-Jan-2024","text":""}]} \ No newline at end of file diff --git a/sitemap.xml b/sitemap.xml new file mode 100644 index 000000000..7060b94ca --- /dev/null +++ b/sitemap.xml @@ -0,0 +1,113 @@ + + + + https://radiofrance.github.io/ + 2024-01-18 + daily + + + https://radiofrance.github.io/backends/ + 2024-01-18 + daily + + + https://radiofrance.github.io/best-practices/ + 2024-01-18 + daily + + + https://radiofrance.github.io/configuration-reference/ + 2024-01-18 + daily + + + https://radiofrance.github.io/configuration/ + 2024-01-18 + daily + + + https://radiofrance.github.io/documentation/ + 2024-01-18 + daily + + + https://radiofrance.github.io/executors/ + 2024-01-18 + daily + + + https://radiofrance.github.io/extra-tags/ + 2024-01-18 + daily + + + https://radiofrance.github.io/install/ + 2024-01-18 + daily + + + https://radiofrance.github.io/quickstart/ + 2024-01-18 + daily + + + https://radiofrance.github.io/reports/ + 2024-01-18 + daily + + + https://radiofrance.github.io/roadmap/ + 2024-01-18 + daily + + + https://radiofrance.github.io/tests/ + 2024-01-18 + daily + + + https://radiofrance.github.io/cmd/dib/ + 2024-01-18 + daily + + + https://radiofrance.github.io/cmd/dib_build/ + 2024-01-18 + daily + + + https://radiofrance.github.io/cmd/dib_completion/ + 2024-01-18 + daily + + + https://radiofrance.github.io/cmd/dib_completion_bash/ + 2024-01-18 + daily + + + https://radiofrance.github.io/cmd/dib_completion_fish/ + 2024-01-18 + daily + + + https://radiofrance.github.io/cmd/dib_completion_powershell/ + 2024-01-18 + daily + + + https://radiofrance.github.io/cmd/dib_completion_zsh/ + 2024-01-18 + daily + + + https://radiofrance.github.io/cmd/dib_list/ + 2024-01-18 + daily + + + https://radiofrance.github.io/cmd/dib_version/ + 2024-01-18 + daily + + \ No newline at end of file diff --git a/sitemap.xml.gz b/sitemap.xml.gz new file mode 100644 
index 000000000..229aff2d3 Binary files /dev/null and b/sitemap.xml.gz differ diff --git a/tests/index.html b/tests/index.html new file mode 100644 index 000000000..c5b2ef262 --- /dev/null +++ b/tests/index.html @@ -0,0 +1,863 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + Tests - DIB + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + +

Tests

+

DIB can execute tests suites to make assertions on images that it just built. This is useful to prevent regressions, +and ensure everything work as expected at runtime.

+

Goss

+

Goss is a YAML-based serverspec alternative tool for validating a server’s configuration. DIB runs a container from the +image to test, and injects the goss binary and configuration, then execute the test itself.

+

To get started with goss tests, follow the steps below:

+
    +
  1. +

    Install goss locally (for local builds only)

    +

    Follow the procedure from the official docs

    +
  2. +
  3. +

    Ensure the goss tests are enabled in configuration: +

    # .dib.yaml
    +include_tests:
    +  - goss
    +

    +
  4. +
  5. +

    Create a goss.yml file next to the Dockerfile of the image to test +

    debian/
    +├── Dockerfile
    +└── goss.yml
    +

    +
  6. +
  7. +

    Add some assertions in the goss.yml + Basic Example: +

    command:
    +  'echo "Hello World !"':
    +    exit-status: 0
    +    stdout:
    +      - 'Hello World !'
    +

    +
  8. +
+

Read the Goss documentation to learn all possible assertions.

+ + + + + + +
+
+ + +
+ + + +
+ + + +
+
+
+
+ + + + + + + + + \ No newline at end of file