diff --git a/ReadME.MD b/ReadME.MD
new file mode 100644
index 0000000..1c0bb25
--- /dev/null
+++ b/ReadME.MD
@@ -0,0 +1,12 @@
+# Kubernetes 1.21
+cs:~containers/charmed-kubernetes-657
+
+```Bash
+charm pull cs:~containers/containerd-119
+charm pull cs:~containers/kubeapi-load-balancer-786
+charm pull cs:~containers/etcd-583
+charm pull cs:~containers/easyrsa-373
+charm pull cs:~containers/kubernetes-master-990
+charm pull cs:~containers/kubernetes-worker-757
+charm pull cs:~containers/calico-812
+```
diff --git a/calico/.build.manifest b/calico/.build.manifest
new file mode 100644
index 0000000..0471d71
--- /dev/null
+++ b/calico/.build.manifest
@@ -0,0 +1,561 @@
+{
+ "layers": [
+ {
+ "branch": "refs/heads/master\nrefs/heads/stable",
+ "rev": "fcdcea4e5de3e1556c24e6704607862d0ba00a56",
+ "url": "layer:options"
+ },
+ {
+ "branch": "refs/heads/stable",
+ "rev": "0d10732a6e14ea2f940a35ab61425a97c5db6a16",
+ "url": "layer:basic"
+ },
+ {
+ "branch": "refs/heads/master\nrefs/heads/stable",
+ "rev": "cc5bd3f49b2fa5e6c3ab2336763c313ec8bf083f",
+ "url": "layer:leadership"
+ },
+ {
+ "branch": "refs/heads/master\nrefs/heads/stable",
+ "rev": "a7d7b6423db37a47611310039e6ed1929c0a2eab",
+ "url": "layer:status"
+ },
+ {
+ "branch": "refs/heads/stable",
+ "rev": "63c6d240f29b0366c3839dacd4e25d63a368da36",
+ "url": "calico"
+ },
+ {
+ "branch": "refs/heads/master\nrefs/heads/stable",
+ "rev": "44f244cbd08b86bf2b68bd71c3fb34c7c070c382",
+ "url": "interface:etcd"
+ },
+ {
+ "branch": "refs/heads/master\nrefs/heads/stable",
+ "rev": "b941b3b542d78ad15aa40937b26c7bf727e1b39b",
+ "url": "interface:kubernetes-cni"
+ }
+ ],
+ "signatures": {
+ ".build.manifest": [
+ "build",
+ "dynamic",
+ "unchecked"
+ ],
+ ".github/workflows/build.yml": [
+ "calico",
+ "static",
+ "4892e4eb72fb0d0efaa1c6b62f8f132cc69ea2b967c9604c91d4f16e0ec6e26b"
+ ],
+ ".github/workflows/tox.yaml": [
+ "calico",
+ "static",
+ "8de54f40fc8e9385b79ed8d19e6ea765bdd6c48185fbd8bd7142834990982d45"
+ ],
+ ".gitignore": [
+ "calico",
+ "static",
+ "3437c2cd90de443f44766939172b82e750e19fd474df499ffe003bb807e8cef4"
+ ],
+ ".travis/profile-update.yaml": [
+ "layer:basic",
+ "static",
+ "731e20aa59bf61c024d317ad630e478301a9386ccc0afe56e6c1c09db07ac83b"
+ ],
+ "CONTRIBUTING.md": [
+ "calico",
+ "static",
+ "fa04ec96762f4edc071c7b0097223c121e33fd6769226562681646577d7b1146"
+ ],
+ "DEVELOPING.md": [
+ "calico",
+ "static",
+ "ccb2d8ad4b5c328d810c53fa43b41f6641af0f002a45d548f6ed9d9f546d3dbe"
+ ],
+ "LICENSE": [
+ "calico",
+ "static",
+ "58d1e17ffe5109a7ae296caafcadfdbe6a7d176f0bc4ab01e12a689b0499d8bd"
+ ],
+ "Makefile": [
+ "calico",
+ "static",
+ "d49436a9eb35598691285b00e6a678ad74e391a818d55989116e264f40fcd9e6"
+ ],
+ "README.md": [
+ "calico",
+ "static",
+ "d2d26569f5a63b1be2e23835346ed2e8b0b13cdd74a6efb161221d2462a58dc5"
+ ],
+ "bin/charm-env": [
+ "layer:basic",
+ "static",
+ "fb6a20fac4102a6a4b6ffe903fcf666998f9a95a3647e6f9af7a1eeb44e58fd5"
+ ],
+ "bin/layer_option": [
+ "layer:options",
+ "static",
+ "e959bf29da4c5edff28b2602c24113c4df9e25cdc9f2aa3b5d46c8577b2a40cc"
+ ],
+ "build-calico-resource.sh": [
+ "calico",
+ "static",
+ "1c98f05945166e17cf9c530a6ee064092a323e5529639474b07f380210959acb"
+ ],
+ "config.yaml": [
+ "calico",
+ "dynamic",
+ "c6014840f64c5c4cab24fa54735832e36ecd11de15ab6e34ecedf5839feca695"
+ ],
+ "copyright": [
+ "layer:status",
+ "static",
+ "7c0e36e618a8544faaaa3f8e0533c2f1f4a18bcacbdd8b99b537742e6b587d58"
+ ],
+ "copyright.layer-basic": [
+ "layer:basic",
+ "static",
+ "f6740d66fd60b60f2533d9fcb53907078d1e20920a0219afce7182e2a1c97629"
+ ],
+ "copyright.layer-leadership": [
+ "layer:leadership",
+ "static",
+ "8ce407829378fc0f72ce44c7f624e4951c7ccb3db1cfb949bee026b701728cc9"
+ ],
+ "copyright.layer-options": [
+ "layer:options",
+ "static",
+ "f6740d66fd60b60f2533d9fcb53907078d1e20920a0219afce7182e2a1c97629"
+ ],
+ "docs/status.md": [
+ "layer:status",
+ "static",
+ "975dec9f8c938196e102e954a80226bda293407c4e5ae857c118bf692154702a"
+ ],
+ "exec.d/docker-compose/charm-pre-install": [
+ "calico",
+ "static",
+ "2760db1047cdc4beeb974923c693bf824d45a9ee88fb50496efada92461aceb8"
+ ],
+ "hooks/cni-relation-broken": [
+ "layer:basic",
+ "dynamic",
+ "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7"
+ ],
+ "hooks/cni-relation-changed": [
+ "layer:basic",
+ "dynamic",
+ "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7"
+ ],
+ "hooks/cni-relation-created": [
+ "layer:basic",
+ "dynamic",
+ "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7"
+ ],
+ "hooks/cni-relation-departed": [
+ "layer:basic",
+ "dynamic",
+ "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7"
+ ],
+ "hooks/cni-relation-joined": [
+ "layer:basic",
+ "dynamic",
+ "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7"
+ ],
+ "hooks/config-changed": [
+ "layer:basic",
+ "dynamic",
+ "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7"
+ ],
+ "hooks/etcd-relation-broken": [
+ "layer:basic",
+ "dynamic",
+ "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7"
+ ],
+ "hooks/etcd-relation-changed": [
+ "layer:basic",
+ "dynamic",
+ "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7"
+ ],
+ "hooks/etcd-relation-created": [
+ "layer:basic",
+ "dynamic",
+ "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7"
+ ],
+ "hooks/etcd-relation-departed": [
+ "layer:basic",
+ "dynamic",
+ "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7"
+ ],
+ "hooks/etcd-relation-joined": [
+ "layer:basic",
+ "dynamic",
+ "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7"
+ ],
+ "hooks/hook.template": [
+ "layer:basic",
+ "static",
+ "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7"
+ ],
+ "hooks/install": [
+ "layer:basic",
+ "dynamic",
+ "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7"
+ ],
+ "hooks/leader-elected": [
+ "layer:basic",
+ "dynamic",
+ "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7"
+ ],
+ "hooks/leader-settings-changed": [
+ "layer:basic",
+ "dynamic",
+ "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7"
+ ],
+ "hooks/post-series-upgrade": [
+ "layer:basic",
+ "dynamic",
+ "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7"
+ ],
+ "hooks/pre-series-upgrade": [
+ "layer:basic",
+ "dynamic",
+ "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7"
+ ],
+ "hooks/relations/etcd/.gitignore": [
+ "interface:etcd",
+ "static",
+ "cf237c7aff44efbe6e502e645c3e06da03a69d7bdeb43392108ef3348143417e"
+ ],
+ "hooks/relations/etcd/README.md": [
+ "interface:etcd",
+ "static",
+ "93873d073f5f5302d352e09321aaf87458556e9730f89e1c682699c1d0db2386"
+ ],
+ "hooks/relations/etcd/__init__.py": [
+ "interface:etcd",
+ "static",
+ "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"
+ ],
+ "hooks/relations/etcd/interface.yaml": [
+ "interface:etcd",
+ "static",
+ "ba9f723b57a434f7efb2c06abec4167cd412c16da5f496a477dd7691e9a715be"
+ ],
+ "hooks/relations/etcd/peers.py": [
+ "interface:etcd",
+ "static",
+ "99419c3d139fb5bb90021e0482f9e7ac2cfb776fb7af79b46209c6a75b36e834"
+ ],
+ "hooks/relations/etcd/provides.py": [
+ "interface:etcd",
+ "static",
+ "3db1f644ab669e2dec59d59b61de63b721bc05b38fe646e525fff8f0d60982f9"
+ ],
+ "hooks/relations/etcd/requires.py": [
+ "interface:etcd",
+ "static",
+ "8ffc1a094807fd36a1d1428b0a07b2428074134d46086066ecd6c0acd9fcd13e"
+ ],
+ "hooks/relations/kubernetes-cni/.gitignore": [
+ "interface:kubernetes-cni",
+ "static",
+ "cf237c7aff44efbe6e502e645c3e06da03a69d7bdeb43392108ef3348143417e"
+ ],
+ "hooks/relations/kubernetes-cni/.travis.yml": [
+ "interface:kubernetes-cni",
+ "static",
+ "c2bd1b88f26c88b883696cca155c28671359a256ed48b90a9ea724b376f2a829"
+ ],
+ "hooks/relations/kubernetes-cni/README.md": [
+ "interface:kubernetes-cni",
+ "static",
+ "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"
+ ],
+ "hooks/relations/kubernetes-cni/__init__.py": [
+ "interface:kubernetes-cni",
+ "static",
+ "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"
+ ],
+ "hooks/relations/kubernetes-cni/interface.yaml": [
+ "interface:kubernetes-cni",
+ "static",
+ "03affdaf7e879adfdf8c434aa31d40faa6d2872faa7dfd93a5d3a1ebae02487d"
+ ],
+ "hooks/relations/kubernetes-cni/provides.py": [
+ "interface:kubernetes-cni",
+ "static",
+ "4c3fc3f06a42a2f67fc03c4bc1b4c617021dc1ebb7111527ce6d9cd523b0c40e"
+ ],
+ "hooks/relations/kubernetes-cni/requires.py": [
+ "interface:kubernetes-cni",
+ "static",
+ "c5fdd7a0eae100833ae6c79474f931803466cd5b206cf8f456cd6f2716d1d2fa"
+ ],
+ "hooks/relations/kubernetes-cni/tox.ini": [
+ "interface:kubernetes-cni",
+ "static",
+ "bf0fb0883583bb3deebd17e7ebd4599d9f3770c19a6fc7683044654b6e982c90"
+ ],
+ "hooks/start": [
+ "layer:basic",
+ "dynamic",
+ "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7"
+ ],
+ "hooks/stop": [
+ "layer:basic",
+ "dynamic",
+ "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7"
+ ],
+ "hooks/update-status": [
+ "layer:basic",
+ "dynamic",
+ "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7"
+ ],
+ "hooks/upgrade-charm": [
+ "layer:basic",
+ "dynamic",
+ "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7"
+ ],
+ "icon.svg": [
+ "calico",
+ "static",
+ "49b68e61506d639d3c859e9477338469d1d44f7b76ad381ff152c728c71c43d9"
+ ],
+ "layer.yaml": [
+ "calico",
+ "dynamic",
+ "8547f11913f564feb1ca4f6674788385e237b4d8d1939c5a8675c6bbb4f1d8e3"
+ ],
+ "lib/calico_common.py": [
+ "calico",
+ "static",
+ "ec886f86a4505148016a540652c51afd7bf8ee4ef3b21db368e10ded2b9569be"
+ ],
+ "lib/calico_upgrade.py": [
+ "calico",
+ "static",
+ "1200e9016b1db2f2a853033d04126adff1d4d43ccb29c48a613232e06f33a8c4"
+ ],
+ "lib/charms/layer/__init__.py": [
+ "layer:basic",
+ "static",
+ "dfe0d26c6bf409767de6e2546bc648f150e1b396243619bad3aa0553ab7e0e6f"
+ ],
+ "lib/charms/layer/basic.py": [
+ "layer:basic",
+ "static",
+ "3126b5754ad39402ee27e64527044ddd231ed1cd137fcedaffb51e63a635f108"
+ ],
+ "lib/charms/layer/execd.py": [
+ "layer:basic",
+ "static",
+ "fda8bd491032db1db8ddaf4e99e7cc878c6fb5432efe1f91cadb5b34765d076d"
+ ],
+ "lib/charms/layer/options.py": [
+ "layer:options",
+ "static",
+ "8ae7a07d22542fc964f2d2bee8219d1c78a68dace70a1b38d36d4aea47b1c3b2"
+ ],
+ "lib/charms/layer/status.py": [
+ "layer:status",
+ "static",
+ "d560a5e07b2e5f2b0f25f30e1f0278b06f3f90c01e4dbad5c83d71efc79018c6"
+ ],
+ "lib/charms/leadership.py": [
+ "layer:leadership",
+ "static",
+ "20ffcbbc08147506759726ad51567420659ffb8a2e0121079240b8706658e332"
+ ],
+ "make_docs": [
+ "layer:status",
+ "static",
+ "c990f55c8e879793a62ed8464ee3d7e0d7d2225fdecaf17af24b0df0e2daa8c1"
+ ],
+ "metadata.yaml": [
+ "calico",
+ "dynamic",
+ "b1a1e252fb9eac35a8b1a10564b400a07d5c810d8ceed1a1e3460bea314886bb"
+ ],
+ "pydocmd.yml": [
+ "layer:status",
+ "static",
+ "11d9293901f32f75f4256ae4ac2073b92ce1d7ef7b6c892ba9fbb98690a0b330"
+ ],
+ "reactive/__init__.py": [
+ "layer:leadership",
+ "static",
+ "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"
+ ],
+ "reactive/calico.py": [
+ "calico",
+ "static",
+ "3037c342634848aca03bb3a8b818102ae13e4d82942e1c8f8761c8465b808e14"
+ ],
+ "reactive/leadership.py": [
+ "layer:leadership",
+ "static",
+ "e2b233cf861adc3b2d9e9c062134ce2f104953f03283cdddd88f49efee652e8f"
+ ],
+ "reactive/status.py": [
+ "layer:status",
+ "static",
+ "30207fc206f24e91def5252f1c7f7c8e23c0aed0e93076babf5e03c05296d207"
+ ],
+ "requirements.txt": [
+ "layer:basic",
+ "static",
+ "a00f75d80849e5b4fc5ad2e7536f947c25b1a4044b341caa8ee87a92d3a4c804"
+ ],
+ "script/bootstrap": [
+ "calico",
+ "static",
+ "1985d9a07e8d764351530f6eb1b81bef6a4c035dc75422c03f4672ceaf1a4c18"
+ ],
+ "script/build": [
+ "calico",
+ "static",
+ "e78cab1bead2e3c8f7970558f4d08a81f6cc59e5c2903e997644f7e51e7a3633"
+ ],
+ "script/upload": [
+ "calico",
+ "static",
+ "db3cd04f1d4c2a2be12becb8d62bf879701cbca3da0d458b4c362439b32ebfc1"
+ ],
+ "templates/10-calico.conflist": [
+ "calico",
+ "static",
+ "9332e14d9422781530cd13fef5748e3d06fcce7f4221123f625c3a7e09238abb"
+ ],
+ "templates/calico-node.service": [
+ "calico",
+ "static",
+ "beae0c32a25f911a37363064af7bfa96a39f14ab99b3060412491382a81ddaa7"
+ ],
+ "templates/calicoctl": [
+ "calico",
+ "static",
+ "b913dfdce8de51aa9a13950817e4101f8f4229052927a272fff5b37a4370537f"
+ ],
+ "templates/policy-controller.yaml": [
+ "calico",
+ "static",
+ "3bd0f0f714a8c7f418fdb7556f10097d963dbf0c6232a41606163c30022f0e9e"
+ ],
+ "tests/00-setup": [
+ "calico",
+ "static",
+ "111c079b81d260bbcd716dcf41672372a4cf4aaa14154b6c3055deeedae37a06"
+ ],
+ "tests/10-deploy": [
+ "calico",
+ "static",
+ "e895f7720cd0ce3956082054f15b0cebce683aa44f66bb32038bab1e693bf74f"
+ ],
+ "tests/conftest.py": [
+ "calico",
+ "static",
+ "2c58cb11bf276805f586c05c20bf4ba15a7431b5c23ea3323dc4256ddc34c4d2"
+ ],
+ "tests/test_calico.py": [
+ "calico",
+ "static",
+ "2de748d396d66f5c581ade110a3f8a709e6aabe50f97502e1d0ac0ec817c223d"
+ ],
+ "tox.ini": [
+ "calico",
+ "static",
+ "1ce2114e5084c1f5bc99f1768c0566f77b8216166974de3b17c47e97b54aba7d"
+ ],
+ "version": [
+ "calico",
+ "dynamic",
+ "44a751fcf4d3ba30169f70f2b7b84b9cfc381b6f514c41fe4d3ef8afe2ff9086"
+ ],
+ "wheelhouse.txt": [
+ "calico",
+ "dynamic",
+ "cb5ab8b42ebef8ae5adc80de0d7c39f84aeaa97207298aa453142bff87c39f8c"
+ ],
+ "wheelhouse/Jinja2-2.10.1.tar.gz": [
+ "layer:basic",
+ "dynamic",
+ "065c4f02ebe7f7cf559e49ee5a95fb800a9e4528727aec6f24402a5374c65013"
+ ],
+ "wheelhouse/MarkupSafe-1.1.1.tar.gz": [
+ "layer:basic",
+ "dynamic",
+ "29872e92839765e546828bb7754a68c418d927cd064fd4708fab9fe9c8bb116b"
+ ],
+ "wheelhouse/PyYAML-5.2.tar.gz": [
+ "layer:basic",
+ "dynamic",
+ "c0ee8eca2c582d29c3c2ec6e2c4f703d1b7f1fb10bc72317355a746057e7346c"
+ ],
+ "wheelhouse/Tempita-0.5.2.tar.gz": [
+ "__pip__",
+ "dynamic",
+ "cacecf0baa674d356641f1d406b8bff1d756d739c46b869a54de515d08e6fc9c"
+ ],
+ "wheelhouse/charmhelpers-0.20.22.tar.gz": [
+ "layer:basic",
+ "dynamic",
+ "b7550108118ce4f87488343384441797777d0da746e1346ed4e6361b4eab0ddb"
+ ],
+ "wheelhouse/charms.reactive-1.4.1.tar.gz": [
+ "layer:basic",
+ "dynamic",
+ "bba21b4fd40b26c240c9ef2aa10c6fdf73592031c68591da4e7ccc46ca9cb616"
+ ],
+ "wheelhouse/click-7.1.2.tar.gz": [
+ "calico",
+ "dynamic",
+ "d2b5255c7c6349bc1bd1e59e08cd12acbbd63ce649f2588755783aa94dfb6b1a"
+ ],
+ "wheelhouse/conctl-py35-0.1.2.tar.gz": [
+ "__pip__",
+ "dynamic",
+ "fad07dd70e04338f2df7fa5a38448223613b87b09a571ea5d2b3c780bb1eca0b"
+ ],
+ "wheelhouse/netaddr-0.7.19.tar.gz": [
+ "layer:basic",
+ "dynamic",
+ "38aeec7cdd035081d3a4c306394b19d677623bf76fa0913f6695127c7753aefd"
+ ],
+ "wheelhouse/pbr-5.6.0.tar.gz": [
+ "__pip__",
+ "dynamic",
+ "42df03e7797b796625b1029c0400279c7c34fd7df24a7d7818a1abb5b38710dd"
+ ],
+ "wheelhouse/pip-18.1.tar.gz": [
+ "layer:basic",
+ "dynamic",
+ "c0a292bd977ef590379a3f05d7b7f65135487b67470f6281289a94e015650ea1"
+ ],
+ "wheelhouse/pyaml-20.4.0.tar.gz": [
+ "__pip__",
+ "dynamic",
+ "29a5c2a68660a799103d6949167bd6c7953d031449d08802386372de1db6ad71"
+ ],
+ "wheelhouse/setuptools-41.6.0.zip": [
+ "layer:basic",
+ "dynamic",
+ "6afa61b391dcd16cb8890ec9f66cc4015a8a31a6e1c2b4e0c464514be1a3d722"
+ ],
+ "wheelhouse/setuptools_scm-1.17.0.tar.gz": [
+ "layer:basic",
+ "dynamic",
+ "70a4cf5584e966ae92f54a764e6437af992ba42ac4bca7eb37cc5d02b98ec40a"
+ ],
+ "wheelhouse/six-1.16.0.tar.gz": [
+ "__pip__",
+ "dynamic",
+ "1e61c37477a1626458e36f7b1d82aa5c9b094fa4802892072e49de9c60c4c926"
+ ],
+ "wheelhouse/wheel-0.33.6.tar.gz": [
+ "layer:basic",
+ "dynamic",
+ "10c9da68765315ed98850f8e048347c3eb06dd81822dc2ab1d4fde9dc9702646"
+ ]
+ }
+}
\ No newline at end of file
diff --git a/calico/.github/workflows/build.yml b/calico/.github/workflows/build.yml
new file mode 100644
index 0000000..043ccad
--- /dev/null
+++ b/calico/.github/workflows/build.yml
@@ -0,0 +1,16 @@
+name: Builds calico charm
+on: [push, pull_request]
+
+jobs:
+ build:
+ name: Build charm
+ runs-on: ubuntu-latest
+ steps:
+ - uses: actions/checkout@v2
+ - name: Setup Python 3.8
+ uses: actions/setup-python@v2
+ with:
+ python-version: '3.8'
+ - name: Run build
+ run: |
+ make charm
diff --git a/calico/.github/workflows/tox.yaml b/calico/.github/workflows/tox.yaml
new file mode 100644
index 0000000..d43940d
--- /dev/null
+++ b/calico/.github/workflows/tox.yaml
@@ -0,0 +1,22 @@
+name: Run tests with Tox
+
+on: [push]
+
+jobs:
+ build:
+
+ runs-on: ubuntu-latest
+ strategy:
+ matrix:
+ python: [3.6, 3.7, 3.8, 3.9]
+
+ steps:
+ - uses: actions/checkout@v2
+ - name: Setup Python
+ uses: actions/setup-python@v1
+ with:
+ python-version: ${{ matrix.python }}
+ - name: Install Tox and any other packages
+ run: pip install tox
+ - name: Run Tox
+ run: tox -e py # Run tox using the version of Python in `PATH`
diff --git a/calico/.gitignore b/calico/.gitignore
new file mode 100644
index 0000000..cc02691
--- /dev/null
+++ b/calico/.gitignore
@@ -0,0 +1,3 @@
+.tox/
+__pycache__/
+*.pyc
diff --git a/calico/.travis/profile-update.yaml b/calico/.travis/profile-update.yaml
new file mode 100644
index 0000000..57f96eb
--- /dev/null
+++ b/calico/.travis/profile-update.yaml
@@ -0,0 +1,12 @@
+config: {}
+description: Default LXD profile - updated
+devices:
+ eth0:
+ name: eth0
+ parent: lxdbr0
+ nictype: bridged
+ type: nic
+ root:
+ path: /
+ pool: default
+ type: disk
diff --git a/calico/CONTRIBUTING.md b/calico/CONTRIBUTING.md
new file mode 100644
index 0000000..158b811
--- /dev/null
+++ b/calico/CONTRIBUTING.md
@@ -0,0 +1,38 @@
+
+# Contributor Guide
+
+This Juju charm is open source ([Apache License 2.0](./LICENSE)) and we actively seek any community contributions
+for code, suggestions and documentation.
+This page details a few notes, workflows and suggestions for how to make contributions most effective and help us
+all build a better charm - please give them a read before working on any contributions.
+
+## Licensing
+
+This charm has been created under the [Apache License 2.0](./LICENSE), which will cover any contributions you may
+make to this project. Please familiarise yourself with the terms of the license.
+
+Additionally, this charm uses the Harmony CLA agreement. It’s the easiest way for you to give us permission to
+use your contributions.
+In effect, you’re giving us a license, but you still own the copyright — so you retain the right to modify your
+code and use it in other projects. Please [sign the CLA here](https://ubuntu.com/legal/contributors/agreement) before
+making any contributions.
+
+## Code of conduct
+We have adopted the Ubuntu Code of Conduct. You can read this in full [here](https://ubuntu.com/community/code-of-conduct).
+
+## Contributing code
+
+The [DEVELOPING.md](./DEVELOPING.md) page has some useful information regarding building and testing. To contribute code
+to this project, the workflow is as follows:
+
+1. [Submit a bug](https://bugs.launchpad.net/charm-calico/+filebug) to explain the need for and track the change.
+2. Create a branch on your fork of the repo with your changes, including a unit test covering the new or modified code.
+3. Submit a PR. The PR description should include a link to the bug on Launchpad.
+4. Update the Launchpad bug to include a link to the PR and the `review-needed` tag.
+5. Once reviewed and merged, the change will become available on the edge channel and assigned to an appropriate milestone
+ for further release according to priority.
+
+## Documentation
+
+Documentation for this charm is currently maintained as part of the Charmed Kubernetes docs.
+See [this page](https://github.com/charmed-kubernetes/kubernetes-docs/blob/master/pages/k8s/charm-calico.md)
\ No newline at end of file
diff --git a/calico/DEVELOPING.md b/calico/DEVELOPING.md
new file mode 100644
index 0000000..391c15b
--- /dev/null
+++ b/calico/DEVELOPING.md
@@ -0,0 +1,62 @@
+# Developing layer-calico
+
+## Installing build dependencies
+
+To install build dependencies:
+
+```
+sudo snap install charm --classic
+sudo apt install docker.io
+sudo usermod -aG docker $USER
+```
+
+After running these commands, terminate your shell session and start a new one
+to pick up the modified user groups.
+
+## Building the charm
+
+To build the charm:
+```
+charm build
+```
+
+By default, this will build the charm and place it in
+`/tmp/charm-builds/calico`.
+
+## Building resources
+
+To build resources:
+```
+./build-calico-resource.sh
+```
+
+This will produce several .tar.gz files that you will need to attach to the
+charm when you deploy it.
+
+## Testing
+
+You can test a locally built calico charm by deploying it with Charmed
+Kubernetes.
+
+Create a file named `local-calico.yaml` that contains the following (with paths
+adjusted to fit your environment):
+```
+applications:
+ calico:
+ charm: /tmp/charm-builds/calico
+ resources:
+ calico: /path/to/layer-calico/calico-amd64.tar.gz
+ calico-upgrade: /path/to/layer-calico/calico-upgrade-amd64.tar.gz
+```
+
+Then deploy Charmed Kubernetes with your locally built calico charm:
+
+```
+juju deploy cs:~containers/kubernetes-calico --overlay local-calico.yaml
+```
+
+## Helpful links
+
+* [Getting Started with charm development](https://jaas.ai/docs/getting-started-with-charm-development)
+* [Charm tools documentation](https://jaas.ai/docs/charm-tools)
+* [Charmed Kubernetes Calico documentation](https://ubuntu.com/kubernetes/docs/cni-calico)
diff --git a/calico/LICENSE b/calico/LICENSE
new file mode 100644
index 0000000..7a4a3ea
--- /dev/null
+++ b/calico/LICENSE
@@ -0,0 +1,202 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
\ No newline at end of file
diff --git a/calico/Makefile b/calico/Makefile
new file mode 100644
index 0000000..9348753
--- /dev/null
+++ b/calico/Makefile
@@ -0,0 +1,18 @@
+CHANNEL ?= unpublished
+CHARM := calico
+
+setup-env:
+ bash script/bootstrap
+
+charm: setup-env
+ bash script/build
+
+upload:
+ifndef NAMESPACE
+ $(error NAMESPACE is not set)
+endif
+
+ env CHARM=$(CHARM) NAMESPACE=$(NAMESPACE) CHANNEL=$(CHANNEL) bash script/upload
+
+.PHONY: all charm upload setup-env
+all: charm
diff --git a/calico/README.md b/calico/README.md
new file mode 100644
index 0000000..2bf4541
--- /dev/null
+++ b/calico/README.md
@@ -0,0 +1,22 @@
+# Calico Charm
+
+Calico is a new approach to virtual networking and network security for containers,
+VMs, and bare metal services, that provides a rich set of security enforcement
+capabilities running on top of a highly scalable and efficient virtual network fabric.
+
+This charm will deploy calico as a background service, and configure CNI for
+use with calico, on any principal charm that implements the [kubernetes-cni][]
+interface.
+
+This charm is a component of Charmed Kubernetes. For full information,
+please visit the [official Charmed Kubernetes docs](https://www.ubuntu.com/kubernetes/docs/charm-calico).
+
+[kubernetes-cni]: https://github.com/juju-solutions/interface-kubernetes-cni
+
+# Developers
+
+## Build charm
+
+```
+make charm
+```
diff --git a/calico/bin/charm-env b/calico/bin/charm-env
new file mode 100755
index 0000000..d211ce9
--- /dev/null
+++ b/calico/bin/charm-env
@@ -0,0 +1,107 @@
+#!/bin/bash
+
+VERSION="1.0.0"
+
+
+find_charm_dirs() {
+ # Hopefully, $JUJU_CHARM_DIR is set so which venv to use is unambiguous.
+ if [[ -n "$JUJU_CHARM_DIR" || -n "$CHARM_DIR" ]]; then
+ if [[ -z "$JUJU_CHARM_DIR" ]]; then
+ # accept $CHARM_DIR to be more forgiving
+ export JUJU_CHARM_DIR="$CHARM_DIR"
+ fi
+ if [[ -z "$CHARM_DIR" ]]; then
+ # set CHARM_DIR as well to help with backwards compatibility
+ export CHARM_DIR="$JUJU_CHARM_DIR"
+ fi
+ return
+ fi
+ # Try to guess the value for JUJU_CHARM_DIR by looking for a non-subordinate
+ # (because there's got to be at least one principal) charm directory;
+ # if there are several, pick the first by alpha order.
+ agents_dir="/var/lib/juju/agents"
+ if [[ -d "$agents_dir" ]]; then
+ desired_charm="$1"
+ found_charm_dir=""
+ if [[ -n "$desired_charm" ]]; then
+ for charm_dir in $(/bin/ls -d "$agents_dir"/unit-*/charm); do
+ charm_name="$(grep -o '^['\''"]\?name['\''"]\?:.*' $charm_dir/metadata.yaml 2> /dev/null | sed -e 's/.*: *//' -e 's/['\''"]//g')"
+ if [[ "$charm_name" == "$desired_charm" ]]; then
+ if [[ -n "$found_charm_dir" ]]; then
+ >&2 echo "Ambiguous possibilities for JUJU_CHARM_DIR matching '$desired_charm'; please run within a Juju hook context"
+ exit 1
+ fi
+ found_charm_dir="$charm_dir"
+ fi
+ done
+ if [[ -z "$found_charm_dir" ]]; then
+ >&2 echo "Unable to determine JUJU_CHARM_DIR matching '$desired_charm'; please run within a Juju hook context"
+ exit 1
+ fi
+ export JUJU_CHARM_DIR="$found_charm_dir"
+ export CHARM_DIR="$found_charm_dir"
+ return
+ fi
+ # shellcheck disable=SC2126
+ non_subordinates="$(grep -L 'subordinate"\?:.*true' "$agents_dir"/unit-*/charm/metadata.yaml | wc -l)"
+ if [[ "$non_subordinates" -gt 1 ]]; then
+ >&2 echo 'Ambiguous possibilities for JUJU_CHARM_DIR; please use --charm or run within a Juju hook context'
+ exit 1
+ elif [[ "$non_subordinates" -eq 1 ]]; then
+ for charm_dir in $(/bin/ls -d "$agents_dir"/unit-*/charm); do
+ if grep -q 'subordinate"\?:.*true' "$charm_dir/metadata.yaml"; then
+ continue
+ fi
+ export JUJU_CHARM_DIR="$charm_dir"
+ export CHARM_DIR="$charm_dir"
+ return
+ done
+ fi
+ fi
+ >&2 echo 'Unable to determine JUJU_CHARM_DIR; please run within a Juju hook context'
+ exit 1
+}
+
+try_activate_venv() {
+ if [[ -d "$JUJU_CHARM_DIR/../.venv" ]]; then
+ . "$JUJU_CHARM_DIR/../.venv/bin/activate"
+ fi
+}
+
+find_wrapped() {
+ PATH="${PATH/\/usr\/local\/sbin:}" which "$(basename "$0")"
+}
+
+
+if [[ "$1" == "--version" || "$1" == "-v" ]]; then
+ echo "$VERSION"
+ exit 0
+fi
+
+
+# allow --charm option to hint which JUJU_CHARM_DIR to choose when ambiguous
+# NB: --charm option must come first
+# NB: option must be processed outside find_charm_dirs to modify $@
+charm_name=""
+if [[ "$1" == "--charm" ]]; then
+ charm_name="$2"
+ shift; shift
+fi
+
+find_charm_dirs "$charm_name"
+try_activate_venv
+export PYTHONPATH="$JUJU_CHARM_DIR/lib:$PYTHONPATH"
+
+if [[ "$(basename "$0")" == "charm-env" ]]; then
+ # being used as a shebang
+ exec "$@"
+elif [[ "$0" == "$BASH_SOURCE" ]]; then
+ # being invoked as a symlink wrapping something to find in the venv
+ exec "$(find_wrapped)" "$@"
+elif [[ "$(basename "$BASH_SOURCE")" == "charm-env" ]]; then
+ # being sourced directly; do nothing
+ /bin/true
+else
+ # being sourced for wrapped bash helpers
+ . "$(find_wrapped)"
+fi
diff --git a/calico/bin/layer_option b/calico/bin/layer_option
new file mode 100755
index 0000000..3253ef8
--- /dev/null
+++ b/calico/bin/layer_option
@@ -0,0 +1,22 @@
+#!/usr/bin/env python3
+
+import sys
+import argparse
+from charms import layer
+
+
+parser = argparse.ArgumentParser(description='Access layer options.')
+parser.add_argument('section',
+ help='the section, or layer, the option is from')
+parser.add_argument('option',
+ help='the option to access')
+
+args = parser.parse_args()
+value = layer.options.get(args.section, args.option)
+if isinstance(value, bool):
+ sys.exit(0 if value else 1)
+elif isinstance(value, list):
+ for val in value:
+ print(val)
+else:
+ print(value)
diff --git a/calico/build-calico-resource.sh b/calico/build-calico-resource.sh
new file mode 100755
index 0000000..7dfe533
--- /dev/null
+++ b/calico/build-calico-resource.sh
@@ -0,0 +1,112 @@
+#!/bin/bash
+set -eux
+
+# This script will fetch binaries and create resource tarballs for use by
+# charm-[push|release]. The arm64 binaries are not available upstream for
+# v2.6, so we must build them and host them somewhere ourselves. The steps
+# for doing that are documented here:
+#
+# https://gist.github.com/kwmonroe/9b5f8dac2c17f93629a1a3868b22d671
+
+# Supported calico architectures
+arches="amd64 arm64"
+calicoctl_version="v3.10.1"
+calico_cni_version="v3.10.1"
+
+function fetch_and_validate() {
+ # fetch a binary and make sure it's what we expect (executable > 20MB)
+ min_bytes=20000000
+ location="${1-}"
+ if [ -z "${location}" ]; then
+ echo "$0: Missing location parameter for fetch_and_validate"
+ exit 1
+ fi
+
+ # remove everything up until the last slash to get the filename
+ filename=$(echo "${location##*/}")
+ case ${location} in
+ http*)
+ fetch_cmd="wget ${location} -O ./${filename}"
+ ;;
+ *)
+ fetch_cmd="scp ${location} ./${filename}"
+ ;;
+ esac
+ ${fetch_cmd}
+
+ # Make sure we fetched something big enough
+ actual_bytes=$(wc -c < ${filename})
+ if [ $actual_bytes -le $min_bytes ]; then
+ echo "$0: ${filename} should be at least ${min_bytes} bytes"
+ exit 1
+ fi
+
+ # Make sure we fetched a binary
+ if ! file ${filename} 2>&1 | grep -q 'executable'; then
+ echo "$0: ${filename} is not an executable"
+ exit 1
+ fi
+}
+
+for arch in ${arches}; do
+ rm -rf resource-build-$arch
+ mkdir resource-build-$arch
+ pushd resource-build-$arch
+ fetch_and_validate \
+ https://github.com/projectcalico/calicoctl/releases/download/$calicoctl_version/calicoctl-linux-$arch
+ fetch_and_validate \
+ https://github.com/projectcalico/cni-plugin/releases/download/$calico_cni_version/calico-$arch
+ fetch_and_validate \
+ https://github.com/projectcalico/cni-plugin/releases/download/$calico_cni_version/calico-ipam-$arch
+ mv calicoctl-linux-$arch calicoctl
+ mv calico-$arch calico
+ mv calico-ipam-$arch calico-ipam
+
+ chmod +x calicoctl calico calico-ipam
+ tar -zcvf ../calico-$arch.tar.gz .
+
+ popd
+ rm -rf resource-build-$arch
+done
+
+# calico-upgrade resource
+for arch in ${arches}; do
+ rm -rf resource-build-upgrade
+ mkdir resource-build-upgrade
+ pushd resource-build-upgrade
+ if [ $arch = amd64 ]; then
+ fetch_and_validate \
+ https://github.com/projectcalico/calico-upgrade/releases/download/v1.0.5/calico-upgrade
+ chmod +x calico-upgrade
+ elif [ $arch = arm64 ]; then
+ # git clone https://github.com/projectcalico/calico-upgrade repo
+ # pushd repo
+ # git checkout 2de2f7a0f26ef3bb1c2cabf06b2dcbcc2bba1d35 # known good commit
+ # make build ARCH=arm64
+ # popd
+ # mv repo/dist/calico-upgrade-linux-$arch ./calico-upgrade
+
+ # arm64 builds are failing due to an upstream issue:
+ # https://github.com/projectcalico/calico-upgrade/issues/42
+ # For now, we will pull a previously built binary from the charm store.
+ wget https://api.jujucharms.com/charmstore/v5/~containers/calico-698/resource/calico-upgrade-arm64/462 \
+ -O calico-upgrade-arm64.tar.gz
+ tar -xf calico-upgrade-arm64.tar.gz
+ checksum="$(sha256sum calico-upgrade)"
+ if [ "$checksum" != "7a07816c26ad19f526ab2f57353043dabd708a48185268b41493e458c59b797d calico-upgrade" ]; then
+ echo 'ERROR: checksum does not match, aborting'
+ exit 1
+ fi
+ else
+ echo "Unsupported architecture for calico-upgrade: $arch"
+ exit 1
+ fi
+ tar -zcvf ../calico-upgrade-$arch.tar.gz ./calico-upgrade
+ popd
+ rm -rf resource-build-upgrade
+done
+
+# calico-upgrade arm64
+rm -rf resource-build-upgrade-arm64
+
+touch calico-node-image.tar.gz
diff --git a/calico/config.yaml b/calico/config.yaml
new file mode 100644
index 0000000..f0b04bc
--- /dev/null
+++ b/calico/config.yaml
@@ -0,0 +1,145 @@
+"options":
+ "calico-node-image":
+ "type": "string"
+ # Please refer to layer-canal/versioning.md before changing the version below.
+ "default": "rocks.canonical.com:443/cdk/calico/node:v3.10.1"
+ "description": |
+ The image id to use for calico/node.
+ "calico-policy-image":
+ "type": "string"
+ "default": "rocks.canonical.com:443/cdk/calico/kube-controllers:v3.10.1"
+ "description": |
+ The image id to use for calico/kube-controllers.
+ "ipip":
+ "type": "string"
+ "default": "Never"
+ "description": |
+ IPIP encapsulation mode. Must be one of "Always", "CrossSubnet", or "Never".
+ This is incompatible with VXLAN encapsulation. If VXLAN encapsulation is
+ enabled, then this must be set to "Never".
+ "vxlan":
+ "type": "string"
+ "default": "Never"
+ "description": |
+ VXLAN encapsulation mode. Must be one of "Always", "CrossSubnet", or "Never".
+ This is incompatible with IPIP encapsulation. If IPIP encapsulation is
+ enabled, then this must be set to "Never".
+ "veth-mtu":
+ "type": "int"
+ "default": !!null ""
+ "description": |
+ Set veth MTU size. This should be set to the MTU size of the base network.
+
+ If VXLAN is enabled, then the charm will automatically subtract 50 from the
+ specified MTU size.
+
+ If IPIP is enabled, then the charm will automatically subtract 20 from the
+ specified MTU size.
+ "nat-outgoing":
+ "type": "boolean"
+ "default": !!bool "true"
+ "description": |
+ NAT outgoing traffic
+ "cidr":
+ "type": "string"
+ "default": "192.168.0.0/16"
+ "description": |
+ Network CIDR assigned to Calico. This is applied to the default Calico
+ pool, and is also communicated to the Kubernetes charms for use in
+ kube-proxy configuration.
+ "manage-pools":
+ "type": "boolean"
+ "default": !!bool "true"
+ "description": |
+ If true, a default pool is created using the cidr and ipip charm
+ configuration values.
+
+ Warning: When manage-pools is enabled, the charm will delete any pools
+ that are unrecognized.
+ "global-as-number":
+ "type": "int"
+ "default": !!int "64512"
+ "description": |
+ Global AS number.
+ "subnet-as-numbers":
+ "type": "string"
+ "default": "{}"
+ "description": |
+ Mapping of subnets to AS numbers, specified as YAML. Each Calico node
+ will be assigned an AS number based on the entries in this mapping.
+
+ Example value: "{10.0.0.0/24: 64512, 10.0.1.0/24: 64513}"
+
+ If a node's IP matches any of the specified subnets, then the
+ corresponding AS number is used instead of the global one.
+
+ If a node's IP matches no subnets, then the global AS number will be
+ used instead.
+
+ If a node's IP matches multiple subnets, then the most specific subnet
+ will be used, e.g. a /24 subnet will take precedence over a /16.
+ "unit-as-numbers":
+ "type": "string"
+ "default": "{}"
+ "description": |
+ Mapping of unit IDs to AS numbers, specified as YAML. Each Calico node
+ will be assigned an AS number based on the entries in this mapping.
+
+ Example value: "{0: 64512, 1: 64513}"
+
+ This takes precedence over global-as-number and subnet-as-numbers.
+ "node-to-node-mesh":
+ "type": "boolean"
+ "default": !!bool "true"
+ "description": |
+ When enabled, each Calico node will peer with every other Calico node in
+ the cluster.
+ "global-bgp-peers":
+ "type": "string"
+ "default": "[]"
+ "description": |
+ List of global BGP peers. Each BGP peer is specified with an address and
+ an as-number.
+
+ Example value: "[{address: 10.0.0.1, as-number: 65000}, {address: 10.0.0.2, as-number: 65001}]"
+ "subnet-bgp-peers":
+ "type": "string"
+ "default": "{}"
+ "description": |
+ Mapping of subnets to lists of BGP peers. Each BGP peer is specified with
+ an address and an as-number.
+
+ Example value: "{10.0.0.0/24: [{address: 10.0.0.1, as-number: 65000}, {address: 10.0.0.2, as-number: 65001}], 10.0.1.0/24: [{address: 10.0.1.1, as-number: 65002}]}"
+
+ If a node's IP matches multiple subnets, then peerings will be added for
+ each matched subnet.
+ "unit-bgp-peers":
+ "type": "string"
+ "default": "{}"
+ "description": |
+ Mapping of unit IDs to lists of BGP peers. Each BGP peer is specified
+ with an address and an as-number.
+
+ Example value: "{0: [{address: 10.0.0.1, as-number: 65000}, {address: 10.0.0.2, as-number: 65001}], 1: [{address: 10.0.1.1, as-number: 65002}]}"
+ "route-reflector-cluster-ids":
+ "type": "string"
+ "default": "{}"
+ "description": |
+ Mapping of unit IDs to route reflector cluster IDs. Assigning a route
+ reflector cluster ID allows the node to function as a route reflector.
+
+ Example value: "{0: 224.0.0.1, 2: 224.0.0.1}"
+ "ignore-loose-rpf":
+ "type": "boolean"
+ "default": !!bool "false"
+ "description": |
+ Enable or disable IgnoreLooseRPF for Calico Felix. This is only used
+ when rp_filter is set to a value of 2.
+ "disable-vxlan-tx-checksumming":
+ "type": "boolean"
+ "default": !!bool "true"
+ "description": |
+ When set to true, if VXLAN encapsulation is in use, then the charm will
+ disable TX checksumming on the vxlan.calico network interface. This works
+ around an upstream issue in Calico:
+ https://github.com/projectcalico/calico/issues/3145
diff --git a/calico/copyright b/calico/copyright
new file mode 100644
index 0000000..a91bdf1
--- /dev/null
+++ b/calico/copyright
@@ -0,0 +1,16 @@
+Format: http://dep.debian.net/deps/dep5/
+
+Files: *
+Copyright: Copyright 2018, Canonical Ltd., All Rights Reserved.
+License: Apache License 2.0
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+ .
+ http://www.apache.org/licenses/LICENSE-2.0
+ .
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/calico/copyright.layer-basic b/calico/copyright.layer-basic
new file mode 100644
index 0000000..d4fdd18
--- /dev/null
+++ b/calico/copyright.layer-basic
@@ -0,0 +1,16 @@
+Format: http://dep.debian.net/deps/dep5/
+
+Files: *
+Copyright: Copyright 2015-2017, Canonical Ltd., All Rights Reserved.
+License: Apache License 2.0
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+ .
+ http://www.apache.org/licenses/LICENSE-2.0
+ .
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/calico/copyright.layer-leadership b/calico/copyright.layer-leadership
new file mode 100644
index 0000000..08b983f
--- /dev/null
+++ b/calico/copyright.layer-leadership
@@ -0,0 +1,15 @@
+Copyright 2015-2016 Canonical Ltd.
+
+This file is part of the Leadership Layer for Juju.
+
+This program is free software: you can redistribute it and/or modify
+it under the terms of the GNU General Public License version 3, as
+published by the Free Software Foundation.
+
+This program is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranties of
+MERCHANTABILITY, SATISFACTORY QUALITY, or FITNESS FOR A PARTICULAR
+PURPOSE. See the GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with this program. If not, see <http://www.gnu.org/licenses/>.
diff --git a/calico/copyright.layer-options b/calico/copyright.layer-options
new file mode 100644
index 0000000..d4fdd18
--- /dev/null
+++ b/calico/copyright.layer-options
@@ -0,0 +1,16 @@
+Format: http://dep.debian.net/deps/dep5/
+
+Files: *
+Copyright: Copyright 2015-2017, Canonical Ltd., All Rights Reserved.
+License: Apache License 2.0
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+ .
+ http://www.apache.org/licenses/LICENSE-2.0
+ .
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/calico/docs/status.md b/calico/docs/status.md
new file mode 100644
index 0000000..c6cceab
--- /dev/null
+++ b/calico/docs/status.md
@@ -0,0 +1,91 @@
+
+
+```python
+maintenance(message)
+```
+
+Set the status to the `MAINTENANCE` state with the given operator message.
+
+__Parameters__
+
+- __`message` (str)__: Message to convey to the operator.
+
+
+### maint
+
+```python
+maint(message)
+```
+
+Shorthand alias for
+[maintenance](status.md#charms.layer.status.maintenance).
+
+__Parameters__
+
+- __`message` (str)__: Message to convey to the operator.
+
+
+### blocked
+
+```python
+blocked(message)
+```
+
+Set the status to the `BLOCKED` state with the given operator message.
+
+__Parameters__
+
+- __`message` (str)__: Message to convey to the operator.
+
+
+### waiting
+
+```python
+waiting(message)
+```
+
+Set the status to the `WAITING` state with the given operator message.
+
+__Parameters__
+
+- __`message` (str)__: Message to convey to the operator.
+
+
+### active
+
+```python
+active(message)
+```
+
+Set the status to the `ACTIVE` state with the given operator message.
+
+__Parameters__
+
+- __`message` (str)__: Message to convey to the operator.
+
+
+### status_set
+
+```python
+status_set(workload_state, message)
+```
+
+Set the status to the given workload state with a message.
+
+__Parameters__
+
+- __`workload_state` (WorkloadState or str)__: State of the workload. Should be
+ a [WorkloadState](status.md#charms.layer.status.WorkloadState) enum
+ member, or the string value of one of those members.
+- __`message` (str)__: Message to convey to the operator.
+
diff --git a/calico/exec.d/docker-compose/charm-pre-install b/calico/exec.d/docker-compose/charm-pre-install
new file mode 100644
index 0000000..2c724c7
--- /dev/null
+++ b/calico/exec.d/docker-compose/charm-pre-install
@@ -0,0 +1,4 @@
+# This stubs out charm-pre-install coming from layer-docker as a workaround for
+# offline installs until https://github.com/juju/charm-tools/issues/301 is fixed.
+
+
diff --git a/calico/hooks/cni-relation-broken b/calico/hooks/cni-relation-broken
new file mode 100755
index 0000000..9858c6b
--- /dev/null
+++ b/calico/hooks/cni-relation-broken
@@ -0,0 +1,22 @@
+#!/usr/bin/env python3
+
+# Load modules from $JUJU_CHARM_DIR/lib
+import sys
+sys.path.append('lib')
+
+from charms.layer import basic # noqa
+basic.bootstrap_charm_deps()
+
+from charmhelpers.core import hookenv # noqa
+hookenv.atstart(basic.init_config_states)
+hookenv.atexit(basic.clear_config_states)
+
+
+# This will load and run the appropriate @hook and other decorated
+# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive,
+# and $JUJU_CHARM_DIR/hooks/relations.
+#
+# See https://jujucharms.com/docs/stable/authors-charm-building
+# for more information on this pattern.
+from charms.reactive import main # noqa
+main()
diff --git a/calico/hooks/cni-relation-changed b/calico/hooks/cni-relation-changed
new file mode 100755
index 0000000..9858c6b
--- /dev/null
+++ b/calico/hooks/cni-relation-changed
@@ -0,0 +1,22 @@
+#!/usr/bin/env python3
+
+# Load modules from $JUJU_CHARM_DIR/lib
+import sys
+sys.path.append('lib')
+
+from charms.layer import basic # noqa
+basic.bootstrap_charm_deps()
+
+from charmhelpers.core import hookenv # noqa
+hookenv.atstart(basic.init_config_states)
+hookenv.atexit(basic.clear_config_states)
+
+
+# This will load and run the appropriate @hook and other decorated
+# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive,
+# and $JUJU_CHARM_DIR/hooks/relations.
+#
+# See https://jujucharms.com/docs/stable/authors-charm-building
+# for more information on this pattern.
+from charms.reactive import main # noqa
+main()
diff --git a/calico/hooks/cni-relation-created b/calico/hooks/cni-relation-created
new file mode 100755
index 0000000..9858c6b
--- /dev/null
+++ b/calico/hooks/cni-relation-created
@@ -0,0 +1,22 @@
+#!/usr/bin/env python3
+
+# Load modules from $JUJU_CHARM_DIR/lib
+import sys
+sys.path.append('lib')
+
+from charms.layer import basic # noqa
+basic.bootstrap_charm_deps()
+
+from charmhelpers.core import hookenv # noqa
+hookenv.atstart(basic.init_config_states)
+hookenv.atexit(basic.clear_config_states)
+
+
+# This will load and run the appropriate @hook and other decorated
+# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive,
+# and $JUJU_CHARM_DIR/hooks/relations.
+#
+# See https://jujucharms.com/docs/stable/authors-charm-building
+# for more information on this pattern.
+from charms.reactive import main # noqa
+main()
diff --git a/calico/hooks/cni-relation-departed b/calico/hooks/cni-relation-departed
new file mode 100755
index 0000000..9858c6b
--- /dev/null
+++ b/calico/hooks/cni-relation-departed
@@ -0,0 +1,22 @@
+#!/usr/bin/env python3
+
+# Load modules from $JUJU_CHARM_DIR/lib
+import sys
+sys.path.append('lib')
+
+from charms.layer import basic # noqa
+basic.bootstrap_charm_deps()
+
+from charmhelpers.core import hookenv # noqa
+hookenv.atstart(basic.init_config_states)
+hookenv.atexit(basic.clear_config_states)
+
+
+# This will load and run the appropriate @hook and other decorated
+# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive,
+# and $JUJU_CHARM_DIR/hooks/relations.
+#
+# See https://jujucharms.com/docs/stable/authors-charm-building
+# for more information on this pattern.
+from charms.reactive import main # noqa
+main()
diff --git a/calico/hooks/cni-relation-joined b/calico/hooks/cni-relation-joined
new file mode 100755
index 0000000..9858c6b
--- /dev/null
+++ b/calico/hooks/cni-relation-joined
@@ -0,0 +1,22 @@
+#!/usr/bin/env python3
+
+# Load modules from $JUJU_CHARM_DIR/lib
+import sys
+sys.path.append('lib')
+
+from charms.layer import basic # noqa
+basic.bootstrap_charm_deps()
+
+from charmhelpers.core import hookenv # noqa
+hookenv.atstart(basic.init_config_states)
+hookenv.atexit(basic.clear_config_states)
+
+
+# This will load and run the appropriate @hook and other decorated
+# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive,
+# and $JUJU_CHARM_DIR/hooks/relations.
+#
+# See https://jujucharms.com/docs/stable/authors-charm-building
+# for more information on this pattern.
+from charms.reactive import main # noqa
+main()
diff --git a/calico/hooks/config-changed b/calico/hooks/config-changed
new file mode 100755
index 0000000..9858c6b
--- /dev/null
+++ b/calico/hooks/config-changed
@@ -0,0 +1,22 @@
+#!/usr/bin/env python3
+
+# Load modules from $JUJU_CHARM_DIR/lib
+import sys
+sys.path.append('lib')
+
+from charms.layer import basic # noqa
+basic.bootstrap_charm_deps()
+
+from charmhelpers.core import hookenv # noqa
+hookenv.atstart(basic.init_config_states)
+hookenv.atexit(basic.clear_config_states)
+
+
+# This will load and run the appropriate @hook and other decorated
+# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive,
+# and $JUJU_CHARM_DIR/hooks/relations.
+#
+# See https://jujucharms.com/docs/stable/authors-charm-building
+# for more information on this pattern.
+from charms.reactive import main # noqa
+main()
diff --git a/calico/hooks/etcd-relation-broken b/calico/hooks/etcd-relation-broken
new file mode 100755
index 0000000..9858c6b
--- /dev/null
+++ b/calico/hooks/etcd-relation-broken
@@ -0,0 +1,22 @@
+#!/usr/bin/env python3
+
+# Load modules from $JUJU_CHARM_DIR/lib
+import sys
+sys.path.append('lib')
+
+from charms.layer import basic # noqa
+basic.bootstrap_charm_deps()
+
+from charmhelpers.core import hookenv # noqa
+hookenv.atstart(basic.init_config_states)
+hookenv.atexit(basic.clear_config_states)
+
+
+# This will load and run the appropriate @hook and other decorated
+# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive,
+# and $JUJU_CHARM_DIR/hooks/relations.
+#
+# See https://jujucharms.com/docs/stable/authors-charm-building
+# for more information on this pattern.
+from charms.reactive import main # noqa
+main()
diff --git a/calico/hooks/etcd-relation-changed b/calico/hooks/etcd-relation-changed
new file mode 100755
index 0000000..9858c6b
--- /dev/null
+++ b/calico/hooks/etcd-relation-changed
@@ -0,0 +1,22 @@
+#!/usr/bin/env python3
+
+# Load modules from $JUJU_CHARM_DIR/lib
+import sys
+sys.path.append('lib')
+
+from charms.layer import basic # noqa
+basic.bootstrap_charm_deps()
+
+from charmhelpers.core import hookenv # noqa
+hookenv.atstart(basic.init_config_states)
+hookenv.atexit(basic.clear_config_states)
+
+
+# This will load and run the appropriate @hook and other decorated
+# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive,
+# and $JUJU_CHARM_DIR/hooks/relations.
+#
+# See https://jujucharms.com/docs/stable/authors-charm-building
+# for more information on this pattern.
+from charms.reactive import main # noqa
+main()
diff --git a/calico/hooks/etcd-relation-created b/calico/hooks/etcd-relation-created
new file mode 100755
index 0000000..9858c6b
--- /dev/null
+++ b/calico/hooks/etcd-relation-created
@@ -0,0 +1,22 @@
+#!/usr/bin/env python3
+
+# Load modules from $JUJU_CHARM_DIR/lib
+import sys
+sys.path.append('lib')
+
+from charms.layer import basic # noqa
+basic.bootstrap_charm_deps()
+
+from charmhelpers.core import hookenv # noqa
+hookenv.atstart(basic.init_config_states)
+hookenv.atexit(basic.clear_config_states)
+
+
+# This will load and run the appropriate @hook and other decorated
+# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive,
+# and $JUJU_CHARM_DIR/hooks/relations.
+#
+# See https://jujucharms.com/docs/stable/authors-charm-building
+# for more information on this pattern.
+from charms.reactive import main # noqa
+main()
diff --git a/calico/hooks/etcd-relation-departed b/calico/hooks/etcd-relation-departed
new file mode 100755
index 0000000..9858c6b
--- /dev/null
+++ b/calico/hooks/etcd-relation-departed
@@ -0,0 +1,22 @@
+#!/usr/bin/env python3
+
+# Load modules from $JUJU_CHARM_DIR/lib
+import sys
+sys.path.append('lib')
+
+from charms.layer import basic # noqa
+basic.bootstrap_charm_deps()
+
+from charmhelpers.core import hookenv # noqa
+hookenv.atstart(basic.init_config_states)
+hookenv.atexit(basic.clear_config_states)
+
+
+# This will load and run the appropriate @hook and other decorated
+# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive,
+# and $JUJU_CHARM_DIR/hooks/relations.
+#
+# See https://jujucharms.com/docs/stable/authors-charm-building
+# for more information on this pattern.
+from charms.reactive import main # noqa
+main()
diff --git a/calico/hooks/etcd-relation-joined b/calico/hooks/etcd-relation-joined
new file mode 100755
index 0000000..9858c6b
--- /dev/null
+++ b/calico/hooks/etcd-relation-joined
@@ -0,0 +1,22 @@
+#!/usr/bin/env python3
+
+# Load modules from $JUJU_CHARM_DIR/lib
+import sys
+sys.path.append('lib')
+
+from charms.layer import basic # noqa
+basic.bootstrap_charm_deps()
+
+from charmhelpers.core import hookenv # noqa
+hookenv.atstart(basic.init_config_states)
+hookenv.atexit(basic.clear_config_states)
+
+
+# This will load and run the appropriate @hook and other decorated
+# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive,
+# and $JUJU_CHARM_DIR/hooks/relations.
+#
+# See https://jujucharms.com/docs/stable/authors-charm-building
+# for more information on this pattern.
+from charms.reactive import main # noqa
+main()
diff --git a/calico/hooks/hook.template b/calico/hooks/hook.template
new file mode 100644
index 0000000..9858c6b
--- /dev/null
+++ b/calico/hooks/hook.template
@@ -0,0 +1,22 @@
+#!/usr/bin/env python3
+
+# Load modules from $JUJU_CHARM_DIR/lib
+import sys
+sys.path.append('lib')
+
+from charms.layer import basic # noqa
+basic.bootstrap_charm_deps()
+
+from charmhelpers.core import hookenv # noqa
+hookenv.atstart(basic.init_config_states)
+hookenv.atexit(basic.clear_config_states)
+
+
+# This will load and run the appropriate @hook and other decorated
+# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive,
+# and $JUJU_CHARM_DIR/hooks/relations.
+#
+# See https://jujucharms.com/docs/stable/authors-charm-building
+# for more information on this pattern.
+from charms.reactive import main # noqa
+main()
diff --git a/calico/hooks/install b/calico/hooks/install
new file mode 100755
index 0000000..9858c6b
--- /dev/null
+++ b/calico/hooks/install
@@ -0,0 +1,22 @@
+#!/usr/bin/env python3
+
+# Load modules from $JUJU_CHARM_DIR/lib
+import sys
+sys.path.append('lib')
+
+from charms.layer import basic # noqa
+basic.bootstrap_charm_deps()
+
+from charmhelpers.core import hookenv # noqa
+hookenv.atstart(basic.init_config_states)
+hookenv.atexit(basic.clear_config_states)
+
+
+# This will load and run the appropriate @hook and other decorated
+# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive,
+# and $JUJU_CHARM_DIR/hooks/relations.
+#
+# See https://jujucharms.com/docs/stable/authors-charm-building
+# for more information on this pattern.
+from charms.reactive import main # noqa
+main()
diff --git a/calico/hooks/leader-elected b/calico/hooks/leader-elected
new file mode 100755
index 0000000..9858c6b
--- /dev/null
+++ b/calico/hooks/leader-elected
@@ -0,0 +1,22 @@
+#!/usr/bin/env python3
+
+# Load modules from $JUJU_CHARM_DIR/lib
+import sys
+sys.path.append('lib')
+
+from charms.layer import basic # noqa
+basic.bootstrap_charm_deps()
+
+from charmhelpers.core import hookenv # noqa
+hookenv.atstart(basic.init_config_states)
+hookenv.atexit(basic.clear_config_states)
+
+
+# This will load and run the appropriate @hook and other decorated
+# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive,
+# and $JUJU_CHARM_DIR/hooks/relations.
+#
+# See https://jujucharms.com/docs/stable/authors-charm-building
+# for more information on this pattern.
+from charms.reactive import main # noqa
+main()
diff --git a/calico/hooks/leader-settings-changed b/calico/hooks/leader-settings-changed
new file mode 100755
index 0000000..9858c6b
--- /dev/null
+++ b/calico/hooks/leader-settings-changed
@@ -0,0 +1,22 @@
+#!/usr/bin/env python3
+
+# Load modules from $JUJU_CHARM_DIR/lib
+import sys
+sys.path.append('lib')
+
+from charms.layer import basic # noqa
+basic.bootstrap_charm_deps()
+
+from charmhelpers.core import hookenv # noqa
+hookenv.atstart(basic.init_config_states)
+hookenv.atexit(basic.clear_config_states)
+
+
+# This will load and run the appropriate @hook and other decorated
+# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive,
+# and $JUJU_CHARM_DIR/hooks/relations.
+#
+# See https://jujucharms.com/docs/stable/authors-charm-building
+# for more information on this pattern.
+from charms.reactive import main # noqa
+main()
diff --git a/calico/hooks/post-series-upgrade b/calico/hooks/post-series-upgrade
new file mode 100755
index 0000000..9858c6b
--- /dev/null
+++ b/calico/hooks/post-series-upgrade
@@ -0,0 +1,22 @@
+#!/usr/bin/env python3
+
+# Load modules from $JUJU_CHARM_DIR/lib
+import sys
+sys.path.append('lib')
+
+from charms.layer import basic # noqa
+basic.bootstrap_charm_deps()
+
+from charmhelpers.core import hookenv # noqa
+hookenv.atstart(basic.init_config_states)
+hookenv.atexit(basic.clear_config_states)
+
+
+# This will load and run the appropriate @hook and other decorated
+# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive,
+# and $JUJU_CHARM_DIR/hooks/relations.
+#
+# See https://jujucharms.com/docs/stable/authors-charm-building
+# for more information on this pattern.
+from charms.reactive import main # noqa
+main()
diff --git a/calico/hooks/pre-series-upgrade b/calico/hooks/pre-series-upgrade
new file mode 100755
index 0000000..9858c6b
--- /dev/null
+++ b/calico/hooks/pre-series-upgrade
@@ -0,0 +1,22 @@
+#!/usr/bin/env python3
+
+# Load modules from $JUJU_CHARM_DIR/lib
+import sys
+sys.path.append('lib')
+
+from charms.layer import basic # noqa
+basic.bootstrap_charm_deps()
+
+from charmhelpers.core import hookenv # noqa
+hookenv.atstart(basic.init_config_states)
+hookenv.atexit(basic.clear_config_states)
+
+
+# This will load and run the appropriate @hook and other decorated
+# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive,
+# and $JUJU_CHARM_DIR/hooks/relations.
+#
+# See https://jujucharms.com/docs/stable/authors-charm-building
+# for more information on this pattern.
+from charms.reactive import main # noqa
+main()
diff --git a/calico/hooks/relations/etcd/.gitignore b/calico/hooks/relations/etcd/.gitignore
new file mode 100644
index 0000000..e43b0f9
--- /dev/null
+++ b/calico/hooks/relations/etcd/.gitignore
@@ -0,0 +1 @@
+.DS_Store
diff --git a/calico/hooks/relations/etcd/README.md b/calico/hooks/relations/etcd/README.md
new file mode 100644
index 0000000..9ed51dd
--- /dev/null
+++ b/calico/hooks/relations/etcd/README.md
@@ -0,0 +1,89 @@
+# Overview
+
+This interface layer handles the communication with Etcd via the `etcd`
+interface.
+
+# Usage
+
+## Requires
+
+This interface layer will set the following states, as appropriate:
+
+ * `{relation_name}.connected` The relation is established, but Etcd may not
+ yet have provided any connection or service information.
+
+ * `{relation_name}.available` Etcd has provided its connection string
+ information, and is ready to serve as a KV store.
+ The provided information can be accessed via the following methods:
+ * `etcd.get_connection_string()`
+ * `etcd.get_version()`
+ * `{relation_name}.tls.available` Etcd has provided the connection string
+ information, and the tls client credentials to communicate with it.
+ The client credentials can be accessed via:
+ * `{relation_name}.get_client_credentials()` returning a dictionary of
+    the client certificate, key and CA.
+ * `{relation_name}.save_client_credentials(key, cert, ca)` is a convenience
+ method to save the client certificate, key and CA to files of your
+ choosing.
+
+
+For example, a common application for this is configuring an application's
+backend key/value storage, like Docker.
+
+```python
+@when('etcd.available', 'docker.available')
+def swarm_etcd_cluster_setup(etcd):
+ con_string = etcd.connection_string().replace('http', 'etcd')
+ opts = {}
+ opts['connection_string'] = con_string
+ render('docker-compose.yml', 'files/swarm/docker-compose.yml', opts)
+
+```
+
+
+## Provides
+
+A charm providing this interface is providing the Etcd rest api service.
+
+This interface layer will set the following states, as appropriate:
+
+ * `{relation_name}.connected` One or more clients of any type have
+ been related. The charm should call the following methods to provide the
+ appropriate information to the clients:
+
+ * `{relation_name}.set_connection_string(string, version)`
+ * `{relation_name}.set_client_credentials(key, cert, ca)`
+
+Example:
+
+```python
+@when('db.connected')
+def send_connection_details(db):
+ cert = leader_get('client_certificate')
+ key = leader_get('client_key')
+ ca = leader_get('certificate_authority')
+ # Set the key, cert, and ca on the db relation
+ db.set_client_credentials(key, cert, ca)
+
+ port = hookenv.config().get('port')
+ # Get all the peers participating in the cluster relation.
+ addresses = cluster.get_peer_addresses()
+ connections = []
+ for address in addresses:
+ connections.append('http://{0}:{1}'.format(address, port))
+ # Set the connection string on the db relation.
+    db.set_connection_string(','.join(connections))
+```
+
+
+# Contact Information
+
+### Maintainer
+- Charles Butler
+
+
+# Etcd
+
+- [Etcd](https://coreos.com/etcd/) home page
+- [Etcd bug trackers](https://github.com/coreos/etcd/issues)
+- [Etcd Juju Charm](http://jujucharms.com/?text=etcd)
diff --git a/calico/hooks/relations/etcd/__init__.py b/calico/hooks/relations/etcd/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/calico/hooks/relations/etcd/interface.yaml b/calico/hooks/relations/etcd/interface.yaml
new file mode 100644
index 0000000..929b1d5
--- /dev/null
+++ b/calico/hooks/relations/etcd/interface.yaml
@@ -0,0 +1,4 @@
+name: etcd
+summary: Interface for relating to ETCD
+version: 2
+maintainer: "Charles Butler "
diff --git a/calico/hooks/relations/etcd/peers.py b/calico/hooks/relations/etcd/peers.py
new file mode 100644
index 0000000..90980d1
--- /dev/null
+++ b/calico/hooks/relations/etcd/peers.py
@@ -0,0 +1,70 @@
+#!/usr/bin/python
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from charms.reactive import RelationBase
+from charms.reactive import hook
+from charms.reactive import scopes
+
+
+class EtcdPeer(RelationBase):
+ '''This class handles peer relation communication by setting states that
+ the reactive code can respond to. '''
+
+ scope = scopes.UNIT
+
+ @hook('{peers:etcd}-relation-joined')
+ def peer_joined(self):
+ '''A new peer has joined, set the state on the unit so we can track
+ when they are departed. '''
+ conv = self.conversation()
+ conv.set_state('{relation_name}.joined')
+
+ @hook('{peers:etcd}-relation-departed')
+ def peers_going_away(self):
+ '''Trigger a state on the unit that it is leaving. We can use this
+ state in conjunction with the joined state to determine which unit to
+ unregister from the etcd cluster. '''
+ conv = self.conversation()
+ conv.remove_state('{relation_name}.joined')
+ conv.set_state('{relation_name}.departing')
+
+ def dismiss(self):
+ '''Remove the departing state from all other units in the conversation,
+ and we can resume normal operation.
+ '''
+ for conv in self.conversations():
+ conv.remove_state('{relation_name}.departing')
+
+ def get_peers(self):
+ '''Return a list of names for the peers participating in this
+ conversation scope. '''
+ peers = []
+ # Iterate over all the conversations of this type.
+ for conversation in self.conversations():
+ peers.append(conversation.scope)
+ return peers
+
+ def set_db_ingress_address(self, address):
+ '''Set the ingress address belonging to the db relation.'''
+ for conversation in self.conversations():
+ conversation.set_remote('db-ingress-address', address)
+
+ def get_db_ingress_addresses(self):
+ '''Return a list of db ingress addresses'''
+ addresses = []
+ # Iterate over all the conversations of this type.
+ for conversation in self.conversations():
+ address = conversation.get_remote('db-ingress-address')
+ if address:
+ addresses.append(address)
+ return addresses
diff --git a/calico/hooks/relations/etcd/provides.py b/calico/hooks/relations/etcd/provides.py
new file mode 100644
index 0000000..3cfc174
--- /dev/null
+++ b/calico/hooks/relations/etcd/provides.py
@@ -0,0 +1,47 @@
+#!/usr/bin/python
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from charms.reactive import RelationBase
+from charms.reactive import hook
+from charms.reactive import scopes
+
+
+class EtcdProvider(RelationBase):
+ scope = scopes.GLOBAL
+
+ @hook('{provides:etcd}-relation-{joined,changed}')
+ def joined_or_changed(self):
+ ''' Set the connected state from the provides side of the relation. '''
+ self.set_state('{relation_name}.connected')
+
+ @hook('{provides:etcd}-relation-{broken,departed}')
+ def broken_or_departed(self):
+ '''Remove connected state from the provides side of the relation. '''
+ conv = self.conversation()
+ if len(conv.units) == 1:
+ conv.remove_state('{relation_name}.connected')
+
+ def set_client_credentials(self, key, cert, ca):
+ ''' Set the client credentials on the global conversation for this
+ relation. '''
+ self.set_remote('client_key', key)
+ self.set_remote('client_ca', ca)
+ self.set_remote('client_cert', cert)
+
+ def set_connection_string(self, connection_string, version=''):
+ ''' Set the connection string on the global conversation for this
+ relation. '''
+ # Note: Version added as a late-dependency for 2 => 3 migration
+ # If no version is specified, consumers should presume etcd 2.x
+ self.set_remote('connection_string', connection_string)
+ self.set_remote('version', version)
diff --git a/calico/hooks/relations/etcd/requires.py b/calico/hooks/relations/etcd/requires.py
new file mode 100644
index 0000000..435532f
--- /dev/null
+++ b/calico/hooks/relations/etcd/requires.py
@@ -0,0 +1,80 @@
+#!/usr/bin/python
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import os
+
+from charms.reactive import RelationBase
+from charms.reactive import hook
+from charms.reactive import scopes
+
+
+class EtcdClient(RelationBase):
+ scope = scopes.GLOBAL
+
+ @hook('{requires:etcd}-relation-{joined,changed}')
+ def changed(self):
+ ''' Indicate the relation is connected, and if the relation data is
+ set it is also available. '''
+ self.set_state('{relation_name}.connected')
+
+ if self.get_connection_string():
+ self.set_state('{relation_name}.available')
+ # Get the ca, key, cert from the relation data.
+ cert = self.get_client_credentials()
+            # The tls state depends on the existence of the ca, key and cert.
+ if cert['client_cert'] and cert['client_key'] and cert['client_ca']: # noqa
+ self.set_state('{relation_name}.tls.available')
+
+    @hook('{requires:etcd}-relation-{broken,departed}')
+ def broken(self):
+ ''' Indicate the relation is no longer available and not connected. '''
+ self.remove_state('{relation_name}.available')
+ self.remove_state('{relation_name}.connected')
+ self.remove_state('{relation_name}.tls.available')
+
+ def connection_string(self):
+        ''' This method is deprecated but ensures backward compatibility
+        @see get_connection_string(self). '''
+ return self.get_connection_string()
+
+ def get_connection_string(self):
+ ''' Return the connection string, if available, or None. '''
+ return self.get_remote('connection_string')
+
+ def get_version(self):
+        ''' Return the version of the etcd protocol being used, or None. '''
+ return self.get_remote('version')
+
+ def get_client_credentials(self):
+ ''' Return a dict with the client certificate, ca and key to
+ communicate with etcd using tls. '''
+ return {'client_cert': self.get_remote('client_cert'),
+ 'client_key': self.get_remote('client_key'),
+ 'client_ca': self.get_remote('client_ca')}
+
+ def save_client_credentials(self, key, cert, ca):
+ ''' Save all the client certificates for etcd to local files. '''
+ self._save_remote_data('client_cert', cert)
+ self._save_remote_data('client_key', key)
+ self._save_remote_data('client_ca', ca)
+
+ def _save_remote_data(self, key, path):
+ ''' Save the remote data to a file indicated by path creating the
+ parent directory if needed.'''
+ value = self.get_remote(key)
+ if value:
+ parent = os.path.dirname(path)
+ if not os.path.isdir(parent):
+ os.makedirs(parent)
+ with open(path, 'w') as stream:
+ stream.write(value)
diff --git a/calico/hooks/relations/kubernetes-cni/.gitignore b/calico/hooks/relations/kubernetes-cni/.gitignore
new file mode 100644
index 0000000..e43b0f9
--- /dev/null
+++ b/calico/hooks/relations/kubernetes-cni/.gitignore
@@ -0,0 +1 @@
+.DS_Store
diff --git a/calico/hooks/relations/kubernetes-cni/.travis.yml b/calico/hooks/relations/kubernetes-cni/.travis.yml
new file mode 100644
index 0000000..d2be8be
--- /dev/null
+++ b/calico/hooks/relations/kubernetes-cni/.travis.yml
@@ -0,0 +1,9 @@
+language: python
+python:
+ - "3.5"
+ - "3.6"
+ - "3.7"
+install:
+ - pip install tox-travis
+script:
+ - tox
diff --git a/calico/hooks/relations/kubernetes-cni/README.md b/calico/hooks/relations/kubernetes-cni/README.md
new file mode 100644
index 0000000..e69de29
diff --git a/calico/hooks/relations/kubernetes-cni/__init__.py b/calico/hooks/relations/kubernetes-cni/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/calico/hooks/relations/kubernetes-cni/interface.yaml b/calico/hooks/relations/kubernetes-cni/interface.yaml
new file mode 100644
index 0000000..7e3c123
--- /dev/null
+++ b/calico/hooks/relations/kubernetes-cni/interface.yaml
@@ -0,0 +1,6 @@
+name: kubernetes-cni
+summary: Interface for relating various CNI implementations
+version: 0
+maintainer: "George Kraft "
+ignore:
+- tests
diff --git a/calico/hooks/relations/kubernetes-cni/provides.py b/calico/hooks/relations/kubernetes-cni/provides.py
new file mode 100644
index 0000000..0b4aada
--- /dev/null
+++ b/calico/hooks/relations/kubernetes-cni/provides.py
@@ -0,0 +1,85 @@
+#!/usr/bin/python
+
+from charmhelpers.core import hookenv
+from charms.reactive import Endpoint
+from charms.reactive import toggle_flag, is_flag_set, clear_flag, set_flag
+
+
+class CNIPluginProvider(Endpoint):
+ def manage_flags(self):
+ toggle_flag(self.expand_name('{endpoint_name}.connected'),
+ self.is_joined)
+ toggle_flag(self.expand_name('{endpoint_name}.available'),
+ self.config_available())
+ if is_flag_set(self.expand_name('endpoint.{endpoint_name}.changed')):
+ clear_flag(self.expand_name('{endpoint_name}.configured'))
+ clear_flag(self.expand_name('endpoint.{endpoint_name}.changed'))
+
+ def set_config(self, is_master, kubeconfig_path):
+ ''' Relays a dict of kubernetes configuration information. '''
+ for relation in self.relations:
+ relation.to_publish_raw.update({
+ 'is_master': is_master,
+ 'kubeconfig_path': kubeconfig_path
+ })
+ set_flag(self.expand_name('{endpoint_name}.configured'))
+
+ def config_available(self):
+ ''' Ensures all config from the CNI plugin is available. '''
+ goal_state = hookenv.goal_state()
+ related_apps = [
+ app for app in goal_state.get('relations', {}).get(self.endpoint_name, '')
+ if '/' not in app
+ ]
+ if not related_apps:
+ return False
+ configs = self.get_configs()
+ return all(
+ 'cidr' in config and 'cni-conf-file' in config
+ for config in [
+ configs.get(related_app, {}) for related_app in related_apps
+ ]
+ )
+
+ def get_config(self, default=None):
+ ''' Get CNI config for one related application.
+
+ If default is specified, and there is a related application with a
+ matching name, then that application is chosen. Otherwise, the
+ application is chosen alphabetically.
+
+ Whichever application is chosen, that application's CNI config is
+ returned.
+ '''
+ configs = self.get_configs()
+ if not configs:
+ return {}
+ elif default and default not in configs:
+ msg = 'relation not found for default CNI %s, ignoring' % default
+ hookenv.log(msg, level='WARN')
+ return self.get_config()
+ elif default:
+ return configs.get(default, {})
+ else:
+ return configs.get(sorted(configs)[0], {})
+
+ def get_configs(self):
+ ''' Get CNI configs for all related applications.
+
+ This returns a mapping of application names to CNI configs. Here's an
+ example return value:
+ {
+ 'flannel': {
+ 'cidr': '10.1.0.0/16',
+ 'cni-conf-file': '10-flannel.conflist'
+ },
+ 'calico': {
+ 'cidr': '192.168.0.0/16',
+ 'cni-conf-file': '10-calico.conflist'
+ }
+ }
+ '''
+ return {
+ relation.application_name: relation.joined_units.received_raw
+ for relation in self.relations if relation.application_name
+ }
diff --git a/calico/hooks/relations/kubernetes-cni/requires.py b/calico/hooks/relations/kubernetes-cni/requires.py
new file mode 100644
index 0000000..039b912
--- /dev/null
+++ b/calico/hooks/relations/kubernetes-cni/requires.py
@@ -0,0 +1,45 @@
+#!/usr/bin/python
+
+from charms.reactive import Endpoint
+from charms.reactive import when_any, when_not
+from charms.reactive import set_state, remove_state
+
+
+class CNIPluginClient(Endpoint):
+
+ @when_any('endpoint.{endpoint_name}.joined',
+ 'endpoint.{endpoint_name}.changed')
+ def changed(self):
+ ''' Indicate the relation is connected, and if the relation data is
+ set it is also available. '''
+ set_state(self.expand_name('{endpoint_name}.connected'))
+ config = self.get_config()
+ if config['is_master'] == 'True':
+ set_state(self.expand_name('{endpoint_name}.is-master'))
+ set_state(self.expand_name('{endpoint_name}.configured'))
+ elif config['is_master'] == 'False':
+ set_state(self.expand_name('{endpoint_name}.is-worker'))
+ set_state(self.expand_name('{endpoint_name}.configured'))
+ else:
+ remove_state(self.expand_name('{endpoint_name}.configured'))
+ remove_state(self.expand_name('endpoint.{endpoint_name}.changed'))
+
+ @when_not('endpoint.{endpoint_name}.joined')
+ def broken(self):
+ ''' Indicate the relation is no longer available and not connected. '''
+ remove_state(self.expand_name('{endpoint_name}.connected'))
+ remove_state(self.expand_name('{endpoint_name}.is-master'))
+ remove_state(self.expand_name('{endpoint_name}.is-worker'))
+ remove_state(self.expand_name('{endpoint_name}.configured'))
+
+ def get_config(self):
+ ''' Get the kubernetes configuration information. '''
+ return self.all_joined_units.received_raw
+
+ def set_config(self, cidr, cni_conf_file):
+ ''' Sets the CNI configuration information. '''
+ for relation in self.relations:
+ relation.to_publish_raw.update({
+ 'cidr': cidr,
+ 'cni-conf-file': cni_conf_file
+ })
diff --git a/calico/hooks/relations/kubernetes-cni/tox.ini b/calico/hooks/relations/kubernetes-cni/tox.ini
new file mode 100644
index 0000000..077622b
--- /dev/null
+++ b/calico/hooks/relations/kubernetes-cni/tox.ini
@@ -0,0 +1,23 @@
+[tox]
+skipsdist = True
+envlist = lint,py3
+
+[tox:travis]
+3.5: lint,py3
+3.6: lint,py3
+3.7: lint,py3
+
+[testenv]
+basepython = python3
+setenv =
+ PYTHONPATH={toxinidir}:{toxinidir}/lib
+deps =
+ pyyaml
+ pytest
+ flake8
+ ipdb
+commands = pytest --tb native -s {posargs}
+
+[testenv:lint]
+envdir = {toxworkdir}/py3
+commands = flake8 --max-line-length=88 {toxinidir}
diff --git a/calico/hooks/start b/calico/hooks/start
new file mode 100755
index 0000000..9858c6b
--- /dev/null
+++ b/calico/hooks/start
@@ -0,0 +1,22 @@
+#!/usr/bin/env python3
+
+# Load modules from $JUJU_CHARM_DIR/lib
+import sys
+sys.path.append('lib')
+
+from charms.layer import basic # noqa
+basic.bootstrap_charm_deps()
+
+from charmhelpers.core import hookenv # noqa
+hookenv.atstart(basic.init_config_states)
+hookenv.atexit(basic.clear_config_states)
+
+
+# This will load and run the appropriate @hook and other decorated
+# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive,
+# and $JUJU_CHARM_DIR/hooks/relations.
+#
+# See https://jujucharms.com/docs/stable/authors-charm-building
+# for more information on this pattern.
+from charms.reactive import main # noqa
+main()
diff --git a/calico/hooks/stop b/calico/hooks/stop
new file mode 100755
index 0000000..9858c6b
--- /dev/null
+++ b/calico/hooks/stop
@@ -0,0 +1,22 @@
+#!/usr/bin/env python3
+
+# Load modules from $JUJU_CHARM_DIR/lib
+import sys
+sys.path.append('lib')
+
+from charms.layer import basic # noqa
+basic.bootstrap_charm_deps()
+
+from charmhelpers.core import hookenv # noqa
+hookenv.atstart(basic.init_config_states)
+hookenv.atexit(basic.clear_config_states)
+
+
+# This will load and run the appropriate @hook and other decorated
+# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive,
+# and $JUJU_CHARM_DIR/hooks/relations.
+#
+# See https://jujucharms.com/docs/stable/authors-charm-building
+# for more information on this pattern.
+from charms.reactive import main # noqa
+main()
diff --git a/calico/hooks/update-status b/calico/hooks/update-status
new file mode 100755
index 0000000..9858c6b
--- /dev/null
+++ b/calico/hooks/update-status
@@ -0,0 +1,22 @@
+#!/usr/bin/env python3
+
+# Load modules from $JUJU_CHARM_DIR/lib
+import sys
+sys.path.append('lib')
+
+from charms.layer import basic # noqa
+basic.bootstrap_charm_deps()
+
+from charmhelpers.core import hookenv # noqa
+hookenv.atstart(basic.init_config_states)
+hookenv.atexit(basic.clear_config_states)
+
+
+# This will load and run the appropriate @hook and other decorated
+# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive,
+# and $JUJU_CHARM_DIR/hooks/relations.
+#
+# See https://jujucharms.com/docs/stable/authors-charm-building
+# for more information on this pattern.
+from charms.reactive import main # noqa
+main()
diff --git a/calico/hooks/upgrade-charm b/calico/hooks/upgrade-charm
new file mode 100755
index 0000000..9858c6b
--- /dev/null
+++ b/calico/hooks/upgrade-charm
@@ -0,0 +1,22 @@
+#!/usr/bin/env python3
+
+# Load modules from $JUJU_CHARM_DIR/lib
+import sys
+sys.path.append('lib')
+
+from charms.layer import basic # noqa
+basic.bootstrap_charm_deps()
+
+from charmhelpers.core import hookenv # noqa
+hookenv.atstart(basic.init_config_states)
+hookenv.atexit(basic.clear_config_states)
+
+
+# This will load and run the appropriate @hook and other decorated
+# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive,
+# and $JUJU_CHARM_DIR/hooks/relations.
+#
+# See https://jujucharms.com/docs/stable/authors-charm-building
+# for more information on this pattern.
+from charms.reactive import main # noqa
+main()
diff --git a/calico/icon.svg b/calico/icon.svg
new file mode 100644
index 0000000..96ce38e
--- /dev/null
+++ b/calico/icon.svg
@@ -0,0 +1,1378 @@
+
+
diff --git a/calico/layer.yaml b/calico/layer.yaml
new file mode 100644
index 0000000..e1dc12c
--- /dev/null
+++ b/calico/layer.yaml
@@ -0,0 +1,20 @@
+"includes":
+- "layer:options"
+- "interface:etcd"
+- "interface:kubernetes-cni"
+- "layer:basic"
+- "layer:leadership"
+- "layer:status"
+"exclude": [".travis.yml", "tests", "tox.ini", "test-requirements.txt", "unit_tests"]
+"options":
+ "basic":
+ "packages": []
+ "python_packages": []
+ "use_venv": !!bool "true"
+ "include_system_packages": !!bool "false"
+ "leadership": {}
+ "status":
+ "patch-hookenv": !!bool "true"
+ "calico": {}
+"repo": "https://github.com/juju-solutions/layer-calico.git"
+"is": "calico"
diff --git a/calico/lib/calico_common.py b/calico/lib/calico_common.py
new file mode 100644
index 0000000..ffe2165
--- /dev/null
+++ b/calico/lib/calico_common.py
@@ -0,0 +1,10 @@
+from subprocess import check_output
+
+
+def arch():
+ '''Return the package architecture as a string.'''
+ # Get the package architecture for this system.
+ architecture = check_output(['dpkg', '--print-architecture']).rstrip()
+ # Convert the binary result into a string.
+ architecture = architecture.decode('utf-8')
+ return architecture
diff --git a/calico/lib/calico_upgrade.py b/calico/lib/calico_upgrade.py
new file mode 100644
index 0000000..0daaf56
--- /dev/null
+++ b/calico/lib/calico_upgrade.py
@@ -0,0 +1,108 @@
+import os
+import shutil
+import yaml
+from subprocess import check_call, check_output, CalledProcessError
+from calico_common import arch
+from charms.reactive import endpoint_from_flag
+from charmhelpers.core.hookenv import resource_get, status_set, log
+
+CALICOCTL_PATH = '/opt/calicoctl'
+ETCD_KEY_PATH = os.path.join(CALICOCTL_PATH, 'etcd-key')
+ETCD_CERT_PATH = os.path.join(CALICOCTL_PATH, 'etcd-cert')
+ETCD_CA_PATH = os.path.join(CALICOCTL_PATH, 'etcd-ca')
+CALICO_UPGRADE_DIR = '/opt/calico-upgrade'
+ETCD2_DATA_PATH = CALICO_UPGRADE_DIR + '/etcd2.yaml'
+ETCD3_DATA_PATH = CALICO_UPGRADE_DIR + '/etcd3.yaml'
+
+
+class ResourceMissing(Exception):
+ pass
+
+
+class DryRunFailed(Exception):
+ pass
+
+
+def cleanup():
+ shutil.rmtree(CALICO_UPGRADE_DIR, ignore_errors=True)
+
+
+def configure():
+ cleanup()
+ os.makedirs(CALICO_UPGRADE_DIR)
+
+ # Extract calico-upgrade resource
+ architecture = arch()
+ if architecture == 'amd64':
+ resource_name = 'calico-upgrade'
+ else:
+ resource_name = 'calico-upgrade-' + architecture
+ archive = resource_get(resource_name)
+
+ if not archive:
+ message = 'Missing calico-upgrade resource'
+ status_set('blocked', message)
+ raise ResourceMissing(message)
+
+ check_call(['tar', '-xvf', archive, '-C', CALICO_UPGRADE_DIR])
+
+ # Configure calico-upgrade, etcd2 (data source)
+ etcd = endpoint_from_flag('etcd.available')
+ etcd_endpoints = etcd.get_connection_string()
+ etcd2_data = {
+ 'apiVersion': 'v1',
+ 'kind': 'calicoApiConfig',
+ 'metadata': None,
+ 'spec': {
+ 'datastoreType': 'etcdv2',
+ 'etcdEndpoints': etcd_endpoints,
+ 'etcdKeyFile': ETCD_KEY_PATH,
+ 'etcdCertFile': ETCD_CERT_PATH,
+ 'etcdCACertFile': ETCD_CA_PATH
+ }
+ }
+ with open(ETCD2_DATA_PATH, 'w') as f:
+ yaml.dump(etcd2_data, f)
+
+ # Configure calico-upgrade, etcd3 (data destination)
+ etcd3_data = {
+ 'apiVersion': 'projectcalico.org/v3',
+ 'kind': 'CalicoAPIConfig',
+ 'metadata': None,
+ 'spec': {
+ 'datastoreType': 'etcdv3',
+ 'etcdEndpoints': etcd_endpoints,
+ 'etcdKeyFile': ETCD_KEY_PATH,
+ 'etcdCertFile': ETCD_CERT_PATH,
+ 'etcdCACertFile': ETCD_CA_PATH
+ }
+ }
+ with open(ETCD3_DATA_PATH, 'w') as f:
+ yaml.dump(etcd3_data, f)
+
+
+def invoke(*args):
+ cmd = [CALICO_UPGRADE_DIR + '/calico-upgrade'] + list(args)
+ cmd += [
+ '--apiconfigv1', ETCD2_DATA_PATH,
+ '--apiconfigv3', ETCD3_DATA_PATH
+ ]
+ try:
+ return check_output(cmd)
+ except CalledProcessError as e:
+ log(e.output)
+ raise
+
+
+def dry_run():
+ output = invoke('dry-run', '--output-dir', CALICO_UPGRADE_DIR)
+ if b'Successfully validated v1 to v3 conversion' not in output:
+ raise DryRunFailed()
+
+
+def start():
+ invoke('start', '--no-prompts', '--output-dir', CALICO_UPGRADE_DIR)
+
+
+def complete():
+ invoke('complete', '--no-prompts')
diff --git a/calico/lib/charms/layer/__init__.py b/calico/lib/charms/layer/__init__.py
new file mode 100644
index 0000000..a8e0c64
--- /dev/null
+++ b/calico/lib/charms/layer/__init__.py
@@ -0,0 +1,60 @@
+import sys
+from importlib import import_module
+from pathlib import Path
+
+
+def import_layer_libs():
+ """
+ Ensure that all layer libraries are imported.
+
+ This makes it possible to do the following:
+
+ from charms import layer
+
+ layer.foo.do_foo_thing()
+
+ Note: This function must be called after bootstrap.
+ """
+ for module_file in Path('lib/charms/layer').glob('*'):
+ module_name = module_file.stem
+ if module_name in ('__init__', 'basic', 'execd') or not (
+ module_file.suffix == '.py' or module_file.is_dir()
+ ):
+ continue
+ import_module('charms.layer.{}'.format(module_name))
+
+
+# Terrible hack to support the old terrible interface.
+# Try to get people to call layer.options.get() instead so
+# that we can remove this garbage.
+# Cribbed from https://stackoverflow.com/a/48100440/4941864
+class OptionsBackwardsCompatibilityHack(sys.modules[__name__].__class__):
+ def __call__(self, section=None, layer_file=None):
+ if layer_file is None:
+ return self.get(section=section)
+ else:
+ return self.get(section=section,
+ layer_file=Path(layer_file))
+
+
+def patch_options_interface():
+ from charms.layer import options
+ if sys.version_info.minor >= 5:
+ options.__class__ = OptionsBackwardsCompatibilityHack
+ else:
+ # Py 3.4 doesn't support changing the __class__, so we have to do it
+ # another way. The last line is needed because we already have a
+ # reference that doesn't get updated with sys.modules.
+ name = options.__name__
+ hack = OptionsBackwardsCompatibilityHack(name)
+ hack.get = options.get
+ sys.modules[name] = hack
+ sys.modules[__name__].options = hack
+
+
+try:
+ patch_options_interface()
+except ImportError:
+ # This may fail if pyyaml hasn't been installed yet. But in that
+ # case, the bootstrap logic will try it again once it has.
+ pass
diff --git a/calico/lib/charms/layer/basic.py b/calico/lib/charms/layer/basic.py
new file mode 100644
index 0000000..7507203
--- /dev/null
+++ b/calico/lib/charms/layer/basic.py
@@ -0,0 +1,446 @@
+import os
+import sys
+import re
+import shutil
+from distutils.version import LooseVersion
+from pkg_resources import Requirement
+from glob import glob
+from subprocess import check_call, check_output, CalledProcessError
+from time import sleep
+
+from charms import layer
+from charms.layer.execd import execd_preinstall
+
+
+def _get_subprocess_env():
+ env = os.environ.copy()
+ env['LANG'] = env.get('LANG', 'C.UTF-8')
+ return env
+
+
+def get_series():
+ """
+    Return the series for a few known OSes.
+    Tested as of November 2019:
+ * centos6, centos7, rhel6.
+ * bionic
+ """
+ series = ""
+
+ # Looking for content in /etc/os-release
+ # works for ubuntu + some centos
+ if os.path.isfile('/etc/os-release'):
+ d = {}
+ with open('/etc/os-release', 'r') as rel:
+ for l in rel:
+ if not re.match(r'^\s*$', l):
+ k, v = l.split('=')
+ d[k.strip()] = v.strip().replace('"', '')
+ series = "{ID}{VERSION_ID}".format(**d)
+
+ # Looking for content in /etc/redhat-release
+ # works for redhat enterprise systems
+ elif os.path.isfile('/etc/redhat-release'):
+ with open('/etc/redhat-release', 'r') as redhatlsb:
+ # CentOS Linux release 7.7.1908 (Core)
+ line = redhatlsb.readline()
+ release = int(line.split("release")[1].split()[0][0])
+ series = "centos" + str(release)
+
+ # Looking for content in /etc/lsb-release
+ # works for ubuntu
+ elif os.path.isfile('/etc/lsb-release'):
+ d = {}
+ with open('/etc/lsb-release', 'r') as lsb:
+ for l in lsb:
+ k, v = l.split('=')
+ d[k.strip()] = v.strip()
+ series = d['DISTRIB_CODENAME']
+
+    # This is what happens if we can't figure out the OS.
+ else:
+ series = "unknown"
+ return series
+
+
+def bootstrap_charm_deps():
+ """
+ Set up the base charm dependencies so that the reactive system can run.
+ """
+ # execd must happen first, before any attempt to install packages or
+ # access the network, because sites use this hook to do bespoke
+ # configuration and install secrets so the rest of this bootstrap
+ # and the charm itself can actually succeed. This call does nothing
+ # unless the operator has created and populated $JUJU_CHARM_DIR/exec.d.
+ execd_preinstall()
+ # ensure that $JUJU_CHARM_DIR/bin is on the path, for helper scripts
+
+ series = get_series()
+
+ # OMG?! is build-essentials needed?
+ ubuntu_packages = ['python3-pip',
+ 'python3-setuptools',
+ 'python3-yaml',
+ 'python3-dev',
+ 'python3-wheel',
+ 'build-essential']
+
+ # I'm not going to "yum group info "Development Tools"
+ # omitting above madness
+ centos_packages = ['python3-pip',
+ 'python3-setuptools',
+ 'python3-devel',
+ 'python3-wheel']
+
+ packages_needed = []
+ if 'centos' in series:
+ packages_needed = centos_packages
+ else:
+ packages_needed = ubuntu_packages
+
+ charm_dir = os.environ['JUJU_CHARM_DIR']
+ os.environ['PATH'] += ':%s' % os.path.join(charm_dir, 'bin')
+ venv = os.path.abspath('../.venv')
+ vbin = os.path.join(venv, 'bin')
+ vpip = os.path.join(vbin, 'pip')
+ vpy = os.path.join(vbin, 'python')
+ hook_name = os.path.basename(sys.argv[0])
+ is_bootstrapped = os.path.exists('wheelhouse/.bootstrapped')
+ is_charm_upgrade = hook_name == 'upgrade-charm'
+ is_series_upgrade = hook_name == 'post-series-upgrade'
+ is_post_upgrade = os.path.exists('wheelhouse/.upgraded')
+ is_upgrade = (not is_post_upgrade and
+ (is_charm_upgrade or is_series_upgrade))
+ if is_bootstrapped and not is_upgrade:
+ # older subordinates might have downgraded charm-env, so we should
+ # restore it if necessary
+ install_or_update_charm_env()
+ activate_venv()
+ # the .upgrade file prevents us from getting stuck in a loop
+ # when re-execing to activate the venv; at this point, we've
+ # activated the venv, so it's safe to clear it
+ if is_post_upgrade:
+ os.unlink('wheelhouse/.upgraded')
+ return
+ if os.path.exists(venv):
+ try:
+ # focal installs or upgrades prior to PR 160 could leave the venv
+ # in a broken state which would prevent subsequent charm upgrades
+ _load_installed_versions(vpip)
+ except CalledProcessError:
+ is_broken_venv = True
+ else:
+ is_broken_venv = False
+ if is_upgrade or is_broken_venv:
+ # All upgrades should do a full clear of the venv, rather than
+ # just updating it, to bring in updates to Python itself
+ shutil.rmtree(venv)
+ if is_upgrade:
+ if os.path.exists('wheelhouse/.bootstrapped'):
+ os.unlink('wheelhouse/.bootstrapped')
+ # bootstrap wheelhouse
+ if os.path.exists('wheelhouse'):
+ pre_eoan = series in ('ubuntu12.04', 'precise',
+ 'ubuntu14.04', 'trusty',
+ 'ubuntu16.04', 'xenial',
+ 'ubuntu18.04', 'bionic')
+ pydistutils_lines = [
+ "[easy_install]\n",
+ "find_links = file://{}/wheelhouse/\n".format(charm_dir),
+ "no_index=True\n",
+ "index_url=\n", # deliberately nothing here; disables it.
+ ]
+ if pre_eoan:
+ pydistutils_lines.append("allow_hosts = ''\n")
+ with open('/root/.pydistutils.cfg', 'w') as fp:
+ # make sure that easy_install also only uses the wheelhouse
+ # (see https://github.com/pypa/pip/issues/410)
+ fp.writelines(pydistutils_lines)
+ if 'centos' in series:
+ yum_install(packages_needed)
+ else:
+ apt_install(packages_needed)
+ from charms.layer import options
+ cfg = options.get('basic')
+ # include packages defined in layer.yaml
+ if 'centos' in series:
+ yum_install(cfg.get('packages', []))
+ else:
+ apt_install(cfg.get('packages', []))
+ # if we're using a venv, set it up
+ if cfg.get('use_venv'):
+ if not os.path.exists(venv):
+ series = get_series()
+ if series in ('ubuntu12.04', 'precise',
+ 'ubuntu14.04', 'trusty'):
+ apt_install(['python-virtualenv'])
+ elif 'centos' in series:
+ yum_install(['python-virtualenv'])
+ else:
+ apt_install(['virtualenv'])
+ cmd = ['virtualenv', '-ppython3', '--never-download', venv]
+ if cfg.get('include_system_packages'):
+ cmd.append('--system-site-packages')
+ check_call(cmd, env=_get_subprocess_env())
+ os.environ['PATH'] = ':'.join([vbin, os.environ['PATH']])
+ pip = vpip
+ else:
+ pip = 'pip3'
+ # save a copy of system pip to prevent `pip3 install -U pip`
+ # from changing it
+ if os.path.exists('/usr/bin/pip'):
+ shutil.copy2('/usr/bin/pip', '/usr/bin/pip.save')
+ pre_install_pkgs = ['pip', 'setuptools', 'setuptools-scm']
+ # we bundle these packages to work around bugs in older versions (such
+ # as https://github.com/pypa/pip/issues/56), but if the system already
+ # provided a newer version, downgrading it can cause other problems
+ _update_if_newer(pip, pre_install_pkgs)
+ # install the rest of the wheelhouse deps (extract the pkg names into
+ # a set so that we can ignore the pre-install packages and let pip
+ # choose the best version in case there are multiple from layer
+ # conflicts)
+ pkgs = _load_wheelhouse_versions().keys() - set(pre_install_pkgs)
+ reinstall_flag = '--force-reinstall'
+ if not cfg.get('use_venv', True) and pre_eoan:
+ reinstall_flag = '--ignore-installed'
+ check_call([pip, 'install', '-U', reinstall_flag, '--no-index',
+ '--no-cache-dir', '-f', 'wheelhouse'] + list(pkgs),
+ env=_get_subprocess_env())
+ # re-enable installation from pypi
+ os.remove('/root/.pydistutils.cfg')
+
+ # install pyyaml for centos7, since, unlike the ubuntu image, the
+ # default image for centos doesn't include pyyaml; see the discussion:
+ # https://discourse.jujucharms.com/t/charms-for-centos-lets-begin
+ if 'centos' in series:
+ check_call([pip, 'install', '-U', 'pyyaml'],
+ env=_get_subprocess_env())
+
+ # install python packages from layer options
+ if cfg.get('python_packages'):
+ check_call([pip, 'install', '-U'] + cfg.get('python_packages'),
+ env=_get_subprocess_env())
+ if not cfg.get('use_venv'):
+ # restore system pip to prevent `pip3 install -U pip`
+ # from changing it
+ if os.path.exists('/usr/bin/pip.save'):
+ shutil.copy2('/usr/bin/pip.save', '/usr/bin/pip')
+ os.remove('/usr/bin/pip.save')
+ # setup wrappers to ensure envs are used for scripts
+ install_or_update_charm_env()
+ for wrapper in ('charms.reactive', 'charms.reactive.sh',
+ 'chlp', 'layer_option'):
+ src = os.path.join('/usr/local/sbin', 'charm-env')
+ dst = os.path.join('/usr/local/sbin', wrapper)
+ if not os.path.exists(dst):
+ os.symlink(src, dst)
+ if cfg.get('use_venv'):
+ shutil.copy2('bin/layer_option', vbin)
+ else:
+ shutil.copy2('bin/layer_option', '/usr/local/bin/')
+ # re-link the charm copy to the wrapper in case charms
+ # call bin/layer_option directly (as was the old pattern)
+ os.remove('bin/layer_option')
+ os.symlink('/usr/local/sbin/layer_option', 'bin/layer_option')
+ # flag us as having already bootstrapped so we don't do it again
+ open('wheelhouse/.bootstrapped', 'w').close()
+ if is_upgrade:
+ # flag us as having already upgraded so we don't do it again
+ open('wheelhouse/.upgraded', 'w').close()
+ # Ensure that the newly bootstrapped libs are available.
+ # Note: this only seems to be an issue with namespace packages.
+ # Non-namespace-package libs (e.g., charmhelpers) are available
+ # without having to reload the interpreter. :/
+ reload_interpreter(vpy if cfg.get('use_venv') else sys.argv[0])
+
+
+def _load_installed_versions(pip):
+ pip_freeze = check_output([pip, 'freeze']).decode('utf8')
+ versions = {}
+ for pkg_ver in pip_freeze.splitlines():
+ try:
+ req = Requirement.parse(pkg_ver)
+ except ValueError:
+ continue
+ versions.update({
+ req.project_name: LooseVersion(ver)
+ for op, ver in req.specs if op == '=='
+ })
+ return versions
+
+
+def _load_wheelhouse_versions():
+ versions = {}
+ for wheel in glob('wheelhouse/*'):
+ pkg, ver = os.path.basename(wheel).rsplit('-', 1)
+ # nb: LooseVersion ignores the file extension
+ versions[pkg.replace('_', '-')] = LooseVersion(ver)
+ return versions
+
+
+def _update_if_newer(pip, pkgs):
+ installed = _load_installed_versions(pip)
+ wheelhouse = _load_wheelhouse_versions()
+ for pkg in pkgs:
+ if pkg not in installed or wheelhouse[pkg] > installed[pkg]:
+ check_call([pip, 'install', '-U', '--no-index', '-f', 'wheelhouse',
+ pkg], env=_get_subprocess_env())
+
+
+def install_or_update_charm_env():
+ # On Trusty python3-pkg-resources is not installed
+ try:
+ from pkg_resources import parse_version
+ except ImportError:
+ apt_install(['python3-pkg-resources'])
+ from pkg_resources import parse_version
+
+ try:
+ installed_version = parse_version(
+ check_output(['/usr/local/sbin/charm-env',
+ '--version']).decode('utf8'))
+ except (CalledProcessError, FileNotFoundError):
+ installed_version = parse_version('0.0.0')
+ try:
+ bundled_version = parse_version(
+ check_output(['bin/charm-env',
+ '--version']).decode('utf8'))
+ except (CalledProcessError, FileNotFoundError):
+ bundled_version = parse_version('0.0.0')
+ if installed_version < bundled_version:
+ shutil.copy2('bin/charm-env', '/usr/local/sbin/')
+
+
+def activate_venv():
+ """
+ Activate the venv if enabled in ``layer.yaml``.
+
+ This is handled automatically for normal hooks, but actions might
+ need to invoke this manually, using something like:
+
+ # Load modules from $JUJU_CHARM_DIR/lib
+ import sys
+ sys.path.append('lib')
+
+ from charms.layer.basic import activate_venv
+ activate_venv()
+
+ This will ensure that modules installed in the charm's
+ virtual environment are available to the action.
+ """
+ from charms.layer import options
+ venv = os.path.abspath('../.venv')
+ vbin = os.path.join(venv, 'bin')
+ vpy = os.path.join(vbin, 'python')
+ use_venv = options.get('basic', 'use_venv')
+ if use_venv and '.venv' not in sys.executable:
+ # activate the venv
+ os.environ['PATH'] = ':'.join([vbin, os.environ['PATH']])
+ reload_interpreter(vpy)
+ layer.patch_options_interface()
+ layer.import_layer_libs()
+
+
+def reload_interpreter(python):
+ """
+ Reload the python interpreter to ensure that all deps are available.
+
+    Newly installed modules in namespace packages sometimes seem to
+ not be picked up by Python 3.
+ """
+ os.execve(python, [python] + list(sys.argv), os.environ)
+
+
+def apt_install(packages):
+ """
+ Install apt packages.
+
+ This ensures a consistent set of options that are often missed but
+ should really be set.
+ """
+ if isinstance(packages, (str, bytes)):
+ packages = [packages]
+
+ env = _get_subprocess_env()
+
+ if 'DEBIAN_FRONTEND' not in env:
+ env['DEBIAN_FRONTEND'] = 'noninteractive'
+
+ cmd = ['apt-get',
+ '--option=Dpkg::Options::=--force-confold',
+ '--assume-yes',
+ 'install']
+ for attempt in range(3):
+ try:
+ check_call(cmd + packages, env=env)
+ except CalledProcessError:
+ if attempt == 2: # third attempt
+ raise
+ try:
+ # sometimes apt-get update needs to be run
+ check_call(['apt-get', 'update'], env=env)
+ except CalledProcessError:
+ # sometimes it's a dpkg lock issue
+ pass
+ sleep(5)
+ else:
+ break
+
+
+def yum_install(packages):
+ """ Installs packages with yum.
+ This function largely mimics the apt_install function for consistency.
+ """
+ if packages:
+ env = os.environ.copy()
+ cmd = ['yum', '-y', 'install']
+ for attempt in range(3):
+ try:
+ check_call(cmd + packages, env=env)
+ except CalledProcessError:
+ if attempt == 2:
+ raise
+ try:
+ check_call(['yum', 'update'], env=env)
+ except CalledProcessError:
+ pass
+ sleep(5)
+ else:
+ break
+ else:
+ pass
+
+
+def init_config_states():
+ import yaml
+ from charmhelpers.core import hookenv
+ from charms.reactive import set_state
+ from charms.reactive import toggle_state
+ config = hookenv.config()
+ config_defaults = {}
+ config_defs = {}
+ config_yaml = os.path.join(hookenv.charm_dir(), 'config.yaml')
+ if os.path.exists(config_yaml):
+ with open(config_yaml) as fp:
+ config_defs = yaml.safe_load(fp).get('options', {})
+ config_defaults = {key: value.get('default')
+ for key, value in config_defs.items()}
+ for opt in config_defs.keys():
+ if config.changed(opt):
+ set_state('config.changed')
+ set_state('config.changed.{}'.format(opt))
+ toggle_state('config.set.{}'.format(opt), config.get(opt))
+ toggle_state('config.default.{}'.format(opt),
+ config.get(opt) == config_defaults[opt])
+
+
+def clear_config_states():
+ from charmhelpers.core import hookenv, unitdata
+ from charms.reactive import remove_state
+ config = hookenv.config()
+ remove_state('config.changed')
+ for opt in config.keys():
+ remove_state('config.changed.{}'.format(opt))
+ remove_state('config.set.{}'.format(opt))
+ remove_state('config.default.{}'.format(opt))
+ unitdata.kv().flush()
diff --git a/calico/lib/charms/layer/execd.py b/calico/lib/charms/layer/execd.py
new file mode 100644
index 0000000..438d9a1
--- /dev/null
+++ b/calico/lib/charms/layer/execd.py
@@ -0,0 +1,114 @@
+# Copyright 2014-2016 Canonical Limited.
+#
+# This file is part of layer-basic, the reactive base layer for Juju.
+#
+# charm-helpers is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License version 3 as
+# published by the Free Software Foundation.
+#
+# charm-helpers is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with charm-helpers.  If not, see <http://www.gnu.org/licenses/>.
+
+# This module may only import from the Python standard library.
+import os
+import sys
+import subprocess
+import time
+
+'''
+execd/preinstall
+
+Read the layer-basic docs for more info on how to use this feature.
+https://charmsreactive.readthedocs.io/en/latest/layer-basic.html#exec-d-support
+'''
+
+
+def default_execd_dir():
+ return os.path.join(os.environ['JUJU_CHARM_DIR'], 'exec.d')
+
+
+def execd_module_paths(execd_dir=None):
+ """Generate a list of full paths to modules within execd_dir."""
+ if not execd_dir:
+ execd_dir = default_execd_dir()
+
+ if not os.path.exists(execd_dir):
+ return
+
+ for subpath in os.listdir(execd_dir):
+ module = os.path.join(execd_dir, subpath)
+ if os.path.isdir(module):
+ yield module
+
+
+def execd_submodule_paths(command, execd_dir=None):
+    """Generate a list of full paths to the specified command within execd_dir.
+ """
+ for module_path in execd_module_paths(execd_dir):
+ path = os.path.join(module_path, command)
+ if os.access(path, os.X_OK) and os.path.isfile(path):
+ yield path
+
+
+def execd_sentinel_path(submodule_path):
+ module_path = os.path.dirname(submodule_path)
+ execd_path = os.path.dirname(module_path)
+ module_name = os.path.basename(module_path)
+ submodule_name = os.path.basename(submodule_path)
+ return os.path.join(execd_path,
+ '.{}_{}.done'.format(module_name, submodule_name))
+
+
+def execd_run(command, execd_dir=None, stop_on_error=True, stderr=None):
+ """Run command for each module within execd_dir which defines it."""
+ if stderr is None:
+ stderr = sys.stdout
+ for submodule_path in execd_submodule_paths(command, execd_dir):
+ # Only run each execd once. We cannot simply run them in the
+ # install hook, as potentially storage hooks are run before that.
+ # We cannot rely on them being idempotent.
+ sentinel = execd_sentinel_path(submodule_path)
+ if os.path.exists(sentinel):
+ continue
+
+ try:
+ subprocess.check_call([submodule_path], stderr=stderr,
+ universal_newlines=True)
+ with open(sentinel, 'w') as f:
+ f.write('{} ran successfully {}\n'.format(submodule_path,
+ time.ctime()))
+ f.write('Removing this file will cause it to be run again\n')
+ except subprocess.CalledProcessError as e:
+ # Logs get the details. We can't use juju-log, as the
+ # output may be substantial and exceed command line
+ # length limits.
+ print("ERROR ({}) running {}".format(e.returncode, e.cmd),
+ file=stderr)
+            print("STDOUT<<EOM", file=stderr)
+            print(e.output, file=stderr)
+            print("EOM", file=stderr)
+
+            # Return code is used to signal exceptional states.
+            if stop_on_error:
+                sys.exit(e.returncode)
+
+
+def execd_preinstall(execd_dir=None):
+    """Run charm-pre-install for each module within execd_dir."""
+    execd_run('charm-pre-install', execd_dir=execd_dir)
diff --git a/calico/lib/charms/leadership.py b/calico/lib/charms/leadership.py
new file mode 100644
--- /dev/null
+++ b/calico/lib/charms/leadership.py
@@ -0,0 +1,57 @@
+# Copyright 2015-2016 Canonical Ltd.
+#
+# This file is part of the Leadership Layer for Juju.
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program.  If not, see <http://www.gnu.org/licenses/>.
+
+from charmhelpers.core import hookenv
+from charmhelpers.core import unitdata
+
+from charms import reactive
+from charms.reactive import not_unless
+
+
+__all__ = ['leader_get', 'leader_set']
+
+
+@not_unless('leadership.is_leader')
+def leader_set(*args, **kw):
+ '''Change leadership settings, per charmhelpers.core.hookenv.leader_set.
+
+ Settings may either be passed in as a single dictionary, or using
+ keyword arguments. All values must be strings.
+
+ The leadership.set.{key} reactive state will be set while the
+ leadership hook environment setting remains set.
+
+ Changed leadership settings will set the leadership.changed.{key}
+ and leadership.changed states. These states will remain set until
+ the following hook.
+
+ These state changes take effect immediately on the leader, and
+ in future hooks run on non-leaders. In this way both leaders and
+ non-leaders can share handlers, waiting on these states.
+ '''
+ if args:
+ if len(args) > 1:
+ raise TypeError('leader_set() takes 1 positional argument but '
+ '{} were given'.format(len(args)))
+ else:
+ settings = dict(args[0])
+ else:
+ settings = {}
+ settings.update(kw)
+ previous = unitdata.kv().getrange('leadership.settings.', strip=True)
+
+ for key, value in settings.items():
+ if value != previous.get(key):
+ reactive.set_state('leadership.changed.{}'.format(key))
+ reactive.set_state('leadership.changed')
+ reactive.helpers.toggle_state('leadership.set.{}'.format(key),
+ value is not None)
+ hookenv.leader_set(settings)
+ unitdata.kv().update(settings, prefix='leadership.settings.')
+
+
+def leader_get(attribute=None):
+ '''Return leadership settings, per charmhelpers.core.hookenv.leader_get.'''
+ return hookenv.leader_get(attribute)
diff --git a/calico/make_docs b/calico/make_docs
new file mode 100644
index 0000000..dcd4c1f
--- /dev/null
+++ b/calico/make_docs
@@ -0,0 +1,20 @@
+#!.tox/py3/bin/python
+
+import os
+import sys
+from shutil import rmtree
+from unittest.mock import patch
+
+import pydocmd.__main__
+
+
+with patch('charmhelpers.core.hookenv.metadata') as metadata:
+ sys.path.insert(0, 'lib')
+ sys.path.insert(1, 'reactive')
+ print(sys.argv)
+ if len(sys.argv) == 1:
+ sys.argv.extend(['build'])
+ pydocmd.__main__.main()
+ rmtree('_build')
+ if os.path.exists('.unit-state.db'):
+ os.remove('.unit-state.db')
diff --git a/calico/metadata.yaml b/calico/metadata.yaml
new file mode 100644
index 0000000..1dd2061
--- /dev/null
+++ b/calico/metadata.yaml
@@ -0,0 +1,46 @@
+"name": "calico"
+"summary": "A robust Software Defined Network from Project Calico"
+"maintainers":
+- "Tim Van Steenburgh <tim.van.steenburgh@canonical.com>"
+- "George Kraft <george.kraft@canonical.com>"
+- "Konstantinos Tsakalozos <kos.tsakalozos@canonical.com>"
+- "Mike Wilson <mike.wilson@canonical.com>"
+- "Kevin Monroe <kevin.monroe@canonical.com>"
+- "Joe Borg <joseph.borg@canonical.com>"
+"description": |
+ Deploys Calico as a background service and configures CNI for use with
+ calico on any principal charm that implements the kubernetes-cni interface.
+"tags":
+- "networking"
+"series":
+- "focal"
+- "bionic"
+- "xenial"
+"requires":
+ "etcd":
+ "interface": "etcd"
+ "cni":
+ "interface": "kubernetes-cni"
+ "scope": "container"
+"resources":
+ "calico":
+ "type": "file"
+ "filename": "calico.tar.gz"
+ "description": "Calico resource tarball for amd64"
+ "calico-arm64":
+ "type": "file"
+ "filename": "calico.tar.gz"
+ "description": "Calico resource tarball for arm64"
+ "calico-upgrade":
+ "type": "file"
+ "filename": "calico-upgrade.tar.gz"
+ "description": "calico-upgrade tool for amd64"
+ "calico-upgrade-arm64":
+ "type": "file"
+ "filename": "calico-upgrade.tar.gz"
+ "description": "calico-upgrade tool for arm64"
+ "calico-node-image":
+ "type": "file"
+ "filename": "calico-node-image.tar.gz"
+ "description": "calico-node container image"
+"subordinate": !!bool "true"
diff --git a/calico/pydocmd.yml b/calico/pydocmd.yml
new file mode 100644
index 0000000..ab3b2ef
--- /dev/null
+++ b/calico/pydocmd.yml
@@ -0,0 +1,16 @@
+site_name: 'Status Management Layer'
+
+generate:
+ - status.md:
+ - charms.layer.status.WorkloadState
+ - charms.layer.status.maintenance
+ - charms.layer.status.maint
+ - charms.layer.status.blocked
+ - charms.layer.status.waiting
+ - charms.layer.status.active
+ - charms.layer.status.status_set
+
+pages:
+ - Status Management Layer: status.md
+
+gens_dir: docs
diff --git a/calico/reactive/__init__.py b/calico/reactive/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/calico/reactive/calico.py b/calico/reactive/calico.py
new file mode 100644
index 0000000..4d49298
--- /dev/null
+++ b/calico/reactive/calico.py
@@ -0,0 +1,782 @@
+import os
+import yaml
+import gzip
+import traceback
+import ipaddress
+import calico_upgrade
+
+from conctl import getContainerRuntimeCtl
+from socket import gethostname
+from subprocess import check_call, check_output, CalledProcessError, STDOUT
+
+from charms.leadership import leader_get, leader_set
+from charms.reactive import when, when_not, when_any, set_state, remove_state
+from charms.reactive import hook, is_state
+from charms.reactive import endpoint_from_flag
+from charms.reactive import data_changed
+from charmhelpers.core.hookenv import (
+ log,
+ resource_get,
+ network_get,
+ unit_private_ip,
+ is_leader,
+ local_unit,
+ config as charm_config,
+ atexit,
+ env_proxy_settings
+)
+from charmhelpers.core.host import (
+ arch,
+ service,
+ service_restart,
+ service_running
+)
+from charmhelpers.core.templating import render
+from charms.layer import status
+
+# TODO:
+# - Handle the 'stop' hook by stopping and uninstalling all the things.
+
+os.environ['PATH'] += os.pathsep + os.path.join(os.sep, 'snap', 'bin')
+
+try:
+ CTL = getContainerRuntimeCtl()
+ set_state('calico.ctl.ready')
+except RuntimeError:
+ log(traceback.format_exc())
+ remove_state('calico.ctl.ready')
+
+CALICOCTL_PATH = '/opt/calicoctl'
+ETCD_KEY_PATH = os.path.join(CALICOCTL_PATH, 'etcd-key')
+ETCD_CERT_PATH = os.path.join(CALICOCTL_PATH, 'etcd-cert')
+ETCD_CA_PATH = os.path.join(CALICOCTL_PATH, 'etcd-ca')
+CALICO_UPGRADE_DIR = '/opt/calico-upgrade'
+
+
+@hook('upgrade-charm')
+def upgrade_charm():
+ remove_state('calico.binaries.installed')
+ remove_state('calico.cni.configured')
+ remove_state('calico.service.installed')
+ remove_state('calico.pool.configured')
+ remove_state('calico.npc.deployed')
+ remove_state('calico.image.pulled')
+ remove_state('calico.bgp.globals.configured')
+ remove_state('calico.node.configured')
+ remove_state('calico.bgp.peers.configured')
+ try:
+ log('Deleting /etc/cni/net.d/10-calico.conf')
+ os.remove('/etc/cni/net.d/10-calico.conf')
+ except FileNotFoundError as e:
+ log(e)
+ if is_leader() and not leader_get('calico-v3-data-ready'):
+ leader_set({
+ 'calico-v3-data-migration-needed': True,
+ 'calico-v3-npc-cleanup-needed': True,
+ 'calico-v3-completion-needed': True
+ })
+
+
+@when('leadership.is_leader', 'leadership.set.calico-v3-data-migration-needed',
+ 'etcd.available', 'calico.etcd-credentials.installed')
+def upgrade_v3_migrate_data():
+ status.maintenance('Migrating data to Calico 3')
+ try:
+ calico_upgrade.configure()
+ calico_upgrade.dry_run()
+ calico_upgrade.start()
+ except Exception:
+ log(traceback.format_exc())
+ message = 'Calico upgrade failed, see debug log'
+ status.blocked(message)
+ return
+ leader_set({'calico-v3-data-migration-needed': None})
+
+
+@when('leadership.is_leader')
+@when_not('leadership.set.calico-v3-data-migration-needed')
+def v3_data_ready():
+ leader_set({'calico-v3-data-ready': True})
+
+
+@when('leadership.is_leader', 'leadership.set.calico-v3-data-ready',
+ 'leadership.set.calico-v3-npc-cleanup-needed')
+def upgrade_v3_npc_cleanup():
+ status.maintenance('Cleaning up Calico 2 policy controller')
+
+ resources = [
+ ('Deployment', 'kube-system', 'calico-policy-controller'),
+ ('ClusterRoleBinding', None, 'calico-policy-controller'),
+ ('ClusterRole', None, 'calico-policy-controller'),
+ ('ServiceAccount', 'kube-system', 'calico-policy-controller')
+ ]
+
+ for kind, namespace, name in resources:
+ args = ['delete', '--ignore-not-found', kind, name]
+ if namespace:
+ args += ['-n', namespace]
+ try:
+ kubectl(*args)
+ except CalledProcessError:
+ log('Failed to cleanup %s %s %s' % (kind, namespace, name))
+ return
+
+ leader_set({'calico-v3-npc-cleanup-needed': None})
+
+
+@when('leadership.is_leader', 'leadership.set.calico-v3-completion-needed',
+ 'leadership.set.calico-v3-data-ready', 'calico.binaries.installed',
+ 'calico.service.installed', 'calico.npc.deployed')
+@when_not('leadership.set.calico-v3-npc-cleanup-needed')
+def upgrade_v3_complete():
+ status.maintenance('Completing Calico 3 upgrade')
+ try:
+ calico_upgrade.configure()
+ calico_upgrade.complete()
+ calico_upgrade.cleanup()
+ except Exception:
+ log(traceback.format_exc())
+ message = 'Calico upgrade failed, see debug log'
+ status.blocked(message)
+ return
+ leader_set({'calico-v3-completion-needed': None})
+
+
+@when('leadership.set.calico-v3-data-ready')
+@when_not('calico.binaries.installed')
+def install_calico_binaries():
+ ''' Unpack the Calico binaries. '''
+ # on intel, the resource is called 'calico'; other arches have a suffix
+ architecture = arch()
+ if architecture == "amd64":
+ resource_name = 'calico'
+ else:
+ resource_name = 'calico-{}'.format(architecture)
+
+ try:
+ archive = resource_get(resource_name)
+ except Exception:
+ message = 'Error fetching the calico resource.'
+ log(message)
+ status.blocked(message)
+ return
+
+ if not archive:
+ message = 'Missing calico resource.'
+ log(message)
+ status.blocked(message)
+ return
+
+ filesize = os.stat(archive).st_size
+ if filesize < 1000000:
+ message = 'Incomplete calico resource'
+ log(message)
+ status.blocked(message)
+ return
+
+ status.maintenance('Unpacking calico resource.')
+
+ charm_dir = os.getenv('CHARM_DIR')
+ unpack_path = os.path.join(charm_dir, 'files', 'calico')
+ os.makedirs(unpack_path, exist_ok=True)
+ cmd = ['tar', 'xfz', archive, '-C', unpack_path]
+ log(cmd)
+ check_call(cmd)
+
+ apps = [
+ {'name': 'calicoctl', 'path': CALICOCTL_PATH},
+ {'name': 'calico', 'path': '/opt/cni/bin'},
+ {'name': 'calico-ipam', 'path': '/opt/cni/bin'},
+ ]
+
+ for app in apps:
+ unpacked = os.path.join(unpack_path, app['name'])
+ app_path = os.path.join(app['path'], app['name'])
+ install = ['install', '-v', '-D', unpacked, app_path]
+ check_call(install)
+
+ calicoctl_path = '/usr/local/bin/calicoctl'
+ render('calicoctl', calicoctl_path, {})
+ os.chmod(calicoctl_path, 0o775)
+
+ set_state('calico.binaries.installed')
+
+
+@when('calico.binaries.installed', 'etcd.available')
+def update_calicoctl_env():
+ env = get_calicoctl_env()
+ lines = ['export %s=%s' % item for item in sorted(env.items())]
+ output = '\n'.join(lines)
+ with open('/opt/calicoctl/calicoctl.env', 'w') as f:
+ f.write(output)
+
+
+@when('calico.binaries.installed')
+@when_not('etcd.connected')
+def blocked_without_etcd():
+ status.blocked('Waiting for relation to etcd')
+
+
+@when('etcd.tls.available')
+@when_not('calico.etcd-credentials.installed')
+def install_etcd_credentials():
+ etcd = endpoint_from_flag('etcd.available')
+ etcd.save_client_credentials(ETCD_KEY_PATH, ETCD_CERT_PATH, ETCD_CA_PATH)
+ # register initial etcd data so that we can detect changes
+ data_changed('calico.etcd.data', (etcd.get_connection_string(),
+ etcd.get_client_credentials()))
+ set_state('calico.etcd-credentials.installed')
+
+
+@when('etcd.tls.available', 'calico.service.installed')
+def check_etcd_changes():
+ etcd = endpoint_from_flag('etcd.available')
+ if data_changed('calico.etcd.data', (etcd.get_connection_string(),
+ etcd.get_client_credentials())):
+ etcd.save_client_credentials(ETCD_KEY_PATH,
+ ETCD_CERT_PATH,
+ ETCD_CA_PATH)
+ remove_state('calico.service.installed')
+ remove_state('calico.npc.deployed')
+
+
+def get_mtu():
+ ''' Get user-specified MTU size, adjusted to make room for encapsulation
+ headers. https://docs.projectcalico.org/networking/mtu
+ '''
+ mtu = charm_config('veth-mtu')
+ if not mtu:
+ return None
+
+ if charm_config('vxlan') != 'Never':
+ return mtu - 50
+ elif charm_config('ipip') != 'Never':
+ return mtu - 20
+ return mtu
+
+
+def get_bind_address():
+ ''' Returns a non-fan bind address for the cni endpoint '''
+ try:
+ data = network_get('cni')
+ except NotImplementedError:
+ # Juju < 2.1
+ return unit_private_ip()
+
+ if 'bind-addresses' not in data:
+ # Juju < 2.3
+ return unit_private_ip()
+
+ for bind_address in data['bind-addresses']:
+ if bind_address['interfacename'].startswith('fan-'):
+ continue
+ return bind_address['addresses'][0]['address']
+
+ # If we made it here, we didn't find a non-fan CNI bind-address, which is
+ # unexpected. Let's log a message and play it safe.
+ log('Could not find a non-fan bind-address. Using private-address.')
+ return unit_private_ip()
+
+
+@when('calico.binaries.installed', 'etcd.available',
+ 'calico.etcd-credentials.installed',
+ 'leadership.set.calico-v3-data-ready')
+@when_not('calico.service.installed')
+def install_calico_service():
+ ''' Install the calico-node systemd service. '''
+ status.maintenance('Installing calico-node service.')
+ etcd = endpoint_from_flag('etcd.available')
+ service_path = os.path.join(os.sep, 'lib', 'systemd', 'system',
+ 'calico-node.service')
+ ip_versions = {net.version for net in get_networks(charm_config('cidr'))}
+ ip4 = get_bind_address() if 4 in ip_versions else "none"
+ ip6 = "autodetect" if 6 in ip_versions else "none"
+ render('calico-node.service', service_path, {
+ 'connection_string': etcd.get_connection_string(),
+ 'etcd_key_path': ETCD_KEY_PATH,
+ 'etcd_ca_path': ETCD_CA_PATH,
+ 'etcd_cert_path': ETCD_CERT_PATH,
+ 'nodename': gethostname(),
+ # specify IP so calico doesn't grab a silly one from, say, lxdbr0
+ 'ip': ip4,
+ 'ip6': ip6,
+ 'mtu': get_mtu(),
+ 'calico_node_image': charm_config('calico-node-image'),
+ 'ignore_loose_rpf': charm_config('ignore-loose-rpf'),
+ 'lc_all': os.environ.get('LC_ALL', 'C.UTF-8'),
+ 'lang': os.environ.get('LANG', 'C.UTF-8')
+ })
+ check_call(['systemctl', 'daemon-reload'])
+ service_restart('calico-node')
+ service('enable', 'calico-node')
+ set_state('calico.service.installed')
+
+
+@when('config.changed.veth-mtu')
+def configure_mtu():
+ remove_state('calico.service.installed')
+ remove_state('calico.cni.configured')
+
+
+@when('config.changed.ignore-loose-rpf')
+def ignore_loose_rpf_changed():
+ remove_state('calico.service.installed')
+
+
+@when('calico.binaries.installed', 'etcd.available',
+      'calico.etcd-credentials.installed',
+      'leadership.set.calico-v3-data-ready')
+@when_not('calico.pool.configured')
+def configure_calico_pool():
+    ''' Configure Calico IP pools for the configured CIDRs.
+
+    When manage-pools is disabled, pool management is left to the
+    operator and this handler only marks itself complete.
+    '''
+    config = charm_config()
+    if not config['manage-pools']:
+        log('Skipping pool configuration')
+        set_state('calico.pool.configured')
+        return
+
+    status.maintenance('Configuring Calico IP pool')
+
+    try:
+        # remove unrecognized pools, and default pool if CIDR doesn't match
+        pools = calicoctl_get('pool')['items']
+
+        cidrs = tuple(cidr.strip() for cidr in config['cidr'].split(','))
+        # one managed pool per IP family, named "ipv4"/"ipv6"
+        names = tuple('ipv{}'.format(get_network(cidr).version)
+                      for cidr in cidrs)
+        pool_names_to_delete = [
+            pool['metadata']['name'] for pool in pools
+            if pool['metadata']['name'] not in names
+            or pool['spec']['cidr'] not in cidrs
+        ]
+
+        for pool_name in pool_names_to_delete:
+            log('Deleting pool: %s' % pool_name)
+            calicoctl('delete', 'pool', pool_name, '--skip-not-exists')
+
+        for cidr, name in zip(cidrs, names):
+            # configure the default pool
+            pool = {
+                'apiVersion': 'projectcalico.org/v3',
+                'kind': 'IPPool',
+                'metadata': {
+                    'name': name,
+                },
+                'spec': {
+                    'cidr': cidr,
+                    'ipipMode': config['ipip'],
+                    'vxlanMode': config['vxlan'],
+                    'natOutgoing': config['nat-outgoing'],
+                }
+            }
+
+            calicoctl_apply(pool)
+    except CalledProcessError:
+        log(traceback.format_exc())
+        # Enabling both encapsulations at once is treated as an operator
+        # error: surface it as blocked instead of retrying forever.
+        if config['ipip'] != 'Never' and config['vxlan'] != 'Never':
+            status.blocked('ipip and vxlan configs are in conflict')
+        else:
+            status.waiting('Waiting to retry calico pool configuration')
+        return
+
+    set_state('calico.pool.configured')
+
+
+@when_any('config.changed.ipip', 'config.changed.nat-outgoing',
+          'config.changed.cidr', 'config.changed.manage-pools',
+          'config.changed.vxlan')
+def reconfigure_calico_pool():
+    ''' Reconfigure the Calico IP pool '''
+    remove_state('calico.pool.configured')
+
+
+@when('etcd.available', 'cni.is-worker', 'leadership.set.calico-v3-data-ready')
+@when_not('calico.cni.configured')
+def configure_cni():
+    ''' Configure Calico CNI on a worker: render the conflist and publish
+    the CIDR over the CNI relation. '''
+    status.maintenance('Configuring Calico CNI')
+    cni = endpoint_from_flag('cni.is-worker')
+    etcd = endpoint_from_flag('etcd.available')
+    os.makedirs('/etc/cni/net.d', exist_ok=True)
+    cni_config = cni.get_config()
+    ip_versions = {net.version for net in get_networks(charm_config('cidr'))}
+    context = {
+        'connection_string': etcd.get_connection_string(),
+        'etcd_key_path': ETCD_KEY_PATH,
+        'etcd_cert_path': ETCD_CERT_PATH,
+        'etcd_ca_path': ETCD_CA_PATH,
+        'kubeconfig_path': cni_config['kubeconfig_path'],
+        'mtu': get_mtu(),
+        # calico-ipam takes these flags as strings, not booleans
+        'assign_ipv4': 'true' if 4 in ip_versions else 'false',
+        'assign_ipv6': 'true' if 6 in ip_versions else 'false',
+    }
+    render('10-calico.conflist', '/etc/cni/net.d/10-calico.conflist', context)
+    config = charm_config()
+    cni.set_config(cidr=config['cidr'], cni_conf_file='10-calico.conflist')
+    set_state('calico.cni.configured')
+
+
+@when('etcd.available', 'cni.is-master')
+@when_not('calico.cni.configured')
+def configure_master_cni():
+    # Masters don't render a local conflist; they only publish the CIDR
+    # and conflist name over the CNI relation.
+    status.maintenance('Configuring Calico CNI')
+    cni = endpoint_from_flag('cni.is-master')
+    config = charm_config()
+    cni.set_config(cidr=config['cidr'], cni_conf_file='10-calico.conflist')
+    set_state('calico.cni.configured')
+
+
+@when_any('config.changed.cidr')
+def reconfigure_cni():
+    remove_state('calico.cni.configured')
+
+
+@when('etcd.available', 'calico.cni.configured',
+      'calico.service.installed', 'leadership.is_leader',
+      'leadership.set.calico-v3-data-ready')
+@when_not('calico.npc.deployed')
+def deploy_network_policy_controller():
+    ''' Deploy the Calico network policy controller (leader only). '''
+    status.maintenance('Deploying network policy controller.')
+    etcd = endpoint_from_flag('etcd.available')
+    context = {
+        'connection_string': etcd.get_connection_string(),
+        'etcd_key_path': ETCD_KEY_PATH,
+        'etcd_cert_path': ETCD_CERT_PATH,
+        'etcd_ca_path': ETCD_CA_PATH,
+        'calico_policy_image': charm_config('calico-policy-image'),
+        # annotated onto the pod template so k8s restarts the controller
+        # when the etcd cert rotates (see policy-controller.yaml)
+        'etcd_cert_last_modified': os.path.getmtime(ETCD_CERT_PATH)
+    }
+    render('policy-controller.yaml', '/tmp/policy-controller.yaml', context)
+    try:
+        kubectl('apply', '-f', '/tmp/policy-controller.yaml')
+        set_state('calico.npc.deployed')
+    except CalledProcessError as e:
+        # API server may not be reachable yet; the flag stays unset so
+        # this handler retries on the next hook.
+        status.waiting('Waiting for kubernetes')
+        log(str(e))
+
+
+@when('calico.binaries.installed', 'etcd.available',
+      'leadership.set.calico-v3-data-ready')
+@when_not('calico.bgp.globals.configured')
+def configure_bgp_globals():
+    ''' Apply global BGP settings: default AS number and whether the
+    node-to-node mesh is enabled. '''
+    status.maintenance('Configuring BGP globals')
+    config = charm_config()
+
+    try:
+        try:
+            bgp_config = calicoctl_get('bgpconfig', 'default')
+        except CalledProcessError as e:
+            # First run: the "default" BGPConfiguration may not exist yet;
+            # start from an empty spec instead of failing.
+            if b'resource does not exist' in e.output:
+                log('default BGPConfiguration does not exist')
+                bgp_config = {
+                    'apiVersion': 'projectcalico.org/v3',
+                    'kind': 'BGPConfiguration',
+                    'metadata': {
+                        'name': 'default'
+                    },
+                    'spec': {}
+                }
+            else:
+                raise
+
+        spec = bgp_config['spec']
+        spec['asNumber'] = config['global-as-number']
+        spec['nodeToNodeMeshEnabled'] = config['node-to-node-mesh']
+        calicoctl_apply(bgp_config)
+    except CalledProcessError:
+        log(traceback.format_exc())
+        status.waiting('Waiting to retry BGP global configuration')
+        return
+
+    set_state('calico.bgp.globals.configured')
+
+
+@when_any('config.changed.global-as-number',
+          'config.changed.node-to-node-mesh')
+def reconfigure_bgp_globals():
+    remove_state('calico.bgp.globals.configured')
+
+
+@when('calico.binaries.installed', 'etcd.available',
+      'leadership.set.calico-v3-data-ready')
+@when_not('calico.node.configured')
+def configure_node():
+    ''' Apply per-node BGP settings: AS number override and route
+    reflector cluster ID. '''
+    status.maintenance('Configuring Calico node')
+
+    node_name = gethostname()
+    as_number = get_unit_as_number()
+    route_reflector_cluster_id = get_route_reflector_cluster_id()
+
+    try:
+        # Fetch the existing node resource and patch only the BGP fields.
+        node = calicoctl_get('node', node_name)
+        node['spec']['bgp']['asNumber'] = as_number
+        node['spec']['bgp']['routeReflectorClusterID'] = \
+            route_reflector_cluster_id
+        calicoctl_apply(node)
+    except CalledProcessError:
+        log(traceback.format_exc())
+        status.waiting('Waiting to retry Calico node configuration')
+        return
+
+    set_state('calico.node.configured')
+
+
+@when_any('config.changed.subnet-as-numbers', 'config.changed.unit-as-numbers',
+          'config.changed.route-reflector-cluster-ids')
+def reconfigure_node():
+    remove_state('calico.node.configured')
+
+
+@when('calico.binaries.installed', 'etcd.available',
+      'leadership.set.calico-v3-data-ready')
+@when_not('calico.bgp.peers.configured')
+def configure_bgp_peers():
+    ''' Create BGPPeer resources for this unit and prune stale ones.
+
+    Peers come from three config scopes: global, subnet-matched (only
+    subnets containing this unit's bind address), and unit-id-matched.
+    '''
+    status.maintenance('Configuring BGP peers')
+
+    peers = []
+
+    # Global BGP peers
+    config = charm_config()
+    peers += yaml.safe_load(config['global-bgp-peers'])
+
+    # Subnet-scoped BGP peers
+    subnet_bgp_peers = yaml.safe_load(config['subnet-bgp-peers'])
+    subnets = filter_local_subnets(subnet_bgp_peers)
+    for subnet in subnets:
+        peers += subnet_bgp_peers[str(subnet)]
+
+    # Unit-scoped BGP peers
+    unit_id = get_unit_id()
+    unit_bgp_peers = yaml.safe_load(config['unit-bgp-peers'])
+    if unit_id in unit_bgp_peers:
+        peers += unit_bgp_peers[unit_id]
+
+    # Give names to peers
+    safe_unit_name = local_unit().replace('/', '-')
+    named_peers = {
+        # name must consist of lower case alphanumeric characters, '-' or '.'
+        '%s-%s-%s' % (safe_unit_name, peer['address'].replace(':', '-'),
+                      peer['as-number']): peer
+        for peer in peers
+    }
+
+    try:
+        node_name = gethostname()
+        for peer_name, peer in named_peers.items():
+            peer_def = {
+                'apiVersion': 'projectcalico.org/v3',
+                'kind': 'BGPPeer',
+                'metadata': {
+                    'name': peer_name,
+                },
+                'spec': {
+                    'node': node_name,
+                    'peerIP': peer['address'],
+                    'asNumber': peer['as-number']
+                }
+            }
+            calicoctl_apply(peer_def)
+
+        # Delete unrecognized peers. Only peers prefixed with this unit's
+        # name are pruned, so peers managed by other units are untouched.
+        existing_peers = calicoctl_get('bgppeers')['items']
+        existing_peers = [peer['metadata']['name'] for peer in existing_peers]
+        peers_to_delete = [
+            peer for peer in existing_peers
+            if peer.startswith(safe_unit_name + '-')
+            and peer not in named_peers
+        ]
+
+        for peer in peers_to_delete:
+            calicoctl('delete', 'bgppeer', peer)
+    except CalledProcessError:
+        log(traceback.format_exc())
+        status.waiting('Waiting to retry BGP peer configuration')
+        return
+
+    set_state('calico.bgp.peers.configured')
+
+
+@when_any('config.changed.global-bgp-peers', 'config.changed.subnet-bgp-peers',
+          'config.changed.unit-bgp-peers')
+def reconfigure_bgp_peers():
+    remove_state('calico.bgp.peers.configured')
+
+
+@atexit
+def ready():
+    '''Summarize unit status at the end of each hook (charms.reactive
+    @atexit handler).'''
+    preconditions = [
+        'calico.service.installed', 'calico.pool.configured',
+        'calico.cni.configured', 'calico.bgp.globals.configured',
+        'calico.node.configured', 'calico.bgp.peers.configured'
+    ]
+    if is_state('upgrade.series.in-progress'):
+        status.blocked('Series upgrade in progress')
+        return
+    # Until every precondition flag is set, leave whatever status the
+    # individual handlers already reported.
+    for precondition in preconditions:
+        if not is_state(precondition):
+            return
+    # Only the leader deploys the policy controller (see
+    # deploy_network_policy_controller), so only the leader waits on it.
+    if is_leader() and not is_state('calico.npc.deployed'):
+        status.waiting('Waiting to retry deploying policy controller')
+        return
+    if not service_running('calico-node'):
+        status.waiting('Waiting for service: calico-node')
+        return
+    status.active('Calico is active')
+
+
+def calicoctl(*args):
+    '''Run calicoctl with etcd credentials in the environment.
+
+    Returns combined stdout+stderr as bytes; logs the output and
+    re-raises CalledProcessError on non-zero exit.
+    '''
+    cmd = ['/opt/calicoctl/calicoctl'] + list(args)
+    env = os.environ.copy()
+    env.update(get_calicoctl_env())
+    try:
+        return check_output(cmd, env=env, stderr=STDOUT)
+    except CalledProcessError as e:
+        log(e.output)
+        raise
+
+
+def set_http_proxy():
+    """
+    Check if we have any values for
+    juju_http*_proxy and apply them.
+    """
+    juju_environment = env_proxy_settings()
+    if juju_environment and not juju_environment.get('disable-juju-proxy'):
+        # Export both upper- and lower-case variants; tools differ in
+        # which spelling they honour.
+        upper = ['HTTP_PROXY', 'HTTPS_PROXY', 'NO_PROXY']
+        lower = list(map(str.lower, upper))
+        keys = upper + lower
+        for key in keys:
+            from_juju = juju_environment.get(key, None)
+            if from_juju:
+                os.environ[key] = from_juju
+
+
+@when_not('calico.image.pulled')
+@when('calico.ctl.ready')
+def pull_calico_node_image():
+    '''Make the calico-node image available to the container runtime.
+
+    Prefers the attached charm resource; when the resource is missing or
+    empty, falls back to pulling the configured image (honouring Juju
+    proxy settings).
+    '''
+    image = resource_get('calico-node-image')
+
+    if not image or os.path.getsize(image) == 0:
+        status.maintenance('Pulling calico-node image')
+        image = charm_config('calico-node-image')
+        set_http_proxy()
+        CTL.pull(image)
+    else:
+        status.maintenance('Loading calico-node image')
+        # The resource is a gzipped tarball; unpack it to a plain tar
+        # before handing it to the runtime.
+        unzipped = '/tmp/calico-node-image.tar'
+        with gzip.open(image, 'rb') as f_in:
+            with open(unzipped, 'wb') as f_out:
+                f_out.write(f_in.read())
+        CTL.load(unzipped)
+
+    set_state('calico.image.pulled')
+
+
+@when_any('config.changed.calico-node-image')
+def repull_calico_node_image():
+    # A new image requires both a re-pull and a service re-render, since
+    # the image name is baked into the unit file.
+    remove_state('calico.image.pulled')
+    remove_state('calico.service.installed')
+
+
+@when('calico.service.installed', 'calico.pool.configured')
+def disable_vxlan_tx_checksumming():
+    '''Workaround for https://github.com/projectcalico/calico/issues/3145
+
+    Turns off TX checksum offload on the vxlan.calico device when VXLAN
+    is in use and the workaround is enabled in config.
+    '''
+    config = charm_config()
+
+    if config['disable-vxlan-tx-checksumming'] and config['vxlan'] != 'Never':
+        cmd = ['ethtool', '-K', 'vxlan.calico', 'tx-checksum-ip-generic',
+               'off']
+        try:
+            check_call(cmd)
+        except CalledProcessError:
+            # Presumably the vxlan.calico device doesn't exist yet;
+            # report waiting and retry on a later hook.
+            msg = 'Waiting to retry disabling VXLAN TX checksumming'
+            log(msg)
+            status.waiting(msg)
+
+
+def calicoctl_get(*args):
+    '''Fetch a Calico resource via `calicoctl get` and return it parsed
+    from YAML.'''
+    args = ['get', '-o', 'yaml', '--export'] + list(args)
+    output = calicoctl(*args)
+    result = yaml.safe_load(output)
+    return result
+
+
+def calicoctl_apply(data):
+    '''Apply a resource dict by writing it to a temp file and running
+    `calicoctl apply` on it.'''
+    path = '/tmp/calicoctl-apply.yaml'
+    with open(path, 'w') as f:
+        yaml.dump(data, f)
+    calicoctl('apply', '-f', path)
+
+
+def kubectl(*args):
+    '''Run kubectl with the root kubeconfig; log output and re-raise on
+    failure.'''
+    cmd = ['kubectl', '--kubeconfig=/root/.kube/config'] + list(args)
+    try:
+        return check_output(cmd)
+    except CalledProcessError as e:
+        log(e.output)
+        raise
+
+
+def get_calicoctl_env():
+    '''Environment variables pointing calicoctl at the etcd cluster.'''
+    etcd = endpoint_from_flag('etcd.available')
+    env = {}
+    env['ETCD_ENDPOINTS'] = etcd.get_connection_string()
+    env['ETCD_KEY_FILE'] = ETCD_KEY_PATH
+    env['ETCD_CERT_FILE'] = ETCD_CERT_PATH
+    env['ETCD_CA_CERT_FILE'] = ETCD_CA_PATH
+    return env
+
+
+def get_unit_as_number():
+    '''Resolve this unit's BGP AS number from charm config.
+
+    Precedence: explicit unit-as-numbers entry, then the most specific
+    matching subnet in subnet-as-numbers, else None (no override).
+    '''
+    config = charm_config()
+
+    # Check for matching unit rule
+    unit_id = get_unit_id()
+    unit_as_numbers = yaml.safe_load(config['unit-as-numbers'])
+    if unit_id in unit_as_numbers:
+        as_number = unit_as_numbers[unit_id]
+        return as_number
+
+    # Check for matching subnet rule
+    subnet_as_numbers = yaml.safe_load(config['subnet-as-numbers'])
+    subnets = filter_local_subnets(subnet_as_numbers)
+    if subnets:
+        # longest prefix (most specific subnet) wins
+        subnets.sort(key=lambda subnet: -subnet.prefixlen)
+        subnet = subnets[0]
+        as_number = subnet_as_numbers[str(subnet)]
+        return as_number
+
+    # No AS number specified for this unit.
+    return None
+
+
+def filter_local_subnets(subnets):
+    '''Return the given subnets (as ip_network objects) that contain
+    this unit's bind address.'''
+    ip_address = get_bind_address()
+    ip_address = ipaddress.ip_address(ip_address)  # IP address
+    subnets = [ipaddress.ip_network(subnet) for subnet in subnets]
+    subnets = [subnet for subnet in subnets if ip_address in subnet]
+    return subnets
+
+
+def get_unit_id():
+    '''Numeric unit index, e.g. 2 for unit "calico/2".'''
+    return int(local_unit().split('/')[1])
+
+
+def get_route_reflector_cluster_id():
+    '''Cluster ID for this unit from route-reflector-cluster-ids config,
+    or None when no entry matches.'''
+    config = charm_config()
+    route_reflector_cluster_ids = yaml.safe_load(
+        config['route-reflector-cluster-ids']
+    )
+    unit_id = get_unit_id()
+    return route_reflector_cluster_ids.get(unit_id)
+
+
+def get_network(cidr):
+    '''Convert a CIDR to a network instance.'''
+    return ipaddress.ip_interface(cidr.strip()).network
+
+
+def get_networks(cidrs):
+    '''Convert a comma-separated list of CIDRs to a list of networks.'''
+    return [get_network(cidr) for cidr in cidrs.split(',')]
diff --git a/calico/reactive/leadership.py b/calico/reactive/leadership.py
new file mode 100644
index 0000000..29c6f3a
--- /dev/null
+++ b/calico/reactive/leadership.py
@@ -0,0 +1,68 @@
+# Copyright 2015-2016 Canonical Ltd.
+#
+# This file is part of the Leadership Layer for Juju.
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 3, as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranties of
+# MERCHANTABILITY, SATISFACTORY QUALITY, or FITNESS FOR A PARTICULAR
+# PURPOSE. See the GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+from charmhelpers.core import hookenv
+from charmhelpers.core import unitdata
+
+from charms import reactive
+from charms.leadership import leader_get, leader_set
+
+
+__all__ = ['leader_get', 'leader_set'] # Backwards compatibility
+
+
+def initialize_leadership_state():
+    '''Initialize leadership.* states from the hook environment.
+
+    Invoked by hookenv.atstart() so states are available in
+    @hook decorated handlers.
+    '''
+    is_leader = hookenv.is_leader()
+    if is_leader:
+        hookenv.log('Initializing Leadership Layer (is leader)')
+    else:
+        hookenv.log('Initializing Leadership Layer (is follower)')
+
+    reactive.helpers.toggle_state('leadership.is_leader', is_leader)
+
+    # Diff leader settings cached in unitdata against the current ones
+    # to derive the leadership.changed.* / leadership.set.* states.
+    previous = unitdata.kv().getrange('leadership.settings.', strip=True)
+    current = hookenv.leader_get()
+
+    # Handle deletions.
+    for key in set(previous.keys()) - set(current.keys()):
+        current[key] = None
+
+    any_changed = False
+    for key, value in current.items():
+        reactive.helpers.toggle_state('leadership.changed.{}'.format(key),
+                                      value != previous.get(key))
+        if value != previous.get(key):
+            any_changed = True
+        reactive.helpers.toggle_state('leadership.set.{}'.format(key),
+                                      value is not None)
+    reactive.helpers.toggle_state('leadership.changed', any_changed)
+
+    # Persist the current settings as the baseline for the next hook.
+    unitdata.kv().update(current, prefix='leadership.settings.')
+
+
+# Per https://github.com/juju-solutions/charms.reactive/issues/33,
+# this module may be imported multiple times so ensure the
+# initialization hook is only registered once. I have to piggy back
+# onto the namespace of a module imported before reactive discovery
+# to do this.
+if not hasattr(reactive, '_leadership_registered'):
+    hookenv.atstart(initialize_leadership_state)
+    reactive._leadership_registered = True
diff --git a/calico/reactive/status.py b/calico/reactive/status.py
new file mode 100644
index 0000000..2f33f3f
--- /dev/null
+++ b/calico/reactive/status.py
@@ -0,0 +1,4 @@
+from charms import layer
+
+
+layer.status._initialize()
diff --git a/calico/requirements.txt b/calico/requirements.txt
new file mode 100644
index 0000000..55543d9
--- /dev/null
+++ b/calico/requirements.txt
@@ -0,0 +1,3 @@
+mock
+flake8
+pytest
diff --git a/calico/revision b/calico/revision
new file mode 100644
index 0000000..c227083
--- /dev/null
+++ b/calico/revision
@@ -0,0 +1 @@
+0
\ No newline at end of file
diff --git a/calico/script/bootstrap b/calico/script/bootstrap
new file mode 100644
index 0000000..b69771c
--- /dev/null
+++ b/calico/script/bootstrap
@@ -0,0 +1,8 @@
+#!/bin/bash
+
+set -x
+
+# Build-host dependencies: docker for building image resources, the
+# charm snap for charm build/push, and yq for parsing charm CLI output.
+sudo apt update
+sudo apt install -qyf docker.io
+sudo snap install charm --classic
+sudo snap install yq
diff --git a/calico/script/build b/calico/script/build
new file mode 100644
index 0000000..6bbbc48
--- /dev/null
+++ b/calico/script/build
@@ -0,0 +1,7 @@
+#!/bin/bash
+set -x
+
+export PATH=/snap/bin:$PATH
+# Default build output directory; overridable by the caller.
+: "${CHARM_BUILD_DIR:=/tmp/charms}"
+
+charm build -r --force -o "$CHARM_BUILD_DIR"
diff --git a/calico/script/upload b/calico/script/upload
new file mode 100644
index 0000000..1bb581d
--- /dev/null
+++ b/calico/script/upload
@@ -0,0 +1,53 @@
+#!/bin/bash
+# Push the built charm to the charm store, attach its resources, and
+# optionally release it to $CHANNEL. Requires $NAMESPACE and $CHARM to
+# be set and an authenticated charm store session.
+set -x
+
+export PATH=/snap/bin:$PATH
+
+: "${CHARM_BUILD_DIR:=/tmp/charms}"
+
+# Bail out early if we are not logged into the charm store.
+charm whoami
+RET=$?
+if ((RET > 0)); then
+    echo "Not logged into charmstore"
+    exit 1
+fi
+
+# Attach the calico image/binary resources to the unpublished channel.
+function generate::attachments
+{
+    ./build-calico-resource.sh
+    # calico-node-image may be empty: an empty resource makes the charm
+    # pull the configured image instead (see pull_calico_node_image).
+    touch calico-node-image.tar.gz
+
+    charm attach cs:~"$NAMESPACE"/"$CHARM" --channel unpublished \
+        calico-node-image=calico-node-image.tar.gz
+    charm attach cs:~"$NAMESPACE"/"$CHARM" --channel unpublished \
+        calico=calico-amd64.tar.gz
+    charm attach cs:~"$NAMESPACE"/"$CHARM" --channel unpublished \
+        calico-arm64=calico-arm64.tar.gz
+    charm attach cs:~"$NAMESPACE"/"$CHARM" --channel unpublished \
+        calico-upgrade=calico-upgrade-amd64.tar.gz
+    charm attach cs:~"$NAMESPACE"/"$CHARM" --channel unpublished \
+        calico-upgrade-arm64=calico-upgrade-arm64.tar.gz
+}
+
+
+# Emit "--resource name-rev" arguments for every resource currently in
+# the unpublished channel, for use with `charm release`.
+function generate::resource::argument
+{
+    py_script="
+import sys
+import json
+resources_json = json.load(sys.stdin)
+resource_map = []
+for item in resources_json:
+    resource_map.append(f\"--resource {item['Name']}-{item['Revision']}\")
+
+print(' '.join(resource_map))
+"
+    charm list-resources cs:~"$NAMESPACE"/"$CHARM" --channel unpublished --format json | env python3 -c "$py_script"
+}
+
+URL=$(charm push "$CHARM_BUILD_DIR"/builds/"$CHARM"/. cs:~"$NAMESPACE"/"$CHARM" | yq r - url)
+generate::attachments
+
+if [ "$CHANNEL" != unpublished ]; then
+    charm release "$URL" --channel "$CHANNEL" $(generate::resource::argument)
+fi
diff --git a/calico/templates/10-calico.conflist b/calico/templates/10-calico.conflist
new file mode 100644
index 0000000..5d60e08
--- /dev/null
+++ b/calico/templates/10-calico.conflist
@@ -0,0 +1,33 @@
+{
+ "name": "calico-k8s-network",
+ "cniVersion": "0.3.1",
+ "plugins": [
+ {
+ "type": "calico",
+ "etcd_endpoints": "{{ connection_string }}",
+ "etcd_key_file": "{{ etcd_key_path }}",
+ "etcd_cert_file": "{{ etcd_cert_path }}",
+ "etcd_ca_cert_file": "{{ etcd_ca_path }}",
+ "log_level": "info",
+ {% if mtu -%}
+ "mtu": {{ mtu }},
+ {%- endif %}
+ "ipam": {
+ "type": "calico-ipam",
+ "assign_ipv4": "{{ assign_ipv4 }}",
+ "assign_ipv6": "{{ assign_ipv6 }}"
+ },
+ "policy": {
+ "type": "k8s"
+ },
+ "kubernetes": {
+ "kubeconfig": "{{ kubeconfig_path }}"
+ }
+ },
+ {
+ "type": "portmap",
+ "capabilities": {"portMappings": true},
+ "snat": true
+ }
+ ]
+}
diff --git a/calico/templates/calico-node.service b/calico/templates/calico-node.service
new file mode 100644
index 0000000..214ea7a
--- /dev/null
+++ b/calico/templates/calico-node.service
@@ -0,0 +1,53 @@
+[Unit]
+Description=calico node
+
+[Service]
+User=root
+Environment=ETCD_ENDPOINTS={{ connection_string }}
+# Setting LC_ALL and LANG works around a bug that only occurs on Xenial
+# https://bugs.launchpad.net/bugs/1911220
+Environment=LC_ALL={{ lc_all }}
+Environment=LANG={{ lang }}
+PermissionsStartOnly=true
+ExecStartPre=-/usr/local/sbin/charm-env --charm calico conctl delete calico-node
+ExecStartPre=/bin/mkdir -p /var/run/calico /var/log/calico /var/lib/calico
+# The render context (install_calico_service) supplies "ip"/"ip6"; when
+# there is no IPv4 bind address (ip == "none"), a router ID must be
+# derived from the node name hash instead of the IPv4 address.
+ExecStart=/usr/local/sbin/charm-env --charm calico conctl run \
+    --rm \
+    --net-host \
+    --privileged \
+    --env ETCD_ENDPOINTS={{ connection_string }} \
+    --env ETCD_CA_CERT_FILE={{ etcd_ca_path }} \
+    --env ETCD_CERT_FILE={{ etcd_cert_path }} \
+    --env ETCD_KEY_FILE={{ etcd_key_path }} \
+    --env NODENAME={{ nodename }} \
+    --env IP={{ ip }} \
+    {% if ip == "none" -%}
+    --env CALICO_ROUTER_ID="hash" \
+    {% endif -%}
+    --env IP6={{ ip6 }} \
+    {% if ip6 != "none" -%}
+    --env FELIX_IPV6SUPPORT=true \
+    {% endif -%}
+    --env NO_DEFAULT_POOLS=true \
+    --env AS= \
+    --env CALICO_LIBNETWORK_ENABLED=true \
+    --env CALICO_NETWORKING_BACKEND=bird \
+    --env FELIX_DEFAULTENDPOINTTOHOSTACTION=ACCEPT \
+    --env FELIX_IGNORELOOSERPF={{ ignore_loose_rpf | string | lower }} \
+    {% if mtu -%}
+    --env FELIX_IPINIPMTU={{ mtu }} \
+    --env FELIX_VXLANMTU={{ mtu }} \
+    {% endif -%}
+    --mount /lib/modules:/lib/modules \
+    --mount /var/run/calico:/var/run/calico \
+    --mount /var/log/calico:/var/log/calico \
+    --mount /var/lib/calico:/var/lib/calico \
+    --mount /opt/calicoctl:/opt/calicoctl \
+    --name calico-node \
+    {{ calico_node_image }}
+ExecStop=-/usr/local/sbin/charm-env --charm calico conctl delete calico-node
+Restart=always
+RestartSec=10
+
+[Install]
+WantedBy=multi-user.target
diff --git a/calico/templates/calicoctl b/calico/templates/calicoctl
new file mode 100644
index 0000000..98fb522
--- /dev/null
+++ b/calico/templates/calicoctl
@@ -0,0 +1,4 @@
+#!/bin/sh
+# Operator-facing wrapper: load the etcd connection environment, then
+# exec the real calicoctl binary with the caller's arguments.
+set -eu
+. /opt/calicoctl/calicoctl.env
+exec /opt/calicoctl/calicoctl "$@"
diff --git a/calico/templates/policy-controller.yaml b/calico/templates/policy-controller.yaml
new file mode 100644
index 0000000..7611de0
--- /dev/null
+++ b/calico/templates/policy-controller.yaml
@@ -0,0 +1,155 @@
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ name: calico-kube-controllers
+ namespace: kube-system
+---
+# Include a clusterrole for the kube-controllers component,
+# and bind it to the calico-kube-controllers serviceaccount.
+kind: ClusterRole
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+ name: calico-kube-controllers
+rules:
+ # Pods are monitored for changing labels.
+ # The node controller monitors Kubernetes nodes.
+ # Namespace and serviceaccount labels are used for policy.
+ - apiGroups:
+ - ""
+ - extensions
+ resources:
+ - pods
+ - nodes
+ - namespaces
+ - serviceaccounts
+ - networkpolicies
+ verbs:
+ - watch
+ - list
+ - apiGroups:
+ - networking.k8s.io
+ resources:
+ - networkpolicies
+ verbs:
+ - watch
+ - list
+---
+kind: ClusterRoleBinding
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+ name: calico-kube-controllers
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: calico-kube-controllers
+subjects:
+- kind: ServiceAccount
+ name: calico-kube-controllers
+ namespace: kube-system
+---
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: calico-kube-controllers
+ namespace: kube-system
+ labels:
+ k8s-app: calico-kube-controllers
+ cdk-restart-on-ca-change: "true"
+spec:
+ # Only a single instance of this pod should be
+ # active at a time. Since this pod is run as a Deployment,
+ # Kubernetes will ensure the pod is recreated in case of failure,
+ # removing the need for passive backups.
+ selector:
+ matchLabels:
+ k8s-app: calico-kube-controllers
+ replicas: 1
+ strategy:
+ type: Recreate
+ template:
+ metadata:
+ name: calico-kube-controllers
+ namespace: kube-system
+ labels:
+ k8s-app: calico-kube-controllers
+ annotations:
+ # annotate etcd cert modification time, so that when it changes, k8s
+ # will restart the pod
+ cdk-etcd-cert-last-modified: "{{ etcd_cert_last_modified }}"
+ spec:
+ hostNetwork: true
+ serviceAccountName: calico-kube-controllers
+ containers:
+ - name: calico-kube-controllers
+ image: {{ calico_policy_image }}
+ env:
+ - name: ETCD_ENDPOINTS
+ value: {{ connection_string }}
+ - name: ETCD_CA_CERT_FILE
+ value: {{ etcd_ca_path }}
+ - name: ETCD_CERT_FILE
+ value: {{ etcd_cert_path }}
+ - name: ETCD_KEY_FILE
+ value: {{ etcd_key_path }}
+ volumeMounts:
+ - name: calicoctl
+ mountPath: /opt/calicoctl
+ volumes:
+ - name: calicoctl
+ hostPath:
+ path: /opt/calicoctl
+---
+kind: ClusterRole
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+ name: calico-node
+rules:
+ - apiGroups:
+ - ""
+ resources:
+ - pods
+ - nodes
+ - namespaces
+ verbs:
+ - get
+ - apiGroups:
+ - ""
+ resources:
+ - nodes/status
+ verbs:
+ - patch
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+ name: calico-node
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: calico-node
+subjects:
+- kind: ServiceAccount
+ name: calico-node
+ namespace: kube-system
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+ name: namespace-reader
+rules:
+- apiGroups: [""]
+ resources: ["namespaces"]
+ verbs: ["get"]
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+ name: nodes-namespace-reader
+subjects:
+- apiGroup: rbac.authorization.k8s.io
+ kind: Group
+ name: system:nodes
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: namespace-reader
diff --git a/calico/tests/00-setup b/calico/tests/00-setup
new file mode 100755
index 0000000..f0616a5
--- /dev/null
+++ b/calico/tests/00-setup
@@ -0,0 +1,5 @@
+#!/bin/bash
+
+# Install amulet and requests for the legacy integration tests.
+sudo add-apt-repository ppa:juju/stable -y
+sudo apt-get update
+sudo apt-get install amulet python-requests -y
diff --git a/calico/tests/10-deploy b/calico/tests/10-deploy
new file mode 100755
index 0000000..dd2c51f
--- /dev/null
+++ b/calico/tests/10-deploy
@@ -0,0 +1,31 @@
+#!/usr/bin/python3
+
+import amulet
+import requests
+import unittest
+
+
+class TestCharm(unittest.TestCase):
+    '''Amulet integration test: deploy the charm and probe it over HTTP.
+
+    NOTE(review): the application name and the HTTP check look like
+    charm-create boilerplate; confirm they apply to this charm.
+    '''
+    def setUp(self):
+        # Deploy a fresh environment and wait for all units to settle.
+        self.d = amulet.Deployment()
+
+        self.d.add('layer-calico-cni')
+        self.d.expose('layer-calico-cni')
+
+        self.d.setup(timeout=900)
+        self.d.sentry.wait()
+
+        self.unit = self.d.sentry['layer-calico-cni'][0]
+
+    def test_service(self):
+        # test we can access over http
+        page = requests.get('http://{}'.format(self.unit.info['public-address']))
+        self.assertEqual(page.status_code, 200)
+        # Now you can use self.d.sentry[SERVICE][UNIT] to address each of the units and perform
+        # more in-depth steps. Each self.d.sentry[SERVICE][UNIT] has the following methods:
+        # - .info - An array of the information of that unit from Juju
+        # - .file(PATH) - Get the details of a file on that unit
+        # - .file_contents(PATH) - Get plain text output of PATH file from that unit
+        # - .directory(PATH) - Get details of directory
+        # - .directory_contents(PATH) - List files and folders in PATH on that unit
+        # - .relation(relation, service:rel) - Get relation data from return service
diff --git a/calico/tests/conftest.py b/calico/tests/conftest.py
new file mode 100644
index 0000000..462d852
--- /dev/null
+++ b/calico/tests/conftest.py
@@ -0,0 +1,6 @@
+import charms.unit_test
+
+
+# Patch the reactive framework and runtime-only modules so the charm
+# code can be imported under plain pytest.
+charms.unit_test.patch_reactive()
+charms.unit_test.patch_module('conctl')
+charms.unit_test.patch_module('charms.leadership')
diff --git a/calico/tests/test_calico.py b/calico/tests/test_calico.py
new file mode 100644
index 0000000..1923e1f
--- /dev/null
+++ b/calico/tests/test_calico.py
@@ -0,0 +1,16 @@
+from charmhelpers.core.hookenv import is_leader # patched
+from charmhelpers.core.host import service_running # patched
+from reactive import calico
+
+
+def test_series_upgrade():
+    '''ready() must report blocked (and only blocked) during a series
+    upgrade, regardless of leadership or service state.'''
+    calico.set_state('upgrade.series.in-progress')
+    is_leader.return_value = False
+    service_running.return_value = True
+    # status helpers are mocks: no status calls before ready() runs
+    assert calico.status.blocked.call_count == 0
+    assert calico.status.waiting.call_count == 0
+    assert calico.status.active.call_count == 0
+    calico.ready()
+    assert calico.status.blocked.call_count == 1
+    assert calico.status.waiting.call_count == 0
+    assert calico.status.active.call_count == 0
diff --git a/calico/tox.ini b/calico/tox.ini
new file mode 100644
index 0000000..b8ee144
--- /dev/null
+++ b/calico/tox.ini
@@ -0,0 +1,18 @@
+[tox]
+skipsdist = True
+envlist = lint,py3
+
+[testenv]
+basepython = python3
+setenv =
+ PYTHONPATH={toxinidir}:{toxinidir}/lib
+deps =
+ pyyaml
+ pytest
+ flake8
+ ipdb
+ git+https://github.com/juju-solutions/charms.unit_test/#egg=charms.unit_test
+commands = pytest --tb native -s {posargs}
+
+[testenv:lint]
+commands = flake8 {toxinidir}/lib {toxinidir}/reactive {toxinidir}/tests
diff --git a/calico/version b/calico/version
new file mode 100644
index 0000000..91808cc
--- /dev/null
+++ b/calico/version
@@ -0,0 +1 @@
+0ea81f0c
\ No newline at end of file
diff --git a/calico/wheelhouse.txt b/calico/wheelhouse.txt
new file mode 100644
index 0000000..ccf7d4a
--- /dev/null
+++ b/calico/wheelhouse.txt
@@ -0,0 +1,23 @@
+# layer:basic
+# pip is pinned to <19.0 to avoid https://github.com/pypa/pip/issues/6164
+# even with installing setuptools before upgrading pip ends up with pip seeing
+# the older setuptools at the system level if include_system_packages is true
+pip>=18.1,<19.0
+# pin Jinja2, PyYAML and MarkupSafe to the last versions supporting python 3.5
+# for trusty
+Jinja2<=2.10.1
+PyYAML<=5.2
+MarkupSafe<2.0.0
+setuptools<42
+setuptools-scm<=1.17.0
+charmhelpers>=0.4.0,<1.0.0
+charms.reactive>=0.1.0,<2.0.0
+wheel<0.34
+# pin netaddr to avoid pulling importlib-resources
+netaddr<=0.7.19
+
+# calico
+conctl-py35==0.1.2
+# pin click to avoid bringing in incompatible setuptools>=42
+click<8.0
+
diff --git a/calico/wheelhouse/Jinja2-2.10.1.tar.gz b/calico/wheelhouse/Jinja2-2.10.1.tar.gz
new file mode 100644
index 0000000..ffd1054
Binary files /dev/null and b/calico/wheelhouse/Jinja2-2.10.1.tar.gz differ
diff --git a/calico/wheelhouse/MarkupSafe-1.1.1.tar.gz b/calico/wheelhouse/MarkupSafe-1.1.1.tar.gz
new file mode 100644
index 0000000..a6dad8e
Binary files /dev/null and b/calico/wheelhouse/MarkupSafe-1.1.1.tar.gz differ
diff --git a/calico/wheelhouse/PyYAML-5.2.tar.gz b/calico/wheelhouse/PyYAML-5.2.tar.gz
new file mode 100644
index 0000000..666d12a
Binary files /dev/null and b/calico/wheelhouse/PyYAML-5.2.tar.gz differ
diff --git a/calico/wheelhouse/Tempita-0.5.2.tar.gz b/calico/wheelhouse/Tempita-0.5.2.tar.gz
new file mode 100644
index 0000000..755befc
Binary files /dev/null and b/calico/wheelhouse/Tempita-0.5.2.tar.gz differ
diff --git a/calico/wheelhouse/charmhelpers-0.20.22.tar.gz b/calico/wheelhouse/charmhelpers-0.20.22.tar.gz
new file mode 100644
index 0000000..bd5d222
Binary files /dev/null and b/calico/wheelhouse/charmhelpers-0.20.22.tar.gz differ
diff --git a/calico/wheelhouse/charms.reactive-1.4.1.tar.gz b/calico/wheelhouse/charms.reactive-1.4.1.tar.gz
new file mode 100644
index 0000000..03bc1fe
Binary files /dev/null and b/calico/wheelhouse/charms.reactive-1.4.1.tar.gz differ
diff --git a/calico/wheelhouse/click-7.1.2.tar.gz b/calico/wheelhouse/click-7.1.2.tar.gz
new file mode 100644
index 0000000..698411c
Binary files /dev/null and b/calico/wheelhouse/click-7.1.2.tar.gz differ
diff --git a/calico/wheelhouse/conctl-py35-0.1.2.tar.gz b/calico/wheelhouse/conctl-py35-0.1.2.tar.gz
new file mode 100644
index 0000000..651dde5
Binary files /dev/null and b/calico/wheelhouse/conctl-py35-0.1.2.tar.gz differ
diff --git a/calico/wheelhouse/netaddr-0.7.19.tar.gz b/calico/wheelhouse/netaddr-0.7.19.tar.gz
new file mode 100644
index 0000000..cc31d9d
Binary files /dev/null and b/calico/wheelhouse/netaddr-0.7.19.tar.gz differ
diff --git a/calico/wheelhouse/pbr-5.6.0.tar.gz b/calico/wheelhouse/pbr-5.6.0.tar.gz
new file mode 100644
index 0000000..0d5c965
Binary files /dev/null and b/calico/wheelhouse/pbr-5.6.0.tar.gz differ
diff --git a/calico/wheelhouse/pip-18.1.tar.gz b/calico/wheelhouse/pip-18.1.tar.gz
new file mode 100644
index 0000000..a18192d
Binary files /dev/null and b/calico/wheelhouse/pip-18.1.tar.gz differ
diff --git a/calico/wheelhouse/pyaml-20.4.0.tar.gz b/calico/wheelhouse/pyaml-20.4.0.tar.gz
new file mode 100644
index 0000000..0d5fd76
Binary files /dev/null and b/calico/wheelhouse/pyaml-20.4.0.tar.gz differ
diff --git a/calico/wheelhouse/setuptools-41.6.0.zip b/calico/wheelhouse/setuptools-41.6.0.zip
new file mode 100644
index 0000000..3345759
Binary files /dev/null and b/calico/wheelhouse/setuptools-41.6.0.zip differ
diff --git a/calico/wheelhouse/setuptools_scm-1.17.0.tar.gz b/calico/wheelhouse/setuptools_scm-1.17.0.tar.gz
new file mode 100644
index 0000000..43b16c7
Binary files /dev/null and b/calico/wheelhouse/setuptools_scm-1.17.0.tar.gz differ
diff --git a/calico/wheelhouse/six-1.16.0.tar.gz b/calico/wheelhouse/six-1.16.0.tar.gz
new file mode 100644
index 0000000..5bf3a27
Binary files /dev/null and b/calico/wheelhouse/six-1.16.0.tar.gz differ
diff --git a/calico/wheelhouse/wheel-0.33.6.tar.gz b/calico/wheelhouse/wheel-0.33.6.tar.gz
new file mode 100644
index 0000000..c922c4e
Binary files /dev/null and b/calico/wheelhouse/wheel-0.33.6.tar.gz differ
diff --git a/containerd/.build.manifest b/containerd/.build.manifest
new file mode 100644
index 0000000..d57a8eb
--- /dev/null
+++ b/containerd/.build.manifest
@@ -0,0 +1,646 @@
+{
+ "layers": [
+ {
+ "branch": "refs/heads/master\nrefs/heads/stable",
+ "rev": "fcdcea4e5de3e1556c24e6704607862d0ba00a56",
+ "url": "layer:options"
+ },
+ {
+ "branch": "refs/heads/master\nrefs/heads/stable",
+ "rev": "623e69c7b432456fd4364f6e1835424fd6b5425e",
+ "url": "layer:basic"
+ },
+ {
+ "branch": "refs/heads/master\nrefs/heads/stable",
+ "rev": "527dd64fc4b9a6b0f8d80a3c2c0b865155050275",
+ "url": "layer:debug"
+ },
+ {
+ "branch": "refs/heads/master\nrefs/heads/stable",
+ "rev": "a7d7b6423db37a47611310039e6ed1929c0a2eab",
+ "url": "layer:status"
+ },
+ {
+ "branch": "refs/heads/master\nrefs/heads/stable",
+ "rev": "be187bfe2ed511fc7ee29bf25f7374a2d6d34b2d",
+ "url": "layer:container-runtime-common"
+ },
+ {
+ "branch": "refs/heads/stable",
+ "rev": "8a4e635092c98cef3eecd27063c7b2ae030e740e",
+ "url": "containerd"
+ },
+ {
+ "branch": "refs/heads/master\nrefs/heads/stable",
+ "rev": "6f927f10b97f45c566481cf57a29d433f17373e1",
+ "url": "interface:container-runtime"
+ },
+ {
+ "branch": "refs/heads/master\nrefs/heads/stable",
+ "rev": "b59ce0c44bc52c789175750ce18b42f76c9a4578",
+ "url": "interface:untrusted-container-runtime"
+ },
+ {
+ "branch": "refs/heads/master\nrefs/heads/stable",
+ "rev": "365ec9f348ccd561a9ec3e084c826f134676439e",
+ "url": "interface:docker-registry"
+ }
+ ],
+ "signatures": {
+ ".build.manifest": [
+ "build",
+ "dynamic",
+ "unchecked"
+ ],
+ ".gitignore": [
+ "containerd",
+ "static",
+ "0046c3f0ce1c6b217c55dbc60caf1af0287efb5502984c6554e94fd4a6b59628"
+ ],
+ ".travis.yml": [
+ "containerd",
+ "static",
+ "ab2c8c5a3ae50ec307e9e19ec30a20d4765161e0cb3bddb66f09c4a1b72b7f71"
+ ],
+ ".travis/profile-update.yaml": [
+ "layer:basic",
+ "static",
+ "731e20aa59bf61c024d317ad630e478301a9386ccc0afe56e6c1c09db07ac83b"
+ ],
+ "LICENSE": [
+ "containerd",
+ "static",
+ "c67d5f530080ecc4136a22d0543049b71016ea5dbbaa1d02b6d128d2d8fe3888"
+ ],
+ "Makefile": [
+ "layer:basic",
+ "static",
+ "b7ab3a34e5faf79b96a8632039a0ad0aa87f2a9b5f0ba604e007cafb22190301"
+ ],
+ "README.md": [
+ "containerd",
+ "static",
+ "96e79c4e24b3f1cef60d2340d171d0200797e3ea2419a3e42ec631fa3e0126be"
+ ],
+ "actions.yaml": [
+ "containerd",
+ "dynamic",
+ "58f291b03b7a1fbe3637633ed799765ce5b3057b8eb75db9dc77e4f0c045427b"
+ ],
+ "actions/debug": [
+ "layer:debug",
+ "static",
+ "db0a42dae4c5045b2c06385bf22209dfe0e2ded55822ef847d84b01d9ff2b046"
+ ],
+ "actions/upgrade-containerd": [
+ "containerd",
+ "static",
+ "12a77b76f2e4043b2405ea631ea0c736c55c4570256695719a657bccf6a36296"
+ ],
+ "bin/charm-env": [
+ "layer:basic",
+ "static",
+ "fb6a20fac4102a6a4b6ffe903fcf666998f9a95a3647e6f9af7a1eeb44e58fd5"
+ ],
+ "bin/layer_option": [
+ "layer:options",
+ "static",
+ "e959bf29da4c5edff28b2602c24113c4df9e25cdc9f2aa3b5d46c8577b2a40cc"
+ ],
+ "config.yaml": [
+ "containerd",
+ "dynamic",
+ "93c92f8e530d50a436eab8dd2573a78e2d911aa53e9866b08ac61d6efec7e3f4"
+ ],
+ "copyright": [
+ "layer:status",
+ "static",
+ "7c0e36e618a8544faaaa3f8e0533c2f1f4a18bcacbdd8b99b537742e6b587d58"
+ ],
+ "copyright.layer-basic": [
+ "layer:basic",
+ "static",
+ "f6740d66fd60b60f2533d9fcb53907078d1e20920a0219afce7182e2a1c97629"
+ ],
+ "copyright.layer-options": [
+ "layer:options",
+ "static",
+ "f6740d66fd60b60f2533d9fcb53907078d1e20920a0219afce7182e2a1c97629"
+ ],
+ "debug-scripts/charm-unitdata": [
+ "layer:debug",
+ "static",
+ "c952b9d31f3942e4e722cb3e70f5119707b69b8e76cc44e2e906bc6d9aef49b7"
+ ],
+ "debug-scripts/filesystem": [
+ "layer:debug",
+ "static",
+ "d29cc8687f4422d024001c91b1ac756ee6bf8a2a125bc98db1199ba775eb8fd7"
+ ],
+ "debug-scripts/juju-logs": [
+ "layer:debug",
+ "static",
+ "d260b35753a917368cb8c64c1312546a0a40ef49cba84c75bc6369549807c55e"
+ ],
+ "debug-scripts/juju-network-get": [
+ "layer:debug",
+ "static",
+ "6d849a1f8e6569bd0d5ea38299f7937cb8b36a5f505e3532f6c756eabeb8b6c5"
+ ],
+ "debug-scripts/network": [
+ "layer:debug",
+ "static",
+ "714afae5dcb45554ff1f05285501e3b7fcc656c8de51217e263b93dab25a9d2e"
+ ],
+ "debug-scripts/packages": [
+ "layer:debug",
+ "static",
+ "e8177102dc2ca853cb9272c1257cf2cfd5253d2a074e602d07c8bc4ea8e27c75"
+ ],
+ "debug-scripts/sysctl": [
+ "layer:debug",
+ "static",
+ "990035b320e09cc2228e1f2f880e795d51118b2959339eacddff9cbb74349c6a"
+ ],
+ "debug-scripts/systemd": [
+ "layer:debug",
+ "static",
+ "23ddf533198bf5b1ce723acde31ada806aab8539292b514c721d8ec08af74106"
+ ],
+ "docs/status.md": [
+ "layer:status",
+ "static",
+ "975dec9f8c938196e102e954a80226bda293407c4e5ae857c118bf692154702a"
+ ],
+ "hooks/config-changed": [
+ "layer:basic",
+ "dynamic",
+ "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7"
+ ],
+ "hooks/containerd-relation-broken": [
+ "layer:basic",
+ "dynamic",
+ "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7"
+ ],
+ "hooks/containerd-relation-changed": [
+ "layer:basic",
+ "dynamic",
+ "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7"
+ ],
+ "hooks/containerd-relation-created": [
+ "layer:basic",
+ "dynamic",
+ "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7"
+ ],
+ "hooks/containerd-relation-departed": [
+ "layer:basic",
+ "dynamic",
+ "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7"
+ ],
+ "hooks/containerd-relation-joined": [
+ "layer:basic",
+ "dynamic",
+ "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7"
+ ],
+ "hooks/docker-registry-relation-broken": [
+ "layer:basic",
+ "dynamic",
+ "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7"
+ ],
+ "hooks/docker-registry-relation-changed": [
+ "layer:basic",
+ "dynamic",
+ "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7"
+ ],
+ "hooks/docker-registry-relation-created": [
+ "layer:basic",
+ "dynamic",
+ "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7"
+ ],
+ "hooks/docker-registry-relation-departed": [
+ "layer:basic",
+ "dynamic",
+ "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7"
+ ],
+ "hooks/docker-registry-relation-joined": [
+ "layer:basic",
+ "dynamic",
+ "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7"
+ ],
+ "hooks/hook.template": [
+ "layer:basic",
+ "static",
+ "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7"
+ ],
+ "hooks/install": [
+ "layer:basic",
+ "dynamic",
+ "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7"
+ ],
+ "hooks/leader-elected": [
+ "layer:basic",
+ "dynamic",
+ "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7"
+ ],
+ "hooks/leader-settings-changed": [
+ "layer:basic",
+ "dynamic",
+ "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7"
+ ],
+ "hooks/post-series-upgrade": [
+ "layer:basic",
+ "dynamic",
+ "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7"
+ ],
+ "hooks/pre-series-upgrade": [
+ "layer:basic",
+ "dynamic",
+ "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7"
+ ],
+ "hooks/relations/container-runtime/.gitignore": [
+ "interface:container-runtime",
+ "static",
+ "a2ebfecdb6c1b58267fbe97e6e2ac02c2b963df7673fc1047270f0f0cff16732"
+ ],
+ "hooks/relations/container-runtime/LICENSE": [
+ "interface:container-runtime",
+ "static",
+ "c71d239df91726fc519c6eb72d318ec65820627232b2f796219e87dcf35d0ab4"
+ ],
+ "hooks/relations/container-runtime/README.md": [
+ "interface:container-runtime",
+ "static",
+ "44273265818229d2c858c3af0e0eee3a7df05aaa9ab20d28c3872190d4b48611"
+ ],
+ "hooks/relations/container-runtime/__init__.py": [
+ "interface:container-runtime",
+ "static",
+ "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"
+ ],
+ "hooks/relations/container-runtime/interface.yaml": [
+ "interface:container-runtime",
+ "static",
+ "e5343dcb11a6817a6050df4ea1c463eeaa0dd4777098566d4e27b056775426c6"
+ ],
+ "hooks/relations/container-runtime/provides.py": [
+ "interface:container-runtime",
+ "static",
+ "4e818da222f507604179a828629787a1250083c847277f6b5b8e028cfbbb6d06"
+ ],
+ "hooks/relations/container-runtime/requires.py": [
+ "interface:container-runtime",
+ "static",
+ "95285168b02f1f70be15c03098833a85e60fa1658ed72a46acd42e8e85ded761"
+ ],
+ "hooks/relations/docker-registry/.gitignore": [
+ "interface:docker-registry",
+ "static",
+ "83b4ca18cc39800b1d260b5633cd0252e21501b21e7c33e718db44f1a68a09b8"
+ ],
+ "hooks/relations/docker-registry/LICENSE": [
+ "interface:docker-registry",
+ "static",
+ "c71d239df91726fc519c6eb72d318ec65820627232b2f796219e87dcf35d0ab4"
+ ],
+ "hooks/relations/docker-registry/README.md": [
+ "interface:docker-registry",
+ "static",
+ "a42f9e72af9a6fad058e8f156e79aeaadccf3e2b03b5b7dc750704b168f4347c"
+ ],
+ "hooks/relations/docker-registry/__init__.py": [
+ "interface:docker-registry",
+ "static",
+ "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"
+ ],
+ "hooks/relations/docker-registry/interface.yaml": [
+ "interface:docker-registry",
+ "static",
+ "29816ed0d72772b26108012481b64db1385ff42d3349fa175bb05331ca0f933c"
+ ],
+ "hooks/relations/docker-registry/provides.py": [
+ "interface:docker-registry",
+ "static",
+ "618cc1b19c2e77106363938931a342918306d731be61665167bad4615a30d9c7"
+ ],
+ "hooks/relations/docker-registry/requires.py": [
+ "interface:docker-registry",
+ "static",
+ "9db322b76a6bbdc15b36fba077bddbff7dfc6dbfe9c245fed548c00e6c354578"
+ ],
+ "hooks/relations/untrusted-container-runtime/.gitignore": [
+ "interface:untrusted-container-runtime",
+ "static",
+ "a2ebfecdb6c1b58267fbe97e6e2ac02c2b963df7673fc1047270f0f0cff16732"
+ ],
+ "hooks/relations/untrusted-container-runtime/LICENSE": [
+ "interface:untrusted-container-runtime",
+ "static",
+ "c71d239df91726fc519c6eb72d318ec65820627232b2f796219e87dcf35d0ab4"
+ ],
+ "hooks/relations/untrusted-container-runtime/README.md": [
+ "interface:untrusted-container-runtime",
+ "static",
+ "e3dc7db9ee98b716cb9a3a281fad88ca313bc11888a0da2f4b63c4306d91b64f"
+ ],
+ "hooks/relations/untrusted-container-runtime/__init__.py": [
+ "interface:untrusted-container-runtime",
+ "static",
+ "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"
+ ],
+ "hooks/relations/untrusted-container-runtime/interface.yaml": [
+ "interface:untrusted-container-runtime",
+ "static",
+ "1fcb0305295206dc2b9926bf1870cae2c6cd8eee6eef72b6060c85e4f2109a45"
+ ],
+ "hooks/relations/untrusted-container-runtime/provides.py": [
+ "interface:untrusted-container-runtime",
+ "static",
+ "05a52be7ad18df5cac9fb5dcc27c2ab24fe12e65fa809e0ea4d395dbcb36e6f2"
+ ],
+ "hooks/relations/untrusted-container-runtime/requires.py": [
+ "interface:untrusted-container-runtime",
+ "static",
+ "958e03e254ee27bee761a6af3e032a273204b356dc51438489cde726b1a6e060"
+ ],
+ "hooks/start": [
+ "layer:basic",
+ "dynamic",
+ "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7"
+ ],
+ "hooks/stop": [
+ "layer:basic",
+ "dynamic",
+ "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7"
+ ],
+ "hooks/untrusted-relation-broken": [
+ "layer:basic",
+ "dynamic",
+ "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7"
+ ],
+ "hooks/untrusted-relation-changed": [
+ "layer:basic",
+ "dynamic",
+ "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7"
+ ],
+ "hooks/untrusted-relation-created": [
+ "layer:basic",
+ "dynamic",
+ "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7"
+ ],
+ "hooks/untrusted-relation-departed": [
+ "layer:basic",
+ "dynamic",
+ "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7"
+ ],
+ "hooks/untrusted-relation-joined": [
+ "layer:basic",
+ "dynamic",
+ "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7"
+ ],
+ "hooks/update-status": [
+ "layer:basic",
+ "dynamic",
+ "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7"
+ ],
+ "hooks/upgrade-charm": [
+ "layer:basic",
+ "dynamic",
+ "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7"
+ ],
+ "icon.svg": [
+ "containerd",
+ "static",
+ "6d0d1bf06e7bc7da205c138566c92a35026d9d06ceaaea382e7b2a2b1c829fd4"
+ ],
+ "layer.yaml": [
+ "containerd",
+ "dynamic",
+ "1ef37c9d5f7bb467d9baf12d27f220d9be7eeaee13fe52acedab02aa5d42e7db"
+ ],
+ "lib/charms/layer/__init__.py": [
+ "layer:basic",
+ "static",
+ "dfe0d26c6bf409767de6e2546bc648f150e1b396243619bad3aa0553ab7e0e6f"
+ ],
+ "lib/charms/layer/basic.py": [
+ "layer:basic",
+ "static",
+ "3126b5754ad39402ee27e64527044ddd231ed1cd137fcedaffb51e63a635f108"
+ ],
+ "lib/charms/layer/container_runtime_common.py": [
+ "layer:container-runtime-common",
+ "static",
+ "1c6745c0966687d82da9dc8d10add74389c2b7585149afcb81f59405bde227f3"
+ ],
+ "lib/charms/layer/containerd.py": [
+ "containerd",
+ "static",
+ "e4b1e9001625f1410ef319f0e357172f8a2db9edb45f593f39c16c8f7496e6ba"
+ ],
+ "lib/charms/layer/execd.py": [
+ "layer:basic",
+ "static",
+ "fda8bd491032db1db8ddaf4e99e7cc878c6fb5432efe1f91cadb5b34765d076d"
+ ],
+ "lib/charms/layer/options.py": [
+ "layer:options",
+ "static",
+ "8ae7a07d22542fc964f2d2bee8219d1c78a68dace70a1b38d36d4aea47b1c3b2"
+ ],
+ "lib/charms/layer/status.py": [
+ "layer:status",
+ "static",
+ "d560a5e07b2e5f2b0f25f30e1f0278b06f3f90c01e4dbad5c83d71efc79018c6"
+ ],
+ "lib/debug_script.py": [
+ "layer:debug",
+ "static",
+ "a4d56f2d3e712b1b5cadb657c7195c6268d0aac6d228991049fd769e0ddaf453"
+ ],
+ "make_docs": [
+ "layer:status",
+ "static",
+ "c990f55c8e879793a62ed8464ee3d7e0d7d2225fdecaf17af24b0df0e2daa8c1"
+ ],
+ "metadata.yaml": [
+ "containerd",
+ "dynamic",
+ "ef674a2122e46ff6ddb60ac60d79af3a56fd6b799e4ccf33cc15a430dac409db"
+ ],
+ "pydocmd.yml": [
+ "layer:status",
+ "static",
+ "11d9293901f32f75f4256ae4ac2073b92ce1d7ef7b6c892ba9fbb98690a0b330"
+ ],
+ "reactive/__init__.py": [
+ "layer:basic",
+ "static",
+ "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"
+ ],
+ "reactive/container_runtime_common.py": [
+ "layer:container-runtime-common",
+ "static",
+ "b612ab91a73e8b6c5bb02dae9a0813fe164e96875c175d48b1e736ab25091a11"
+ ],
+ "reactive/containerd.py": [
+ "containerd",
+ "static",
+ "ca60ebe176530f379308dda0bba4d193acfcf8ce1e7ec923db6438b8b2f74933"
+ ],
+ "reactive/status.py": [
+ "layer:status",
+ "static",
+ "30207fc206f24e91def5252f1c7f7c8e23c0aed0e93076babf5e03c05296d207"
+ ],
+ "requirements.txt": [
+ "layer:basic",
+ "static",
+ "a00f75d80849e5b4fc5ad2e7536f947c25b1a4044b341caa8ee87a92d3a4c804"
+ ],
+ "scripts/enable_grub_cgroups.sh": [
+ "layer:container-runtime-common",
+ "static",
+ "08f39868716900be03e2612a63d5fbbd661599484f05f5164c5d794415cfa260"
+ ],
+ "templates/config.toml": [
+ "containerd",
+ "static",
+ "d84f9f266929e684c0b0a596704f075d26b97d7a0e43b525364a77dc22d2f320"
+ ],
+ "templates/proxy.conf": [
+ "containerd",
+ "static",
+ "a6f8922e02757d6ee914d160b7c677da555629b956a7068c3d2c8da4f56fa19a"
+ ],
+ "tests/conftest.py": [
+ "layer:container-runtime-common",
+ "static",
+ "a93443f61e197f9845d65a72c8cdcb68709eb48451fe9624a23b2509ca69966c"
+ ],
+ "tests/test_cidr_notation.py": [
+ "layer:container-runtime-common",
+ "static",
+ "64e6e8030abe0b9a088d81d7f502a2b8774db8d226c648c34de11a58416eea43"
+ ],
+ "tests/test_merge_config.py": [
+ "layer:container-runtime-common",
+ "static",
+ "fe278ee4e888564dd4c09e2d4dac2db5d914b41fd82d78af30960f02fa8461fc"
+ ],
+ "tests/test_reactive.py": [
+ "layer:container-runtime-common",
+ "static",
+ "432228776392fff168f3f770a9aab4419f4656bffc708e05a2310a8cb87eeaff"
+ ],
+ "tox.ini": [
+ "layer:container-runtime-common",
+ "static",
+ "ecfad595576db991e0910219e2debaf1d4c1e056054f5dbacc8f3e32c0a03840"
+ ],
+ "version": [
+ "containerd",
+ "dynamic",
+ "2737d85a96f3fb093896eb885501ad940a695d5b9bb1d0d3816ace9eb68df82e"
+ ],
+ "wheelhouse.txt": [
+ "containerd",
+ "dynamic",
+ "ff85b4195a997d8df2b05ce61b4e943a2fafb9152a7a7c7d112edd723d9e7d3c"
+ ],
+ "wheelhouse/Jinja2-2.10.1.tar.gz": [
+ "layer:basic",
+ "dynamic",
+ "065c4f02ebe7f7cf559e49ee5a95fb800a9e4528727aec6f24402a5374c65013"
+ ],
+ "wheelhouse/MarkupSafe-1.1.1.tar.gz": [
+ "__pip__",
+ "dynamic",
+ "29872e92839765e546828bb7754a68c418d927cd064fd4708fab9fe9c8bb116b"
+ ],
+ "wheelhouse/PyYAML-5.2.tar.gz": [
+ "layer:basic",
+ "dynamic",
+ "c0ee8eca2c582d29c3c2ec6e2c4f703d1b7f1fb10bc72317355a746057e7346c"
+ ],
+ "wheelhouse/Tempita-0.5.2.tar.gz": [
+ "__pip__",
+ "dynamic",
+ "cacecf0baa674d356641f1d406b8bff1d756d739c46b869a54de515d08e6fc9c"
+ ],
+ "wheelhouse/certifi-2020.12.5.tar.gz": [
+ "__pip__",
+ "dynamic",
+ "1a4995114262bffbc2413b159f2a1a480c969de6e6eb13ee966d470af86af59c"
+ ],
+ "wheelhouse/chardet-4.0.0.tar.gz": [
+ "__pip__",
+ "dynamic",
+ "0d6f53a15db4120f2b08c94f11e7d93d2c911ee118b6b30a04ec3ee8310179fa"
+ ],
+ "wheelhouse/charmhelpers-0.20.21.tar.gz": [
+ "layer:basic",
+ "dynamic",
+ "37dd06f9548724d38352d1eaf91216df9167066745774118481d40974599715c"
+ ],
+ "wheelhouse/charms.reactive-1.4.1.tar.gz": [
+ "layer:basic",
+ "dynamic",
+ "bba21b4fd40b26c240c9ef2aa10c6fdf73592031c68591da4e7ccc46ca9cb616"
+ ],
+ "wheelhouse/idna-2.10.tar.gz": [
+ "__pip__",
+ "dynamic",
+ "b307872f855b18632ce0c21c5e45be78c0ea7ae4c15c828c20788b26921eb3f6"
+ ],
+ "wheelhouse/netaddr-0.7.19.tar.gz": [
+ "layer:basic",
+ "dynamic",
+ "38aeec7cdd035081d3a4c306394b19d677623bf76fa0913f6695127c7753aefd"
+ ],
+ "wheelhouse/pbr-5.6.0.tar.gz": [
+ "__pip__",
+ "dynamic",
+ "42df03e7797b796625b1029c0400279c7c34fd7df24a7d7818a1abb5b38710dd"
+ ],
+ "wheelhouse/pip-18.1.tar.gz": [
+ "layer:basic",
+ "dynamic",
+ "c0a292bd977ef590379a3f05d7b7f65135487b67470f6281289a94e015650ea1"
+ ],
+ "wheelhouse/pyaml-20.4.0.tar.gz": [
+ "__pip__",
+ "dynamic",
+ "29a5c2a68660a799103d6949167bd6c7953d031449d08802386372de1db6ad71"
+ ],
+ "wheelhouse/requests-2.25.1.tar.gz": [
+ "containerd",
+ "dynamic",
+ "27973dd4a904a4f13b263a19c866c13b92a39ed1c964655f025f3f8d3d75b804"
+ ],
+ "wheelhouse/setuptools-41.6.0.zip": [
+ "layer:basic",
+ "dynamic",
+ "6afa61b391dcd16cb8890ec9f66cc4015a8a31a6e1c2b4e0c464514be1a3d722"
+ ],
+ "wheelhouse/setuptools_scm-1.17.0.tar.gz": [
+ "layer:basic",
+ "dynamic",
+ "70a4cf5584e966ae92f54a764e6437af992ba42ac4bca7eb37cc5d02b98ec40a"
+ ],
+ "wheelhouse/six-1.15.0.tar.gz": [
+ "__pip__",
+ "dynamic",
+ "30639c035cdb23534cd4aa2dd52c3bf48f06e5f4a941509c8bafd8ce11080259"
+ ],
+ "wheelhouse/urllib3-1.26.4.tar.gz": [
+ "__pip__",
+ "dynamic",
+ "e7b021f7241115872f92f43c6508082facffbd1c048e3c6e2bb9c2a157e28937"
+ ],
+ "wheelhouse/wheel-0.33.6.tar.gz": [
+ "layer:basic",
+ "dynamic",
+ "10c9da68765315ed98850f8e048347c3eb06dd81822dc2ab1d4fde9dc9702646"
+ ]
+ }
+}
\ No newline at end of file
diff --git a/containerd/.gitignore b/containerd/.gitignore
new file mode 100644
index 0000000..32cbc67
--- /dev/null
+++ b/containerd/.gitignore
@@ -0,0 +1,4 @@
+.coverage
+.tox/
+__pycache__/
+*.pyc
diff --git a/containerd/.travis.yml b/containerd/.travis.yml
new file mode 100644
index 0000000..ba89235
--- /dev/null
+++ b/containerd/.travis.yml
@@ -0,0 +1,10 @@
+language: python
+python:
+ - "3.5"
+ - "3.6"
+ - "3.7"
+ - "3.8"
+install:
+ - pip install tox-travis
+script:
+ - tox
diff --git a/containerd/.travis/profile-update.yaml b/containerd/.travis/profile-update.yaml
new file mode 100644
index 0000000..57f96eb
--- /dev/null
+++ b/containerd/.travis/profile-update.yaml
@@ -0,0 +1,12 @@
+config: {}
+description: Default LXD profile - updated
+devices:
+ eth0:
+ name: eth0
+ parent: lxdbr0
+ nictype: bridged
+ type: nic
+ root:
+ path: /
+ pool: default
+ type: disk
diff --git a/containerd/LICENSE b/containerd/LICENSE
new file mode 100644
index 0000000..1e428b8
--- /dev/null
+++ b/containerd/LICENSE
@@ -0,0 +1,203 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright 2019 Canonical Ltd.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+
diff --git a/containerd/Makefile b/containerd/Makefile
new file mode 100644
index 0000000..a1ad3a5
--- /dev/null
+++ b/containerd/Makefile
@@ -0,0 +1,24 @@
+#!/usr/bin/make
+
+all: lint unit_test
+
+
+.PHONY: clean
+clean:
+ @rm -rf .tox
+
+.PHONY: apt_prereqs
+apt_prereqs:
+ @# Need tox, but don't install the apt version unless we have to (don't want to conflict with pip)
+ @which tox >/dev/null || (sudo apt-get install -y python-pip && sudo pip install tox)
+
+.PHONY: lint
+lint: apt_prereqs
+ @tox --notest
+ @PATH=.tox/py34/bin:.tox/py35/bin flake8 $(wildcard hooks reactive lib unit_tests tests)
+ @charm proof
+
+.PHONY: unit_test
+unit_test: apt_prereqs
+ @echo Starting tests...
+ tox
diff --git a/containerd/README.md b/containerd/README.md
new file mode 100644
index 0000000..9a64349
--- /dev/null
+++ b/containerd/README.md
@@ -0,0 +1,14 @@
+# Charm for Containerd
+
+This subordinate charm deploys the [Containerd](https://containerd.io/)
+engine within a running Juju model. Containerd is an open platform
+for developers and sysadmins to build, ship, and run distributed applications
+in containers.
+
+Containerd focuses on distributing applications as containers that can be quickly
+assembled from components that are run the same on different servers without
+environmental dependencies. This eliminates the friction between development,
+QA, and production environments.
+
+This charm is a component of Charmed Kubernetes. For full information,
+please visit the [official Charmed Kubernetes docs](https://www.ubuntu.com/kubernetes/docs/charm-containerd).
diff --git a/containerd/actions.yaml b/containerd/actions.yaml
new file mode 100644
index 0000000..68539b1
--- /dev/null
+++ b/containerd/actions.yaml
@@ -0,0 +1,4 @@
+"debug":
+ "description": "Collect debug data"
+"upgrade-containerd":
+ "description": "Force upgrades Containerd to latest repository version"
diff --git a/containerd/actions/debug b/containerd/actions/debug
new file mode 100755
index 0000000..8ba160e
--- /dev/null
+++ b/containerd/actions/debug
@@ -0,0 +1,102 @@
+#!/usr/local/sbin/charm-env python3
+
+import os
+import subprocess
+import tarfile
+import tempfile
+import traceback
+from contextlib import contextmanager
+from datetime import datetime
+from charmhelpers.core.hookenv import action_set, local_unit
+
+archive_dir = None
+log_file = None
+
+
+@contextmanager
+def archive_context():
+ """ Open a context with a new temporary directory.
+
+ When the context closes, the directory is archived, and the archive
+ location is added to Juju action output. """
+ global archive_dir
+ global log_file
+ with tempfile.TemporaryDirectory() as temp_dir:
+ name = "debug-" + datetime.now().strftime("%Y%m%d%H%M%S")
+ archive_dir = os.path.join(temp_dir, name)
+ os.makedirs(archive_dir)
+ with open("%s/debug.log" % archive_dir, "w") as log_file:
+ yield
+ os.chdir(temp_dir)
+ tar_path = "/home/ubuntu/%s.tar.gz" % name
+ with tarfile.open(tar_path, "w:gz") as f:
+ f.add(name)
+ action_set({
+ "path": tar_path,
+ "command": "juju scp %s:%s ." % (local_unit(), tar_path),
+ "message": " ".join([
+ "Archive has been created on unit %s." % local_unit(),
+ "Use the juju scp command to copy it to your local machine."
+ ])
+ })
+
+
+def log(msg):
+ """ Log a message that will be included in the debug archive.
+
+ Must be run within archive_context """
+ timestamp = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
+ for line in str(msg).splitlines():
+ log_file.write(timestamp + " | " + line.rstrip() + "\n")
+
+
+def run_script(script):
+ """ Run a single script. Must be run within archive_context """
+ log("Running script: " + script)
+ script_dir = os.path.join(archive_dir, script)
+ os.makedirs(script_dir)
+ env = os.environ.copy()
+ env["PYTHONPATH"] = "lib" # allow same imports as reactive code
+ env["DEBUG_SCRIPT_DIR"] = script_dir
+ with open(script_dir + "/stdout", "w") as stdout:
+ with open(script_dir + "/stderr", "w") as stderr:
+ process = subprocess.Popen(
+ "debug-scripts/" + script,
+ stdout=stdout, stderr=stderr, env=env
+ )
+ try:
+ exit_code = process.wait(timeout=300)
+ except subprocess.TimeoutExpired:
+ log("ERROR: still running, terminating")
+ process.terminate()
+ try:
+ exit_code = process.wait(timeout=10)
+ except subprocess.TimeoutExpired:
+ log("ERROR: still running, killing")
+ process.kill()
+ exit_code = process.wait(timeout=10)
+ if exit_code != 0:
+ log("ERROR: %s failed with exit code %d" % (script, exit_code))
+
+
+def run_all_scripts():
+ """ Run all scripts. For the sake of robustness, log and ignore any
+ exceptions that occur.
+
+ Must be run within archive_context """
+ scripts = os.listdir("debug-scripts")
+ for script in scripts:
+ try:
+ run_script(script)
+ except Exception:
+ log(traceback.format_exc())
+
+
+def main():
+ """ Open an archive context and run all scripts. """
+ with archive_context():
+ run_all_scripts()
+
+
+if __name__ == "__main__":
+ main()
diff --git a/containerd/actions/upgrade-containerd b/containerd/actions/upgrade-containerd
new file mode 100755
index 0000000..894bf23
--- /dev/null
+++ b/containerd/actions/upgrade-containerd
@@ -0,0 +1,43 @@
+#!/usr/local/sbin/charm-env python3
+
+from charmhelpers.core.hookenv import (
+ action_set,
+ action_fail
+)
+
+from charmhelpers.fetch import (
+ apt_install,
+ apt_update,
+ apt_hold,
+ apt_unhold
+)
+
+from charmhelpers.core.host import service_restart
+
+from charms.reactive import remove_state
+
+from reactive.containerd import CONTAINERD_PACKAGE
+
+
+def main():
+ """
+ Upgrade containerd to the latest in apt.
+
+ :return: None
+ """
+ try:
+ apt_update(fatal=True)
+ apt_unhold(CONTAINERD_PACKAGE)
+ apt_install(CONTAINERD_PACKAGE, fatal=True)
+ apt_hold(CONTAINERD_PACKAGE)
+ service_restart(CONTAINERD_PACKAGE)
+
+ remove_state('containerd.version-published')
+ action_set({'runtime': CONTAINERD_PACKAGE})
+
+ except Exception as e:
+ action_fail(str(e))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/containerd/bin/charm-env b/containerd/bin/charm-env
new file mode 100755
index 0000000..d211ce9
--- /dev/null
+++ b/containerd/bin/charm-env
@@ -0,0 +1,107 @@
+#!/bin/bash
+
+VERSION="1.0.0"
+
+
+find_charm_dirs() {
+ # Hopefully, $JUJU_CHARM_DIR is set so which venv to use is unambiguous.
+ if [[ -n "$JUJU_CHARM_DIR" || -n "$CHARM_DIR" ]]; then
+ if [[ -z "$JUJU_CHARM_DIR" ]]; then
+ # accept $CHARM_DIR to be more forgiving
+ export JUJU_CHARM_DIR="$CHARM_DIR"
+ fi
+ if [[ -z "$CHARM_DIR" ]]; then
+ # set CHARM_DIR as well to help with backwards compatibility
+ export CHARM_DIR="$JUJU_CHARM_DIR"
+ fi
+ return
+ fi
+ # Try to guess the value for JUJU_CHARM_DIR by looking for a non-subordinate
+ # (because there's got to be at least one principle) charm directory;
+ # if there are several, pick the first by alpha order.
+ agents_dir="/var/lib/juju/agents"
+ if [[ -d "$agents_dir" ]]; then
+ desired_charm="$1"
+ found_charm_dir=""
+ if [[ -n "$desired_charm" ]]; then
+ for charm_dir in $(/bin/ls -d "$agents_dir"/unit-*/charm); do
+ charm_name="$(grep -o '^['\''"]\?name['\''"]\?:.*' $charm_dir/metadata.yaml 2> /dev/null | sed -e 's/.*: *//' -e 's/['\''"]//g')"
+ if [[ "$charm_name" == "$desired_charm" ]]; then
+ if [[ -n "$found_charm_dir" ]]; then
+ >&2 echo "Ambiguous possibilities for JUJU_CHARM_DIR matching '$desired_charm'; please run within a Juju hook context"
+ exit 1
+ fi
+ found_charm_dir="$charm_dir"
+ fi
+ done
+ if [[ -z "$found_charm_dir" ]]; then
+ >&2 echo "Unable to determine JUJU_CHARM_DIR matching '$desired_charm'; please run within a Juju hook context"
+ exit 1
+ fi
+ export JUJU_CHARM_DIR="$found_charm_dir"
+ export CHARM_DIR="$found_charm_dir"
+ return
+ fi
+ # shellcheck disable=SC2126
+ non_subordinates="$(grep -L 'subordinate"\?:.*true' "$agents_dir"/unit-*/charm/metadata.yaml | wc -l)"
+ if [[ "$non_subordinates" -gt 1 ]]; then
+ >&2 echo 'Ambiguous possibilities for JUJU_CHARM_DIR; please use --charm or run within a Juju hook context'
+ exit 1
+ elif [[ "$non_subordinates" -eq 1 ]]; then
+ for charm_dir in $(/bin/ls -d "$agents_dir"/unit-*/charm); do
+ if grep -q 'subordinate"\?:.*true' "$charm_dir/metadata.yaml"; then
+ continue
+ fi
+ export JUJU_CHARM_DIR="$charm_dir"
+ export CHARM_DIR="$charm_dir"
+ return
+ done
+ fi
+ fi
+ >&2 echo 'Unable to determine JUJU_CHARM_DIR; please run within a Juju hook context'
+ exit 1
+}
+
+try_activate_venv() {
+ if [[ -d "$JUJU_CHARM_DIR/../.venv" ]]; then
+ . "$JUJU_CHARM_DIR/../.venv/bin/activate"
+ fi
+}
+
+find_wrapped() {
+ PATH="${PATH/\/usr\/local\/sbin:}" which "$(basename "$0")"
+}
+
+
+if [[ "$1" == "--version" || "$1" == "-v" ]]; then
+ echo "$VERSION"
+ exit 0
+fi
+
+
+# allow --charm option to hint which JUJU_CHARM_DIR to choose when ambiguous
+# NB: --charm option must come first
+# NB: option must be processed outside find_charm_dirs to modify $@
+charm_name=""
+if [[ "$1" == "--charm" ]]; then
+ charm_name="$2"
+ shift; shift
+fi
+
+find_charm_dirs "$charm_name"
+try_activate_venv
+export PYTHONPATH="$JUJU_CHARM_DIR/lib:$PYTHONPATH"
+
+if [[ "$(basename "$0")" == "charm-env" ]]; then
+ # being used as a shebang
+ exec "$@"
+elif [[ "$0" == "$BASH_SOURCE" ]]; then
+ # being invoked as a symlink wrapping something to find in the venv
+ exec "$(find_wrapped)" "$@"
+elif [[ "$(basename "$BASH_SOURCE")" == "charm-env" ]]; then
+ # being sourced directly; do nothing
+ /bin/true
+else
+ # being sourced for wrapped bash helpers
+ . "$(find_wrapped)"
+fi
diff --git a/containerd/bin/layer_option b/containerd/bin/layer_option
new file mode 100755
index 0000000..3253ef8
--- /dev/null
+++ b/containerd/bin/layer_option
@@ -0,0 +1,22 @@
+#!/usr/bin/env python3
+
+import sys
+import argparse
+from charms import layer
+
+
+parser = argparse.ArgumentParser(description='Access layer options.')
+parser.add_argument('section',
+ help='the section, or layer, the option is from')
+parser.add_argument('option',
+ help='the option to access')
+
+args = parser.parse_args()
+value = layer.options.get(args.section, args.option)
+if isinstance(value, bool):
+ sys.exit(0 if value else 1)
+elif isinstance(value, list):
+ for val in value:
+ print(val)
+else:
+ print(value)
diff --git a/containerd/config.yaml b/containerd/config.yaml
new file mode 100644
index 0000000..2356a3b
--- /dev/null
+++ b/containerd/config.yaml
@@ -0,0 +1,86 @@
+"options":
+ "enable-cgroups":
+ "type": "boolean"
+ "default": !!bool "false"
+ "description": |
+ Enable GRUB cgroup overrides cgroup_enable=memory swapaccount=1. WARNING
+ changing this option will reboot the host - use with caution on production
+ services.
+ "disable-juju-proxy":
+ "type": "boolean"
+ "default": !!bool "false"
+ "description": |
+ Ignore juju-http(s) proxy settings on this charm.
+ If set to true, all juju https proxy settings will be ignored
+ "custom-registry-ca":
+ "type": "string"
+ "default": ""
+ "description": |
+ Base64 encoded Certificate Authority (CA) bundle. Setting this config
+ allows container runtimes to pull images from registries with TLS
+ certificates signed by an external CA.
+ "custom_registries":
+ "type": "string"
+ "default": "[]"
+ "description": |
+ Registry endpoints and credentials. Setting this config allows Kubelet
+ to pull images from registries where auth is required.
+
+ The value for this config must be a JSON array of credential objects, like this:
+ [{"host": "my.registry:port", "username": "user", "password": "pass"}]
+
+ `host` could be registry host address, e.g.: myregistry.io:9000, 10.10.10.10:5432.
+ or a name, e.g.: myregistry.io, myregistry.
+ It will be derived from `url` if not provided, e.g.:
+ url: http://10.10.10.10:8000 --> host: 10.10.10.10:8000
+
+ If required, you can supply credentials with option keys 'username' and 'password',
+ or 'ca_file', 'cert_file', and 'key_file' for ssl/tls communication,
+ which should be base64 encoded file contents in string form
+
+ "ca_file": "'"$(base64 -w 0 < my.custom.registry.pem)"'"
+
+ example config)
+ juju config containerd custom_registries='[{
+ "url": "https://registry.example.com",
+ "ca_file": "'"$(base64 -w 0 < ~/my.custom.ca.pem)"'",
+ "cert_file": "'"$(base64 -w 0 < ~/my.custom.cert.pem)"'",
+ "key_file": "'"$(base64 -w 0 < ~/my.custom.key.pem)"'",
+ }]'
+ "gpu_driver":
+ "type": "string"
+ "default": "auto"
+ "description": |
+ Override GPU driver installation. Options are "auto", "nvidia", "none".
+ "runtime":
+ "type": "string"
+ "default": "auto"
+ "description": |
+ Set a custom containerd runtime. Set "auto" to select based on hardware.
+ "shim":
+ "type": "string"
+ "default": "containerd-shim"
+ "description": |
+ Set a custom containerd shim.
+ "http_proxy":
+ "type": "string"
+ "default": ""
+ "description": |
+ URL to use for HTTP_PROXY to be used by Containerd. Useful in
+ egress-filtered environments where a proxy is the only option for
+ accessing the registry to pull images.
+ "https_proxy":
+ "type": "string"
+ "default": ""
+ "description": |
+ URL to use for HTTPS_PROXY to be used by Containerd. Useful in
+ egress-filtered environments where a proxy is the only option for
+ accessing the registry to pull images.
+ "no_proxy":
+ "type": "string"
+ "default": ""
+ "description": |
+ Comma-separated list of destinations (either domain names or IP
+ addresses) which should be accessed directly, rather than through
+ the proxy defined in http_proxy or https_proxy. Must be less than
+ 2023 characters long.
diff --git a/containerd/copyright b/containerd/copyright
new file mode 100644
index 0000000..a91bdf1
--- /dev/null
+++ b/containerd/copyright
@@ -0,0 +1,16 @@
+Format: http://dep.debian.net/deps/dep5/
+
+Files: *
+Copyright: Copyright 2018, Canonical Ltd., All Rights Reserved.
+License: Apache License 2.0
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+ .
+ http://www.apache.org/licenses/LICENSE-2.0
+ .
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/containerd/copyright.layer-basic b/containerd/copyright.layer-basic
new file mode 100644
index 0000000..d4fdd18
--- /dev/null
+++ b/containerd/copyright.layer-basic
@@ -0,0 +1,16 @@
+Format: http://dep.debian.net/deps/dep5/
+
+Files: *
+Copyright: Copyright 2015-2017, Canonical Ltd., All Rights Reserved.
+License: Apache License 2.0
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+ .
+ http://www.apache.org/licenses/LICENSE-2.0
+ .
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/containerd/copyright.layer-options b/containerd/copyright.layer-options
new file mode 100644
index 0000000..d4fdd18
--- /dev/null
+++ b/containerd/copyright.layer-options
@@ -0,0 +1,16 @@
+Format: http://dep.debian.net/deps/dep5/
+
+Files: *
+Copyright: Copyright 2015-2017, Canonical Ltd., All Rights Reserved.
+License: Apache License 2.0
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+ .
+ http://www.apache.org/licenses/LICENSE-2.0
+ .
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/containerd/debug-scripts/charm-unitdata b/containerd/debug-scripts/charm-unitdata
new file mode 100755
index 0000000..d2aac60
--- /dev/null
+++ b/containerd/debug-scripts/charm-unitdata
@@ -0,0 +1,12 @@
+#!/usr/local/sbin/charm-env python3
+
+import debug_script
+import json
+from charmhelpers.core import unitdata
+
+kv = unitdata.kv()
+data = kv.getrange("")
+
+with debug_script.open_file("unitdata.json", "w") as f:
+ json.dump(data, f, indent=2)
+ f.write("\n")
diff --git a/containerd/debug-scripts/filesystem b/containerd/debug-scripts/filesystem
new file mode 100755
index 0000000..c5ec6d8
--- /dev/null
+++ b/containerd/debug-scripts/filesystem
@@ -0,0 +1,17 @@
+#!/bin/sh
+set -ux
+
+# report file system disk space usage
+df -hT > $DEBUG_SCRIPT_DIR/df-hT
+# estimate file space usage
+ du -h / > $DEBUG_SCRIPT_DIR/du-h 2>&1
+# list the mounted filesystems
+mount > $DEBUG_SCRIPT_DIR/mount
+# list the mounted systems with ascii trees
+findmnt -A > $DEBUG_SCRIPT_DIR/findmnt
+# list block devices
+lsblk > $DEBUG_SCRIPT_DIR/lsblk
+# list open files
+ lsof > $DEBUG_SCRIPT_DIR/lsof 2>&1
+# list local system locks
+lslocks > $DEBUG_SCRIPT_DIR/lslocks
diff --git a/containerd/debug-scripts/juju-logs b/containerd/debug-scripts/juju-logs
new file mode 100755
index 0000000..d27c458
--- /dev/null
+++ b/containerd/debug-scripts/juju-logs
@@ -0,0 +1,4 @@
+#!/bin/sh
+set -ux
+
+cp -v /var/log/juju/* $DEBUG_SCRIPT_DIR
diff --git a/containerd/debug-scripts/juju-network-get b/containerd/debug-scripts/juju-network-get
new file mode 100755
index 0000000..983c8c4
--- /dev/null
+++ b/containerd/debug-scripts/juju-network-get
@@ -0,0 +1,21 @@
+#!/usr/local/sbin/charm-env python3
+
+import os
+import subprocess
+import yaml
+import debug_script
+
+with open('metadata.yaml') as f:
+ metadata = yaml.safe_load(f)
+
+relations = []
+for key in ['requires', 'provides', 'peers']:
+ relations += list(metadata.get(key, {}).keys())
+
+os.mkdir(os.path.join(debug_script.dir, 'relations'))
+
+for relation in relations:
+ path = 'relations/' + relation
+ with debug_script.open_file(path, 'w') as f:
+ cmd = ['network-get', relation]
+ subprocess.call(cmd, stdout=f, stderr=subprocess.STDOUT)
diff --git a/containerd/debug-scripts/network b/containerd/debug-scripts/network
new file mode 100755
index 0000000..944a355
--- /dev/null
+++ b/containerd/debug-scripts/network
@@ -0,0 +1,11 @@
+#!/bin/sh
+set -ux
+
+ifconfig -a > $DEBUG_SCRIPT_DIR/ifconfig
+cp -v /etc/resolv.conf $DEBUG_SCRIPT_DIR/resolv.conf
+cp -v /etc/network/interfaces $DEBUG_SCRIPT_DIR/interfaces
+netstat -planut > $DEBUG_SCRIPT_DIR/netstat
+route -n > $DEBUG_SCRIPT_DIR/route
+iptables-save > $DEBUG_SCRIPT_DIR/iptables-save
+dig google.com > $DEBUG_SCRIPT_DIR/dig-google
+ping -w 2 -i 0.1 google.com > $DEBUG_SCRIPT_DIR/ping-google
diff --git a/containerd/debug-scripts/packages b/containerd/debug-scripts/packages
new file mode 100755
index 0000000..b60a9cf
--- /dev/null
+++ b/containerd/debug-scripts/packages
@@ -0,0 +1,7 @@
+#!/bin/sh
+set -ux
+
+dpkg --list > $DEBUG_SCRIPT_DIR/dpkg-list
+snap list > $DEBUG_SCRIPT_DIR/snap-list
+pip2 list > $DEBUG_SCRIPT_DIR/pip2-list
+pip3 list > $DEBUG_SCRIPT_DIR/pip3-list
diff --git a/containerd/debug-scripts/sysctl b/containerd/debug-scripts/sysctl
new file mode 100755
index 0000000..a86a6c8
--- /dev/null
+++ b/containerd/debug-scripts/sysctl
@@ -0,0 +1,4 @@
+#!/bin/sh
+set -ux
+
+sysctl -a > $DEBUG_SCRIPT_DIR/sysctl
diff --git a/containerd/debug-scripts/systemd b/containerd/debug-scripts/systemd
new file mode 100755
index 0000000..8bb9b6f
--- /dev/null
+++ b/containerd/debug-scripts/systemd
@@ -0,0 +1,9 @@
+#!/bin/sh
+set -ux
+
+systemctl --all > $DEBUG_SCRIPT_DIR/systemctl
+journalctl > $DEBUG_SCRIPT_DIR/journalctl
+systemd-analyze time > $DEBUG_SCRIPT_DIR/systemd-analyze-time
+systemd-analyze blame > $DEBUG_SCRIPT_DIR/systemd-analyze-blame
+systemd-analyze critical-chain > $DEBUG_SCRIPT_DIR/systemd-analyze-critical-chain
+systemd-analyze dump > $DEBUG_SCRIPT_DIR/systemd-analyze-dump
diff --git a/containerd/docs/status.md b/containerd/docs/status.md
new file mode 100644
index 0000000..c6cceab
--- /dev/null
+++ b/containerd/docs/status.md
@@ -0,0 +1,91 @@
+
+
+```python
+maintenance(message)
+```
+
+Set the status to the `MAINTENANCE` state with the given operator message.
+
+__Parameters__
+
+- __`message` (str)__: Message to convey to the operator.
+
+
+maint
+
+```python
+maint(message)
+```
+
+Shorthand alias for
+[maintenance](status.md#charms.layer.status.maintenance).
+
+__Parameters__
+
+- __`message` (str)__: Message to convey to the operator.
+
+
+blocked
+
+```python
+blocked(message)
+```
+
+Set the status to the `BLOCKED` state with the given operator message.
+
+__Parameters__
+
+- __`message` (str)__: Message to convey to the operator.
+
+
+waiting
+
+```python
+waiting(message)
+```
+
+Set the status to the `WAITING` state with the given operator message.
+
+__Parameters__
+
+- __`message` (str)__: Message to convey to the operator.
+
+
+active
+
+```python
+active(message)
+```
+
+Set the status to the `ACTIVE` state with the given operator message.
+
+__Parameters__
+
+- __`message` (str)__: Message to convey to the operator.
+
+
+status_set
+
+```python
+status_set(workload_state, message)
+```
+
+Set the status to the given workload state with a message.
+
+__Parameters__
+
+- __`workload_state` (WorkloadState or str)__: State of the workload. Should be
+ a [WorkloadState](status.md#charms.layer.status.WorkloadState) enum
+ member, or the string value of one of those members.
+- __`message` (str)__: Message to convey to the operator.
+
diff --git a/containerd/hooks/config-changed b/containerd/hooks/config-changed
new file mode 100755
index 0000000..9858c6b
--- /dev/null
+++ b/containerd/hooks/config-changed
@@ -0,0 +1,22 @@
+#!/usr/bin/env python3
+
+# Load modules from $JUJU_CHARM_DIR/lib
+import sys
+sys.path.append('lib')
+
+from charms.layer import basic # noqa
+basic.bootstrap_charm_deps()
+
+from charmhelpers.core import hookenv # noqa
+hookenv.atstart(basic.init_config_states)
+hookenv.atexit(basic.clear_config_states)
+
+
+# This will load and run the appropriate @hook and other decorated
+# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive,
+# and $JUJU_CHARM_DIR/hooks/relations.
+#
+# See https://jujucharms.com/docs/stable/authors-charm-building
+# for more information on this pattern.
+from charms.reactive import main # noqa
+main()
diff --git a/containerd/hooks/containerd-relation-broken b/containerd/hooks/containerd-relation-broken
new file mode 100755
index 0000000..9858c6b
--- /dev/null
+++ b/containerd/hooks/containerd-relation-broken
@@ -0,0 +1,22 @@
+#!/usr/bin/env python3
+
+# Load modules from $JUJU_CHARM_DIR/lib
+import sys
+sys.path.append('lib')
+
+from charms.layer import basic # noqa
+basic.bootstrap_charm_deps()
+
+from charmhelpers.core import hookenv # noqa
+hookenv.atstart(basic.init_config_states)
+hookenv.atexit(basic.clear_config_states)
+
+
+# This will load and run the appropriate @hook and other decorated
+# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive,
+# and $JUJU_CHARM_DIR/hooks/relations.
+#
+# See https://jujucharms.com/docs/stable/authors-charm-building
+# for more information on this pattern.
+from charms.reactive import main # noqa
+main()
diff --git a/containerd/hooks/containerd-relation-changed b/containerd/hooks/containerd-relation-changed
new file mode 100755
index 0000000..9858c6b
--- /dev/null
+++ b/containerd/hooks/containerd-relation-changed
@@ -0,0 +1,22 @@
+#!/usr/bin/env python3
+
+# Load modules from $JUJU_CHARM_DIR/lib
+import sys
+sys.path.append('lib')
+
+from charms.layer import basic # noqa
+basic.bootstrap_charm_deps()
+
+from charmhelpers.core import hookenv # noqa
+hookenv.atstart(basic.init_config_states)
+hookenv.atexit(basic.clear_config_states)
+
+
+# This will load and run the appropriate @hook and other decorated
+# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive,
+# and $JUJU_CHARM_DIR/hooks/relations.
+#
+# See https://jujucharms.com/docs/stable/authors-charm-building
+# for more information on this pattern.
+from charms.reactive import main # noqa
+main()
diff --git a/containerd/hooks/containerd-relation-created b/containerd/hooks/containerd-relation-created
new file mode 100755
index 0000000..9858c6b
--- /dev/null
+++ b/containerd/hooks/containerd-relation-created
@@ -0,0 +1,22 @@
+#!/usr/bin/env python3
+
+# Load modules from $JUJU_CHARM_DIR/lib
+import sys
+sys.path.append('lib')
+
+from charms.layer import basic # noqa
+basic.bootstrap_charm_deps()
+
+from charmhelpers.core import hookenv # noqa
+hookenv.atstart(basic.init_config_states)
+hookenv.atexit(basic.clear_config_states)
+
+
+# This will load and run the appropriate @hook and other decorated
+# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive,
+# and $JUJU_CHARM_DIR/hooks/relations.
+#
+# See https://jujucharms.com/docs/stable/authors-charm-building
+# for more information on this pattern.
+from charms.reactive import main # noqa
+main()
diff --git a/containerd/hooks/containerd-relation-departed b/containerd/hooks/containerd-relation-departed
new file mode 100755
index 0000000..9858c6b
--- /dev/null
+++ b/containerd/hooks/containerd-relation-departed
@@ -0,0 +1,22 @@
+#!/usr/bin/env python3
+
+# Load modules from $JUJU_CHARM_DIR/lib
+import sys
+sys.path.append('lib')
+
+from charms.layer import basic # noqa
+basic.bootstrap_charm_deps()
+
+from charmhelpers.core import hookenv # noqa
+hookenv.atstart(basic.init_config_states)
+hookenv.atexit(basic.clear_config_states)
+
+
+# This will load and run the appropriate @hook and other decorated
+# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive,
+# and $JUJU_CHARM_DIR/hooks/relations.
+#
+# See https://jujucharms.com/docs/stable/authors-charm-building
+# for more information on this pattern.
+from charms.reactive import main # noqa
+main()
diff --git a/containerd/hooks/containerd-relation-joined b/containerd/hooks/containerd-relation-joined
new file mode 100755
index 0000000..9858c6b
--- /dev/null
+++ b/containerd/hooks/containerd-relation-joined
@@ -0,0 +1,22 @@
+#!/usr/bin/env python3
+
+# Load modules from $JUJU_CHARM_DIR/lib
+import sys
+sys.path.append('lib')
+
+from charms.layer import basic # noqa
+basic.bootstrap_charm_deps()
+
+from charmhelpers.core import hookenv # noqa
+hookenv.atstart(basic.init_config_states)
+hookenv.atexit(basic.clear_config_states)
+
+
+# This will load and run the appropriate @hook and other decorated
+# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive,
+# and $JUJU_CHARM_DIR/hooks/relations.
+#
+# See https://jujucharms.com/docs/stable/authors-charm-building
+# for more information on this pattern.
+from charms.reactive import main # noqa
+main()
diff --git a/containerd/hooks/docker-registry-relation-broken b/containerd/hooks/docker-registry-relation-broken
new file mode 100755
index 0000000..9858c6b
--- /dev/null
+++ b/containerd/hooks/docker-registry-relation-broken
@@ -0,0 +1,22 @@
+#!/usr/bin/env python3
+
+# Load modules from $JUJU_CHARM_DIR/lib
+import sys
+sys.path.append('lib')
+
+from charms.layer import basic # noqa
+basic.bootstrap_charm_deps()
+
+from charmhelpers.core import hookenv # noqa
+hookenv.atstart(basic.init_config_states)
+hookenv.atexit(basic.clear_config_states)
+
+
+# This will load and run the appropriate @hook and other decorated
+# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive,
+# and $JUJU_CHARM_DIR/hooks/relations.
+#
+# See https://jujucharms.com/docs/stable/authors-charm-building
+# for more information on this pattern.
+from charms.reactive import main # noqa
+main()
diff --git a/containerd/hooks/docker-registry-relation-changed b/containerd/hooks/docker-registry-relation-changed
new file mode 100755
index 0000000..9858c6b
--- /dev/null
+++ b/containerd/hooks/docker-registry-relation-changed
@@ -0,0 +1,22 @@
+#!/usr/bin/env python3
+
+# Load modules from $JUJU_CHARM_DIR/lib
+import sys
+sys.path.append('lib')
+
+from charms.layer import basic # noqa
+basic.bootstrap_charm_deps()
+
+from charmhelpers.core import hookenv # noqa
+hookenv.atstart(basic.init_config_states)
+hookenv.atexit(basic.clear_config_states)
+
+
+# This will load and run the appropriate @hook and other decorated
+# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive,
+# and $JUJU_CHARM_DIR/hooks/relations.
+#
+# See https://jujucharms.com/docs/stable/authors-charm-building
+# for more information on this pattern.
+from charms.reactive import main # noqa
+main()
diff --git a/containerd/hooks/docker-registry-relation-created b/containerd/hooks/docker-registry-relation-created
new file mode 100755
index 0000000..9858c6b
--- /dev/null
+++ b/containerd/hooks/docker-registry-relation-created
@@ -0,0 +1,22 @@
+#!/usr/bin/env python3
+
+# Load modules from $JUJU_CHARM_DIR/lib
+import sys
+sys.path.append('lib')
+
+from charms.layer import basic # noqa
+basic.bootstrap_charm_deps()
+
+from charmhelpers.core import hookenv # noqa
+hookenv.atstart(basic.init_config_states)
+hookenv.atexit(basic.clear_config_states)
+
+
+# This will load and run the appropriate @hook and other decorated
+# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive,
+# and $JUJU_CHARM_DIR/hooks/relations.
+#
+# See https://jujucharms.com/docs/stable/authors-charm-building
+# for more information on this pattern.
+from charms.reactive import main # noqa
+main()
diff --git a/containerd/hooks/docker-registry-relation-departed b/containerd/hooks/docker-registry-relation-departed
new file mode 100755
index 0000000..9858c6b
--- /dev/null
+++ b/containerd/hooks/docker-registry-relation-departed
@@ -0,0 +1,22 @@
+#!/usr/bin/env python3
+
+# Load modules from $JUJU_CHARM_DIR/lib
+import sys
+sys.path.append('lib')
+
+from charms.layer import basic # noqa
+basic.bootstrap_charm_deps()
+
+from charmhelpers.core import hookenv # noqa
+hookenv.atstart(basic.init_config_states)
+hookenv.atexit(basic.clear_config_states)
+
+
+# This will load and run the appropriate @hook and other decorated
+# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive,
+# and $JUJU_CHARM_DIR/hooks/relations.
+#
+# See https://jujucharms.com/docs/stable/authors-charm-building
+# for more information on this pattern.
+from charms.reactive import main # noqa
+main()
diff --git a/containerd/hooks/docker-registry-relation-joined b/containerd/hooks/docker-registry-relation-joined
new file mode 100755
index 0000000..9858c6b
--- /dev/null
+++ b/containerd/hooks/docker-registry-relation-joined
@@ -0,0 +1,22 @@
+#!/usr/bin/env python3
+
+# Load modules from $JUJU_CHARM_DIR/lib
+import sys
+sys.path.append('lib')
+
+from charms.layer import basic # noqa
+basic.bootstrap_charm_deps()
+
+from charmhelpers.core import hookenv # noqa
+hookenv.atstart(basic.init_config_states)
+hookenv.atexit(basic.clear_config_states)
+
+
+# This will load and run the appropriate @hook and other decorated
+# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive,
+# and $JUJU_CHARM_DIR/hooks/relations.
+#
+# See https://jujucharms.com/docs/stable/authors-charm-building
+# for more information on this pattern.
+from charms.reactive import main # noqa
+main()
diff --git a/containerd/hooks/hook.template b/containerd/hooks/hook.template
new file mode 100644
index 0000000..9858c6b
--- /dev/null
+++ b/containerd/hooks/hook.template
@@ -0,0 +1,22 @@
+#!/usr/bin/env python3
+
+# Load modules from $JUJU_CHARM_DIR/lib
+import sys
+sys.path.append('lib')
+
+from charms.layer import basic # noqa
+basic.bootstrap_charm_deps()
+
+from charmhelpers.core import hookenv # noqa
+hookenv.atstart(basic.init_config_states)
+hookenv.atexit(basic.clear_config_states)
+
+
+# This will load and run the appropriate @hook and other decorated
+# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive,
+# and $JUJU_CHARM_DIR/hooks/relations.
+#
+# See https://jujucharms.com/docs/stable/authors-charm-building
+# for more information on this pattern.
+from charms.reactive import main # noqa
+main()
diff --git a/containerd/hooks/install b/containerd/hooks/install
new file mode 100755
index 0000000..9858c6b
--- /dev/null
+++ b/containerd/hooks/install
@@ -0,0 +1,22 @@
+#!/usr/bin/env python3
+
+# Load modules from $JUJU_CHARM_DIR/lib
+import sys
+sys.path.append('lib')
+
+from charms.layer import basic # noqa
+basic.bootstrap_charm_deps()
+
+from charmhelpers.core import hookenv # noqa
+hookenv.atstart(basic.init_config_states)
+hookenv.atexit(basic.clear_config_states)
+
+
+# This will load and run the appropriate @hook and other decorated
+# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive,
+# and $JUJU_CHARM_DIR/hooks/relations.
+#
+# See https://jujucharms.com/docs/stable/authors-charm-building
+# for more information on this pattern.
+from charms.reactive import main # noqa
+main()
diff --git a/containerd/hooks/leader-elected b/containerd/hooks/leader-elected
new file mode 100755
index 0000000..9858c6b
--- /dev/null
+++ b/containerd/hooks/leader-elected
@@ -0,0 +1,22 @@
+#!/usr/bin/env python3
+
+# Load modules from $JUJU_CHARM_DIR/lib
+import sys
+sys.path.append('lib')
+
+from charms.layer import basic # noqa
+basic.bootstrap_charm_deps()
+
+from charmhelpers.core import hookenv # noqa
+hookenv.atstart(basic.init_config_states)
+hookenv.atexit(basic.clear_config_states)
+
+
+# This will load and run the appropriate @hook and other decorated
+# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive,
+# and $JUJU_CHARM_DIR/hooks/relations.
+#
+# See https://jujucharms.com/docs/stable/authors-charm-building
+# for more information on this pattern.
+from charms.reactive import main # noqa
+main()
diff --git a/containerd/hooks/leader-settings-changed b/containerd/hooks/leader-settings-changed
new file mode 100755
index 0000000..9858c6b
--- /dev/null
+++ b/containerd/hooks/leader-settings-changed
@@ -0,0 +1,22 @@
+#!/usr/bin/env python3
+
+# Load modules from $JUJU_CHARM_DIR/lib
+import sys
+sys.path.append('lib')
+
+from charms.layer import basic # noqa
+basic.bootstrap_charm_deps()
+
+from charmhelpers.core import hookenv # noqa
+hookenv.atstart(basic.init_config_states)
+hookenv.atexit(basic.clear_config_states)
+
+
+# This will load and run the appropriate @hook and other decorated
+# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive,
+# and $JUJU_CHARM_DIR/hooks/relations.
+#
+# See https://jujucharms.com/docs/stable/authors-charm-building
+# for more information on this pattern.
+from charms.reactive import main # noqa
+main()
diff --git a/containerd/hooks/post-series-upgrade b/containerd/hooks/post-series-upgrade
new file mode 100755
index 0000000..9858c6b
--- /dev/null
+++ b/containerd/hooks/post-series-upgrade
@@ -0,0 +1,22 @@
+#!/usr/bin/env python3
+
+# Load modules from $JUJU_CHARM_DIR/lib
+import sys
+sys.path.append('lib')
+
+from charms.layer import basic # noqa
+basic.bootstrap_charm_deps()
+
+from charmhelpers.core import hookenv # noqa
+hookenv.atstart(basic.init_config_states)
+hookenv.atexit(basic.clear_config_states)
+
+
+# This will load and run the appropriate @hook and other decorated
+# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive,
+# and $JUJU_CHARM_DIR/hooks/relations.
+#
+# See https://jujucharms.com/docs/stable/authors-charm-building
+# for more information on this pattern.
+from charms.reactive import main # noqa
+main()
diff --git a/containerd/hooks/pre-series-upgrade b/containerd/hooks/pre-series-upgrade
new file mode 100755
index 0000000..9858c6b
--- /dev/null
+++ b/containerd/hooks/pre-series-upgrade
@@ -0,0 +1,22 @@
+#!/usr/bin/env python3
+
+# Load modules from $JUJU_CHARM_DIR/lib
+import sys
+sys.path.append('lib')
+
+from charms.layer import basic # noqa
+basic.bootstrap_charm_deps()
+
+from charmhelpers.core import hookenv # noqa
+hookenv.atstart(basic.init_config_states)
+hookenv.atexit(basic.clear_config_states)
+
+
+# This will load and run the appropriate @hook and other decorated
+# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive,
+# and $JUJU_CHARM_DIR/hooks/relations.
+#
+# See https://jujucharms.com/docs/stable/authors-charm-building
+# for more information on this pattern.
+from charms.reactive import main # noqa
+main()
diff --git a/containerd/hooks/relations/container-runtime/.gitignore b/containerd/hooks/relations/container-runtime/.gitignore
new file mode 100644
index 0000000..894a44c
--- /dev/null
+++ b/containerd/hooks/relations/container-runtime/.gitignore
@@ -0,0 +1,104 @@
+# Byte-compiled / optimized / DLL files
+__pycache__/
+*.py[cod]
+*$py.class
+
+# C extensions
+*.so
+
+# Distribution / packaging
+.Python
+build/
+develop-eggs/
+dist/
+downloads/
+eggs/
+.eggs/
+lib/
+lib64/
+parts/
+sdist/
+var/
+wheels/
+*.egg-info/
+.installed.cfg
+*.egg
+MANIFEST
+
+# PyInstaller
+# Usually these files are written by a python script from a template
+# before PyInstaller builds the exe, so as to inject date/other infos into it.
+*.manifest
+*.spec
+
+# Installer logs
+pip-log.txt
+pip-delete-this-directory.txt
+
+# Unit test / coverage reports
+htmlcov/
+.tox/
+.coverage
+.coverage.*
+.cache
+nosetests.xml
+coverage.xml
+*.cover
+.hypothesis/
+.pytest_cache/
+
+# Translations
+*.mo
+*.pot
+
+# Django stuff:
+*.log
+local_settings.py
+db.sqlite3
+
+# Flask stuff:
+instance/
+.webassets-cache
+
+# Scrapy stuff:
+.scrapy
+
+# Sphinx documentation
+docs/_build/
+
+# PyBuilder
+target/
+
+# Jupyter Notebook
+.ipynb_checkpoints
+
+# pyenv
+.python-version
+
+# celery beat schedule file
+celerybeat-schedule
+
+# SageMath parsed files
+*.sage.py
+
+# Environments
+.env
+.venv
+env/
+venv/
+ENV/
+env.bak/
+venv.bak/
+
+# Spyder project settings
+.spyderproject
+.spyproject
+
+# Rope project settings
+.ropeproject
+
+# mkdocs documentation
+/site
+
+# mypy
+.mypy_cache/
diff --git a/containerd/hooks/relations/container-runtime/LICENSE b/containerd/hooks/relations/container-runtime/LICENSE
new file mode 100644
index 0000000..261eeb9
--- /dev/null
+++ b/containerd/hooks/relations/container-runtime/LICENSE
@@ -0,0 +1,201 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/containerd/hooks/relations/container-runtime/README.md b/containerd/hooks/relations/container-runtime/README.md
new file mode 100644
index 0000000..4620013
--- /dev/null
+++ b/containerd/hooks/relations/container-runtime/README.md
@@ -0,0 +1,45 @@
+# interface-container-runtime
+
+## Overview
+
+This interface handles communication between subordinate charms that provide a container runtime, and charms requiring a container runtime.
+
+## Usage
+
+### Provides
+
+The providing side of the container interface provides a place for a container runtime to connect to.
+
+Your charm should respond to the `endpoint.{endpoint_name}.available` state,
+which indicates that there is a container runtime connected.
+
+A trivial example of handling this interface would be:
+
+```python
+@when('endpoint.containerd.joined')
+def update_kubelet_config(containerd):
+ endpoint = endpoint_from_flag('endpoint.containerd.joined')
+ config = endpoint.get_config()
+ kubelet.config['container-runtime'] = \
+ config['runtime']
+```
+
+### Requires
+
+The requiring side of the container interface requires a place for a container runtime to connect to.
+
+Your charm should set `{endpoint_name}.available` state,
+which indicates that the container runtime is connected.
+
+A trivial example of handling this interface would be:
+
+```python
+@when('endpoint.containerd.joined')
+def publish_config():
+ endpoint = endpoint_from_flag('endpoint.containerd.joined')
+ endpoint.set_config(
+ socket='unix:///var/run/containerd/containerd.sock',
+ runtime='remote',
+ nvidia_enabled=False
+ )
+```
diff --git a/containerd/hooks/relations/container-runtime/__init__.py b/containerd/hooks/relations/container-runtime/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/containerd/hooks/relations/container-runtime/interface.yaml b/containerd/hooks/relations/container-runtime/interface.yaml
new file mode 100644
index 0000000..294be1e
--- /dev/null
+++ b/containerd/hooks/relations/container-runtime/interface.yaml
@@ -0,0 +1,4 @@
+name: container-runtime
+summary: Interface for relating to container runtimes
+version: 1
+maintainer: "Joe Borg "
diff --git a/containerd/hooks/relations/container-runtime/provides.py b/containerd/hooks/relations/container-runtime/provides.py
new file mode 100644
index 0000000..a9768a8
--- /dev/null
+++ b/containerd/hooks/relations/container-runtime/provides.py
@@ -0,0 +1,55 @@
+from charms.reactive import (
+ Endpoint,
+ toggle_flag
+)
+
+
+class ContainerRuntimeProvides(Endpoint):
+ def manage_flags(self):
+ toggle_flag(self.expand_name('endpoint.{endpoint_name}.available'),
+ self.is_joined)
+
+ def _get_config(self, key):
+ """
+ Get the published configuration for a given key.
+
+ :param key: String dict key
+ :return: String value for given key
+ """
+ return self.all_joined_units.received.get(key)
+
+ def get_nvidia_enabled(self):
+ """
+ Get the published nvidia config.
+
+ :return: String
+ """
+ return self._get_config(key='nvidia_enabled')
+
+ def get_runtime(self):
+ """
+ Get the published runtime config.
+
+ :return: String
+ """
+ return self._get_config(key='runtime')
+
+ def get_socket(self):
+ """
+ Get the published socket config.
+
+ :return: String
+ """
+ return self._get_config(key='socket')
+
+ def set_config(self, sandbox_image=None):
+ """
+ Set the configuration to be published.
+
+ :param sandbox_image: String to optionally override the sandbox image
+ :return: None
+ """
+ for relation in self.relations:
+ relation.to_publish.update({
+ 'sandbox_image': sandbox_image
+ })
diff --git a/containerd/hooks/relations/container-runtime/requires.py b/containerd/hooks/relations/container-runtime/requires.py
new file mode 100644
index 0000000..c461b68
--- /dev/null
+++ b/containerd/hooks/relations/container-runtime/requires.py
@@ -0,0 +1,61 @@
+from charms.reactive import (
+ Endpoint,
+ clear_flag,
+ data_changed,
+ is_data_changed,
+ toggle_flag
+)
+
+
+class ContainerRuntimeRequires(Endpoint):
+ def manage_flags(self):
+ toggle_flag(self.expand_name('endpoint.{endpoint_name}.available'),
+ self.is_joined)
+ toggle_flag(self.expand_name('endpoint.{endpoint_name}.reconfigure'),
+ self.is_joined and self._config_changed())
+
+ def _config_changed(self):
+ """
+ Determine if our received data has changed.
+
+ :return: Boolean
+ """
+ # NB: this call should match whatever we're tracking in handle_remote_config
+ return is_data_changed('containerd.remote_config',
+ [self.get_sandbox_image()])
+
+ def handle_remote_config(self):
+ """
+ Keep track of received data so we can know if it changes.
+
+ :return: None
+ """
+ clear_flag(self.expand_name('endpoint.{endpoint_name}.reconfigure'))
+ # Presently, we only care about one piece of remote config. Expand
+ # the list as needed.
+ data_changed('containerd.remote_config',
+ [self.get_sandbox_image()])
+
+ def get_sandbox_image(self):
+ """
+ Get the sandbox image URI if a remote has published one.
+
+ :return: String: remotely configured sandbox image
+ """
+ return self.all_joined_units.received.get('sandbox_image')
+
+ def set_config(self, socket, runtime, nvidia_enabled):
+ """
+ Set the configuration to be published.
+
+ :param socket: String uri to runtime socket
+ :param runtime: String runtime executable
+ :param nvidia_enabled: Boolean nvidia runtime enabled
+ :return: None
+ """
+ for relation in self.relations:
+ relation.to_publish.update({
+ 'socket': socket,
+ 'runtime': runtime,
+ 'nvidia_enabled': nvidia_enabled
+ })
diff --git a/containerd/hooks/relations/docker-registry/.gitignore b/containerd/hooks/relations/docker-registry/.gitignore
new file mode 100644
index 0000000..3374ec2
--- /dev/null
+++ b/containerd/hooks/relations/docker-registry/.gitignore
@@ -0,0 +1,5 @@
+# Emacs save files
+*~
+\#*\#
+.\#*
+
diff --git a/containerd/hooks/relations/docker-registry/LICENSE b/containerd/hooks/relations/docker-registry/LICENSE
new file mode 100644
index 0000000..261eeb9
--- /dev/null
+++ b/containerd/hooks/relations/docker-registry/LICENSE
@@ -0,0 +1,201 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/containerd/hooks/relations/docker-registry/README.md b/containerd/hooks/relations/docker-registry/README.md
new file mode 100644
index 0000000..8257875
--- /dev/null
+++ b/containerd/hooks/relations/docker-registry/README.md
@@ -0,0 +1,35 @@
+# Overview
+
+This layer encapsulates the `docker-registry` interface communication
+protocol and provides an API for charms on either side of relations using this
+interface.
+
+## Usage
+
+In your charm's `layer.yaml`, ensure that `interface:docker-registry` is
+included in the `includes` section:
+
+```yaml
+includes: ['layer:basic', 'interface:docker-registry']
+```
+
+And in your charm's `metadata.yaml`, ensure that a relation endpoint is defined
+using the `docker-registry` interface protocol:
+
+```yaml
+requires:
+ docker-registry:
+ interface: docker-registry
+```
+
+React to changes from `docker-registry` as follows:
+
+```python
+@when('endpoint.docker-registry.ready')
+def registry_ready():
+ registry = endpoint_from_flag('endpoint.docker-registry.ready')
+ configure_registry(registry.registry_netloc)
+ if registry.has_auth_basic():
+ configure_auth(registry.basic_user,
+ registry.basic_password)
+```
diff --git a/containerd/hooks/relations/docker-registry/__init__.py b/containerd/hooks/relations/docker-registry/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/containerd/hooks/relations/docker-registry/interface.yaml b/containerd/hooks/relations/docker-registry/interface.yaml
new file mode 100644
index 0000000..339f1c0
--- /dev/null
+++ b/containerd/hooks/relations/docker-registry/interface.yaml
@@ -0,0 +1,4 @@
+name: docker-registry
+summary: Docker Registry Interface
+version: 1
+repo: https://github.com/juju-solutions/interface-docker-registry
diff --git a/containerd/hooks/relations/docker-registry/provides.py b/containerd/hooks/relations/docker-registry/provides.py
new file mode 100644
index 0000000..58717b5
--- /dev/null
+++ b/containerd/hooks/relations/docker-registry/provides.py
@@ -0,0 +1,114 @@
+"""
+This is the provides side of the interface layer, for use only by the
+docker-registry charm itself.
+The flags that are set by the provides side of this interface are:
+* **`endpoint.{endpoint_name}.requested`** This flag is set when there is
+ a new or updated request by a remote unit for docker-registry config.
+ The docker-registry integration charm should then iterate over each
+ request, perform whatever actions are necessary to satisfy those requests,
+ and then mark them as complete.
+"""
+
+from operator import attrgetter
+
+from charms.reactive import Endpoint
+from charms.reactive import when
+from charms.reactive import toggle_flag, clear_flag
+
+
+class DockerRegistryProvides(Endpoint):
+ """
+ Example usage:
+ ```python
+ from charms.reactive import when, endpoint_from_flag
+ from charms import layer
+ @when('endpoint.docker-registry.joined')
+ def configure_client():
+ registry = endpoint_from_flag('endpoint.docker-registry.joined')
+ registry.set_registry_config(netloc, **data)
+ @when('endpoint.docker-registry.requests-pending')
+ def handle_image_request():
+ registry = endpoint_from_flag('endpoint.docker-registry.requests-pending')
+ for request in registry.requests:
+ request.image_data(name, tag)
+ registry.mark_completed()
+ ```
+ """
+
+ @when('endpoint.{endpoint_name}.changed')
+ def check_requests(self):
+ toggle_flag(self.expand_name('requests-pending'),
+ len(self.requests) > 0)
+ clear_flag(self.expand_name('changed'))
+
+ @property
+ def requests(self):
+ """
+ A list of the new or updated #RegistryRequests that
+ have been made.
+ """
+ if not hasattr(self, '_requests'):
+ all_requests = [RegistryRequest(unit)
+ for unit in self.all_joined_units]
+ is_changed = attrgetter('is_changed')
+ self._requests = list(filter(is_changed, all_requests))
+ return self._requests
+
+ def mark_completed(self):
+ """
+ Mark all requests as completed and remove the `requests-pending` flag.
+ """
+ clear_flag(self.expand_name('requests-pending'))
+ self._requests = []
+
+ def set_registry_config(self, registry_netloc, **kwargs):
+ """
+ Set the registry config. Minimally, a network location is required.
+ Other data (auth, tls, etc) may also be set.
+ """
+ data = {'registry_netloc': registry_netloc}
+ for k, v in kwargs.items():
+ data[k] = v
+ for relation in self.relations:
+ relation.to_publish.update(data)
+
+
+class RegistryRequest:
+ """
+ A request from a single remote unit to include an image in our registry.
+ """
+ def __init__(self, unit):
+ self._unit = unit
+
+ @property
+ def _to_publish(self):
+ return self._unit.relation.to_publish
+
+ @property
+ def has_image(self):
+ """
+ Whether or not an image has been processed via `image_data`.
+ """
+ return 'image' in self._unit.relation.to_publish
+
+ @property
+ def is_changed(self):
+ """
+ Whether this request has changed since the last time it was
+ marked completed (if ever).
+ """
+ return not self.has_image
+
+ @property
+ def unit_name(self):
+ return self._unit.unit_name
+
+ def image_data(self, image, tag):
+ """
+        Set the image characteristics for this request.
+ """
+ data = {
+ 'image': image,
+ 'tag': tag,
+ }
+ self._unit.relation.to_publish.update(data)
diff --git a/containerd/hooks/relations/docker-registry/requires.py b/containerd/hooks/relations/docker-registry/requires.py
new file mode 100644
index 0000000..c638b57
--- /dev/null
+++ b/containerd/hooks/relations/docker-registry/requires.py
@@ -0,0 +1,114 @@
+"""
+This is the requires side of the interface layer, for use in charms that wish
+to request docker-registry data. The data will be provided by the
+docker-registry charm.
+The flags that are set by the requires side of this interface are:
+* **`endpoint.{endpoint_name}.joined`** This flag is set when the relation
+ has been joined, and the charm should then use the methods documented below
+ to request specific registry data. This flag is automatically removed
+ if the relation is broken. It should not be removed by the charm.
+* **`endpoint.{endpoint_name}.ready`** This flag is set once the requested
+ config has been enabled for the registry instance on which the charm is
+ running. This flag is automatically removed if new integration features are
+ requested. It should not be removed by the charm.
+"""
+
+
+from charms.reactive import Endpoint
+from charms.reactive import when, when_not
+from charms.reactive import clear_flag, toggle_flag
+
+
+class DockerRegistryRequires(Endpoint):
+ """
+ Interface to request registry config.
+ Example usage:
+ ```python
+ from charms.reactive import when, endpoint_from_flag
+ @when('endpoint.docker-registry.ready')
+ def registry_ready():
+        registry = endpoint_from_flag('endpoint.docker-registry.ready')
+ update_config(registry.registry_netloc)
+ ```
+ """
+
+ @property
+ def _received(self):
+ """
+ Helper to streamline access to received data since we expect to only
+ ever be connected to a single docker-registry application with a
+ single unit.
+ """
+ return self.all_joined_units.received
+
+ @property
+ def _to_publish(self):
+ """
+        Helper to streamline access to data to publish since we expect to only
+ ever be connected to a single docker-registry application with a
+ single unit.
+ """
+ return self.relations[0].to_publish
+
+ @when('endpoint.{endpoint_name}.changed')
+ def check_ready(self):
+ toggle_flag(self.expand_name('ready'), self.is_ready)
+ clear_flag(self.expand_name('changed'))
+
+ @when_not('endpoint.{endpoint_name}.joined')
+ def remove_ready(self):
+ clear_flag(self.expand_name('ready'))
+
+ def has_auth_basic(self):
+ """
+ Whether or not the registry has basic/htpasswd auth.
+ """
+ return all(field is not None for field in [
+ self.basic_password,
+ self.basic_user,
+ ])
+
+ def has_custom_url(self):
+ """
+ Whether or not the registry has a custom URL.
+ """
+ return all(field is not None for field in [
+ self.registry_url,
+ ])
+
+ def has_tls(self):
+ """
+ Whether or not the registry has TLS certificates configured.
+ """
+ return all(field is not None for field in [
+ self.tls_ca,
+ ])
+
+ @property
+ def is_ready(self):
+ """
+ Whether or not the request for this instance has been completed.
+ """
+ return all(field is not None for field in [
+ self.registry_netloc,
+ ])
+
+ @property
+ def basic_password(self):
+ return self._received.get('basic_password')
+
+ @property
+ def basic_user(self):
+ return self._received.get('basic_user')
+
+ @property
+ def registry_netloc(self):
+ return self._received.get('registry_netloc')
+
+ @property
+ def registry_url(self):
+ return self._received.get('registry_url')
+
+ @property
+ def tls_ca(self):
+ return self._received.get('tls_ca')
diff --git a/containerd/hooks/relations/untrusted-container-runtime/.gitignore b/containerd/hooks/relations/untrusted-container-runtime/.gitignore
new file mode 100644
index 0000000..894a44c
--- /dev/null
+++ b/containerd/hooks/relations/untrusted-container-runtime/.gitignore
@@ -0,0 +1,104 @@
+# Byte-compiled / optimized / DLL files
+__pycache__/
+*.py[cod]
+*$py.class
+
+# C extensions
+*.so
+
+# Distribution / packaging
+.Python
+build/
+develop-eggs/
+dist/
+downloads/
+eggs/
+.eggs/
+lib/
+lib64/
+parts/
+sdist/
+var/
+wheels/
+*.egg-info/
+.installed.cfg
+*.egg
+MANIFEST
+
+# PyInstaller
+# Usually these files are written by a python script from a template
+# before PyInstaller builds the exe, so as to inject date/other infos into it.
+*.manifest
+*.spec
+
+# Installer logs
+pip-log.txt
+pip-delete-this-directory.txt
+
+# Unit test / coverage reports
+htmlcov/
+.tox/
+.coverage
+.coverage.*
+.cache
+nosetests.xml
+coverage.xml
+*.cover
+.hypothesis/
+.pytest_cache/
+
+# Translations
+*.mo
+*.pot
+
+# Django stuff:
+*.log
+local_settings.py
+db.sqlite3
+
+# Flask stuff:
+instance/
+.webassets-cache
+
+# Scrapy stuff:
+.scrapy
+
+# Sphinx documentation
+docs/_build/
+
+# PyBuilder
+target/
+
+# Jupyter Notebook
+.ipynb_checkpoints
+
+# pyenv
+.python-version
+
+# celery beat schedule file
+celerybeat-schedule
+
+# SageMath parsed files
+*.sage.py
+
+# Environments
+.env
+.venv
+env/
+venv/
+ENV/
+env.bak/
+venv.bak/
+
+# Spyder project settings
+.spyderproject
+.spyproject
+
+# Rope project settings
+.ropeproject
+
+# mkdocs documentation
+/site
+
+# mypy
+.mypy_cache/
diff --git a/containerd/hooks/relations/untrusted-container-runtime/LICENSE b/containerd/hooks/relations/untrusted-container-runtime/LICENSE
new file mode 100644
index 0000000..261eeb9
--- /dev/null
+++ b/containerd/hooks/relations/untrusted-container-runtime/LICENSE
@@ -0,0 +1,201 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/containerd/hooks/relations/untrusted-container-runtime/README.md b/containerd/hooks/relations/untrusted-container-runtime/README.md
new file mode 100644
index 0000000..135dca5
--- /dev/null
+++ b/containerd/hooks/relations/untrusted-container-runtime/README.md
@@ -0,0 +1,54 @@
+# interface-untrusted-container-runtime
+
+## Overview
+
+This interface handles communication between subordinate container runtimes
+and this subordinate untrusted container runtime, such as `containerd` and
+`kata-containers`.
+
+## Usage
+
+### Provides
+
+The providing side of the container interface provides a place for an
+untrusted container runtime to connect to.
+
+Your charm should respond to the `endpoint.{endpoint_name}.available` state,
+which indicates that there is an untrusted container runtime connected.
+
+A trivial example of handling this interface would be:
+
+```python
+@when('endpoint.containerd.joined')
+def update_kubelet_config(containerd):
+ endpoint = endpoint_from_flag('endpoint.containerd.joined')
+ config = endpoint.get_config()
+
+ render(
+ 'config.toml',
+ {
+ 'runtime_name': config['name'],
+ 'runtime_binary': config['binary_path']
+ }
+ )
+```
+
+### Requires
+
+The requiring side of the untrusted container interface requires a place for
+an untrusted container runtime to connect to.
+
+Your charm should set `{endpoint_name}.available` state,
+which indicates that the container runtime is connected.
+
+A trivial example of handling this interface would be:
+
+```python
+@when('endpoint.containerd.joined')
+def publish_config():
+ endpoint = endpoint_from_flag('endpoint.containerd.joined')
+ endpoint.set_config(
+        name='kata',
+        binary_path='/usr/bin/kata-runtime'
+ )
+```
diff --git a/containerd/hooks/relations/untrusted-container-runtime/__init__.py b/containerd/hooks/relations/untrusted-container-runtime/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/containerd/hooks/relations/untrusted-container-runtime/interface.yaml b/containerd/hooks/relations/untrusted-container-runtime/interface.yaml
new file mode 100644
index 0000000..d0d7dbc
--- /dev/null
+++ b/containerd/hooks/relations/untrusted-container-runtime/interface.yaml
@@ -0,0 +1,4 @@
+name: untrusted-container-runtime
+summary: Interface for relating to untrusted container runtimes
+version: 1
+maintainer: "Joe Borg "
diff --git a/containerd/hooks/relations/untrusted-container-runtime/provides.py b/containerd/hooks/relations/untrusted-container-runtime/provides.py
new file mode 100644
index 0000000..09deb26
--- /dev/null
+++ b/containerd/hooks/relations/untrusted-container-runtime/provides.py
@@ -0,0 +1,28 @@
+from charms.reactive import (
+ Endpoint,
+ set_flag,
+ clear_flag
+)
+
+from charms.reactive import (
+ when,
+ when_not
+)
+
+
+class ContainerRuntimeProvides(Endpoint):
+ @when('endpoint.{endpoint_name}.joined')
+ def joined(self):
+ set_flag(self.expand_name('endpoint.{endpoint_name}.available'))
+
+ @when_not('endpoint.{endpoint_name}.joined')
+ def broken(self):
+ clear_flag(self.expand_name('endpoint.{endpoint_name}.available'))
+
+ def get_config(self):
+ """
+ Get the configuration published.
+
+ :return: Dictionary configuration
+ """
+ return self.all_joined_units.received
diff --git a/containerd/hooks/relations/untrusted-container-runtime/requires.py b/containerd/hooks/relations/untrusted-container-runtime/requires.py
new file mode 100644
index 0000000..f717ba6
--- /dev/null
+++ b/containerd/hooks/relations/untrusted-container-runtime/requires.py
@@ -0,0 +1,34 @@
+from charms.reactive import (
+ Endpoint,
+ set_flag,
+ clear_flag
+)
+
+from charms.reactive import (
+ when,
+ when_not
+)
+
+
+class ContainerRuntimeRequires(Endpoint):
+ @when('endpoint.{endpoint_name}.changed')
+ def changed(self):
+ set_flag(self.expand_name('endpoint.{endpoint_name}.available'))
+
+ @when_not('endpoint.{endpoint_name}.joined')
+ def broken(self):
+ clear_flag(self.expand_name('endpoint.{endpoint_name}.available'))
+
+ def set_config(self, name, binary_path):
+ """
+ Set the configuration to be published.
+
+ :param name: String name of runtime
+ :param binary_path: String runtime executable
+ :return: None
+ """
+ for relation in self.relations:
+ relation.to_publish.update({
+ 'name': name,
+ 'binary_path': binary_path
+ })
diff --git a/containerd/hooks/start b/containerd/hooks/start
new file mode 100755
index 0000000..9858c6b
--- /dev/null
+++ b/containerd/hooks/start
@@ -0,0 +1,22 @@
+#!/usr/bin/env python3
+
+# Load modules from $JUJU_CHARM_DIR/lib
+import sys
+sys.path.append('lib')
+
+from charms.layer import basic # noqa
+basic.bootstrap_charm_deps()
+
+from charmhelpers.core import hookenv # noqa
+hookenv.atstart(basic.init_config_states)
+hookenv.atexit(basic.clear_config_states)
+
+
+# This will load and run the appropriate @hook and other decorated
+# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive,
+# and $JUJU_CHARM_DIR/hooks/relations.
+#
+# See https://jujucharms.com/docs/stable/authors-charm-building
+# for more information on this pattern.
+from charms.reactive import main # noqa
+main()
diff --git a/containerd/hooks/stop b/containerd/hooks/stop
new file mode 100755
index 0000000..9858c6b
--- /dev/null
+++ b/containerd/hooks/stop
@@ -0,0 +1,22 @@
+#!/usr/bin/env python3
+
+# Load modules from $JUJU_CHARM_DIR/lib
+import sys
+sys.path.append('lib')
+
+from charms.layer import basic # noqa
+basic.bootstrap_charm_deps()
+
+from charmhelpers.core import hookenv # noqa
+hookenv.atstart(basic.init_config_states)
+hookenv.atexit(basic.clear_config_states)
+
+
+# This will load and run the appropriate @hook and other decorated
+# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive,
+# and $JUJU_CHARM_DIR/hooks/relations.
+#
+# See https://jujucharms.com/docs/stable/authors-charm-building
+# for more information on this pattern.
+from charms.reactive import main # noqa
+main()
diff --git a/containerd/hooks/untrusted-relation-broken b/containerd/hooks/untrusted-relation-broken
new file mode 100755
index 0000000..9858c6b
--- /dev/null
+++ b/containerd/hooks/untrusted-relation-broken
@@ -0,0 +1,22 @@
+#!/usr/bin/env python3
+
+# Load modules from $JUJU_CHARM_DIR/lib
+import sys
+sys.path.append('lib')
+
+from charms.layer import basic # noqa
+basic.bootstrap_charm_deps()
+
+from charmhelpers.core import hookenv # noqa
+hookenv.atstart(basic.init_config_states)
+hookenv.atexit(basic.clear_config_states)
+
+
+# This will load and run the appropriate @hook and other decorated
+# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive,
+# and $JUJU_CHARM_DIR/hooks/relations.
+#
+# See https://jujucharms.com/docs/stable/authors-charm-building
+# for more information on this pattern.
+from charms.reactive import main # noqa
+main()
diff --git a/containerd/hooks/untrusted-relation-changed b/containerd/hooks/untrusted-relation-changed
new file mode 100755
index 0000000..9858c6b
--- /dev/null
+++ b/containerd/hooks/untrusted-relation-changed
@@ -0,0 +1,22 @@
+#!/usr/bin/env python3
+
+# Load modules from $JUJU_CHARM_DIR/lib
+import sys
+sys.path.append('lib')
+
+from charms.layer import basic # noqa
+basic.bootstrap_charm_deps()
+
+from charmhelpers.core import hookenv # noqa
+hookenv.atstart(basic.init_config_states)
+hookenv.atexit(basic.clear_config_states)
+
+
+# This will load and run the appropriate @hook and other decorated
+# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive,
+# and $JUJU_CHARM_DIR/hooks/relations.
+#
+# See https://jujucharms.com/docs/stable/authors-charm-building
+# for more information on this pattern.
+from charms.reactive import main # noqa
+main()
diff --git a/containerd/hooks/untrusted-relation-created b/containerd/hooks/untrusted-relation-created
new file mode 100755
index 0000000..9858c6b
--- /dev/null
+++ b/containerd/hooks/untrusted-relation-created
@@ -0,0 +1,22 @@
+#!/usr/bin/env python3
+
+# Load modules from $JUJU_CHARM_DIR/lib
+import sys
+sys.path.append('lib')
+
+from charms.layer import basic # noqa
+basic.bootstrap_charm_deps()
+
+from charmhelpers.core import hookenv # noqa
+hookenv.atstart(basic.init_config_states)
+hookenv.atexit(basic.clear_config_states)
+
+
+# This will load and run the appropriate @hook and other decorated
+# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive,
+# and $JUJU_CHARM_DIR/hooks/relations.
+#
+# See https://jujucharms.com/docs/stable/authors-charm-building
+# for more information on this pattern.
+from charms.reactive import main # noqa
+main()
diff --git a/containerd/hooks/untrusted-relation-departed b/containerd/hooks/untrusted-relation-departed
new file mode 100755
index 0000000..9858c6b
--- /dev/null
+++ b/containerd/hooks/untrusted-relation-departed
@@ -0,0 +1,22 @@
+#!/usr/bin/env python3
+
+# Load modules from $JUJU_CHARM_DIR/lib
+import sys
+sys.path.append('lib')
+
+from charms.layer import basic # noqa
+basic.bootstrap_charm_deps()
+
+from charmhelpers.core import hookenv # noqa
+hookenv.atstart(basic.init_config_states)
+hookenv.atexit(basic.clear_config_states)
+
+
+# This will load and run the appropriate @hook and other decorated
+# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive,
+# and $JUJU_CHARM_DIR/hooks/relations.
+#
+# See https://jujucharms.com/docs/stable/authors-charm-building
+# for more information on this pattern.
+from charms.reactive import main # noqa
+main()
diff --git a/containerd/hooks/untrusted-relation-joined b/containerd/hooks/untrusted-relation-joined
new file mode 100755
index 0000000..9858c6b
--- /dev/null
+++ b/containerd/hooks/untrusted-relation-joined
@@ -0,0 +1,22 @@
+#!/usr/bin/env python3
+
+# Load modules from $JUJU_CHARM_DIR/lib
+import sys
+sys.path.append('lib')
+
+from charms.layer import basic # noqa
+basic.bootstrap_charm_deps()
+
+from charmhelpers.core import hookenv # noqa
+hookenv.atstart(basic.init_config_states)
+hookenv.atexit(basic.clear_config_states)
+
+
+# This will load and run the appropriate @hook and other decorated
+# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive,
+# and $JUJU_CHARM_DIR/hooks/relations.
+#
+# See https://jujucharms.com/docs/stable/authors-charm-building
+# for more information on this pattern.
+from charms.reactive import main # noqa
+main()
diff --git a/containerd/hooks/update-status b/containerd/hooks/update-status
new file mode 100755
index 0000000..9858c6b
--- /dev/null
+++ b/containerd/hooks/update-status
@@ -0,0 +1,22 @@
+#!/usr/bin/env python3
+
+# Load modules from $JUJU_CHARM_DIR/lib
+import sys
+sys.path.append('lib')
+
+from charms.layer import basic # noqa
+basic.bootstrap_charm_deps()
+
+from charmhelpers.core import hookenv # noqa
+hookenv.atstart(basic.init_config_states)
+hookenv.atexit(basic.clear_config_states)
+
+
+# This will load and run the appropriate @hook and other decorated
+# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive,
+# and $JUJU_CHARM_DIR/hooks/relations.
+#
+# See https://jujucharms.com/docs/stable/authors-charm-building
+# for more information on this pattern.
+from charms.reactive import main # noqa
+main()
diff --git a/containerd/hooks/upgrade-charm b/containerd/hooks/upgrade-charm
new file mode 100755
index 0000000..9858c6b
--- /dev/null
+++ b/containerd/hooks/upgrade-charm
@@ -0,0 +1,22 @@
+#!/usr/bin/env python3
+
+# Load modules from $JUJU_CHARM_DIR/lib
+import sys
+sys.path.append('lib')
+
+from charms.layer import basic # noqa
+basic.bootstrap_charm_deps()
+
+from charmhelpers.core import hookenv # noqa
+hookenv.atstart(basic.init_config_states)
+hookenv.atexit(basic.clear_config_states)
+
+
+# This will load and run the appropriate @hook and other decorated
+# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive,
+# and $JUJU_CHARM_DIR/hooks/relations.
+#
+# See https://jujucharms.com/docs/stable/authors-charm-building
+# for more information on this pattern.
+from charms.reactive import main # noqa
+main()
diff --git a/containerd/icon.svg b/containerd/icon.svg
new file mode 100644
index 0000000..95e747e
--- /dev/null
+++ b/containerd/icon.svg
@@ -0,0 +1,9 @@
+
+
+
diff --git a/containerd/layer.yaml b/containerd/layer.yaml
new file mode 100644
index 0000000..91252cc
--- /dev/null
+++ b/containerd/layer.yaml
@@ -0,0 +1,24 @@
+"includes":
+- "layer:options"
+- "layer:basic"
+- "layer:status"
+- "layer:debug"
+- "layer:container-runtime-common"
+- "interface:container-runtime"
+- "interface:untrusted-container-runtime"
+- "interface:docker-registry"
+"exclude": [".travis.yml", "tests", "tox.ini", "test-requirements.txt", "unit_tests",
+ ".coverage", ".tox", "__pycache__"]
+"options":
+ "basic":
+ "packages": []
+ "python_packages": []
+ "use_venv": !!bool "true"
+ "include_system_packages": !!bool "false"
+ "debug": {}
+ "status":
+ "patch-hookenv": !!bool "true"
+ "container-runtime-common": {}
+ "containerd": {}
+"repo": "https://github.com/charmed-kubernetes/charm-containerd"
+"is": "containerd"
diff --git a/containerd/lib/charms/layer/__init__.py b/containerd/lib/charms/layer/__init__.py
new file mode 100644
index 0000000..a8e0c64
--- /dev/null
+++ b/containerd/lib/charms/layer/__init__.py
@@ -0,0 +1,60 @@
+import sys
+from importlib import import_module
+from pathlib import Path
+
+
+def import_layer_libs():
+ """
+ Ensure that all layer libraries are imported.
+
+ This makes it possible to do the following:
+
+ from charms import layer
+
+ layer.foo.do_foo_thing()
+
+ Note: This function must be called after bootstrap.
+ """
+ for module_file in Path('lib/charms/layer').glob('*'):
+ module_name = module_file.stem
+ if module_name in ('__init__', 'basic', 'execd') or not (
+ module_file.suffix == '.py' or module_file.is_dir()
+ ):
+ continue
+ import_module('charms.layer.{}'.format(module_name))
+
+
+# Terrible hack to support the old terrible interface.
+# Try to get people to call layer.options.get() instead so
+# that we can remove this garbage.
+# Cribbed from https://stackoverflow.com/a/48100440/4941864
+class OptionsBackwardsCompatibilityHack(sys.modules[__name__].__class__):
+ def __call__(self, section=None, layer_file=None):
+ if layer_file is None:
+ return self.get(section=section)
+ else:
+ return self.get(section=section,
+ layer_file=Path(layer_file))
+
+
+def patch_options_interface():
+ from charms.layer import options
+ if sys.version_info.minor >= 5:
+ options.__class__ = OptionsBackwardsCompatibilityHack
+ else:
+ # Py 3.4 doesn't support changing the __class__, so we have to do it
+ # another way. The last line is needed because we already have a
+ # reference that doesn't get updated with sys.modules.
+ name = options.__name__
+ hack = OptionsBackwardsCompatibilityHack(name)
+ hack.get = options.get
+ sys.modules[name] = hack
+ sys.modules[__name__].options = hack
+
+
+try:
+ patch_options_interface()
+except ImportError:
+ # This may fail if pyyaml hasn't been installed yet. But in that
+ # case, the bootstrap logic will try it again once it has.
+ pass
diff --git a/containerd/lib/charms/layer/basic.py b/containerd/lib/charms/layer/basic.py
new file mode 100644
index 0000000..7507203
--- /dev/null
+++ b/containerd/lib/charms/layer/basic.py
@@ -0,0 +1,446 @@
+import os
+import sys
+import re
+import shutil
+from distutils.version import LooseVersion
+from pkg_resources import Requirement
+from glob import glob
+from subprocess import check_call, check_output, CalledProcessError
+from time import sleep
+
+from charms import layer
+from charms.layer.execd import execd_preinstall
+
+
+def _get_subprocess_env():
+ env = os.environ.copy()
+ env['LANG'] = env.get('LANG', 'C.UTF-8')
+ return env
+
+
+def get_series():
+ """
+    Return the series for a few known OSes.
+    Tested as of November 2019 against:
+    * centos6, centos7, rhel6
+    * bionic
+ """
+ series = ""
+
+ # Looking for content in /etc/os-release
+ # works for ubuntu + some centos
+ if os.path.isfile('/etc/os-release'):
+ d = {}
+ with open('/etc/os-release', 'r') as rel:
+ for l in rel:
+ if not re.match(r'^\s*$', l):
+ k, v = l.split('=')
+ d[k.strip()] = v.strip().replace('"', '')
+ series = "{ID}{VERSION_ID}".format(**d)
+
+ # Looking for content in /etc/redhat-release
+ # works for redhat enterprise systems
+ elif os.path.isfile('/etc/redhat-release'):
+ with open('/etc/redhat-release', 'r') as redhatlsb:
+ # CentOS Linux release 7.7.1908 (Core)
+ line = redhatlsb.readline()
+ release = int(line.split("release")[1].split()[0][0])
+ series = "centos" + str(release)
+
+ # Looking for content in /etc/lsb-release
+ # works for ubuntu
+ elif os.path.isfile('/etc/lsb-release'):
+ d = {}
+ with open('/etc/lsb-release', 'r') as lsb:
+ for l in lsb:
+ k, v = l.split('=')
+ d[k.strip()] = v.strip()
+ series = d['DISTRIB_CODENAME']
+
+    # This is what happens if we can't figure out the OS.
+ else:
+ series = "unknown"
+ return series
+
+
+def bootstrap_charm_deps():
+ """
+ Set up the base charm dependencies so that the reactive system can run.
+ """
+ # execd must happen first, before any attempt to install packages or
+ # access the network, because sites use this hook to do bespoke
+ # configuration and install secrets so the rest of this bootstrap
+ # and the charm itself can actually succeed. This call does nothing
+ # unless the operator has created and populated $JUJU_CHARM_DIR/exec.d.
+ execd_preinstall()
+ # ensure that $JUJU_CHARM_DIR/bin is on the path, for helper scripts
+
+ series = get_series()
+
+    # TODO: confirm whether build-essential is actually needed here.
+ ubuntu_packages = ['python3-pip',
+ 'python3-setuptools',
+ 'python3-yaml',
+ 'python3-dev',
+ 'python3-wheel',
+ 'build-essential']
+
+    # CentOS equivalent of the above; we deliberately omit the
+    # "Development Tools" yum group to keep the install minimal.
+ centos_packages = ['python3-pip',
+ 'python3-setuptools',
+ 'python3-devel',
+ 'python3-wheel']
+
+ packages_needed = []
+ if 'centos' in series:
+ packages_needed = centos_packages
+ else:
+ packages_needed = ubuntu_packages
+
+ charm_dir = os.environ['JUJU_CHARM_DIR']
+ os.environ['PATH'] += ':%s' % os.path.join(charm_dir, 'bin')
+ venv = os.path.abspath('../.venv')
+ vbin = os.path.join(venv, 'bin')
+ vpip = os.path.join(vbin, 'pip')
+ vpy = os.path.join(vbin, 'python')
+ hook_name = os.path.basename(sys.argv[0])
+ is_bootstrapped = os.path.exists('wheelhouse/.bootstrapped')
+ is_charm_upgrade = hook_name == 'upgrade-charm'
+ is_series_upgrade = hook_name == 'post-series-upgrade'
+ is_post_upgrade = os.path.exists('wheelhouse/.upgraded')
+ is_upgrade = (not is_post_upgrade and
+ (is_charm_upgrade or is_series_upgrade))
+ if is_bootstrapped and not is_upgrade:
+ # older subordinates might have downgraded charm-env, so we should
+ # restore it if necessary
+ install_or_update_charm_env()
+ activate_venv()
+ # the .upgrade file prevents us from getting stuck in a loop
+ # when re-execing to activate the venv; at this point, we've
+ # activated the venv, so it's safe to clear it
+ if is_post_upgrade:
+ os.unlink('wheelhouse/.upgraded')
+ return
+ if os.path.exists(venv):
+ try:
+ # focal installs or upgrades prior to PR 160 could leave the venv
+ # in a broken state which would prevent subsequent charm upgrades
+ _load_installed_versions(vpip)
+ except CalledProcessError:
+ is_broken_venv = True
+ else:
+ is_broken_venv = False
+ if is_upgrade or is_broken_venv:
+ # All upgrades should do a full clear of the venv, rather than
+ # just updating it, to bring in updates to Python itself
+ shutil.rmtree(venv)
+ if is_upgrade:
+ if os.path.exists('wheelhouse/.bootstrapped'):
+ os.unlink('wheelhouse/.bootstrapped')
+ # bootstrap wheelhouse
+ if os.path.exists('wheelhouse'):
+ pre_eoan = series in ('ubuntu12.04', 'precise',
+ 'ubuntu14.04', 'trusty',
+ 'ubuntu16.04', 'xenial',
+ 'ubuntu18.04', 'bionic')
+ pydistutils_lines = [
+ "[easy_install]\n",
+ "find_links = file://{}/wheelhouse/\n".format(charm_dir),
+ "no_index=True\n",
+ "index_url=\n", # deliberately nothing here; disables it.
+ ]
+ if pre_eoan:
+ pydistutils_lines.append("allow_hosts = ''\n")
+ with open('/root/.pydistutils.cfg', 'w') as fp:
+ # make sure that easy_install also only uses the wheelhouse
+ # (see https://github.com/pypa/pip/issues/410)
+ fp.writelines(pydistutils_lines)
+ if 'centos' in series:
+ yum_install(packages_needed)
+ else:
+ apt_install(packages_needed)
+ from charms.layer import options
+ cfg = options.get('basic')
+ # include packages defined in layer.yaml
+ if 'centos' in series:
+ yum_install(cfg.get('packages', []))
+ else:
+ apt_install(cfg.get('packages', []))
+ # if we're using a venv, set it up
+ if cfg.get('use_venv'):
+ if not os.path.exists(venv):
+ series = get_series()
+ if series in ('ubuntu12.04', 'precise',
+ 'ubuntu14.04', 'trusty'):
+ apt_install(['python-virtualenv'])
+ elif 'centos' in series:
+ yum_install(['python-virtualenv'])
+ else:
+ apt_install(['virtualenv'])
+ cmd = ['virtualenv', '-ppython3', '--never-download', venv]
+ if cfg.get('include_system_packages'):
+ cmd.append('--system-site-packages')
+ check_call(cmd, env=_get_subprocess_env())
+ os.environ['PATH'] = ':'.join([vbin, os.environ['PATH']])
+ pip = vpip
+ else:
+ pip = 'pip3'
+ # save a copy of system pip to prevent `pip3 install -U pip`
+ # from changing it
+ if os.path.exists('/usr/bin/pip'):
+ shutil.copy2('/usr/bin/pip', '/usr/bin/pip.save')
+ pre_install_pkgs = ['pip', 'setuptools', 'setuptools-scm']
+ # we bundle these packages to work around bugs in older versions (such
+ # as https://github.com/pypa/pip/issues/56), but if the system already
+ # provided a newer version, downgrading it can cause other problems
+ _update_if_newer(pip, pre_install_pkgs)
+ # install the rest of the wheelhouse deps (extract the pkg names into
+ # a set so that we can ignore the pre-install packages and let pip
+ # choose the best version in case there are multiple from layer
+ # conflicts)
+ pkgs = _load_wheelhouse_versions().keys() - set(pre_install_pkgs)
+ reinstall_flag = '--force-reinstall'
+ if not cfg.get('use_venv', True) and pre_eoan:
+ reinstall_flag = '--ignore-installed'
+ check_call([pip, 'install', '-U', reinstall_flag, '--no-index',
+ '--no-cache-dir', '-f', 'wheelhouse'] + list(pkgs),
+ env=_get_subprocess_env())
+ # re-enable installation from pypi
+ os.remove('/root/.pydistutils.cfg')
+
+ # install pyyaml for centos7, since, unlike the ubuntu image, the
+ # default image for centos doesn't include pyyaml; see the discussion:
+ # https://discourse.jujucharms.com/t/charms-for-centos-lets-begin
+ if 'centos' in series:
+ check_call([pip, 'install', '-U', 'pyyaml'],
+ env=_get_subprocess_env())
+
+ # install python packages from layer options
+ if cfg.get('python_packages'):
+ check_call([pip, 'install', '-U'] + cfg.get('python_packages'),
+ env=_get_subprocess_env())
+ if not cfg.get('use_venv'):
+ # restore system pip to prevent `pip3 install -U pip`
+ # from changing it
+ if os.path.exists('/usr/bin/pip.save'):
+ shutil.copy2('/usr/bin/pip.save', '/usr/bin/pip')
+ os.remove('/usr/bin/pip.save')
+ # setup wrappers to ensure envs are used for scripts
+ install_or_update_charm_env()
+ for wrapper in ('charms.reactive', 'charms.reactive.sh',
+ 'chlp', 'layer_option'):
+ src = os.path.join('/usr/local/sbin', 'charm-env')
+ dst = os.path.join('/usr/local/sbin', wrapper)
+ if not os.path.exists(dst):
+ os.symlink(src, dst)
+ if cfg.get('use_venv'):
+ shutil.copy2('bin/layer_option', vbin)
+ else:
+ shutil.copy2('bin/layer_option', '/usr/local/bin/')
+ # re-link the charm copy to the wrapper in case charms
+ # call bin/layer_option directly (as was the old pattern)
+ os.remove('bin/layer_option')
+ os.symlink('/usr/local/sbin/layer_option', 'bin/layer_option')
+ # flag us as having already bootstrapped so we don't do it again
+ open('wheelhouse/.bootstrapped', 'w').close()
+ if is_upgrade:
+ # flag us as having already upgraded so we don't do it again
+ open('wheelhouse/.upgraded', 'w').close()
+ # Ensure that the newly bootstrapped libs are available.
+ # Note: this only seems to be an issue with namespace packages.
+ # Non-namespace-package libs (e.g., charmhelpers) are available
+ # without having to reload the interpreter. :/
+ reload_interpreter(vpy if cfg.get('use_venv') else sys.argv[0])
+
+
+def _load_installed_versions(pip):
+ pip_freeze = check_output([pip, 'freeze']).decode('utf8')
+ versions = {}
+ for pkg_ver in pip_freeze.splitlines():
+ try:
+ req = Requirement.parse(pkg_ver)
+ except ValueError:
+ continue
+ versions.update({
+ req.project_name: LooseVersion(ver)
+ for op, ver in req.specs if op == '=='
+ })
+ return versions
+
+
+def _load_wheelhouse_versions():
+ versions = {}
+ for wheel in glob('wheelhouse/*'):
+ pkg, ver = os.path.basename(wheel).rsplit('-', 1)
+ # nb: LooseVersion ignores the file extension
+ versions[pkg.replace('_', '-')] = LooseVersion(ver)
+ return versions
+
+
+def _update_if_newer(pip, pkgs):
+ installed = _load_installed_versions(pip)
+ wheelhouse = _load_wheelhouse_versions()
+ for pkg in pkgs:
+ if pkg not in installed or wheelhouse[pkg] > installed[pkg]:
+ check_call([pip, 'install', '-U', '--no-index', '-f', 'wheelhouse',
+ pkg], env=_get_subprocess_env())
+
+
+def install_or_update_charm_env():
+ # On Trusty python3-pkg-resources is not installed
+ try:
+ from pkg_resources import parse_version
+ except ImportError:
+ apt_install(['python3-pkg-resources'])
+ from pkg_resources import parse_version
+
+ try:
+ installed_version = parse_version(
+ check_output(['/usr/local/sbin/charm-env',
+ '--version']).decode('utf8'))
+ except (CalledProcessError, FileNotFoundError):
+ installed_version = parse_version('0.0.0')
+ try:
+ bundled_version = parse_version(
+ check_output(['bin/charm-env',
+ '--version']).decode('utf8'))
+ except (CalledProcessError, FileNotFoundError):
+ bundled_version = parse_version('0.0.0')
+ if installed_version < bundled_version:
+ shutil.copy2('bin/charm-env', '/usr/local/sbin/')
+
+
+def activate_venv():
+ """
+ Activate the venv if enabled in ``layer.yaml``.
+
+ This is handled automatically for normal hooks, but actions might
+ need to invoke this manually, using something like:
+
+ # Load modules from $JUJU_CHARM_DIR/lib
+ import sys
+ sys.path.append('lib')
+
+ from charms.layer.basic import activate_venv
+ activate_venv()
+
+ This will ensure that modules installed in the charm's
+ virtual environment are available to the action.
+ """
+ from charms.layer import options
+ venv = os.path.abspath('../.venv')
+ vbin = os.path.join(venv, 'bin')
+ vpy = os.path.join(vbin, 'python')
+ use_venv = options.get('basic', 'use_venv')
+ if use_venv and '.venv' not in sys.executable:
+ # activate the venv
+ os.environ['PATH'] = ':'.join([vbin, os.environ['PATH']])
+ reload_interpreter(vpy)
+ layer.patch_options_interface()
+ layer.import_layer_libs()
+
+
+def reload_interpreter(python):
+ """
+ Reload the python interpreter to ensure that all deps are available.
+
+    Newly installed modules in namespace packages sometimes seem to
+ not be picked up by Python 3.
+ """
+ os.execve(python, [python] + list(sys.argv), os.environ)
+
+
+def apt_install(packages):
+ """
+ Install apt packages.
+
+ This ensures a consistent set of options that are often missed but
+ should really be set.
+ """
+ if isinstance(packages, (str, bytes)):
+ packages = [packages]
+
+ env = _get_subprocess_env()
+
+ if 'DEBIAN_FRONTEND' not in env:
+ env['DEBIAN_FRONTEND'] = 'noninteractive'
+
+ cmd = ['apt-get',
+ '--option=Dpkg::Options::=--force-confold',
+ '--assume-yes',
+ 'install']
+ for attempt in range(3):
+ try:
+ check_call(cmd + packages, env=env)
+ except CalledProcessError:
+ if attempt == 2: # third attempt
+ raise
+ try:
+ # sometimes apt-get update needs to be run
+ check_call(['apt-get', 'update'], env=env)
+ except CalledProcessError:
+ # sometimes it's a dpkg lock issue
+ pass
+ sleep(5)
+ else:
+ break
+
+
+def yum_install(packages):
+ """ Installs packages with yum.
+ This function largely mimics the apt_install function for consistency.
+ """
+ if packages:
+ env = os.environ.copy()
+ cmd = ['yum', '-y', 'install']
+ for attempt in range(3):
+ try:
+ check_call(cmd + packages, env=env)
+ except CalledProcessError:
+ if attempt == 2:
+ raise
+ try:
+ check_call(['yum', 'update'], env=env)
+ except CalledProcessError:
+ pass
+ sleep(5)
+ else:
+ break
+ else:
+ pass
+
+
+def init_config_states():
+ import yaml
+ from charmhelpers.core import hookenv
+ from charms.reactive import set_state
+ from charms.reactive import toggle_state
+ config = hookenv.config()
+ config_defaults = {}
+ config_defs = {}
+ config_yaml = os.path.join(hookenv.charm_dir(), 'config.yaml')
+ if os.path.exists(config_yaml):
+ with open(config_yaml) as fp:
+ config_defs = yaml.safe_load(fp).get('options', {})
+ config_defaults = {key: value.get('default')
+ for key, value in config_defs.items()}
+ for opt in config_defs.keys():
+ if config.changed(opt):
+ set_state('config.changed')
+ set_state('config.changed.{}'.format(opt))
+ toggle_state('config.set.{}'.format(opt), config.get(opt))
+ toggle_state('config.default.{}'.format(opt),
+ config.get(opt) == config_defaults[opt])
+
+
+def clear_config_states():
+ from charmhelpers.core import hookenv, unitdata
+ from charms.reactive import remove_state
+ config = hookenv.config()
+ remove_state('config.changed')
+ for opt in config.keys():
+ remove_state('config.changed.{}'.format(opt))
+ remove_state('config.set.{}'.format(opt))
+ remove_state('config.default.{}'.format(opt))
+ unitdata.kv().flush()
diff --git a/containerd/lib/charms/layer/container_runtime_common.py b/containerd/lib/charms/layer/container_runtime_common.py
new file mode 100644
index 0000000..a13572a
--- /dev/null
+++ b/containerd/lib/charms/layer/container_runtime_common.py
@@ -0,0 +1,122 @@
+import os
+import shutil
+import ipaddress
+from pathlib import Path
+
+from charmhelpers.core.hookenv import (
+ log,
+ env_proxy_settings
+)
+
+
+certs_dir = Path('/root/cdk')
+ca_crt_path = certs_dir / 'ca.crt'
+server_crt_path = certs_dir / 'server.crt'
+server_key_path = certs_dir / 'server.key'
+client_crt_path = certs_dir / 'client.crt'
+client_key_path = certs_dir / 'client.key'
+
+
+def get_hosts(config):
+ """
+ :param config: Dictionary
+ :return: String
+ """
+ if config is not None:
+ hosts = []
+ for address in config.get('NO_PROXY', '').split(','):
+ address = address.strip()
+ try:
+ net = ipaddress.ip_network(address)
+ ip_addresses = [str(ip) for ip in net.hosts()]
+ if ip_addresses == []:
+ hosts.append(address)
+ else:
+ hosts += ip_addresses
+ except ValueError:
+ hosts.append(address)
+ parsed_hosts = ','.join(hosts)
+ return parsed_hosts
+
+
+def merge_config(config, environment):
+ """
+ :param config: Dictionary
+ :param environment: Dictionary
+ :return: Dictionary
+ """
+ keys = ['HTTP_PROXY', 'HTTPS_PROXY', 'NO_PROXY']
+
+ for key in keys:
+ if config.get(key.lower(), '') == '' and \
+ config.get(key, '') == '':
+ value = environment.get(key) if environment.get(key, '') != '' \
+ else environment.get(key.lower(), '')
+
+ if value != '':
+ config[key] = value
+ config[key.lower()] = value
+ # Normalize
+ for key in keys:
+ value = config.get(key) if config.get(key, '') != '' \
+ else config.get(key.lower(), '')
+ config[key] = value
+ config[key.lower()] = value
+
+ return config
+
+
+def check_for_juju_https_proxy(config):
+ """
+ If config values are defined take precedent.
+
+ LP: https://bugs.launchpad.net/charm-layer-docker/+bug/1831712
+
+ :param config: Dictionary
+ :return: Dictionary
+ """
+ environment_config = env_proxy_settings()
+ charm_config = dict(config())
+
+ if environment_config is None or \
+ charm_config.get('disable-juju-proxy'):
+ return charm_config
+
+ no_proxy = get_hosts(environment_config)
+
+ environment_config.update({
+ 'NO_PROXY': no_proxy,
+ 'no_proxy': no_proxy
+ })
+
+ return merge_config(charm_config, environment_config)
+
+
+def manage_registry_certs(cert_dir, remove=False):
+ """
+ Add or remove TLS data for a specific registry.
+
+ When present, the container runtime will use certificates when
+ communicating with a specific registry.
+
+ :param cert_dir: String directory to store the client certificates
+    :param remove: Boolean remove cert data (defaults to add)
+ :return: None
+ """
+ if remove:
+ if os.path.isdir(cert_dir):
+ log('Disabling registry TLS: {}.'.format(cert_dir))
+ shutil.rmtree(cert_dir)
+ else:
+ os.makedirs(cert_dir, exist_ok=True)
+ client_tls = {
+ client_crt_path: os.path.join(cert_dir, 'client.cert'),
+ client_key_path: os.path.join(cert_dir, 'client.key')
+ }
+ for f, link in client_tls.items():
+ try:
+ os.remove(link)
+ except FileNotFoundError:
+ pass
+ log('Creating registry TLS link: {}.'.format(link))
+ os.symlink(f, link)
diff --git a/containerd/lib/charms/layer/containerd.py b/containerd/lib/charms/layer/containerd.py
new file mode 100644
index 0000000..3c2e96b
--- /dev/null
+++ b/containerd/lib/charms/layer/containerd.py
@@ -0,0 +1,37 @@
+from charmhelpers.core import hookenv, host, unitdata
+
+
+def get_sandbox_image():
+ """
+ Return the container image location for the sandbox_image.
+
+ Set an appropriate sandbox image based on known registries. Precedence should be:
+ - related docker-registry
+ - default charmed k8s registry (if related to kubernetes)
+ - upstream
+
+ :return: str container image location
+ """
+ db = unitdata.kv()
+ canonical_registry = 'rocks.canonical.com:443/cdk'
+ upstream_registry = 'k8s.gcr.io'
+
+ docker_registry = db.get('registry', None)
+ if docker_registry:
+ sandbox_registry = docker_registry['url']
+ else:
+ try:
+ deployment = hookenv.goal_state()
+ except NotImplementedError:
+ relations = []
+ for rid in hookenv.relation_ids('containerd'):
+ relations.append(hookenv.remote_service_name(rid))
+ else:
+ relations = deployment.get('relations', {}).get('containerd', {})
+
+ if any(k in relations for k in ('kubernetes-master', 'kubernetes-worker')):
+ sandbox_registry = canonical_registry
+ else:
+ sandbox_registry = upstream_registry
+
+ return '{}/pause-{}:3.4.1'.format(sandbox_registry, host.arch())
diff --git a/containerd/lib/charms/layer/execd.py b/containerd/lib/charms/layer/execd.py
new file mode 100644
index 0000000..438d9a1
--- /dev/null
+++ b/containerd/lib/charms/layer/execd.py
@@ -0,0 +1,114 @@
+# Copyright 2014-2016 Canonical Limited.
+#
+# This file is part of layer-basic, the reactive base layer for Juju.
+#
+# charm-helpers is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License version 3 as
+# published by the Free Software Foundation.
+#
+# charm-helpers is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with charm-helpers. If not, see .
+
+# This module may only import from the Python standard library.
+import os
+import sys
+import subprocess
+import time
+
+'''
+execd/preinstall
+
+Read the layer-basic docs for more info on how to use this feature.
+https://charmsreactive.readthedocs.io/en/latest/layer-basic.html#exec-d-support
+'''
+
+
+def default_execd_dir():
+ return os.path.join(os.environ['JUJU_CHARM_DIR'], 'exec.d')
+
+
+def execd_module_paths(execd_dir=None):
+ """Generate a list of full paths to modules within execd_dir."""
+ if not execd_dir:
+ execd_dir = default_execd_dir()
+
+ if not os.path.exists(execd_dir):
+ return
+
+ for subpath in os.listdir(execd_dir):
+ module = os.path.join(execd_dir, subpath)
+ if os.path.isdir(module):
+ yield module
+
+
+def execd_submodule_paths(command, execd_dir=None):
+ """Generate a list of full paths to the specified command within exec_dir.
+ """
+ for module_path in execd_module_paths(execd_dir):
+ path = os.path.join(module_path, command)
+ if os.access(path, os.X_OK) and os.path.isfile(path):
+ yield path
+
+
+def execd_sentinel_path(submodule_path):
+ module_path = os.path.dirname(submodule_path)
+ execd_path = os.path.dirname(module_path)
+ module_name = os.path.basename(module_path)
+ submodule_name = os.path.basename(submodule_path)
+ return os.path.join(execd_path,
+ '.{}_{}.done'.format(module_name, submodule_name))
+
+
+def execd_run(command, execd_dir=None, stop_on_error=True, stderr=None):
+ """Run command for each module within execd_dir which defines it."""
+ if stderr is None:
+ stderr = sys.stdout
+ for submodule_path in execd_submodule_paths(command, execd_dir):
+ # Only run each execd once. We cannot simply run them in the
+ # install hook, as potentially storage hooks are run before that.
+ # We cannot rely on them being idempotent.
+ sentinel = execd_sentinel_path(submodule_path)
+ if os.path.exists(sentinel):
+ continue
+
+ try:
+ subprocess.check_call([submodule_path], stderr=stderr,
+ universal_newlines=True)
+ with open(sentinel, 'w') as f:
+ f.write('{} ran successfully {}\n'.format(submodule_path,
+ time.ctime()))
+ f.write('Removing this file will cause it to be run again\n')
+ except subprocess.CalledProcessError as e:
+ # Logs get the details. We can't use juju-log, as the
+ # output may be substantial and exceed command line
+ # length limits.
+ print("ERROR ({}) running {}".format(e.returncode, e.cmd),
+ file=stderr)
+ print("STDOUT<"
+"description": |
+ containerd manages the complete container lifecycle of its
+ host system, from image transfer and storage to container
+ execution and supervision to low-level storage to network
+ attachments and beyond.
+"tags":
+- "misc"
+- "containers"
+"series":
+- "focal"
+- "bionic"
+- "xenial"
+"requires":
+ "containerd":
+ "interface": "container-runtime"
+ "scope": "container"
+ "docker-registry":
+ "interface": "docker-registry"
+"provides":
+ "untrusted":
+ "interface": "untrusted-container-runtime"
+ "scope": "container"
+"subordinate": !!bool "true"
diff --git a/containerd/pydocmd.yml b/containerd/pydocmd.yml
new file mode 100644
index 0000000..ab3b2ef
--- /dev/null
+++ b/containerd/pydocmd.yml
@@ -0,0 +1,16 @@
+site_name: 'Status Management Layer'
+
+generate:
+ - status.md:
+ - charms.layer.status.WorkloadState
+ - charms.layer.status.maintenance
+ - charms.layer.status.maint
+ - charms.layer.status.blocked
+ - charms.layer.status.waiting
+ - charms.layer.status.active
+ - charms.layer.status.status_set
+
+pages:
+ - Status Management Layer: status.md
+
+gens_dir: docs
diff --git a/containerd/reactive/__init__.py b/containerd/reactive/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/containerd/reactive/container_runtime_common.py b/containerd/reactive/container_runtime_common.py
new file mode 100644
index 0000000..802c8de
--- /dev/null
+++ b/containerd/reactive/container_runtime_common.py
@@ -0,0 +1,57 @@
+from base64 import b64decode
+from subprocess import check_call
+
+from charms.layer import status
+from charms.reactive import (
+ clear_flag,
+ set_flag,
+ when,
+ when_not
+)
+
+from charmhelpers.core import hookenv, host
+
+
+@when_not('cgroups.modified')
+def enable_grub_cgroups():
+ """
+ Run script to enable cgroups
+ in GRUB. Be aware, this will
+ reboot the host.
+
+ :return: None
+ """
+ cfg = hookenv.config()
+ if cfg.get('enable-cgroups'):
+ hookenv.log('Calling enable_grub_cgroups.sh and rebooting machine.')
+ check_call(['scripts/enable_grub_cgroups.sh'])
+ set_flag('cgroups.modified')
+
+
+@when('config.changed.custom-registry-ca')
+def install_custom_ca():
+ """
+ Installs a configured CA cert into the system-wide location.
+ """
+ ca_cert = hookenv.config().get('custom-registry-ca')
+ if ca_cert:
+ try:
+ # decode to bytes, as that's what install_ca_cert wants
+ _ca = b64decode(ca_cert)
+ except Exception:
+ status.blocked('Invalid base64 value for custom-registry-ca config')
+ return
+ else:
+ host.install_ca_cert(_ca, name='juju-custom-registry')
+ charm = hookenv.charm_name()
+ hookenv.log('Custom registry CA has been installed for {}'.format(charm))
+
+ # manage appropriate charm flags to recycle the runtime daemon
+ if charm == 'docker':
+ clear_flag('docker.available')
+ set_flag('docker.restart')
+ elif charm == 'containerd':
+ set_flag('containerd.restart')
+ else:
+ hookenv.log('Unknown runtime: {}. '
+ 'Cannot request a service restart.'.format(charm))
diff --git a/containerd/reactive/containerd.py b/containerd/reactive/containerd.py
new file mode 100644
index 0000000..8691575
--- /dev/null
+++ b/containerd/reactive/containerd.py
@@ -0,0 +1,697 @@
+import os
+import base64
+import binascii
+import json
+import requests
+import traceback
+
+from subprocess import (
+ check_call,
+ check_output,
+ CalledProcessError
+)
+
+from charms.reactive import (
+ hook,
+ when,
+ when_not,
+ set_state,
+ is_state,
+ remove_state,
+ endpoint_from_flag
+)
+
+from charms.layer import containerd, status
+from charms.layer.container_runtime_common import (
+ ca_crt_path,
+ server_crt_path,
+ server_key_path,
+ check_for_juju_https_proxy
+)
+
+from charmhelpers.core import (
+ host,
+ unitdata
+)
+
+from charmhelpers.core.templating import render
+from charmhelpers.core.hookenv import (
+ atexit,
+ config,
+ log,
+ application_version_set
+)
+
+from charmhelpers.core.kernel import modprobe
+
+from charmhelpers.fetch import (
+ apt_install,
+ apt_update,
+ apt_purge,
+ apt_hold,
+ apt_autoremove,
+ apt_unhold,
+ import_key
+)
+
+
+DB = unitdata.kv()
+
+CONTAINERD_PACKAGE = 'containerd'
+
+NVIDIA_PACKAGES = [
+ 'cuda-drivers',
+ 'nvidia-container-runtime',
+]
+
+
def _check_containerd():
    """
    Check that containerd is running.

    `ctr version` talks to both the client and the daemon, so a successful
    call is a reasonable indication that everything has been set up
    correctly.

    :return: bytes output of `ctr version` on success, None otherwise
        (callers only rely on truthiness / the raw output)
    """
    try:
        return check_output(['ctr', 'version'])
    except (FileNotFoundError, CalledProcessError):
        # Binary missing or daemon not answering.
        return None
+
+
def _juju_proxy_changed():
    """
    Check whether the Juju model HTTP(S) proxy settings have changed.

    Model proxy settings aren't propagated to the charm as config events,
    so compare the current values against the cached copy in unitdata.

    :return: Boolean - True when no cache exists yet or any proxy value differs
    """
    cached = DB.get('config-cache', None)
    if not cached:
        return True  # First pass.

    current = check_for_juju_https_proxy(config)
    # Any difference in the three proxy keys means a change.
    return any(
        cached[key] != current[key]
        for key in ('http_proxy', 'https_proxy', 'no_proxy')
    )
+
+
@atexit
def charm_status():
    """
    Set the charm's workload status after each hook is run.

    Priority: series-upgrade block, then invalid gpu_driver config,
    then active/blocked depending on whether containerd answers.

    :return: None
    """
    if is_state('upgrade.series.in-progress'):
        status.blocked('Series upgrade in progress')
        return
    if is_state('containerd.nvidia.invalid-option'):
        status.blocked(
            '{} is an invalid option for gpu_driver'.format(
                config().get('gpu_driver')
            )
        )
        return
    if _check_containerd():
        status.active('Container runtime available')
        set_state('containerd.ready')
    else:
        status.blocked('Container runtime not available')
+
+
def strip_url(url):
    """Strip the URL of protocol and trailing slashes, keeping host:port.

    Examples:
        url: http://10.10.10.10:8000 --> return: 10.10.10.10:8000
        url: https://myregistry.io:8000/ --> return: myregistry.io:8000
        url: myregistry.io:8000 --> return: myregistry.io:8000
    """
    trimmed = url.rstrip('/')
    # partition() is a no-op split: when '://' is absent the remainder is
    # empty and the whole (trimmed) string is the host:port already.
    _, sep, remainder = trimmed.partition('://')
    return remainder if sep else trimmed
+
+
def update_custom_tls_config(config_directory, registries, old_registries):
    """
    Read registries config and remove old/write new tls files from/to disk.

    Mutates each registry dict in place: when '<opt>_file' holds base64
    data, registry['<opt>'] is set to the on-disk path for that file.

    :param str config_directory: containerd config directory
    :param List registries: juju config for custom registries
    :param List old_registries: old juju config for custom registries
    :return: None
    """
    def _tls_path(registry, opt):
        # Files are named "<host:port>.<ca|key|cert>" inside config_directory.
        return os.path.join(
            config_directory, "%s.%s" % (strip_url(registry['url']), opt)
        )

    # Remove tls files of old registries; so as not to leave unneeded,
    # stale files behind.
    for registry in old_registries:
        for opt in ['ca', 'key', 'cert']:
            if registry.get('%s_file' % opt):
                registry[opt] = _tls_path(registry, opt)
                if os.path.isfile(registry[opt]):
                    os.remove(registry[opt])

    # Write tls files of new registries.
    for registry in registries:
        for opt in ['ca', 'key', 'cert']:
            file_b64 = registry.get('%s_file' % opt)
            if not file_b64:
                continue
            try:
                file_contents = base64.b64decode(file_b64)
            except (binascii.Error, TypeError):
                # Bad data for this one entry shouldn't abort the rest.
                log(traceback.format_exc())
                log("{}:{} didn't look like base64 data... skipping"
                    .format(registry['url'], opt))
                continue
            registry[opt] = _tls_path(registry, opt)
            with open(registry[opt], 'wb') as f:
                f.write(file_contents)
+
+
def populate_host_for_custom_registries(custom_registries):
    """Populate the 'host' field from 'url' if missing for custom registries.

    Examples:
        url: http://10.10.10.10:8000 --> host: 10.10.10.10:8000
        url: https://myregistry.io:8000/ --> host: myregistry.io:8000
        url: myregistry.io:8000 --> host: myregistry.io:8000
    """
    # Only touch well-formed input (a list); anything else is returned
    # untouched for the caller to deal with.
    if isinstance(custom_registries, list):
        for registry in custom_registries:
            url = registry.get('url')
            if url and not registry.get('host'):
                registry['host'] = strip_url(url)

    return custom_registries
+
+
def merge_custom_registries(config_directory, custom_registries,
                            old_custom_registries):
    """
    Merge custom registries and Docker registries from relation.

    :param str config_directory: containerd config directory
    :param str custom_registries: juju config for custom registries (JSON)
    :param str old_custom_registries: old juju config for custom registries
    :return: List Dictionary merged registries
    """
    # JSON string becomes a python list here; fill in any missing hosts.
    registries = populate_host_for_custom_registries(
        json.loads(custom_registries))

    old_registries = \
        json.loads(old_custom_registries) if old_custom_registries else []

    # Sync TLS material on disk with the new/old registry sets.
    update_custom_tls_config(config_directory, registries, old_registries)

    # A related docker-registry (stored in unitdata) is appended last.
    docker_registry = DB.get('registry', None)
    if docker_registry:
        registries.append(docker_registry)

    return registries
+
+
@hook('update-status')
def update_status():
    """
    Triggered when update-status is called.

    Flags a proxy re-render when the Juju model proxy settings changed.

    :return: None
    """
    if not _juju_proxy_changed():
        return
    set_state('containerd.juju-proxy.changed')
+
+
@hook('upgrade-charm')
def upgrade_charm():
    """
    Triggered when upgrade-charm is called.

    Re-pins the containerd apt package and re-renders config so a new
    charm revision's template changes take effect.

    :return: None
    """
    # Prevent containerd apt pkg from being implicitly updated.
    apt_hold(CONTAINERD_PACKAGE)

    # Re-render config in case the template has changed in the new charm.
    config_changed()
+
+
@when_not('containerd.br_netfilter.enabled')
def enable_br_netfilter_module():
    """
    Enable br_netfilter to work around https://github.com/kubernetes/kubernetes/issues/21613.

    In a container (LXD) the module can't be loaded from inside; the
    failure is ignored and the flag is set anyway. On a real host a
    failure leaves the flag unset so this handler retries next hook.

    :return: None
    """
    try:
        modprobe('br_netfilter', persist=True)
    except Exception:
        log(traceback.format_exc())
        if not host.is_container():
            log('LXD not detected, will retry loading br_netfilter')
            return
        # Containers can't modprobe; treat as done so we stop retrying.
        log('LXD detected, ignoring failure to load br_netfilter')
    set_state('containerd.br_netfilter.enabled')
+
+
@when_not('containerd.ready',
          'containerd.installed',
          'endpoint.containerd.departed')
def install_containerd():
    """
    Install containerd and then create initial configuration.

    :return: None
    """
    status.maintenance('Installing containerd via apt')
    apt_update()
    apt_install(CONTAINERD_PACKAGE, fatal=True)
    # Pin the package: unattended apt upgrades must not restart the runtime.
    apt_hold(CONTAINERD_PACKAGE)

    set_state('containerd.installed')
    # Render the initial config.toml immediately after install.
    config_changed()
+
+
@when('containerd.installed')
@when_not('containerd.version-published')
def publish_version_to_juju():
    """
    Publish the containerd version to Juju.

    Parses the output of `ctr version` and reports it via
    application_version_set. Skips (and retries next hook) when
    containerd isn't answering yet.

    :return: None
    """
    version_string = _check_containerd()
    if not version_string:
        return
    # Take the 7th whitespace-separated field of the bytes output and keep
    # the part before the first dash (e.g. b'1.5.2-0ubuntu1' -> '1.5.2').
    # NOTE(review): assumes the `ctr version` output layout is stable.
    version = version_string.split()[6].split(b'-')[0].decode()

    application_version_set(version)
    set_state('containerd.version-published')
+
+
@when_not('containerd.nvidia.checked')
@when_not('endpoint.containerd.departed')
def check_for_gpu():
    """
    Check if an Nvidia GPU exists, honoring the gpu_driver config.

    'auto' enables Nvidia support when lspci sees an nvidia device,
    'nvidia' forces it on, 'none' leaves the related states untouched.
    Anything else blocks via the invalid-option state.

    :return: None
    """
    driver_config = config().get('gpu_driver')
    if driver_config not in ('auto', 'none', 'nvidia'):
        set_state('containerd.nvidia.invalid-option')
        return

    lspci_out = check_output(['lspci', '-nnk']).rstrip().decode('utf-8').lower()

    if driver_config != 'none':
        forced = driver_config == 'nvidia'
        detected = driver_config == 'auto' and 'nvidia' in lspci_out
        if forced or detected:
            set_state('containerd.nvidia.available')
        else:
            remove_state('containerd.nvidia.available')
            remove_state('containerd.nvidia.ready')

    remove_state('containerd.nvidia.invalid-option')
    set_state('containerd.nvidia.checked')
+
+
@when('containerd.nvidia.available')
@when_not('containerd.nvidia.ready', 'endpoint.containerd.departed')
def configure_nvidia():
    """
    Based on charm config, install and configure Nvidia drivers.

    Adds the nvidia-container-runtime and CUDA apt repos (importing their
    GPG keys, honoring charm proxy config), then installs NVIDIA_PACKAGES.

    :return: None
    """
    status.maintenance('Installing Nvidia drivers.')

    dist = host.lsb_release()
    # e.g. 'ubuntu18.04' - the repo path format used by nvidia.github.io.
    release = '{}{}'.format(
        dist['DISTRIB_ID'].lower(),
        dist['DISTRIB_RELEASE']
    )
    proxies = {
        "http": config('http_proxy'),
        "https": config('https_proxy')
    }
    ncr_gpg_key = requests.get(
        'https://nvidia.github.io/nvidia-container-runtime/gpgkey', proxies=proxies).text
    import_key(ncr_gpg_key)
    # $(ARCH) is expanded by apt itself, not by this charm.
    with open(
        '/etc/apt/sources.list.d/nvidia-container-runtime.list', 'w'
    ) as f:
        f.write(
            'deb '
            'https://nvidia.github.io/libnvidia-container/{}/$(ARCH) /\n'
            .format(release)
        )
        f.write(
            'deb '
            'https://nvidia.github.io/nvidia-container-runtime/{}/$(ARCH) /\n'
            .format(release)
        )

    # The CUDA repo uses 'ubuntu1804' (no dot) in its path.
    cuda_gpg_key = requests.get(
        'https://developer.download.nvidia.com/'
        'compute/cuda/repos/{}/x86_64/7fa2af80.pub'
        .format(release.replace('.', '')), proxies=proxies
    ).text
    import_key(cuda_gpg_key)
    with open('/etc/apt/sources.list.d/cuda.list', 'w') as f:
        f.write(
            'deb '
            'http://developer.download.nvidia.com/'
            'compute/cuda/repos/{}/x86_64 /\n'
            .format(release.replace('.', ''))
        )

    apt_update()

    apt_install(NVIDIA_PACKAGES, fatal=True)

    set_state('containerd.nvidia.ready')
    # Re-render config.toml so the nvidia runtime is picked up.
    config_changed()
+
+
@when('endpoint.containerd.departed')
def purge_containerd():
    """
    Purge containerd from the unit when the principal relation departs.

    Stops the service, removes the containerd (and, when installed,
    Nvidia) packages and apt sources, then clears all related states.

    :return: None
    """
    status.maintenance('Removing containerd from principal')

    host.service_stop('containerd.service')
    apt_unhold(CONTAINERD_PACKAGE)
    apt_purge(CONTAINERD_PACKAGE, fatal=True)

    if is_state('containerd.nvidia.ready'):
        apt_purge(NVIDIA_PACKAGES, fatal=True)

    # Drop the apt source files we may have written in configure_nvidia().
    for source in (
        '/etc/apt/sources.list.d/cuda.list',
        '/etc/apt/sources.list.d/nvidia-container-runtime.list',
    ):
        if os.path.isfile(source):
            os.remove(source)

    apt_autoremove(purge=True, fatal=True)

    # Reset every containerd-related state so a re-join starts fresh.
    for state in (
        'containerd.ready',
        'containerd.installed',
        'containerd.nvidia.ready',
        'containerd.nvidia.checked',
        'containerd.nvidia.available',
        'containerd.version-published',
    ):
        remove_state(state)
+
+
@when('config.changed.gpu_driver')
def gpu_config_changed():
    """
    Remove the GPU checked state when the gpu_driver config is changed,
    so check_for_gpu() re-evaluates on the next dispatch.

    :return: None
    """
    remove_state('containerd.nvidia.checked')
+
+
@when('config.changed')
@when_not('endpoint.containerd.departed')
def config_changed():
    """
    Render the containerd config template (config.toml) and request a
    daemon restart.

    Builds the template context from charm config, the containerd
    endpoint's sandbox image, merged custom/relation registries, and any
    untrusted-runtime data cached in unitdata.

    :return: None
    """
    if _juju_proxy_changed():
        set_state('containerd.juju-proxy.changed')

    # Create "dumb" context based on Config to avoid triggering config.changed
    context = dict(config())

    config_file = 'config.toml'
    config_directory = '/etc/containerd'

    # Prefer the sandbox image published by the principal; fall back to
    # the layer's default otherwise.
    endpoint = endpoint_from_flag('endpoint.containerd.available')
    if endpoint:
        sandbox_image = endpoint.get_sandbox_image()
        if sandbox_image:
            log('Setting sandbox_image to: {}'.format(sandbox_image))
            context['sandbox_image'] = sandbox_image
        else:
            context['sandbox_image'] = containerd.get_sandbox_image()
    else:
        context['sandbox_image'] = containerd.get_sandbox_image()

    if not os.path.isdir(config_directory):
        os.mkdir(config_directory)

    # If custom_registries changed, make sure to remove old tls files.
    if config().changed('custom_registries'):
        old_custom_registries = config().previous('custom_registries')
    else:
        old_custom_registries = None

    context['custom_registries'] = \
        merge_custom_registries(config_directory, context['custom_registries'],
                                old_custom_registries)

    # Untrusted runtime data is stored by untrusted_available() below.
    untrusted = DB.get('untrusted')
    if untrusted:
        context['untrusted'] = True
        context['untrusted_name'] = untrusted['name']
        context['untrusted_path'] = untrusted['binary_path']
        context['untrusted_binary'] = os.path.basename(
            untrusted['binary_path'])

    else:
        context['untrusted'] = False

    # Resolve runtime 'auto' to nvidia-container-runtime or runc depending
    # on whether an Nvidia GPU was detected.
    if is_state('containerd.nvidia.available') \
            and context.get('runtime') == 'auto':
        context['runtime'] = 'nvidia-container-runtime'
    if not is_state('containerd.nvidia.available') \
            and context.get('runtime') == 'auto':
        context['runtime'] = 'runc'

    render(
        config_file,
        os.path.join(config_directory, config_file),
        context
    )

    # Restart is handled by restart_containerd() on a later dispatch.
    set_state('containerd.restart')
+
+
@when('containerd.installed')
@when('containerd.juju-proxy.changed')
@when_not('endpoint.containerd.departed')
def proxy_changed():
    """
    Apply new proxy settings to the containerd systemd unit.

    Renders (or removes) a systemd drop-in with HTTP(S)/NO_PROXY
    environment variables, caches the applied values, reloads systemd
    and requests a containerd restart.

    :return: None
    """
    # Create "dumb" context based on Config
    # to avoid triggering config.changed.
    context = check_for_juju_https_proxy(config)

    service_file = 'proxy.conf'
    service_directory = '/etc/systemd/system/containerd.service.d'
    service_path = os.path.join(service_directory, service_file)

    if context.get('http_proxy') or \
            context.get('https_proxy') or context.get('no_proxy'):

        os.makedirs(service_directory, exist_ok=True)

        log('Proxy changed, writing new file to {}'.format(service_path))
        render(
            service_file,
            service_path,
            context
        )

    else:
        try:
            log('Proxy cleaned, removing file {}'.format(service_path))
            os.remove(service_path)
        except FileNotFoundError:
            # NOTE(review): this early return skips the cache update and
            # flag removal below, so the handler re-runs on every hook
            # while no proxy is set - confirm this is intended.
            return  # We don't need to restart the daemon.

    # Remember what we applied so _juju_proxy_changed() can detect drift.
    DB.set('config-cache', context)

    remove_state('containerd.juju-proxy.changed')
    check_call(['systemctl', 'daemon-reload'])
    set_state('containerd.restart')
+
+
@when('containerd.restart')
@when_not('endpoint.containerd.departed')
def restart_containerd():
    """
    Restart the containerd service.

    If the restart fails, this function will log a message and be retried
    on the next hook (the 'containerd.restart' state stays set).
    """
    status.maintenance('Restarting containerd')
    if not host.service_restart('containerd.service'):
        log('Failed to restart containerd; will retry')
        return
    remove_state('containerd.restart')
+
+
@when('containerd.ready')
@when('endpoint.containerd.joined')
@when_not('endpoint.containerd.departed')
def publish_config():
    """
    Pass configuration to principal charm over the containerd endpoint.

    Publishes the CRI socket path, runtime type and Nvidia availability.

    :return: None
    """
    endpoint = endpoint_from_flag('endpoint.containerd.joined')
    endpoint.set_config(
        socket='unix:///var/run/containerd/containerd.sock',
        runtime='remote',  # TODO handle in k8s worker.
        nvidia_enabled=is_state('containerd.nvidia.available')
    )
+
+
@when('endpoint.untrusted.available')
@when_not('untrusted.configured')
@when_not('endpoint.containerd.departed')
def untrusted_available():
    """
    Handle an untrusted container runtime becoming available.

    Caches the runtime's config in unitdata and re-renders config.toml.
    Retries on later hooks until the relation publishes a 'name'.

    :return: None
    """
    untrusted_runtime = endpoint_from_flag('endpoint.untrusted.available')
    received = dict(untrusted_runtime.get_config())

    if 'name' not in received:
        # Config not fully published yet; try again next dispatch.
        return

    DB.set('untrusted', received)
    config_changed()

    set_state('untrusted.configured')
+
+
@when('endpoint.untrusted.departed')
def untrusted_departed():
    """
    Handle an untrusted container runtime departing.

    Clears the cached runtime data from unitdata and re-renders
    config.toml without it.

    :return: None
    """
    DB.unset('untrusted')
    DB.flush()
    config_changed()

    remove_state('untrusted.configured')
+
+
@when('endpoint.docker-registry.ready')
@when_not('containerd.registry.configured')
def configure_registry():
    """
    Add docker registry config when present.

    Caches the related registry's netloc, basic-auth credentials and TLS
    material paths in unitdata, then re-renders config.toml.

    :return: None
    """
    registry = endpoint_from_flag('endpoint.docker-registry.ready')

    docker_registry = {'url': registry.registry_netloc}

    # Handle auth data.
    if registry.has_auth_basic():
        docker_registry['username'] = registry.basic_user
        docker_registry['password'] = registry.basic_password

    # Handle TLS data.
    if registry.has_tls():
        # Ensure the CA that signed our registry cert is trusted.
        host.install_ca_cert(registry.tls_ca, name='juju-docker-registry')

        docker_registry.update(
            ca=str(ca_crt_path),
            key=str(server_key_path),
            cert=str(server_crt_path),
        )

    DB.set('registry', docker_registry)

    config_changed()
    set_state('containerd.registry.configured')
+
+
@when('endpoint.docker-registry.changed',
      'containerd.registry.configured')
def reconfigure_registry():
    """
    Signal to update the registry config when something changes.

    Dropping the state lets configure_registry() run again.

    :return: None
    """
    remove_state('containerd.registry.configured')
+
+
@when('endpoint.containerd.reconfigure')
@when_not('endpoint.containerd.departed')
def container_runtime_relation_changed():
    """
    Run config_changed to use any new config from the endpoint.

    Re-renders first, then lets the endpoint acknowledge the remote
    config so the reconfigure flag is cleared.

    :return: None
    """
    config_changed()
    endpoint = endpoint_from_flag('endpoint.containerd.reconfigure')
    endpoint.handle_remote_config()
+
+
@when('containerd.registry.configured')
@when_not('endpoint.docker-registry.joined')
def remove_registry():
    """
    Remove registry config when the registry is no longer present.

    Clears the cached registry from unitdata, re-renders config.toml and
    drops the configured state.

    :return: None
    """
    docker_registry = DB.get('registry', None)

    if docker_registry:
        # Remove from DB.
        DB.unset('registry')
        DB.flush()

        # Remove auth-related data.
        log('Disabling auth for docker registry: {}.'.format(
            docker_registry['url']))

        config_changed()

    remove_state('containerd.registry.configured')
diff --git a/containerd/reactive/status.py b/containerd/reactive/status.py
new file mode 100644
index 0000000..2f33f3f
--- /dev/null
+++ b/containerd/reactive/status.py
@@ -0,0 +1,4 @@
from charms import layer


# Initialize the status layer at import time so other reactive handlers
# can call charms.layer.status.* during this dispatch.
layer.status._initialize()
diff --git a/containerd/requirements.txt b/containerd/requirements.txt
new file mode 100644
index 0000000..55543d9
--- /dev/null
+++ b/containerd/requirements.txt
@@ -0,0 +1,3 @@
+mock
+flake8
+pytest
diff --git a/containerd/revision b/containerd/revision
new file mode 100644
index 0000000..c227083
--- /dev/null
+++ b/containerd/revision
@@ -0,0 +1 @@
+0
\ No newline at end of file
diff --git a/containerd/scripts/enable_grub_cgroups.sh b/containerd/scripts/enable_grub_cgroups.sh
new file mode 100755
index 0000000..27182fe
--- /dev/null
+++ b/containerd/scripts/enable_grub_cgroups.sh
@@ -0,0 +1,12 @@
#!/bin/bash

# Edits the grub defaults file to append the GRUB_CMDLINE_LINUX options and executes a
# `juju-reboot` to reboot the machine.

status-set maintenance "Configuring and updating grub"

# NOTE(review): the sed only matches an exactly-empty GRUB_CMDLINE_LINUX="";
# hosts with pre-existing kernel args are left unchanged - confirm intended.
sed -i 's/GRUB_CMDLINE_LINUX=\"\"/GRUB_CMDLINE_LINUX=\"cgroup_enable=memory swapaccount=1\"/' /etc/default/grub
update-grub
status-set maintenance "Rebooting the machine"

# juju-reboot queues a reboot through Juju rather than calling reboot(8).
juju-reboot
diff --git a/containerd/templates/config.toml b/containerd/templates/config.toml
new file mode 100644
index 0000000..69064bc
--- /dev/null
+++ b/containerd/templates/config.toml
@@ -0,0 +1,113 @@
+root = "/var/lib/containerd"
+state = "/run/containerd"
+oom_score = 0
+
+[grpc]
+ address = "/run/containerd/containerd.sock"
+ uid = 0
+ gid = 0
+ max_recv_message_size = 16777216
+ max_send_message_size = 16777216
+
+[debug]
+ address = ""
+ uid = 0
+ gid = 0
+ level = ""
+
+[metrics]
+ address = ""
+ grpc_histogram = false
+
+[cgroup]
+ path = ""
+
+[plugins]
+ [plugins.cgroups]
+ no_prometheus = false
+ [plugins.cri]
+ stream_server_address = "127.0.0.1"
+ stream_server_port = "0"
+ enable_selinux = false
+ sandbox_image = "{{ sandbox_image }}"
+ stats_collect_period = 10
+ systemd_cgroup = false
+ enable_tls_streaming = false
+ max_container_log_line_size = 16384
+ [plugins.cri.containerd]
+ no_pivot = false
+ [plugins.cri.containerd.default_runtime]
+ runtime_type = "io.containerd.runtime.v1.linux"
+ {% if untrusted %}
+ [plugins.cri.containerd.untrusted_workload_runtime]
+ runtime_type= "io.containerd.{{ untrusted_name }}.v2"
+ {% endif %}
+ [plugins.cri.containerd.runtimes]
+ [plugins.cri.containerd.runtimes.runc]
+ runtime_type = "io.containerd.runc.v1"
+ {% if untrusted %}
+ [plugins.cri.containerd.runtimes.{{ untrusted_name }}]
+ runtime_type= "io.containerd.{{ untrusted_name }}.v2"
+ [plugins.cri.containerd.runtimes.{{ untrusted_name }}.options]
+ Runtime = "{{ untrusted_binary }}"
+ RuntimeRoot = "{{ untrusted_path }}"
+ {% endif %}
+ [plugins.cri.cni]
+ bin_dir = "/opt/cni/bin"
+ conf_dir = "/etc/cni/net.d"
+ conf_template = ""
+ [plugins.cri.registry]
+ [plugins.cri.registry.mirrors]
+ [plugins.cri.registry.mirrors."docker.io"]
+ endpoint = ["https://registry-1.docker.io"]
+ {% if custom_registries -%}
+ {% for registry in custom_registries -%}
+ {% if registry.host -%}
+ [plugins.cri.registry.mirrors."{{ registry.host }}"]
+ {% if registry.url -%}
+ endpoint = ["{{ registry.url}}"]
+ {% endif -%}
+ {% endif -%}
+ {% endfor -%}
+ {% endif -%}
+ {% if custom_registries %}
+ [plugins.cri.registry.auths]
+ {% for registry in custom_registries %}
+ {% if registry.username and registry.password %}
+ [plugins.cri.registry.auths."{{ registry.url }}"]
+ username = "{{ registry.username }}"
+ password = "{{ registry.password }}"
+ {% endif %}
+ {% endfor %}
+ [plugins.cri.registry.configs]
+ {% for registry in custom_registries %}
+ {% if registry.ca or registry.cert or registry.key or registry.insecure_skip_verify %}
+ [plugins.cri.registry.configs."{{ registry.url }}".tls]
+ ca_file = "{{ registry.ca if registry.ca else '' }}"
+ cert_file = "{{ registry.cert if registry.cert else '' }}"
+ key_file = "{{ registry.key if registry.key else '' }}"
+ insecure_skip_verify = {{ "true" if registry.insecure_skip_verify else "false" }}
+ {% endif %}
+ {% endfor %}
+ {% endif %}
+ [plugins.cri.x509_key_pair_streaming]
+ tls_cert_file = ""
+ tls_key_file = ""
+ [plugins.diff-service]
+ default = ["walking"]
+ [plugins.linux]
+ shim = "{{ shim }}"
+ runtime = "{{ runtime }}"
+ runtime_root = ""
+ no_shim = false
+ shim_debug = false
+ [plugins.opt]
+ path = "/opt/containerd"
+ [plugins.restart]
+ interval = "10s"
+ [plugins.scheduler]
+ pause_threshold = 0.02
+ deletion_threshold = 0
+ mutation_threshold = 100
+ schedule_delay = "0s"
+ startup_delay = "100ms"
diff --git a/containerd/templates/proxy.conf b/containerd/templates/proxy.conf
new file mode 100644
index 0000000..9a9ced4
--- /dev/null
+++ b/containerd/templates/proxy.conf
@@ -0,0 +1,2 @@
+[Service]
+Environment="HTTP_PROXY={{ http_proxy }}" "HTTPS_PROXY={{ https_proxy }}" "NO_PROXY={{ no_proxy }}"
\ No newline at end of file
diff --git a/containerd/tests/conftest.py b/containerd/tests/conftest.py
new file mode 100644
index 0000000..2f710e8
--- /dev/null
+++ b/containerd/tests/conftest.py
@@ -0,0 +1,40 @@
import os
import sys
from unittest.mock import MagicMock


def identity(x):
    """Return the argument unchanged; stands in for reactive decorators."""
    return x


# mock dependencies which we don't care about covering in our tests
ch = MagicMock()
sys.modules['charmhelpers'] = ch
sys.modules['charmhelpers.core'] = ch.core
sys.modules['charmhelpers.core.unitdata'] = ch.core.unitdata
sys.modules['charmhelpers.core.hookenv'] = ch.core.hookenv
sys.modules['charmhelpers.core.host'] = ch.core.host
sys.modules['charmhelpers.core.templating'] = ch.core.templating
sys.modules['charmhelpers.contrib'] = ch.contrib
sys.modules['charmhelpers.contrib.charmsupport'] = ch.contrib.charmsupport

# The reactive decorators must be pass-throughs so decorated handlers stay
# directly callable from the tests.
reactive = MagicMock()
sys.modules['charms.reactive'] = reactive
reactive.when.return_value = identity
reactive.when_any.return_value = identity
reactive.when_not.return_value = identity
reactive.when_none.return_value = identity
reactive.hook.return_value = identity

leadership = MagicMock()
sys.modules['charms.leadership'] = leadership

# NOTE(review): hacluster/kubernetes_common/nagios aren't imported by the
# containerd layer code shown here - possibly carried over from a sibling
# charm's conftest; verify whether they can be dropped.
charms = MagicMock()
sys.modules['charms'] = charms
sys.modules['charms.coordinator'] = charms.coordinator
sys.modules['charms.layer'] = charms.layer
sys.modules['charms.layer.hacluster'] = charms.layer.hacluster
sys.modules['charms.layer.kubernetes_common'] = charms.layer.kubernetes_common
sys.modules['charms.layer.nagios'] = charms.layer.nagios

# hookenv helpers expect a model UUID in the environment.
os.environ['JUJU_MODEL_UUID'] = 'test-1234'
diff --git a/containerd/tests/test_cidr_notation.py b/containerd/tests/test_cidr_notation.py
new file mode 100644
index 0000000..35f38f1
--- /dev/null
+++ b/containerd/tests/test_cidr_notation.py
@@ -0,0 +1,25 @@
from lib.charms.layer.container_runtime_common import (
    get_hosts
)


def test_get_hosts():
    """NO_PROXY CIDR entries are expanded into individual host addresses."""
    CONFIG = {
        'NO_PROXY': "192.168.2.1, 192.168.2.0/29, hello.com"
    }

    hosts = get_hosts(CONFIG)

    # 192.168.2.1 appears twice: once verbatim and once from the /29
    # expansion (.1-.6); non-CIDR entries pass through unchanged.
    assert hosts == "192.168.2.1,192.168.2.1,\
192.168.2.2,192.168.2.3,192.168.2.4,192.168.2.5,\
192.168.2.6,hello.com"


def test_return_conf():
    """An empty NO_PROXY yields an empty host list."""
    CONFIG = {
        'NO_PROXY': ""
    }

    hosts = get_hosts(CONFIG)

    assert hosts == ""
diff --git a/containerd/tests/test_merge_config.py b/containerd/tests/test_merge_config.py
new file mode 100644
index 0000000..3a2e2d5
--- /dev/null
+++ b/containerd/tests/test_merge_config.py
@@ -0,0 +1,76 @@
from lib.charms.layer.container_runtime_common import (
    merge_config
)

# These tests were previously all named test_get_hosts*, which duplicated
# the name used in test_cidr_notation.py and misdescribed the unit under
# test; they exercise merge_config.


def test_merge_config_prefers_local():
    """Charm config proxy values win over environment values."""
    CONFIG = {
        'NO_PROXY': '192.168.2.1, 192.168.2.0/29, hello.com',
        'https_proxy': 'https://hop.proxy',
        'HTTP_PROXY': '',

    }
    ENVIRONMENT = {
        'HTTPS_PROXY': 'https://proxy.hop',
        'HTTP_PROXY': 'http://proxy.hop',
        'no_proxy': 'not tha proxy'
    }

    merged = merge_config(CONFIG, ENVIRONMENT)

    assert merged == {
        'NO_PROXY': '192.168.2.1, 192.168.2.0/29, hello.com',
        'HTTPS_PROXY': 'https://hop.proxy',
        'HTTP_PROXY': 'http://proxy.hop',
        'no_proxy': '192.168.2.1, 192.168.2.0/29, hello.com',
        'https_proxy': 'https://hop.proxy',
        'http_proxy': 'http://proxy.hop'
    }


def test_merge_config_no_local_conf():
    """With empty charm config, environment values are used throughout."""
    CONFIG = {
        'NO_PROXY': '',
        'https_proxy': '',
        'HTTP_PROXY': '',
    }
    ENVIRONMENT = {
        'HTTPS_PROXY': 'https://proxy.hop',
        'HTTP_PROXY': 'http://proxy.hop',
        'no_proxy': 'not tha proxy'
    }

    merged = merge_config(CONFIG, ENVIRONMENT)

    assert merged == {
        'HTTPS_PROXY': 'https://proxy.hop',
        'HTTP_PROXY': 'http://proxy.hop',
        'NO_PROXY': 'not tha proxy',
        'https_proxy': 'https://proxy.hop',
        'http_proxy': 'http://proxy.hop',
        'no_proxy': 'not tha proxy'
    }


def test_merge_config_no_env_conf():
    """With empty environment values, charm config is used throughout."""
    ENVIRONMENT = {
        'NO_PROXY': '',
        'HTTPS_PROXY': '',
        'HTTP_PROXY': '',
    }
    CONFIG = {
        'HTTPS_PROXY': 'https://proxy.hop',
        'HTTP_PROXY': 'http://proxy.hop',
        'no_proxy': 'not tha proxy'
    }

    merged = merge_config(CONFIG, ENVIRONMENT)

    assert merged == {
        'HTTPS_PROXY': 'https://proxy.hop',
        'HTTP_PROXY': 'http://proxy.hop',
        'NO_PROXY': 'not tha proxy',
        'no_proxy': 'not tha proxy',
        'https_proxy': 'https://proxy.hop',
        'http_proxy': 'http://proxy.hop',
    }
diff --git a/containerd/tests/test_reactive.py b/containerd/tests/test_reactive.py
new file mode 100644
index 0000000..3c2a549
--- /dev/null
+++ b/containerd/tests/test_reactive.py
@@ -0,0 +1,62 @@
import pytest
from unittest.mock import patch, ANY

# set_flag is a MagicMock here (charms.reactive is mocked in conftest.py),
# so call assertions can be made on it directly.
from charms.reactive import set_flag
from reactive import container_runtime_common


def patch_fixture(patch_target):
    """Build a pytest fixture that patches `patch_target` for a test."""
    @pytest.fixture()
    def _fixture():
        with patch(patch_target) as m:
            yield m
    return _fixture


check_call = patch_fixture('reactive.container_runtime_common.check_call')
hookenv = patch_fixture('reactive.container_runtime_common.hookenv')
install_ca = patch_fixture('charmhelpers.core.host.install_ca_cert')
status = patch_fixture('reactive.container_runtime_common.status')


def test_enable_cgroups(hookenv, check_call):
    """Verify expected flags for enable-groups config."""
    # Should not set a flag when config is false
    hookenv.config.return_value = {'enable-cgroups': False}
    container_runtime_common.enable_grub_cgroups()
    set_flag.assert_not_called()

    # Should set a flag when config is true
    hookenv.config.return_value = {'enable-cgroups': True}
    container_runtime_common.enable_grub_cgroups()
    set_flag.assert_called_once_with('cgroups.modified')


def test_install_custom_ca(hookenv, install_ca, status):
    """Verify we set a custom CA cert when appropriate."""
    # Should not block nor call install_ca_cert when no config is present
    hookenv.config.return_value = {}
    container_runtime_common.install_custom_ca()
    status.blocked.assert_not_called()
    install_ca.assert_not_called()

    status.reset_mock()
    install_ca.reset_mock()

    # Should block and not call install_ca_cert if called with bad data
    hookenv.config.return_value = {'custom-registry-ca': 'bad'}
    container_runtime_common.install_custom_ca()
    status.blocked.assert_called_once_with(ANY)
    install_ca.assert_not_called()

    status.reset_mock()
    install_ca.reset_mock()

    # Should call install_ca_cert and not block if called with good data
    # ('Z29vZAo=' is base64 for 'good\n').
    hookenv.config.return_value = {'custom-registry-ca': 'Z29vZAo='}
    container_runtime_common.install_custom_ca()
    status.blocked.assert_not_called()
    install_ca.assert_called_once_with(ANY, name='juju-custom-registry')

    status.reset_mock()
    install_ca.reset_mock()
diff --git a/containerd/tox.ini b/containerd/tox.ini
new file mode 100644
index 0000000..81cdf4d
--- /dev/null
+++ b/containerd/tox.ini
@@ -0,0 +1,27 @@
+[flake8]
+max-line-length = 88
+
+[tox]
+skipsdist = True
+envlist = lint,py3
+
+[tox:travis]
+3.5: lint,py3
+3.6: lint,py3
+3.7: lint,py3
+3.8: lint,py3
+
+[testenv]
+basepython = python3
+setenv =
+ PYTHONPATH={toxinidir}:{toxinidir}/lib
+deps =
+ pyyaml
+ pytest
+ flake8
+ ipdb
+commands = pytest --tb native -s {posargs}
+
+[testenv:lint]
+envdir = {toxworkdir}/py3
+commands = flake8 {toxinidir}/reactive {toxinidir}/lib {toxinidir}/tests
diff --git a/containerd/version b/containerd/version
new file mode 100644
index 0000000..1dea0b1
--- /dev/null
+++ b/containerd/version
@@ -0,0 +1 @@
+e247aeff
\ No newline at end of file
diff --git a/containerd/wheelhouse.txt b/containerd/wheelhouse.txt
new file mode 100644
index 0000000..3bd771d
--- /dev/null
+++ b/containerd/wheelhouse.txt
@@ -0,0 +1,19 @@
+# layer:basic
+# pip is pinned to <19.0 to avoid https://github.com/pypa/pip/issues/6164
+# even with installing setuptools before upgrading pip ends up with pip seeing
+# the older setuptools at the system level if include_system_packages is true
+pip>=18.1,<19.0
+# pin Jinja2 and PyYAML to the last versions supporting python 3.4 for trusty
+Jinja2<=2.10.1
+PyYAML<=5.2
+setuptools<42
+setuptools-scm<=1.17.0
+charmhelpers>=0.4.0,<1.0.0
+charms.reactive>=0.1.0,<2.0.0
+wheel<0.34
+# pin netaddr to avoid pulling importlib-resources
+netaddr<=0.7.19
+
+# containerd
+requests>=2.0.0,<3.0.0
+
diff --git a/containerd/wheelhouse/Jinja2-2.10.1.tar.gz b/containerd/wheelhouse/Jinja2-2.10.1.tar.gz
new file mode 100644
index 0000000..ffd1054
Binary files /dev/null and b/containerd/wheelhouse/Jinja2-2.10.1.tar.gz differ
diff --git a/containerd/wheelhouse/MarkupSafe-1.1.1.tar.gz b/containerd/wheelhouse/MarkupSafe-1.1.1.tar.gz
new file mode 100644
index 0000000..a6dad8e
Binary files /dev/null and b/containerd/wheelhouse/MarkupSafe-1.1.1.tar.gz differ
diff --git a/containerd/wheelhouse/PyYAML-5.2.tar.gz b/containerd/wheelhouse/PyYAML-5.2.tar.gz
new file mode 100644
index 0000000..666d12a
Binary files /dev/null and b/containerd/wheelhouse/PyYAML-5.2.tar.gz differ
diff --git a/containerd/wheelhouse/Tempita-0.5.2.tar.gz b/containerd/wheelhouse/Tempita-0.5.2.tar.gz
new file mode 100644
index 0000000..755befc
Binary files /dev/null and b/containerd/wheelhouse/Tempita-0.5.2.tar.gz differ
diff --git a/containerd/wheelhouse/certifi-2020.12.5.tar.gz b/containerd/wheelhouse/certifi-2020.12.5.tar.gz
new file mode 100644
index 0000000..3023d0a
Binary files /dev/null and b/containerd/wheelhouse/certifi-2020.12.5.tar.gz differ
diff --git a/containerd/wheelhouse/chardet-4.0.0.tar.gz b/containerd/wheelhouse/chardet-4.0.0.tar.gz
new file mode 100644
index 0000000..6bfc4e3
Binary files /dev/null and b/containerd/wheelhouse/chardet-4.0.0.tar.gz differ
diff --git a/containerd/wheelhouse/charmhelpers-0.20.21.tar.gz b/containerd/wheelhouse/charmhelpers-0.20.21.tar.gz
new file mode 100644
index 0000000..ca65d07
Binary files /dev/null and b/containerd/wheelhouse/charmhelpers-0.20.21.tar.gz differ
diff --git a/containerd/wheelhouse/charms.reactive-1.4.1.tar.gz b/containerd/wheelhouse/charms.reactive-1.4.1.tar.gz
new file mode 100644
index 0000000..03bc1fe
Binary files /dev/null and b/containerd/wheelhouse/charms.reactive-1.4.1.tar.gz differ
diff --git a/containerd/wheelhouse/idna-2.10.tar.gz b/containerd/wheelhouse/idna-2.10.tar.gz
new file mode 100644
index 0000000..e9a9e03
Binary files /dev/null and b/containerd/wheelhouse/idna-2.10.tar.gz differ
diff --git a/containerd/wheelhouse/netaddr-0.7.19.tar.gz b/containerd/wheelhouse/netaddr-0.7.19.tar.gz
new file mode 100644
index 0000000..cc31d9d
Binary files /dev/null and b/containerd/wheelhouse/netaddr-0.7.19.tar.gz differ
diff --git a/containerd/wheelhouse/pbr-5.6.0.tar.gz b/containerd/wheelhouse/pbr-5.6.0.tar.gz
new file mode 100644
index 0000000..0d5c965
Binary files /dev/null and b/containerd/wheelhouse/pbr-5.6.0.tar.gz differ
diff --git a/containerd/wheelhouse/pip-18.1.tar.gz b/containerd/wheelhouse/pip-18.1.tar.gz
new file mode 100644
index 0000000..a18192d
Binary files /dev/null and b/containerd/wheelhouse/pip-18.1.tar.gz differ
diff --git a/containerd/wheelhouse/pyaml-20.4.0.tar.gz b/containerd/wheelhouse/pyaml-20.4.0.tar.gz
new file mode 100644
index 0000000..0d5fd76
Binary files /dev/null and b/containerd/wheelhouse/pyaml-20.4.0.tar.gz differ
diff --git a/containerd/wheelhouse/requests-2.25.1.tar.gz b/containerd/wheelhouse/requests-2.25.1.tar.gz
new file mode 100644
index 0000000..9dcfcf2
Binary files /dev/null and b/containerd/wheelhouse/requests-2.25.1.tar.gz differ
diff --git a/containerd/wheelhouse/setuptools-41.6.0.zip b/containerd/wheelhouse/setuptools-41.6.0.zip
new file mode 100644
index 0000000..3345759
Binary files /dev/null and b/containerd/wheelhouse/setuptools-41.6.0.zip differ
diff --git a/containerd/wheelhouse/setuptools_scm-1.17.0.tar.gz b/containerd/wheelhouse/setuptools_scm-1.17.0.tar.gz
new file mode 100644
index 0000000..43b16c7
Binary files /dev/null and b/containerd/wheelhouse/setuptools_scm-1.17.0.tar.gz differ
diff --git a/containerd/wheelhouse/six-1.15.0.tar.gz b/containerd/wheelhouse/six-1.15.0.tar.gz
new file mode 100644
index 0000000..63329e4
Binary files /dev/null and b/containerd/wheelhouse/six-1.15.0.tar.gz differ
diff --git a/containerd/wheelhouse/urllib3-1.26.4.tar.gz b/containerd/wheelhouse/urllib3-1.26.4.tar.gz
new file mode 100644
index 0000000..4d693e7
Binary files /dev/null and b/containerd/wheelhouse/urllib3-1.26.4.tar.gz differ
diff --git a/containerd/wheelhouse/wheel-0.33.6.tar.gz b/containerd/wheelhouse/wheel-0.33.6.tar.gz
new file mode 100644
index 0000000..c922c4e
Binary files /dev/null and b/containerd/wheelhouse/wheel-0.33.6.tar.gz differ
diff --git a/easyrsa/.build.manifest b/easyrsa/.build.manifest
new file mode 100644
index 0000000..3652ef7
--- /dev/null
+++ b/easyrsa/.build.manifest
@@ -0,0 +1,516 @@
+{
+ "layers": [
+ {
+ "branch": "refs/heads/master\nrefs/heads/stable",
+ "rev": "fcdcea4e5de3e1556c24e6704607862d0ba00a56",
+ "url": "layer:options"
+ },
+ {
+ "branch": "refs/heads/master\nrefs/heads/stable",
+ "rev": "623e69c7b432456fd4364f6e1835424fd6b5425e",
+ "url": "layer:basic"
+ },
+ {
+ "branch": "refs/heads/master\nrefs/heads/stable",
+ "rev": "527dd64fc4b9a6b0f8d80a3c2c0b865155050275",
+ "url": "layer:debug"
+ },
+ {
+ "branch": "refs/heads/master\nrefs/heads/stable",
+ "rev": "cc5bd3f49b2fa5e6c3ab2336763c313ec8bf083f",
+ "url": "layer:leadership"
+ },
+ {
+ "branch": "refs/heads/master\nrefs/heads/stable",
+ "rev": "a7d7b6423db37a47611310039e6ed1929c0a2eab",
+ "url": "layer:status"
+ },
+ {
+ "branch": "refs/heads/stable",
+ "rev": "44f635b92624be5882c70ca1544d79f5d8483e24",
+ "url": "easyrsa"
+ },
+ {
+ "branch": "refs/heads/master\nrefs/heads/stable",
+ "rev": "d9850016d930a6d507b9fd45e2598d327922b140",
+ "url": "interface:tls-certificates"
+ }
+ ],
+ "signatures": {
+ ".build.manifest": [
+ "build",
+ "dynamic",
+ "unchecked"
+ ],
+ ".github/workflows/tox.yaml": [
+ "easyrsa",
+ "static",
+ "c323f9ca1fe5bf1369f80d8958be49ad8fd2f6635528865017c357591d31542e"
+ ],
+ ".gitignore": [
+ "easyrsa",
+ "static",
+ "3d3d61b1e6228c5d03ea369331e493d0688f94416a0384c5c0b41194e4297d33"
+ ],
+ ".travis/profile-update.yaml": [
+ "layer:basic",
+ "static",
+ "731e20aa59bf61c024d317ad630e478301a9386ccc0afe56e6c1c09db07ac83b"
+ ],
+ "CONTRIBUTING.md": [
+ "easyrsa",
+ "static",
+ "8cb96f21e7dddc5b0f0ee7ced5168566b3534a98dd7dcb8f0b459846ef420e1e"
+ ],
+ "LICENSE": [
+ "easyrsa",
+ "static",
+ "f02fd85a4171482f6bb1d6f87fe0704d3a2da93eca04afe39a0310a00c409902"
+ ],
+ "Makefile": [
+ "layer:basic",
+ "static",
+ "b7ab3a34e5faf79b96a8632039a0ad0aa87f2a9b5f0ba604e007cafb22190301"
+ ],
+ "README.md": [
+ "easyrsa",
+ "static",
+ "0face8a003629fcfe025bc339558160b3b5a88f5bfc53c904e931d242c4d0135"
+ ],
+ "actions.yaml": [
+ "easyrsa",
+ "dynamic",
+ "68a27394e89885a22fbe5fe9ea746c3f6354e5883c4b2d70d6c1df1821116037"
+ ],
+ "actions/__init__.py": [
+ "easyrsa",
+ "static",
+ "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"
+ ],
+ "actions/actions.py": [
+ "easyrsa",
+ "static",
+ "1a7894d5a5bc95629f5447ea3cee52d71682f8b095b0e58fea6fd017e8d1fbc8"
+ ],
+ "actions/backup": [
+ "easyrsa",
+ "static",
+ "1a7894d5a5bc95629f5447ea3cee52d71682f8b095b0e58fea6fd017e8d1fbc8"
+ ],
+ "actions/debug": [
+ "layer:debug",
+ "static",
+ "db0a42dae4c5045b2c06385bf22209dfe0e2ded55822ef847d84b01d9ff2b046"
+ ],
+ "actions/delete-backup": [
+ "easyrsa",
+ "static",
+ "1a7894d5a5bc95629f5447ea3cee52d71682f8b095b0e58fea6fd017e8d1fbc8"
+ ],
+ "actions/list-backups": [
+ "easyrsa",
+ "static",
+ "1a7894d5a5bc95629f5447ea3cee52d71682f8b095b0e58fea6fd017e8d1fbc8"
+ ],
+ "actions/restore": [
+ "easyrsa",
+ "static",
+ "1a7894d5a5bc95629f5447ea3cee52d71682f8b095b0e58fea6fd017e8d1fbc8"
+ ],
+ "bin/charm-env": [
+ "layer:basic",
+ "static",
+ "fb6a20fac4102a6a4b6ffe903fcf666998f9a95a3647e6f9af7a1eeb44e58fd5"
+ ],
+ "bin/layer_option": [
+ "layer:options",
+ "static",
+ "e959bf29da4c5edff28b2602c24113c4df9e25cdc9f2aa3b5d46c8577b2a40cc"
+ ],
+ "config.yaml": [
+ "easyrsa",
+ "dynamic",
+ "d047e48447d374c91de7e90fd46a624f93adf76647a08a78eed40f180d16b29e"
+ ],
+ "copyright": [
+ "easyrsa",
+ "static",
+ "b66cc4de4ddcf30883160876280b9e1f9fbea0ddb4e62cc58685a1913c23f746"
+ ],
+ "copyright.layer-basic": [
+ "layer:basic",
+ "static",
+ "f6740d66fd60b60f2533d9fcb53907078d1e20920a0219afce7182e2a1c97629"
+ ],
+ "copyright.layer-leadership": [
+ "layer:leadership",
+ "static",
+ "8ce407829378fc0f72ce44c7f624e4951c7ccb3db1cfb949bee026b701728cc9"
+ ],
+ "copyright.layer-options": [
+ "layer:options",
+ "static",
+ "f6740d66fd60b60f2533d9fcb53907078d1e20920a0219afce7182e2a1c97629"
+ ],
+ "copyright.layer-status": [
+ "layer:status",
+ "static",
+ "7c0e36e618a8544faaaa3f8e0533c2f1f4a18bcacbdd8b99b537742e6b587d58"
+ ],
+ "debug-scripts/charm-unitdata": [
+ "layer:debug",
+ "static",
+ "c952b9d31f3942e4e722cb3e70f5119707b69b8e76cc44e2e906bc6d9aef49b7"
+ ],
+ "debug-scripts/filesystem": [
+ "layer:debug",
+ "static",
+ "d29cc8687f4422d024001c91b1ac756ee6bf8a2a125bc98db1199ba775eb8fd7"
+ ],
+ "debug-scripts/juju-logs": [
+ "layer:debug",
+ "static",
+ "d260b35753a917368cb8c64c1312546a0a40ef49cba84c75bc6369549807c55e"
+ ],
+ "debug-scripts/juju-network-get": [
+ "layer:debug",
+ "static",
+ "6d849a1f8e6569bd0d5ea38299f7937cb8b36a5f505e3532f6c756eabeb8b6c5"
+ ],
+ "debug-scripts/network": [
+ "layer:debug",
+ "static",
+ "714afae5dcb45554ff1f05285501e3b7fcc656c8de51217e263b93dab25a9d2e"
+ ],
+ "debug-scripts/packages": [
+ "layer:debug",
+ "static",
+ "e8177102dc2ca853cb9272c1257cf2cfd5253d2a074e602d07c8bc4ea8e27c75"
+ ],
+ "debug-scripts/sysctl": [
+ "layer:debug",
+ "static",
+ "990035b320e09cc2228e1f2f880e795d51118b2959339eacddff9cbb74349c6a"
+ ],
+ "debug-scripts/systemd": [
+ "layer:debug",
+ "static",
+ "23ddf533198bf5b1ce723acde31ada806aab8539292b514c721d8ec08af74106"
+ ],
+ "docs/status.md": [
+ "layer:status",
+ "static",
+ "975dec9f8c938196e102e954a80226bda293407c4e5ae857c118bf692154702a"
+ ],
+ "hooks/client-relation-broken": [
+ "layer:basic",
+ "dynamic",
+ "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7"
+ ],
+ "hooks/client-relation-changed": [
+ "layer:basic",
+ "dynamic",
+ "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7"
+ ],
+ "hooks/client-relation-created": [
+ "layer:basic",
+ "dynamic",
+ "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7"
+ ],
+ "hooks/client-relation-departed": [
+ "layer:basic",
+ "dynamic",
+ "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7"
+ ],
+ "hooks/client-relation-joined": [
+ "layer:basic",
+ "dynamic",
+ "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7"
+ ],
+ "hooks/config-changed": [
+ "layer:basic",
+ "dynamic",
+ "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7"
+ ],
+ "hooks/hook.template": [
+ "layer:basic",
+ "static",
+ "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7"
+ ],
+ "hooks/install": [
+ "layer:basic",
+ "dynamic",
+ "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7"
+ ],
+ "hooks/leader-elected": [
+ "layer:basic",
+ "dynamic",
+ "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7"
+ ],
+ "hooks/leader-settings-changed": [
+ "layer:basic",
+ "dynamic",
+ "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7"
+ ],
+ "hooks/post-series-upgrade": [
+ "layer:basic",
+ "dynamic",
+ "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7"
+ ],
+ "hooks/pre-series-upgrade": [
+ "layer:basic",
+ "dynamic",
+ "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7"
+ ],
+ "hooks/relations/tls-certificates/.gitignore": [
+ "interface:tls-certificates",
+ "static",
+ "b485e74def213c534676224e655e9276b62d401ebc643508ddc545dd335cb6dc"
+ ],
+ "hooks/relations/tls-certificates/README.md": [
+ "interface:tls-certificates",
+ "static",
+ "6851227de8fcca7edfd504159dbe3e3af31080af64df46f3d3b345da7630827a"
+ ],
+ "hooks/relations/tls-certificates/__init__.py": [
+ "interface:tls-certificates",
+ "static",
+ "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"
+ ],
+ "hooks/relations/tls-certificates/docs/common.md": [
+ "interface:tls-certificates",
+ "static",
+ "5e91d6637fc0ccc50af2776de9e59a0f8098244b627816b2e18fabb266e980ff"
+ ],
+ "hooks/relations/tls-certificates/docs/provides.md": [
+ "interface:tls-certificates",
+ "static",
+ "5c12dfca99b5c15ba10b4e7f7cff4cb4c9b621b198deba5f2397d3c837d035fe"
+ ],
+ "hooks/relations/tls-certificates/docs/requires.md": [
+ "interface:tls-certificates",
+ "static",
+ "148dd1de163d75253f0a9d3c35e108dcaacbc9bdf97e47186743e6c82a67b62e"
+ ],
+ "hooks/relations/tls-certificates/interface.yaml": [
+ "interface:tls-certificates",
+ "static",
+ "e412e54b1d327bad15a882f7f0bf996212090db576b863cc9cff7a68afc0e4fa"
+ ],
+ "hooks/relations/tls-certificates/make_docs": [
+ "interface:tls-certificates",
+ "static",
+ "3671543bddc9d277171263310e404df3f11660429582cb27b39b7e7ec8757a37"
+ ],
+ "hooks/relations/tls-certificates/provides.py": [
+ "interface:tls-certificates",
+ "static",
+ "be2a4b9a411c770989c529fd887070ad91649481a13f5239cfd8751f234b637c"
+ ],
+ "hooks/relations/tls-certificates/pydocmd.yml": [
+ "interface:tls-certificates",
+ "static",
+ "48a233f60a89f87d56e9bc715e05766f5d39bbea2bc8741ed31f67b30c8cfcb8"
+ ],
+ "hooks/relations/tls-certificates/requires.py": [
+ "interface:tls-certificates",
+ "static",
+ "442d773112079bc674d3e6be75b00323fcad7efd2f03613a1972b575dd438dba"
+ ],
+ "hooks/relations/tls-certificates/tls_certificates_common.py": [
+ "interface:tls-certificates",
+ "static",
+ "068bd32ba69bfa514e1da386919d18b348ee678b40c372f275c9110f2cc4677c"
+ ],
+ "hooks/start": [
+ "layer:basic",
+ "dynamic",
+ "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7"
+ ],
+ "hooks/stop": [
+ "layer:basic",
+ "dynamic",
+ "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7"
+ ],
+ "hooks/update-status": [
+ "layer:basic",
+ "dynamic",
+ "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7"
+ ],
+ "hooks/upgrade-charm": [
+ "layer:basic",
+ "dynamic",
+ "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7"
+ ],
+ "icon.svg": [
+ "easyrsa",
+ "static",
+ "e5b03f7e2c5f7948e595138412b634a16c45e06b500041f56f6062ab68d11b94"
+ ],
+ "layer.yaml": [
+ "easyrsa",
+ "dynamic",
+ "230791f1d771e8ba06f98403bd1b99996752eb5290cd89b51d4f66fd725e0819"
+ ],
+ "lib/charms/layer/__init__.py": [
+ "layer:basic",
+ "static",
+ "dfe0d26c6bf409767de6e2546bc648f150e1b396243619bad3aa0553ab7e0e6f"
+ ],
+ "lib/charms/layer/basic.py": [
+ "layer:basic",
+ "static",
+ "3126b5754ad39402ee27e64527044ddd231ed1cd137fcedaffb51e63a635f108"
+ ],
+ "lib/charms/layer/execd.py": [
+ "layer:basic",
+ "static",
+ "fda8bd491032db1db8ddaf4e99e7cc878c6fb5432efe1f91cadb5b34765d076d"
+ ],
+ "lib/charms/layer/options.py": [
+ "layer:options",
+ "static",
+ "8ae7a07d22542fc964f2d2bee8219d1c78a68dace70a1b38d36d4aea47b1c3b2"
+ ],
+ "lib/charms/layer/status.py": [
+ "layer:status",
+ "static",
+ "d560a5e07b2e5f2b0f25f30e1f0278b06f3f90c01e4dbad5c83d71efc79018c6"
+ ],
+ "lib/charms/leadership.py": [
+ "layer:leadership",
+ "static",
+ "20ffcbbc08147506759726ad51567420659ffb8a2e0121079240b8706658e332"
+ ],
+ "lib/debug_script.py": [
+ "layer:debug",
+ "static",
+ "a4d56f2d3e712b1b5cadb657c7195c6268d0aac6d228991049fd769e0ddaf453"
+ ],
+ "make_docs": [
+ "layer:status",
+ "static",
+ "c990f55c8e879793a62ed8464ee3d7e0d7d2225fdecaf17af24b0df0e2daa8c1"
+ ],
+ "metadata.yaml": [
+ "easyrsa",
+ "dynamic",
+ "0ed64e36422eed5302d8fd152567795c18784d5726f84b5c11080359e169f59a"
+ ],
+ "pydocmd.yml": [
+ "layer:status",
+ "static",
+ "11d9293901f32f75f4256ae4ac2073b92ce1d7ef7b6c892ba9fbb98690a0b330"
+ ],
+ "reactive/__init__.py": [
+ "layer:leadership",
+ "static",
+ "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"
+ ],
+ "reactive/easyrsa.py": [
+ "easyrsa",
+ "static",
+ "8739e7c51bdda309bab48754a44218a565032aa66137c8b056075c173d07ecc4"
+ ],
+ "reactive/leadership.py": [
+ "layer:leadership",
+ "static",
+ "e2b233cf861adc3b2d9e9c062134ce2f104953f03283cdddd88f49efee652e8f"
+ ],
+ "reactive/status.py": [
+ "layer:status",
+ "static",
+ "30207fc206f24e91def5252f1c7f7c8e23c0aed0e93076babf5e03c05296d207"
+ ],
+ "requirements.txt": [
+ "layer:basic",
+ "static",
+ "a00f75d80849e5b4fc5ad2e7536f947c25b1a4044b341caa8ee87a92d3a4c804"
+ ],
+ "tox.ini": [
+ "layer:status",
+ "static",
+ "2669a78e8e51c1606874e1cc97ca99e660ff547a79592572a38a268d99b25b67"
+ ],
+ "version": [
+ "easyrsa",
+ "dynamic",
+ "851da86e953acd82c0991bb6e68e42819c3a3bce501c177dff2c7cab01794982"
+ ],
+ "wheelhouse.txt": [
+ "layer:basic",
+ "dynamic",
+ "7cf3f983dc8f85b0c0ca6d69accdb4f4af842a911625286df09005ed1897d797"
+ ],
+ "wheelhouse/Jinja2-2.10.1.tar.gz": [
+ "layer:basic",
+ "dynamic",
+ "065c4f02ebe7f7cf559e49ee5a95fb800a9e4528727aec6f24402a5374c65013"
+ ],
+ "wheelhouse/MarkupSafe-1.1.1.tar.gz": [
+ "__pip__",
+ "dynamic",
+ "29872e92839765e546828bb7754a68c418d927cd064fd4708fab9fe9c8bb116b"
+ ],
+ "wheelhouse/PyYAML-5.2.tar.gz": [
+ "layer:basic",
+ "dynamic",
+ "c0ee8eca2c582d29c3c2ec6e2c4f703d1b7f1fb10bc72317355a746057e7346c"
+ ],
+ "wheelhouse/Tempita-0.5.2.tar.gz": [
+ "__pip__",
+ "dynamic",
+ "cacecf0baa674d356641f1d406b8bff1d756d739c46b869a54de515d08e6fc9c"
+ ],
+ "wheelhouse/charmhelpers-0.20.21.tar.gz": [
+ "layer:basic",
+ "dynamic",
+ "37dd06f9548724d38352d1eaf91216df9167066745774118481d40974599715c"
+ ],
+ "wheelhouse/charms.reactive-1.4.1.tar.gz": [
+ "layer:basic",
+ "dynamic",
+ "bba21b4fd40b26c240c9ef2aa10c6fdf73592031c68591da4e7ccc46ca9cb616"
+ ],
+ "wheelhouse/netaddr-0.7.19.tar.gz": [
+ "layer:basic",
+ "dynamic",
+ "38aeec7cdd035081d3a4c306394b19d677623bf76fa0913f6695127c7753aefd"
+ ],
+ "wheelhouse/pbr-5.6.0.tar.gz": [
+ "__pip__",
+ "dynamic",
+ "42df03e7797b796625b1029c0400279c7c34fd7df24a7d7818a1abb5b38710dd"
+ ],
+ "wheelhouse/pip-18.1.tar.gz": [
+ "layer:basic",
+ "dynamic",
+ "c0a292bd977ef590379a3f05d7b7f65135487b67470f6281289a94e015650ea1"
+ ],
+ "wheelhouse/pyaml-20.4.0.tar.gz": [
+ "__pip__",
+ "dynamic",
+ "29a5c2a68660a799103d6949167bd6c7953d031449d08802386372de1db6ad71"
+ ],
+ "wheelhouse/setuptools-41.6.0.zip": [
+ "layer:basic",
+ "dynamic",
+ "6afa61b391dcd16cb8890ec9f66cc4015a8a31a6e1c2b4e0c464514be1a3d722"
+ ],
+ "wheelhouse/setuptools_scm-1.17.0.tar.gz": [
+ "layer:basic",
+ "dynamic",
+ "70a4cf5584e966ae92f54a764e6437af992ba42ac4bca7eb37cc5d02b98ec40a"
+ ],
+ "wheelhouse/six-1.15.0.tar.gz": [
+ "__pip__",
+ "dynamic",
+ "30639c035cdb23534cd4aa2dd52c3bf48f06e5f4a941509c8bafd8ce11080259"
+ ],
+ "wheelhouse/wheel-0.33.6.tar.gz": [
+ "layer:basic",
+ "dynamic",
+ "10c9da68765315ed98850f8e048347c3eb06dd81822dc2ab1d4fde9dc9702646"
+ ]
+ }
+}
\ No newline at end of file
diff --git a/easyrsa/.github/workflows/tox.yaml b/easyrsa/.github/workflows/tox.yaml
new file mode 100644
index 0000000..b07172d
--- /dev/null
+++ b/easyrsa/.github/workflows/tox.yaml
@@ -0,0 +1,22 @@
+name: Run tests with Tox
+
+on: [push]
+
+jobs:
+ build:
+
+ runs-on: ubuntu-latest
+ strategy:
+ matrix:
+ python: [3.5, 3.6, 3.7, 3.8]
+
+ steps:
+ - uses: actions/checkout@v2
+ - name: Setup Python
+ uses: actions/setup-python@v1
+ with:
+ python-version: ${{ matrix.python }}
+ - name: Install Tox and any other packages
+ run: pip install tox
+ - name: Run Tox
+ run: tox -e py # Run tox using the version of Python in `PATH`
diff --git a/easyrsa/.gitignore b/easyrsa/.gitignore
new file mode 100644
index 0000000..fa25a92
--- /dev/null
+++ b/easyrsa/.gitignore
@@ -0,0 +1,5 @@
+.tox
+.cache
+.unit-state.db
+*__pycache__
+*.pyc
diff --git a/easyrsa/.travis/profile-update.yaml b/easyrsa/.travis/profile-update.yaml
new file mode 100644
index 0000000..57f96eb
--- /dev/null
+++ b/easyrsa/.travis/profile-update.yaml
@@ -0,0 +1,12 @@
+config: {}
+description: Default LXD profile - updated
+devices:
+ eth0:
+ name: eth0
+ parent: lxdbr0
+ nictype: bridged
+ type: nic
+ root:
+ path: /
+ pool: default
+ type: disk
diff --git a/easyrsa/CONTRIBUTING.md b/easyrsa/CONTRIBUTING.md
new file mode 100644
index 0000000..0f7bb82
--- /dev/null
+++ b/easyrsa/CONTRIBUTING.md
@@ -0,0 +1,37 @@
+# Contributor Guide
+
+This Juju charm is open source ([Apache License 2.0](./LICENSE)) and we actively seek any community contributions
+for code, suggestions and documentation.
+This page details a few notes, workflows and suggestions for how to make contributions most effective and help us
+all build a better charm - please give them a read before working on any contributions.
+
+## Licensing
+
+This charm has been created under the [Apache License 2.0](./LICENSE), which will cover any contributions you may
+make to this project. Please familiarise yourself with the terms of the license.
+
+Additionally, this charm uses the Harmony CLA agreement. It’s the easiest way for you to give us permission to
+use your contributions.
+In effect, you’re giving us a license, but you still own the copyright — so you retain the right to modify your
+code and use it in other projects. Please [sign the CLA here](https://ubuntu.com/legal/contributors/agreement) before
+making any contributions.
+
+## Code of conduct
+
+We have adopted the Ubuntu Code of Conduct. You can read this in full [here](https://ubuntu.com/community/code-of-conduct).
+
+## Contributing code
+
+To contribute code to this project, please use the following workflow:
+
+1. [Submit a bug](https://bugs.launchpad.net/charm-easyrsa/+filebug) to explain the need for and track the change.
+2. Create a branch on your fork of the repo with your changes, including a unit test covering the new or modified code.
+3. Submit a PR. The PR description should include a link to the bug on Launchpad.
+4. Update the Launchpad bug to include a link to the PR and the `review-needed` tag.
+5. Once reviewed and merged, the change will become available on the edge channel and assigned to an appropriate milestone
+ for further release according to priority.
+
+## Documentation
+
+Documentation for this charm is currently maintained as part of the Charmed Kubernetes docs.
+See [this page](https://github.com/charmed-kubernetes/kubernetes-docs/blob/master/pages/k8s/charm-easyrsa.md)
diff --git a/easyrsa/LICENSE b/easyrsa/LICENSE
new file mode 100644
index 0000000..0543093
--- /dev/null
+++ b/easyrsa/LICENSE
@@ -0,0 +1,202 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+END OF TERMS AND CONDITIONS
+
+APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+Copyright Canonical, Ltd.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
diff --git a/easyrsa/Makefile b/easyrsa/Makefile
new file mode 100644
index 0000000..a1ad3a5
--- /dev/null
+++ b/easyrsa/Makefile
@@ -0,0 +1,24 @@
+#!/usr/bin/make
+
+all: lint unit_test
+
+
+.PHONY: clean
+clean:
+ @rm -rf .tox
+
+.PHONY: apt_prereqs
+apt_prereqs:
+ @# Need tox, but don't install the apt version unless we have to (don't want to conflict with pip)
+ @which tox >/dev/null || (sudo apt-get install -y python-pip && sudo pip install tox)
+
+.PHONY: lint
+lint: apt_prereqs
+ @tox --notest
+ @PATH=.tox/py34/bin:.tox/py35/bin flake8 $(wildcard hooks reactive lib unit_tests tests)
+ @charm proof
+
+.PHONY: unit_test
+unit_test: apt_prereqs
+ @echo Starting tests...
+ tox
diff --git a/easyrsa/README.md b/easyrsa/README.md
new file mode 100644
index 0000000..1ad63b2
--- /dev/null
+++ b/easyrsa/README.md
@@ -0,0 +1,13 @@
+# EasyRSA
+
+This charm delivers the EasyRSA application to act as a Certificate Authority
+(CA) and creates certificates for related charms.
+
+EasyRSA is a command line utility to build and manage a Public Key
+Infrastructure (PKI) Certificate Authority (CA).
+
+The purpose of a Public Key Infrastructure (PKI) is to facilitate the secure
+electronic transfer of information.
+
+This charm is maintained along with the components of Charmed Kubernetes. For full information,
+please visit the [official Charmed Kubernetes docs](https://www.ubuntu.com/kubernetes/docs/charm-easyrsa).
diff --git a/easyrsa/actions.yaml b/easyrsa/actions.yaml
new file mode 100644
index 0000000..35123a8
--- /dev/null
+++ b/easyrsa/actions.yaml
@@ -0,0 +1,25 @@
+"debug":
+ "description": "Collect debug data"
+"backup":
+ "description": "Creates a backup bundle containing current easyrsa pki."
+"restore":
+ "description": "Restores a pki previously backed up by 'backup' action"
+ "params":
+ "name":
+ "type": "string"
+ "description": "Name of the backup file from which the pki will be restored."
+ "required":
+ - "name"
+"list-backups":
+ "description": "List all available easyrsa backups."
+"delete-backup":
+ "description": "Delete specified (or all) easyrsa backups."
+ "params":
+ "name":
+ "type": "string"
+ "description": |
+ Name of the backup to be deleted. (list of all backups can be
+ acquired using 'list-backups' action)
+ "all":
+ "type": "boolean"
+ "description": "If set to 'true', all backups on the unit will be deleted."
diff --git a/easyrsa/actions/__init__.py b/easyrsa/actions/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/easyrsa/actions/actions.py b/easyrsa/actions/actions.py
new file mode 100755
index 0000000..91a9e36
--- /dev/null
+++ b/easyrsa/actions/actions.py
@@ -0,0 +1,362 @@
+#!/usr/local/sbin/charm-env python3
+import os
+import pwd
+import grp
+import sys
+import tarfile
+import shutil
+
+from datetime import datetime
+
+from charms import layer
+from charms.reactive.relations import endpoint_from_name
+from charmhelpers.core import hookenv
+from charmhelpers.core.hookenv import (
+ function_get,
+ function_set,
+ function_fail,
+ local_unit,
+ log,
+ leader_set,
+ leader_get,
+)
+
+from reactive.easyrsa import (
+ easyrsa_directory,
+ create_client_certificate,
+ create_server_certificate,
+)
+
+# Import charm layers and start reactive
+layer.import_layer_libs()
+hookenv._run_atstart()
+
+PKI_BACKUP = '/home/ubuntu/easyrsa_backup'
+# Minimal required contents of the backup tarball
+TAR_STRUCTURE = {'pki',
+ 'pki/ca.crt',
+ 'pki/issued',
+ 'pki/issued/client.crt',
+ 'pki/private',
+ 'pki/private/ca.key',
+ 'pki/private/client.key',
+ 'pki/serial',
+ }
+
+
+def _check_path_traversal(path_, parent_dir):
+ """Check that 'path_' does not lie outside of the 'parent_dir'.
+
+ This function takes into account possible '../' in 'path_' and also
+ any symlinks that could point somewhere outside the expected 'parent_dir'
+
+ NOTE(mkalcok): This implementation could be improved by using
+ 'os.path.commonpath()'. However it's available only in
+ py35+.
+
+ :param path_: Path to be tested
+ :param parent_dir: Directory in which the 'path_' must lie
+ :raises: RuntimeError if 'path_' is outside of the 'parent_dir'
+ """
+ full_path = os.path.realpath(path_)
+ parent_dir = os.path.realpath(parent_dir)
+ if not parent_dir.endswith('/'):
+ parent_dir += '/'
+
+ if os.path.commonprefix([parent_dir, full_path]) != parent_dir:
+ err_msg = "Path traversal detected. '{}' tries to travers out " \
+ "of {}".format(full_path, parent_dir)
+ log(err_msg, hookenv.ERROR)
+ raise RuntimeError(err_msg)
+
+
+def _ensure_backup_dir_exists():
+    """Ensure that the backup directory exists with proper ownership."""
+ uid = pwd.getpwnam("ubuntu").pw_uid
+ gid = grp.getgrnam("ubuntu").gr_gid
+ try:
+ os.mkdir(PKI_BACKUP, mode=0o700)
+ except FileExistsError:
+ pass
+ os.chown(PKI_BACKUP, uid, gid)
+
+ if not os.path.isdir(PKI_BACKUP):
+ log("Backup destination '{}' is not a directory".format(PKI_BACKUP),
+ hookenv.ERROR)
+ raise RuntimeError('Backup destination is not a directory.')
+
+
+def _verify_backup(pki_tar):
+ """
+ Verify that backup archive contains expected files
+
+ :param pki_tar: Tarfile object containing easyrsa backup
+ """
+ log("Verifying backup", hookenv.DEBUG)
+ members = set(pki_tar.getnames())
+
+ # Check that backup contains all the expected/required files
+ if not TAR_STRUCTURE.issubset(members):
+ raise RuntimeError("Backup has unexpected content. Corrupted file?")
+ log("Check expected files - OK", hookenv.DEBUG)
+
+ # Check for path traversal attempts in tar file
+ pki_dir = os.path.join(easyrsa_directory, 'pki')
+ for path_ in members:
+ destination = os.path.join(pki_dir, path_)
+ _check_path_traversal(destination, pki_dir)
+
+
+def _replace_pki(pki_tar, pki_dir):
+ """
+ Safely replace easyrsa pki directory.
+
+ If there are any problems during the extraction of the backup, original
+ pki directory will be brought back and error raised.
+
+ :param pki_tar: Tarfile object containing easyrsa backup
+ :param pki_dir: Destination for extraction of easyrsa backup
+ :return: None
+ """
+ safety_backup = os.path.join(easyrsa_directory, 'pki_backup')
+ shutil.move(pki_dir, safety_backup)
+ try:
+ log("Extracting pki from backup", hookenv.DEBUG)
+ pki_tar.extractall(easyrsa_directory)
+ except Exception as exc:
+ log("pki extraction failed: {}".format(exc),
+ hookenv.WARNING)
+ log("Restoring original pki.", hookenv.INFO)
+ shutil.move(safety_backup, pki_dir)
+ raise RuntimeError('Failed to extract backup bundle. '
+ 'Error: {}'.format(exc))
+ else:
+ shutil.rmtree(safety_backup)
+
+
+def _update_leadership_data(pki_dir, cert_dir, key_dir):
+ """
+ Update certificates stored in the leaders database.
+
+ :param pki_dir: location of easyrsa pki (usually /EasyRSA/pki)
+ :param cert_dir: location of issued certificates (usually /issued)
+ :param key_dir: location of private keys (usually /private)
+ :return: None
+ """
+ ca_cert = os.path.join(pki_dir, 'ca.crt')
+ ca_key = os.path.join(key_dir, 'ca.key')
+ serial_file = os.path.join(pki_dir, 'serial')
+ global_client_cert = os.path.join(cert_dir, 'client.crt')
+ global_client_key = os.path.join(key_dir, 'client.key')
+
+ with open(ca_cert, 'r') as stream:
+ data = stream.read()
+ log("Updating CA certificate in leader's database",
+ hookenv.INFO)
+ log("CA certificate:\n{}".format(data), hookenv.DEBUG)
+ leader_set({
+ 'certificate_authority': data})
+
+ with open(ca_key, 'r') as stream:
+ log("Updating CA key in leader's database",
+ hookenv.INFO)
+ leader_set({
+ 'certificate_authority_key': stream.read()})
+
+ with open(serial_file, 'r') as stream:
+ log("Updating CA serial in leader's database",
+ hookenv.INFO)
+ leader_set({
+ 'certificate_authority_serial': stream.read()})
+
+ with open(global_client_cert) as stream:
+ data = stream.read()
+ log("Updating (legacy) global client certificate in leader's database",
+ hookenv.INFO)
+ log(data, hookenv.DEBUG)
+ leader_set({'client_certificate': data})
+
+ with open(global_client_key) as stream:
+ log("Updating (legacy) global client key in leader's database",
+ hookenv.INFO)
+ leader_set({'client_key': stream.read()})
+
+
+def backup():
+ """
+ Implementation of easyrsa 'backup' action.
+
+ Currently deployed pki is packed into tarball and stored in the
+ backups directory.
+ """
+ _ensure_backup_dir_exists()
+
+ timestamp = datetime.now().strftime('%Y-%m-%d_%H-%M-%S')
+ backup_name = 'easyrsa-{}.tar.gz'.format(timestamp)
+ backup_path = os.path.join(PKI_BACKUP, backup_name)
+ with tarfile.open(backup_path, mode='w:gz') as pki_tar:
+ pki_tar.add(os.path.join(easyrsa_directory, 'pki'), 'pki')
+
+ log("Backup created and saved to '{}'".format(backup_path), hookenv.DEBUG)
+ function_set({
+ 'command': 'juju scp {}:{} .'.format(local_unit(), backup_path),
+ 'message': 'Backup archive created successfully. Use the juju scp '
+ 'command to copy it to your local machine.'
+ })
+
+
+def restore():
+ """
+    Implementation of the easyrsa 'restore' action.
+
+    The backup restoration process can be summarized as follows:
+
+ * Selected backup is scanned and verified
+ * Contents of the backup are unpacked into /EasyRSA/pki
+ * Data that are stored in the local database are updated
+ * All units that have relation with this easyrsa unit will be notified
+ about the certificate changes.
+ """
+ pki_dir = os.path.join(easyrsa_directory, 'pki')
+ backup_name = function_get('name')
+
+ if backup_name is None:
+ raise RuntimeError("Parameter 'name' is required.")
+
+ log("Restoring pki from backup file {}".format(backup_name), hookenv.INFO)
+
+ backup_path = os.path.join(PKI_BACKUP, backup_name)
+
+ if not os.path.isfile(backup_path):
+ log("Backup file '{}' does not exists.".format(backup_path),
+ hookenv.ERROR)
+ raise RuntimeError("Backup with name '{}' does not exist. Use action "
+ "'list-backups' to list all available "
+ "backups".format(backup_name))
+
+ with tarfile.open(backup_path, 'r:gz') as pki_tar:
+ _verify_backup(pki_tar)
+ _replace_pki(pki_tar, pki_dir)
+
+ cert_dir = os.path.join(pki_dir, 'issued')
+ key_dir = os.path.join(pki_dir, 'private')
+
+ # Update CA and global client data stored in the local leader's database
+ # NOTE(mkalcok): Easyrsa does not really support HA mode, so it's usually
+ # run as a single unit/model
+ _update_leadership_data(pki_dir, cert_dir, key_dir)
+
+ ca_cert = leader_get('certificate_authority')
+ tls = endpoint_from_name('client')
+ log("Sending CA certificate to all related units", hookenv.INFO)
+ tls.set_ca(ca_cert)
+ log("Sending global client certificate and key to all related units",
+ hookenv.INFO)
+ tls.set_client_cert(leader_get('client_certificate'),
+ leader_get('client_key'))
+ for client in tls.all_requests:
+ try:
+ cert_file = os.path.join(cert_dir,
+ "{}.crt".format(client.common_name))
+ key_file = os.path.join(key_dir,
+ "{}.key".format(client.common_name))
+ with open(cert_file, 'r') as file:
+ cert = file.read()
+ with open(key_file, 'r') as file:
+ key = file.read()
+ log("Sending certificate for '{}' to unit"
+ "'{}'".format(client.common_name, client.unit_name),
+ hookenv.INFO)
+ log(cert, hookenv.DEBUG)
+ client.set_cert(cert, key)
+
+ except FileNotFoundError:
+ log("Certificate for '{}' not found in backup. "
+ "Generating new one.", hookenv.INFO)
+ if client.cert_type == 'client':
+ cert, key = create_client_certificate(client.common_name)
+ elif client.cert_type == 'server':
+ cert, key = create_server_certificate(client.common_name,
+ client.sans,
+ client.common_name)
+ else:
+ # This use case should not really happen as easyrsa charm
+ # does not support Application type certificates
+ raise RuntimeError('Unrecognized certificate request type '
+ '"{}".'.format(client.cert_type))
+ log("Sending certificate for '{}' to unit"
+ "'{}'".format(client.common_name, client.unit_name),
+ hookenv.INFO)
+ log(cert, hookenv.DEBUG)
+ client.set_cert(cert, key)
+
+ hookenv._run_atexit()
+
+
+def list_backups():
+ """Implementation of easyrsa 'list-backups' action."""
+ file_list = []
+
+ try:
+ file_list = os.listdir(PKI_BACKUP)
+ except FileNotFoundError:
+ pass
+
+ if file_list:
+ message = 'Available backup files:'
+ for file in file_list:
+ message += '\n{}'.format(file)
+ else:
+ message = 'There are no available backup files.'
+
+ function_set({'message': message})
+
+
+def delete_backup():
+    """Implementation of the easyrsa 'delete-backup' action."""
+ backup_name = function_get('name')
+ delete_all = function_get('all')
+
+ if not delete_all:
+ if backup_name is None:
+ raise RuntimeError("Parameter 'name' is required if parameter "
+ "'all' is False.")
+ log("Removing backup '{}'".format(backup_name), hookenv.INFO)
+ delete_file = os.path.join(PKI_BACKUP, backup_name)
+ _check_path_traversal(delete_file, PKI_BACKUP)
+ try:
+ os.remove(delete_file)
+ except FileNotFoundError:
+ err_msg = "Backup file '{}' does not exist".format(backup_name)
+ log(err_msg, hookenv.ERROR)
+ raise RuntimeError(err_msg)
+ else:
+ log("Removing all backup files.", hookenv.INFO)
+ shutil.rmtree(PKI_BACKUP)
+
+
+ACTIONS = {'backup': backup,
+ 'restore': restore,
+ 'list-backups': list_backups,
+ 'delete-backup': delete_backup
+ }
+
+
+def main(args):
+ action_name = os.path.basename(args.pop(0))
+ try:
+ action = ACTIONS[action_name]
+ except KeyError:
+ s = "Action {} undefined".format(action_name)
+ function_fail(s)
+ return
+ else:
+ try:
+ log("Running action '{}'.".format(action_name))
+ action()
+ except Exception as e:
+ function_fail("Action {} failed: {}".format(action_name, str(e)))
+
+
+if __name__ == '__main__':
+ main(sys.argv)
diff --git a/easyrsa/actions/backup b/easyrsa/actions/backup
new file mode 100755
index 0000000..91a9e36
--- /dev/null
+++ b/easyrsa/actions/backup
@@ -0,0 +1,362 @@
+#!/usr/local/sbin/charm-env python3
+import os
+import pwd
+import grp
+import sys
+import tarfile
+import shutil
+
+from datetime import datetime
+
+from charms import layer
+from charms.reactive.relations import endpoint_from_name
+from charmhelpers.core import hookenv
+from charmhelpers.core.hookenv import (
+ function_get,
+ function_set,
+ function_fail,
+ local_unit,
+ log,
+ leader_set,
+ leader_get,
+)
+
+from reactive.easyrsa import (
+ easyrsa_directory,
+ create_client_certificate,
+ create_server_certificate,
+)
+
+# Import charm layers and start reactive
+layer.import_layer_libs()
+hookenv._run_atstart()
+
+PKI_BACKUP = '/home/ubuntu/easyrsa_backup'
+# Minimal required contents of the backup tarball
+TAR_STRUCTURE = {'pki',
+ 'pki/ca.crt',
+ 'pki/issued',
+ 'pki/issued/client.crt',
+ 'pki/private',
+ 'pki/private/ca.key',
+ 'pki/private/client.key',
+ 'pki/serial',
+ }
+
+
+def _check_path_traversal(path_, parent_dir):
+ """Check that 'path_' does not lie outside of the 'parent_dir'.
+
+ This function takes into account possible '../' in 'path_' and also
+ any symlinks that could point somewhere outside the expected 'parent_dir'
+
+ NOTE(mkalcok): This implementation could be improved by using
+ 'os.path.commonpath()'. However it's available only in
+ py35+.
+
+ :param path_: Path to be tested
+ :param parent_dir: Directory in which the 'path_' must lie
+ :raises: RuntimeError if 'path_' is outside of the 'parent_dir'
+ """
+ full_path = os.path.realpath(path_)
+ parent_dir = os.path.realpath(parent_dir)
+ if not parent_dir.endswith('/'):
+ parent_dir += '/'
+
+ if os.path.commonprefix([parent_dir, full_path]) != parent_dir:
+ err_msg = "Path traversal detected. '{}' tries to travers out " \
+ "of {}".format(full_path, parent_dir)
+ log(err_msg, hookenv.ERROR)
+ raise RuntimeError(err_msg)
+
+
+def _ensure_backup_dir_exists():
+    """Ensure that the backup directory exists with proper ownership."""
+ uid = pwd.getpwnam("ubuntu").pw_uid
+ gid = grp.getgrnam("ubuntu").gr_gid
+ try:
+ os.mkdir(PKI_BACKUP, mode=0o700)
+ except FileExistsError:
+ pass
+ os.chown(PKI_BACKUP, uid, gid)
+
+ if not os.path.isdir(PKI_BACKUP):
+ log("Backup destination '{}' is not a directory".format(PKI_BACKUP),
+ hookenv.ERROR)
+ raise RuntimeError('Backup destination is not a directory.')
+
+
+def _verify_backup(pki_tar):
+ """
+ Verify that backup archive contains expected files
+
+ :param pki_tar: Tarfile object containing easyrsa backup
+ """
+ log("Verifying backup", hookenv.DEBUG)
+ members = set(pki_tar.getnames())
+
+ # Check that backup contains all the expected/required files
+ if not TAR_STRUCTURE.issubset(members):
+ raise RuntimeError("Backup has unexpected content. Corrupted file?")
+ log("Check expected files - OK", hookenv.DEBUG)
+
+ # Check for path traversal attempts in tar file
+ pki_dir = os.path.join(easyrsa_directory, 'pki')
+ for path_ in members:
+ destination = os.path.join(pki_dir, path_)
+ _check_path_traversal(destination, pki_dir)
+
+
+def _replace_pki(pki_tar, pki_dir):
+ """
+ Safely replace easyrsa pki directory.
+
+ If there are any problems during the extraction of the backup, original
+ pki directory will be brought back and error raised.
+
+ :param pki_tar: Tarfile object containing easyrsa backup
+ :param pki_dir: Destination for extraction of easyrsa backup
+ :return: None
+ """
+ safety_backup = os.path.join(easyrsa_directory, 'pki_backup')
+ shutil.move(pki_dir, safety_backup)
+ try:
+ log("Extracting pki from backup", hookenv.DEBUG)
+ pki_tar.extractall(easyrsa_directory)
+ except Exception as exc:
+ log("pki extraction failed: {}".format(exc),
+ hookenv.WARNING)
+ log("Restoring original pki.", hookenv.INFO)
+ shutil.move(safety_backup, pki_dir)
+ raise RuntimeError('Failed to extract backup bundle. '
+ 'Error: {}'.format(exc))
+ else:
+ shutil.rmtree(safety_backup)
+
+
+def _update_leadership_data(pki_dir, cert_dir, key_dir):
+ """
+ Update certificates stored in the leaders database.
+
+ :param pki_dir: location of easyrsa pki (usually /EasyRSA/pki)
+ :param cert_dir: location of issued certificates (usually /issued)
+ :param key_dir: location of private keys (usually /private)
+ :return: None
+ """
+ ca_cert = os.path.join(pki_dir, 'ca.crt')
+ ca_key = os.path.join(key_dir, 'ca.key')
+ serial_file = os.path.join(pki_dir, 'serial')
+ global_client_cert = os.path.join(cert_dir, 'client.crt')
+ global_client_key = os.path.join(key_dir, 'client.key')
+
+ with open(ca_cert, 'r') as stream:
+ data = stream.read()
+ log("Updating CA certificate in leader's database",
+ hookenv.INFO)
+ log("CA certificate:\n{}".format(data), hookenv.DEBUG)
+ leader_set({
+ 'certificate_authority': data})
+
+ with open(ca_key, 'r') as stream:
+ log("Updating CA key in leader's database",
+ hookenv.INFO)
+ leader_set({
+ 'certificate_authority_key': stream.read()})
+
+ with open(serial_file, 'r') as stream:
+ log("Updating CA serial in leader's database",
+ hookenv.INFO)
+ leader_set({
+ 'certificate_authority_serial': stream.read()})
+
+ with open(global_client_cert) as stream:
+ data = stream.read()
+ log("Updating (legacy) global client certificate in leader's database",
+ hookenv.INFO)
+ log(data, hookenv.DEBUG)
+ leader_set({'client_certificate': data})
+
+ with open(global_client_key) as stream:
+ log("Updating (legacy) global client key in leader's database",
+ hookenv.INFO)
+ leader_set({'client_key': stream.read()})
+
+
+def backup():
+ """
+ Implementation of easyrsa 'backup' action.
+
+ Currently deployed pki is packed into tarball and stored in the
+ backups directory.
+ """
+ _ensure_backup_dir_exists()
+
+ timestamp = datetime.now().strftime('%Y-%m-%d_%H-%M-%S')
+ backup_name = 'easyrsa-{}.tar.gz'.format(timestamp)
+ backup_path = os.path.join(PKI_BACKUP, backup_name)
+ with tarfile.open(backup_path, mode='w:gz') as pki_tar:
+ pki_tar.add(os.path.join(easyrsa_directory, 'pki'), 'pki')
+
+ log("Backup created and saved to '{}'".format(backup_path), hookenv.DEBUG)
+ function_set({
+ 'command': 'juju scp {}:{} .'.format(local_unit(), backup_path),
+ 'message': 'Backup archive created successfully. Use the juju scp '
+ 'command to copy it to your local machine.'
+ })
+
+
+def restore():
+ """
+    Implementation of the easyrsa 'restore' action.
+
+    The backup restoration process can be summarized as follows:
+
+ * Selected backup is scanned and verified
+ * Contents of the backup are unpacked into /EasyRSA/pki
+ * Data that are stored in the local database are updated
+ * All units that have relation with this easyrsa unit will be notified
+ about the certificate changes.
+ """
+ pki_dir = os.path.join(easyrsa_directory, 'pki')
+ backup_name = function_get('name')
+
+ if backup_name is None:
+ raise RuntimeError("Parameter 'name' is required.")
+
+ log("Restoring pki from backup file {}".format(backup_name), hookenv.INFO)
+
+ backup_path = os.path.join(PKI_BACKUP, backup_name)
+
+ if not os.path.isfile(backup_path):
+ log("Backup file '{}' does not exists.".format(backup_path),
+ hookenv.ERROR)
+ raise RuntimeError("Backup with name '{}' does not exist. Use action "
+ "'list-backups' to list all available "
+ "backups".format(backup_name))
+
+ with tarfile.open(backup_path, 'r:gz') as pki_tar:
+ _verify_backup(pki_tar)
+ _replace_pki(pki_tar, pki_dir)
+
+ cert_dir = os.path.join(pki_dir, 'issued')
+ key_dir = os.path.join(pki_dir, 'private')
+
+ # Update CA and global client data stored in the local leader's database
+ # NOTE(mkalcok): Easyrsa does not really support HA mode, so it's usually
+ # run as a single unit/model
+ _update_leadership_data(pki_dir, cert_dir, key_dir)
+
+ ca_cert = leader_get('certificate_authority')
+ tls = endpoint_from_name('client')
+ log("Sending CA certificate to all related units", hookenv.INFO)
+ tls.set_ca(ca_cert)
+ log("Sending global client certificate and key to all related units",
+ hookenv.INFO)
+ tls.set_client_cert(leader_get('client_certificate'),
+ leader_get('client_key'))
+ for client in tls.all_requests:
+ try:
+ cert_file = os.path.join(cert_dir,
+ "{}.crt".format(client.common_name))
+ key_file = os.path.join(key_dir,
+ "{}.key".format(client.common_name))
+ with open(cert_file, 'r') as file:
+ cert = file.read()
+ with open(key_file, 'r') as file:
+ key = file.read()
+ log("Sending certificate for '{}' to unit"
+ "'{}'".format(client.common_name, client.unit_name),
+ hookenv.INFO)
+ log(cert, hookenv.DEBUG)
+ client.set_cert(cert, key)
+
+ except FileNotFoundError:
+ log("Certificate for '{}' not found in backup. "
+ "Generating new one.", hookenv.INFO)
+ if client.cert_type == 'client':
+ cert, key = create_client_certificate(client.common_name)
+ elif client.cert_type == 'server':
+ cert, key = create_server_certificate(client.common_name,
+ client.sans,
+ client.common_name)
+ else:
+ # This use case should not really happen as easyrsa charm
+ # does not support Application type certificates
+ raise RuntimeError('Unrecognized certificate request type '
+ '"{}".'.format(client.cert_type))
+ log("Sending certificate for '{}' to unit"
+ "'{}'".format(client.common_name, client.unit_name),
+ hookenv.INFO)
+ log(cert, hookenv.DEBUG)
+ client.set_cert(cert, key)
+
+ hookenv._run_atexit()
+
+
+def list_backups():
+ """Implementation of easyrsa 'list-backups' action."""
+ file_list = []
+
+ try:
+ file_list = os.listdir(PKI_BACKUP)
+ except FileNotFoundError:
+ pass
+
+ if file_list:
+ message = 'Available backup files:'
+ for file in file_list:
+ message += '\n{}'.format(file)
+ else:
+ message = 'There are no available backup files.'
+
+ function_set({'message': message})
+
+
+def delete_backup():
+    """Implementation of the easyrsa 'delete-backup' action."""
+ backup_name = function_get('name')
+ delete_all = function_get('all')
+
+ if not delete_all:
+ if backup_name is None:
+ raise RuntimeError("Parameter 'name' is required if parameter "
+ "'all' is False.")
+ log("Removing backup '{}'".format(backup_name), hookenv.INFO)
+ delete_file = os.path.join(PKI_BACKUP, backup_name)
+ _check_path_traversal(delete_file, PKI_BACKUP)
+ try:
+ os.remove(delete_file)
+ except FileNotFoundError:
+ err_msg = "Backup file '{}' does not exist".format(backup_name)
+ log(err_msg, hookenv.ERROR)
+ raise RuntimeError(err_msg)
+ else:
+ log("Removing all backup files.", hookenv.INFO)
+ shutil.rmtree(PKI_BACKUP)
+
+
+ACTIONS = {'backup': backup,
+ 'restore': restore,
+ 'list-backups': list_backups,
+ 'delete-backup': delete_backup
+ }
+
+
+def main(args):
+ action_name = os.path.basename(args.pop(0))
+ try:
+ action = ACTIONS[action_name]
+ except KeyError:
+ s = "Action {} undefined".format(action_name)
+ function_fail(s)
+ return
+ else:
+ try:
+ log("Running action '{}'.".format(action_name))
+ action()
+ except Exception as e:
+ function_fail("Action {} failed: {}".format(action_name, str(e)))
+
+
+if __name__ == '__main__':
+ main(sys.argv)
diff --git a/easyrsa/actions/debug b/easyrsa/actions/debug
new file mode 100755
index 0000000..8ba160e
--- /dev/null
+++ b/easyrsa/actions/debug
@@ -0,0 +1,102 @@
+#!/usr/local/sbin/charm-env python3
+
+import os
+import subprocess
+import tarfile
+import tempfile
+import traceback
+from contextlib import contextmanager
+from datetime import datetime
+from charmhelpers.core.hookenv import action_set, local_unit
+
+archive_dir = None
+log_file = None
+
+
+@contextmanager
+def archive_context():
+ """ Open a context with a new temporary directory.
+
+ When the context closes, the directory is archived, and the archive
+ location is added to Juju action output. """
+ global archive_dir
+ global log_file
+ with tempfile.TemporaryDirectory() as temp_dir:
+ name = "debug-" + datetime.now().strftime("%Y%m%d%H%M%S")
+ archive_dir = os.path.join(temp_dir, name)
+ os.makedirs(archive_dir)
+ with open("%s/debug.log" % archive_dir, "w") as log_file:
+ yield
+ os.chdir(temp_dir)
+ tar_path = "/home/ubuntu/%s.tar.gz" % name
+ with tarfile.open(tar_path, "w:gz") as f:
+ f.add(name)
+ action_set({
+ "path": tar_path,
+ "command": "juju scp %s:%s ." % (local_unit(), tar_path),
+ "message": " ".join([
+ "Archive has been created on unit %s." % local_unit(),
+ "Use the juju scp command to copy it to your local machine."
+ ])
+ })
+
+
+def log(msg):
+ """ Log a message that will be included in the debug archive.
+
+ Must be run within archive_context """
+ timestamp = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
+ for line in str(msg).splitlines():
+ log_file.write(timestamp + " | " + line.rstrip() + "\n")
+
+
+def run_script(script):
+ """ Run a single script. Must be run within archive_context """
+ log("Running script: " + script)
+ script_dir = os.path.join(archive_dir, script)
+ os.makedirs(script_dir)
+ env = os.environ.copy()
+ env["PYTHONPATH"] = "lib" # allow same imports as reactive code
+ env["DEBUG_SCRIPT_DIR"] = script_dir
+ with open(script_dir + "/stdout", "w") as stdout:
+ with open(script_dir + "/stderr", "w") as stderr:
+ process = subprocess.Popen(
+ "debug-scripts/" + script,
+ stdout=stdout, stderr=stderr, env=env
+ )
+ try:
+ exit_code = process.wait(timeout=300)
+ except subprocess.TimeoutExpired:
+ log("ERROR: still running, terminating")
+ process.terminate()
+ try:
+ exit_code = process.wait(timeout=10)
+ except subprocess.TimeoutExpired:
+ log("ERROR: still running, killing")
+ process.kill()
+ exit_code = process.wait(timeout=10)
+ if exit_code != 0:
+ log("ERROR: %s failed with exit code %d" % (script, exit_code))
+
+
+def run_all_scripts():
+ """ Run all scripts. For the sake of robustness, log and ignore any
+ exceptions that occur.
+
+ Must be run within archive_context """
+ scripts = os.listdir("debug-scripts")
+ for script in scripts:
+ try:
+ run_script(script)
+ except:
+ log(traceback.format_exc())
+
+
+def main():
+ """ Open an archive context and run all scripts. """
+ with archive_context():
+ run_all_scripts()
+
+
+if __name__ == "__main__":
+ main()
diff --git a/easyrsa/actions/delete-backup b/easyrsa/actions/delete-backup
new file mode 100755
index 0000000..91a9e36
--- /dev/null
+++ b/easyrsa/actions/delete-backup
@@ -0,0 +1,362 @@
+#!/usr/local/sbin/charm-env python3
+import os
+import pwd
+import grp
+import sys
+import tarfile
+import shutil
+
+from datetime import datetime
+
+from charms import layer
+from charms.reactive.relations import endpoint_from_name
+from charmhelpers.core import hookenv
+from charmhelpers.core.hookenv import (
+ function_get,
+ function_set,
+ function_fail,
+ local_unit,
+ log,
+ leader_set,
+ leader_get,
+)
+
+from reactive.easyrsa import (
+ easyrsa_directory,
+ create_client_certificate,
+ create_server_certificate,
+)
+
+# Import charm layers and start reactive
+layer.import_layer_libs()
+hookenv._run_atstart()
+
+PKI_BACKUP = '/home/ubuntu/easyrsa_backup'
+# Minimal required contents of the backup tarball
+TAR_STRUCTURE = {'pki',
+ 'pki/ca.crt',
+ 'pki/issued',
+ 'pki/issued/client.crt',
+ 'pki/private',
+ 'pki/private/ca.key',
+ 'pki/private/client.key',
+ 'pki/serial',
+ }
+
+
+def _check_path_traversal(path_, parent_dir):
+    """Check that 'path_' does not lie outside of the 'parent_dir'.
+
+    This function takes into account possible '../' in 'path_' and also
+    any symlinks that could point somewhere outside the expected 'parent_dir'
+
+    NOTE(mkalcok): This implementation could be improved by using
+                   'os.path.commonpath()'. However it's available only in
+                   py35+.
+
+    :param path_: Path to be tested
+    :param parent_dir: Directory in which the 'path_' must lie
+    :raises: RuntimeError if 'path_' is outside of the 'parent_dir'
+    """
+    # realpath() canonicalises both '../' components and symlinks before
+    # the prefix comparison below.
+    full_path = os.path.realpath(path_)
+    parent_dir = os.path.realpath(parent_dir)
+    # A trailing '/' prevents a sibling with a shared name prefix
+    # (e.g. '/backups-evil' vs '/backups') from passing the check.
+    if not parent_dir.endswith('/'):
+        parent_dir += '/'
+
+    if os.path.commonprefix([parent_dir, full_path]) != parent_dir:
+        # NOTE(review): 'travers' is a typo for 'traverse'; kept unchanged
+        # here because it is a runtime error message, not a comment.
+        err_msg = "Path traversal detected. '{}' tries to travers out " \
+                  "of {}".format(full_path, parent_dir)
+        log(err_msg, hookenv.ERROR)
+        raise RuntimeError(err_msg)
+
+
+def _ensure_backup_dir_exists():
+    """Ensure that backup directory exists with proper ownership"""
+    uid = pwd.getpwnam("ubuntu").pw_uid
+    gid = grp.getgrnam("ubuntu").gr_gid
+    try:
+        os.mkdir(PKI_BACKUP, mode=0o700)
+    except FileExistsError:
+        pass
+    # Ownership is (re-)asserted even when the directory already existed.
+    os.chown(PKI_BACKUP, uid, gid)
+
+    # PKI_BACKUP may pre-exist as a non-directory (os.mkdir raised
+    # FileExistsError for that case too); reject it explicitly.
+    if not os.path.isdir(PKI_BACKUP):
+        log("Backup destination '{}' is not a directory".format(PKI_BACKUP),
+            hookenv.ERROR)
+        raise RuntimeError('Backup destination is not a directory.')
+
+
+def _verify_backup(pki_tar):
+    """
+    Verify that backup archive contains expected files
+
+    :param pki_tar: Tarfile object containing easyrsa backup
+    :raises: RuntimeError if required members are missing or if a member
+        path would escape the pki directory on extraction
+    """
+    log("Verifying backup", hookenv.DEBUG)
+    members = set(pki_tar.getnames())
+
+    # Check that backup contains all the expected/required files
+    if not TAR_STRUCTURE.issubset(members):
+        raise RuntimeError("Backup has unexpected content. Corrupted file?")
+    log("Check expected files - OK", hookenv.DEBUG)
+
+    # Check for path traversal attempts in tar file
+    pki_dir = os.path.join(easyrsa_directory, 'pki')
+    for path_ in members:
+        destination = os.path.join(pki_dir, path_)
+        _check_path_traversal(destination, pki_dir)
+
+
+def _replace_pki(pki_tar, pki_dir):
+    """
+    Safely replace easyrsa pki directory.
+
+    If there are any problems during the extraction of the backup, original
+    pki directory will be brought back and error raised.
+
+    NOTE(review): extractall() performs no path sanitisation of its own;
+    callers are expected to run _verify_backup() on the tar first (as
+    restore() does).
+
+    :param pki_tar: Tarfile object containing easyrsa backup
+    :param pki_dir: Destination for extraction of easyrsa backup
+    :return: None
+    """
+    # Keep the current pki aside so it can be restored on failure.
+    safety_backup = os.path.join(easyrsa_directory, 'pki_backup')
+    shutil.move(pki_dir, safety_backup)
+    try:
+        log("Extracting pki from backup", hookenv.DEBUG)
+        pki_tar.extractall(easyrsa_directory)
+    except Exception as exc:
+        log("pki extraction failed: {}".format(exc),
+            hookenv.WARNING)
+        log("Restoring original pki.", hookenv.INFO)
+        shutil.move(safety_backup, pki_dir)
+        raise RuntimeError('Failed to extract backup bundle. '
+                           'Error: {}'.format(exc))
+    else:
+        # Extraction succeeded; the safety copy is no longer needed.
+        shutil.rmtree(safety_backup)
+
+
+def _update_leadership_data(pki_dir, cert_dir, key_dir):
+    """
+    Update certificates stored in the leaders database.
+
+    :param pki_dir: location of easyrsa pki (usually /EasyRSA/pki)
+    :param cert_dir: location of issued certificates (usually /issued)
+    :param key_dir: location of private keys (usually /private)
+    :return: None
+    """
+    ca_cert = os.path.join(pki_dir, 'ca.crt')
+    ca_key = os.path.join(key_dir, 'ca.key')
+    serial_file = os.path.join(pki_dir, 'serial')
+    global_client_cert = os.path.join(cert_dir, 'client.crt')
+    global_client_key = os.path.join(key_dir, 'client.key')
+
+    with open(ca_cert, 'r') as stream:
+        data = stream.read()
+        log("Updating CA certificate in leader's database",
+            hookenv.INFO)
+        # Only the public certificate is echoed at DEBUG; the private keys
+        # below are stored without being logged.
+        log("CA certificate:\n{}".format(data), hookenv.DEBUG)
+        leader_set({
+            'certificate_authority': data})
+
+    with open(ca_key, 'r') as stream:
+        log("Updating CA key in leader's database",
+            hookenv.INFO)
+        leader_set({
+            'certificate_authority_key': stream.read()})
+
+    with open(serial_file, 'r') as stream:
+        log("Updating CA serial in leader's database",
+            hookenv.INFO)
+        leader_set({
+            'certificate_authority_serial': stream.read()})
+
+    with open(global_client_cert) as stream:
+        data = stream.read()
+        log("Updating (legacy) global client certificate in leader's database",
+            hookenv.INFO)
+        log(data, hookenv.DEBUG)
+        leader_set({'client_certificate': data})
+
+    with open(global_client_key) as stream:
+        log("Updating (legacy) global client key in leader's database",
+            hookenv.INFO)
+        leader_set({'client_key': stream.read()})
+
+
+def backup():
+    """
+    Implementation of easyrsa 'backup' action.
+
+    Currently deployed pki is packed into tarball and stored in the
+    backups directory.
+    """
+    _ensure_backup_dir_exists()
+
+    # Timestamped name keeps successive backups from clobbering each other.
+    timestamp = datetime.now().strftime('%Y-%m-%d_%H-%M-%S')
+    backup_name = 'easyrsa-{}.tar.gz'.format(timestamp)
+    backup_path = os.path.join(PKI_BACKUP, backup_name)
+    with tarfile.open(backup_path, mode='w:gz') as pki_tar:
+        # Store the pki tree under the archive-relative name 'pki'.
+        pki_tar.add(os.path.join(easyrsa_directory, 'pki'), 'pki')
+
+    log("Backup created and saved to '{}'".format(backup_path), hookenv.DEBUG)
+    function_set({
+        'command': 'juju scp {}:{} .'.format(local_unit(), backup_path),
+        'message': 'Backup archive created successfully. Use the juju scp '
+                   'command to copy it to your local machine.'
+    })
+
+
+def restore():
+ """
+ Implementation of easyrsa 'restore' action
+
+ Backup restoration process can be summarized as following:
+
+ * Selected backup is scanned and verified
+ * Contents of the backup are unpacked into /EasyRSA/pki
+ * Data that are stored in the local database are updated
+ * All units that have relation with this easyrsa unit will be notified
+ about the certificate changes.
+ """
+ pki_dir = os.path.join(easyrsa_directory, 'pki')
+ backup_name = function_get('name')
+
+ if backup_name is None:
+ raise RuntimeError("Parameter 'name' is required.")
+
+ log("Restoring pki from backup file {}".format(backup_name), hookenv.INFO)
+
+ backup_path = os.path.join(PKI_BACKUP, backup_name)
+
+ if not os.path.isfile(backup_path):
+ log("Backup file '{}' does not exists.".format(backup_path),
+ hookenv.ERROR)
+ raise RuntimeError("Backup with name '{}' does not exist. Use action "
+ "'list-backups' to list all available "
+ "backups".format(backup_name))
+
+ with tarfile.open(backup_path, 'r:gz') as pki_tar:
+ _verify_backup(pki_tar)
+ _replace_pki(pki_tar, pki_dir)
+
+ cert_dir = os.path.join(pki_dir, 'issued')
+ key_dir = os.path.join(pki_dir, 'private')
+
+ # Update CA and global client data stored in the local leader's database
+ # NOTE(mkalcok): Easyrsa does not really support HA mode, so it's usually
+ # run as a single unit/model
+ _update_leadership_data(pki_dir, cert_dir, key_dir)
+
+ ca_cert = leader_get('certificate_authority')
+ tls = endpoint_from_name('client')
+ log("Sending CA certificate to all related units", hookenv.INFO)
+ tls.set_ca(ca_cert)
+ log("Sending global client certificate and key to all related units",
+ hookenv.INFO)
+ tls.set_client_cert(leader_get('client_certificate'),
+ leader_get('client_key'))
+ for client in tls.all_requests:
+ try:
+ cert_file = os.path.join(cert_dir,
+ "{}.crt".format(client.common_name))
+ key_file = os.path.join(key_dir,
+ "{}.key".format(client.common_name))
+ with open(cert_file, 'r') as file:
+ cert = file.read()
+ with open(key_file, 'r') as file:
+ key = file.read()
+ log("Sending certificate for '{}' to unit"
+ "'{}'".format(client.common_name, client.unit_name),
+ hookenv.INFO)
+ log(cert, hookenv.DEBUG)
+ client.set_cert(cert, key)
+
+ except FileNotFoundError:
+ log("Certificate for '{}' not found in backup. "
+ "Generating new one.", hookenv.INFO)
+ if client.cert_type == 'client':
+ cert, key = create_client_certificate(client.common_name)
+ elif client.cert_type == 'server':
+ cert, key = create_server_certificate(client.common_name,
+ client.sans,
+ client.common_name)
+ else:
+ # This use case should not really happen as easyrsa charm
+ # does not support Application type certificates
+ raise RuntimeError('Unrecognized certificate request type '
+ '"{}".'.format(client.cert_type))
+ log("Sending certificate for '{}' to unit"
+ "'{}'".format(client.common_name, client.unit_name),
+ hookenv.INFO)
+ log(cert, hookenv.DEBUG)
+ client.set_cert(cert, key)
+
+ hookenv._run_atexit()
+
+
+def list_backups():
+ """Implementation of easyrsa 'list-backups' action."""
+ file_list = []
+
+ try:
+ file_list = os.listdir(PKI_BACKUP)
+ except FileNotFoundError:
+ pass
+
+ if file_list:
+ message = 'Available backup files:'
+ for file in file_list:
+ message += '\n{}'.format(file)
+ else:
+ message = 'There are no available backup files.'
+
+ function_set({'message': message})
+
+
+def delete_backup():
+ """Implementation of easyrsa 'delete-backup' action"""
+ backup_name = function_get('name')
+ delete_all = function_get('all')
+
+ if not delete_all:
+ if backup_name is None:
+ raise RuntimeError("Parameter 'name' is required if parameter "
+ "'all' is False.")
+ log("Removing backup '{}'".format(backup_name), hookenv.INFO)
+ delete_file = os.path.join(PKI_BACKUP, backup_name)
+ _check_path_traversal(delete_file, PKI_BACKUP)
+ try:
+ os.remove(delete_file)
+ except FileNotFoundError:
+ err_msg = "Backup file '{}' does not exist".format(backup_name)
+ log(err_msg, hookenv.ERROR)
+ raise RuntimeError(err_msg)
+ else:
+ log("Removing all backup files.", hookenv.INFO)
+ shutil.rmtree(PKI_BACKUP)
+
+
+ACTIONS = {'backup': backup,
+ 'restore': restore,
+ 'list-backups': list_backups,
+ 'delete-backup': delete_backup
+ }
+
+
+def main(args):
+ action_name = os.path.basename(args.pop(0))
+ try:
+ action = ACTIONS[action_name]
+ except KeyError:
+ s = "Action {} undefined".format(action_name)
+ function_fail(s)
+ return
+ else:
+ try:
+ log("Running action '{}'.".format(action_name))
+ action()
+ except Exception as e:
+ function_fail("Action {} failed: {}".format(action_name, str(e)))
+
+
+if __name__ == '__main__':
+ main(sys.argv)
diff --git a/easyrsa/actions/list-backups b/easyrsa/actions/list-backups
new file mode 100755
index 0000000..91a9e36
--- /dev/null
+++ b/easyrsa/actions/list-backups
@@ -0,0 +1,362 @@
+#!/usr/local/sbin/charm-env python3
+import os
+import pwd
+import grp
+import sys
+import tarfile
+import shutil
+
+from datetime import datetime
+
+from charms import layer
+from charms.reactive.relations import endpoint_from_name
+from charmhelpers.core import hookenv
+from charmhelpers.core.hookenv import (
+ function_get,
+ function_set,
+ function_fail,
+ local_unit,
+ log,
+ leader_set,
+ leader_get,
+)
+
+from reactive.easyrsa import (
+ easyrsa_directory,
+ create_client_certificate,
+ create_server_certificate,
+)
+
+# Import charm layers and start reactive
+layer.import_layer_libs()
+hookenv._run_atstart()
+
+PKI_BACKUP = '/home/ubuntu/easyrsa_backup'
+# Minimal required contents of the backup tarball
+TAR_STRUCTURE = {'pki',
+ 'pki/ca.crt',
+ 'pki/issued',
+ 'pki/issued/client.crt',
+ 'pki/private',
+ 'pki/private/ca.key',
+ 'pki/private/client.key',
+ 'pki/serial',
+ }
+
+
+def _check_path_traversal(path_, parent_dir):
+    """Check that 'path_' does not lie outside of the 'parent_dir'.
+
+    This function takes into account possible '../' in 'path_' and also
+    any symlinks that could point somewhere outside the expected 'parent_dir'
+
+    NOTE(mkalcok): This implementation could be improved by using
+                   'os.path.commonpath()'. However it's available only in
+                   py35+.
+
+    :param path_: Path to be tested
+    :param parent_dir: Directory in which the 'path_' must lie
+    :raises: RuntimeError if 'path_' is outside of the 'parent_dir'
+    """
+    # realpath() canonicalises both '../' components and symlinks before
+    # the prefix comparison below.
+    full_path = os.path.realpath(path_)
+    parent_dir = os.path.realpath(parent_dir)
+    # A trailing '/' prevents a sibling with a shared name prefix
+    # (e.g. '/backups-evil' vs '/backups') from passing the check.
+    if not parent_dir.endswith('/'):
+        parent_dir += '/'
+
+    if os.path.commonprefix([parent_dir, full_path]) != parent_dir:
+        # NOTE(review): 'travers' is a typo for 'traverse'; kept unchanged
+        # here because it is a runtime error message, not a comment.
+        err_msg = "Path traversal detected. '{}' tries to travers out " \
+                  "of {}".format(full_path, parent_dir)
+        log(err_msg, hookenv.ERROR)
+        raise RuntimeError(err_msg)
+
+
+def _ensure_backup_dir_exists():
+    """Ensure that backup directory exists with proper ownership"""
+    uid = pwd.getpwnam("ubuntu").pw_uid
+    gid = grp.getgrnam("ubuntu").gr_gid
+    try:
+        os.mkdir(PKI_BACKUP, mode=0o700)
+    except FileExistsError:
+        pass
+    # Ownership is (re-)asserted even when the directory already existed.
+    os.chown(PKI_BACKUP, uid, gid)
+
+    # PKI_BACKUP may pre-exist as a non-directory (os.mkdir raised
+    # FileExistsError for that case too); reject it explicitly.
+    if not os.path.isdir(PKI_BACKUP):
+        log("Backup destination '{}' is not a directory".format(PKI_BACKUP),
+            hookenv.ERROR)
+        raise RuntimeError('Backup destination is not a directory.')
+
+
+def _verify_backup(pki_tar):
+    """
+    Verify that backup archive contains expected files
+
+    :param pki_tar: Tarfile object containing easyrsa backup
+    :raises: RuntimeError if required members are missing or if a member
+        path would escape the pki directory on extraction
+    """
+    log("Verifying backup", hookenv.DEBUG)
+    members = set(pki_tar.getnames())
+
+    # Check that backup contains all the expected/required files
+    if not TAR_STRUCTURE.issubset(members):
+        raise RuntimeError("Backup has unexpected content. Corrupted file?")
+    log("Check expected files - OK", hookenv.DEBUG)
+
+    # Check for path traversal attempts in tar file
+    pki_dir = os.path.join(easyrsa_directory, 'pki')
+    for path_ in members:
+        destination = os.path.join(pki_dir, path_)
+        _check_path_traversal(destination, pki_dir)
+
+
+def _replace_pki(pki_tar, pki_dir):
+    """
+    Safely replace easyrsa pki directory.
+
+    If there are any problems during the extraction of the backup, original
+    pki directory will be brought back and error raised.
+
+    NOTE(review): extractall() performs no path sanitisation of its own;
+    callers are expected to run _verify_backup() on the tar first (as
+    restore() does).
+
+    :param pki_tar: Tarfile object containing easyrsa backup
+    :param pki_dir: Destination for extraction of easyrsa backup
+    :return: None
+    """
+    # Keep the current pki aside so it can be restored on failure.
+    safety_backup = os.path.join(easyrsa_directory, 'pki_backup')
+    shutil.move(pki_dir, safety_backup)
+    try:
+        log("Extracting pki from backup", hookenv.DEBUG)
+        pki_tar.extractall(easyrsa_directory)
+    except Exception as exc:
+        log("pki extraction failed: {}".format(exc),
+            hookenv.WARNING)
+        log("Restoring original pki.", hookenv.INFO)
+        shutil.move(safety_backup, pki_dir)
+        raise RuntimeError('Failed to extract backup bundle. '
+                           'Error: {}'.format(exc))
+    else:
+        # Extraction succeeded; the safety copy is no longer needed.
+        shutil.rmtree(safety_backup)
+
+
+def _update_leadership_data(pki_dir, cert_dir, key_dir):
+    """
+    Update certificates stored in the leaders database.
+
+    :param pki_dir: location of easyrsa pki (usually /EasyRSA/pki)
+    :param cert_dir: location of issued certificates (usually /issued)
+    :param key_dir: location of private keys (usually /private)
+    :return: None
+    """
+    ca_cert = os.path.join(pki_dir, 'ca.crt')
+    ca_key = os.path.join(key_dir, 'ca.key')
+    serial_file = os.path.join(pki_dir, 'serial')
+    global_client_cert = os.path.join(cert_dir, 'client.crt')
+    global_client_key = os.path.join(key_dir, 'client.key')
+
+    with open(ca_cert, 'r') as stream:
+        data = stream.read()
+        log("Updating CA certificate in leader's database",
+            hookenv.INFO)
+        # Only the public certificate is echoed at DEBUG; the private keys
+        # below are stored without being logged.
+        log("CA certificate:\n{}".format(data), hookenv.DEBUG)
+        leader_set({
+            'certificate_authority': data})
+
+    with open(ca_key, 'r') as stream:
+        log("Updating CA key in leader's database",
+            hookenv.INFO)
+        leader_set({
+            'certificate_authority_key': stream.read()})
+
+    with open(serial_file, 'r') as stream:
+        log("Updating CA serial in leader's database",
+            hookenv.INFO)
+        leader_set({
+            'certificate_authority_serial': stream.read()})
+
+    with open(global_client_cert) as stream:
+        data = stream.read()
+        log("Updating (legacy) global client certificate in leader's database",
+            hookenv.INFO)
+        log(data, hookenv.DEBUG)
+        leader_set({'client_certificate': data})
+
+    with open(global_client_key) as stream:
+        log("Updating (legacy) global client key in leader's database",
+            hookenv.INFO)
+        leader_set({'client_key': stream.read()})
+
+
+def backup():
+    """
+    Implementation of easyrsa 'backup' action.
+
+    Currently deployed pki is packed into tarball and stored in the
+    backups directory.
+    """
+    _ensure_backup_dir_exists()
+
+    # Timestamped name keeps successive backups from clobbering each other.
+    timestamp = datetime.now().strftime('%Y-%m-%d_%H-%M-%S')
+    backup_name = 'easyrsa-{}.tar.gz'.format(timestamp)
+    backup_path = os.path.join(PKI_BACKUP, backup_name)
+    with tarfile.open(backup_path, mode='w:gz') as pki_tar:
+        # Store the pki tree under the archive-relative name 'pki'.
+        pki_tar.add(os.path.join(easyrsa_directory, 'pki'), 'pki')
+
+    log("Backup created and saved to '{}'".format(backup_path), hookenv.DEBUG)
+    function_set({
+        'command': 'juju scp {}:{} .'.format(local_unit(), backup_path),
+        'message': 'Backup archive created successfully. Use the juju scp '
+                   'command to copy it to your local machine.'
+    })
+
+
+def restore():
+ """
+ Implementation of easyrsa 'restore' action
+
+ Backup restoration process can be summarized as following:
+
+ * Selected backup is scanned and verified
+ * Contents of the backup are unpacked into /EasyRSA/pki
+ * Data that are stored in the local database are updated
+ * All units that have relation with this easyrsa unit will be notified
+ about the certificate changes.
+ """
+ pki_dir = os.path.join(easyrsa_directory, 'pki')
+ backup_name = function_get('name')
+
+ if backup_name is None:
+ raise RuntimeError("Parameter 'name' is required.")
+
+ log("Restoring pki from backup file {}".format(backup_name), hookenv.INFO)
+
+ backup_path = os.path.join(PKI_BACKUP, backup_name)
+
+ if not os.path.isfile(backup_path):
+ log("Backup file '{}' does not exists.".format(backup_path),
+ hookenv.ERROR)
+ raise RuntimeError("Backup with name '{}' does not exist. Use action "
+ "'list-backups' to list all available "
+ "backups".format(backup_name))
+
+ with tarfile.open(backup_path, 'r:gz') as pki_tar:
+ _verify_backup(pki_tar)
+ _replace_pki(pki_tar, pki_dir)
+
+ cert_dir = os.path.join(pki_dir, 'issued')
+ key_dir = os.path.join(pki_dir, 'private')
+
+ # Update CA and global client data stored in the local leader's database
+ # NOTE(mkalcok): Easyrsa does not really support HA mode, so it's usually
+ # run as a single unit/model
+ _update_leadership_data(pki_dir, cert_dir, key_dir)
+
+ ca_cert = leader_get('certificate_authority')
+ tls = endpoint_from_name('client')
+ log("Sending CA certificate to all related units", hookenv.INFO)
+ tls.set_ca(ca_cert)
+ log("Sending global client certificate and key to all related units",
+ hookenv.INFO)
+ tls.set_client_cert(leader_get('client_certificate'),
+ leader_get('client_key'))
+ for client in tls.all_requests:
+ try:
+ cert_file = os.path.join(cert_dir,
+ "{}.crt".format(client.common_name))
+ key_file = os.path.join(key_dir,
+ "{}.key".format(client.common_name))
+ with open(cert_file, 'r') as file:
+ cert = file.read()
+ with open(key_file, 'r') as file:
+ key = file.read()
+ log("Sending certificate for '{}' to unit"
+ "'{}'".format(client.common_name, client.unit_name),
+ hookenv.INFO)
+ log(cert, hookenv.DEBUG)
+ client.set_cert(cert, key)
+
+ except FileNotFoundError:
+ log("Certificate for '{}' not found in backup. "
+ "Generating new one.", hookenv.INFO)
+ if client.cert_type == 'client':
+ cert, key = create_client_certificate(client.common_name)
+ elif client.cert_type == 'server':
+ cert, key = create_server_certificate(client.common_name,
+ client.sans,
+ client.common_name)
+ else:
+ # This use case should not really happen as easyrsa charm
+ # does not support Application type certificates
+ raise RuntimeError('Unrecognized certificate request type '
+ '"{}".'.format(client.cert_type))
+ log("Sending certificate for '{}' to unit"
+ "'{}'".format(client.common_name, client.unit_name),
+ hookenv.INFO)
+ log(cert, hookenv.DEBUG)
+ client.set_cert(cert, key)
+
+ hookenv._run_atexit()
+
+
+def list_backups():
+ """Implementation of easyrsa 'list-backups' action."""
+ file_list = []
+
+ try:
+ file_list = os.listdir(PKI_BACKUP)
+ except FileNotFoundError:
+ pass
+
+ if file_list:
+ message = 'Available backup files:'
+ for file in file_list:
+ message += '\n{}'.format(file)
+ else:
+ message = 'There are no available backup files.'
+
+ function_set({'message': message})
+
+
+def delete_backup():
+ """Implementation of easyrsa 'delete-backup' action"""
+ backup_name = function_get('name')
+ delete_all = function_get('all')
+
+ if not delete_all:
+ if backup_name is None:
+ raise RuntimeError("Parameter 'name' is required if parameter "
+ "'all' is False.")
+ log("Removing backup '{}'".format(backup_name), hookenv.INFO)
+ delete_file = os.path.join(PKI_BACKUP, backup_name)
+ _check_path_traversal(delete_file, PKI_BACKUP)
+ try:
+ os.remove(delete_file)
+ except FileNotFoundError:
+ err_msg = "Backup file '{}' does not exist".format(backup_name)
+ log(err_msg, hookenv.ERROR)
+ raise RuntimeError(err_msg)
+ else:
+ log("Removing all backup files.", hookenv.INFO)
+ shutil.rmtree(PKI_BACKUP)
+
+
+ACTIONS = {'backup': backup,
+ 'restore': restore,
+ 'list-backups': list_backups,
+ 'delete-backup': delete_backup
+ }
+
+
+def main(args):
+ action_name = os.path.basename(args.pop(0))
+ try:
+ action = ACTIONS[action_name]
+ except KeyError:
+ s = "Action {} undefined".format(action_name)
+ function_fail(s)
+ return
+ else:
+ try:
+ log("Running action '{}'.".format(action_name))
+ action()
+ except Exception as e:
+ function_fail("Action {} failed: {}".format(action_name, str(e)))
+
+
+if __name__ == '__main__':
+ main(sys.argv)
diff --git a/easyrsa/actions/restore b/easyrsa/actions/restore
new file mode 100755
index 0000000..91a9e36
--- /dev/null
+++ b/easyrsa/actions/restore
@@ -0,0 +1,362 @@
+#!/usr/local/sbin/charm-env python3
+import os
+import pwd
+import grp
+import sys
+import tarfile
+import shutil
+
+from datetime import datetime
+
+from charms import layer
+from charms.reactive.relations import endpoint_from_name
+from charmhelpers.core import hookenv
+from charmhelpers.core.hookenv import (
+ function_get,
+ function_set,
+ function_fail,
+ local_unit,
+ log,
+ leader_set,
+ leader_get,
+)
+
+from reactive.easyrsa import (
+ easyrsa_directory,
+ create_client_certificate,
+ create_server_certificate,
+)
+
+# Import charm layers and start reactive
+layer.import_layer_libs()
+hookenv._run_atstart()
+
+PKI_BACKUP = '/home/ubuntu/easyrsa_backup'
+# Minimal required contents of the backup tarball
+TAR_STRUCTURE = {'pki',
+ 'pki/ca.crt',
+ 'pki/issued',
+ 'pki/issued/client.crt',
+ 'pki/private',
+ 'pki/private/ca.key',
+ 'pki/private/client.key',
+ 'pki/serial',
+ }
+
+
+def _check_path_traversal(path_, parent_dir):
+    """Check that 'path_' does not lie outside of the 'parent_dir'.
+
+    This function takes into account possible '../' in 'path_' and also
+    any symlinks that could point somewhere outside the expected 'parent_dir'
+
+    NOTE(mkalcok): This implementation could be improved by using
+                   'os.path.commonpath()'. However it's available only in
+                   py35+.
+
+    :param path_: Path to be tested
+    :param parent_dir: Directory in which the 'path_' must lie
+    :raises: RuntimeError if 'path_' is outside of the 'parent_dir'
+    """
+    # realpath() canonicalises both '../' components and symlinks before
+    # the prefix comparison below.
+    full_path = os.path.realpath(path_)
+    parent_dir = os.path.realpath(parent_dir)
+    # A trailing '/' prevents a sibling with a shared name prefix
+    # (e.g. '/backups-evil' vs '/backups') from passing the check.
+    if not parent_dir.endswith('/'):
+        parent_dir += '/'
+
+    if os.path.commonprefix([parent_dir, full_path]) != parent_dir:
+        # NOTE(review): 'travers' is a typo for 'traverse'; kept unchanged
+        # here because it is a runtime error message, not a comment.
+        err_msg = "Path traversal detected. '{}' tries to travers out " \
+                  "of {}".format(full_path, parent_dir)
+        log(err_msg, hookenv.ERROR)
+        raise RuntimeError(err_msg)
+
+
+def _ensure_backup_dir_exists():
+    """Ensure that backup directory exists with proper ownership"""
+    uid = pwd.getpwnam("ubuntu").pw_uid
+    gid = grp.getgrnam("ubuntu").gr_gid
+    try:
+        os.mkdir(PKI_BACKUP, mode=0o700)
+    except FileExistsError:
+        pass
+    # Ownership is (re-)asserted even when the directory already existed.
+    os.chown(PKI_BACKUP, uid, gid)
+
+    # PKI_BACKUP may pre-exist as a non-directory (os.mkdir raised
+    # FileExistsError for that case too); reject it explicitly.
+    if not os.path.isdir(PKI_BACKUP):
+        log("Backup destination '{}' is not a directory".format(PKI_BACKUP),
+            hookenv.ERROR)
+        raise RuntimeError('Backup destination is not a directory.')
+
+
+def _verify_backup(pki_tar):
+    """
+    Verify that backup archive contains expected files
+
+    :param pki_tar: Tarfile object containing easyrsa backup
+    :raises: RuntimeError if required members are missing or if a member
+        path would escape the pki directory on extraction
+    """
+    log("Verifying backup", hookenv.DEBUG)
+    members = set(pki_tar.getnames())
+
+    # Check that backup contains all the expected/required files
+    if not TAR_STRUCTURE.issubset(members):
+        raise RuntimeError("Backup has unexpected content. Corrupted file?")
+    log("Check expected files - OK", hookenv.DEBUG)
+
+    # Check for path traversal attempts in tar file
+    pki_dir = os.path.join(easyrsa_directory, 'pki')
+    for path_ in members:
+        destination = os.path.join(pki_dir, path_)
+        _check_path_traversal(destination, pki_dir)
+
+
+def _replace_pki(pki_tar, pki_dir):
+    """
+    Safely replace easyrsa pki directory.
+
+    If there are any problems during the extraction of the backup, original
+    pki directory will be brought back and error raised.
+
+    NOTE(review): extractall() performs no path sanitisation of its own;
+    callers are expected to run _verify_backup() on the tar first (as
+    restore() does).
+
+    :param pki_tar: Tarfile object containing easyrsa backup
+    :param pki_dir: Destination for extraction of easyrsa backup
+    :return: None
+    """
+    # Keep the current pki aside so it can be restored on failure.
+    safety_backup = os.path.join(easyrsa_directory, 'pki_backup')
+    shutil.move(pki_dir, safety_backup)
+    try:
+        log("Extracting pki from backup", hookenv.DEBUG)
+        pki_tar.extractall(easyrsa_directory)
+    except Exception as exc:
+        log("pki extraction failed: {}".format(exc),
+            hookenv.WARNING)
+        log("Restoring original pki.", hookenv.INFO)
+        shutil.move(safety_backup, pki_dir)
+        raise RuntimeError('Failed to extract backup bundle. '
+                           'Error: {}'.format(exc))
+    else:
+        # Extraction succeeded; the safety copy is no longer needed.
+        shutil.rmtree(safety_backup)
+
+
+def _update_leadership_data(pki_dir, cert_dir, key_dir):
+    """
+    Update certificates stored in the leaders database.
+
+    :param pki_dir: location of easyrsa pki (usually /EasyRSA/pki)
+    :param cert_dir: location of issued certificates (usually /issued)
+    :param key_dir: location of private keys (usually /private)
+    :return: None
+    """
+    ca_cert = os.path.join(pki_dir, 'ca.crt')
+    ca_key = os.path.join(key_dir, 'ca.key')
+    serial_file = os.path.join(pki_dir, 'serial')
+    global_client_cert = os.path.join(cert_dir, 'client.crt')
+    global_client_key = os.path.join(key_dir, 'client.key')
+
+    with open(ca_cert, 'r') as stream:
+        data = stream.read()
+        log("Updating CA certificate in leader's database",
+            hookenv.INFO)
+        # Only the public certificate is echoed at DEBUG; the private keys
+        # below are stored without being logged.
+        log("CA certificate:\n{}".format(data), hookenv.DEBUG)
+        leader_set({
+            'certificate_authority': data})
+
+    with open(ca_key, 'r') as stream:
+        log("Updating CA key in leader's database",
+            hookenv.INFO)
+        leader_set({
+            'certificate_authority_key': stream.read()})
+
+    with open(serial_file, 'r') as stream:
+        log("Updating CA serial in leader's database",
+            hookenv.INFO)
+        leader_set({
+            'certificate_authority_serial': stream.read()})
+
+    with open(global_client_cert) as stream:
+        data = stream.read()
+        log("Updating (legacy) global client certificate in leader's database",
+            hookenv.INFO)
+        log(data, hookenv.DEBUG)
+        leader_set({'client_certificate': data})
+
+    with open(global_client_key) as stream:
+        log("Updating (legacy) global client key in leader's database",
+            hookenv.INFO)
+        leader_set({'client_key': stream.read()})
+
+
+def backup():
+    """
+    Implementation of easyrsa 'backup' action.
+
+    Currently deployed pki is packed into tarball and stored in the
+    backups directory.
+    """
+    _ensure_backup_dir_exists()
+
+    # Timestamped name keeps successive backups from clobbering each other.
+    timestamp = datetime.now().strftime('%Y-%m-%d_%H-%M-%S')
+    backup_name = 'easyrsa-{}.tar.gz'.format(timestamp)
+    backup_path = os.path.join(PKI_BACKUP, backup_name)
+    with tarfile.open(backup_path, mode='w:gz') as pki_tar:
+        # Store the pki tree under the archive-relative name 'pki'.
+        pki_tar.add(os.path.join(easyrsa_directory, 'pki'), 'pki')
+
+    log("Backup created and saved to '{}'".format(backup_path), hookenv.DEBUG)
+    function_set({
+        'command': 'juju scp {}:{} .'.format(local_unit(), backup_path),
+        'message': 'Backup archive created successfully. Use the juju scp '
+                   'command to copy it to your local machine.'
+    })
+
+
+def restore():
+ """
+ Implementation of easyrsa 'restore' action
+
+ Backup restoration process can be summarized as following:
+
+ * Selected backup is scanned and verified
+ * Contents of the backup are unpacked into /EasyRSA/pki
+ * Data that are stored in the local database are updated
+ * All units that have relation with this easyrsa unit will be notified
+ about the certificate changes.
+ """
+ pki_dir = os.path.join(easyrsa_directory, 'pki')
+ backup_name = function_get('name')
+
+ if backup_name is None:
+ raise RuntimeError("Parameter 'name' is required.")
+
+ log("Restoring pki from backup file {}".format(backup_name), hookenv.INFO)
+
+ backup_path = os.path.join(PKI_BACKUP, backup_name)
+
+ if not os.path.isfile(backup_path):
+ log("Backup file '{}' does not exists.".format(backup_path),
+ hookenv.ERROR)
+ raise RuntimeError("Backup with name '{}' does not exist. Use action "
+ "'list-backups' to list all available "
+ "backups".format(backup_name))
+
+ with tarfile.open(backup_path, 'r:gz') as pki_tar:
+ _verify_backup(pki_tar)
+ _replace_pki(pki_tar, pki_dir)
+
+ cert_dir = os.path.join(pki_dir, 'issued')
+ key_dir = os.path.join(pki_dir, 'private')
+
+ # Update CA and global client data stored in the local leader's database
+ # NOTE(mkalcok): Easyrsa does not really support HA mode, so it's usually
+ # run as a single unit/model
+ _update_leadership_data(pki_dir, cert_dir, key_dir)
+
+ ca_cert = leader_get('certificate_authority')
+ tls = endpoint_from_name('client')
+ log("Sending CA certificate to all related units", hookenv.INFO)
+ tls.set_ca(ca_cert)
+ log("Sending global client certificate and key to all related units",
+ hookenv.INFO)
+ tls.set_client_cert(leader_get('client_certificate'),
+ leader_get('client_key'))
+ for client in tls.all_requests:
+ try:
+ cert_file = os.path.join(cert_dir,
+ "{}.crt".format(client.common_name))
+ key_file = os.path.join(key_dir,
+ "{}.key".format(client.common_name))
+ with open(cert_file, 'r') as file:
+ cert = file.read()
+ with open(key_file, 'r') as file:
+ key = file.read()
+ log("Sending certificate for '{}' to unit"
+ "'{}'".format(client.common_name, client.unit_name),
+ hookenv.INFO)
+ log(cert, hookenv.DEBUG)
+ client.set_cert(cert, key)
+
+ except FileNotFoundError:
+ log("Certificate for '{}' not found in backup. "
+ "Generating new one.", hookenv.INFO)
+ if client.cert_type == 'client':
+ cert, key = create_client_certificate(client.common_name)
+ elif client.cert_type == 'server':
+ cert, key = create_server_certificate(client.common_name,
+ client.sans,
+ client.common_name)
+ else:
+ # This use case should not really happen as easyrsa charm
+ # does not support Application type certificates
+ raise RuntimeError('Unrecognized certificate request type '
+ '"{}".'.format(client.cert_type))
+ log("Sending certificate for '{}' to unit"
+ "'{}'".format(client.common_name, client.unit_name),
+ hookenv.INFO)
+ log(cert, hookenv.DEBUG)
+ client.set_cert(cert, key)
+
+ hookenv._run_atexit()
+
+
+def list_backups():
+ """Implementation of easyrsa 'list-backups' action."""
+ file_list = []
+
+ try:
+ file_list = os.listdir(PKI_BACKUP)
+ except FileNotFoundError:
+ pass
+
+ if file_list:
+ message = 'Available backup files:'
+ for file in file_list:
+ message += '\n{}'.format(file)
+ else:
+ message = 'There are no available backup files.'
+
+ function_set({'message': message})
+
+
+def delete_backup():
+ """Implementation of easyrsa 'delete-backup' action"""
+ backup_name = function_get('name')
+ delete_all = function_get('all')
+
+ if not delete_all:
+ if backup_name is None:
+ raise RuntimeError("Parameter 'name' is required if parameter "
+ "'all' is False.")
+ log("Removing backup '{}'".format(backup_name), hookenv.INFO)
+ delete_file = os.path.join(PKI_BACKUP, backup_name)
+ _check_path_traversal(delete_file, PKI_BACKUP)
+ try:
+ os.remove(delete_file)
+ except FileNotFoundError:
+ err_msg = "Backup file '{}' does not exist".format(backup_name)
+ log(err_msg, hookenv.ERROR)
+ raise RuntimeError(err_msg)
+ else:
+ log("Removing all backup files.", hookenv.INFO)
+ shutil.rmtree(PKI_BACKUP)
+
+
+ACTIONS = {'backup': backup,
+ 'restore': restore,
+ 'list-backups': list_backups,
+ 'delete-backup': delete_backup
+ }
+
+
+def main(args):
+ action_name = os.path.basename(args.pop(0))
+ try:
+ action = ACTIONS[action_name]
+ except KeyError:
+ s = "Action {} undefined".format(action_name)
+ function_fail(s)
+ return
+ else:
+ try:
+ log("Running action '{}'.".format(action_name))
+ action()
+ except Exception as e:
+ function_fail("Action {} failed: {}".format(action_name, str(e)))
+
+
+if __name__ == '__main__':
+ main(sys.argv)
diff --git a/easyrsa/bin/charm-env b/easyrsa/bin/charm-env
new file mode 100755
index 0000000..d211ce9
--- /dev/null
+++ b/easyrsa/bin/charm-env
@@ -0,0 +1,107 @@
+#!/bin/bash
+
+VERSION="1.0.0"
+
+
+find_charm_dirs() {
+    # Hopefully, $JUJU_CHARM_DIR is set so which venv to use is unambiguous.
+ if [[ -n "$JUJU_CHARM_DIR" || -n "$CHARM_DIR" ]]; then
+ if [[ -z "$JUJU_CHARM_DIR" ]]; then
+ # accept $CHARM_DIR to be more forgiving
+ export JUJU_CHARM_DIR="$CHARM_DIR"
+ fi
+ if [[ -z "$CHARM_DIR" ]]; then
+ # set CHARM_DIR as well to help with backwards compatibility
+ export CHARM_DIR="$JUJU_CHARM_DIR"
+ fi
+ return
+ fi
+ # Try to guess the value for JUJU_CHARM_DIR by looking for a non-subordinate
+    # (because there's got to be at least one principal) charm directory;
+ # if there are several, pick the first by alpha order.
+ agents_dir="/var/lib/juju/agents"
+ if [[ -d "$agents_dir" ]]; then
+ desired_charm="$1"
+ found_charm_dir=""
+ if [[ -n "$desired_charm" ]]; then
+ for charm_dir in $(/bin/ls -d "$agents_dir"/unit-*/charm); do
+ charm_name="$(grep -o '^['\''"]\?name['\''"]\?:.*' $charm_dir/metadata.yaml 2> /dev/null | sed -e 's/.*: *//' -e 's/['\''"]//g')"
+ if [[ "$charm_name" == "$desired_charm" ]]; then
+ if [[ -n "$found_charm_dir" ]]; then
+ >&2 echo "Ambiguous possibilities for JUJU_CHARM_DIR matching '$desired_charm'; please run within a Juju hook context"
+ exit 1
+ fi
+ found_charm_dir="$charm_dir"
+ fi
+ done
+ if [[ -z "$found_charm_dir" ]]; then
+ >&2 echo "Unable to determine JUJU_CHARM_DIR matching '$desired_charm'; please run within a Juju hook context"
+ exit 1
+ fi
+ export JUJU_CHARM_DIR="$found_charm_dir"
+ export CHARM_DIR="$found_charm_dir"
+ return
+ fi
+ # shellcheck disable=SC2126
+ non_subordinates="$(grep -L 'subordinate"\?:.*true' "$agents_dir"/unit-*/charm/metadata.yaml | wc -l)"
+ if [[ "$non_subordinates" -gt 1 ]]; then
+ >&2 echo 'Ambiguous possibilities for JUJU_CHARM_DIR; please use --charm or run within a Juju hook context'
+ exit 1
+ elif [[ "$non_subordinates" -eq 1 ]]; then
+ for charm_dir in $(/bin/ls -d "$agents_dir"/unit-*/charm); do
+ if grep -q 'subordinate"\?:.*true' "$charm_dir/metadata.yaml"; then
+ continue
+ fi
+ export JUJU_CHARM_DIR="$charm_dir"
+ export CHARM_DIR="$charm_dir"
+ return
+ done
+ fi
+ fi
+ >&2 echo 'Unable to determine JUJU_CHARM_DIR; please run within a Juju hook context'
+ exit 1
+}
+
+try_activate_venv() {
+ if [[ -d "$JUJU_CHARM_DIR/../.venv" ]]; then
+ . "$JUJU_CHARM_DIR/../.venv/bin/activate"
+ fi
+}
+
+find_wrapped() {
+ PATH="${PATH/\/usr\/local\/sbin:}" which "$(basename "$0")"
+}
+
+
+if [[ "$1" == "--version" || "$1" == "-v" ]]; then
+ echo "$VERSION"
+ exit 0
+fi
+
+
+# allow --charm option to hint which JUJU_CHARM_DIR to choose when ambiguous
+# NB: --charm option must come first
+# NB: option must be processed outside find_charm_dirs to modify $@
+charm_name=""
+if [[ "$1" == "--charm" ]]; then
+ charm_name="$2"
+ shift; shift
+fi
+
+find_charm_dirs "$charm_name"
+try_activate_venv
+export PYTHONPATH="$JUJU_CHARM_DIR/lib:$PYTHONPATH"
+
+if [[ "$(basename "$0")" == "charm-env" ]]; then
+ # being used as a shebang
+ exec "$@"
+elif [[ "$0" == "$BASH_SOURCE" ]]; then
+ # being invoked as a symlink wrapping something to find in the venv
+ exec "$(find_wrapped)" "$@"
+elif [[ "$(basename "$BASH_SOURCE")" == "charm-env" ]]; then
+ # being sourced directly; do nothing
+ /bin/true
+else
+ # being sourced for wrapped bash helpers
+ . "$(find_wrapped)"
+fi
diff --git a/easyrsa/bin/layer_option b/easyrsa/bin/layer_option
new file mode 100755
index 0000000..3253ef8
--- /dev/null
+++ b/easyrsa/bin/layer_option
@@ -0,0 +1,22 @@
+#!/usr/bin/env python3
+
+import sys
+import argparse
+from charms import layer
+
+
+parser = argparse.ArgumentParser(description='Access layer options.')
+parser.add_argument('section',
+ help='the section, or layer, the option is from')
+parser.add_argument('option',
+ help='the option to access')
+
+args = parser.parse_args()
+value = layer.options.get(args.section, args.option)
+if isinstance(value, bool):
+ sys.exit(0 if value else 1)
+elif isinstance(value, list):
+ for val in value:
+ print(val)
+else:
+ print(value)
diff --git a/easyrsa/config.yaml b/easyrsa/config.yaml
new file mode 100644
index 0000000..ffc0186
--- /dev/null
+++ b/easyrsa/config.yaml
@@ -0,0 +1 @@
+"options": {}
diff --git a/easyrsa/copyright b/easyrsa/copyright
new file mode 100644
index 0000000..c5e1c8f
--- /dev/null
+++ b/easyrsa/copyright
@@ -0,0 +1,13 @@
+Copyright 2016 Canonical Ltd.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
diff --git a/easyrsa/copyright.layer-basic b/easyrsa/copyright.layer-basic
new file mode 100644
index 0000000..d4fdd18
--- /dev/null
+++ b/easyrsa/copyright.layer-basic
@@ -0,0 +1,16 @@
+Format: http://dep.debian.net/deps/dep5/
+
+Files: *
+Copyright: Copyright 2015-2017, Canonical Ltd., All Rights Reserved.
+License: Apache License 2.0
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+ .
+ http://www.apache.org/licenses/LICENSE-2.0
+ .
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/easyrsa/copyright.layer-leadership b/easyrsa/copyright.layer-leadership
new file mode 100644
index 0000000..08b983f
--- /dev/null
+++ b/easyrsa/copyright.layer-leadership
@@ -0,0 +1,15 @@
+Copyright 2015-2016 Canonical Ltd.
+
+This file is part of the Leadership Layer for Juju.
+
+This program is free software: you can redistribute it and/or modify
+it under the terms of the GNU General Public License version 3, as
+published by the Free Software Foundation.
+
+This program is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranties of
+MERCHANTABILITY, SATISFACTORY QUALITY, or FITNESS FOR A PARTICULAR
+PURPOSE. See the GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with this program. If not, see <http://www.gnu.org/licenses/>.
diff --git a/easyrsa/copyright.layer-options b/easyrsa/copyright.layer-options
new file mode 100644
index 0000000..d4fdd18
--- /dev/null
+++ b/easyrsa/copyright.layer-options
@@ -0,0 +1,16 @@
+Format: http://dep.debian.net/deps/dep5/
+
+Files: *
+Copyright: Copyright 2015-2017, Canonical Ltd., All Rights Reserved.
+License: Apache License 2.0
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+ .
+ http://www.apache.org/licenses/LICENSE-2.0
+ .
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/easyrsa/copyright.layer-status b/easyrsa/copyright.layer-status
new file mode 100644
index 0000000..a91bdf1
--- /dev/null
+++ b/easyrsa/copyright.layer-status
@@ -0,0 +1,16 @@
+Format: http://dep.debian.net/deps/dep5/
+
+Files: *
+Copyright: Copyright 2018, Canonical Ltd., All Rights Reserved.
+License: Apache License 2.0
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+ .
+ http://www.apache.org/licenses/LICENSE-2.0
+ .
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/easyrsa/debug-scripts/charm-unitdata b/easyrsa/debug-scripts/charm-unitdata
new file mode 100755
index 0000000..d2aac60
--- /dev/null
+++ b/easyrsa/debug-scripts/charm-unitdata
@@ -0,0 +1,12 @@
+#!/usr/local/sbin/charm-env python3
+
+import debug_script
+import json
+from charmhelpers.core import unitdata
+
+kv = unitdata.kv()
+data = kv.getrange("")
+
+with debug_script.open_file("unitdata.json", "w") as f:
+ json.dump(data, f, indent=2)
+ f.write("\n")
diff --git a/easyrsa/debug-scripts/filesystem b/easyrsa/debug-scripts/filesystem
new file mode 100755
index 0000000..c5ec6d8
--- /dev/null
+++ b/easyrsa/debug-scripts/filesystem
@@ -0,0 +1,17 @@
+#!/bin/sh
+set -ux
+
+# report file system disk space usage
+df -hT > $DEBUG_SCRIPT_DIR/df-hT
+# estimate file space usage
+du -h / 2>&1 > $DEBUG_SCRIPT_DIR/du-h
+# list the mounted filesystems
+mount > $DEBUG_SCRIPT_DIR/mount
+# list the mounted systems with ascii trees
+findmnt -A > $DEBUG_SCRIPT_DIR/findmnt
+# list block devices
+lsblk > $DEBUG_SCRIPT_DIR/lsblk
+# list open files
+lsof 2>&1 > $DEBUG_SCRIPT_DIR/lsof
+# list local system locks
+lslocks > $DEBUG_SCRIPT_DIR/lslocks
diff --git a/easyrsa/debug-scripts/juju-logs b/easyrsa/debug-scripts/juju-logs
new file mode 100755
index 0000000..d27c458
--- /dev/null
+++ b/easyrsa/debug-scripts/juju-logs
@@ -0,0 +1,4 @@
+#!/bin/sh
+set -ux
+
+cp -v /var/log/juju/* $DEBUG_SCRIPT_DIR
diff --git a/easyrsa/debug-scripts/juju-network-get b/easyrsa/debug-scripts/juju-network-get
new file mode 100755
index 0000000..983c8c4
--- /dev/null
+++ b/easyrsa/debug-scripts/juju-network-get
@@ -0,0 +1,21 @@
+#!/usr/local/sbin/charm-env python3
+
+import os
+import subprocess
+import yaml
+import debug_script
+
+with open('metadata.yaml') as f:
+ metadata = yaml.load(f)
+
+relations = []
+for key in ['requires', 'provides', 'peers']:
+ relations += list(metadata.get(key, {}).keys())
+
+os.mkdir(os.path.join(debug_script.dir, 'relations'))
+
+for relation in relations:
+ path = 'relations/' + relation
+ with debug_script.open_file(path, 'w') as f:
+ cmd = ['network-get', relation]
+ subprocess.call(cmd, stdout=f, stderr=subprocess.STDOUT)
diff --git a/easyrsa/debug-scripts/network b/easyrsa/debug-scripts/network
new file mode 100755
index 0000000..944a355
--- /dev/null
+++ b/easyrsa/debug-scripts/network
@@ -0,0 +1,11 @@
+#!/bin/sh
+set -ux
+
+ifconfig -a > $DEBUG_SCRIPT_DIR/ifconfig
+cp -v /etc/resolv.conf $DEBUG_SCRIPT_DIR/resolv.conf
+cp -v /etc/network/interfaces $DEBUG_SCRIPT_DIR/interfaces
+netstat -planut > $DEBUG_SCRIPT_DIR/netstat
+route -n > $DEBUG_SCRIPT_DIR/route
+iptables-save > $DEBUG_SCRIPT_DIR/iptables-save
+dig google.com > $DEBUG_SCRIPT_DIR/dig-google
+ping -w 2 -i 0.1 google.com > $DEBUG_SCRIPT_DIR/ping-google
diff --git a/easyrsa/debug-scripts/packages b/easyrsa/debug-scripts/packages
new file mode 100755
index 0000000..b60a9cf
--- /dev/null
+++ b/easyrsa/debug-scripts/packages
@@ -0,0 +1,7 @@
+#!/bin/sh
+set -ux
+
+dpkg --list > $DEBUG_SCRIPT_DIR/dpkg-list
+snap list > $DEBUG_SCRIPT_DIR/snap-list
+pip2 list > $DEBUG_SCRIPT_DIR/pip2-list
+pip3 list > $DEBUG_SCRIPT_DIR/pip3-list
diff --git a/easyrsa/debug-scripts/sysctl b/easyrsa/debug-scripts/sysctl
new file mode 100755
index 0000000..a86a6c8
--- /dev/null
+++ b/easyrsa/debug-scripts/sysctl
@@ -0,0 +1,4 @@
+#!/bin/sh
+set -ux
+
+sysctl -a > $DEBUG_SCRIPT_DIR/sysctl
diff --git a/easyrsa/debug-scripts/systemd b/easyrsa/debug-scripts/systemd
new file mode 100755
index 0000000..8bb9b6f
--- /dev/null
+++ b/easyrsa/debug-scripts/systemd
@@ -0,0 +1,9 @@
+#!/bin/sh
+set -ux
+
+systemctl --all > $DEBUG_SCRIPT_DIR/systemctl
+journalctl > $DEBUG_SCRIPT_DIR/journalctl
+systemd-analyze time > $DEBUG_SCRIPT_DIR/systemd-analyze-time
+systemd-analyze blame > $DEBUG_SCRIPT_DIR/systemd-analyze-blame
+systemd-analyze critical-chain > $DEBUG_SCRIPT_DIR/systemd-analyze-critical-chain
+systemd-analyze dump > $DEBUG_SCRIPT_DIR/systemd-analyze-dump
diff --git a/easyrsa/docs/status.md b/easyrsa/docs/status.md
new file mode 100644
index 0000000..c6cceab
--- /dev/null
+++ b/easyrsa/docs/status.md
@@ -0,0 +1,91 @@
+
+
+```python
+maintenance(message)
+```
+
+Set the status to the `MAINTENANCE` state with the given operator message.
+
+__Parameters__
+
+- __`message` (str)__: Message to convey to the operator.
+
+
maint
+
+```python
+maint(message)
+```
+
+Shorthand alias for
+[maintenance](status.md#charms.layer.status.maintenance).
+
+__Parameters__
+
+- __`message` (str)__: Message to convey to the operator.
+
+
blocked
+
+```python
+blocked(message)
+```
+
+Set the status to the `BLOCKED` state with the given operator message.
+
+__Parameters__
+
+- __`message` (str)__: Message to convey to the operator.
+
+
waiting
+
+```python
+waiting(message)
+```
+
+Set the status to the `WAITING` state with the given operator message.
+
+__Parameters__
+
+- __`message` (str)__: Message to convey to the operator.
+
+
active
+
+```python
+active(message)
+```
+
+Set the status to the `ACTIVE` state with the given operator message.
+
+__Parameters__
+
+- __`message` (str)__: Message to convey to the operator.
+
+
status_set
+
+```python
+status_set(workload_state, message)
+```
+
+Set the status to the given workload state with a message.
+
+__Parameters__
+
+- __`workload_state` (WorkloadState or str)__: State of the workload. Should be
+ a [WorkloadState](status.md#charms.layer.status.WorkloadState) enum
+ member, or the string value of one of those members.
+- __`message` (str)__: Message to convey to the operator.
+
diff --git a/easyrsa/hooks/client-relation-broken b/easyrsa/hooks/client-relation-broken
new file mode 100755
index 0000000..9858c6b
--- /dev/null
+++ b/easyrsa/hooks/client-relation-broken
@@ -0,0 +1,22 @@
+#!/usr/bin/env python3
+
+# Load modules from $JUJU_CHARM_DIR/lib
+import sys
+sys.path.append('lib')
+
+from charms.layer import basic # noqa
+basic.bootstrap_charm_deps()
+
+from charmhelpers.core import hookenv # noqa
+hookenv.atstart(basic.init_config_states)
+hookenv.atexit(basic.clear_config_states)
+
+
+# This will load and run the appropriate @hook and other decorated
+# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive,
+# and $JUJU_CHARM_DIR/hooks/relations.
+#
+# See https://jujucharms.com/docs/stable/authors-charm-building
+# for more information on this pattern.
+from charms.reactive import main # noqa
+main()
diff --git a/easyrsa/hooks/client-relation-changed b/easyrsa/hooks/client-relation-changed
new file mode 100755
index 0000000..9858c6b
--- /dev/null
+++ b/easyrsa/hooks/client-relation-changed
@@ -0,0 +1,22 @@
+#!/usr/bin/env python3
+
+# Load modules from $JUJU_CHARM_DIR/lib
+import sys
+sys.path.append('lib')
+
+from charms.layer import basic # noqa
+basic.bootstrap_charm_deps()
+
+from charmhelpers.core import hookenv # noqa
+hookenv.atstart(basic.init_config_states)
+hookenv.atexit(basic.clear_config_states)
+
+
+# This will load and run the appropriate @hook and other decorated
+# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive,
+# and $JUJU_CHARM_DIR/hooks/relations.
+#
+# See https://jujucharms.com/docs/stable/authors-charm-building
+# for more information on this pattern.
+from charms.reactive import main # noqa
+main()
diff --git a/easyrsa/hooks/client-relation-created b/easyrsa/hooks/client-relation-created
new file mode 100755
index 0000000..9858c6b
--- /dev/null
+++ b/easyrsa/hooks/client-relation-created
@@ -0,0 +1,22 @@
+#!/usr/bin/env python3
+
+# Load modules from $JUJU_CHARM_DIR/lib
+import sys
+sys.path.append('lib')
+
+from charms.layer import basic # noqa
+basic.bootstrap_charm_deps()
+
+from charmhelpers.core import hookenv # noqa
+hookenv.atstart(basic.init_config_states)
+hookenv.atexit(basic.clear_config_states)
+
+
+# This will load and run the appropriate @hook and other decorated
+# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive,
+# and $JUJU_CHARM_DIR/hooks/relations.
+#
+# See https://jujucharms.com/docs/stable/authors-charm-building
+# for more information on this pattern.
+from charms.reactive import main # noqa
+main()
diff --git a/easyrsa/hooks/client-relation-departed b/easyrsa/hooks/client-relation-departed
new file mode 100755
index 0000000..9858c6b
--- /dev/null
+++ b/easyrsa/hooks/client-relation-departed
@@ -0,0 +1,22 @@
+#!/usr/bin/env python3
+
+# Load modules from $JUJU_CHARM_DIR/lib
+import sys
+sys.path.append('lib')
+
+from charms.layer import basic # noqa
+basic.bootstrap_charm_deps()
+
+from charmhelpers.core import hookenv # noqa
+hookenv.atstart(basic.init_config_states)
+hookenv.atexit(basic.clear_config_states)
+
+
+# This will load and run the appropriate @hook and other decorated
+# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive,
+# and $JUJU_CHARM_DIR/hooks/relations.
+#
+# See https://jujucharms.com/docs/stable/authors-charm-building
+# for more information on this pattern.
+from charms.reactive import main # noqa
+main()
diff --git a/easyrsa/hooks/client-relation-joined b/easyrsa/hooks/client-relation-joined
new file mode 100755
index 0000000..9858c6b
--- /dev/null
+++ b/easyrsa/hooks/client-relation-joined
@@ -0,0 +1,22 @@
+#!/usr/bin/env python3
+
+# Load modules from $JUJU_CHARM_DIR/lib
+import sys
+sys.path.append('lib')
+
+from charms.layer import basic # noqa
+basic.bootstrap_charm_deps()
+
+from charmhelpers.core import hookenv # noqa
+hookenv.atstart(basic.init_config_states)
+hookenv.atexit(basic.clear_config_states)
+
+
+# This will load and run the appropriate @hook and other decorated
+# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive,
+# and $JUJU_CHARM_DIR/hooks/relations.
+#
+# See https://jujucharms.com/docs/stable/authors-charm-building
+# for more information on this pattern.
+from charms.reactive import main # noqa
+main()
diff --git a/easyrsa/hooks/config-changed b/easyrsa/hooks/config-changed
new file mode 100755
index 0000000..9858c6b
--- /dev/null
+++ b/easyrsa/hooks/config-changed
@@ -0,0 +1,22 @@
+#!/usr/bin/env python3
+
+# Load modules from $JUJU_CHARM_DIR/lib
+import sys
+sys.path.append('lib')
+
+from charms.layer import basic # noqa
+basic.bootstrap_charm_deps()
+
+from charmhelpers.core import hookenv # noqa
+hookenv.atstart(basic.init_config_states)
+hookenv.atexit(basic.clear_config_states)
+
+
+# This will load and run the appropriate @hook and other decorated
+# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive,
+# and $JUJU_CHARM_DIR/hooks/relations.
+#
+# See https://jujucharms.com/docs/stable/authors-charm-building
+# for more information on this pattern.
+from charms.reactive import main # noqa
+main()
diff --git a/easyrsa/hooks/hook.template b/easyrsa/hooks/hook.template
new file mode 100644
index 0000000..9858c6b
--- /dev/null
+++ b/easyrsa/hooks/hook.template
@@ -0,0 +1,22 @@
+#!/usr/bin/env python3
+
+# Load modules from $JUJU_CHARM_DIR/lib
+import sys
+sys.path.append('lib')
+
+from charms.layer import basic # noqa
+basic.bootstrap_charm_deps()
+
+from charmhelpers.core import hookenv # noqa
+hookenv.atstart(basic.init_config_states)
+hookenv.atexit(basic.clear_config_states)
+
+
+# This will load and run the appropriate @hook and other decorated
+# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive,
+# and $JUJU_CHARM_DIR/hooks/relations.
+#
+# See https://jujucharms.com/docs/stable/authors-charm-building
+# for more information on this pattern.
+from charms.reactive import main # noqa
+main()
diff --git a/easyrsa/hooks/install b/easyrsa/hooks/install
new file mode 100755
index 0000000..9858c6b
--- /dev/null
+++ b/easyrsa/hooks/install
@@ -0,0 +1,22 @@
+#!/usr/bin/env python3
+
+# Load modules from $JUJU_CHARM_DIR/lib
+import sys
+sys.path.append('lib')
+
+from charms.layer import basic # noqa
+basic.bootstrap_charm_deps()
+
+from charmhelpers.core import hookenv # noqa
+hookenv.atstart(basic.init_config_states)
+hookenv.atexit(basic.clear_config_states)
+
+
+# This will load and run the appropriate @hook and other decorated
+# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive,
+# and $JUJU_CHARM_DIR/hooks/relations.
+#
+# See https://jujucharms.com/docs/stable/authors-charm-building
+# for more information on this pattern.
+from charms.reactive import main # noqa
+main()
diff --git a/easyrsa/hooks/leader-elected b/easyrsa/hooks/leader-elected
new file mode 100755
index 0000000..9858c6b
--- /dev/null
+++ b/easyrsa/hooks/leader-elected
@@ -0,0 +1,22 @@
+#!/usr/bin/env python3
+
+# Load modules from $JUJU_CHARM_DIR/lib
+import sys
+sys.path.append('lib')
+
+from charms.layer import basic # noqa
+basic.bootstrap_charm_deps()
+
+from charmhelpers.core import hookenv # noqa
+hookenv.atstart(basic.init_config_states)
+hookenv.atexit(basic.clear_config_states)
+
+
+# This will load and run the appropriate @hook and other decorated
+# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive,
+# and $JUJU_CHARM_DIR/hooks/relations.
+#
+# See https://jujucharms.com/docs/stable/authors-charm-building
+# for more information on this pattern.
+from charms.reactive import main # noqa
+main()
diff --git a/easyrsa/hooks/leader-settings-changed b/easyrsa/hooks/leader-settings-changed
new file mode 100755
index 0000000..9858c6b
--- /dev/null
+++ b/easyrsa/hooks/leader-settings-changed
@@ -0,0 +1,22 @@
+#!/usr/bin/env python3
+
+# Load modules from $JUJU_CHARM_DIR/lib
+import sys
+sys.path.append('lib')
+
+from charms.layer import basic # noqa
+basic.bootstrap_charm_deps()
+
+from charmhelpers.core import hookenv # noqa
+hookenv.atstart(basic.init_config_states)
+hookenv.atexit(basic.clear_config_states)
+
+
+# This will load and run the appropriate @hook and other decorated
+# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive,
+# and $JUJU_CHARM_DIR/hooks/relations.
+#
+# See https://jujucharms.com/docs/stable/authors-charm-building
+# for more information on this pattern.
+from charms.reactive import main # noqa
+main()
diff --git a/easyrsa/hooks/post-series-upgrade b/easyrsa/hooks/post-series-upgrade
new file mode 100755
index 0000000..9858c6b
--- /dev/null
+++ b/easyrsa/hooks/post-series-upgrade
@@ -0,0 +1,22 @@
+#!/usr/bin/env python3
+
+# Load modules from $JUJU_CHARM_DIR/lib
+import sys
+sys.path.append('lib')
+
+from charms.layer import basic # noqa
+basic.bootstrap_charm_deps()
+
+from charmhelpers.core import hookenv # noqa
+hookenv.atstart(basic.init_config_states)
+hookenv.atexit(basic.clear_config_states)
+
+
+# This will load and run the appropriate @hook and other decorated
+# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive,
+# and $JUJU_CHARM_DIR/hooks/relations.
+#
+# See https://jujucharms.com/docs/stable/authors-charm-building
+# for more information on this pattern.
+from charms.reactive import main # noqa
+main()
diff --git a/easyrsa/hooks/pre-series-upgrade b/easyrsa/hooks/pre-series-upgrade
new file mode 100755
index 0000000..9858c6b
--- /dev/null
+++ b/easyrsa/hooks/pre-series-upgrade
@@ -0,0 +1,22 @@
+#!/usr/bin/env python3
+
+# Load modules from $JUJU_CHARM_DIR/lib
+import sys
+sys.path.append('lib')
+
+from charms.layer import basic # noqa
+basic.bootstrap_charm_deps()
+
+from charmhelpers.core import hookenv # noqa
+hookenv.atstart(basic.init_config_states)
+hookenv.atexit(basic.clear_config_states)
+
+
+# This will load and run the appropriate @hook and other decorated
+# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive,
+# and $JUJU_CHARM_DIR/hooks/relations.
+#
+# See https://jujucharms.com/docs/stable/authors-charm-building
+# for more information on this pattern.
+from charms.reactive import main # noqa
+main()
diff --git a/easyrsa/hooks/relations/tls-certificates/.gitignore b/easyrsa/hooks/relations/tls-certificates/.gitignore
new file mode 100644
index 0000000..93813bc
--- /dev/null
+++ b/easyrsa/hooks/relations/tls-certificates/.gitignore
@@ -0,0 +1,4 @@
+.tox
+__pycache__
+*.pyc
+_build
diff --git a/easyrsa/hooks/relations/tls-certificates/README.md b/easyrsa/hooks/relations/tls-certificates/README.md
new file mode 100644
index 0000000..733da6d
--- /dev/null
+++ b/easyrsa/hooks/relations/tls-certificates/README.md
@@ -0,0 +1,90 @@
+# Interface tls-certificates
+
+This is a [Juju][] interface layer that enables a charm which requires TLS
+certificates to relate to a charm which can provide them, such as [Vault][] or
+[EasyRSA][].
+
+To get started please read the [Introduction to PKI][] which defines some PKI
+terms, concepts and processes used in this document.
+
+# Example Usage
+
+Let's say you have a charm which needs a server certificate for a service it
+provides to other charms and a client certificate for a database it consumes
+from another charm. The charm provides its own service on the `clients`
+relation endpoint, and it consumes the database on the `db` relation endpoint.
+
+First, you must define the relation endpoint in your charm's `metadata.yaml`:
+
+```yaml
+requires:
+ cert-provider:
+ interface: tls-certificates
+```
+
+Next, you must ensure the interface layer is included in your `layer.yaml`:
+
+```yaml
+includes:
+ - interface:tls-certificates
+```
+
+Then, in your reactive code, add the following, changing `update_certs` to
+handle the certificates however your charm needs:
+
+```python
+from charmhelpers.core import hookenv, host
+from charms.reactive import endpoint_from_flag
+
+
+@when('cert-provider.ca.changed')
+def install_root_ca_cert():
+ cert_provider = endpoint_from_flag('cert-provider.ca.available')
+ host.install_ca_cert(cert_provider.root_ca_cert)
+ clear_flag('cert-provider.ca.changed')
+
+
+@when('cert-provider.available')
+def request_certificates():
+ cert_provider = endpoint_from_flag('cert-provider.available')
+
+ # get ingress info
+ ingress_for_clients = hookenv.network_get('clients')['ingress-addresses']
+ ingress_for_db = hookenv.network_get('db')['ingress-addresses']
+
+ # use first ingress address as primary and any additional as SANs
+    server_cn, server_sans = ingress_for_clients[0], ingress_for_clients[1:]
+    client_cn, client_sans = ingress_for_db[0], ingress_for_db[1:]
+
+ # request a single server and single client cert; note that multiple certs
+ # of either type can be requested as long as they have unique common names
+ cert_provider.request_server_cert(server_cn, server_sans)
+ cert_provider.request_client_cert(client_cn, client_sans)
+
+
+@when('cert-provider.certs.changed')
+def update_certs():
+ cert_provider = endpoint_from_flag('cert-provider.available')
+ server_cert = cert_provider.server_certs[0] # only requested one
+ myserver.update_server_cert(server_cert.cert, server_cert.key)
+
+ client_cert = cert_provider.client_certs[0] # only requested one
+ myclient.update_client_cert(client_cert.cert, client_cert.key)
+ clear_flag('cert-provider.certs.changed')
+```
+
+
+# Reference
+
+ * [Requires](docs/requires.md)
+ * [Provides](docs/provides.md)
+
+# Contact Information
+
+Maintainer: Cory Johns <Cory.Johns@canonical.com>
+
+
+[Juju]: https://jujucharms.com
+[Vault]: https://jujucharms.com/u/openstack-charmers/vault
+[EasyRSA]: https://jujucharms.com/u/containers/easyrsa
+[Introduction to PKI]: https://github.com/OpenVPN/easy-rsa/blob/master/doc/Intro-To-PKI.md
diff --git a/easyrsa/hooks/relations/tls-certificates/__init__.py b/easyrsa/hooks/relations/tls-certificates/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/easyrsa/hooks/relations/tls-certificates/docs/common.md b/easyrsa/hooks/relations/tls-certificates/docs/common.md
new file mode 100644
index 0000000..25d0e08
--- /dev/null
+++ b/easyrsa/hooks/relations/tls-certificates/docs/common.md
@@ -0,0 +1,51 @@
+
+
+Name of the application which the request came from.
+
+:returns: Name of application
+:rtype: str
+
+
cert
+
+
+The cert published for this request, if any.
+
+
cert_type
+
+
+Type of certificate, 'server' or 'client', being requested.
+
+
resolve_unit_name
+
+```python
+CertificateRequest.resolve_unit_name(unit)
+```
+Return name of unit associated with this request.
+
+unit_name should be provided in the relation data to ensure
+compatibility with cross-model relations. If the unit name
+is absent then fall back to unit_name attribute of the
+unit associated with this request.
+
+:param unit: Unit to extract name from
+:type unit: charms.reactive.endpoints.RelatedUnit
+:returns: Name of unit
+:rtype: str
+
+
Certificate
+
+```python
+Certificate(self, cert_type, common_name, cert, key)
+```
+
+Represents a created certificate and key.
+
+The ``cert_type``, ``common_name``, ``cert``, and ``key`` values can
+be accessed either as properties or as the contents of the dict.
+
diff --git a/easyrsa/hooks/relations/tls-certificates/docs/provides.md b/easyrsa/hooks/relations/tls-certificates/docs/provides.md
new file mode 100644
index 0000000..c213546
--- /dev/null
+++ b/easyrsa/hooks/relations/tls-certificates/docs/provides.md
@@ -0,0 +1,212 @@
+
provides
+
+
+
TlsProvides
+
+```python
+TlsProvides(self, endpoint_name, relation_ids=None)
+```
+
+The provider's side of the interface protocol.
+
+The following flags may be set:
+
+ * `{endpoint_name}.available`
+ Whenever any clients are joined.
+
+ * `{endpoint_name}.certs.requested`
+ When there are new certificate requests of any kind to be processed.
+ The requests can be accessed via [new_requests][].
+
+ * `{endpoint_name}.server.certs.requested`
+ When there are new server certificate requests to be processed.
+ The requests can be accessed via [new_server_requests][].
+
+ * `{endpoint_name}.client.certs.requested`
+ When there are new client certificate requests to be processed.
+ The requests can be accessed via [new_client_requests][].
+
+[Certificate]: common.md#tls_certificates_common.Certificate
+[CertificateRequest]: common.md#tls_certificates_common.CertificateRequest
+[all_requests]: provides.md#provides.TlsProvides.all_requests
+[new_requests]: provides.md#provides.TlsProvides.new_requests
+[new_server_requests]: provides.md#provides.TlsProvides.new_server_requests
+[new_client_requests]: provides.md#provides.TlsProvides.new_client_requests
+
+
all_published_certs
+
+
+List of all [Certificate][] instances that this provider has published
+for all related applications.
+
+
all_requests
+
+
+List of all requests that have been made.
+
+Each will be an instance of [CertificateRequest][].
+
+Example usage:
+
+```python
+@when('certs.regen',
+ 'tls.certs.available')
+def regen_all_certs():
+ tls = endpoint_from_flag('tls.certs.available')
+ for request in tls.all_requests:
+ cert, key = generate_cert(request.cert_type,
+ request.common_name,
+ request.sans)
+ request.set_cert(cert, key)
+```
+
+
new_application_requests
+
+
+Filtered view of [new_requests][] that only includes application cert
+requests.
+
+Each will be an instance of [ApplicationCertificateRequest][].
+
+Example usage:
+
+```python
+@when('tls.application.certs.requested')
+def gen_application_certs():
+ tls = endpoint_from_flag('tls.application.certs.requested')
+ for request in tls.new_application_requests:
+ cert, key = generate_application_cert(request.common_name,
+ request.sans)
+ request.set_cert(cert, key)
+```
+
+
new_client_requests
+
+
+Filtered view of [new_requests][] that only includes client cert
+requests.
+
+Each will be an instance of [CertificateRequest][].
+
+Example usage:
+
+```python
+@when('tls.client.certs.requested')
+def gen_client_certs():
+ tls = endpoint_from_flag('tls.client.certs.requested')
+ for request in tls.new_client_requests:
+ cert, key = generate_client_cert(request.common_name,
+ request.sans)
+ request.set_cert(cert, key)
+```
+
+
new_requests
+
+
+Filtered view of [all_requests][] that only includes requests that
+haven't been handled.
+
+Each will be an instance of [CertificateRequest][].
+
+This collection can also be further filtered by request type using
+[new_server_requests][] or [new_client_requests][].
+
+Example usage:
+
+```python
+@when('tls.certs.requested')
+def gen_certs():
+ tls = endpoint_from_flag('tls.certs.requested')
+ for request in tls.new_requests:
+ cert, key = generate_cert(request.cert_type,
+ request.common_name,
+ request.sans)
+ request.set_cert(cert, key)
+```
+
+
new_server_requests
+
+
+Filtered view of [new_requests][] that only includes server cert
+requests.
+
+Each will be an instance of [CertificateRequest][].
+
+Example usage:
+
+```python
+@when('tls.server.certs.requested')
+def gen_server_certs():
+ tls = endpoint_from_flag('tls.server.certs.requested')
+ for request in tls.new_server_requests:
+ cert, key = generate_server_cert(request.common_name,
+ request.sans)
+ request.set_cert(cert, key)
+```
+
+
set_ca
+
+```python
+TlsProvides.set_ca(certificate_authority)
+```
+
+Publish the CA to all related applications.
+
+
set_chain
+
+```python
+TlsProvides.set_chain(chain)
+```
+
+Publish the chain of trust to all related applications.
+
+
set_client_cert
+
+```python
+TlsProvides.set_client_cert(cert, key)
+```
+
+Deprecated. This is only for backwards compatibility.
+
+Publish a globally shared client cert and key.
+
+
set_server_cert
+
+```python
+TlsProvides.set_server_cert(scope, cert, key)
+```
+
+Deprecated. Use one of the [new_requests][] collections and
+`request.set_cert()` instead.
+
+Set the server cert and key for the request identified by `scope`.
+
+
+
+```python
+TlsProvides.get_server_requests()
+```
+
+Deprecated. Use the [new_requests][] or [server_requests][]
+collections instead.
+
+One provider can have many requests to generate server certificates.
+Return a map of all server request objects indexed by a unique
+identifier.
+
diff --git a/easyrsa/hooks/relations/tls-certificates/docs/requires.md b/easyrsa/hooks/relations/tls-certificates/docs/requires.md
new file mode 100644
index 0000000..fdec902
--- /dev/null
+++ b/easyrsa/hooks/relations/tls-certificates/docs/requires.md
@@ -0,0 +1,207 @@
+
requires
+
+
+
TlsRequires
+
+```python
+TlsRequires(self, endpoint_name, relation_ids=None)
+```
+
+The client's side of the interface protocol.
+
+The following flags may be set:
+
+ * `{endpoint_name}.available`
+ Whenever the relation is joined.
+
+ * `{endpoint_name}.ca.available`
+ When the root CA information is available via the [root_ca_cert][] and
+ [root_ca_chain][] properties.
+
+ * `{endpoint_name}.ca.changed`
+ When the root CA information has changed, whether because
+ they have just become available or if they were regenerated by the CA.
+ Once processed this flag should be removed by the charm.
+
+ * `{endpoint_name}.certs.available`
+ When the requested server or client certs are available.
+
+ * `{endpoint_name}.certs.changed`
+ When the requested server or client certs have changed, whether because
+ they have just become available or if they were regenerated by the CA.
+ Once processed this flag should be removed by the charm.
+
+ * `{endpoint_name}.server.certs.available`
+ When the server certificates requested by [request_server_cert][] are
+ available via the [server_certs][] collection.
+
+ * `{endpoint_name}.server.certs.changed`
+ When the requested server certificates have changed, whether because
+ they have just become available or if they were regenerated by the CA.
+ Once processed this flag should be removed by the charm.
+
+ * `{endpoint_name}.client.certs.available`
+ When the client certificates requested by [request_client_cert][] are
+ available via the [client_certs][] collection.
+
+ * `{endpoint_name}.client.certs.changed`
+ When the requested client certificates have changed, whether because
+ they have just become available or if they were regenerated by the CA.
+ Once processed this flag should be removed by the charm.
+
+The following flags have been deprecated:
+
+ * `{endpoint_name}.server.cert.available`
+ * `{endpoint_name}.client.cert.available`
+ * `{endpoint_name}.batch.cert.available`
+
+[Certificate]: common.md#tls_certificates_common.Certificate
+[CertificateRequest]: common.md#tls_certificates_common.CertificateRequest
+[root_ca_cert]: requires.md#requires.TlsRequires.root_ca_cert
+[root_ca_chain]: requires.md#requires.TlsRequires.root_ca_chain
+[request_server_cert]: requires.md#requires.TlsRequires.request_server_cert
+[request_client_cert]: requires.md#requires.TlsRequires.request_client_cert
+[server_certs]: requires.md#requires.TlsRequires.server_certs
+[server_certs_map]: requires.md#requires.TlsRequires.server_certs_map
+[client_certs]: requires.md#requires.TlsRequires.client_certs
+
+
application_certs
+
+
+List of [Certificate][] instances for all available application certs.
+
+
client_certs
+
+
+List of [Certificate][] instances for all available client certs.
+
+
client_certs_map
+
+
+Mapping of client [Certificate][] instances by their `common_name`.
+
+
root_ca_cert
+
+
+Root CA certificate.
+
+
root_ca_chain
+
+
+The chain of trust for the root CA.
+
+
server_certs
+
+
+List of [Certificate][] instances for all available server certs.
+
+
server_certs_map
+
+
+Mapping of server [Certificate][] instances by their `common_name`.
+
+
get_ca
+
+```python
+TlsRequires.get_ca()
+```
+
+Return the root CA certificate.
+
+Same as [root_ca_cert][].
+
+
get_chain
+
+```python
+TlsRequires.get_chain()
+```
+
+Return the chain of trust for the root CA.
+
+Same as [root_ca_chain][].
+
+
get_client_cert
+
+```python
+TlsRequires.get_client_cert()
+```
+
+Deprecated. Use [request_client_cert][] and the [client_certs][]
+collection instead.
+
+Return a globally shared client certificate and key.
+
+
get_server_cert
+
+```python
+TlsRequires.get_server_cert()
+```
+
+Deprecated. Use the [server_certs][] collection instead.
+
+Return the cert and key of the first server certificate requested.
+
+
get_batch_requests
+
+```python
+TlsRequires.get_batch_requests()
+```
+
+Deprecated. Use [server_certs_map][] instead.
+
+Mapping of server [Certificate][] instances by their `common_name`.
+
+
request_server_cert
+
+```python
+TlsRequires.request_server_cert(cn, sans=None, cert_name=None)
+```
+
+Request a server certificate and key be generated for the given
+common name (`cn`) and optional list of alternative names (`sans`).
+
+The `cert_name` is deprecated and not needed.
+
+This can be called multiple times to request more than one server
+certificate, although the common names must be unique. If called
+again with the same common name, it will be ignored.
+
+
+
+```python
+TlsRequires.request_server_certs()
+```
+
+Deprecated. Just use [request_server_cert][]; this does nothing.
+
+
request_client_cert
+
+```python
+TlsRequires.request_client_cert(cn, sans)
+```
+
+Request a client certificate and key be generated for the given
+common name (`cn`) and list of alternative names (`sans`).
+
+This can be called multiple times to request more than one client
+certificate, although the common names must be unique. If called
+again with the same common name, it will be ignored.
+
+
request_application_cert
+
+```python
+TlsRequires.request_application_cert(cn, sans)
+```
+
+Request an application certificate and key be generated for the given
+common name (`cn`) and list of alternative names (`sans` ) of this
+unit and all peer units. All units will share a single certificate.
+
diff --git a/easyrsa/hooks/relations/tls-certificates/interface.yaml b/easyrsa/hooks/relations/tls-certificates/interface.yaml
new file mode 100644
index 0000000..beec53b
--- /dev/null
+++ b/easyrsa/hooks/relations/tls-certificates/interface.yaml
@@ -0,0 +1,6 @@
+name: tls-certificates
+summary: |
+ A Transport Layer Security (TLS) charm layer that uses requires and provides
+  to exchange certificates.
+version: 1
+repo: https://github.com/juju-solutions/interface-tls-certificates
diff --git a/easyrsa/hooks/relations/tls-certificates/make_docs b/easyrsa/hooks/relations/tls-certificates/make_docs
new file mode 100644
index 0000000..2f2274a
--- /dev/null
+++ b/easyrsa/hooks/relations/tls-certificates/make_docs
@@ -0,0 +1,23 @@
+#!.tox/py3/bin/python
+
+import sys
+import importlib
+from pathlib import Path
+from shutil import rmtree
+from unittest.mock import patch
+
+import pydocmd.__main__
+
+
+with patch('charmhelpers.core.hookenv.metadata') as metadata:
+ metadata.return_value = {
+ 'requires': {'cert': {'interface': 'tls-certificates'}},
+ 'provides': {'cert': {'interface': 'tls-certificates'}},
+ }
+ sys.path.append('..')
+ sys.modules[''] = importlib.import_module(Path.cwd().name)
+ print(sys.argv)
+ if len(sys.argv) == 1:
+ sys.argv.extend(['build'])
+ pydocmd.__main__.main()
+ rmtree('_build')
diff --git a/easyrsa/hooks/relations/tls-certificates/provides.py b/easyrsa/hooks/relations/tls-certificates/provides.py
new file mode 100644
index 0000000..0262baa
--- /dev/null
+++ b/easyrsa/hooks/relations/tls-certificates/provides.py
@@ -0,0 +1,301 @@
+if not __package__:
+ # fix relative imports when building docs
+ import sys
+ __package__ = sys.modules[''].__name__
+
+from charms.reactive import Endpoint
+from charms.reactive import when, when_not
+from charms.reactive import set_flag, clear_flag, toggle_flag
+
+from .tls_certificates_common import (
+ ApplicationCertificateRequest,
+ CertificateRequest
+)
+
+
+class TlsProvides(Endpoint):
+ """
+ The provider's side of the interface protocol.
+
+ The following flags may be set:
+
+ * `{endpoint_name}.available`
+ Whenever any clients are joined.
+
+ * `{endpoint_name}.certs.requested`
+ When there are new certificate requests of any kind to be processed.
+ The requests can be accessed via [new_requests][].
+
+ * `{endpoint_name}.server.certs.requested`
+ When there are new server certificate requests to be processed.
+ The requests can be accessed via [new_server_requests][].
+
+ * `{endpoint_name}.client.certs.requested`
+ When there are new client certificate requests to be processed.
+ The requests can be accessed via [new_client_requests][].
+
+ [Certificate]: common.md#tls_certificates_common.Certificate
+ [CertificateRequest]: common.md#tls_certificates_common.CertificateRequest
+ [all_requests]: provides.md#provides.TlsProvides.all_requests
+ [new_requests]: provides.md#provides.TlsProvides.new_requests
+ [new_server_requests]: provides.md#provides.TlsProvides.new_server_requests
+ [new_client_requests]: provides.md#provides.TlsProvides.new_client_requests
+ """
+
+ @when('endpoint.{endpoint_name}.joined')
+ def joined(self):
+ set_flag(self.expand_name('{endpoint_name}.available'))
+ toggle_flag(self.expand_name('{endpoint_name}.certs.requested'),
+ self.new_requests)
+ toggle_flag(self.expand_name('{endpoint_name}.server.certs.requested'),
+ self.new_server_requests)
+ toggle_flag(self.expand_name('{endpoint_name}.client.certs.requested'),
+ self.new_client_requests)
+ toggle_flag(
+ self.expand_name('{endpoint_name}.application.certs.requested'),
+ self.new_application_requests)
+ # For backwards compatibility, set the old "cert" flags as well
+ toggle_flag(self.expand_name('{endpoint_name}.server.cert.requested'),
+ self.new_server_requests)
+ toggle_flag(self.expand_name('{endpoint_name}.client.cert.requested'),
+ self.new_client_requests)
+
+ @when_not('endpoint.{endpoint_name}.joined')
+ def broken(self):
+ clear_flag(self.expand_name('{endpoint_name}.available'))
+ clear_flag(self.expand_name('{endpoint_name}.certs.requested'))
+ clear_flag(self.expand_name('{endpoint_name}.server.certs.requested'))
+ clear_flag(self.expand_name('{endpoint_name}.client.certs.requested'))
+ clear_flag(
+ self.expand_name('{endpoint_name}.application.certs.requested'))
+
+ def set_ca(self, certificate_authority):
+ """
+ Publish the CA to all related applications.
+ """
+ for relation in self.relations:
+ # All the clients get the same CA, so send it to them.
+ relation.to_publish_raw['ca'] = certificate_authority
+
+ def set_chain(self, chain):
+ """
+ Publish the chain of trust to all related applications.
+ """
+ for relation in self.relations:
+ # All the clients get the same chain, so send it to them.
+ relation.to_publish_raw['chain'] = chain
+
+ def set_client_cert(self, cert, key):
+ """
+ Deprecated. This is only for backwards compatibility.
+
+ Publish a globally shared client cert and key.
+ """
+ for relation in self.relations:
+ relation.to_publish_raw.update({
+ 'client.cert': cert,
+ 'client.key': key,
+ })
+
+ def set_server_cert(self, scope, cert, key):
+ """
+ Deprecated. Use one of the [new_requests][] collections and
+ `request.set_cert()` instead.
+
+ Set the server cert and key for the request identified by `scope`.
+ """
+ request = self.get_server_requests()[scope]
+ request.set_cert(cert, key)
+
+ def set_server_multicerts(self, scope):
+ """
+ Deprecated. Done automatically.
+ """
+ pass
+
+ def add_server_cert(self, scope, cn, cert, key):
+ '''
+ Deprecated. Use `request.set_cert()` instead.
+ '''
+ self.set_server_cert(scope, cert, key)
+
+ def get_server_requests(self):
+ """
+ Deprecated. Use the [new_requests][] or [server_requests][]
+ collections instead.
+
+ One provider can have many requests to generate server certificates.
+ Return a map of all server request objects indexed by a unique
+ identifier.
+ """
+ return {req._key: req for req in self.new_server_requests}
+
+ @property
+ def all_requests(self):
+ """
+ List of all requests that have been made.
+
+ Each will be an instance of [CertificateRequest][].
+
+ Example usage:
+
+ ```python
+ @when('certs.regen',
+ 'tls.certs.available')
+ def regen_all_certs():
+ tls = endpoint_from_flag('tls.certs.available')
+ for request in tls.all_requests:
+ cert, key = generate_cert(request.cert_type,
+ request.common_name,
+ request.sans)
+ request.set_cert(cert, key)
+ ```
+ """
+ requests = []
+ for unit in self.all_joined_units:
+ # handle older single server cert request
+ if unit.received_raw['common_name']:
+ requests.append(CertificateRequest(
+ unit,
+ 'server',
+ unit.received_raw['certificate_name'],
+ unit.received_raw['common_name'],
+ unit.received['sans'],
+ ))
+
+            # handle multi server cert requests
+ reqs = unit.received['cert_requests'] or {}
+ for common_name, req in reqs.items():
+ requests.append(CertificateRequest(
+ unit,
+ 'server',
+ common_name,
+ common_name,
+ req['sans'],
+ ))
+
+ # handle client cert requests
+ reqs = unit.received['client_cert_requests'] or {}
+ for common_name, req in reqs.items():
+ requests.append(CertificateRequest(
+ unit,
+ 'client',
+ common_name,
+ common_name,
+ req['sans'],
+ ))
+ # handle application cert requests
+ reqs = unit.received['application_cert_requests'] or {}
+ for common_name, req in reqs.items():
+ requests.append(ApplicationCertificateRequest(
+ unit,
+ 'application',
+ common_name,
+ common_name,
+ req['sans']
+ ))
+ return requests
+
+ @property
+ def new_requests(self):
+ """
+ Filtered view of [all_requests][] that only includes requests that
+ haven't been handled.
+
+ Each will be an instance of [CertificateRequest][].
+
+ This collection can also be further filtered by request type using
+ [new_server_requests][] or [new_client_requests][].
+
+ Example usage:
+
+ ```python
+ @when('tls.certs.requested')
+ def gen_certs():
+ tls = endpoint_from_flag('tls.certs.requested')
+ for request in tls.new_requests:
+ cert, key = generate_cert(request.cert_type,
+ request.common_name,
+ request.sans)
+ request.set_cert(cert, key)
+ ```
+ """
+ return [req for req in self.all_requests if not req.is_handled]
+
+ @property
+ def new_server_requests(self):
+ """
+ Filtered view of [new_requests][] that only includes server cert
+ requests.
+
+ Each will be an instance of [CertificateRequest][].
+
+ Example usage:
+
+ ```python
+ @when('tls.server.certs.requested')
+ def gen_server_certs():
+ tls = endpoint_from_flag('tls.server.certs.requested')
+ for request in tls.new_server_requests:
+ cert, key = generate_server_cert(request.common_name,
+ request.sans)
+ request.set_cert(cert, key)
+ ```
+ """
+ return [req for req in self.new_requests if req.cert_type == 'server']
+
+ @property
+ def new_client_requests(self):
+ """
+ Filtered view of [new_requests][] that only includes client cert
+ requests.
+
+ Each will be an instance of [CertificateRequest][].
+
+ Example usage:
+
+ ```python
+ @when('tls.client.certs.requested')
+ def gen_client_certs():
+ tls = endpoint_from_flag('tls.client.certs.requested')
+ for request in tls.new_client_requests:
+ cert, key = generate_client_cert(request.common_name,
+ request.sans)
+ request.set_cert(cert, key)
+ ```
+ """
+ return [req for req in self.new_requests if req.cert_type == 'client']
+
+ @property
+ def new_application_requests(self):
+ """
+ Filtered view of [new_requests][] that only includes application cert
+ requests.
+
+ Each will be an instance of [ApplicationCertificateRequest][].
+
+ Example usage:
+
+ ```python
+ @when('tls.application.certs.requested')
+ def gen_application_certs():
+ tls = endpoint_from_flag('tls.application.certs.requested')
+ for request in tls.new_application_requests:
+ cert, key = generate_application_cert(request.common_name,
+ request.sans)
+ request.set_cert(cert, key)
+ ```
+
+ :returns: List of certificate requests.
+ :rtype: [CertificateRequest, ]
+ """
+ return [req for req in self.new_requests
+ if req.cert_type == 'application']
+
+ @property
+ def all_published_certs(self):
+ """
+ List of all [Certificate][] instances that this provider has published
+ for all related applications.
+ """
+ return [req.cert for req in self.all_requests if req.cert]
diff --git a/easyrsa/hooks/relations/tls-certificates/pydocmd.yml b/easyrsa/hooks/relations/tls-certificates/pydocmd.yml
new file mode 100644
index 0000000..c568913
--- /dev/null
+++ b/easyrsa/hooks/relations/tls-certificates/pydocmd.yml
@@ -0,0 +1,19 @@
+site_name: 'TLS Certificates Interface'
+
+generate:
+ - requires.md:
+ - requires
+ - requires.TlsRequires+
+ - provides.md:
+ - provides
+ - provides.TlsProvides+
+ - common.md:
+ - tls_certificates_common.CertificateRequest+
+ - tls_certificates_common.Certificate+
+
+pages:
+ - Requires: requires.md
+ - Provides: provides.md
+ - Common: common.md
+
+gens_dir: docs
diff --git a/easyrsa/hooks/relations/tls-certificates/requires.py b/easyrsa/hooks/relations/tls-certificates/requires.py
new file mode 100644
index 0000000..951f953
--- /dev/null
+++ b/easyrsa/hooks/relations/tls-certificates/requires.py
@@ -0,0 +1,342 @@
+if not __package__:
+ # fix relative imports when building docs
+ import sys
+ __package__ = sys.modules[''].__name__
+
+import uuid
+
+from charmhelpers.core import hookenv
+
+from charms.reactive import when, when_not
+from charms.reactive import set_flag, clear_flag, toggle_flag
+from charms.reactive import Endpoint
+from charms.reactive import data_changed
+
+from .tls_certificates_common import Certificate
+
+
+class TlsRequires(Endpoint):
+ """
+ The client's side of the interface protocol.
+
+ The following flags may be set:
+
+ * `{endpoint_name}.available`
+ Whenever the relation is joined.
+
+ * `{endpoint_name}.ca.available`
+ When the root CA information is available via the [root_ca_cert][] and
+ [root_ca_chain][] properties.
+
+ * `{endpoint_name}.ca.changed`
+ When the root CA information has changed, whether because
+ they have just become available or if they were regenerated by the CA.
+ Once processed this flag should be removed by the charm.
+
+ * `{endpoint_name}.certs.available`
+ When the requested server or client certs are available.
+
+ * `{endpoint_name}.certs.changed`
+ When the requested server or client certs have changed, whether because
+ they have just become available or if they were regenerated by the CA.
+ Once processed this flag should be removed by the charm.
+
+ * `{endpoint_name}.server.certs.available`
+ When the server certificates requested by [request_server_cert][] are
+ available via the [server_certs][] collection.
+
+ * `{endpoint_name}.server.certs.changed`
+ When the requested server certificates have changed, whether because
+ they have just become available or if they were regenerated by the CA.
+ Once processed this flag should be removed by the charm.
+
+ * `{endpoint_name}.client.certs.available`
+ When the client certificates requested by [request_client_cert][] are
+ available via the [client_certs][] collection.
+
+ * `{endpoint_name}.client.certs.changed`
+ When the requested client certificates have changed, whether because
+ they have just become available or if they were regenerated by the CA.
+ Once processed this flag should be removed by the charm.
+
+ The following flags have been deprecated:
+
+ * `{endpoint_name}.server.cert.available`
+ * `{endpoint_name}.client.cert.available`
+ * `{endpoint_name}.batch.cert.available`
+
+ [Certificate]: common.md#tls_certificates_common.Certificate
+ [CertificateRequest]: common.md#tls_certificates_common.CertificateRequest
+ [root_ca_cert]: requires.md#requires.TlsRequires.root_ca_cert
+ [root_ca_chain]: requires.md#requires.TlsRequires.root_ca_chain
+ [request_server_cert]: requires.md#requires.TlsRequires.request_server_cert
+ [request_client_cert]: requires.md#requires.TlsRequires.request_client_cert
+ [server_certs]: requires.md#requires.TlsRequires.server_certs
+ [server_certs_map]: requires.md#requires.TlsRequires.server_certs_map
+    [client_certs]: requires.md#requires.TlsRequires.client_certs
+ """
+
+ @when('endpoint.{endpoint_name}.joined')
+ def joined(self):
+ self.relations[0].to_publish_raw['unit_name'] = self._unit_name
+ prefix = self.expand_name('{endpoint_name}.')
+ ca_available = self.root_ca_cert
+ ca_changed = ca_available and data_changed(prefix + 'ca',
+ self.root_ca_cert)
+ server_available = self.server_certs
+ server_changed = server_available and data_changed(prefix + 'servers',
+ self.server_certs)
+ client_available = self.client_certs
+ client_changed = client_available and data_changed(prefix + 'clients',
+ self.client_certs)
+ certs_available = server_available or client_available
+ certs_changed = server_changed or client_changed
+
+ set_flag(prefix + 'available')
+ toggle_flag(prefix + 'ca.available', ca_available)
+ toggle_flag(prefix + 'ca.changed', ca_changed)
+ toggle_flag(prefix + 'server.certs.available', server_available)
+ toggle_flag(prefix + 'server.certs.changed', server_changed)
+ toggle_flag(prefix + 'client.certs.available', client_available)
+ toggle_flag(prefix + 'client.certs.changed', client_changed)
+ toggle_flag(prefix + 'certs.available', certs_available)
+ toggle_flag(prefix + 'certs.changed', certs_changed)
+ # deprecated
+ toggle_flag(prefix + 'server.cert.available', self.server_certs)
+ toggle_flag(prefix + 'client.cert.available', self.get_client_cert())
+ toggle_flag(prefix + 'batch.cert.available', self.server_certs)
+
+ @when_not('endpoint.{endpoint_name}.joined')
+ def broken(self):
+ prefix = self.expand_name('{endpoint_name}.')
+ clear_flag(prefix + 'available')
+ clear_flag(prefix + 'ca.available')
+ clear_flag(prefix + 'ca.changed')
+ clear_flag(prefix + 'server.certs.available')
+ clear_flag(prefix + 'server.certs.changed')
+ clear_flag(prefix + 'client.certs.available')
+ clear_flag(prefix + 'client.certs.changed')
+ clear_flag(prefix + 'certs.available')
+ clear_flag(prefix + 'certs.changed')
+ # deprecated
+ clear_flag(prefix + 'server.cert.available')
+ clear_flag(prefix + 'client.cert.available')
+ clear_flag(prefix + 'batch.cert.available')
+
+ @property
+ def _unit_name(self):
+ return hookenv.local_unit().replace('/', '_')
+
+ @property
+ def root_ca_cert(self):
+ """
+ Root CA certificate.
+ """
+ # only the leader of the provider should set the CA, or all units
+ # had better agree
+ return self.all_joined_units.received_raw['ca']
+
+ def get_ca(self):
+ """
+ Return the root CA certificate.
+
+ Same as [root_ca_cert][].
+ """
+ return self.root_ca_cert
+
+ @property
+ def root_ca_chain(self):
+ """
+ The chain of trust for the root CA.
+ """
+ # only the leader of the provider should set the CA, or all units
+ # had better agree
+ return self.all_joined_units.received_raw['chain']
+
+ def get_chain(self):
+ """
+ Return the chain of trust for the root CA.
+
+ Same as [root_ca_chain][].
+ """
+ return self.root_ca_chain
+
+ def get_client_cert(self):
+ """
+ Deprecated. Use [request_client_cert][] and the [client_certs][]
+ collection instead.
+
+ Return a globally shared client certificate and key.
+ """
+ data = self.all_joined_units.received_raw
+ return (data['client.cert'], data['client.key'])
+
+ def get_server_cert(self):
+ """
+ Deprecated. Use the [server_certs][] collection instead.
+
+ Return the cert and key of the first server certificate requested.
+ """
+ if not self.server_certs:
+ return (None, None)
+ cert = self.server_certs[0]
+ return (cert.cert, cert.key)
+
+ @property
+ def server_certs(self):
+ """
+ List of [Certificate][] instances for all available server certs.
+ """
+ certs = []
+ raw_data = self.all_joined_units.received_raw
+ json_data = self.all_joined_units.received
+
+ # for backwards compatibility, the first cert goes in its own fields
+ if self.relations:
+ common_name = self.relations[0].to_publish_raw['common_name']
+ cert = raw_data['{}.server.cert'.format(self._unit_name)]
+ key = raw_data['{}.server.key'.format(self._unit_name)]
+ if cert and key:
+ certs.append(Certificate('server',
+ common_name,
+ cert,
+ key))
+
+ # subsequent requests go in the collection
+ field = '{}.processed_requests'.format(self._unit_name)
+ certs_data = json_data[field] or {}
+ certs.extend(Certificate('server',
+ common_name,
+ cert['cert'],
+ cert['key'])
+ for common_name, cert in certs_data.items())
+ return certs
+
+ @property
+ def application_certs(self):
+ """
+        List containing the application Certificate cert.
+
+ :returns: A list containing one certificate
+ :rtype: [Certificate()]
+ """
+ certs = []
+ json_data = self.all_joined_units.received
+ field = '{}.processed_application_requests'.format(self._unit_name)
+ certs_data = json_data[field] or {}
+ app_cert_data = certs_data.get('app_data')
+ if app_cert_data:
+ certs = [Certificate(
+ 'server',
+ 'app_data',
+ app_cert_data['cert'],
+ app_cert_data['key'])]
+ return certs
+
+ @property
+ def server_certs_map(self):
+ """
+ Mapping of server [Certificate][] instances by their `common_name`.
+ """
+ return {cert.common_name: cert for cert in self.server_certs}
+
+ def get_batch_requests(self):
+ """
+ Deprecated. Use [server_certs_map][] instead.
+
+ Mapping of server [Certificate][] instances by their `common_name`.
+ """
+ return self.server_certs_map
+
+ @property
+ def client_certs(self):
+ """
+ List of [Certificate][] instances for all available client certs.
+ """
+ field = '{}.processed_client_requests'.format(self._unit_name)
+ certs_data = self.all_joined_units.received[field] or {}
+ return [Certificate('client',
+ common_name,
+ cert['cert'],
+ cert['key'])
+ for common_name, cert in certs_data.items()]
+
+ @property
+ def client_certs_map(self):
+ """
+ Mapping of client [Certificate][] instances by their `common_name`.
+ """
+ return {cert.common_name: cert for cert in self.client_certs}
+
+ def request_server_cert(self, cn, sans=None, cert_name=None):
+ """
+ Request a server certificate and key be generated for the given
+ common name (`cn`) and optional list of alternative names (`sans`).
+
+ The `cert_name` is deprecated and not needed.
+
+ This can be called multiple times to request more than one server
+ certificate, although the common names must be unique. If called
+ again with the same common name, it will be ignored.
+ """
+ if not self.relations:
+ return
+ # assume we'll only be connected to one provider
+ to_publish_json = self.relations[0].to_publish
+ to_publish_raw = self.relations[0].to_publish_raw
+ if to_publish_raw['common_name'] in (None, '', cn):
+ # for backwards compatibility, first request goes in its own fields
+ to_publish_raw['common_name'] = cn
+ to_publish_json['sans'] = sans or []
+ cert_name = to_publish_raw.get('certificate_name') or cert_name
+ if cert_name is None:
+ cert_name = str(uuid.uuid4())
+ to_publish_raw['certificate_name'] = cert_name
+ else:
+ # subsequent requests go in the collection
+ requests = to_publish_json.get('cert_requests', {})
+ requests[cn] = {'sans': sans or []}
+ to_publish_json['cert_requests'] = requests
+
+ def add_request_server_cert(self, cn, sans):
+ """
+ Deprecated. Use [request_server_cert][] instead.
+ """
+ self.request_server_cert(cn, sans)
+
+ def request_server_certs(self):
+ """
+ Deprecated. Just use [request_server_cert][]; this does nothing.
+ """
+ pass
+
+ def request_client_cert(self, cn, sans):
+ """
+ Request a client certificate and key be generated for the given
+ common name (`cn`) and list of alternative names (`sans`).
+
+ This can be called multiple times to request more than one client
+ certificate, although the common names must be unique. If called
+ again with the same common name, it will be ignored.
+ """
+ if not self.relations:
+ return
+ # assume we'll only be connected to one provider
+ to_publish_json = self.relations[0].to_publish
+ requests = to_publish_json.get('client_cert_requests', {})
+ requests[cn] = {'sans': sans}
+ to_publish_json['client_cert_requests'] = requests
+
+ def request_application_cert(self, cn, sans):
+ """
+ Request an application certificate and key be generated for the given
+ common name (`cn`) and list of alternative names (`sans` ) of this
+        unit and all peer units. All units will share a single certificate.
+ """
+ if not self.relations:
+ return
+ # assume we'll only be connected to one provider
+ to_publish_json = self.relations[0].to_publish
+ requests = to_publish_json.get('application_cert_requests', {})
+ requests[cn] = {'sans': sans}
+ to_publish_json['application_cert_requests'] = requests
diff --git a/easyrsa/hooks/relations/tls-certificates/tls_certificates_common.py b/easyrsa/hooks/relations/tls-certificates/tls_certificates_common.py
new file mode 100644
index 0000000..99a2f8c
--- /dev/null
+++ b/easyrsa/hooks/relations/tls-certificates/tls_certificates_common.py
@@ -0,0 +1,302 @@
+from charms.reactive import clear_flag, is_data_changed, data_changed
+
+
+class CertificateRequest(dict):
+ def __init__(self, unit, cert_type, cert_name, common_name, sans):
+ self._unit = unit
+ self._cert_type = cert_type
+ super().__init__({
+ 'certificate_name': cert_name,
+ 'common_name': common_name,
+ 'sans': sans,
+ })
+
+ @property
+ def _key(self):
+ return '.'.join((self._unit.relation.relation_id,
+ self.unit_name,
+ self.common_name))
+
+ def resolve_unit_name(self, unit):
+ """Return name of unit associated with this request.
+
+ unit_name should be provided in the relation data to ensure
+ compatibility with cross-model relations. If the unit name
+ is absent then fall back to unit_name attribute of the
+ unit associated with this request.
+
+ :param unit: Unit to extract name from
+ :type unit: charms.reactive.endpoints.RelatedUnit
+ :returns: Name of unit
+ :rtype: str
+ """
+ unit_name = unit.received_raw['unit_name']
+ if not unit_name:
+ unit_name = unit.unit_name
+ return unit_name
+
+ @property
+ def unit_name(self):
+ """Name of this unit.
+
+ :returns: Name of unit
+ :rtype: str
+ """
+ return self.resolve_unit_name(unit=self._unit).replace('/', '_')
+
+ @property
+ def application_name(self):
+ """Name of the application which the request came from.
+
+ :returns: Name of application
+ :rtype: str
+ """
+ return self.resolve_unit_name(unit=self._unit).split('/')[0]
+
+ @property
+ def cert_type(self):
+ """
+ Type of certificate, 'server' or 'client', being requested.
+ """
+ return self._cert_type
+
+ @property
+ def cert_name(self):
+ return self['certificate_name']
+
+ @property
+ def common_name(self):
+ return self['common_name']
+
+ @property
+ def sans(self):
+ return self['sans']
+
+ @property
+ def _publish_key(self):
+ if self.cert_type == 'server':
+ return '{}.processed_requests'.format(self.unit_name)
+ elif self.cert_type == 'client':
+ return '{}.processed_client_requests'.format(self.unit_name)
+ raise ValueError('Unknown cert_type: {}'.format(self.cert_type))
+
+ @property
+ def _server_cert_key(self):
+ return '{}.server.cert'.format(self.unit_name)
+
+ @property
+ def _server_key_key(self):
+ return '{}.server.key'.format(self.unit_name)
+
+ @property
+ def _is_top_level_server_cert(self):
+ return (self.cert_type == 'server' and
+ self.common_name == self._unit.received_raw['common_name'])
+
+ @property
+ def cert(self):
+ """
+ The cert published for this request, if any.
+ """
+ cert, key = None, None
+ if self._is_top_level_server_cert:
+ tpr = self._unit.relation.to_publish_raw
+ cert = tpr[self._server_cert_key]
+ key = tpr[self._server_key_key]
+ else:
+ tp = self._unit.relation.to_publish
+ certs_data = tp.get(self._publish_key, {})
+ cert_data = certs_data.get(self.common_name, {})
+ cert = cert_data.get('cert')
+ key = cert_data.get('key')
+ if cert and key:
+ return Certificate(self.cert_type, self.common_name, cert, key)
+ return None
+
+ @property
+ def is_handled(self):
+ has_cert = self.cert is not None
+ same_sans = not is_data_changed(self._key,
+ sorted(set(self.sans or [])))
+ return has_cert and same_sans
+
+ def set_cert(self, cert, key):
+ rel = self._unit.relation
+ if self._is_top_level_server_cert:
+ # backwards compatibility; if this is the cert that was requested
+ # as a single server cert, set it in the response as the single
+ # server cert
+ rel.to_publish_raw.update({
+ self._server_cert_key: cert,
+ self._server_key_key: key,
+ })
+ else:
+ data = rel.to_publish.get(self._publish_key, {})
+ data[self.common_name] = {
+ 'cert': cert,
+ 'key': key,
+ }
+ rel.to_publish[self._publish_key] = data
+ if not rel.endpoint.new_server_requests:
+ clear_flag(rel.endpoint.expand_name('{endpoint_name}.server'
+ '.cert.requested'))
+ if not rel.endpoint.new_requests:
+ clear_flag(rel.endpoint.expand_name('{endpoint_name}.'
+ 'certs.requested'))
+ data_changed(self._key, sorted(set(self.sans or [])))
+
+
+class ApplicationCertificateRequest(CertificateRequest):
+ """
+ A request for an application consistent certificate.
+
+ This is a request for a certificate that works for all units of an
+ application. All sans and cns are added together to produce one
+ certificate and the same certificate and key are sent to all the
+ units of an application. Only one ApplicationCertificateRequest
+ is needed per application.
+ """
+
+ @property
+ def _key(self):
+ """Key to identify this cert.
+
+ :returns: cert key
+ :rtype: str
+ """
+ return '{}.{}'.format(self._unit.relation.relation_id, 'app_cert')
+
+ @property
+ def cert(self):
+ """
+ The cert published for this request, if any.
+
+ :returns: Certificate
+ :rtype: Certificate or None
+ """
+ cert, key = None, None
+ tp = self._unit.relation.to_publish
+ certs_data = tp.get(self._publish_key, {})
+ cert_data = certs_data.get('app_data', {})
+ cert = cert_data.get('cert')
+ key = cert_data.get('key')
+ if cert and key:
+ return Certificate(self.cert_type, self.common_name, cert, key)
+ return None
+
+ @property
+ def is_handled(self):
+ """Whether the certificate has been handled.
+
+ :returns: If the cert has been handled
+ :rtype: bool
+ """
+ has_cert = self.cert is not None
+ same_sans = not is_data_changed(self._key,
+ sorted(set(self.sans or [])))
+ return has_cert and same_sans
+
+ @property
+ def sans(self):
+ """Generate a list of all sans from all units of application
+
+ Examine all units of the application and compile a list of
+ all sans. CNs are treated as additional san entries.
+
+ :returns: List of sans
+ :rtype: List[str]
+ """
+ _sans = []
+ for unit in self._unit.relation.units:
+ reqs = unit.received['application_cert_requests'] or {}
+ for cn, req in reqs.items():
+ _sans.append(cn)
+ _sans.extend(req['sans'])
+ return sorted(list(set(_sans)))
+
+ @property
+ def _request_key(self):
+ """Key used to request cert
+
+ :returns: Key used to request cert
+ :rtype: str
+ """
+ return 'application_cert_requests'
+
+ def derive_publish_key(self, unit=None):
+ """Derive the application cert publish key for a unit.
+
+ :param unit: Unit to extract name from
+ :type unit: charms.reactive.endpoints.RelatedUnit
+ :returns: publish key
+ :rtype: str
+ """
+ if not unit:
+ unit = self._unit
+ unit_name = self.resolve_unit_name(unit).replace('/', '_')
+ return '{}.processed_application_requests'.format(unit_name)
+
+ @property
+ def _publish_key(self):
+ """Key used to publish cert
+
+ :returns: Key used to publish cert
+ :rtype: str
+ """
+ return self.derive_publish_key(unit=self._unit)
+
+ def set_cert(self, cert, key):
+ """Send the cert and key to all units of the application
+
+ :param cert: TLS Certificate
+ :type cert: str
+ :param key: TLS Private Key
+ :type key: str
+ """
+ rel = self._unit.relation
+ for unit in self._unit.relation.units:
+ pub_key = self.derive_publish_key(unit=unit)
+ data = rel.to_publish.get(
+ pub_key,
+ {})
+ data['app_data'] = {
+ 'cert': cert,
+ 'key': key,
+ }
+ rel.to_publish[pub_key] = data
+ if not rel.endpoint.new_application_requests:
+ clear_flag(rel.endpoint.expand_name(
+ '{endpoint_name}.application.certs.requested'))
+ data_changed(self._key, sorted(set(self.sans or [])))
+
+
+class Certificate(dict):
+ """
+ Represents a created certificate and key.
+
+ The ``cert_type``, ``common_name``, ``cert``, and ``key`` values can
+ be accessed either as properties or as the contents of the dict.
+ """
+ def __init__(self, cert_type, common_name, cert, key):
+ super().__init__({
+ 'cert_type': cert_type,
+ 'common_name': common_name,
+ 'cert': cert,
+ 'key': key,
+ })
+
+ @property
+ def cert_type(self):
+ return self['cert_type']
+
+ @property
+ def common_name(self):
+ return self['common_name']
+
+ @property
+ def cert(self):
+ return self['cert']
+
+ @property
+ def key(self):
+ return self['key']
diff --git a/easyrsa/hooks/start b/easyrsa/hooks/start
new file mode 100755
index 0000000..9858c6b
--- /dev/null
+++ b/easyrsa/hooks/start
@@ -0,0 +1,22 @@
+#!/usr/bin/env python3
+
+# Load modules from $JUJU_CHARM_DIR/lib
+import sys
+sys.path.append('lib')
+
+from charms.layer import basic # noqa
+basic.bootstrap_charm_deps()
+
+from charmhelpers.core import hookenv # noqa
+hookenv.atstart(basic.init_config_states)
+hookenv.atexit(basic.clear_config_states)
+
+
+# This will load and run the appropriate @hook and other decorated
+# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive,
+# and $JUJU_CHARM_DIR/hooks/relations.
+#
+# See https://jujucharms.com/docs/stable/authors-charm-building
+# for more information on this pattern.
+from charms.reactive import main # noqa
+main()
diff --git a/easyrsa/hooks/stop b/easyrsa/hooks/stop
new file mode 100755
index 0000000..9858c6b
--- /dev/null
+++ b/easyrsa/hooks/stop
@@ -0,0 +1,22 @@
+#!/usr/bin/env python3
+
+# Load modules from $JUJU_CHARM_DIR/lib
+import sys
+sys.path.append('lib')
+
+from charms.layer import basic # noqa
+basic.bootstrap_charm_deps()
+
+from charmhelpers.core import hookenv # noqa
+hookenv.atstart(basic.init_config_states)
+hookenv.atexit(basic.clear_config_states)
+
+
+# This will load and run the appropriate @hook and other decorated
+# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive,
+# and $JUJU_CHARM_DIR/hooks/relations.
+#
+# See https://jujucharms.com/docs/stable/authors-charm-building
+# for more information on this pattern.
+from charms.reactive import main # noqa
+main()
diff --git a/easyrsa/hooks/update-status b/easyrsa/hooks/update-status
new file mode 100755
index 0000000..9858c6b
--- /dev/null
+++ b/easyrsa/hooks/update-status
@@ -0,0 +1,22 @@
+#!/usr/bin/env python3
+
+# Load modules from $JUJU_CHARM_DIR/lib
+import sys
+sys.path.append('lib')
+
+from charms.layer import basic # noqa
+basic.bootstrap_charm_deps()
+
+from charmhelpers.core import hookenv # noqa
+hookenv.atstart(basic.init_config_states)
+hookenv.atexit(basic.clear_config_states)
+
+
+# This will load and run the appropriate @hook and other decorated
+# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive,
+# and $JUJU_CHARM_DIR/hooks/relations.
+#
+# See https://jujucharms.com/docs/stable/authors-charm-building
+# for more information on this pattern.
+from charms.reactive import main # noqa
+main()
diff --git a/easyrsa/hooks/upgrade-charm b/easyrsa/hooks/upgrade-charm
new file mode 100755
index 0000000..9858c6b
--- /dev/null
+++ b/easyrsa/hooks/upgrade-charm
@@ -0,0 +1,22 @@
+#!/usr/bin/env python3
+
+# Load modules from $JUJU_CHARM_DIR/lib
+import sys
+sys.path.append('lib')
+
+from charms.layer import basic # noqa
+basic.bootstrap_charm_deps()
+
+from charmhelpers.core import hookenv # noqa
+hookenv.atstart(basic.init_config_states)
+hookenv.atexit(basic.clear_config_states)
+
+
+# This will load and run the appropriate @hook and other decorated
+# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive,
+# and $JUJU_CHARM_DIR/hooks/relations.
+#
+# See https://jujucharms.com/docs/stable/authors-charm-building
+# for more information on this pattern.
+from charms.reactive import main # noqa
+main()
diff --git a/easyrsa/icon.svg b/easyrsa/icon.svg
new file mode 100644
index 0000000..67aba5a
--- /dev/null
+++ b/easyrsa/icon.svg
@@ -0,0 +1,352 @@
+
+
+
+
diff --git a/easyrsa/layer.yaml b/easyrsa/layer.yaml
new file mode 100644
index 0000000..b70ddb8
--- /dev/null
+++ b/easyrsa/layer.yaml
@@ -0,0 +1,23 @@
+"includes":
+- "layer:options"
+- "layer:basic"
+- "layer:debug"
+- "layer:leadership"
+- "layer:status"
+- "interface:tls-certificates"
+"exclude": [".travis.yml", "tests", "tox.ini", "test-requirements.txt", "unit_tests",
+ ".tox", "__pycache__", "conftest.py"]
+"options":
+ "basic":
+ "packages":
+ - "openssl"
+ "python_packages": []
+ "use_venv": !!bool "true"
+ "include_system_packages": !!bool "false"
+ "debug": {}
+ "leadership": {}
+ "status":
+ "patch-hookenv": !!bool "true"
+ "easyrsa": {}
+"repo": "http://github.com/juju-solutions/layer-easyrsa.git"
+"is": "easyrsa"
diff --git a/easyrsa/lib/charms/layer/__init__.py b/easyrsa/lib/charms/layer/__init__.py
new file mode 100644
index 0000000..a8e0c64
--- /dev/null
+++ b/easyrsa/lib/charms/layer/__init__.py
@@ -0,0 +1,60 @@
+import sys
+from importlib import import_module
+from pathlib import Path
+
+
+def import_layer_libs():
+ """
+ Ensure that all layer libraries are imported.
+
+ This makes it possible to do the following:
+
+ from charms import layer
+
+ layer.foo.do_foo_thing()
+
+ Note: This function must be called after bootstrap.
+ """
+ for module_file in Path('lib/charms/layer').glob('*'):
+ module_name = module_file.stem
+ if module_name in ('__init__', 'basic', 'execd') or not (
+ module_file.suffix == '.py' or module_file.is_dir()
+ ):
+ continue
+ import_module('charms.layer.{}'.format(module_name))
+
+
+# Terrible hack to support the old terrible interface.
+# Try to get people to call layer.options.get() instead so
+# that we can remove this garbage.
+# Cribbed from https://stackoverflow.com/a/48100440/4941864
+class OptionsBackwardsCompatibilityHack(sys.modules[__name__].__class__):
+ def __call__(self, section=None, layer_file=None):
+ if layer_file is None:
+ return self.get(section=section)
+ else:
+ return self.get(section=section,
+ layer_file=Path(layer_file))
+
+
+def patch_options_interface():
+ from charms.layer import options
+ if sys.version_info.minor >= 5:
+ options.__class__ = OptionsBackwardsCompatibilityHack
+ else:
+ # Py 3.4 doesn't support changing the __class__, so we have to do it
+ # another way. The last line is needed because we already have a
+ # reference that doesn't get updated with sys.modules.
+ name = options.__name__
+ hack = OptionsBackwardsCompatibilityHack(name)
+ hack.get = options.get
+ sys.modules[name] = hack
+ sys.modules[__name__].options = hack
+
+
+try:
+ patch_options_interface()
+except ImportError:
+ # This may fail if pyyaml hasn't been installed yet. But in that
+ # case, the bootstrap logic will try it again once it has.
+ pass
diff --git a/easyrsa/lib/charms/layer/basic.py b/easyrsa/lib/charms/layer/basic.py
new file mode 100644
index 0000000..7507203
--- /dev/null
+++ b/easyrsa/lib/charms/layer/basic.py
@@ -0,0 +1,446 @@
+import os
+import sys
+import re
+import shutil
+from distutils.version import LooseVersion
+from pkg_resources import Requirement
+from glob import glob
+from subprocess import check_call, check_output, CalledProcessError
+from time import sleep
+
+from charms import layer
+from charms.layer.execd import execd_preinstall
+
+
+def _get_subprocess_env():
+ env = os.environ.copy()
+ env['LANG'] = env.get('LANG', 'C.UTF-8')
+ return env
+
+
+def get_series():
+ """
+ Return series for a few known OSes.
+ Tested as of November 2019:
+ * centos6, centos7, rhel6.
+ * bionic
+ """
+ series = ""
+
+ # Looking for content in /etc/os-release
+ # works for ubuntu + some centos
+ if os.path.isfile('/etc/os-release'):
+ d = {}
+ with open('/etc/os-release', 'r') as rel:
+ for l in rel:
+ if not re.match(r'^\s*$', l):
+ k, v = l.split('=')
+ d[k.strip()] = v.strip().replace('"', '')
+ series = "{ID}{VERSION_ID}".format(**d)
+
+ # Looking for content in /etc/redhat-release
+ # works for redhat enterprise systems
+ elif os.path.isfile('/etc/redhat-release'):
+ with open('/etc/redhat-release', 'r') as redhatlsb:
+ # CentOS Linux release 7.7.1908 (Core)
+ line = redhatlsb.readline()
+ release = int(line.split("release")[1].split()[0][0])
+ series = "centos" + str(release)
+
+ # Looking for content in /etc/lsb-release
+ # works for ubuntu
+ elif os.path.isfile('/etc/lsb-release'):
+ d = {}
+ with open('/etc/lsb-release', 'r') as lsb:
+ for l in lsb:
+ k, v = l.split('=')
+ d[k.strip()] = v.strip()
+ series = d['DISTRIB_CODENAME']
+
+ # This is what happens if we cant figure out the OS.
+ else:
+ series = "unknown"
+ return series
+
+
+def bootstrap_charm_deps():
+ """
+ Set up the base charm dependencies so that the reactive system can run.
+ """
+ # execd must happen first, before any attempt to install packages or
+ # access the network, because sites use this hook to do bespoke
+ # configuration and install secrets so the rest of this bootstrap
+ # and the charm itself can actually succeed. This call does nothing
+ # unless the operator has created and populated $JUJU_CHARM_DIR/exec.d.
+ execd_preinstall()
+ # ensure that $JUJU_CHARM_DIR/bin is on the path, for helper scripts
+
+ series = get_series()
+
+ # OMG?! is build-essentials needed?
+ ubuntu_packages = ['python3-pip',
+ 'python3-setuptools',
+ 'python3-yaml',
+ 'python3-dev',
+ 'python3-wheel',
+ 'build-essential']
+
+ # I'm not going to "yum group info "Development Tools"
+ # omitting above madness
+ centos_packages = ['python3-pip',
+ 'python3-setuptools',
+ 'python3-devel',
+ 'python3-wheel']
+
+ packages_needed = []
+ if 'centos' in series:
+ packages_needed = centos_packages
+ else:
+ packages_needed = ubuntu_packages
+
+ charm_dir = os.environ['JUJU_CHARM_DIR']
+ os.environ['PATH'] += ':%s' % os.path.join(charm_dir, 'bin')
+ venv = os.path.abspath('../.venv')
+ vbin = os.path.join(venv, 'bin')
+ vpip = os.path.join(vbin, 'pip')
+ vpy = os.path.join(vbin, 'python')
+ hook_name = os.path.basename(sys.argv[0])
+ is_bootstrapped = os.path.exists('wheelhouse/.bootstrapped')
+ is_charm_upgrade = hook_name == 'upgrade-charm'
+ is_series_upgrade = hook_name == 'post-series-upgrade'
+ is_post_upgrade = os.path.exists('wheelhouse/.upgraded')
+ is_upgrade = (not is_post_upgrade and
+ (is_charm_upgrade or is_series_upgrade))
+ if is_bootstrapped and not is_upgrade:
+ # older subordinates might have downgraded charm-env, so we should
+ # restore it if necessary
+ install_or_update_charm_env()
+ activate_venv()
+ # the .upgrade file prevents us from getting stuck in a loop
+ # when re-execing to activate the venv; at this point, we've
+ # activated the venv, so it's safe to clear it
+ if is_post_upgrade:
+ os.unlink('wheelhouse/.upgraded')
+ return
+ if os.path.exists(venv):
+ try:
+ # focal installs or upgrades prior to PR 160 could leave the venv
+ # in a broken state which would prevent subsequent charm upgrades
+ _load_installed_versions(vpip)
+ except CalledProcessError:
+ is_broken_venv = True
+ else:
+ is_broken_venv = False
+ if is_upgrade or is_broken_venv:
+ # All upgrades should do a full clear of the venv, rather than
+ # just updating it, to bring in updates to Python itself
+ shutil.rmtree(venv)
+ if is_upgrade:
+ if os.path.exists('wheelhouse/.bootstrapped'):
+ os.unlink('wheelhouse/.bootstrapped')
+ # bootstrap wheelhouse
+ if os.path.exists('wheelhouse'):
+ pre_eoan = series in ('ubuntu12.04', 'precise',
+ 'ubuntu14.04', 'trusty',
+ 'ubuntu16.04', 'xenial',
+ 'ubuntu18.04', 'bionic')
+ pydistutils_lines = [
+ "[easy_install]\n",
+ "find_links = file://{}/wheelhouse/\n".format(charm_dir),
+ "no_index=True\n",
+ "index_url=\n", # deliberately nothing here; disables it.
+ ]
+ if pre_eoan:
+ pydistutils_lines.append("allow_hosts = ''\n")
+ with open('/root/.pydistutils.cfg', 'w') as fp:
+ # make sure that easy_install also only uses the wheelhouse
+ # (see https://github.com/pypa/pip/issues/410)
+ fp.writelines(pydistutils_lines)
+ if 'centos' in series:
+ yum_install(packages_needed)
+ else:
+ apt_install(packages_needed)
+ from charms.layer import options
+ cfg = options.get('basic')
+ # include packages defined in layer.yaml
+ if 'centos' in series:
+ yum_install(cfg.get('packages', []))
+ else:
+ apt_install(cfg.get('packages', []))
+ # if we're using a venv, set it up
+ if cfg.get('use_venv'):
+ if not os.path.exists(venv):
+ series = get_series()
+ if series in ('ubuntu12.04', 'precise',
+ 'ubuntu14.04', 'trusty'):
+ apt_install(['python-virtualenv'])
+ elif 'centos' in series:
+ yum_install(['python-virtualenv'])
+ else:
+ apt_install(['virtualenv'])
+ cmd = ['virtualenv', '-ppython3', '--never-download', venv]
+ if cfg.get('include_system_packages'):
+ cmd.append('--system-site-packages')
+ check_call(cmd, env=_get_subprocess_env())
+ os.environ['PATH'] = ':'.join([vbin, os.environ['PATH']])
+ pip = vpip
+ else:
+ pip = 'pip3'
+ # save a copy of system pip to prevent `pip3 install -U pip`
+ # from changing it
+ if os.path.exists('/usr/bin/pip'):
+ shutil.copy2('/usr/bin/pip', '/usr/bin/pip.save')
+ pre_install_pkgs = ['pip', 'setuptools', 'setuptools-scm']
+ # we bundle these packages to work around bugs in older versions (such
+ # as https://github.com/pypa/pip/issues/56), but if the system already
+ # provided a newer version, downgrading it can cause other problems
+ _update_if_newer(pip, pre_install_pkgs)
+ # install the rest of the wheelhouse deps (extract the pkg names into
+ # a set so that we can ignore the pre-install packages and let pip
+ # choose the best version in case there are multiple from layer
+ # conflicts)
+ pkgs = _load_wheelhouse_versions().keys() - set(pre_install_pkgs)
+ reinstall_flag = '--force-reinstall'
+ if not cfg.get('use_venv', True) and pre_eoan:
+ reinstall_flag = '--ignore-installed'
+ check_call([pip, 'install', '-U', reinstall_flag, '--no-index',
+ '--no-cache-dir', '-f', 'wheelhouse'] + list(pkgs),
+ env=_get_subprocess_env())
+ # re-enable installation from pypi
+ os.remove('/root/.pydistutils.cfg')
+
+ # install pyyaml for centos7, since, unlike the ubuntu image, the
+ # default image for centos doesn't include pyyaml; see the discussion:
+ # https://discourse.jujucharms.com/t/charms-for-centos-lets-begin
+ if 'centos' in series:
+ check_call([pip, 'install', '-U', 'pyyaml'],
+ env=_get_subprocess_env())
+
+ # install python packages from layer options
+ if cfg.get('python_packages'):
+ check_call([pip, 'install', '-U'] + cfg.get('python_packages'),
+ env=_get_subprocess_env())
+ if not cfg.get('use_venv'):
+ # restore system pip to prevent `pip3 install -U pip`
+ # from changing it
+ if os.path.exists('/usr/bin/pip.save'):
+ shutil.copy2('/usr/bin/pip.save', '/usr/bin/pip')
+ os.remove('/usr/bin/pip.save')
+ # setup wrappers to ensure envs are used for scripts
+ install_or_update_charm_env()
+ for wrapper in ('charms.reactive', 'charms.reactive.sh',
+ 'chlp', 'layer_option'):
+ src = os.path.join('/usr/local/sbin', 'charm-env')
+ dst = os.path.join('/usr/local/sbin', wrapper)
+ if not os.path.exists(dst):
+ os.symlink(src, dst)
+ if cfg.get('use_venv'):
+ shutil.copy2('bin/layer_option', vbin)
+ else:
+ shutil.copy2('bin/layer_option', '/usr/local/bin/')
+ # re-link the charm copy to the wrapper in case charms
+ # call bin/layer_option directly (as was the old pattern)
+ os.remove('bin/layer_option')
+ os.symlink('/usr/local/sbin/layer_option', 'bin/layer_option')
+ # flag us as having already bootstrapped so we don't do it again
+ open('wheelhouse/.bootstrapped', 'w').close()
+ if is_upgrade:
+ # flag us as having already upgraded so we don't do it again
+ open('wheelhouse/.upgraded', 'w').close()
+ # Ensure that the newly bootstrapped libs are available.
+ # Note: this only seems to be an issue with namespace packages.
+ # Non-namespace-package libs (e.g., charmhelpers) are available
+ # without having to reload the interpreter. :/
+ reload_interpreter(vpy if cfg.get('use_venv') else sys.argv[0])
+
+
+def _load_installed_versions(pip):
+ pip_freeze = check_output([pip, 'freeze']).decode('utf8')
+ versions = {}
+ for pkg_ver in pip_freeze.splitlines():
+ try:
+ req = Requirement.parse(pkg_ver)
+ except ValueError:
+ continue
+ versions.update({
+ req.project_name: LooseVersion(ver)
+ for op, ver in req.specs if op == '=='
+ })
+ return versions
+
+
+def _load_wheelhouse_versions():
+ versions = {}
+ for wheel in glob('wheelhouse/*'):
+ pkg, ver = os.path.basename(wheel).rsplit('-', 1)
+ # nb: LooseVersion ignores the file extension
+ versions[pkg.replace('_', '-')] = LooseVersion(ver)
+ return versions
+
+
+def _update_if_newer(pip, pkgs):
+ installed = _load_installed_versions(pip)
+ wheelhouse = _load_wheelhouse_versions()
+ for pkg in pkgs:
+ if pkg not in installed or wheelhouse[pkg] > installed[pkg]:
+ check_call([pip, 'install', '-U', '--no-index', '-f', 'wheelhouse',
+ pkg], env=_get_subprocess_env())
+
+
+def install_or_update_charm_env():
+ # On Trusty python3-pkg-resources is not installed
+ try:
+ from pkg_resources import parse_version
+ except ImportError:
+ apt_install(['python3-pkg-resources'])
+ from pkg_resources import parse_version
+
+ try:
+ installed_version = parse_version(
+ check_output(['/usr/local/sbin/charm-env',
+ '--version']).decode('utf8'))
+ except (CalledProcessError, FileNotFoundError):
+ installed_version = parse_version('0.0.0')
+ try:
+ bundled_version = parse_version(
+ check_output(['bin/charm-env',
+ '--version']).decode('utf8'))
+ except (CalledProcessError, FileNotFoundError):
+ bundled_version = parse_version('0.0.0')
+ if installed_version < bundled_version:
+ shutil.copy2('bin/charm-env', '/usr/local/sbin/')
+
+
+def activate_venv():
+ """
+ Activate the venv if enabled in ``layer.yaml``.
+
+ This is handled automatically for normal hooks, but actions might
+ need to invoke this manually, using something like:
+
+ # Load modules from $JUJU_CHARM_DIR/lib
+ import sys
+ sys.path.append('lib')
+
+ from charms.layer.basic import activate_venv
+ activate_venv()
+
+ This will ensure that modules installed in the charm's
+ virtual environment are available to the action.
+ """
+ from charms.layer import options
+ venv = os.path.abspath('../.venv')
+ vbin = os.path.join(venv, 'bin')
+ vpy = os.path.join(vbin, 'python')
+ use_venv = options.get('basic', 'use_venv')
+ if use_venv and '.venv' not in sys.executable:
+ # activate the venv
+ os.environ['PATH'] = ':'.join([vbin, os.environ['PATH']])
+ reload_interpreter(vpy)
+ layer.patch_options_interface()
+ layer.import_layer_libs()
+
+
+def reload_interpreter(python):
+ """
+ Reload the python interpreter to ensure that all deps are available.
+
+ Newly installed modules in namespace packages sometimes seem to
+ not be picked up by Python 3.
+ """
+ os.execve(python, [python] + list(sys.argv), os.environ)
+
+
+def apt_install(packages):
+ """
+ Install apt packages.
+
+ This ensures a consistent set of options that are often missed but
+ should really be set.
+ """
+ if isinstance(packages, (str, bytes)):
+ packages = [packages]
+
+ env = _get_subprocess_env()
+
+ if 'DEBIAN_FRONTEND' not in env:
+ env['DEBIAN_FRONTEND'] = 'noninteractive'
+
+ cmd = ['apt-get',
+ '--option=Dpkg::Options::=--force-confold',
+ '--assume-yes',
+ 'install']
+ for attempt in range(3):
+ try:
+ check_call(cmd + packages, env=env)
+ except CalledProcessError:
+ if attempt == 2: # third attempt
+ raise
+ try:
+ # sometimes apt-get update needs to be run
+ check_call(['apt-get', 'update'], env=env)
+ except CalledProcessError:
+ # sometimes it's a dpkg lock issue
+ pass
+ sleep(5)
+ else:
+ break
+
+
+def yum_install(packages):
+ """ Installs packages with yum.
+ This function largely mimics the apt_install function for consistency.
+ """
+ if packages:
+ env = os.environ.copy()
+ cmd = ['yum', '-y', 'install']
+ for attempt in range(3):
+ try:
+ check_call(cmd + packages, env=env)
+ except CalledProcessError:
+ if attempt == 2:
+ raise
+ try:
+ check_call(['yum', 'update'], env=env)
+ except CalledProcessError:
+ pass
+ sleep(5)
+ else:
+ break
+ else:
+ pass
+
+
+def init_config_states():
+ import yaml
+ from charmhelpers.core import hookenv
+ from charms.reactive import set_state
+ from charms.reactive import toggle_state
+ config = hookenv.config()
+ config_defaults = {}
+ config_defs = {}
+ config_yaml = os.path.join(hookenv.charm_dir(), 'config.yaml')
+ if os.path.exists(config_yaml):
+ with open(config_yaml) as fp:
+ config_defs = yaml.safe_load(fp).get('options', {})
+ config_defaults = {key: value.get('default')
+ for key, value in config_defs.items()}
+ for opt in config_defs.keys():
+ if config.changed(opt):
+ set_state('config.changed')
+ set_state('config.changed.{}'.format(opt))
+ toggle_state('config.set.{}'.format(opt), config.get(opt))
+ toggle_state('config.default.{}'.format(opt),
+ config.get(opt) == config_defaults[opt])
+
+
+def clear_config_states():
+ from charmhelpers.core import hookenv, unitdata
+ from charms.reactive import remove_state
+ config = hookenv.config()
+ remove_state('config.changed')
+ for opt in config.keys():
+ remove_state('config.changed.{}'.format(opt))
+ remove_state('config.set.{}'.format(opt))
+ remove_state('config.default.{}'.format(opt))
+ unitdata.kv().flush()
diff --git a/easyrsa/lib/charms/layer/execd.py b/easyrsa/lib/charms/layer/execd.py
new file mode 100644
index 0000000..438d9a1
--- /dev/null
+++ b/easyrsa/lib/charms/layer/execd.py
@@ -0,0 +1,114 @@
+# Copyright 2014-2016 Canonical Limited.
+#
+# This file is part of layer-basic, the reactive base layer for Juju.
+#
+# charm-helpers is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License version 3 as
+# published by the Free Software Foundation.
+#
+# charm-helpers is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with charm-helpers. If not, see .
+
+# This module may only import from the Python standard library.
+import os
+import sys
+import subprocess
+import time
+
+'''
+execd/preinstall
+
+Read the layer-basic docs for more info on how to use this feature.
+https://charmsreactive.readthedocs.io/en/latest/layer-basic.html#exec-d-support
+'''
+
+
+def default_execd_dir():
+ return os.path.join(os.environ['JUJU_CHARM_DIR'], 'exec.d')
+
+
+def execd_module_paths(execd_dir=None):
+ """Generate a list of full paths to modules within execd_dir."""
+ if not execd_dir:
+ execd_dir = default_execd_dir()
+
+ if not os.path.exists(execd_dir):
+ return
+
+ for subpath in os.listdir(execd_dir):
+ module = os.path.join(execd_dir, subpath)
+ if os.path.isdir(module):
+ yield module
+
+
+def execd_submodule_paths(command, execd_dir=None):
+ """Generate a list of full paths to the specified command within exec_dir.
+ """
+ for module_path in execd_module_paths(execd_dir):
+ path = os.path.join(module_path, command)
+ if os.access(path, os.X_OK) and os.path.isfile(path):
+ yield path
+
+
+def execd_sentinel_path(submodule_path):
+ module_path = os.path.dirname(submodule_path)
+ execd_path = os.path.dirname(module_path)
+ module_name = os.path.basename(module_path)
+ submodule_name = os.path.basename(submodule_path)
+ return os.path.join(execd_path,
+ '.{}_{}.done'.format(module_name, submodule_name))
+
+
+def execd_run(command, execd_dir=None, stop_on_error=True, stderr=None):
+ """Run command for each module within execd_dir which defines it."""
+ if stderr is None:
+ stderr = sys.stdout
+ for submodule_path in execd_submodule_paths(command, execd_dir):
+ # Only run each execd once. We cannot simply run them in the
+ # install hook, as potentially storage hooks are run before that.
+ # We cannot rely on them being idempotent.
+ sentinel = execd_sentinel_path(submodule_path)
+ if os.path.exists(sentinel):
+ continue
+
+ try:
+ subprocess.check_call([submodule_path], stderr=stderr,
+ universal_newlines=True)
+ with open(sentinel, 'w') as f:
+ f.write('{} ran successfully {}\n'.format(submodule_path,
+ time.ctime()))
+ f.write('Removing this file will cause it to be run again\n')
+ except subprocess.CalledProcessError as e:
+ # Logs get the details. We can't use juju-log, as the
+ # output may be substantial and exceed command line
+ # length limits.
+ print("ERROR ({}) running {}".format(e.returncode, e.cmd),
+ file=stderr)
+ print("STDOUT<.
+
+from charmhelpers.core import hookenv
+from charmhelpers.core import unitdata
+
+from charms import reactive
+from charms.reactive import not_unless
+
+
+__all__ = ['leader_get', 'leader_set']
+
+
+@not_unless('leadership.is_leader')
+def leader_set(*args, **kw):
+ '''Change leadership settings, per charmhelpers.core.hookenv.leader_set.
+
+ Settings may either be passed in as a single dictionary, or using
+ keyword arguments. All values must be strings.
+
+ The leadership.set.{key} reactive state will be set while the
+ leadership hook environment setting remains set.
+
+ Changed leadership settings will set the leadership.changed.{key}
+ and leadership.changed states. These states will remain set until
+ the following hook.
+
+ These state changes take effect immediately on the leader, and
+ in future hooks run on non-leaders. In this way both leaders and
+ non-leaders can share handlers, waiting on these states.
+ '''
+ if args:
+ if len(args) > 1:
+ raise TypeError('leader_set() takes 1 positional argument but '
+ '{} were given'.format(len(args)))
+ else:
+ settings = dict(args[0])
+ else:
+ settings = {}
+ settings.update(kw)
+ previous = unitdata.kv().getrange('leadership.settings.', strip=True)
+
+ for key, value in settings.items():
+ if value != previous.get(key):
+ reactive.set_state('leadership.changed.{}'.format(key))
+ reactive.set_state('leadership.changed')
+ reactive.helpers.toggle_state('leadership.set.{}'.format(key),
+ value is not None)
+ hookenv.leader_set(settings)
+ unitdata.kv().update(settings, prefix='leadership.settings.')
+
+
+def leader_get(attribute=None):
+ '''Return leadership settings, per charmhelpers.core.hookenv.leader_get.'''
+ return hookenv.leader_get(attribute)
diff --git a/easyrsa/lib/debug_script.py b/easyrsa/lib/debug_script.py
new file mode 100644
index 0000000..e156924
--- /dev/null
+++ b/easyrsa/lib/debug_script.py
@@ -0,0 +1,8 @@
import os

# Base directory for debug-script output, taken from the environment.
# NOTE(review): this raises KeyError at import time when DEBUG_SCRIPT_DIR
# is unset, and the name shadows the builtin dir() within this module —
# confirm both are intentional before renaming.
dir = os.environ["DEBUG_SCRIPT_DIR"]


def open_file(path, *args, **kwargs):
    """ Open a file within the debug script dir.

    ``path`` is joined onto DEBUG_SCRIPT_DIR; any extra positional or
    keyword arguments are passed straight through to built-in open().
    """
    return open(os.path.join(dir, path), *args, **kwargs)
diff --git a/easyrsa/make_docs b/easyrsa/make_docs
new file mode 100644
index 0000000..dcd4c1f
--- /dev/null
+++ b/easyrsa/make_docs
@@ -0,0 +1,20 @@
#!.tox/py3/bin/python
"""Build this charm's API documentation with pydoc-markdown.

charmhelpers.core.hookenv.metadata is patched out so that importing the
charm code does not require a live Juju hook environment.
"""

import os
import sys
from shutil import rmtree
from unittest.mock import patch

import pydocmd.__main__


with patch('charmhelpers.core.hookenv.metadata') as metadata:
    # Make the charm's lib/ and reactive/ trees importable by pydocmd.
    sys.path.insert(0, 'lib')
    sys.path.insert(1, 'reactive')
    print(sys.argv)
    # Default to pydocmd's 'build' subcommand when none was given.
    if len(sys.argv) == 1:
        sys.argv.extend(['build'])
    pydocmd.__main__.main()
    # Remove build artifacts created as a side effect of importing/building.
    rmtree('_build')
    if os.path.exists('.unit-state.db'):
        os.remove('.unit-state.db')
diff --git a/easyrsa/metadata.yaml b/easyrsa/metadata.yaml
new file mode 100644
index 0000000..15931aa
--- /dev/null
+++ b/easyrsa/metadata.yaml
@@ -0,0 +1,35 @@
+"name": "easyrsa"
+"summary": "Delivers EasyRSA to create a Certificate Authority (CA)."
+"maintainers":
+- "Tim Van Steenburgh "
+- "George Kraft "
+- "Rye Terrell "
+- "Konstantinos Tsakalozos "
+- "Matthew Bruzek "
+"description": |
+ This charm delivers the EasyRSA application and through Juju events creates
+ a Certificate Authority (CA), server certificates, and client certificates.
+"tags":
+- "misc"
+- "tls"
+- "pki"
+- "ca"
+"series":
+- "focal"
+- "bionic"
+- "xenial"
+- "trusty"
+"provides":
+ "client":
+ "interface": "tls-certificates"
+"resources":
+ "easyrsa":
+ "type": "file"
+ "filename": "easyrsa.tgz"
+ "description": |
+ The release of the EasyRSA software you would like to use to create
+ certificate authority (CA) and other Public Key Infrastructure (PKI).
+ This charm was written using v3.0.1, so earlier versions of EasyRSA may
+ not work. You can find the releases of EasyRSA at
+ https://github.com/OpenVPN/easy-rsa/releases
+"subordinate": !!bool "false"
diff --git a/easyrsa/pydocmd.yml b/easyrsa/pydocmd.yml
new file mode 100644
index 0000000..ab3b2ef
--- /dev/null
+++ b/easyrsa/pydocmd.yml
@@ -0,0 +1,16 @@
+site_name: 'Status Management Layer'
+
+generate:
+ - status.md:
+ - charms.layer.status.WorkloadState
+ - charms.layer.status.maintenance
+ - charms.layer.status.maint
+ - charms.layer.status.blocked
+ - charms.layer.status.waiting
+ - charms.layer.status.active
+ - charms.layer.status.status_set
+
+pages:
+ - Status Management Layer: status.md
+
+gens_dir: docs
diff --git a/easyrsa/reactive/__init__.py b/easyrsa/reactive/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/easyrsa/reactive/easyrsa.py b/easyrsa/reactive/easyrsa.py
new file mode 100644
index 0000000..94ef275
--- /dev/null
+++ b/easyrsa/reactive/easyrsa.py
@@ -0,0 +1,486 @@
+import os
+import shutil
+
+from shlex import split
+from subprocess import check_call
+from subprocess import check_output
+
+from charms.reactive import hook
+from charms.reactive import when
+from charms.reactive import when_not
+from charms.reactive.helpers import data_changed
+from charms.reactive.relations import endpoint_from_flag
+from charms.reactive.flags import is_flag_set
+from charms.reactive.flags import clear_flag
+from charms.reactive.flags import set_flag
+
+from charmhelpers.core import hookenv
+from charmhelpers.core import unitdata
+from charmhelpers.core.host import chdir
+from charmhelpers.core.hookenv import resource_get
+
+from charms.leadership import leader_set
+from charms.leadership import leader_get
+
+from charms.layer import status
+
+
+charm_directory = hookenv.charm_dir()
+easyrsa_directory = os.path.join(charm_directory, 'EasyRSA')
+
+
@when_not('easyrsa.installed')
def install():
    '''Install the easy-rsa software that is used by this layer.

    Fetches the ``easyrsa`` resource from Juju, sanity-checks it, unpacks
    it into the charm directory, records the version in unitdata, links the
    versioned directory to a stable ``EasyRSA`` name, and either restores a
    pki tree saved by a previous upgrade or initialises a fresh one.  Sets
    the ``easyrsa.installed`` flag on success; otherwise blocks the unit
    with an explanatory status message and returns early.
    '''
    easyrsa_resource = None
    try:
        # Try to get the resource from Juju.
        easyrsa_resource = resource_get('easyrsa')
    except Exception as e:
        message = 'An error occurred fetching the easyrsa resource.'
        hookenv.log(message)
        hookenv.log(e)
        status.blocked(message)
        return

    if not easyrsa_resource:
        status.blocked('The easyrsa resource is missing.')
        return

    # Get the filesize in bytes.
    filesize = os.stat(easyrsa_resource).st_size
    # When the filesize is less than 10 KB we do not have a real file;
    # Juju serves a placeholder when no resource was attached.
    if filesize < 10240:
        status.blocked('The easyrsa resource is not complete.')
        return

    # Expand the archive in the charm directory creating an EasyRSA directory.
    untar = 'tar -xvzf {0} -C {1}'.format(easyrsa_resource, charm_directory)
    check_call(split(untar))

    version = get_version(easyrsa_resource)
    # Save the version in the key/value store of the charm.
    unitdata.kv().set('easyrsa-version', version)

    # Drop a stale symlink from a previous version before re-linking.
    if os.path.islink(easyrsa_directory):
        check_call(split('rm -v {0}'.format(easyrsa_directory)))

    # Link the EasyRSA version directory to a common name.
    link = 'ln -v -s {0}/EasyRSA-{1} {2}'.format(charm_directory,
                                                 version,
                                                 easyrsa_directory)
    check_call(split(link))
    # The charm pki directory contains backup of pki for upgrades.
    charm_pki_directory = os.path.join(charm_directory, 'pki')
    if os.path.isdir(charm_pki_directory):
        new_pki_directory = os.path.join(easyrsa_directory, 'pki')
        # Only copy the directory if the new_pki_directory does not exist.
        if not os.path.isdir(new_pki_directory):
            # Copy the pki to this new directory.
            shutil.copytree(charm_pki_directory, new_pki_directory,
                            symlinks=True)
        # We are done with the old charm pki directory, so delete contents.
        shutil.rmtree(charm_pki_directory)
    else:
        # Create new pki.
        with chdir(easyrsa_directory):
            # NOTE(review): check_call does not invoke a shell, so the
            # literal '2>&1' is passed to easyrsa as an extra argument
            # rather than acting as a redirection — confirm intentional.
            check_call(split('./easyrsa --batch init-pki 2>&1'))
    set_flag('easyrsa.installed')
+
+
@when('easyrsa.installed')
def set_easyrsa_version():
    '''Publish the installed EasyRSA version as the workload version.

    The version string was recorded in the unit's key/value store by
    install(); surface it to ``juju status`` here.
    '''
    hookenv.application_version_set(unitdata.kv().get('easyrsa-version'))
+
+
@when('easyrsa.installed')
@when_not('easyrsa.configured')
def configure_easyrsa():
    '''A transitional state to allow modifications to configuration before
    generating the certificates and working with PKI.

    Runs once after install: enables CSR extension copying in the OpenSSL
    config, adds clientAuth to the server x509 type, then raises the
    ``easyrsa.configured`` flag so it is not repeated.
    '''
    hookenv.log('Configuring OpenSSL to copy extensions.')
    configure_copy_extensions()
    hookenv.log('Configuring X509 server extensions with clientAuth.')
    configure_client_authorization()
    set_flag('easyrsa.configured')
+
+
def configure_copy_extensions():
    '''Update the EasyRSA configuration with the capacity to copy the
    extensions through to the resulting certificates.

    Inserts ``copy_extensions = copy`` directly below the
    ``[ CA_default ]`` section header of ``openssl-1.0.cnf``.  Idempotent:
    nothing is written when the key is already present.
    '''
    # Create an absolute path to the file which will not be impacted by cwd.
    openssl_file = os.path.join(easyrsa_directory, 'openssl-1.0.cnf')
    # Update EasyRSA configuration with the capacity to copy CSR Requested
    # Extensions through to the resulting certificate. This can be tricky,
    # and the implications are not fully clear on this.
    with open(openssl_file, 'r') as f:
        conf = f.readlines()
    # When the copy_extensions key is not in the configuration.
    if 'copy_extensions = copy\n' not in conf:
        for idx, line in enumerate(conf):
            if '[ CA_default ]' in line:
                # Insert a new line with the copy_extensions key set to copy.
                conf.insert(idx + 1, "copy_extensions = copy\n")
                # Stop after the first match: continuing to iterate over a
                # list we just mutated would shift the remaining indices
                # (and a repeated header would cause duplicate inserts).
                break
        with open(openssl_file, 'w+') as f:
            f.writelines(conf)
+
+
def configure_client_authorization():
    '''Add clientAuth to the extendedKeyUsage of EasyRSA's "server" x509
    type.

    easyrsa's default OpenSSL configuration does not support client
    authentication; after this change, issued server certificates carry
    both clientAuth and serverAuth.
    '''
    # Absolute path so the current directory does not affect the result.
    openssl_config = os.path.join(easyrsa_directory, 'x509-types/server')
    hookenv.log('Updating {0}'.format(openssl_config))

    # Read the X509 server extension file in.
    with open(openssl_config, 'r') as f:
        original_lines = f.readlines()

    # Rewrite the extendedKeyUsage line; replace() is a no-op elsewhere.
    updated_lines = [
        line.replace('extendedKeyUsage = serverAuth',
                     'extendedKeyUsage = clientAuth, serverAuth')
        for line in original_lines
    ]

    # Write the configuration file back out.
    with open(openssl_config, 'w+') as f:
        f.writelines(updated_lines)
+
+
@when('easyrsa.configured')
@when('leadership.is_leader')
@when_not('easyrsa.certificate.authority.available')
@when_not('upgrade.series.in-progress')
def create_certificate_authority():
    '''Ensure a Certificate Authority exists on the leader.

    If the CA certificate, private key and serial were previously stored
    in leadership data, restore them into the pki tree (recreating the
    bookkeeping files EasyRSA expects); otherwise generate a new
    self-signed CA whose Common Name is this unit's public address and
    record it in leadership data.  Finally install the CA as a trusted
    system CA and raise ``easyrsa.certificate.authority.available``.
    '''
    # Paths are relative to the EasyRSA directory entered below.
    ca_file = 'pki/ca.crt'
    key_file = 'pki/private/ca.key'
    serial_file = 'pki/serial'

    with chdir(easyrsa_directory):
        if leader_get('certificate_authority') and \
                leader_get('certificate_authority_key') and \
                leader_get('certificate_authority_serial'):
            hookenv.log('Recovering CA from controller')
            certificate_authority = \
                leader_get('certificate_authority')
            certificate_authority_key = \
                leader_get('certificate_authority_key')
            certificate_authority_serial = \
                leader_get('certificate_authority_serial')

            # Write the CA from existing leadership data.
            with open(ca_file, 'w') as f_out:
                f_out.write(certificate_authority)

            # Write the private key from existing leadership data.
            with open(key_file, 'w') as f_out:
                f_out.write(certificate_authority_key)

            # Write the serial from existing leadership data.
            with open(serial_file, 'w') as f_out:
                f_out.write(certificate_authority_serial)

            # Recreate the empty index and directories EasyRSA expects so
            # issuing commands work against the restored CA.
            with open('pki/index.txt', 'w') as f_out:
                pass
            os.makedirs('pki/issued')
            os.makedirs('pki/certs_by_serial')

        else:
            hookenv.log('Creating new CA')
            # The Common Name (CN) for a certificate
            # must be an IP or hostname.
            cn = hookenv.unit_public_ip()
            # Create a self signed CA with the CN, stored in pki/ca.crt.
            # NOTE(review): no shell is involved, so the literal '2>&1' is
            # handed to easyrsa as an argument, not a redirection.
            build_ca = \
                './easyrsa --batch "--req-cn={0}" build-ca nopass 2>&1'
            # Build a self signed Certificate Authority.
            check_call(split(build_ca.format(cn)))

            # Read the CA so it can be returned in leader data.
            with open(ca_file, 'r') as stream:
                certificate_authority = stream.read()

            # Read the private key so it can be set in leader data.
            with open(key_file, 'r') as stream:
                certificate_authority_key = stream.read()

            with open(serial_file, 'r') as stream:
                certificate_authority_serial = stream.read()

            # Set these values on the leadership data.
            leader_set({
                'certificate_authority': certificate_authority})
            leader_set({
                'certificate_authority_key': certificate_authority_key})
            leader_set({
                'certificate_authority_serial': certificate_authority_serial})

        # Install the CA on this system as a trusted CA.
        install_ca(certificate_authority)
        # Fixed a typo in the user-visible message ("Certificiate").
        status.active('Certificate Authority available')

    set_flag('easyrsa.certificate.authority.available')
+
+
@when('easyrsa.certificate.authority.available')
@when_not('upgrade.series.in-progress')
def message():
    '''Publish an active status line telling the user the CA is usable,
    noting whether a client relation is currently connected.'''
    connected = is_flag_set('client.available')
    status.active('Certificate Authority connected.' if connected
                  else 'Certificate Authority ready.')
+
+
@when('client.available', 'easyrsa.certificate.authority.available')
@when('leadership.is_leader')
def send_ca():
    '''Publish the CA certificate from leadership data onto the
    tls-certificates relation for connected clients.'''
    tls = endpoint_from_flag('client.available')
    tls.set_ca(leader_get('certificate_authority'))
+
+
@when('leadership.is_leader',
      'easyrsa.certificate.authority.available',
      'client.available')
@when_not('easyrsa.global-client-cert.created')
def create_global_client_cert():
    """Ensure the legacy shared client certificate exists.

    Kept only for backwards compatibility with older tls-certificates
    clients.  Sharing one certificate between clients is poor security /
    design, but some clients depend on it (and some, like etcd, merely
    block on the flag it triggers without using the cert), so it must
    still be provided.
    """
    cert = leader_get('client_certificate')
    key = leader_get('client_key')
    if cert and key:
        hookenv.log("found global client cert on leadership "
                    "data, not generating...")
    else:
        hookenv.log("Unable to find global client cert on "
                    "leadership data, generating...")
        cert, key = create_client_certificate()
        # Persist the pair so peers and future hooks can reuse it.
        leader_set({'client_certificate': cert})
        leader_set({'client_key': key})
    set_flag('easyrsa.global-client-cert.created')
+
+
@when('leadership.is_leader',
      'easyrsa.global-client-cert.created',
      'client.available')
def publish_global_client_cert():
    '''Re-publish the shared (legacy) client certificate and key.

    Runs on every matching hook rather than once, so clients that join
    later still receive the certificate.
    '''
    tls = endpoint_from_flag('client.available')
    shared_cert = leader_get('client_certificate')
    shared_key = leader_get('client_key')
    tls.set_client_cert(shared_cert, shared_key)
+
+
@when('client.server.certs.requested', 'easyrsa.configured')
def create_server_cert():
    '''Generate and publish a server certificate for every pending
    request on the tls-certificates relation.'''
    tls = endpoint_from_flag('client.server.certs.requested')

    # Handle each new request in turn.
    for request in tls.new_server_requests:
        common_name = request.common_name
        # The certificate is generated for — and named after — the CN.
        cert, key = create_server_certificate(common_name,
                                              request.sans,
                                              common_name)
        # Hand the generated pair back on the relation.
        request.set_cert(cert, key)
+
+
@when('client.client.certs.requested', 'easyrsa.configured')
def create_client_cert():
    '''Generate and publish a client certificate for every pending
    request on the tls-certificates relation.'''
    tls = endpoint_from_flag('client.client.certs.requested')

    # Handle each new request in turn.
    for request in tls.new_client_requests:
        # The requested common name doubles as the certificate name.
        cert, key = create_client_certificate(request.common_name)
        # Hand the generated pair back on the relation.
        request.set_cert(cert, key)
+
+
@hook('upgrade-charm')
def upgrade():
    '''An upgrade has been triggered.

    Backfills the CA serial into leadership data when upgrading from a
    version that did not record it, backs up EasyRSA/pki into the charm
    directory (so install() can restore it), and clears the installed /
    configured flags to force a re-run of install and configure.
    '''
    pki_directory = os.path.join(easyrsa_directory, 'pki')
    if os.path.isdir(pki_directory):
        # specific handling if the upgrade is from a previous version
        # where certificate_authority_serial is not set at install
        serial_file = 'serial'
        with chdir(pki_directory):
            # if the ca and ca_key are set and serial is not
            # set this to serial in the pki directory
            if os.path.isfile(serial_file) and \
                    leader_get('certificate_authority') and \
                    leader_get('certificate_authority_key') and not \
                    leader_get('certificate_authority_serial'):
                with open(serial_file, 'r') as stream:
                    ca_serial = stream.read()
                # set the previously unset certificate authority serial
                leader_set({
                    'certificate_authority_serial': ca_serial})

        charm_pki_directory = os.path.join(charm_directory, 'pki')
        # When the charm pki directory exists, it is stale, remove it.
        if os.path.isdir(charm_pki_directory):
            shutil.rmtree(charm_pki_directory)
        # Copy the EasyRSA/pki to the charm pki directory.
        shutil.copytree(pki_directory, charm_pki_directory, symlinks=True)
        # Force install() and configure_easyrsa() to run again after the
        # new charm code is in place.
        clear_flag('easyrsa.installed')
        clear_flag('easyrsa.configured')
+
+
@hook('pre-series-upgrade')
def pre_series_upgrade():
    '''Block the unit while an operating-system series upgrade is underway.'''
    status.blocked('Series upgrade in progress')
+
+
def remove_file_if_exists(filename):
    '''Delete ``filename``, silently succeeding when it is already absent.'''
    try:
        os.remove(filename)
    except FileNotFoundError:
        # Already gone — nothing to do.
        pass
+
+
def create_server_certificate(cn, san_list, name=None):
    '''Return a newly created server certificate and server key from a
    common name, list of Subject Alternate Names, and the certificate name.

    An existing certificate is reused when the (cn, sans, name) triple is
    unchanged; when it changed, the old certificate is revoked and its
    files removed before a new one is built.  ``name`` defaults to
    'server'.
    '''
    if name is None:
        name = 'server'
    server_cert = None
    server_key = None
    with chdir(easyrsa_directory):
        # Create the path to the server certificate.
        cert_file = 'pki/issued/{0}.crt'.format(name)
        # Create the path to the server key.
        key_file = 'pki/private/{0}.key'.format(name)
        # Create the path to the request file
        req_file = 'pki/reqs/{0}.req'.format(name)
        # Get a string compatible with easyrsa for the subject-alt-names.
        sans = get_sans(san_list)
        sans_arg = '--subject-alt-name={}'.format(sans) if sans else ''
        this_cert = {'sans': sans, 'cn': cn, 'name': name}
        # data_changed also records the new value for the next comparison.
        changed = data_changed('server_cert.' + name, this_cert)
        cert_exists = os.path.isfile(cert_file) and os.path.isfile(key_file)
        # Do not regenerate the server certificate if it already exists
        # and the data hasn't changed.
        if changed and cert_exists:
            # We need to revoke the existing cert and regenerate it
            revoke = './easyrsa --batch revoke {0}'.format(name)
            check_call(split(revoke))
            # nuke old files if they exist
            remove_file_if_exists(cert_file)
            remove_file_if_exists(key_file)
            remove_file_if_exists(req_file)
        if changed or not cert_exists:
            # Create a server certificate for the server based on the CN.
            # NOTE(review): no shell is involved, so the literal '2>&1' is
            # passed to easyrsa as an argument, not a redirection.
            server = './easyrsa --batch --req-cn={0} {1} ' \
                     'build-server-full {2} nopass 2>&1'.format(cn,
                                                               sans_arg,
                                                               name)
            check_call(split(server))
        # Read the server certificate from the file system.
        with open(cert_file, 'r') as stream:
            server_cert = stream.read()
        # Read the server key from the file system.
        with open(key_file, 'r') as stream:
            server_key = stream.read()
    return server_cert, server_key
+
+
def create_client_certificate(name='client'):
    '''Return a newly created client certificate and client key, by name.

    Reuses an existing certificate/key pair when both files are already
    present; otherwise builds a new pair with easyrsa.
    '''
    client_cert = None
    client_key = None
    with chdir(easyrsa_directory):
        # Create a path to the client certificate.
        cert_file = 'pki/issued/{0}.crt'.format(name)
        # Create a path to the client key.
        key_file = 'pki/private/{0}.key'.format(name)
        # Do not regenerate the client certificate if it already exists.
        # NOTE(review): if exactly one of the two files exists, generation
        # is skipped and the read below will raise — confirm that state
        # cannot occur in practice.
        if not os.path.isfile(cert_file) and not os.path.isfile(key_file):
            # Create a client certificate and key.
            check_call(['./easyrsa', 'build-client-full', name, 'nopass'])
        # Read the client certificate from the file system.
        with open(cert_file, 'r') as stream:
            client_cert = stream.read()
        # Read the client key from the file system.
        with open(key_file, 'r') as stream:
            client_key = stream.read()
    return client_cert, client_key
+
+
def install_ca(certificate_authority):
    '''Install a certificate authority on the system by calling the
    update-ca-certificates command.

    The CA PEM text is written to /usr/local/share/ca-certificates under
    this application's name before the trust store is refreshed.
    '''
    name = hookenv.service_name()
    ca_file = '/usr/local/share/ca-certificates/{0}.crt'.format(name)
    hookenv.log('Writing CA to {0}'.format(ca_file))
    # Write the contents of certificate authority to the file.
    with open(ca_file, 'w') as fp:
        fp.write(certificate_authority)
    # Update the trusted CAs on this system.
    check_call(['update-ca-certificates'])
    message = 'Generated ca-certificates.crt for {0}'.format(name)
    hookenv.log(message)
+
+
def get_sans(address_list=()):
    '''Return a string suitable for the easy-rsa subjectAltNames.

    Each address becomes ``IP:<addr>`` when it parses as an IP address
    and ``DNS:<addr>`` otherwise; entries are comma-joined.  Returns ''
    for an empty input.

    The default was changed from a shared mutable list (``[]``) to an
    immutable tuple; caller-visible behaviour is unchanged.
    '''
    sans = []
    for address in address_list:
        # _is_ip distinguishes literal IPs from DNS names.
        label = 'IP' if _is_ip(address) else 'DNS'
        sans.append('{0}:{1}'.format(label, address))
    return ','.join(sans)
+
+
def get_version(path):
    '''Return the version of EasyRSA by investigating the tar file.

    The archive's first entry is its top-level directory, named
    ``EasyRSA-<version>``; the text after the hyphen is the version.
    '''
    # List the archive contents; the first line is the top-level directory.
    listing = check_output(split('tar -tf {0}'.format(path))).decode('utf-8')
    top_entry = listing.splitlines()[0].replace('/', '')
    # 'EasyRSA-3.0.1' -> '3.0.1'
    return top_entry.split('-')[1]
+
+
+def _is_ip(address):
+ '''Return True if the address is an IP address, false otherwise.'''
+ import ipaddress
+ try:
+ # This method will raise a ValueError if argument is not an IP address.
+ ipaddress.ip_address(address)
+ return True
+ except ValueError:
+ return False
diff --git a/easyrsa/reactive/leadership.py b/easyrsa/reactive/leadership.py
new file mode 100644
index 0000000..29c6f3a
--- /dev/null
+++ b/easyrsa/reactive/leadership.py
@@ -0,0 +1,68 @@
+# Copyright 2015-2016 Canonical Ltd.
+#
+# This file is part of the Leadership Layer for Juju.
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 3, as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranties of
+# MERCHANTABILITY, SATISFACTORY QUALITY, or FITNESS FOR A PARTICULAR
+# PURPOSE. See the GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+from charmhelpers.core import hookenv
+from charmhelpers.core import unitdata
+
+from charms import reactive
+from charms.leadership import leader_get, leader_set
+
+
+__all__ = ['leader_get', 'leader_set'] # Backwards compatibility
+
+
def initialize_leadership_state():
    '''Initialize leadership.* states from the hook environment.

    Invoked by hookenv.atstart() so states are available in
    @hook decorated handlers.

    Toggles leadership.is_leader, and — by comparing current leader
    settings with the copy cached in unitdata under the
    ``leadership.settings.`` prefix — the per-key leadership.set.{key}
    and leadership.changed.{key} states plus the aggregate
    leadership.changed state.
    '''
    is_leader = hookenv.is_leader()
    if is_leader:
        hookenv.log('Initializing Leadership Layer (is leader)')
    else:
        hookenv.log('Initializing Leadership Layer (is follower)')

    reactive.helpers.toggle_state('leadership.is_leader', is_leader)

    previous = unitdata.kv().getrange('leadership.settings.', strip=True)
    current = hookenv.leader_get()

    # Handle deletions: keys that disappeared from the leader settings are
    # represented as None so their states get cleared below.
    for key in set(previous.keys()) - set(current.keys()):
        current[key] = None

    any_changed = False
    for key, value in current.items():
        reactive.helpers.toggle_state('leadership.changed.{}'.format(key),
                                      value != previous.get(key))
        if value != previous.get(key):
            any_changed = True
        reactive.helpers.toggle_state('leadership.set.{}'.format(key),
                                      value is not None)
    reactive.helpers.toggle_state('leadership.changed', any_changed)

    # Cache the now-current settings for comparison in the next hook.
    unitdata.kv().update(current, prefix='leadership.settings.')
+
+
# Module-level guard, executed at import time during reactive discovery.
# Per https://github.com/juju-solutions/charms.reactive/issues/33,
# this module may be imported multiple times so ensure the
# initialization hook is only registered once. I have to piggy back
# onto the namespace of a module imported before reactive discovery
# to do this.
if not hasattr(reactive, '_leadership_registered'):
    hookenv.atstart(initialize_leadership_state)
    reactive._leadership_registered = True
diff --git a/easyrsa/reactive/status.py b/easyrsa/reactive/status.py
new file mode 100644
index 0000000..2f33f3f
--- /dev/null
+++ b/easyrsa/reactive/status.py
@@ -0,0 +1,4 @@
from charms import layer


# Initialise the status layer's machinery as soon as reactive discovery
# imports this module.
layer.status._initialize()
diff --git a/easyrsa/requirements.txt b/easyrsa/requirements.txt
new file mode 100644
index 0000000..55543d9
--- /dev/null
+++ b/easyrsa/requirements.txt
@@ -0,0 +1,3 @@
+mock
+flake8
+pytest
diff --git a/easyrsa/revision b/easyrsa/revision
new file mode 100644
index 0000000..c227083
--- /dev/null
+++ b/easyrsa/revision
@@ -0,0 +1 @@
+0
\ No newline at end of file
diff --git a/easyrsa/tox.ini b/easyrsa/tox.ini
new file mode 100644
index 0000000..e2b841a
--- /dev/null
+++ b/easyrsa/tox.ini
@@ -0,0 +1,14 @@
+[tox]
+envlist = py3
+skipsdist = true
+
+[testenv]
+basepython=python3
+envdir={toxworkdir}/py3
+deps=
+ pytest
+ charms.reactive
+ pydoc-markdown
+
+[testenv:docs]
+commands=python make_docs
diff --git a/easyrsa/version b/easyrsa/version
new file mode 100644
index 0000000..1dea0b1
--- /dev/null
+++ b/easyrsa/version
@@ -0,0 +1 @@
+e247aeff
\ No newline at end of file
diff --git a/easyrsa/wheelhouse.txt b/easyrsa/wheelhouse.txt
new file mode 100644
index 0000000..c2337ba
--- /dev/null
+++ b/easyrsa/wheelhouse.txt
@@ -0,0 +1,16 @@
+# layer:basic
+# pip is pinned to <19.0 to avoid https://github.com/pypa/pip/issues/6164
+# even with installing setuptools before upgrading pip ends up with pip seeing
+# the older setuptools at the system level if include_system_packages is true
+pip>=18.1,<19.0
+# pin Jinja2 and PyYAML to the last versions supporting python 3.4 for trusty
+Jinja2<=2.10.1
+PyYAML<=5.2
+setuptools<42
+setuptools-scm<=1.17.0
+charmhelpers>=0.4.0,<1.0.0
+charms.reactive>=0.1.0,<2.0.0
+wheel<0.34
+# pin netaddr to avoid pulling importlib-resources
+netaddr<=0.7.19
+
diff --git a/easyrsa/wheelhouse/Jinja2-2.10.1.tar.gz b/easyrsa/wheelhouse/Jinja2-2.10.1.tar.gz
new file mode 100644
index 0000000..ffd1054
Binary files /dev/null and b/easyrsa/wheelhouse/Jinja2-2.10.1.tar.gz differ
diff --git a/easyrsa/wheelhouse/MarkupSafe-1.1.1.tar.gz b/easyrsa/wheelhouse/MarkupSafe-1.1.1.tar.gz
new file mode 100644
index 0000000..a6dad8e
Binary files /dev/null and b/easyrsa/wheelhouse/MarkupSafe-1.1.1.tar.gz differ
diff --git a/easyrsa/wheelhouse/PyYAML-5.2.tar.gz b/easyrsa/wheelhouse/PyYAML-5.2.tar.gz
new file mode 100644
index 0000000..666d12a
Binary files /dev/null and b/easyrsa/wheelhouse/PyYAML-5.2.tar.gz differ
diff --git a/easyrsa/wheelhouse/Tempita-0.5.2.tar.gz b/easyrsa/wheelhouse/Tempita-0.5.2.tar.gz
new file mode 100644
index 0000000..755befc
Binary files /dev/null and b/easyrsa/wheelhouse/Tempita-0.5.2.tar.gz differ
diff --git a/easyrsa/wheelhouse/charmhelpers-0.20.21.tar.gz b/easyrsa/wheelhouse/charmhelpers-0.20.21.tar.gz
new file mode 100644
index 0000000..ca65d07
Binary files /dev/null and b/easyrsa/wheelhouse/charmhelpers-0.20.21.tar.gz differ
diff --git a/easyrsa/wheelhouse/charms.reactive-1.4.1.tar.gz b/easyrsa/wheelhouse/charms.reactive-1.4.1.tar.gz
new file mode 100644
index 0000000..03bc1fe
Binary files /dev/null and b/easyrsa/wheelhouse/charms.reactive-1.4.1.tar.gz differ
diff --git a/easyrsa/wheelhouse/netaddr-0.7.19.tar.gz b/easyrsa/wheelhouse/netaddr-0.7.19.tar.gz
new file mode 100644
index 0000000..cc31d9d
Binary files /dev/null and b/easyrsa/wheelhouse/netaddr-0.7.19.tar.gz differ
diff --git a/easyrsa/wheelhouse/pbr-5.6.0.tar.gz b/easyrsa/wheelhouse/pbr-5.6.0.tar.gz
new file mode 100644
index 0000000..0d5c965
Binary files /dev/null and b/easyrsa/wheelhouse/pbr-5.6.0.tar.gz differ
diff --git a/easyrsa/wheelhouse/pip-18.1.tar.gz b/easyrsa/wheelhouse/pip-18.1.tar.gz
new file mode 100644
index 0000000..a18192d
Binary files /dev/null and b/easyrsa/wheelhouse/pip-18.1.tar.gz differ
diff --git a/easyrsa/wheelhouse/pyaml-20.4.0.tar.gz b/easyrsa/wheelhouse/pyaml-20.4.0.tar.gz
new file mode 100644
index 0000000..0d5fd76
Binary files /dev/null and b/easyrsa/wheelhouse/pyaml-20.4.0.tar.gz differ
diff --git a/easyrsa/wheelhouse/setuptools-41.6.0.zip b/easyrsa/wheelhouse/setuptools-41.6.0.zip
new file mode 100644
index 0000000..3345759
Binary files /dev/null and b/easyrsa/wheelhouse/setuptools-41.6.0.zip differ
diff --git a/easyrsa/wheelhouse/setuptools_scm-1.17.0.tar.gz b/easyrsa/wheelhouse/setuptools_scm-1.17.0.tar.gz
new file mode 100644
index 0000000..43b16c7
Binary files /dev/null and b/easyrsa/wheelhouse/setuptools_scm-1.17.0.tar.gz differ
diff --git a/easyrsa/wheelhouse/six-1.15.0.tar.gz b/easyrsa/wheelhouse/six-1.15.0.tar.gz
new file mode 100644
index 0000000..63329e4
Binary files /dev/null and b/easyrsa/wheelhouse/six-1.15.0.tar.gz differ
diff --git a/easyrsa/wheelhouse/wheel-0.33.6.tar.gz b/easyrsa/wheelhouse/wheel-0.33.6.tar.gz
new file mode 100644
index 0000000..c922c4e
Binary files /dev/null and b/easyrsa/wheelhouse/wheel-0.33.6.tar.gz differ
diff --git a/etcd/.build.manifest b/etcd/.build.manifest
new file mode 100644
index 0000000..d4b891e
--- /dev/null
+++ b/etcd/.build.manifest
@@ -0,0 +1,966 @@
+{
+ "layers": [
+ {
+ "branch": "refs/heads/master\nrefs/heads/stable",
+ "rev": "fcdcea4e5de3e1556c24e6704607862d0ba00a56",
+ "url": "layer:options"
+ },
+ {
+ "branch": "refs/heads/master\nrefs/heads/stable",
+ "rev": "623e69c7b432456fd4364f6e1835424fd6b5425e",
+ "url": "layer:basic"
+ },
+ {
+ "branch": "refs/heads/master\nrefs/heads/stable",
+ "rev": "023c67941e18663a4df49f53edba809f43ba5069",
+ "url": "layer:cis-benchmark"
+ },
+ {
+ "branch": "refs/heads/master\nrefs/heads/stable",
+ "rev": "527dd64fc4b9a6b0f8d80a3c2c0b865155050275",
+ "url": "layer:debug"
+ },
+ {
+ "branch": "refs/heads/master\nrefs/heads/stable",
+ "rev": "cc5bd3f49b2fa5e6c3ab2336763c313ec8bf083f",
+ "url": "layer:leadership"
+ },
+ {
+ "branch": "refs/heads/master\nrefs/heads/stable",
+ "rev": "47dfcd4920ef6317850a4837ef0057ab0092a18e",
+ "url": "layer:nagios"
+ },
+ {
+ "branch": "refs/heads/master\nrefs/heads/stable",
+ "rev": "fb46dec78d390571753d21876bbba689bbbca9e4",
+ "url": "layer:tls-client"
+ },
+ {
+ "branch": "refs/heads/master\nrefs/heads/stable",
+ "rev": "85d7cc4f7180d19df20e264358e920004cec192b",
+ "url": "layer:snap"
+ },
+ {
+ "branch": "refs/heads/master\nrefs/heads/stable",
+ "rev": "b60102068c6f0ddbeaf8a308549a3e88cfa35688",
+ "url": "layer:cdk-service-kicker"
+ },
+ {
+ "branch": "refs/heads/master\nrefs/heads/stable",
+ "rev": "a7d7b6423db37a47611310039e6ed1929c0a2eab",
+ "url": "layer:status"
+ },
+ {
+ "branch": "refs/heads/stable",
+ "rev": "53d38096a6de8d4bcc18a2cb64a94d904c496660",
+ "url": "etcd"
+ },
+ {
+ "branch": "refs/heads/master\nrefs/heads/stable",
+ "rev": "2e0e1fdea6d83b55078200aacb537d60013ec5bc",
+ "url": "interface:nrpe-external-master"
+ },
+ {
+ "branch": "refs/heads/master\nrefs/heads/stable",
+ "rev": "d9850016d930a6d507b9fd45e2598d327922b140",
+ "url": "interface:tls-certificates"
+ },
+ {
+ "branch": "refs/heads/master\nrefs/heads/stable",
+ "rev": "44f244cbd08b86bf2b68bd71c3fb34c7c070c382",
+ "url": "interface:etcd"
+ },
+ {
+ "branch": "refs/heads/master\nrefs/heads/stable",
+ "rev": "71b16123e38d9f8e2a38558e4f057f5071e56daa",
+ "url": "interface:etcd-proxy"
+ }
+ ],
+ "signatures": {
+ ".build.manifest": [
+ "build",
+ "dynamic",
+ "unchecked"
+ ],
+ ".github/workflows/tox.yaml": [
+ "etcd",
+ "static",
+ "c323f9ca1fe5bf1369f80d8958be49ad8fd2f6635528865017c357591d31542e"
+ ],
+ ".gitignore": [
+ "etcd",
+ "static",
+ "afd9068d500f350297456d66242056a905510c335ed0e40319b8c0c81eb7cd9f"
+ ],
+ ".travis.yml": [
+ "layer:cis-benchmark",
+ "static",
+ "b6dbe144aa288b8a89caf1119b9835b407b234c9b32a1c81013b12a0593a8be2"
+ ],
+ ".travis/profile-update.yaml": [
+ "layer:basic",
+ "static",
+ "731e20aa59bf61c024d317ad630e478301a9386ccc0afe56e6c1c09db07ac83b"
+ ],
+ "CONTRIBUTING.md": [
+ "etcd",
+ "static",
+ "dc83e4e868d1dbe5b1404faf736d556895a7d4ca9be3bff2d1fdebc0036993d6"
+ ],
+ "LICENSE": [
+ "etcd",
+ "static",
+ "58d1e17ffe5109a7ae296caafcadfdbe6a7d176f0bc4ab01e12a689b0499d8bd"
+ ],
+ "Makefile": [
+ "etcd",
+ "static",
+ "4ccdffd0b1232b3451c89c8f48c456a4b831341189e3a913c4ca6ed5f309e261"
+ ],
+ "README.md": [
+ "etcd",
+ "static",
+ "3fa439ec5175369f29cba6dc4624239d615c74cba6933591b900c30c91d7928d"
+ ],
+ "actions.yaml": [
+ "etcd",
+ "dynamic",
+ "492793576dffc7daf5a68f0bb64fc8e12263f64727e3a6ee2da2c87ce528211d"
+ ],
+ "actions/actions.py": [
+ "etcd",
+ "static",
+ "1b339ab870980eb1bcf936849132385fa7922c790daed50a138b2e221544043e"
+ ],
+ "actions/alarm-disarm": [
+ "etcd",
+ "static",
+ "1b339ab870980eb1bcf936849132385fa7922c790daed50a138b2e221544043e"
+ ],
+ "actions/alarm-list": [
+ "etcd",
+ "static",
+ "1b339ab870980eb1bcf936849132385fa7922c790daed50a138b2e221544043e"
+ ],
+ "actions/cis-benchmark": [
+ "layer:cis-benchmark",
+ "static",
+ "fd3c1b8ba478b7f933605897ace8ae9f3ee102d9992f46f1e36d95eb1b094b84"
+ ],
+ "actions/compact": [
+ "etcd",
+ "static",
+ "1b339ab870980eb1bcf936849132385fa7922c790daed50a138b2e221544043e"
+ ],
+ "actions/debug": [
+ "layer:debug",
+ "static",
+ "db0a42dae4c5045b2c06385bf22209dfe0e2ded55822ef847d84b01d9ff2b046"
+ ],
+ "actions/defrag": [
+ "etcd",
+ "static",
+ "1b339ab870980eb1bcf936849132385fa7922c790daed50a138b2e221544043e"
+ ],
+ "actions/health": [
+ "etcd",
+ "static",
+ "1b339ab870980eb1bcf936849132385fa7922c790daed50a138b2e221544043e"
+ ],
+ "actions/install": [
+ "etcd",
+ "static",
+ "21c4fd80371a6141c2f23fad992dc7e22d82ce7ca425d1c7a95539e1ceb87873"
+ ],
+ "actions/package-client-credentials": [
+ "etcd",
+ "static",
+ "8ac1782a8beffb4fa2acc6e7b7766bad020b00e1f68b4005bbfc19871d3a1e8c"
+ ],
+ "actions/restore": [
+ "etcd",
+ "static",
+ "42a3f7920a575d0544172ea3e84d8cbb34472974cde6c57b8b289429f15f0c6b"
+ ],
+ "actions/restore.py": [
+ "etcd",
+ "static",
+ "663d9b6e919d563e925c1123c84e7ff58903e34b8543e073c97cdaf9ede7ecd4"
+ ],
+ "actions/snap-upgrade": [
+ "etcd",
+ "static",
+ "bc24d96c526178136aa9590a87abb019a47d9fecd7b0533fea506a2c3c4fe97b"
+ ],
+ "actions/snap-upgrade.py": [
+ "etcd",
+ "static",
+ "4178275ff6c66fc590d14ff0e219f30d65d6eae6b7d262071ea859ff11ab343d"
+ ],
+ "actions/snapshot": [
+ "etcd",
+ "static",
+ "bcc59597a1cbc4a5d6a7679b154c6e30317853e4ad9fdd09ee0152dda2a9df59"
+ ],
+ "bin/charm-env": [
+ "layer:basic",
+ "static",
+ "fb6a20fac4102a6a4b6ffe903fcf666998f9a95a3647e6f9af7a1eeb44e58fd5"
+ ],
+ "bin/layer_option": [
+ "layer:options",
+ "static",
+ "e959bf29da4c5edff28b2602c24113c4df9e25cdc9f2aa3b5d46c8577b2a40cc"
+ ],
+ "config.yaml": [
+ "etcd",
+ "dynamic",
+ "32906689e87266fb2a85995aeb8ab6120d9735b38a2b1a13f29d9319f1a03854"
+ ],
+ "copyright": [
+ "etcd",
+ "static",
+ "b2c9a95e9606d77b139aed037ad5afd36d0b58268465c0e6b7960a562a322231"
+ ],
+ "copyright.layer-basic": [
+ "layer:basic",
+ "static",
+ "f6740d66fd60b60f2533d9fcb53907078d1e20920a0219afce7182e2a1c97629"
+ ],
+ "copyright.layer-leadership": [
+ "layer:leadership",
+ "static",
+ "8ce407829378fc0f72ce44c7f624e4951c7ccb3db1cfb949bee026b701728cc9"
+ ],
+ "copyright.layer-nagios": [
+ "layer:nagios",
+ "static",
+ "47b2363574909e748bcc471d9004780ac084b301c154905654b5b6f088474749"
+ ],
+ "copyright.layer-options": [
+ "layer:options",
+ "static",
+ "f6740d66fd60b60f2533d9fcb53907078d1e20920a0219afce7182e2a1c97629"
+ ],
+ "copyright.layer-snap": [
+ "layer:snap",
+ "static",
+ "c71d239df91726fc519c6eb72d318ec65820627232b2f796219e87dcf35d0ab4"
+ ],
+ "copyright.layer-status": [
+ "layer:status",
+ "static",
+ "7c0e36e618a8544faaaa3f8e0533c2f1f4a18bcacbdd8b99b537742e6b587d58"
+ ],
+ "debug-scripts/charm-unitdata": [
+ "layer:debug",
+ "static",
+ "c952b9d31f3942e4e722cb3e70f5119707b69b8e76cc44e2e906bc6d9aef49b7"
+ ],
+ "debug-scripts/etcd": [
+ "etcd",
+ "static",
+ "3d6fbb2eba196a9b6c3f76c3f7b22ab10ddd27892501db24cdda01e6cb9afdbf"
+ ],
+ "debug-scripts/filesystem": [
+ "layer:debug",
+ "static",
+ "d29cc8687f4422d024001c91b1ac756ee6bf8a2a125bc98db1199ba775eb8fd7"
+ ],
+ "debug-scripts/juju-logs": [
+ "layer:debug",
+ "static",
+ "d260b35753a917368cb8c64c1312546a0a40ef49cba84c75bc6369549807c55e"
+ ],
+ "debug-scripts/juju-network-get": [
+ "layer:debug",
+ "static",
+ "6d849a1f8e6569bd0d5ea38299f7937cb8b36a5f505e3532f6c756eabeb8b6c5"
+ ],
+ "debug-scripts/network": [
+ "layer:debug",
+ "static",
+ "714afae5dcb45554ff1f05285501e3b7fcc656c8de51217e263b93dab25a9d2e"
+ ],
+ "debug-scripts/packages": [
+ "layer:debug",
+ "static",
+ "e8177102dc2ca853cb9272c1257cf2cfd5253d2a074e602d07c8bc4ea8e27c75"
+ ],
+ "debug-scripts/sysctl": [
+ "layer:debug",
+ "static",
+ "990035b320e09cc2228e1f2f880e795d51118b2959339eacddff9cbb74349c6a"
+ ],
+ "debug-scripts/systemd": [
+ "layer:debug",
+ "static",
+ "23ddf533198bf5b1ce723acde31ada806aab8539292b514c721d8ec08af74106"
+ ],
+ "debug-scripts/tls-certs": [
+ "layer:tls-client",
+ "static",
+ "ebf7f23ef6e39fb8e664bac2e9429e32aaeb673b4a51751724b835c007e85d3b"
+ ],
+ "docs/status.md": [
+ "layer:status",
+ "static",
+ "975dec9f8c938196e102e954a80226bda293407c4e5ae857c118bf692154702a"
+ ],
+ "hooks/certificates-relation-broken": [
+ "layer:basic",
+ "dynamic",
+ "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7"
+ ],
+ "hooks/certificates-relation-changed": [
+ "layer:basic",
+ "dynamic",
+ "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7"
+ ],
+ "hooks/certificates-relation-created": [
+ "layer:basic",
+ "dynamic",
+ "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7"
+ ],
+ "hooks/certificates-relation-departed": [
+ "layer:basic",
+ "dynamic",
+ "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7"
+ ],
+ "hooks/certificates-relation-joined": [
+ "layer:basic",
+ "dynamic",
+ "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7"
+ ],
+ "hooks/cluster-relation-broken": [
+ "layer:basic",
+ "dynamic",
+ "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7"
+ ],
+ "hooks/cluster-relation-changed": [
+ "layer:basic",
+ "dynamic",
+ "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7"
+ ],
+ "hooks/cluster-relation-created": [
+ "layer:basic",
+ "dynamic",
+ "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7"
+ ],
+ "hooks/cluster-relation-departed": [
+ "layer:basic",
+ "dynamic",
+ "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7"
+ ],
+ "hooks/cluster-relation-joined": [
+ "layer:basic",
+ "dynamic",
+ "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7"
+ ],
+ "hooks/config-changed": [
+ "layer:basic",
+ "dynamic",
+ "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7"
+ ],
+ "hooks/data-storage-attached": [
+ "etcd",
+ "dynamic",
+ "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7"
+ ],
+ "hooks/data-storage-detaching": [
+ "etcd",
+ "dynamic",
+ "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7"
+ ],
+ "hooks/db-relation-broken": [
+ "layer:basic",
+ "dynamic",
+ "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7"
+ ],
+ "hooks/db-relation-changed": [
+ "layer:basic",
+ "dynamic",
+ "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7"
+ ],
+ "hooks/db-relation-created": [
+ "layer:basic",
+ "dynamic",
+ "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7"
+ ],
+ "hooks/db-relation-departed": [
+ "layer:basic",
+ "dynamic",
+ "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7"
+ ],
+ "hooks/db-relation-joined": [
+ "layer:basic",
+ "dynamic",
+ "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7"
+ ],
+ "hooks/hook.template": [
+ "layer:basic",
+ "static",
+ "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7"
+ ],
+ "hooks/install": [
+ "layer:basic",
+ "dynamic",
+ "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7"
+ ],
+ "hooks/leader-elected": [
+ "layer:basic",
+ "dynamic",
+ "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7"
+ ],
+ "hooks/leader-settings-changed": [
+ "layer:basic",
+ "dynamic",
+ "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7"
+ ],
+ "hooks/nrpe-external-master-relation-broken": [
+ "layer:basic",
+ "dynamic",
+ "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7"
+ ],
+ "hooks/nrpe-external-master-relation-changed": [
+ "layer:basic",
+ "dynamic",
+ "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7"
+ ],
+ "hooks/nrpe-external-master-relation-created": [
+ "layer:basic",
+ "dynamic",
+ "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7"
+ ],
+ "hooks/nrpe-external-master-relation-departed": [
+ "layer:basic",
+ "dynamic",
+ "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7"
+ ],
+ "hooks/nrpe-external-master-relation-joined": [
+ "layer:basic",
+ "dynamic",
+ "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7"
+ ],
+ "hooks/post-series-upgrade": [
+ "layer:basic",
+ "dynamic",
+ "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7"
+ ],
+ "hooks/pre-series-upgrade": [
+ "layer:basic",
+ "dynamic",
+ "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7"
+ ],
+ "hooks/proxy-relation-broken": [
+ "layer:basic",
+ "dynamic",
+ "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7"
+ ],
+ "hooks/proxy-relation-changed": [
+ "layer:basic",
+ "dynamic",
+ "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7"
+ ],
+ "hooks/proxy-relation-created": [
+ "layer:basic",
+ "dynamic",
+ "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7"
+ ],
+ "hooks/proxy-relation-departed": [
+ "layer:basic",
+ "dynamic",
+ "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7"
+ ],
+ "hooks/proxy-relation-joined": [
+ "layer:basic",
+ "dynamic",
+ "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7"
+ ],
+ "hooks/relations/etcd-proxy/.gitignore": [
+ "interface:etcd-proxy",
+ "static",
+ "cf237c7aff44efbe6e502e645c3e06da03a69d7bdeb43392108ef3348143417e"
+ ],
+ "hooks/relations/etcd-proxy/README.md": [
+ "interface:etcd-proxy",
+ "static",
+ "b880661604a91c383aacea3289888e30427bb8ec83c5987d2ce7fac364e48cc7"
+ ],
+ "hooks/relations/etcd-proxy/__init__.py": [
+ "interface:etcd-proxy",
+ "static",
+ "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"
+ ],
+ "hooks/relations/etcd-proxy/interface.yaml": [
+ "interface:etcd-proxy",
+ "static",
+ "672792aa20fe5af4a7ba9cbe61f3f40426c783bca5149834e4dc57a636cc1185"
+ ],
+ "hooks/relations/etcd-proxy/provides.py": [
+ "interface:etcd-proxy",
+ "static",
+ "d87de0262566cb4bd66f8fff96d080c76288f5433046ab5af9464cac3f7e111a"
+ ],
+ "hooks/relations/etcd-proxy/requires.py": [
+ "interface:etcd-proxy",
+ "static",
+ "b1535b36ed16e70b39f05753ae2f3f3acaa7f849609ff2e70a143486583e6ee3"
+ ],
+ "hooks/relations/etcd/.gitignore": [
+ "interface:etcd",
+ "static",
+ "cf237c7aff44efbe6e502e645c3e06da03a69d7bdeb43392108ef3348143417e"
+ ],
+ "hooks/relations/etcd/README.md": [
+ "interface:etcd",
+ "static",
+ "93873d073f5f5302d352e09321aaf87458556e9730f89e1c682699c1d0db2386"
+ ],
+ "hooks/relations/etcd/__init__.py": [
+ "interface:etcd",
+ "static",
+ "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"
+ ],
+ "hooks/relations/etcd/interface.yaml": [
+ "interface:etcd",
+ "static",
+ "ba9f723b57a434f7efb2c06abec4167cd412c16da5f496a477dd7691e9a715be"
+ ],
+ "hooks/relations/etcd/peers.py": [
+ "interface:etcd",
+ "static",
+ "99419c3d139fb5bb90021e0482f9e7ac2cfb776fb7af79b46209c6a75b36e834"
+ ],
+ "hooks/relations/etcd/provides.py": [
+ "interface:etcd",
+ "static",
+ "3db1f644ab669e2dec59d59b61de63b721bc05b38fe646e525fff8f0d60982f9"
+ ],
+ "hooks/relations/etcd/requires.py": [
+ "interface:etcd",
+ "static",
+ "8ffc1a094807fd36a1d1428b0a07b2428074134d46086066ecd6c0acd9fcd13e"
+ ],
+ "hooks/relations/nrpe-external-master/README.md": [
+ "interface:nrpe-external-master",
+ "static",
+ "d8ed3bc7334f6581b12b6091923f58e6f5ef62075a095a4e78fb8f434a948636"
+ ],
+ "hooks/relations/nrpe-external-master/__init__.py": [
+ "interface:nrpe-external-master",
+ "static",
+ "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"
+ ],
+ "hooks/relations/nrpe-external-master/interface.yaml": [
+ "interface:nrpe-external-master",
+ "static",
+ "894f24ba56148044dae5b7febf874b427d199239bcbe1f2f55c3db06bb77b5f0"
+ ],
+ "hooks/relations/nrpe-external-master/provides.py": [
+ "interface:nrpe-external-master",
+ "static",
+ "e6ba708d05b227b139a86be59c83ed95a2bad030bc81e5819167ba5e1e67ecd4"
+ ],
+ "hooks/relations/nrpe-external-master/requires.py": [
+ "interface:nrpe-external-master",
+ "static",
+ "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"
+ ],
+ "hooks/relations/tls-certificates/.gitignore": [
+ "interface:tls-certificates",
+ "static",
+ "b485e74def213c534676224e655e9276b62d401ebc643508ddc545dd335cb6dc"
+ ],
+ "hooks/relations/tls-certificates/README.md": [
+ "interface:tls-certificates",
+ "static",
+ "6851227de8fcca7edfd504159dbe3e3af31080af64df46f3d3b345da7630827a"
+ ],
+ "hooks/relations/tls-certificates/__init__.py": [
+ "interface:tls-certificates",
+ "static",
+ "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"
+ ],
+ "hooks/relations/tls-certificates/docs/common.md": [
+ "interface:tls-certificates",
+ "static",
+ "5e91d6637fc0ccc50af2776de9e59a0f8098244b627816b2e18fabb266e980ff"
+ ],
+ "hooks/relations/tls-certificates/docs/provides.md": [
+ "interface:tls-certificates",
+ "static",
+ "5c12dfca99b5c15ba10b4e7f7cff4cb4c9b621b198deba5f2397d3c837d035fe"
+ ],
+ "hooks/relations/tls-certificates/docs/requires.md": [
+ "interface:tls-certificates",
+ "static",
+ "148dd1de163d75253f0a9d3c35e108dcaacbc9bdf97e47186743e6c82a67b62e"
+ ],
+ "hooks/relations/tls-certificates/interface.yaml": [
+ "interface:tls-certificates",
+ "static",
+ "e412e54b1d327bad15a882f7f0bf996212090db576b863cc9cff7a68afc0e4fa"
+ ],
+ "hooks/relations/tls-certificates/make_docs": [
+ "interface:tls-certificates",
+ "static",
+ "3671543bddc9d277171263310e404df3f11660429582cb27b39b7e7ec8757a37"
+ ],
+ "hooks/relations/tls-certificates/provides.py": [
+ "interface:tls-certificates",
+ "static",
+ "be2a4b9a411c770989c529fd887070ad91649481a13f5239cfd8751f234b637c"
+ ],
+ "hooks/relations/tls-certificates/pydocmd.yml": [
+ "interface:tls-certificates",
+ "static",
+ "48a233f60a89f87d56e9bc715e05766f5d39bbea2bc8741ed31f67b30c8cfcb8"
+ ],
+ "hooks/relations/tls-certificates/requires.py": [
+ "interface:tls-certificates",
+ "static",
+ "442d773112079bc674d3e6be75b00323fcad7efd2f03613a1972b575dd438dba"
+ ],
+ "hooks/relations/tls-certificates/tls_certificates_common.py": [
+ "interface:tls-certificates",
+ "static",
+ "068bd32ba69bfa514e1da386919d18b348ee678b40c372f275c9110f2cc4677c"
+ ],
+ "hooks/relations/tls-certificates/tox.ini": [
+ "interface:tls-certificates",
+ "static",
+ "7ab8ab53e5ed98cfa7fb5c1d5009f84077a4bb76640ba64f561ef7ea3a702eab"
+ ],
+ "hooks/start": [
+ "layer:basic",
+ "dynamic",
+ "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7"
+ ],
+ "hooks/stop": [
+ "layer:basic",
+ "dynamic",
+ "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7"
+ ],
+ "hooks/update-status": [
+ "layer:basic",
+ "dynamic",
+ "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7"
+ ],
+ "hooks/upgrade-charm": [
+ "layer:basic",
+ "dynamic",
+ "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7"
+ ],
+ "icon.svg": [
+ "etcd",
+ "static",
+ "163dee1040f40db041189c46652ab4e740912b527078995e7ac71bf6c6a08d16"
+ ],
+ "layer.yaml": [
+ "etcd",
+ "dynamic",
+ "359a37ecaba6aa516c993260ae2978f840e2228f5944249fa7a5ea399963e628"
+ ],
+ "lib/charms/layer/__init__.py": [
+ "layer:basic",
+ "static",
+ "dfe0d26c6bf409767de6e2546bc648f150e1b396243619bad3aa0553ab7e0e6f"
+ ],
+ "lib/charms/layer/basic.py": [
+ "layer:basic",
+ "static",
+ "3126b5754ad39402ee27e64527044ddd231ed1cd137fcedaffb51e63a635f108"
+ ],
+ "lib/charms/layer/execd.py": [
+ "layer:basic",
+ "static",
+ "fda8bd491032db1db8ddaf4e99e7cc878c6fb5432efe1f91cadb5b34765d076d"
+ ],
+ "lib/charms/layer/nagios.py": [
+ "layer:nagios",
+ "static",
+ "0246710bdbea844356007a64409907d93e6e94a289d83266e8b7c5d921fb3a6c"
+ ],
+ "lib/charms/layer/options.py": [
+ "layer:options",
+ "static",
+ "8ae7a07d22542fc964f2d2bee8219d1c78a68dace70a1b38d36d4aea47b1c3b2"
+ ],
+ "lib/charms/layer/snap.py": [
+ "layer:snap",
+ "static",
+ "1a3a2a09bb5f2ea1b557354d09f6968cecb6b4204ded019e704203fb3391f7be"
+ ],
+ "lib/charms/layer/status.py": [
+ "layer:status",
+ "static",
+ "d560a5e07b2e5f2b0f25f30e1f0278b06f3f90c01e4dbad5c83d71efc79018c6"
+ ],
+ "lib/charms/layer/tls_client.py": [
+ "layer:tls-client",
+ "static",
+ "34531c3980777b661b913d77c432fc371ed10425473c2eb365b1dd5540c2ec6e"
+ ],
+ "lib/charms/leadership.py": [
+ "layer:leadership",
+ "static",
+ "20ffcbbc08147506759726ad51567420659ffb8a2e0121079240b8706658e332"
+ ],
+ "lib/debug_script.py": [
+ "layer:debug",
+ "static",
+ "a4d56f2d3e712b1b5cadb657c7195c6268d0aac6d228991049fd769e0ddaf453"
+ ],
+ "lib/etcd_databag.py": [
+ "etcd",
+ "static",
+ "02063f5d0b869b90b9c13c7e77a8e1772ce4dc1d0be24ddbd1072a4086bd4986"
+ ],
+ "lib/etcd_lib.py": [
+ "etcd",
+ "static",
+ "a550f3409eede8c85d1e2bdd86bf32f2ab64b31b6fd321d204aab0f8def78055"
+ ],
+ "lib/etcdctl.py": [
+ "etcd",
+ "static",
+ "26574d901d0bd054c8ff45c3f18e6cbc79b34b61d1ff57165f906c4ed0960700"
+ ],
+ "make_docs": [
+ "layer:status",
+ "static",
+ "c990f55c8e879793a62ed8464ee3d7e0d7d2225fdecaf17af24b0df0e2daa8c1"
+ ],
+ "metadata.yaml": [
+ "etcd",
+ "dynamic",
+ "373432b73726cb36c0b719ae91a39888d9cb66db3d17703ca57ba7641c327907"
+ ],
+ "pydocmd.yml": [
+ "layer:status",
+ "static",
+ "11d9293901f32f75f4256ae4ac2073b92ce1d7ef7b6c892ba9fbb98690a0b330"
+ ],
+ "reactive/__init__.py": [
+ "layer:snap",
+ "static",
+ "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"
+ ],
+ "reactive/cdk_service_kicker.py": [
+ "layer:cdk-service-kicker",
+ "static",
+ "cc2648443016a18324ecb26acb71d69c71610ba23df235f280383552136f7efc"
+ ],
+ "reactive/etcd.py": [
+ "etcd",
+ "static",
+ "e2e941191031b3632c6457e806aca66796755417b871d50c967b9d78c526e8a9"
+ ],
+ "reactive/leadership.py": [
+ "layer:leadership",
+ "static",
+ "e2b233cf861adc3b2d9e9c062134ce2f104953f03283cdddd88f49efee652e8f"
+ ],
+ "reactive/snap.py": [
+ "layer:snap",
+ "static",
+ "e4625ff4190ed33625f50d94343eda100871052ef133028f5f0ff1edfa5a23c3"
+ ],
+ "reactive/status.py": [
+ "layer:status",
+ "static",
+ "30207fc206f24e91def5252f1c7f7c8e23c0aed0e93076babf5e03c05296d207"
+ ],
+ "reactive/tls_client.py": [
+ "layer:tls-client",
+ "static",
+ "08e850e401d2004523dca6b5e6bc47c33d558bf575dd55969491e11cd3ed98c8"
+ ],
+ "requirements.txt": [
+ "layer:basic",
+ "static",
+ "a00f75d80849e5b4fc5ad2e7536f947c25b1a4044b341caa8ee87a92d3a4c804"
+ ],
+ "setup.py": [
+ "layer:snap",
+ "static",
+ "b219c8c6cb138a2f70a8ef9136d1cc3fe6210bd1e28c99fccb5e7ae90d547164"
+ ],
+ "templates/cdk-service-kicker": [
+ "layer:cdk-service-kicker",
+ "static",
+ "b17adff995310e14d1b510337efa0af0531b55e2c487210168829e0dc1a6f99b"
+ ],
+ "templates/cdk-service-kicker.service": [
+ "layer:cdk-service-kicker",
+ "static",
+ "c2d3977fa89d453f0f13a8a823621c44bb642ec7392d8b7462b631864f665029"
+ ],
+ "templates/check_etcd-alarms.cron": [
+ "etcd",
+ "static",
+ "a208def35e82ffde80ddd328d75261cfd3f3fc26cab436c4a2ba35823337a65b"
+ ],
+ "templates/check_etcd-alarms.py": [
+ "etcd",
+ "static",
+ "a5b5e4efa0c59fba1941b6bfb480538bf7988e7c5eded7598f208cb57df42594"
+ ],
+ "templates/etcd2.conf": [
+ "etcd",
+ "static",
+ "5fd82b1b95f337406f12133b82c705ec278696915cf298c2a0256f42647af43a"
+ ],
+ "templates/etcd3.conf": [
+ "etcd",
+ "static",
+ "3ab6570d48daaa95ef87f28db1d333177fb7942f31e8157b3ac71c1ea319b108"
+ ],
+ "templates/service-always-restart.systemd-229.conf": [
+ "etcd",
+ "static",
+ "516958fbf8b9a05cc86f6700d0de7bdc6b2ba1847d69fbe1214e23b52e00b064"
+ ],
+ "templates/service-always-restart.systemd-latest.conf": [
+ "etcd",
+ "static",
+ "37de98817682363d48b3dd2b635f5cfb281533aaa9d3836d1af44f9d6a59984c"
+ ],
+ "tests/10-deploy.py": [
+ "etcd",
+ "static",
+ "5984efd977a45d99f3618037737cf6b4ed0316ab7cc7a84be7f3a70c168d9373"
+ ],
+ "tests/20-actions.py": [
+ "etcd",
+ "static",
+ "4161748b29273598c6aff480853d11fa887d077e20639400e40398c94f5d5505"
+ ],
+ "tests/30-deb-bundle.yml": [
+ "etcd",
+ "static",
+ "58c5118376f1086b3842a6eca01cb7adae8ee983a6ca7f28cb3624aa4a51170b"
+ ],
+ "tests/30-deb-snap-migrate.py": [
+ "etcd",
+ "static",
+ "8f287e2869e3998dc778f1b1cdb7ed6bc7f98a9b58e59379e8e62c5d7f4ff81a"
+ ],
+ "tests/conftest.py": [
+ "etcd",
+ "static",
+ "a9dd13c52cdca36d842eadcbbd4c56c5ca7cbbd298cae97fe866b2f604949d34"
+ ],
+ "tests/snap-upgrade.yaml": [
+ "etcd",
+ "static",
+ "5aaa91cbec3d9ff0024ba90f080a761d26578c30a2c88c6fc76de73c7a4b11ba"
+ ],
+ "tests/tests.yaml": [
+ "etcd",
+ "static",
+ "974fc387c55354f6d9d412c5676a1d990e4d0a7aa79dcc5422ba671631ae75a6"
+ ],
+ "tox.ini": [
+ "etcd",
+ "static",
+ "53e1c829a1c652bb9739d79a206af4f1cb2c9605fb9c2bd590da52012301eb09"
+ ],
+ "unit_tests/test_etcdctl.py": [
+ "etcd",
+ "static",
+ "755b1f55a504862332219addc124ca36f50940831d7d6a2068aa74b42c618198"
+ ],
+ "version": [
+ "etcd",
+ "dynamic",
+ "e769e4fb7e0ce598f5767cab04dbda0b3cd5fce9bea776b97aa6bc80f4cc4999"
+ ],
+ "wheelhouse.txt": [
+ "etcd",
+ "dynamic",
+ "8c850ecab7e9c4a34020262a19101996418d65234d6a9a8a2ace0d58076e7095"
+ ],
+ "wheelhouse/Jinja2-2.10.1.tar.gz": [
+ "layer:basic",
+ "dynamic",
+ "065c4f02ebe7f7cf559e49ee5a95fb800a9e4528727aec6f24402a5374c65013"
+ ],
+ "wheelhouse/MarkupSafe-1.1.1.tar.gz": [
+ "__pip__",
+ "dynamic",
+ "29872e92839765e546828bb7754a68c418d927cd064fd4708fab9fe9c8bb116b"
+ ],
+ "wheelhouse/PyYAML-5.2.tar.gz": [
+ "layer:basic",
+ "dynamic",
+ "c0ee8eca2c582d29c3c2ec6e2c4f703d1b7f1fb10bc72317355a746057e7346c"
+ ],
+ "wheelhouse/Tempita-0.5.2.tar.gz": [
+ "__pip__",
+ "dynamic",
+ "cacecf0baa674d356641f1d406b8bff1d756d739c46b869a54de515d08e6fc9c"
+ ],
+ "wheelhouse/charmhelpers-0.20.21.tar.gz": [
+ "layer:basic",
+ "dynamic",
+ "37dd06f9548724d38352d1eaf91216df9167066745774118481d40974599715c"
+ ],
+ "wheelhouse/charms.reactive-1.4.1.tar.gz": [
+ "layer:basic",
+ "dynamic",
+ "bba21b4fd40b26c240c9ef2aa10c6fdf73592031c68591da4e7ccc46ca9cb616"
+ ],
+ "wheelhouse/charms.templating.jinja2-1.0.2.tar.gz": [
+ "etcd",
+ "dynamic",
+ "8193c6a1d40bdb66fe272c359b4e4780501c658acfaf2b1118c4230927815fe2"
+ ],
+ "wheelhouse/netaddr-0.7.19.tar.gz": [
+ "etcd",
+ "dynamic",
+ "38aeec7cdd035081d3a4c306394b19d677623bf76fa0913f6695127c7753aefd"
+ ],
+ "wheelhouse/pbr-5.6.0.tar.gz": [
+ "__pip__",
+ "dynamic",
+ "42df03e7797b796625b1029c0400279c7c34fd7df24a7d7818a1abb5b38710dd"
+ ],
+ "wheelhouse/pip-18.1.tar.gz": [
+ "layer:basic",
+ "dynamic",
+ "c0a292bd977ef590379a3f05d7b7f65135487b67470f6281289a94e015650ea1"
+ ],
+ "wheelhouse/pyaml-20.4.0.tar.gz": [
+ "__pip__",
+ "dynamic",
+ "29a5c2a68660a799103d6949167bd6c7953d031449d08802386372de1db6ad71"
+ ],
+ "wheelhouse/setuptools-41.6.0.zip": [
+ "layer:basic",
+ "dynamic",
+ "6afa61b391dcd16cb8890ec9f66cc4015a8a31a6e1c2b4e0c464514be1a3d722"
+ ],
+ "wheelhouse/setuptools_scm-1.17.0.tar.gz": [
+ "layer:basic",
+ "dynamic",
+ "70a4cf5584e966ae92f54a764e6437af992ba42ac4bca7eb37cc5d02b98ec40a"
+ ],
+ "wheelhouse/six-1.15.0.tar.gz": [
+ "__pip__",
+ "dynamic",
+ "30639c035cdb23534cd4aa2dd52c3bf48f06e5f4a941509c8bafd8ce11080259"
+ ],
+ "wheelhouse/tenacity-7.0.0.tar.gz": [
+ "layer:snap",
+ "dynamic",
+ "5bd16ef5d3b985647fe28dfa6f695d343aa26479a04e8792b9d3c8f49e361ae1"
+ ],
+ "wheelhouse/wheel-0.33.6.tar.gz": [
+ "layer:basic",
+ "dynamic",
+ "10c9da68765315ed98850f8e048347c3eb06dd81822dc2ab1d4fde9dc9702646"
+ ]
+ }
+}
\ No newline at end of file
diff --git a/etcd/.github/workflows/tox.yaml b/etcd/.github/workflows/tox.yaml
new file mode 100644
index 0000000..b07172d
--- /dev/null
+++ b/etcd/.github/workflows/tox.yaml
@@ -0,0 +1,22 @@
+name: Run tests with Tox
+
+on: [push]
+
+jobs:
+ build:
+
+ runs-on: ubuntu-latest
+ strategy:
+ matrix:
+ python: [3.5, 3.6, 3.7, 3.8]
+
+ steps:
+ - uses: actions/checkout@v2
+ - name: Setup Python
+ uses: actions/setup-python@v1
+ with:
+ python-version: ${{ matrix.python }}
+ - name: Install Tox and any other packages
+ run: pip install tox
+ - name: Run Tox
+ run: tox -e py # Run tox using the version of Python in `PATH`
diff --git a/etcd/.gitignore b/etcd/.gitignore
new file mode 100644
index 0000000..447a867
--- /dev/null
+++ b/etcd/.gitignore
@@ -0,0 +1,9 @@
+resources
+.cache
+.tox
+.DS_Store
+__pycache__
+.coverage
+.unit-state.db
+*.swp
+.cache/*
diff --git a/etcd/.travis.yml b/etcd/.travis.yml
new file mode 100644
index 0000000..66d8e1f
--- /dev/null
+++ b/etcd/.travis.yml
@@ -0,0 +1,7 @@
+language: python
+python:
+ - "3.5"
+install:
+ - pip install tox-travis
+script:
+ - tox
diff --git a/etcd/.travis/profile-update.yaml b/etcd/.travis/profile-update.yaml
new file mode 100644
index 0000000..57f96eb
--- /dev/null
+++ b/etcd/.travis/profile-update.yaml
@@ -0,0 +1,12 @@
+config: {}
+description: Default LXD profile - updated
+devices:
+ eth0:
+ name: eth0
+ parent: lxdbr0
+ nictype: bridged
+ type: nic
+ root:
+ path: /
+ pool: default
+ type: disk
diff --git a/etcd/CONTRIBUTING.md b/etcd/CONTRIBUTING.md
new file mode 100644
index 0000000..f0d8d31
--- /dev/null
+++ b/etcd/CONTRIBUTING.md
@@ -0,0 +1,37 @@
+# Contributor Guide
+
+This Juju charm is open source ([Apache License 2.0](./LICENSE)) and we actively seek any community contributions
+for code, suggestions and documentation.
+This page details a few notes, workflows and suggestions for how to make contributions most effective and help us
+all build a better charm - please give them a read before working on any contributions.
+
+## Licensing
+
+This charm has been created under the [Apache License 2.0](./LICENSE), which will cover any contributions you may
+make to this project. Please familiarise yourself with the terms of the license.
+
+Additionally, this charm uses the Harmony CLA agreement. It’s the easiest way for you to give us permission to
+use your contributions.
+In effect, you’re giving us a license, but you still own the copyright — so you retain the right to modify your
+code and use it in other projects. Please [sign the CLA here](https://ubuntu.com/legal/contributors/agreement) before
+making any contributions.
+
+## Code of conduct
+
+We have adopted the Ubuntu Code of Conduct. You can read this in full [here](https://ubuntu.com/community/code-of-conduct).
+
+## Contributing code
+
+To contribute code to this project, please use the following workflow:
+
+1. [Submit a bug](https://bugs.launchpad.net/charm-etcd/+filebug) to explain the need for and track the change.
+2. Create a branch on your fork of the repo with your changes, including a unit test covering the new or modified code.
+3. Submit a PR. The PR description should include a link to the bug on Launchpad.
+4. Update the Launchpad bug to include a link to the PR and the `review-needed` tag.
+5. Once reviewed and merged, the change will become available on the edge channel and assigned to an appropriate milestone
+ for further release according to priority.
+
+## Documentation
+
+Documentation for this charm is currently maintained as part of the Charmed Kubernetes docs.
+See [this page](https://github.com/charmed-kubernetes/kubernetes-docs/blob/master/pages/k8s/charm-etcd.md)
diff --git a/etcd/LICENSE b/etcd/LICENSE
new file mode 100644
index 0000000..7a4a3ea
--- /dev/null
+++ b/etcd/LICENSE
@@ -0,0 +1,202 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
\ No newline at end of file
diff --git a/etcd/Makefile b/etcd/Makefile
new file mode 100644
index 0000000..eac207f
--- /dev/null
+++ b/etcd/Makefile
@@ -0,0 +1,29 @@
+
+build:
+ charm build -r --no-local-layers
+
+deploy: build
+ juju deploy ${JUJU_REPOSITORY}/builds/etcd
+ juju deploy cs:~containers/easyrsa
+ juju add-relation etcd easyrsa
+
+lint:
+ tox --notest
+ PATH=.tox/py34/bin:.tox/py35/bin flake8 reactive lib
+
+upgrade: build
+ juju upgrade-charm etcd --path=${JUJU_REPOSITORY}/builds/etcd
+
+force: build
+ juju upgrade-charm etcd --path=${JUJU_REPOSITORY}/builds/etcd --force-units
+
+clean:
+ @echo "Cleaning files"
+ @rm -f .coverage .unit-state.db
+ @find . -name "*.pyc" -type f -exec rm -f '{}' \;
+ @find . -name "__pycache__" -type d -prune -exec rm -rf '{}' \;
+ @rm -rf ./.tox
+ @rm -rf ./.pytest_cache
+
+clean-all: clean
+ rm -rf ${JUJU_REPOSITORY}/builds/etcd
diff --git a/etcd/README.md b/etcd/README.md
new file mode 100644
index 0000000..df46bdf
--- /dev/null
+++ b/etcd/README.md
@@ -0,0 +1,21 @@
+# Etcd
+
+Etcd is a highly available distributed key value store that provides a reliable
+way to store data across a cluster of machines. Etcd gracefully handles master
+elections during network partitions and will tolerate machine failure,
+including the master.
+
+Your applications can read and write data into etcd. A simple use-case is to
+store database connection details or feature flags in etcd as key value pairs.
+These values can be watched, allowing your app to reconfigure itself when they
+change.
+
+Advanced uses take advantage of the consistency guarantees to implement
+database master elections or do distributed locking across a cluster of
+workers.
+
+Etcd allows storing data in a distributed hierarchical database with
+observation.
+
+This charm is maintained along with the components of Charmed Kubernetes. For full information,
+please visit the [official Charmed Kubernetes docs](https://www.ubuntu.com/kubernetes/docs/charm-etcd).
diff --git a/etcd/actions.yaml b/etcd/actions.yaml
new file mode 100644
index 0000000..b5c2f44
--- /dev/null
+++ b/etcd/actions.yaml
@@ -0,0 +1,99 @@
+"cis-benchmark":
+ "description": |
+ Run the CIS Kubernetes Benchmark against snap-based components.
+ "params":
+ "apply":
+ "type": "string"
+ "default": "none"
+ "description": |
+ Apply remediations to address benchmark failures. The default, 'none',
+ will not attempt to fix any reported failures. Set to 'conservative'
+ to resolve simple failures. Set to 'dangerous' to attempt to resolve
+ all failures.
+
+ Note: Applying any remediation may result in an unusable cluster.
+ "config":
+ "type": "string"
+ "default": "https://github.com/charmed-kubernetes/kube-bench-config/archive/cis-1.5.zip#sha1=811f21dbf6c841bafdbfbd8a21f912ad67582f46"
+ "description": |
+ Archive containing configuration files to use when running kube-bench.
+ The default value is known to be compatible with snap components. When
+      using a custom URL, append '#<hash_type>=<hash>' to verify the
+      archive integrity when downloaded.
+ "release":
+ "type": "string"
+ "default": "https://github.com/aquasecurity/kube-bench/releases/download/v0.3.1/kube-bench_0.3.1_linux_amd64.tar.gz#sha256=6616f1373987259285e2f676a225d4a3885cd62b7e7a116102ff2fb445724281"
+ "description": |
+ Archive containing the 'kube-bench' binary to run. The default value
+ points to a stable upstream release. When using a custom URL, append
+      '#<hash_type>=<hash>' to verify the archive integrity when
+      downloaded.
+
+ This may also be set to the special keyword 'upstream'. In this case,
+ the action will compile and use a local kube-bench binary built from
+ the master branch of the upstream repository:
+ https://github.com/aquasecurity/kube-bench
+
+"debug":
+ "description": "Collect debug data"
+"alarm-disarm":
+ "description": |
+ Disarm all alarms.
+"alarm-list":
+ "description": |
+ List all alarms.
+"compact":
+ "description": |
+ Compact etcd event history.
+ "params":
+ "revision":
+ "type": "string"
+ "default": ""
+ "description": |
+ Revision to compact to. Leave blank to compact to the latest revision.
+ "physical":
+ "type": "boolean"
+ "default": !!bool "false"
+ "description": |
+ Setting to True will cause the compaction process to exit only after
+ all revisions have been physically removed from the database.
+"defrag":
+ "description": |
+ Defragment the storage of the local etcd member.
+"health":
+ "description": "Report the health of the cluster."
+"package-client-credentials":
+ "description": |
+ Generate a tarball of the client certificates to connect to the cluster
+ remotely.
+"snap-upgrade":
+ "description": |
+ Execute a migration from the apt package to a snap package format.
+ "params":
+ "use-resource":
+ "type": "boolean"
+ "default": !!bool "false"
+ "description": "Default to using the resource (offline environments)"
+"snapshot":
+ "description": "Export and compress a backup of the data in the Etcd cluster."
+ "params":
+ "target":
+ "type": "string"
+ "default": "/home/ubuntu/etcd-snapshots"
+ "description": "Location to save the etcd snapshot."
+ "keys-version":
+ "type": "string"
+ "default": "v3"
+      "description": "Version of keys to snapshot. Allowed values 'v3' or 'v2'."
+"restore":
+ "description": "Restore an etcd cluster's data from a snapshot tarball."
+ "params":
+ "target":
+ "type": "string"
+ "default": "/home/ubuntu"
+ "description": "Path on disk to save any pre-existing data."
+ "skip-backup":
+ "type": "boolean"
+ "default": !!bool "true"
+ "description": |
+        Don't back up any existing data, and skip directly to data restoration.
diff --git a/etcd/actions/actions.py b/etcd/actions/actions.py
new file mode 100755
index 0000000..c1f4834
--- /dev/null
+++ b/etcd/actions/actions.py
@@ -0,0 +1,144 @@
+#!/usr/local/sbin/charm-env python3
+
+import os
+import re
+import shlex
+import subprocess
+import sys
+
+from charms import layer
+
+from etcdctl import EtcdCtl
+
+from charmhelpers.core.hookenv import (
+ action_get,
+ action_set,
+ action_fail,
+ action_name
+)
+
+
+CTL = EtcdCtl()
+
+
+def action_fail_now(*args, **kw):
+ '''Call action_fail() and exit immediately.
+
+ '''
+ action_fail(*args, **kw)
+ sys.exit(0)
+
+
+def requires_etcd_version(version_regex, human_version=None):
+ '''Decorator that enforces a specific version of etcdctl be present.
+
+ The decorated function will only be executed if the required version
+ of etcdctl is present. Otherwise, action_fail() will be called and
+ the process will exit immediately.
+
+ '''
+ def wrap(f):
+ def wrapped_f(*args):
+ version = CTL.version()
+ if not re.match(version_regex, version):
+ required_version = human_version or version_regex
+ action_fail_now(
+ 'This action requires etcd version {}'.format(
+ required_version))
+ f(*args)
+ return wrapped_f
+ return wrap
+
+
+requires_etcd_v2 = requires_etcd_version(r'2\..*', human_version='2.x')
+requires_etcd_v3 = requires_etcd_version(r'3\..*', human_version='3.x')
+
+
+@requires_etcd_v3
+def alarm_disarm():
+ '''Call `etcdctl alarm disarm`.
+
+ '''
+ try:
+ output = CTL.run('alarm disarm')
+ action_set(dict(output=output))
+ except subprocess.CalledProcessError as e:
+ action_fail_now(e.output)
+
+
+@requires_etcd_v3
+def alarm_list():
+ '''Call `etcdctl alarm list`.
+
+ '''
+ try:
+ output = CTL.run('alarm list')
+ action_set(dict(output=output))
+ except subprocess.CalledProcessError as e:
+ action_fail_now(e.output)
+
+
+@requires_etcd_v3
+def compact():
+ '''Call `etcdctl compact`.
+
+ '''
+ def get_latest_revision():
+ try:
+ output = CTL.run('endpoint status --write-out json')
+ except subprocess.CalledProcessError as e:
+ action_fail_now(
+ 'Failed to determine latest revision for '
+ 'compaction: {}'.format(e))
+
+ m = re.search(r'"revision":(\d*)', output)
+ if not m:
+ action_fail_now(
+ "Failed to get revision from 'endpoint status' "
+ "output: {}".format(output))
+ return m.group(1)
+
+ revision = action_get('revision') or get_latest_revision()
+ physical = 'true' if action_get('physical') else 'false'
+ command = 'compact {} --physical={}'.format(revision, physical)
+ try:
+ output = CTL.run(command)
+ action_set(dict(output=output))
+ except subprocess.CalledProcessError as e:
+ action_fail_now(e.output)
+
+
+@requires_etcd_v3
+def defrag():
+ '''Call `etcdctl defrag`.
+
+ '''
+ try:
+ output = CTL.run('defrag')
+ action_set(dict(output=output))
+ except subprocess.CalledProcessError as e:
+ action_fail_now(e.output)
+
+
+def health():
+ '''Call etcdctl cluster-health
+
+ '''
+ try:
+ output = CTL.cluster_health(True)
+ action_set(dict(output=output))
+ except subprocess.CalledProcessError as e:
+ action_fail_now(e.output)
+
+
+if __name__ == '__main__':
+ ACTIONS = {
+ 'alarm-disarm': alarm_disarm,
+ 'alarm-list': alarm_list,
+ 'compact': compact,
+ 'defrag': defrag,
+ 'health': health,
+ }
+
+ action = action_name()
+ ACTIONS[action]()
diff --git a/etcd/actions/alarm-disarm b/etcd/actions/alarm-disarm
new file mode 100755
index 0000000..c1f4834
--- /dev/null
+++ b/etcd/actions/alarm-disarm
@@ -0,0 +1,144 @@
+#!/usr/local/sbin/charm-env python3
+
+import os
+import re
+import shlex
+import subprocess
+import sys
+
+from charms import layer
+
+from etcdctl import EtcdCtl
+
+from charmhelpers.core.hookenv import (
+ action_get,
+ action_set,
+ action_fail,
+ action_name
+)
+
+
+CTL = EtcdCtl()
+
+
+def action_fail_now(*args, **kw):
+ '''Call action_fail() and exit immediately.
+
+ '''
+ action_fail(*args, **kw)
+ sys.exit(0)
+
+
+def requires_etcd_version(version_regex, human_version=None):
+ '''Decorator that enforces a specific version of etcdctl be present.
+
+ The decorated function will only be executed if the required version
+ of etcdctl is present. Otherwise, action_fail() will be called and
+ the process will exit immediately.
+
+ '''
+ def wrap(f):
+ def wrapped_f(*args):
+ version = CTL.version()
+ if not re.match(version_regex, version):
+ required_version = human_version or version_regex
+ action_fail_now(
+ 'This action requires etcd version {}'.format(
+ required_version))
+ f(*args)
+ return wrapped_f
+ return wrap
+
+
+requires_etcd_v2 = requires_etcd_version(r'2\..*', human_version='2.x')
+requires_etcd_v3 = requires_etcd_version(r'3\..*', human_version='3.x')
+
+
+@requires_etcd_v3
+def alarm_disarm():
+ '''Call `etcdctl alarm disarm`.
+
+ '''
+ try:
+ output = CTL.run('alarm disarm')
+ action_set(dict(output=output))
+ except subprocess.CalledProcessError as e:
+ action_fail_now(e.output)
+
+
+@requires_etcd_v3
+def alarm_list():
+ '''Call `etcdctl alarm list`.
+
+ '''
+ try:
+ output = CTL.run('alarm list')
+ action_set(dict(output=output))
+ except subprocess.CalledProcessError as e:
+ action_fail_now(e.output)
+
+
+@requires_etcd_v3
+def compact():
+ '''Call `etcdctl compact`.
+
+ '''
+ def get_latest_revision():
+ try:
+ output = CTL.run('endpoint status --write-out json')
+ except subprocess.CalledProcessError as e:
+ action_fail_now(
+ 'Failed to determine latest revision for '
+ 'compaction: {}'.format(e))
+
+ m = re.search(r'"revision":(\d*)', output)
+ if not m:
+ action_fail_now(
+ "Failed to get revision from 'endpoint status' "
+ "output: {}".format(output))
+ return m.group(1)
+
+ revision = action_get('revision') or get_latest_revision()
+ physical = 'true' if action_get('physical') else 'false'
+ command = 'compact {} --physical={}'.format(revision, physical)
+ try:
+ output = CTL.run(command)
+ action_set(dict(output=output))
+ except subprocess.CalledProcessError as e:
+ action_fail_now(e.output)
+
+
+@requires_etcd_v3
+def defrag():
+ '''Call `etcdctl defrag`.
+
+ '''
+ try:
+ output = CTL.run('defrag')
+ action_set(dict(output=output))
+ except subprocess.CalledProcessError as e:
+ action_fail_now(e.output)
+
+
+def health():
+ '''Call etcdctl cluster-health
+
+ '''
+ try:
+ output = CTL.cluster_health(True)
+ action_set(dict(output=output))
+ except subprocess.CalledProcessError as e:
+ action_fail_now(e.output)
+
+
+if __name__ == '__main__':
+ ACTIONS = {
+ 'alarm-disarm': alarm_disarm,
+ 'alarm-list': alarm_list,
+ 'compact': compact,
+ 'defrag': defrag,
+ 'health': health,
+ }
+
+ action = action_name()
+ ACTIONS[action]()
diff --git a/etcd/actions/alarm-list b/etcd/actions/alarm-list
new file mode 100755
index 0000000..c1f4834
--- /dev/null
+++ b/etcd/actions/alarm-list
@@ -0,0 +1,144 @@
+#!/usr/local/sbin/charm-env python3
+
+import os
+import re
+import shlex
+import subprocess
+import sys
+
+from charms import layer
+
+from etcdctl import EtcdCtl
+
+from charmhelpers.core.hookenv import (
+ action_get,
+ action_set,
+ action_fail,
+ action_name
+)
+
+
+CTL = EtcdCtl()
+
+
+def action_fail_now(*args, **kw):
+ '''Call action_fail() and exit immediately.
+
+ '''
+ action_fail(*args, **kw)
+ sys.exit(0)
+
+
+def requires_etcd_version(version_regex, human_version=None):
+ '''Decorator that enforces a specific version of etcdctl be present.
+
+ The decorated function will only be executed if the required version
+ of etcdctl is present. Otherwise, action_fail() will be called and
+ the process will exit immediately.
+
+ '''
+ def wrap(f):
+ def wrapped_f(*args):
+ version = CTL.version()
+ if not re.match(version_regex, version):
+ required_version = human_version or version_regex
+ action_fail_now(
+ 'This action requires etcd version {}'.format(
+ required_version))
+ f(*args)
+ return wrapped_f
+ return wrap
+
+
+requires_etcd_v2 = requires_etcd_version(r'2\..*', human_version='2.x')
+requires_etcd_v3 = requires_etcd_version(r'3\..*', human_version='3.x')
+
+
+@requires_etcd_v3
+def alarm_disarm():
+ '''Call `etcdctl alarm disarm`.
+
+ '''
+ try:
+ output = CTL.run('alarm disarm')
+ action_set(dict(output=output))
+ except subprocess.CalledProcessError as e:
+ action_fail_now(e.output)
+
+
+@requires_etcd_v3
+def alarm_list():
+ '''Call `etcdctl alarm list`.
+
+ '''
+ try:
+ output = CTL.run('alarm list')
+ action_set(dict(output=output))
+ except subprocess.CalledProcessError as e:
+ action_fail_now(e.output)
+
+
+@requires_etcd_v3
+def compact():
+ '''Call `etcdctl compact`.
+
+ '''
+ def get_latest_revision():
+ try:
+ output = CTL.run('endpoint status --write-out json')
+ except subprocess.CalledProcessError as e:
+ action_fail_now(
+ 'Failed to determine latest revision for '
+ 'compaction: {}'.format(e))
+
+ m = re.search(r'"revision":(\d*)', output)
+ if not m:
+ action_fail_now(
+ "Failed to get revision from 'endpoint status' "
+ "output: {}".format(output))
+ return m.group(1)
+
+ revision = action_get('revision') or get_latest_revision()
+ physical = 'true' if action_get('physical') else 'false'
+ command = 'compact {} --physical={}'.format(revision, physical)
+ try:
+ output = CTL.run(command)
+ action_set(dict(output=output))
+ except subprocess.CalledProcessError as e:
+ action_fail_now(e.output)
+
+
+@requires_etcd_v3
+def defrag():
+ '''Call `etcdctl defrag`.
+
+ '''
+ try:
+ output = CTL.run('defrag')
+ action_set(dict(output=output))
+ except subprocess.CalledProcessError as e:
+ action_fail_now(e.output)
+
+
+def health():
+ '''Call etcdctl cluster-health
+
+ '''
+ try:
+ output = CTL.cluster_health(True)
+ action_set(dict(output=output))
+ except subprocess.CalledProcessError as e:
+ action_fail_now(e.output)
+
+
+if __name__ == '__main__':
+ ACTIONS = {
+ 'alarm-disarm': alarm_disarm,
+ 'alarm-list': alarm_list,
+ 'compact': compact,
+ 'defrag': defrag,
+ 'health': health,
+ }
+
+ action = action_name()
+ ACTIONS[action]()
diff --git a/etcd/actions/cis-benchmark b/etcd/actions/cis-benchmark
new file mode 100755
index 0000000..3f91dea
--- /dev/null
+++ b/etcd/actions/cis-benchmark
@@ -0,0 +1,371 @@
+#!/usr/local/sbin/charm-env python3
+import os
+import json
+import shlex
+import shutil
+import subprocess
+import sys
+import tempfile
+from pathlib import Path
+
+import charms.layer
+import charms.reactive
+from charmhelpers.core import hookenv, unitdata
+from charmhelpers.fetch.archiveurl import ArchiveUrlFetchHandler
+from charms.layer import snap
+from charms.reactive import clear_flag, is_flag_set, set_flag
+
+
+# Install locations for kube-bench, its charm-specific config, and the
+# directory where benchmark results are archived for later retrieval.
+BENCH_HOME = '/home/ubuntu/kube-bench'
+BENCH_BIN = '{}/kube-bench'.format(BENCH_HOME)
+BENCH_CFG = '{}/cfg-ck'.format(BENCH_HOME)
+GO_PKG = 'github.com/aquasecurity/kube-bench'
+RESULTS_DIR = '/home/ubuntu/kube-bench-results'
+
+# Remediation dicts associate a failing test with a tuple to fix it.
+# Conservative fixes will probably leave the cluster in a good state.
+# Dangerous fixes will likely break the cluster.
+# Tuple examples:
+# {'1.2.3': ('manual -- we don't know how to auto fix this', None, None)}
+# {'1.2.3': ('cli', 'command to run', None)}
+# {'1.2.3': ('kv', 'snap', {cfg_key: value})}
+CONSERVATIVE = {
+    # '0.0.0' is not a real benchmark id; harmless placeholder entry.
+    '0.0.0': ('cli', 'echo "this is fine"', None),
+
+    # etcd (no known failures with a default install)
+
+    # k8s-master
+    '1.2.21': ('kv', 'kube-apiserver', {'profiling': 'false'}),
+    '1.2.23': ('kv', 'kube-apiserver', {'audit-log-maxage': '30'}),
+    '1.2.24': ('kv', 'kube-apiserver', {'audit-log-maxbackup': '10'}),
+    '1.3.1': ('kv', 'kube-controller-manager', {'terminated-pod-gc-threshold': '500'}),
+    '1.3.2': ('kv', 'kube-controller-manager', {'profiling': 'false'}),
+    '1.4.1': ('kv', 'kube-scheduler', {'profiling': 'false'}),
+
+    # k8s-worker
+    '4.2.2': ('kv', 'kubelet', {'authorization-mode': 'Webhook'}),
+    '4.2.4': ('kv', 'kubelet', {'read-only-port': '0'}),
+    '4.2.6': ('kv', 'kubelet', {'protect-kernel-defaults': 'true'}),
+}
+# NOTE(review): implicit string concatenation makes the value a 2-tuple:
+# ('PersistentVolumeLabel', 'PodSecurityPolicy,ServiceAccount,NodeRestriction').
+# If a single comma-separated string was intended, the first element is
+# missing a trailing comma inside the quotes -- confirm against whatever
+# consumes the 'kv' remedy values before changing it.
+ADMISSION_PLUGINS = {'enable-admission-plugins': ('PersistentVolumeLabel',
+                                                  'PodSecurityPolicy,'
+                                                  'ServiceAccount,'
+                                                  'NodeRestriction')}
+DANGEROUS = {
+    # '0.0.0' is not a real benchmark id; harmless placeholder entry.
+    '0.0.0': ('cli', 'echo "this is fine"', None),
+
+    # etcd (no known failures with a default install)
+
+    # k8s-master
+    '1.2.2': ('kv', 'kube-apiserver', {'basic-auth-file': None}),
+    '1.2.3': ('kv', 'kube-apiserver', {'token-auth-file': None}),
+    '1.2.7': ('kv', 'kube-apiserver', {'authorization-mode': 'RBAC,Node'}),
+    '1.2.8': ('kv', 'kube-apiserver', {'authorization-mode': 'RBAC,Node'}),
+    '1.2.9': ('kv', 'kube-apiserver', {'authorization-mode': 'RBAC,Node'}),
+    '1.2.14': ('kv', 'kube-apiserver', ADMISSION_PLUGINS),
+    '1.2.16': ('kv', 'kube-apiserver', ADMISSION_PLUGINS),
+    '1.2.17': ('kv', 'kube-apiserver', ADMISSION_PLUGINS),
+    '1.2.18': ('kv', 'kube-apiserver', {'insecure-bind-address': None}),
+    '1.2.19': ('kv', 'kube-apiserver', {'insecure-port': '0'}),
+    '1.2.33': ('manual', None, None),
+    '1.3.6': ('kv', 'kube-controller-manager',
+              {'feature-gates': 'RotateKubeletServerCertificate=true'}),
+
+    # k8s-worker
+    '4.2.12': ('kv', 'kubelet',
+               {'feature-gates': 'RotateKubeletServerCertificate=true'}),
+}
+
+
+def _fail(msg):
+    '''Fail the action with a given message and exit the process.
+
+    NB: sys.exit() with no argument exits with status 0; the failure is
+    signalled through action_fail, not the exit code.
+    '''
+    hookenv.action_fail(msg)
+    sys.exit()
+
+
+def _move_matching_parent(dirpath, filename, dest):
+ '''Move a parent directory that contains a specific file.
+
+ Helper function that walks a directory looking for a given file. If found,
+ the file's parent directory is moved to the given destination.
+
+ :param: dirpath: String path to search
+ :param: filename: String file to find
+ :param: dest: String destination of the found parent directory
+ '''
+ for root, _, files in os.walk(dirpath):
+ for name in files:
+ if name == filename:
+ hookenv.log('Moving {} to {}'.format(root, dest))
+ shutil.move(root, dest)
+ return
+ else:
+ _fail('Could not find {} in {}'.format(filename, dirpath))
+
+
+def _restart_charm():
+    '''Set charm-specific flags and call reactive.main().
+
+    Which flag is cleared/set depends on the charm this action runs in
+    (master vs worker); etcd needs no restart at all.
+    '''
+    app = hookenv.charm_name() or 'unknown'
+    if 'master' in app:
+        hookenv.log('Restarting master')
+        clear_flag('kubernetes-master.components.started')
+    elif 'worker' in app:
+        hookenv.log('Restarting worker')
+        set_flag('kubernetes-worker.restart-needed')
+    elif 'etcd' in app:
+        hookenv.log('No-op: etcd does not need to be restarted')
+        return
+    else:
+        _fail('Unable to determine the charm to restart: {}'.format(app))
+
+    # Invoke reactive so the charm will react to the flags we just managed
+    charms.layer.import_layer_libs()
+    charms.reactive.main()
+
+
+def install(release, config):
+    '''Install kube-bench and related configuration.
+
+    Release and configuration are set via action params. If installing an
+    upstream release, this method will also install 'go' if needed.
+
+    :param: release: Archive URI or 'upstream'
+    :param: config: Archive URI of configuration files
+    '''
+    # Start from a clean slate on re-install.
+    if Path(BENCH_HOME).exists():
+        shutil.rmtree(BENCH_HOME)
+    fetcher = ArchiveUrlFetchHandler()
+
+    if release == 'upstream':
+        Path(BENCH_HOME).mkdir(parents=True, exist_ok=True)
+
+        # Setup the 'go' environment; prefer an existing go binary, else
+        # install the go snap and use its well-known location.
+        env = os.environ.copy()
+        go_bin = shutil.which('go', path='{}:/snap/bin'.format(env['PATH']))
+        if not go_bin:
+            snap.install('go', channel='stable', classic=True)
+            go_bin = '/snap/bin/go'
+        # Default cache/workspace into the go snap's writable area.
+        go_cache = os.getenv('GOCACHE', '/var/snap/go/common/cache')
+        go_path = os.getenv('GOPATH', '/var/snap/go/common')
+        env['GOCACHE'] = go_cache
+        env['GOPATH'] = go_path
+        Path(go_path).mkdir(parents=True, exist_ok=True)
+
+        # From https://github.com/aquasecurity/kube-bench#installing-from-sources
+        go_cmd = ('{bin} get {pkg} '
+                  'github.com/golang/dep/cmd/dep'.format(bin=go_bin, pkg=GO_PKG))
+        try:
+            subprocess.check_call(shlex.split(go_cmd), cwd=go_path, env=env)
+        except subprocess.CalledProcessError:
+            _fail('Failed to run: {}'.format(go_cmd))
+
+        go_cmd = ('{bin} build -o {out} {base}/src/{pkg}'.format(
+            bin=go_bin, out=BENCH_BIN, base=go_path, pkg=GO_PKG))
+        try:
+            subprocess.check_call(shlex.split(go_cmd), cwd=go_path, env=env)
+        except subprocess.CalledProcessError:
+            _fail('Failed to run: {}'.format(go_cmd))
+    else:
+        # Fetch the release URI and put it in the right place.
+        archive_path = fetcher.install(source=release)
+        # NB: We may not know the structure of the archive, but we know the
+        # directory containing 'kube-bench' belongs in our BENCH_HOME.
+        _move_matching_parent(
+            dirpath=archive_path, filename='kube-bench', dest=BENCH_HOME)
+
+    # Fetch the config URI and put it in the right place.
+    archive_dir = fetcher.install(source=config)
+    # NB: We may not know the structure of the archive, but we know the
+    # directory containing 'config.yaml' belongs in our BENCH_CFG.
+    _move_matching_parent(
+        dirpath=archive_dir, filename='config.yaml', dest=BENCH_CFG)
+
+
+def apply(remediations=None):
+ '''Apply remediations to address benchmark failures.
+
+ :param: remediations: either 'conservative' or 'dangerous'
+ '''
+ applied_fixes = 0
+ danger = True if remediations == 'dangerous' else False
+ db = unitdata.kv()
+
+ json_log = report(log_format='json')
+ hookenv.log('Loading JSON from: {}'.format(json_log))
+ try:
+ with open(json_log, 'r') as f:
+ full_json = json.load(f)
+ except Exception:
+ _fail('Failed to load: {}'.format(json_log))
+
+ for test in full_json.get('tests', {}):
+ for result in test.get('results', {}):
+ test_num = result.get('test_number')
+ test_remediation = result.get('remediation')
+ test_status = result.get('status', '')
+
+ if test_status.lower() == 'fail':
+ test_remedy = CONSERVATIVE.get(test_num)
+ if not test_remedy and danger:
+ # no conservative remedy, check dangerous if user wants
+ test_remedy = DANGEROUS.get(test_num)
+ if isinstance(test_remedy, tuple):
+ if test_remedy[0] == 'manual':
+ # we don't know how to autofix; log remediation text
+ hookenv.log('Test {}: unable to auto-apply remedy.\n'
+ 'Manual steps:\n{}'.format(test_num,
+ test_remediation))
+ elif test_remedy[0] == 'cli':
+ cmd = shlex.split(test_remedy[1])
+ try:
+ out = subprocess.check_output(cmd)
+ except subprocess.CalledProcessError:
+ _fail('Test {}: failed to run: {}'.format(test_num, cmd))
+ else:
+ hookenv.log('Test {}: applied remedy: {}\n'
+ 'Output: {}'.format(test_num, cmd, out))
+ applied_fixes += 1
+ elif test_remedy[0] == 'kv':
+ cfg_key = 'cis-' + test_remedy[1]
+ cfg = db.get(cfg_key) or {}
+ cfg.update(test_remedy[2])
+ db.set(cfg_key, cfg)
+
+ hookenv.log('Test {}: updated configuration: {}\n'.format(
+ test_num, cfg))
+ applied_fixes += 1
+ else:
+ hookenv.log('Test {}: remediation is missing'.format(test_num))
+
+ # CLI and KV changes will require a charm restart; do it.
+ if applied_fixes > 0:
+ _restart_charm()
+
+ msg = ('Applied {} remediations. Re-run with "apply=none" to generate a '
+ 'new report.').format(applied_fixes)
+ hookenv.action_set({'summary': msg})
+
+
+def reset():
+ '''Reset any remediations we applied to unitdata.kv().
+
+ This action does not track individual remediations to reset. Therefore,
+ this function unconditionally unsets all 'cis-' prefixed arguments that
+ this action may have set and restarts the relevant charm.
+ '''
+ db = unitdata.kv()
+
+ db.unset('cis-kube-apiserver')
+ db.unset('cis-kube-scheduler')
+ db.unset('cis-kube-controller-manager')
+ db.unset('cis-kubelet')
+ _restart_charm()
+
+ hookenv.action_set({'summary': ('Reset is complete. Re-run with '
+ '"apply=none" to generate a new report.')})
+
+
+def report(log_format='text'):
+ '''Run kube-bench and report results.
+
+ By default, save the full plain-text results to our RESULTS_DIR and set
+ action output with a summary. This function can also save full results in
+ a machine-friendly json format.
+
+ :param: log_format: String determines if output is text or json
+ :returns: Path to results log
+ '''
+ Path(RESULTS_DIR).mkdir(parents=True, exist_ok=True)
+
+ # Node type is different depending on the charm
+ app = hookenv.charm_name() or 'unknown'
+ version = 'cis-1.5'
+ if 'master' in app:
+ target = 'master'
+ elif 'worker' in app:
+ target = 'node'
+ elif 'etcd' in app:
+ target = 'etcd'
+ else:
+ _fail('Unable to determine the target to benchmark: {}'.format(app))
+
+ # Commands and log names are different depending on the format
+ if log_format == 'json':
+ log_prefix = 'results-json-'
+ verbose_cmd = ('{bin} -D {cfg} --benchmark {ver} --json run '
+ '--targets {target}').format(
+ bin=BENCH_BIN, cfg=BENCH_CFG, ver=version, target=target)
+ else:
+ log_prefix = 'results-text-'
+ verbose_cmd = ('{bin} -D {cfg} --benchmark {ver} run '
+ '--targets {target}').format(
+ bin=BENCH_BIN, cfg=BENCH_CFG, ver=version, target=target)
+
+ summary_cmd = ('{bin} -D {cfg} --benchmark {ver} '
+ '--noremediations --noresults run --targets {target}').format(
+ bin=BENCH_BIN, cfg=BENCH_CFG, ver=version, target=target)
+
+ # Store full results for future consumption
+ with tempfile.NamedTemporaryFile(mode='w+b', prefix=log_prefix,
+ dir=RESULTS_DIR, delete=False) as res_file:
+ try:
+ subprocess.call(shlex.split(verbose_cmd), stdout=res_file)
+ except subprocess.CalledProcessError:
+ _fail('Failed to run: {}'.format(verbose_cmd))
+ else:
+ # remember the filename for later (and make it readable, why not?)
+ Path(res_file.name).chmod(0o644)
+ log = res_file.name
+
+ # When making a summary, we also have a verbose report. Set action output
+ # so operators can see everything related to this run.
+ try:
+ out = subprocess.check_output(shlex.split(summary_cmd),
+ universal_newlines=True)
+ except subprocess.CalledProcessError:
+ _fail('Failed to run: {}'.format(summary_cmd))
+ else:
+ fetch_cmd = 'juju scp {unit}:{file} .'.format(unit=hookenv.local_unit(),
+ file=log)
+ hookenv.action_set({'cmd': summary_cmd,
+ 'report': fetch_cmd,
+ 'summary': out})
+
+ return log or None
+
+
+if __name__ == '__main__':
+    # Benchmarks are meaningless before the relevant snaps exist on
+    # this unit.
+    if not (is_flag_set('snap.installed.etcd') or
+            is_flag_set('kubernetes-master.snaps.installed') or
+            is_flag_set('kubernetes-worker.snaps.installed')):
+        msg = 'Snaps are not yet installed on this unit.'
+        _fail(msg)
+
+    # Validate action params
+    release = hookenv.action_get('release') or 'upstream'
+    config = hookenv.action_get('config')
+    if not config:
+        msg = 'Missing "config" parameter'
+        _fail(msg)
+    remediations = hookenv.action_get('apply')
+    if remediations not in ['none', 'conservative', 'dangerous', 'reset']:
+        msg = 'Invalid "apply" parameter: {}'.format(remediations)
+        _fail(msg)
+
+    # TODO: may want an option to overwrite an existing install
+    if Path(BENCH_BIN).exists() and Path(BENCH_CFG).exists():
+        hookenv.log('{} exists; skipping install'.format(BENCH_HOME))
+    else:
+        hookenv.log('Installing benchmark from: {}'.format(release))
+        install(release, config)
+
+    # Reset, remediate, or report
+    if remediations == 'reset':
+        hookenv.log('Attempting to remove all remediations')
+        reset()
+    elif remediations != 'none':
+        hookenv.log('Applying "{}" remediations'.format(remediations))
+        apply(remediations)
+    else:
+        hookenv.log('Report only; no remediations were requested')
+        report(log_format='text')
diff --git a/etcd/actions/compact b/etcd/actions/compact
new file mode 100755
index 0000000..c1f4834
--- /dev/null
+++ b/etcd/actions/compact
@@ -0,0 +1,144 @@
+#!/usr/local/sbin/charm-env python3
+
+import os
+import re
+import shlex
+import subprocess
+import sys
+
+from charms import layer
+
+from etcdctl import EtcdCtl
+
+from charmhelpers.core.hookenv import (
+ action_get,
+ action_set,
+ action_fail,
+ action_name
+)
+
+
+# Single etcdctl wrapper shared by every handler in this script.
+CTL = EtcdCtl()
+
+
+def action_fail_now(*args, **kw):
+    '''Call action_fail() and exit immediately.
+
+    NB: exits with status 0; the failure is reported through
+    action_fail, not the process exit code.
+    '''
+    action_fail(*args, **kw)
+    sys.exit(0)
+
+
+def requires_etcd_version(version_regex, human_version=None):
+    '''Decorator factory that enforces a specific version of etcdctl.
+
+    The decorated function will only be executed if the required version
+    of etcdctl is present. Otherwise, action_fail() will be called and
+    the process will exit immediately.
+
+    :param: version_regex: regex matched against CTL.version()
+    :param: human_version: friendly version text for the error message
+    '''
+    def wrap(f):
+        # NOTE(review): the wrapper drops f's return value and does not
+        # use functools.wraps, so the decorated name/docstring are lost
+        # -- harmless for these action handlers, which return None.
+        def wrapped_f(*args):
+            version = CTL.version()
+            if not re.match(version_regex, version):
+                required_version = human_version or version_regex
+                action_fail_now(
+                    'This action requires etcd version {}'.format(
+                        required_version))
+            f(*args)
+        return wrapped_f
+    return wrap
+
+
+# Ready-made guards for the two etcd major series.
+requires_etcd_v2 = requires_etcd_version(r'2\..*', human_version='2.x')
+requires_etcd_v3 = requires_etcd_version(r'3\..*', human_version='3.x')
+
+
+@requires_etcd_v3
+def alarm_disarm():
+ '''Call `etcdctl alarm disarm`.
+
+ '''
+ try:
+ output = CTL.run('alarm disarm')
+ action_set(dict(output=output))
+ except subprocess.CalledProcessError as e:
+ action_fail_now(e.output)
+
+
+@requires_etcd_v3
+def alarm_list():
+    '''Call `etcdctl alarm list`.
+
+    Publishes command output via action_set; fails the action on a
+    non-zero exit.
+    '''
+    try:
+        output = CTL.run('alarm list')
+        action_set(dict(output=output))
+    except subprocess.CalledProcessError as e:
+        action_fail_now(e.output)
+
+
+@requires_etcd_v3
+def compact():
+    '''Call `etcdctl compact`.
+
+    Uses the 'revision' action param, defaulting to the latest revision
+    reported by `etcdctl endpoint status`; 'physical' is forwarded as a
+    literal 'true'/'false' flag value.
+    '''
+    def get_latest_revision():
+        try:
+            output = CTL.run('endpoint status --write-out json')
+        except subprocess.CalledProcessError as e:
+            # action_fail_now() exits, so `output` is never read unbound.
+            action_fail_now(
+                'Failed to determine latest revision for '
+                'compaction: {}'.format(e))
+
+        m = re.search(r'"revision":(\d*)', output)
+        if not m:
+            action_fail_now(
+                "Failed to get revision from 'endpoint status' "
+                "output: {}".format(output))
+        return m.group(1)
+
+    revision = action_get('revision') or get_latest_revision()
+    # etcdctl expects literal 'true'/'false' text on the command line.
+    physical = 'true' if action_get('physical') else 'false'
+    command = 'compact {} --physical={}'.format(revision, physical)
+    try:
+        output = CTL.run(command)
+        action_set(dict(output=output))
+    except subprocess.CalledProcessError as e:
+        action_fail_now(e.output)
+
+
+@requires_etcd_v3
+def defrag():
+ '''Call `etcdctl defrag`.
+
+ '''
+ try:
+ output = CTL.run('defrag')
+ action_set(dict(output=output))
+ except subprocess.CalledProcessError as e:
+ action_fail_now(e.output)
+
+
+def health():
+    '''Call etcdctl cluster-health
+
+    NB: unlike the other handlers in this script, this one carries no
+    etcd-version guard; the EtcdCtl wrapper is called directly.
+    '''
+    try:
+        output = CTL.cluster_health(True)
+        action_set(dict(output=output))
+    except subprocess.CalledProcessError as e:
+        action_fail_now(e.output)
+
+
+if __name__ == '__main__':
+    # One script body backs several Juju action files (they share the
+    # same content), so dispatch on the invoking action's name.
+    ACTIONS = {
+        'alarm-disarm': alarm_disarm,
+        'alarm-list': alarm_list,
+        'compact': compact,
+        'defrag': defrag,
+        'health': health,
+    }
+
+    action = action_name()
+    ACTIONS[action]()
diff --git a/etcd/actions/debug b/etcd/actions/debug
new file mode 100755
index 0000000..8ba160e
--- /dev/null
+++ b/etcd/actions/debug
@@ -0,0 +1,102 @@
+#!/usr/local/sbin/charm-env python3
+
+import os
+import subprocess
+import tarfile
+import tempfile
+import traceback
+from contextlib import contextmanager
+from datetime import datetime
+from charmhelpers.core.hookenv import action_set, local_unit
+
+# Shared state set up by archive_context() and consumed by log() /
+# run_script(); only valid while the context is open.
+archive_dir = None
+log_file = None
+
+
+@contextmanager
+def archive_context():
+    """ Open a context with a new temporary directory.
+
+    When the context closes, the directory is archived, and the archive
+    location is added to Juju action output. """
+    global archive_dir
+    global log_file
+    with tempfile.TemporaryDirectory() as temp_dir:
+        name = "debug-" + datetime.now().strftime("%Y%m%d%H%M%S")
+        archive_dir = os.path.join(temp_dir, name)
+        os.makedirs(archive_dir)
+        with open("%s/debug.log" % archive_dir, "w") as log_file:
+            yield
+        # NOTE(review): chdir into temp_dir leaves the process cwd
+        # pointing at a deleted directory after TemporaryDirectory is
+        # cleaned up -- confirm nothing later relies on cwd.
+        os.chdir(temp_dir)
+        tar_path = "/home/ubuntu/%s.tar.gz" % name
+        with tarfile.open(tar_path, "w:gz") as f:
+            f.add(name)
+        action_set({
+            "path": tar_path,
+            "command": "juju scp %s:%s ." % (local_unit(), tar_path),
+            "message": " ".join([
+                "Archive has been created on unit %s." % local_unit(),
+                "Use the juju scp command to copy it to your local machine."
+            ])
+        })
+
+
+def log(msg):
+ """ Log a message that will be included in the debug archive.
+
+ Must be run within archive_context """
+ timestamp = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
+ for line in str(msg).splitlines():
+ log_file.write(timestamp + " | " + line.rstrip() + "\n")
+
+
+def run_script(script):
+    """ Run a single script. Must be run within archive_context """
+    log("Running script: " + script)
+    # Each script gets its own directory for stdout/stderr capture.
+    script_dir = os.path.join(archive_dir, script)
+    os.makedirs(script_dir)
+    env = os.environ.copy()
+    env["PYTHONPATH"] = "lib"  # allow same imports as reactive code
+    env["DEBUG_SCRIPT_DIR"] = script_dir
+    with open(script_dir + "/stdout", "w") as stdout:
+        with open(script_dir + "/stderr", "w") as stderr:
+            process = subprocess.Popen(
+                "debug-scripts/" + script,
+                stdout=stdout, stderr=stderr, env=env
+            )
+            try:
+                # Allow each script up to 5 minutes.
+                exit_code = process.wait(timeout=300)
+            except subprocess.TimeoutExpired:
+                log("ERROR: still running, terminating")
+                process.terminate()
+                try:
+                    exit_code = process.wait(timeout=10)
+                except subprocess.TimeoutExpired:
+                    # terminate() was ignored; escalate to kill.
+                    log("ERROR: still running, killing")
+                    process.kill()
+                    exit_code = process.wait(timeout=10)
+            if exit_code != 0:
+                log("ERROR: %s failed with exit code %d" % (script, exit_code))
+
+
+def run_all_scripts():
+ """ Run all scripts. For the sake of robustness, log and ignore any
+ exceptions that occur.
+
+ Must be run within archive_context """
+ scripts = os.listdir("debug-scripts")
+ for script in scripts:
+ try:
+ run_script(script)
+ except:
+ log(traceback.format_exc())
+
+
+def main():
+    """ Open an archive context and run all scripts. """
+    with archive_context():
+        run_all_scripts()
+
+
+# Entry point when invoked as a Juju action.
+if __name__ == "__main__":
+    main()
diff --git a/etcd/actions/defrag b/etcd/actions/defrag
new file mode 100755
index 0000000..c1f4834
--- /dev/null
+++ b/etcd/actions/defrag
@@ -0,0 +1,144 @@
+#!/usr/local/sbin/charm-env python3
+
+import os
+import re
+import shlex
+import subprocess
+import sys
+
+from charms import layer
+
+from etcdctl import EtcdCtl
+
+from charmhelpers.core.hookenv import (
+ action_get,
+ action_set,
+ action_fail,
+ action_name
+)
+
+
+# Single etcdctl wrapper shared by every handler in this script.
+CTL = EtcdCtl()
+
+
+def action_fail_now(*args, **kw):
+    '''Call action_fail() and exit immediately.
+
+    NB: exits with status 0; the failure is reported through
+    action_fail, not the process exit code.
+    '''
+    action_fail(*args, **kw)
+    sys.exit(0)
+
+
+def requires_etcd_version(version_regex, human_version=None):
+    '''Decorator factory that enforces a specific version of etcdctl.
+
+    The decorated function will only be executed if the required version
+    of etcdctl is present. Otherwise, action_fail() will be called and
+    the process will exit immediately.
+
+    :param: version_regex: regex matched against CTL.version()
+    :param: human_version: friendly version text for the error message
+    '''
+    def wrap(f):
+        def wrapped_f(*args):
+            version = CTL.version()
+            if not re.match(version_regex, version):
+                required_version = human_version or version_regex
+                action_fail_now(
+                    'This action requires etcd version {}'.format(
+                        required_version))
+            f(*args)
+        return wrapped_f
+    return wrap
+
+
+# Ready-made guards for the two etcd major series.
+requires_etcd_v2 = requires_etcd_version(r'2\..*', human_version='2.x')
+requires_etcd_v3 = requires_etcd_version(r'3\..*', human_version='3.x')
+
+
+@requires_etcd_v3
+def alarm_disarm():
+    '''Call `etcdctl alarm disarm`.
+
+    Publishes command output via action_set; fails the action on a
+    non-zero exit.
+    '''
+    try:
+        output = CTL.run('alarm disarm')
+        action_set(dict(output=output))
+    except subprocess.CalledProcessError as e:
+        action_fail_now(e.output)
+
+
+@requires_etcd_v3
+def alarm_list():
+    '''Call `etcdctl alarm list`.
+
+    Publishes command output via action_set; fails the action on a
+    non-zero exit.
+    '''
+    try:
+        output = CTL.run('alarm list')
+        action_set(dict(output=output))
+    except subprocess.CalledProcessError as e:
+        action_fail_now(e.output)
+
+
+@requires_etcd_v3
+def compact():
+    '''Call `etcdctl compact`.
+
+    Uses the 'revision' action param, defaulting to the latest revision
+    reported by `etcdctl endpoint status`.
+    '''
+    def get_latest_revision():
+        try:
+            output = CTL.run('endpoint status --write-out json')
+        except subprocess.CalledProcessError as e:
+            # action_fail_now() exits, so `output` is never read unbound.
+            action_fail_now(
+                'Failed to determine latest revision for '
+                'compaction: {}'.format(e))
+
+        m = re.search(r'"revision":(\d*)', output)
+        if not m:
+            action_fail_now(
+                "Failed to get revision from 'endpoint status' "
+                "output: {}".format(output))
+        return m.group(1)
+
+    revision = action_get('revision') or get_latest_revision()
+    # etcdctl expects literal 'true'/'false' text on the command line.
+    physical = 'true' if action_get('physical') else 'false'
+    command = 'compact {} --physical={}'.format(revision, physical)
+    try:
+        output = CTL.run(command)
+        action_set(dict(output=output))
+    except subprocess.CalledProcessError as e:
+        action_fail_now(e.output)
+
+
+@requires_etcd_v3
+def defrag():
+    '''Call `etcdctl defrag`.
+
+    Publishes command output via action_set; fails the action on a
+    non-zero exit.
+    '''
+    try:
+        output = CTL.run('defrag')
+        action_set(dict(output=output))
+    except subprocess.CalledProcessError as e:
+        action_fail_now(e.output)
+
+
+def health():
+    '''Call etcdctl cluster-health
+
+    NB: unlike the other handlers in this script, this one carries no
+    etcd-version guard; the EtcdCtl wrapper is called directly.
+    '''
+    try:
+        output = CTL.cluster_health(True)
+        action_set(dict(output=output))
+    except subprocess.CalledProcessError as e:
+        action_fail_now(e.output)
+
+
+if __name__ == '__main__':
+    # One script body backs several Juju action files (they share the
+    # same content), so dispatch on the invoking action's name.
+    ACTIONS = {
+        'alarm-disarm': alarm_disarm,
+        'alarm-list': alarm_list,
+        'compact': compact,
+        'defrag': defrag,
+        'health': health,
+    }
+
+    action = action_name()
+    ACTIONS[action]()
diff --git a/etcd/actions/health b/etcd/actions/health
new file mode 100755
index 0000000..c1f4834
--- /dev/null
+++ b/etcd/actions/health
@@ -0,0 +1,144 @@
+#!/usr/local/sbin/charm-env python3
+
+import os
+import re
+import shlex
+import subprocess
+import sys
+
+from charms import layer
+
+from etcdctl import EtcdCtl
+
+from charmhelpers.core.hookenv import (
+ action_get,
+ action_set,
+ action_fail,
+ action_name
+)
+
+
+# Single etcdctl wrapper shared by every handler in this script.
+CTL = EtcdCtl()
+
+
+def action_fail_now(*args, **kw):
+    '''Call action_fail() and exit immediately.
+
+    NB: exits with status 0; the failure is reported through
+    action_fail, not the process exit code.
+    '''
+    action_fail(*args, **kw)
+    sys.exit(0)
+
+
+def requires_etcd_version(version_regex, human_version=None):
+    '''Decorator factory that enforces a specific version of etcdctl.
+
+    The decorated function will only be executed if the required version
+    of etcdctl is present. Otherwise, action_fail() will be called and
+    the process will exit immediately.
+
+    :param: version_regex: regex matched against CTL.version()
+    :param: human_version: friendly version text for the error message
+    '''
+    def wrap(f):
+        def wrapped_f(*args):
+            version = CTL.version()
+            if not re.match(version_regex, version):
+                required_version = human_version or version_regex
+                action_fail_now(
+                    'This action requires etcd version {}'.format(
+                        required_version))
+            f(*args)
+        return wrapped_f
+    return wrap
+
+
+# Ready-made guards for the two etcd major series.
+requires_etcd_v2 = requires_etcd_version(r'2\..*', human_version='2.x')
+requires_etcd_v3 = requires_etcd_version(r'3\..*', human_version='3.x')
+
+
+@requires_etcd_v3
+def alarm_disarm():
+    '''Call `etcdctl alarm disarm`.
+
+    Publishes command output via action_set; fails the action on a
+    non-zero exit.
+    '''
+    try:
+        output = CTL.run('alarm disarm')
+        action_set(dict(output=output))
+    except subprocess.CalledProcessError as e:
+        action_fail_now(e.output)
+
+
+@requires_etcd_v3
+def alarm_list():
+    '''Call `etcdctl alarm list`.
+
+    Publishes command output via action_set; fails the action on a
+    non-zero exit.
+    '''
+    try:
+        output = CTL.run('alarm list')
+        action_set(dict(output=output))
+    except subprocess.CalledProcessError as e:
+        action_fail_now(e.output)
+
+
+@requires_etcd_v3
+def compact():
+    '''Call `etcdctl compact`.
+
+    Uses the 'revision' action param, defaulting to the latest revision
+    reported by `etcdctl endpoint status`.
+    '''
+    def get_latest_revision():
+        try:
+            output = CTL.run('endpoint status --write-out json')
+        except subprocess.CalledProcessError as e:
+            # action_fail_now() exits, so `output` is never read unbound.
+            action_fail_now(
+                'Failed to determine latest revision for '
+                'compaction: {}'.format(e))
+
+        m = re.search(r'"revision":(\d*)', output)
+        if not m:
+            action_fail_now(
+                "Failed to get revision from 'endpoint status' "
+                "output: {}".format(output))
+        return m.group(1)
+
+    revision = action_get('revision') or get_latest_revision()
+    # etcdctl expects literal 'true'/'false' text on the command line.
+    physical = 'true' if action_get('physical') else 'false'
+    command = 'compact {} --physical={}'.format(revision, physical)
+    try:
+        output = CTL.run(command)
+        action_set(dict(output=output))
+    except subprocess.CalledProcessError as e:
+        action_fail_now(e.output)
+
+
+@requires_etcd_v3
+def defrag():
+    '''Call `etcdctl defrag`.
+
+    Publishes command output via action_set; fails the action on a
+    non-zero exit.
+    '''
+    try:
+        output = CTL.run('defrag')
+        action_set(dict(output=output))
+    except subprocess.CalledProcessError as e:
+        action_fail_now(e.output)
+
+
+def health():
+    '''Call etcdctl cluster-health
+
+    NB: unlike the other handlers in this script, this one carries no
+    etcd-version guard; the EtcdCtl wrapper is called directly.
+    '''
+    try:
+        output = CTL.cluster_health(True)
+        action_set(dict(output=output))
+    except subprocess.CalledProcessError as e:
+        action_fail_now(e.output)
+
+
+if __name__ == '__main__':
+    # One script body backs several Juju action files (they share the
+    # same content), so dispatch on the invoking action's name.
+    ACTIONS = {
+        'alarm-disarm': alarm_disarm,
+        'alarm-list': alarm_list,
+        'compact': compact,
+        'defrag': defrag,
+        'health': health,
+    }
+
+    action = action_name()
+    ACTIONS[action]()
diff --git a/etcd/actions/install b/etcd/actions/install
new file mode 100755
index 0000000..c4e2fe6
--- /dev/null
+++ b/etcd/actions/install
@@ -0,0 +1,8 @@
+#!/bin/bash
+set -eux
+
+# Install a snap attached from the `etcd` resource.
+
+RESOURCE_PATH=$(resource-get etcd)
+# Quote the expansion: an unquoted path would word-split and glob.
+snap install --dangerous "$RESOURCE_PATH"
+
diff --git a/etcd/actions/package-client-credentials b/etcd/actions/package-client-credentials
new file mode 100755
index 0000000..ed85f93
--- /dev/null
+++ b/etcd/actions/package-client-credentials
@@ -0,0 +1,47 @@
+#!/bin/bash
+
+# The certificates live in leader-data. Grab them from there, always
+
+source ~/.bash_aliases
+mkdir -p etcd_credentials
+
+# Quote the expansion: with the var unset, an unquoted `[ -z ${...} ]`
+# degenerates to `[ -z ]` and only works by accident.
+if [ -z "${ETCDCTL_CERT_FILE}" ]; then
+    # presumably the etcd v3 style env var names -- confirm
+    cp "$ETCDCTL_CERT" etcd_credentials/client.crt
+    cp "$ETCDCTL_KEY" etcd_credentials/client.key
+    cp "$ETCDCTL_CACERT" etcd_credentials/ca.crt
+else
+    # presumably the etcd v2 style *_FILE env var names -- confirm
+    cp "$ETCDCTL_CERT_FILE" etcd_credentials/client.crt
+    cp "$ETCDCTL_KEY_FILE" etcd_credentials/client.key
+    cp "$ETCDCTL_CA_FILE" etcd_credentials/ca.crt
+fi
+# Render a README heredoc
+# NOTE(review): the unquoted EOF delimiter means $(pwd) and
+# $(unit-get public-address) expand on the unit when the README is
+# written, not on the operator's machine -- confirm that is intended.
+cat << EOF > etcd_credentials/README.txt
+# ETCD Credentials Package
+
+Greetings! This credentials package was generated for you by Juju. In order
+to consume these keys, you will need to do a few things first:
+
+Untarball the archive somewhere you wish to keep your sensitive client
+credentials.
+
+Export those locations as environment variables, set the etcdctl endpoint,
+and expose the etcd service. Even though Etcd is currently configured to
+validate SSL certificates before a connection can be established, it's best
+practice to leave it firewalled from the world unless you have need of an
+exposed etcd endpoint.
+
+    juju expose etcd
+    export ETCDCTL_KEY=$(pwd)/client.key
+    export ETCDCTL_CERT=$(pwd)/client.crt
+    export ETCDCTL_CACERT=$(pwd)/ca.crt
+    export ETCDCTL_ENDPOINTS=https://$(unit-get public-address):2379
+    etcdctl member list
+
+If you have any trouble regarding connecting to your Etcd cluster, don't
+hesitate to reach out over the juju mailing list: juju@lists.ubuntu.com
+
+EOF
+
+tar cfz etcd_credentials.tar.gz etcd_credentials
+cp etcd_credentials.tar.gz /home/ubuntu/
+rm -rf etcd_credentials
diff --git a/etcd/actions/restore b/etcd/actions/restore
new file mode 100755
index 0000000..32fa764
--- /dev/null
+++ b/etcd/actions/restore
@@ -0,0 +1,5 @@
+#!/bin/bash
+
+# This runs with the assumption that the code is being executed from $CHARM_DIR
+# and is also working around a stray issue where actions aren't loading $LIB
+# NB: the PYTHONPATH assignment applies only to this one command.
+PYTHONPATH=$PYTHONPATH:$CHARM_DIR/lib actions/restore.py
diff --git a/etcd/actions/restore.py b/etcd/actions/restore.py
new file mode 100755
index 0000000..0a2aba7
--- /dev/null
+++ b/etcd/actions/restore.py
@@ -0,0 +1,261 @@
+#!/usr/local/sbin/charm-env python3
+
+from charms import layer
+from charms.templating.jinja2 import render
+from charmhelpers.core import unitdata
+from charmhelpers.core import hookenv
+from charmhelpers.core.hookenv import function_fail
+from charmhelpers.core.hookenv import action_get
+from charmhelpers.core.hookenv import action_set
+from charmhelpers.core.hookenv import config
+from charmhelpers.core.hookenv import log
+from charmhelpers.core.hookenv import resource_get
+from charmhelpers.core.hookenv import is_leader
+from charmhelpers.core.hookenv import _run_atstart
+from charmhelpers.core.hookenv import _run_atexit
+from charmhelpers.core.host import chdir
+from charmhelpers.core.host import service_start
+from charmhelpers.core.host import service_stop
+from etcd_lib import get_ingress_address
+from etcdctl import EtcdCtl
+from etcd_databag import EtcdDatabag
+from shlex import split
+from subprocess import check_call
+from subprocess import check_output
+from subprocess import CalledProcessError
+from subprocess import Popen
+from subprocess import PIPE
+from datetime import datetime
+from uuid import uuid4
+import hashlib
+import os
+import sys
+import time
+import yaml
+
+# Import charm layers and start reactive
+layer.import_layer_libs()
+_run_atstart()
+
+opts = layer.options('etcd')
+
+# Timestamped name for the archive created when backing up existing data.
+DATESTAMP = datetime.strftime(datetime.now(), '%Y%m%d-%H%M%S')
+ARCHIVE = "etcd-data-{}.tar.gz".format(DATESTAMP)
+
+# Prefer the per-unit data dir (e.g. .../etcd0.etcd); fall back to the
+# layer's configured data dir when the per-unit path does not exist.
+unit_name = os.getenv('JUJU_UNIT_NAME').replace('/', '')
+ETCD_DATA_DIR = '{}/{}.etcd'.format(opts['etcd_data_dir'], unit_name)
+if not os.path.isdir(ETCD_DATA_DIR):
+    ETCD_DATA_DIR = opts['etcd_data_dir']
+
+# Action parameters and networking details used by the helpers below.
+ETCD_PORT = config('management_port')
+CLUSTER_ADDRESS = get_ingress_address('cluster')
+SKIP_BACKUP = action_get('skip-backup')
+SNAPSHOT_ARCHIVE = resource_get('snapshot')
+TARGET_PATH = action_get('target')
+
+
+def preflight_check():
+ ''' Check preconditions for data restoration '''
+ if not is_leader():
+ function_fail('This action can only be run on the leader unit')
+ sys.exit(0)
+ if not SNAPSHOT_ARCHIVE:
+ function_fail({'result.failed': 'Missing snapshot. See: README.md'})
+ sys.exit(0)
+
+
+def render_backup():
+ ''' Backup existing data in the event of restoration on a dirty unit. '''
+ if not os.path.isdir(ETCD_DATA_DIR) and SKIP_BACKUP:
+ msg = "Backup set to True, but no data found to backup"
+ action_set({'backup.error': msg})
+ if not os.path.isdir(ETCD_DATA_DIR):
+ return
+
+ with chdir(ETCD_DATA_DIR):
+ if not SKIP_BACKUP:
+ log('Backing up existing data found in {}'.format(ETCD_DATA_DIR))
+ archive_path = "{}/{}".format(TARGET_PATH, ARCHIVE)
+ cmd = 'tar cvf {0} {1}'.format(archive_path, '.')
+ check_call(split(cmd))
+ backup_sum = shasum_file(archive_path)
+ action_set({'backup.path': archive_path,
+ 'backup.sha256sum': backup_sum})
+
+
+def unpack_resource():
+ ''' Grab the resource path, and unpack it into $PATH '''
+ cmd = "tar xvf {0} -C {1}".format(SNAPSHOT_ARCHIVE, ETCD_DATA_DIR)
+ check_call(split(cmd))
+
+
+def is_v3_backup():
+ ''' See if the backup file contains a db file indicating a v3 backup '''
+ cmd = "tar -tvf {0} --wildcards '*/db'".format(SNAPSHOT_ARCHIVE)
+ try:
+ check_call(split(cmd))
+ except CalledProcessError:
+ return False
+ return True
+
+
+def restore_v3_backup():
+    ''' Apply a v3 backup.
+
+    Unpacks the snapshot to a scratch dir, runs `etcdctl snapshot restore`
+    to rebuild the member directory with this unit's cluster identity, then
+    swaps the restored member dir into etcd's data dir and cleans up.
+    '''
+    cmd = "mkdir -p /root/tmp/restore-v3"
+    check_call(split(cmd))
+
+    cmd = "tar xvf {0} -C /root/tmp/restore-v3".format(SNAPSHOT_ARCHIVE)
+    check_call(split(cmd))
+
+    configfile = open('/var/snap/etcd/common/etcd.conf.yml', "r")
+    # NOTE: local 'config' shadows the imported hookenv.config inside this
+    # function; it holds the parsed etcd.conf.yml, not charm config.
+    config = yaml.safe_load(configfile)
+    # Use the insecure 4001 port we have open in our deployment
+    environ = dict(os.environ, ETCDCTL_API="3")
+    cmd = "/snap/bin/etcdctl --endpoints=http://localhost:4001 snapshot " \
+          "restore /root/tmp/restore-v3/db --skip-hash-check " \
+          "--data-dir='/root/tmp/restore-v3/etcd' " \
+          "--initial-cluster='{}' --initial-cluster-token='{}' " \
+          "--initial-advertise-peer-urls='{}' --name='{}'"
+
+    if 'initial-cluster' in config and config['initial-cluster']:
+        # configuration contains initialization params
+        cmd = cmd.format(config['initial-cluster'],
+                         config['initial-cluster-token'],
+                         config['initial-advertise-peer-urls'],
+                         config['name'])
+    else:
+        # configuration does not contain initialization params
+        # probably coming from an etcd upgrades from etcd2
+        initial_cluster = '{}=https://{}:2380'.format(config['name'], CLUSTER_ADDRESS)
+        initial_cluster_token = CLUSTER_ADDRESS
+        initial_urls = 'https://{}:2380'.format(CLUSTER_ADDRESS)
+        cmd = cmd.format(initial_cluster,
+                         initial_cluster_token,
+                         initial_urls,
+                         config['name'])
+
+    configfile.close()
+    check_call(split(cmd), env=environ)
+
+    # Make sure we do not have anything left from any old deployments
+    cmd = "rm -rf {}/member".format(config['data-dir'])
+    check_call(split(cmd))
+
+    cmd = "cp -r /root/tmp/restore-v3/etcd/member {}".format(config['data-dir'])
+    check_call(split(cmd))
+
+    # Clean up
+    cmd = "rm -rf /root/tmp/restore-v3"
+    check_call(split(cmd))
+ check_call(split(cmd))
+
+
+def start_etcd_forked():
+    ''' Start the etcd daemon temporarily to initiate new cluster details.
+
+    -force-new-cluster makes etcd drop prior membership so the restored
+    data comes up as a single-member cluster. Returns the daemon's pid.
+    '''
+    raw = "/snap/etcd/current/bin/etcd -data-dir={0} -force-new-cluster"
+    cmd = raw.format(ETCD_DATA_DIR)
+    # NOTE(review): the Popen handle is discarded and its stdout/stderr PIPEs
+    # are never drained; a very chatty daemon could block on a full pipe —
+    # confirm output volume is small during this short-lived fork.
+    proc = Popen(split(cmd), stdout=PIPE, stderr=PIPE)
+    return proc.pid
+
+
+def pkill_etcd(pid=''):
+ ''' Kill the temporary forked etcd daemon '''
+ # cmd = 'pkill etcd'
+ if pid:
+ cmd = 'kill -9 {}'.format(pid)
+ else:
+ cmd = 'pkill etcd'
+
+ check_call(split(cmd))
+
+
+def probe_forked_etcd():
+ ''' Block until the forked etcd instance has started and return'''
+ output = b""
+ loop = 0
+ MAX_WAIT = 10
+
+ while b"http://localhost" not in output:
+ try:
+ output = check_output(split('/snap/bin/etcd.etcdctl member list'))
+ loop = loop + 1
+ except:
+ log('Still waiting on forked etcd instance...')
+ output = b""
+ loop = loop + 1
+ time.sleep(1)
+ if loop > MAX_WAIT:
+ raise TimeoutError("Timed out waiting for forked etcd.")
+
+
+def reconfigure_client_advertise():
+    ''' Reconfigure the backup to use host network addresses for client advertise
+    instead of the assumed localhost addressing '''
+    cmd = "/snap/bin/etcd.etcdctl member list"
+    members = check_output(split(cmd))
+    # Member id is the token before the first ':' of the output.
+    # NOTE(review): this assumes a single-member list (post -force-new-cluster);
+    # with several members only the first would be updated — confirm.
+    member_id = members.split(b':')[0].decode('utf-8')
+
+    raw_update = "/snap/bin/etcd.etcdctl member update {0} http://{1}:{2}"
+    update_cmd = raw_update.format(member_id, CLUSTER_ADDRESS, ETCD_PORT)
+    check_call(split(update_cmd))
+
+
+def shasum_file(filepath):
+ ''' Compute the SHA256sum of a file for verification purposes '''
+ BUF_SIZE = 65536 # 64kb chunk size
+ shasum = hashlib.sha256()
+ with open(filepath, 'rb') as fp:
+ while True:
+ data = fp.read(BUF_SIZE)
+ if not data:
+ break
+ shasum.update(data)
+ return shasum.hexdigest()
+
+
+def dismantle_cluster():
+    """Disconnect other cluster members.
+
+    This is a preparation step before restoring snapshot on the cluster:
+    every peer except this unit is unregistered, and the local etcd config
+    is re-rendered with cluster-state 'new' so the daemon bootstraps a
+    fresh cluster on its next start.
+    """
+    log('Disconnecting cluster members')
+    etcdctl = EtcdCtl()
+    etcd_conf = EtcdDatabag()
+
+    my_name = etcd_conf.unit_name
+    endpoint = 'https://{}:{}'.format(etcd_conf.cluster_address,
+                                      etcd_conf.port)
+    # Unregister every member except ourselves.
+    for name, data in etcdctl.member_list(endpoint).items():
+        if name != my_name:
+            log('Disconnecting {}'.format(name), hookenv.DEBUG)
+            etcdctl.unregister(data['unit_id'], endpoint)
+
+    etcd_conf.cluster_state = 'new'
+    conf_path = os.path.join(etcd_conf.etcd_conf_dir, "etcd.conf.yml")
+    render('etcd3.conf', conf_path, etcd_conf.__dict__, owner='root',
+           group='root')
+
+
+def rebuild_cluster():
+ """Signal other etcd units to rejoin new cluster."""
+ log('Requesting peer members to rejoin cluster')
+ rejoin_request = uuid4().hex
+ hookenv.leader_set(force_rejoin=rejoin_request)
+
+
+if __name__ == '__main__':
+    log('Performing etcd snapshot restore')
+    preflight_check()
+    # Archive any pre-existing data (unless skip-backup was requested).
+    render_backup()
+    dismantle_cluster()
+    service_stop(opts['etcd_daemon_process'])
+    if is_v3_backup():
+        restore_v3_backup()
+    else:
+        # v2 restore: unpack, briefly run etcd with -force-new-cluster to
+        # rewrite cluster metadata, fix the advertise address, then kill it.
+        unpack_resource()
+        pid = start_etcd_forked()
+        probe_forked_etcd()
+        reconfigure_client_advertise()
+        pkill_etcd(pid)
+    service_start(opts['etcd_daemon_process'])
+    rebuild_cluster()
+    _run_atexit()
diff --git a/etcd/actions/snap-upgrade b/etcd/actions/snap-upgrade
new file mode 100755
index 0000000..1da3eab
--- /dev/null
+++ b/etcd/actions/snap-upgrade
@@ -0,0 +1,5 @@
+#!/bin/bash
+
+# Actions execute from $CHARM_DIR, and actions do not load $LIB on their own,
+# so prepend the charm's lib directory before handing off to the Python action.
+PYTHONPATH="$PYTHONPATH:$CHARM_DIR/lib" actions/snap-upgrade.py
diff --git a/etcd/actions/snap-upgrade.py b/etcd/actions/snap-upgrade.py
new file mode 100755
index 0000000..6bfdd6a
--- /dev/null
+++ b/etcd/actions/snap-upgrade.py
@@ -0,0 +1,178 @@
+#!/usr/local/sbin/charm-env python3
+
+from charms.layer import snap
+from charmhelpers.core import unitdata
+from charmhelpers.core.hookenv import action_get
+from charmhelpers.core.hookenv import action_set
+from charmhelpers.core.hookenv import action_fail
+from charmhelpers.core.hookenv import config
+from charmhelpers.core.hookenv import log
+from charms.reactive import is_state
+from charms.reactive import remove_state
+from charms.reactive import set_state
+
+# from charmhelpers.core.host import chdir
+
+from datetime import datetime
+from subprocess import call
+from subprocess import check_call
+from subprocess import CalledProcessError
+
+from shlex import split
+
+import os
+import shutil
+import sys
+import tempfile
+
+
+# Define some dict's containing paths of files we expect to see in
+# scenarios
+
+# Paths laid down by the etcd Debian package: TLS material, the defaults
+# file, and the data directory to be migrated.
+deb_paths = {'config': ['/etc/ssl/etcd/ca.crt',
+                        '/etc/ssl/etcd/server.crt',
+                        '/etc/ssl/etcd/server.key',
+                        '/etc/ssl/etcd/client.crt',
+                        '/etc/ssl/etcd/client.key',
+                        '/etc/default/etcd'],
+             'data': ['/var/lib/etcd/default']}
+
+# Snappy only cares about the config objects. Data validation will come
+# at a later date. We can etcdctl ls / and then verify the data made it
+# post migration.
+snap_paths = {'config': ['/var/snap/etcd/common/etcd.conf',
+                         '/var/snap/etcd/common/server.crt',
+                         '/var/snap/etcd/common/server.key',
+                         '/var/snap/etcd/common/ca.crt'],
+              'client': ['/var/snap/etcd/common/client.crt',
+                         '/var/snap/etcd/common/client.key'],
+              'common': '/var/snap/etcd/common'}
+
+
+def create_migration_backup(backup_package=''):
+ ''' Backup existing Etcd config/data paths if found and create a
+ tarball consisting of that discovered configuration '''
+
+ datestring = datetime.strftime(datetime.now(), '%Y%m%d_%H%M%S')
+
+ if not backup_package:
+ pkg = '/home/ubuntu/etcd_migration_{}'
+ backup_package = pkg.format(datestring)
+
+ if os.path.exists(backup_package):
+ msg = 'Backup package exists: {}'.format(backup_package)
+ action_set({'fail.message': msg})
+ return False
+
+ with tempfile.TemporaryDirectory() as tmpdir:
+ # Create a temporary path to perform the backup, and date the contents.
+ dated_path = "{0}/etcd_migration_{1}".format(tmpdir, datestring)
+ os.makedirs(dated_path)
+
+ # backup all the configuration data
+ for p in deb_paths['config']:
+ if os.path.exists(p):
+ shutil.copy(p, dated_path)
+ else:
+ log('Skipping copy for: {} - file not found'.format(p), 'WARN')
+
+ # backup the actual state of etcd's data
+ for p in deb_paths['data']:
+ if os.path.exists(p):
+ cmd = 'rsync -avzp {} {}'.format(p, dated_path)
+ check_call(split(cmd))
+
+ try:
+ # Create the tarball in its final location
+ shutil.make_archive(backup_package, 'gztar', tmpdir)
+ except Exception as ex:
+ action_set({'fail.message': ex.message})
+ return False
+ log('Created backup {}'.format(backup_package))
+ return True
+
+
+def install_snap(channel, classic=False):
+    ''' Handle installation of snaps, both from resources and from the snap
+    store. The only indicator we need is classic mode and the channel.
+
+    :param channel: snap store channel to install from (e.g. 'ingest/stable')
+    :param classic: install with classic confinement when True
+    '''
+    snap.install('etcd', channel=channel, classic=classic)
+
+
+def deb_to_snap_migration():
+ has_migrated = has_migrated_from_deb()
+ if not has_migrated:
+ try:
+ cmd = '/snap/bin/etcd.ingest'
+ check_call(split(cmd))
+ except CalledProcessError as cpe:
+ log('Error encountered during ingest.', 'ERROR')
+ log('Error message: {}'.format(cpe.message))
+ action_fail('Migration failed')
+
+ for key_path in snap_paths['client']:
+ chmod = "chmod 644 {}".format(key_path)
+ call(split(chmod))
+ cmod = "chmod 755 {}".format(snap_paths['common'])
+ call(split(cmod))
+
+
+def purge_deb_files():
+ probe_package_command = 'dpkg --list etcd'
+ return_code = call(split(probe_package_command))
+ if return_code != 0:
+ # The return code from dpkg --list when the package is
+ # non existant
+ action_set({'dpkg.list.message': 'dpkg probe return_code > 0',
+ 'skip.package.purge': 'True'})
+ return
+ log('Purging deb configuration files post migration', 'INFO')
+ cmd = 'apt-get purge -y etcd'
+ try:
+ check_call(split(cmd))
+ except CalledProcessError as cpe:
+ action_fail({'apt.purge.message': cpe.message})
+
+ for f in deb_paths['config']:
+ try:
+ log('Removing file {}'.format(f), 'INFO')
+ os.remove(f)
+ except FileNotFoundError:
+ k = 'purge.missing.{}'.format(os.path.basename(f))
+ msg = 'Did not purge {}. File not found.'.format(f)
+ action_set({k: msg})
+ except:
+ k = 'purge.error.{}'.format(f)
+ msg = 'Failed to purge {}. Manual removal required.'.format(k)
+ action_set({k: msg})
+
+
+def has_migrated_from_deb():
+ for p in snap_paths['config']:
+ # helpful when debugging
+ log("Scanning for file: {} {}".format(p, os.path.exists(p)), 'DEBUG')
+ if not os.path.exists(p):
+ return False
+ return True
+
+if __name__ == '__main__':
+    # Control flow of the action
+    backup_package = action_get('target')
+    backup = action_get('backup')
+    channel = config('channel')
+
+    # Optionally snapshot the deb-era config/data before touching anything.
+    if backup:
+        backup_status = create_migration_backup(backup_package)
+        if not backup_status:
+            action_fail('Failed creating the backup. Refusing to proceed.')
+            sys.exit(0)
+
+    # First run only: install the ingest snap and migrate the deb data.
+    if not is_state('etcd.deb.migrated'):
+        install_snap('ingest/stable', True)
+        deb_to_snap_migration()
+    # Install the configured channel, drop deb remnants, and reset reactive
+    # state so the next hook reconfigures etcd from the snap.
+    install_snap(channel, False)
+    purge_deb_files()
+    remove_state('etcd.installed')
+    set_state('snap.installed.etcd')
+    remove_state('etcd.pillowmints')
+    unitdata.kv().flush()
+    call(['hooks/config-changed'])
diff --git a/etcd/actions/snapshot b/etcd/actions/snapshot
new file mode 100755
index 0000000..35ce3cf
--- /dev/null
+++ b/etcd/actions/snapshot
@@ -0,0 +1,52 @@
+#!/bin/bash
+
+set -ex
+
+# Snippet from: https://coreos.com/etcd/docs/latest/admin_guide.html
+# Snapshot a running etcd cluster data.
+# This command will rewrite some of the metadata contained in the backup
+# (specifically, the node ID and cluster ID), which means that the node will
+# lose its former identity. In order to recreate a cluster from the backup, you
+# will need to start a new, single-node cluster. The metadata is rewritten to
+# prevent the new node from inadvertently being joined onto an existing cluster.
+
+ETCD_BACKUP_TARGET_DIR=$(action-get target)
+ETCD_KEYS_VERSION=$(action-get keys-version)
+UNIT_NAME=${JUJU_UNIT_NAME%%/*}
+UNIT_NUM=${JUJU_UNIT_NAME#*/}
+# Prefer the per-unit data dir (e.g. etcd0.etcd); fall back to the snap root.
+ETCD_DATA_DIR=/var/snap/etcd/current/$UNIT_NAME$UNIT_NUM.etcd/
+if [ ! -d "$ETCD_DATA_DIR" ]; then
+    ETCD_DATA_DIR=/var/snap/etcd/current/
+fi
+
+DATE_STAMP=$(date +%Y-%m-%d-%H.%M.%S)
+ARCHIVE=etcd-snapshot-$DATE_STAMP.tar.gz
+
+# Ensure the backup target exists
+mkdir -p "$ETCD_BACKUP_TARGET_DIR/$JUJU_ACTION_UUID"
+
+if [ "${ETCD_KEYS_VERSION}" == "v2" ]; then
+    # Dump the data currently in the cluster
+    /snap/bin/etcd.etcdctl backup --data-dir "$ETCD_DATA_DIR" --backup-dir "$ETCD_BACKUP_TARGET_DIR/$JUJU_ACTION_UUID"
+elif [ "${ETCD_KEYS_VERSION}" == "v3" ]; then
+    # A v3 store is a single boltdb file; copy it directly.
+    cp "$ETCD_DATA_DIR/member/snap/db" "$ETCD_BACKUP_TARGET_DIR/$JUJU_ACTION_UUID"
+else
+    action-fail "keys-version must be either v2 or v3"
+    exit
+fi
+
+# Create the backup archive
+cd "$ETCD_BACKUP_TARGET_DIR/$JUJU_ACTION_UUID"
+tar cvfz ../$ARCHIVE .
+
+# keep things tidy
+cd ..
+rm -rf "$JUJU_ACTION_UUID"
+
+action-set snapshot.path="$ETCD_BACKUP_TARGET_DIR/$ARCHIVE"
+action-set snapshot.size="$(du -h $ETCD_BACKUP_TARGET_DIR/$ARCHIVE | cut -d$'\t' -f1)"
+action-set snapshot.sha256="$(sha256sum $ETCD_BACKUP_TARGET_DIR/$ARCHIVE | cut -d' ' -f1)"
+action-set copy.cmd="juju scp $JUJU_UNIT_NAME:$ETCD_BACKUP_TARGET_DIR/$ARCHIVE ."
+action-set snapshot.version="$(/snap/bin/etcd.etcdctl version)"
diff --git a/etcd/bin/charm-env b/etcd/bin/charm-env
new file mode 100755
index 0000000..d211ce9
--- /dev/null
+++ b/etcd/bin/charm-env
@@ -0,0 +1,107 @@
+#!/bin/bash
+
+VERSION="1.0.0"
+
+
+find_charm_dirs() {
+    # Resolve JUJU_CHARM_DIR/CHARM_DIR, accepting either if set, otherwise
+    # guessing from /var/lib/juju/agents. $1 is an optional charm-name hint.
+    # Hopefully, $JUJU_CHARM_DIR is set so which venv to use in unambiguous.
+    if [[ -n "$JUJU_CHARM_DIR" || -n "$CHARM_DIR" ]]; then
+        if [[ -z "$JUJU_CHARM_DIR" ]]; then
+            # accept $CHARM_DIR to be more forgiving
+            export JUJU_CHARM_DIR="$CHARM_DIR"
+        fi
+        if [[ -z "$CHARM_DIR" ]]; then
+            # set CHARM_DIR as well to help with backwards compatibility
+            export CHARM_DIR="$JUJU_CHARM_DIR"
+        fi
+        return
+    fi
+    # Try to guess the value for JUJU_CHARM_DIR by looking for a non-subordinate
+    # (because there's got to be at least one principle) charm directory;
+    # if there are several, pick the first by alpha order.
+    agents_dir="/var/lib/juju/agents"
+    if [[ -d "$agents_dir" ]]; then
+        desired_charm="$1"
+        found_charm_dir=""
+        if [[ -n "$desired_charm" ]]; then
+            # A --charm hint was given: scan each unit's metadata.yaml for a
+            # matching charm name, and require the match to be unique.
+            for charm_dir in $(/bin/ls -d "$agents_dir"/unit-*/charm); do
+                charm_name="$(grep -o '^['\''"]\?name['\''"]\?:.*' $charm_dir/metadata.yaml 2> /dev/null | sed -e 's/.*: *//' -e 's/['\''"]//g')"
+                if [[ "$charm_name" == "$desired_charm" ]]; then
+                    if [[ -n "$found_charm_dir" ]]; then
+                        >&2 echo "Ambiguous possibilities for JUJU_CHARM_DIR matching '$desired_charm'; please run within a Juju hook context"
+                        exit 1
+                    fi
+                    found_charm_dir="$charm_dir"
+                fi
+            done
+            if [[ -z "$found_charm_dir" ]]; then
+                >&2 echo "Unable to determine JUJU_CHARM_DIR matching '$desired_charm'; please run within a Juju hook context"
+                exit 1
+            fi
+            export JUJU_CHARM_DIR="$found_charm_dir"
+            export CHARM_DIR="$found_charm_dir"
+            return
+        fi
+        # shellcheck disable=SC2126
+        non_subordinates="$(grep -L 'subordinate"\?:.*true' "$agents_dir"/unit-*/charm/metadata.yaml | wc -l)"
+        if [[ "$non_subordinates" -gt 1 ]]; then
+            >&2 echo 'Ambiguous possibilities for JUJU_CHARM_DIR; please use --charm or run within a Juju hook context'
+            exit 1
+        elif [[ "$non_subordinates" -eq 1 ]]; then
+            # Exactly one principal charm on this machine: use it.
+            for charm_dir in $(/bin/ls -d "$agents_dir"/unit-*/charm); do
+                if grep -q 'subordinate"\?:.*true' "$charm_dir/metadata.yaml"; then
+                    continue
+                fi
+                export JUJU_CHARM_DIR="$charm_dir"
+                export CHARM_DIR="$charm_dir"
+                return
+            done
+        fi
+    fi
+    >&2 echo 'Unable to determine JUJU_CHARM_DIR; please run within a Juju hook context'
+    exit 1
+}
+
+try_activate_venv() {
+    # Source the shared virtualenv if the charm build produced one.
+    local venv="$JUJU_CHARM_DIR/../.venv"
+    if [[ -d "$venv" ]]; then
+        . "$venv/bin/activate"
+    fi
+}
+
+find_wrapped() {
+    # Locate the real executable this symlink wraps: search PATH with
+    # /usr/local/sbin (where the charm-env symlinks live) stripped out,
+    # so we don't resolve back to ourselves.
+    PATH="${PATH/\/usr\/local\/sbin:}" which "$(basename "$0")"
+}
+
+
+if [[ "$1" == "--version" || "$1" == "-v" ]]; then
+    echo "$VERSION"
+    exit 0
+fi
+
+
+# allow --charm option to hint which JUJU_CHARM_DIR to choose when ambiguous
+# NB: --charm option must come first
+# NB: option must be processed outside find_charm_dirs to modify $@
+charm_name=""
+if [[ "$1" == "--charm" ]]; then
+    charm_name="$2"
+    shift; shift
+fi
+
+# Resolve the charm dir, activate its venv (if any), and expose its libs.
+find_charm_dirs "$charm_name"
+try_activate_venv
+export PYTHONPATH="$JUJU_CHARM_DIR/lib:$PYTHONPATH"
+
+# Dispatch based on how this script was invoked.
+if [[ "$(basename "$0")" == "charm-env" ]]; then
+    # being used as a shebang
+    exec "$@"
+elif [[ "$0" == "$BASH_SOURCE" ]]; then
+    # being invoked as a symlink wrapping something to find in the venv
+    exec "$(find_wrapped)" "$@"
+elif [[ "$(basename "$BASH_SOURCE")" == "charm-env" ]]; then
+    # being sourced directly; do nothing
+    /bin/true
+else
+    # being sourced for wrapped bash helpers
+    . "$(find_wrapped)"
+fi
diff --git a/etcd/bin/layer_option b/etcd/bin/layer_option
new file mode 100755
index 0000000..3253ef8
--- /dev/null
+++ b/etcd/bin/layer_option
@@ -0,0 +1,22 @@
+#!/usr/bin/env python3
+
+import sys
+import argparse
+from charms import layer
+
+
+parser = argparse.ArgumentParser(description='Access layer options.')
+parser.add_argument('section',
+ help='the section, or layer, the option is from')
+parser.add_argument('option',
+ help='the option to access')
+
+args = parser.parse_args()
+value = layer.options.get(args.section, args.option)
+if isinstance(value, bool):
+ sys.exit(0 if value else 1)
+elif isinstance(value, list):
+ for val in value:
+ print(val)
+else:
+ print(value)
diff --git a/etcd/config.yaml b/etcd/config.yaml
new file mode 100644
index 0000000..8e06437
--- /dev/null
+++ b/etcd/config.yaml
@@ -0,0 +1,48 @@
+"options":
+ "nagios_context":
+ "default": "juju"
+ "type": "string"
+ "description": |
+ Used by the nrpe subordinate charms.
+ A string that will be prepended to instance name to set the host name
+ in nagios. So for instance the hostname would be something like:
+ juju-myservice-0
+ If you're running multiple environments with the same services in them
+ this allows you to differentiate between them.
+ "nagios_servicegroups":
+ "default": ""
+ "type": "string"
+ "description": |
+ A comma-separated list of nagios servicegroups.
+ If left empty, the nagios_context will be used as the servicegroup
+ "snapd_refresh":
+ "default": "max"
+ "type": "string"
+ "description": |
+ How often snapd handles updates for installed snaps. Set to an empty
+ string to check 4x per day. Set to "max" (the default) to check once per
+ month based on the charm deployment date. You may also set a custom
+ string as described in the 'refresh.timer' section here:
+ https://forum.snapcraft.io/t/system-options/87
+ "port":
+ "type": "int"
+ "default": !!int "2379"
+ "description": "Port to run the public ETCD service on"
+ "management_port":
+ "type": "int"
+ "default": !!int "2380"
+ "description": "Port to run the ETCD Management service"
+ "channel":
+ "type": "string"
+ "default": "auto"
+ "description": |
+ The snap channel from which to install etcd (e.g. '3.3/stable'), or 'auto'
+ to accept the charm default. Choosing 'auto' will install the latest
+ supported version of etcd at deploy time, but will not automatically upgrade
+ to a newer version thereafter.
+ "bind_to_all_interfaces":
+ "type": "boolean"
+ "default": !!bool "true"
+ "description": |
+ The service binds to all network interfaces if true. The service binds
+ only to the first found bind address of each relation if false
diff --git a/etcd/copyright b/etcd/copyright
new file mode 100644
index 0000000..3306630
--- /dev/null
+++ b/etcd/copyright
@@ -0,0 +1,13 @@
+Copyright 2015 Canonical LTD
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/etcd/copyright.layer-basic b/etcd/copyright.layer-basic
new file mode 100644
index 0000000..d4fdd18
--- /dev/null
+++ b/etcd/copyright.layer-basic
@@ -0,0 +1,16 @@
+Format: http://dep.debian.net/deps/dep5/
+
+Files: *
+Copyright: Copyright 2015-2017, Canonical Ltd., All Rights Reserved.
+License: Apache License 2.0
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+ .
+ http://www.apache.org/licenses/LICENSE-2.0
+ .
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/etcd/copyright.layer-leadership b/etcd/copyright.layer-leadership
new file mode 100644
index 0000000..08b983f
--- /dev/null
+++ b/etcd/copyright.layer-leadership
@@ -0,0 +1,15 @@
+Copyright 2015-2016 Canonical Ltd.
+
+This file is part of the Leadership Layer for Juju.
+
+This program is free software: you can redistribute it and/or modify
+it under the terms of the GNU General Public License version 3, as
+published by the Free Software Foundation.
+
+This program is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranties of
+MERCHANTABILITY, SATISFACTORY QUALITY, or FITNESS FOR A PARTICULAR
+PURPOSE. See the GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with this program. If not, see <http://www.gnu.org/licenses/>.
diff --git a/etcd/copyright.layer-nagios b/etcd/copyright.layer-nagios
new file mode 100644
index 0000000..c80db95
--- /dev/null
+++ b/etcd/copyright.layer-nagios
@@ -0,0 +1,16 @@
+Format: http://dep.debian.net/deps/dep5/
+
+Files: *
+Copyright: Copyright 2016, Canonical Ltd.
+License: GPL-3
+ This program is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License version 3, as
+ published by the Free Software Foundation.
+ .
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranties of
+ MERCHANTABILITY, SATISFACTORY QUALITY, or FITNESS FOR A PARTICULAR
+ PURPOSE. See the GNU General Public License for more details.
+ .
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see <http://www.gnu.org/licenses/>.
diff --git a/etcd/copyright.layer-options b/etcd/copyright.layer-options
new file mode 100644
index 0000000..d4fdd18
--- /dev/null
+++ b/etcd/copyright.layer-options
@@ -0,0 +1,16 @@
+Format: http://dep.debian.net/deps/dep5/
+
+Files: *
+Copyright: Copyright 2015-2017, Canonical Ltd., All Rights Reserved.
+License: Apache License 2.0
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+ .
+ http://www.apache.org/licenses/LICENSE-2.0
+ .
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/etcd/copyright.layer-snap b/etcd/copyright.layer-snap
new file mode 100644
index 0000000..261eeb9
--- /dev/null
+++ b/etcd/copyright.layer-snap
@@ -0,0 +1,201 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/etcd/copyright.layer-status b/etcd/copyright.layer-status
new file mode 100644
index 0000000..a91bdf1
--- /dev/null
+++ b/etcd/copyright.layer-status
@@ -0,0 +1,16 @@
+Format: http://dep.debian.net/deps/dep5/
+
+Files: *
+Copyright: Copyright 2018, Canonical Ltd., All Rights Reserved.
+License: Apache License 2.0
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+ .
+ http://www.apache.org/licenses/LICENSE-2.0
+ .
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/etcd/debug-scripts/charm-unitdata b/etcd/debug-scripts/charm-unitdata
new file mode 100755
index 0000000..d2aac60
--- /dev/null
+++ b/etcd/debug-scripts/charm-unitdata
@@ -0,0 +1,12 @@
+#!/usr/local/sbin/charm-env python3
+
+import debug_script
+import json
+from charmhelpers.core import unitdata
+
+kv = unitdata.kv()
+data = kv.getrange("")
+
+with debug_script.open_file("unitdata.json", "w") as f:
+ json.dump(data, f, indent=2)
+ f.write("\n")
diff --git a/etcd/debug-scripts/etcd b/etcd/debug-scripts/etcd
new file mode 100755
index 0000000..45e355d
--- /dev/null
+++ b/etcd/debug-scripts/etcd
@@ -0,0 +1,11 @@
+#!/bin/sh
+set -ux
+
+# TODO: Link these explicit system calls against the layer.yaml configuration
+systemctl status snap.etcd.etcd > $DEBUG_SCRIPT_DIR/etcd-systemctl-status
+journalctl -u snap.etcd.etcd > $DEBUG_SCRIPT_DIR/etcd-journal
+
+alias etcdctl="/snap/bin/etcd.etcdctl --cert-file /var/snap/etcd/common/client.crt --key-file /var/snap/etcd/common/client.key --ca-file /var/snap/etcd/common/ca.crt"
+etcdctl cluster-health > $DEBUG_SCRIPT_DIR/etcdctl-cluster-health
+etcdctl ls --recursive > $DEBUG_SCRIPT_DIR/etcdctl-ls
+etcdctl member list > $DEBUG_SCRIPT_DIR/etcdctl-member-list
diff --git a/etcd/debug-scripts/filesystem b/etcd/debug-scripts/filesystem
new file mode 100755
index 0000000..c5ec6d8
--- /dev/null
+++ b/etcd/debug-scripts/filesystem
@@ -0,0 +1,17 @@
+#!/bin/sh
+set -ux
+
+# report file system disk space usage
+df -hT > $DEBUG_SCRIPT_DIR/df-hT
+# estimate file space usage; redirect stdout first so 2>&1 captures stderr in the file
+du -h / > $DEBUG_SCRIPT_DIR/du-h 2>&1
+# list the mounted filesystems
+mount > $DEBUG_SCRIPT_DIR/mount
+# list the mounted systems with ascii trees
+findmnt -A > $DEBUG_SCRIPT_DIR/findmnt
+# list block devices
+lsblk > $DEBUG_SCRIPT_DIR/lsblk
+# list open files; redirect stdout first so 2>&1 captures stderr in the file
+lsof > $DEBUG_SCRIPT_DIR/lsof 2>&1
+# list local system locks
+lslocks > $DEBUG_SCRIPT_DIR/lslocks
diff --git a/etcd/debug-scripts/juju-logs b/etcd/debug-scripts/juju-logs
new file mode 100755
index 0000000..d27c458
--- /dev/null
+++ b/etcd/debug-scripts/juju-logs
@@ -0,0 +1,4 @@
+#!/bin/sh
+set -ux
+
+cp -v /var/log/juju/* $DEBUG_SCRIPT_DIR
diff --git a/etcd/debug-scripts/juju-network-get b/etcd/debug-scripts/juju-network-get
new file mode 100755
index 0000000..983c8c4
--- /dev/null
+++ b/etcd/debug-scripts/juju-network-get
@@ -0,0 +1,21 @@
+#!/usr/local/sbin/charm-env python3
+
+import os
+import subprocess
+import yaml
+import debug_script
+
+# safe_load avoids arbitrary object construction and the PyYAML >=5.1
+# "yaml.load() without Loader" deprecation; metadata.yaml is plain data.
+with open('metadata.yaml') as f:
+ metadata = yaml.safe_load(f)
+
+relations = []
+for key in ['requires', 'provides', 'peers']:
+ relations += list(metadata.get(key, {}).keys())
+
+os.mkdir(os.path.join(debug_script.dir, 'relations'))
+
+for relation in relations:
+ path = 'relations/' + relation
+ with debug_script.open_file(path, 'w') as f:
+ cmd = ['network-get', relation]
+ subprocess.call(cmd, stdout=f, stderr=subprocess.STDOUT)
diff --git a/etcd/debug-scripts/network b/etcd/debug-scripts/network
new file mode 100755
index 0000000..944a355
--- /dev/null
+++ b/etcd/debug-scripts/network
@@ -0,0 +1,11 @@
+#!/bin/sh
+set -ux
+
+ifconfig -a > $DEBUG_SCRIPT_DIR/ifconfig
+cp -v /etc/resolv.conf $DEBUG_SCRIPT_DIR/resolv.conf
+cp -v /etc/network/interfaces $DEBUG_SCRIPT_DIR/interfaces
+netstat -planut > $DEBUG_SCRIPT_DIR/netstat
+route -n > $DEBUG_SCRIPT_DIR/route
+iptables-save > $DEBUG_SCRIPT_DIR/iptables-save
+dig google.com > $DEBUG_SCRIPT_DIR/dig-google
+ping -w 2 -i 0.1 google.com > $DEBUG_SCRIPT_DIR/ping-google
diff --git a/etcd/debug-scripts/packages b/etcd/debug-scripts/packages
new file mode 100755
index 0000000..b60a9cf
--- /dev/null
+++ b/etcd/debug-scripts/packages
@@ -0,0 +1,7 @@
+#!/bin/sh
+set -ux
+
+dpkg --list > $DEBUG_SCRIPT_DIR/dpkg-list
+snap list > $DEBUG_SCRIPT_DIR/snap-list
+pip2 list > $DEBUG_SCRIPT_DIR/pip2-list
+pip3 list > $DEBUG_SCRIPT_DIR/pip3-list
diff --git a/etcd/debug-scripts/sysctl b/etcd/debug-scripts/sysctl
new file mode 100755
index 0000000..a86a6c8
--- /dev/null
+++ b/etcd/debug-scripts/sysctl
@@ -0,0 +1,4 @@
+#!/bin/sh
+set -ux
+
+sysctl -a > $DEBUG_SCRIPT_DIR/sysctl
diff --git a/etcd/debug-scripts/systemd b/etcd/debug-scripts/systemd
new file mode 100755
index 0000000..8bb9b6f
--- /dev/null
+++ b/etcd/debug-scripts/systemd
@@ -0,0 +1,9 @@
+#!/bin/sh
+set -ux
+
+systemctl --all > $DEBUG_SCRIPT_DIR/systemctl
+journalctl > $DEBUG_SCRIPT_DIR/journalctl
+systemd-analyze time > $DEBUG_SCRIPT_DIR/systemd-analyze-time
+systemd-analyze blame > $DEBUG_SCRIPT_DIR/systemd-analyze-blame
+systemd-analyze critical-chain > $DEBUG_SCRIPT_DIR/systemd-analyze-critical-chain
+systemd-analyze dump > $DEBUG_SCRIPT_DIR/systemd-analyze-dump
diff --git a/etcd/debug-scripts/tls-certs b/etcd/debug-scripts/tls-certs
new file mode 100755
index 0000000..2692e51
--- /dev/null
+++ b/etcd/debug-scripts/tls-certs
@@ -0,0 +1,21 @@
+#!/usr/local/sbin/charm-env python3
+
+import os
+import shutil
+import traceback
+import debug_script
+from charms import layer
+
+options = layer.options.get('tls-client')
+
+def copy_cert(source_key, name):
+ try:
+ source = options[source_key]
+ dest = os.path.join(debug_script.dir, name)
+ shutil.copy(source, dest)
+ except Exception:
+ traceback.print_exc()
+
+copy_cert('client_certificate_path', 'client.crt')
+copy_cert('server_certificate_path', 'server.crt')
+copy_cert('ca_certificate_path', 'ca.crt')
diff --git a/etcd/docs/status.md b/etcd/docs/status.md
new file mode 100644
index 0000000..c6cceab
--- /dev/null
+++ b/etcd/docs/status.md
@@ -0,0 +1,91 @@
+
+
+```python
+maintenance(message)
+```
+
+Set the status to the `MAINTENANCE` state with the given operator message.
+
+__Parameters__
+
+- __`message` (str)__: Message to convey to the operator.
+
+
+maint
+
+```python
+maint(message)
+```
+
+Shorthand alias for
+[maintenance](status.md#charms.layer.status.maintenance).
+
+__Parameters__
+
+- __`message` (str)__: Message to convey to the operator.
+
+
+blocked
+
+```python
+blocked(message)
+```
+
+Set the status to the `BLOCKED` state with the given operator message.
+
+__Parameters__
+
+- __`message` (str)__: Message to convey to the operator.
+
+
+waiting
+
+```python
+waiting(message)
+```
+
+Set the status to the `WAITING` state with the given operator message.
+
+__Parameters__
+
+- __`message` (str)__: Message to convey to the operator.
+
+
+active
+
+```python
+active(message)
+```
+
+Set the status to the `ACTIVE` state with the given operator message.
+
+__Parameters__
+
+- __`message` (str)__: Message to convey to the operator.
+
+
+status_set
+
+```python
+status_set(workload_state, message)
+```
+
+Set the status to the given workload state with a message.
+
+__Parameters__
+
+- __`workload_state` (WorkloadState or str)__: State of the workload. Should be
+ a [WorkloadState](status.md#charms.layer.status.WorkloadState) enum
+ member, or the string value of one of those members.
+- __`message` (str)__: Message to convey to the operator.
+
diff --git a/etcd/hooks/certificates-relation-broken b/etcd/hooks/certificates-relation-broken
new file mode 100755
index 0000000..9858c6b
--- /dev/null
+++ b/etcd/hooks/certificates-relation-broken
@@ -0,0 +1,22 @@
+#!/usr/bin/env python3
+
+# Load modules from $JUJU_CHARM_DIR/lib
+import sys
+sys.path.append('lib')
+
+from charms.layer import basic # noqa
+basic.bootstrap_charm_deps()
+
+from charmhelpers.core import hookenv # noqa
+hookenv.atstart(basic.init_config_states)
+hookenv.atexit(basic.clear_config_states)
+
+
+# This will load and run the appropriate @hook and other decorated
+# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive,
+# and $JUJU_CHARM_DIR/hooks/relations.
+#
+# See https://jujucharms.com/docs/stable/authors-charm-building
+# for more information on this pattern.
+from charms.reactive import main # noqa
+main()
diff --git a/etcd/hooks/certificates-relation-changed b/etcd/hooks/certificates-relation-changed
new file mode 100755
index 0000000..9858c6b
--- /dev/null
+++ b/etcd/hooks/certificates-relation-changed
@@ -0,0 +1,22 @@
+#!/usr/bin/env python3
+
+# Load modules from $JUJU_CHARM_DIR/lib
+import sys
+sys.path.append('lib')
+
+from charms.layer import basic # noqa
+basic.bootstrap_charm_deps()
+
+from charmhelpers.core import hookenv # noqa
+hookenv.atstart(basic.init_config_states)
+hookenv.atexit(basic.clear_config_states)
+
+
+# This will load and run the appropriate @hook and other decorated
+# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive,
+# and $JUJU_CHARM_DIR/hooks/relations.
+#
+# See https://jujucharms.com/docs/stable/authors-charm-building
+# for more information on this pattern.
+from charms.reactive import main # noqa
+main()
diff --git a/etcd/hooks/certificates-relation-created b/etcd/hooks/certificates-relation-created
new file mode 100755
index 0000000..9858c6b
--- /dev/null
+++ b/etcd/hooks/certificates-relation-created
@@ -0,0 +1,22 @@
+#!/usr/bin/env python3
+
+# Load modules from $JUJU_CHARM_DIR/lib
+import sys
+sys.path.append('lib')
+
+from charms.layer import basic # noqa
+basic.bootstrap_charm_deps()
+
+from charmhelpers.core import hookenv # noqa
+hookenv.atstart(basic.init_config_states)
+hookenv.atexit(basic.clear_config_states)
+
+
+# This will load and run the appropriate @hook and other decorated
+# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive,
+# and $JUJU_CHARM_DIR/hooks/relations.
+#
+# See https://jujucharms.com/docs/stable/authors-charm-building
+# for more information on this pattern.
+from charms.reactive import main # noqa
+main()
diff --git a/etcd/hooks/certificates-relation-departed b/etcd/hooks/certificates-relation-departed
new file mode 100755
index 0000000..9858c6b
--- /dev/null
+++ b/etcd/hooks/certificates-relation-departed
@@ -0,0 +1,22 @@
+#!/usr/bin/env python3
+
+# Load modules from $JUJU_CHARM_DIR/lib
+import sys
+sys.path.append('lib')
+
+from charms.layer import basic # noqa
+basic.bootstrap_charm_deps()
+
+from charmhelpers.core import hookenv # noqa
+hookenv.atstart(basic.init_config_states)
+hookenv.atexit(basic.clear_config_states)
+
+
+# This will load and run the appropriate @hook and other decorated
+# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive,
+# and $JUJU_CHARM_DIR/hooks/relations.
+#
+# See https://jujucharms.com/docs/stable/authors-charm-building
+# for more information on this pattern.
+from charms.reactive import main # noqa
+main()
diff --git a/etcd/hooks/certificates-relation-joined b/etcd/hooks/certificates-relation-joined
new file mode 100755
index 0000000..9858c6b
--- /dev/null
+++ b/etcd/hooks/certificates-relation-joined
@@ -0,0 +1,22 @@
+#!/usr/bin/env python3
+
+# Load modules from $JUJU_CHARM_DIR/lib
+import sys
+sys.path.append('lib')
+
+from charms.layer import basic # noqa
+basic.bootstrap_charm_deps()
+
+from charmhelpers.core import hookenv # noqa
+hookenv.atstart(basic.init_config_states)
+hookenv.atexit(basic.clear_config_states)
+
+
+# This will load and run the appropriate @hook and other decorated
+# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive,
+# and $JUJU_CHARM_DIR/hooks/relations.
+#
+# See https://jujucharms.com/docs/stable/authors-charm-building
+# for more information on this pattern.
+from charms.reactive import main # noqa
+main()
diff --git a/etcd/hooks/cluster-relation-broken b/etcd/hooks/cluster-relation-broken
new file mode 100755
index 0000000..9858c6b
--- /dev/null
+++ b/etcd/hooks/cluster-relation-broken
@@ -0,0 +1,22 @@
+#!/usr/bin/env python3
+
+# Load modules from $JUJU_CHARM_DIR/lib
+import sys
+sys.path.append('lib')
+
+from charms.layer import basic # noqa
+basic.bootstrap_charm_deps()
+
+from charmhelpers.core import hookenv # noqa
+hookenv.atstart(basic.init_config_states)
+hookenv.atexit(basic.clear_config_states)
+
+
+# This will load and run the appropriate @hook and other decorated
+# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive,
+# and $JUJU_CHARM_DIR/hooks/relations.
+#
+# See https://jujucharms.com/docs/stable/authors-charm-building
+# for more information on this pattern.
+from charms.reactive import main # noqa
+main()
diff --git a/etcd/hooks/cluster-relation-changed b/etcd/hooks/cluster-relation-changed
new file mode 100755
index 0000000..9858c6b
--- /dev/null
+++ b/etcd/hooks/cluster-relation-changed
@@ -0,0 +1,22 @@
+#!/usr/bin/env python3
+
+# Load modules from $JUJU_CHARM_DIR/lib
+import sys
+sys.path.append('lib')
+
+from charms.layer import basic # noqa
+basic.bootstrap_charm_deps()
+
+from charmhelpers.core import hookenv # noqa
+hookenv.atstart(basic.init_config_states)
+hookenv.atexit(basic.clear_config_states)
+
+
+# This will load and run the appropriate @hook and other decorated
+# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive,
+# and $JUJU_CHARM_DIR/hooks/relations.
+#
+# See https://jujucharms.com/docs/stable/authors-charm-building
+# for more information on this pattern.
+from charms.reactive import main # noqa
+main()
diff --git a/etcd/hooks/cluster-relation-created b/etcd/hooks/cluster-relation-created
new file mode 100755
index 0000000..9858c6b
--- /dev/null
+++ b/etcd/hooks/cluster-relation-created
@@ -0,0 +1,22 @@
+#!/usr/bin/env python3
+
+# Load modules from $JUJU_CHARM_DIR/lib
+import sys
+sys.path.append('lib')
+
+from charms.layer import basic # noqa
+basic.bootstrap_charm_deps()
+
+from charmhelpers.core import hookenv # noqa
+hookenv.atstart(basic.init_config_states)
+hookenv.atexit(basic.clear_config_states)
+
+
+# This will load and run the appropriate @hook and other decorated
+# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive,
+# and $JUJU_CHARM_DIR/hooks/relations.
+#
+# See https://jujucharms.com/docs/stable/authors-charm-building
+# for more information on this pattern.
+from charms.reactive import main # noqa
+main()
diff --git a/etcd/hooks/cluster-relation-departed b/etcd/hooks/cluster-relation-departed
new file mode 100755
index 0000000..9858c6b
--- /dev/null
+++ b/etcd/hooks/cluster-relation-departed
@@ -0,0 +1,22 @@
+#!/usr/bin/env python3
+
+# Load modules from $JUJU_CHARM_DIR/lib
+import sys
+sys.path.append('lib')
+
+from charms.layer import basic # noqa
+basic.bootstrap_charm_deps()
+
+from charmhelpers.core import hookenv # noqa
+hookenv.atstart(basic.init_config_states)
+hookenv.atexit(basic.clear_config_states)
+
+
+# This will load and run the appropriate @hook and other decorated
+# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive,
+# and $JUJU_CHARM_DIR/hooks/relations.
+#
+# See https://jujucharms.com/docs/stable/authors-charm-building
+# for more information on this pattern.
+from charms.reactive import main # noqa
+main()
diff --git a/etcd/hooks/cluster-relation-joined b/etcd/hooks/cluster-relation-joined
new file mode 100755
index 0000000..9858c6b
--- /dev/null
+++ b/etcd/hooks/cluster-relation-joined
@@ -0,0 +1,22 @@
+#!/usr/bin/env python3
+
+# Load modules from $JUJU_CHARM_DIR/lib
+import sys
+sys.path.append('lib')
+
+from charms.layer import basic # noqa
+basic.bootstrap_charm_deps()
+
+from charmhelpers.core import hookenv # noqa
+hookenv.atstart(basic.init_config_states)
+hookenv.atexit(basic.clear_config_states)
+
+
+# This will load and run the appropriate @hook and other decorated
+# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive,
+# and $JUJU_CHARM_DIR/hooks/relations.
+#
+# See https://jujucharms.com/docs/stable/authors-charm-building
+# for more information on this pattern.
+from charms.reactive import main # noqa
+main()
diff --git a/etcd/hooks/config-changed b/etcd/hooks/config-changed
new file mode 100755
index 0000000..9858c6b
--- /dev/null
+++ b/etcd/hooks/config-changed
@@ -0,0 +1,22 @@
+#!/usr/bin/env python3
+
+# Load modules from $JUJU_CHARM_DIR/lib
+import sys
+sys.path.append('lib')
+
+from charms.layer import basic # noqa
+basic.bootstrap_charm_deps()
+
+from charmhelpers.core import hookenv # noqa
+hookenv.atstart(basic.init_config_states)
+hookenv.atexit(basic.clear_config_states)
+
+
+# This will load and run the appropriate @hook and other decorated
+# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive,
+# and $JUJU_CHARM_DIR/hooks/relations.
+#
+# See https://jujucharms.com/docs/stable/authors-charm-building
+# for more information on this pattern.
+from charms.reactive import main # noqa
+main()
diff --git a/etcd/hooks/data-storage-attached b/etcd/hooks/data-storage-attached
new file mode 100755
index 0000000..9858c6b
--- /dev/null
+++ b/etcd/hooks/data-storage-attached
@@ -0,0 +1,22 @@
+#!/usr/bin/env python3
+
+# Load modules from $JUJU_CHARM_DIR/lib
+import sys
+sys.path.append('lib')
+
+from charms.layer import basic # noqa
+basic.bootstrap_charm_deps()
+
+from charmhelpers.core import hookenv # noqa
+hookenv.atstart(basic.init_config_states)
+hookenv.atexit(basic.clear_config_states)
+
+
+# This will load and run the appropriate @hook and other decorated
+# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive,
+# and $JUJU_CHARM_DIR/hooks/relations.
+#
+# See https://jujucharms.com/docs/stable/authors-charm-building
+# for more information on this pattern.
+from charms.reactive import main # noqa
+main()
diff --git a/etcd/hooks/data-storage-detaching b/etcd/hooks/data-storage-detaching
new file mode 100755
index 0000000..9858c6b
--- /dev/null
+++ b/etcd/hooks/data-storage-detaching
@@ -0,0 +1,22 @@
+#!/usr/bin/env python3
+
+# Load modules from $JUJU_CHARM_DIR/lib
+import sys
+sys.path.append('lib')
+
+from charms.layer import basic # noqa
+basic.bootstrap_charm_deps()
+
+from charmhelpers.core import hookenv # noqa
+hookenv.atstart(basic.init_config_states)
+hookenv.atexit(basic.clear_config_states)
+
+
+# This will load and run the appropriate @hook and other decorated
+# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive,
+# and $JUJU_CHARM_DIR/hooks/relations.
+#
+# See https://jujucharms.com/docs/stable/authors-charm-building
+# for more information on this pattern.
+from charms.reactive import main # noqa
+main()
diff --git a/etcd/hooks/db-relation-broken b/etcd/hooks/db-relation-broken
new file mode 100755
index 0000000..9858c6b
--- /dev/null
+++ b/etcd/hooks/db-relation-broken
@@ -0,0 +1,22 @@
+#!/usr/bin/env python3
+
+# Load modules from $JUJU_CHARM_DIR/lib
+import sys
+sys.path.append('lib')
+
+from charms.layer import basic # noqa
+basic.bootstrap_charm_deps()
+
+from charmhelpers.core import hookenv # noqa
+hookenv.atstart(basic.init_config_states)
+hookenv.atexit(basic.clear_config_states)
+
+
+# This will load and run the appropriate @hook and other decorated
+# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive,
+# and $JUJU_CHARM_DIR/hooks/relations.
+#
+# See https://jujucharms.com/docs/stable/authors-charm-building
+# for more information on this pattern.
+from charms.reactive import main # noqa
+main()
diff --git a/etcd/hooks/db-relation-changed b/etcd/hooks/db-relation-changed
new file mode 100755
index 0000000..9858c6b
--- /dev/null
+++ b/etcd/hooks/db-relation-changed
@@ -0,0 +1,22 @@
+#!/usr/bin/env python3
+
+# Load modules from $JUJU_CHARM_DIR/lib
+import sys
+sys.path.append('lib')
+
+from charms.layer import basic # noqa
+basic.bootstrap_charm_deps()
+
+from charmhelpers.core import hookenv # noqa
+hookenv.atstart(basic.init_config_states)
+hookenv.atexit(basic.clear_config_states)
+
+
+# This will load and run the appropriate @hook and other decorated
+# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive,
+# and $JUJU_CHARM_DIR/hooks/relations.
+#
+# See https://jujucharms.com/docs/stable/authors-charm-building
+# for more information on this pattern.
+from charms.reactive import main # noqa
+main()
diff --git a/etcd/hooks/db-relation-created b/etcd/hooks/db-relation-created
new file mode 100755
index 0000000..9858c6b
--- /dev/null
+++ b/etcd/hooks/db-relation-created
@@ -0,0 +1,22 @@
+#!/usr/bin/env python3
+
+# Load modules from $JUJU_CHARM_DIR/lib
+import sys
+sys.path.append('lib')
+
+from charms.layer import basic # noqa
+basic.bootstrap_charm_deps()
+
+from charmhelpers.core import hookenv # noqa
+hookenv.atstart(basic.init_config_states)
+hookenv.atexit(basic.clear_config_states)
+
+
+# This will load and run the appropriate @hook and other decorated
+# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive,
+# and $JUJU_CHARM_DIR/hooks/relations.
+#
+# See https://jujucharms.com/docs/stable/authors-charm-building
+# for more information on this pattern.
+from charms.reactive import main # noqa
+main()
diff --git a/etcd/hooks/db-relation-departed b/etcd/hooks/db-relation-departed
new file mode 100755
index 0000000..9858c6b
--- /dev/null
+++ b/etcd/hooks/db-relation-departed
@@ -0,0 +1,22 @@
+#!/usr/bin/env python3
+
+# Load modules from $JUJU_CHARM_DIR/lib
+import sys
+sys.path.append('lib')
+
+from charms.layer import basic # noqa
+basic.bootstrap_charm_deps()
+
+from charmhelpers.core import hookenv # noqa
+hookenv.atstart(basic.init_config_states)
+hookenv.atexit(basic.clear_config_states)
+
+
+# This will load and run the appropriate @hook and other decorated
+# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive,
+# and $JUJU_CHARM_DIR/hooks/relations.
+#
+# See https://jujucharms.com/docs/stable/authors-charm-building
+# for more information on this pattern.
+from charms.reactive import main # noqa
+main()
diff --git a/etcd/hooks/db-relation-joined b/etcd/hooks/db-relation-joined
new file mode 100755
index 0000000..9858c6b
--- /dev/null
+++ b/etcd/hooks/db-relation-joined
@@ -0,0 +1,22 @@
+#!/usr/bin/env python3
+
+# Load modules from $JUJU_CHARM_DIR/lib
+import sys
+sys.path.append('lib')
+
+from charms.layer import basic # noqa
+basic.bootstrap_charm_deps()
+
+from charmhelpers.core import hookenv # noqa
+hookenv.atstart(basic.init_config_states)
+hookenv.atexit(basic.clear_config_states)
+
+
+# This will load and run the appropriate @hook and other decorated
+# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive,
+# and $JUJU_CHARM_DIR/hooks/relations.
+#
+# See https://jujucharms.com/docs/stable/authors-charm-building
+# for more information on this pattern.
+from charms.reactive import main # noqa
+main()
diff --git a/etcd/hooks/hook.template b/etcd/hooks/hook.template
new file mode 100644
index 0000000..9858c6b
--- /dev/null
+++ b/etcd/hooks/hook.template
@@ -0,0 +1,22 @@
+#!/usr/bin/env python3
+
+# Load modules from $JUJU_CHARM_DIR/lib
+import sys
+sys.path.append('lib')
+
+from charms.layer import basic # noqa
+basic.bootstrap_charm_deps()
+
+from charmhelpers.core import hookenv # noqa
+hookenv.atstart(basic.init_config_states)
+hookenv.atexit(basic.clear_config_states)
+
+
+# This will load and run the appropriate @hook and other decorated
+# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive,
+# and $JUJU_CHARM_DIR/hooks/relations.
+#
+# See https://jujucharms.com/docs/stable/authors-charm-building
+# for more information on this pattern.
+from charms.reactive import main # noqa
+main()
diff --git a/etcd/hooks/install b/etcd/hooks/install
new file mode 100755
index 0000000..9858c6b
--- /dev/null
+++ b/etcd/hooks/install
@@ -0,0 +1,22 @@
+#!/usr/bin/env python3
+
+# Load modules from $JUJU_CHARM_DIR/lib
+import sys
+sys.path.append('lib')
+
+from charms.layer import basic # noqa
+basic.bootstrap_charm_deps()
+
+from charmhelpers.core import hookenv # noqa
+hookenv.atstart(basic.init_config_states)
+hookenv.atexit(basic.clear_config_states)
+
+
+# This will load and run the appropriate @hook and other decorated
+# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive,
+# and $JUJU_CHARM_DIR/hooks/relations.
+#
+# See https://jujucharms.com/docs/stable/authors-charm-building
+# for more information on this pattern.
+from charms.reactive import main # noqa
+main()
diff --git a/etcd/hooks/leader-elected b/etcd/hooks/leader-elected
new file mode 100755
index 0000000..9858c6b
--- /dev/null
+++ b/etcd/hooks/leader-elected
@@ -0,0 +1,22 @@
+#!/usr/bin/env python3
+
+# Load modules from $JUJU_CHARM_DIR/lib
+import sys
+sys.path.append('lib')
+
+from charms.layer import basic # noqa
+basic.bootstrap_charm_deps()
+
+from charmhelpers.core import hookenv # noqa
+hookenv.atstart(basic.init_config_states)
+hookenv.atexit(basic.clear_config_states)
+
+
+# This will load and run the appropriate @hook and other decorated
+# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive,
+# and $JUJU_CHARM_DIR/hooks/relations.
+#
+# See https://jujucharms.com/docs/stable/authors-charm-building
+# for more information on this pattern.
+from charms.reactive import main # noqa
+main()
diff --git a/etcd/hooks/leader-settings-changed b/etcd/hooks/leader-settings-changed
new file mode 100755
index 0000000..9858c6b
--- /dev/null
+++ b/etcd/hooks/leader-settings-changed
@@ -0,0 +1,22 @@
+#!/usr/bin/env python3
+
+# Load modules from $JUJU_CHARM_DIR/lib
+import sys
+sys.path.append('lib')
+
+from charms.layer import basic # noqa
+basic.bootstrap_charm_deps()
+
+from charmhelpers.core import hookenv # noqa
+hookenv.atstart(basic.init_config_states)
+hookenv.atexit(basic.clear_config_states)
+
+
+# This will load and run the appropriate @hook and other decorated
+# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive,
+# and $JUJU_CHARM_DIR/hooks/relations.
+#
+# See https://jujucharms.com/docs/stable/authors-charm-building
+# for more information on this pattern.
+from charms.reactive import main # noqa
+main()
diff --git a/etcd/hooks/nrpe-external-master-relation-broken b/etcd/hooks/nrpe-external-master-relation-broken
new file mode 100755
index 0000000..9858c6b
--- /dev/null
+++ b/etcd/hooks/nrpe-external-master-relation-broken
@@ -0,0 +1,22 @@
+#!/usr/bin/env python3
+
+# Load modules from $JUJU_CHARM_DIR/lib
+import sys
+sys.path.append('lib')
+
+from charms.layer import basic # noqa
+basic.bootstrap_charm_deps()
+
+from charmhelpers.core import hookenv # noqa
+hookenv.atstart(basic.init_config_states)
+hookenv.atexit(basic.clear_config_states)
+
+
+# This will load and run the appropriate @hook and other decorated
+# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive,
+# and $JUJU_CHARM_DIR/hooks/relations.
+#
+# See https://jujucharms.com/docs/stable/authors-charm-building
+# for more information on this pattern.
+from charms.reactive import main # noqa
+main()
diff --git a/etcd/hooks/nrpe-external-master-relation-changed b/etcd/hooks/nrpe-external-master-relation-changed
new file mode 100755
index 0000000..9858c6b
--- /dev/null
+++ b/etcd/hooks/nrpe-external-master-relation-changed
@@ -0,0 +1,22 @@
+#!/usr/bin/env python3
+
+# Load modules from $JUJU_CHARM_DIR/lib
+import sys
+sys.path.append('lib')
+
+from charms.layer import basic # noqa
+basic.bootstrap_charm_deps()
+
+from charmhelpers.core import hookenv # noqa
+hookenv.atstart(basic.init_config_states)
+hookenv.atexit(basic.clear_config_states)
+
+
+# This will load and run the appropriate @hook and other decorated
+# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive,
+# and $JUJU_CHARM_DIR/hooks/relations.
+#
+# See https://jujucharms.com/docs/stable/authors-charm-building
+# for more information on this pattern.
+from charms.reactive import main # noqa
+main()
diff --git a/etcd/hooks/nrpe-external-master-relation-created b/etcd/hooks/nrpe-external-master-relation-created
new file mode 100755
index 0000000..9858c6b
--- /dev/null
+++ b/etcd/hooks/nrpe-external-master-relation-created
@@ -0,0 +1,22 @@
+#!/usr/bin/env python3
+
+# Load modules from $JUJU_CHARM_DIR/lib
+import sys
+sys.path.append('lib')
+
+from charms.layer import basic # noqa
+basic.bootstrap_charm_deps()
+
+from charmhelpers.core import hookenv # noqa
+hookenv.atstart(basic.init_config_states)
+hookenv.atexit(basic.clear_config_states)
+
+
+# This will load and run the appropriate @hook and other decorated
+# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive,
+# and $JUJU_CHARM_DIR/hooks/relations.
+#
+# See https://jujucharms.com/docs/stable/authors-charm-building
+# for more information on this pattern.
+from charms.reactive import main # noqa
+main()
diff --git a/etcd/hooks/nrpe-external-master-relation-departed b/etcd/hooks/nrpe-external-master-relation-departed
new file mode 100755
index 0000000..9858c6b
--- /dev/null
+++ b/etcd/hooks/nrpe-external-master-relation-departed
@@ -0,0 +1,22 @@
+#!/usr/bin/env python3
+
+# Load modules from $JUJU_CHARM_DIR/lib
+import sys
+sys.path.append('lib')
+
+from charms.layer import basic # noqa
+basic.bootstrap_charm_deps()
+
+from charmhelpers.core import hookenv # noqa
+hookenv.atstart(basic.init_config_states)
+hookenv.atexit(basic.clear_config_states)
+
+
+# This will load and run the appropriate @hook and other decorated
+# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive,
+# and $JUJU_CHARM_DIR/hooks/relations.
+#
+# See https://jujucharms.com/docs/stable/authors-charm-building
+# for more information on this pattern.
+from charms.reactive import main # noqa
+main()
diff --git a/etcd/hooks/nrpe-external-master-relation-joined b/etcd/hooks/nrpe-external-master-relation-joined
new file mode 100755
index 0000000..9858c6b
--- /dev/null
+++ b/etcd/hooks/nrpe-external-master-relation-joined
@@ -0,0 +1,22 @@
+#!/usr/bin/env python3
+
+# Load modules from $JUJU_CHARM_DIR/lib
+import sys
+sys.path.append('lib')
+
+from charms.layer import basic # noqa
+basic.bootstrap_charm_deps()
+
+from charmhelpers.core import hookenv # noqa
+hookenv.atstart(basic.init_config_states)
+hookenv.atexit(basic.clear_config_states)
+
+
+# This will load and run the appropriate @hook and other decorated
+# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive,
+# and $JUJU_CHARM_DIR/hooks/relations.
+#
+# See https://jujucharms.com/docs/stable/authors-charm-building
+# for more information on this pattern.
+from charms.reactive import main # noqa
+main()
diff --git a/etcd/hooks/post-series-upgrade b/etcd/hooks/post-series-upgrade
new file mode 100755
index 0000000..9858c6b
--- /dev/null
+++ b/etcd/hooks/post-series-upgrade
@@ -0,0 +1,22 @@
+#!/usr/bin/env python3
+
+# Load modules from $JUJU_CHARM_DIR/lib
+import sys
+sys.path.append('lib')
+
+from charms.layer import basic # noqa
+basic.bootstrap_charm_deps()
+
+from charmhelpers.core import hookenv # noqa
+hookenv.atstart(basic.init_config_states)
+hookenv.atexit(basic.clear_config_states)
+
+
+# This will load and run the appropriate @hook and other decorated
+# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive,
+# and $JUJU_CHARM_DIR/hooks/relations.
+#
+# See https://jujucharms.com/docs/stable/authors-charm-building
+# for more information on this pattern.
+from charms.reactive import main # noqa
+main()
diff --git a/etcd/hooks/pre-series-upgrade b/etcd/hooks/pre-series-upgrade
new file mode 100755
index 0000000..9858c6b
--- /dev/null
+++ b/etcd/hooks/pre-series-upgrade
@@ -0,0 +1,22 @@
+#!/usr/bin/env python3
+
+# Load modules from $JUJU_CHARM_DIR/lib
+import sys
+sys.path.append('lib')
+
+from charms.layer import basic # noqa
+basic.bootstrap_charm_deps()
+
+from charmhelpers.core import hookenv # noqa
+hookenv.atstart(basic.init_config_states)
+hookenv.atexit(basic.clear_config_states)
+
+
+# This will load and run the appropriate @hook and other decorated
+# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive,
+# and $JUJU_CHARM_DIR/hooks/relations.
+#
+# See https://jujucharms.com/docs/stable/authors-charm-building
+# for more information on this pattern.
+from charms.reactive import main # noqa
+main()
diff --git a/etcd/hooks/proxy-relation-broken b/etcd/hooks/proxy-relation-broken
new file mode 100755
index 0000000..9858c6b
--- /dev/null
+++ b/etcd/hooks/proxy-relation-broken
@@ -0,0 +1,22 @@
+#!/usr/bin/env python3
+
+# Load modules from $JUJU_CHARM_DIR/lib
+import sys
+sys.path.append('lib')
+
+from charms.layer import basic # noqa
+basic.bootstrap_charm_deps()
+
+from charmhelpers.core import hookenv # noqa
+hookenv.atstart(basic.init_config_states)
+hookenv.atexit(basic.clear_config_states)
+
+
+# This will load and run the appropriate @hook and other decorated
+# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive,
+# and $JUJU_CHARM_DIR/hooks/relations.
+#
+# See https://jujucharms.com/docs/stable/authors-charm-building
+# for more information on this pattern.
+from charms.reactive import main # noqa
+main()
diff --git a/etcd/hooks/proxy-relation-changed b/etcd/hooks/proxy-relation-changed
new file mode 100755
index 0000000..9858c6b
--- /dev/null
+++ b/etcd/hooks/proxy-relation-changed
@@ -0,0 +1,22 @@
+#!/usr/bin/env python3
+
+# Load modules from $JUJU_CHARM_DIR/lib
+import sys
+sys.path.append('lib')
+
+from charms.layer import basic # noqa
+basic.bootstrap_charm_deps()
+
+from charmhelpers.core import hookenv # noqa
+hookenv.atstart(basic.init_config_states)
+hookenv.atexit(basic.clear_config_states)
+
+
+# This will load and run the appropriate @hook and other decorated
+# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive,
+# and $JUJU_CHARM_DIR/hooks/relations.
+#
+# See https://jujucharms.com/docs/stable/authors-charm-building
+# for more information on this pattern.
+from charms.reactive import main # noqa
+main()
diff --git a/etcd/hooks/proxy-relation-created b/etcd/hooks/proxy-relation-created
new file mode 100755
index 0000000..9858c6b
--- /dev/null
+++ b/etcd/hooks/proxy-relation-created
@@ -0,0 +1,22 @@
+#!/usr/bin/env python3
+
+# Load modules from $JUJU_CHARM_DIR/lib
+import sys
+sys.path.append('lib')
+
+from charms.layer import basic # noqa
+basic.bootstrap_charm_deps()
+
+from charmhelpers.core import hookenv # noqa
+hookenv.atstart(basic.init_config_states)
+hookenv.atexit(basic.clear_config_states)
+
+
+# This will load and run the appropriate @hook and other decorated
+# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive,
+# and $JUJU_CHARM_DIR/hooks/relations.
+#
+# See https://jujucharms.com/docs/stable/authors-charm-building
+# for more information on this pattern.
+from charms.reactive import main # noqa
+main()
diff --git a/etcd/hooks/proxy-relation-departed b/etcd/hooks/proxy-relation-departed
new file mode 100755
index 0000000..9858c6b
--- /dev/null
+++ b/etcd/hooks/proxy-relation-departed
@@ -0,0 +1,22 @@
+#!/usr/bin/env python3
+
+# Load modules from $JUJU_CHARM_DIR/lib
+import sys
+sys.path.append('lib')
+
+from charms.layer import basic # noqa
+basic.bootstrap_charm_deps()
+
+from charmhelpers.core import hookenv # noqa
+hookenv.atstart(basic.init_config_states)
+hookenv.atexit(basic.clear_config_states)
+
+
+# This will load and run the appropriate @hook and other decorated
+# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive,
+# and $JUJU_CHARM_DIR/hooks/relations.
+#
+# See https://jujucharms.com/docs/stable/authors-charm-building
+# for more information on this pattern.
+from charms.reactive import main # noqa
+main()
diff --git a/etcd/hooks/proxy-relation-joined b/etcd/hooks/proxy-relation-joined
new file mode 100755
index 0000000..9858c6b
--- /dev/null
+++ b/etcd/hooks/proxy-relation-joined
@@ -0,0 +1,22 @@
+#!/usr/bin/env python3
+
+# Load modules from $JUJU_CHARM_DIR/lib
+import sys
+sys.path.append('lib')
+
+from charms.layer import basic # noqa
+basic.bootstrap_charm_deps()
+
+from charmhelpers.core import hookenv # noqa
+hookenv.atstart(basic.init_config_states)
+hookenv.atexit(basic.clear_config_states)
+
+
+# This will load and run the appropriate @hook and other decorated
+# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive,
+# and $JUJU_CHARM_DIR/hooks/relations.
+#
+# See https://jujucharms.com/docs/stable/authors-charm-building
+# for more information on this pattern.
+from charms.reactive import main # noqa
+main()
diff --git a/etcd/hooks/relations/etcd-proxy/.gitignore b/etcd/hooks/relations/etcd-proxy/.gitignore
new file mode 100644
index 0000000..e43b0f9
--- /dev/null
+++ b/etcd/hooks/relations/etcd-proxy/.gitignore
@@ -0,0 +1 @@
+.DS_Store
diff --git a/etcd/hooks/relations/etcd-proxy/README.md b/etcd/hooks/relations/etcd-proxy/README.md
new file mode 100644
index 0000000..48d6a91
--- /dev/null
+++ b/etcd/hooks/relations/etcd-proxy/README.md
@@ -0,0 +1,110 @@
+# Overview
+
+This interface layer handles the communication with Etcd via the `etcd-proxy` interface.
+
+# Usage
+
+## Requires
+
+This interface layer will set the following states, as appropriate:
+
+ * `{relation_name}.connected` The relation is established, but Etcd may not
+ yet have provided any connection or service information.
+
+ * `{relation_name}.available` Etcd has provided its cluster string
+ information, and is ready to handle incoming connections.
+ The provided information can be accessed via the following methods:
+ * `cluster_string()`
+
+ * `{relation_name}.tls.available` Etcd has provided client
+ connection credentials for TLS communication.
+ * `client_ca` - CA certificate
+ * `client_cert` - Client Cert
+ * `client_key` - Client Key
+
+
+For example, a common application for this is configuring an
+applications backend kv storage, like Docker.
+
+```python
+@when('proxy.available')
+def prepare_etcd_proxy(proxy):
+ con_string = proxy.cluster_string()
+ # Save certificates to disk
+ proxy.save_client_credentials('/etc/ssl/etcd')
+ opts = {}
+ opts['cluster_string'] = con_string
+ opts['client_ca'] = '/etc/ssl/etcd/client-ca.pem'
+ opts['client_cert'] = '/etc/ssl/etcd/client-cert.pem'
+ opts['client_key'] = '/etc/ssl/etcd/client-key.pem'
+ render('proxy_systemd_template', '/etc/systemd/system/etcd-proxy.service', opts)
+
+```
+
+
+## Provides
+
+A charm providing this interface is providing the Etcd cluster management
+connection string. This is similar to what ETCD requires when peering, declared as:
+
+```shell
+etcd0=https://192.168.1.2:2380,etcd1=https://192.168.2.22:2380
+```
+
+This interface layer will set the following states, as appropriate:
+
+ * `{relation_name}.connected` One or more clients of any type
+ have been related. The charm should call the following
+ methods to provide the appropriate information to the clients:
+
+ * `{relation_name}.set_cluster_string()`
+
+ * Additionally, to secure the Etcd network connections, all of
+ the client certificate keys must be set, which is conveniently
+ enabled as a method on the interface:
+
+
+#### Example:
+
+```python
+from charmhelpers.core import hookenv
+# this module lives in the etcd charm in lib/etcdctl.py
+import etcdctl
+
+@when('proxy.connected')
+def send_cluster_details(proxy):
+ # ETCD charm provides client keys via leader_data
+ cert = hookenv.leader_get('client_certificate')
+ key = hookenv.leader_get('client_key')
+ ca = hookenv.leader_get('certificate_authority')
+ # set the certificates on the conversation
+ proxy.set_client_credentials(key, cert, ca)
+
+ # format a list of cluster participants
+ etcdctl = etcdctl.EtcdCtl()
+ peers = etcdctl.member_list()
+ cluster = []
+ for peer in peers:
+ # Potential member doing registration. Default to skip
+ if 'peer_urls' not in peer.keys() or not peer['peer_urls']:
+ continue
+ peer_string = "{}={}".format(peer['name'], peer['peer_urls'])
+ cluster.append(peer_string)
+ # set the cluster string on the conversation
+ proxy.set_cluster_string(','.join(cluster))
+```
+
+
+# Contact Information
+
+### Maintainer
+- Charles Butler <[charles.butler@canonical.com](mailto:charles.butler@canonical.com)>
+
+### Contributors
+- Mathew Bruzek <[mathew.bruzek@canonical.com](mailto:mathew.bruzek@canonical.com)>
+
+# Etcd
+
+- [Etcd](https://coreos.com/etcd/) home page
+- [Etcd bug trackers](https://github.com/coreos/etcd/issues)
+- [Etcd Juju Charm](http://github.com/juju-solutions/layer-etcd)
diff --git a/etcd/hooks/relations/etcd-proxy/__init__.py b/etcd/hooks/relations/etcd-proxy/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/etcd/hooks/relations/etcd-proxy/interface.yaml b/etcd/hooks/relations/etcd-proxy/interface.yaml
new file mode 100644
index 0000000..9ed36a8
--- /dev/null
+++ b/etcd/hooks/relations/etcd-proxy/interface.yaml
@@ -0,0 +1,4 @@
+name: etcd-proxy
+summary: Interface for relating to ETCD
+version: 1
+maintainer: "Charles Butler "
diff --git a/etcd/hooks/relations/etcd-proxy/provides.py b/etcd/hooks/relations/etcd-proxy/provides.py
new file mode 100644
index 0000000..6d36d3b
--- /dev/null
+++ b/etcd/hooks/relations/etcd-proxy/provides.py
@@ -0,0 +1,51 @@
+#!/usr/bin/python
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from charms.reactive import RelationBase
+from charms.reactive import hook
+from charms.reactive import scopes
+
+
+class EtcdProvider(RelationBase):
+ scope = scopes.GLOBAL
+
+ @hook('{provides:etcd-proxy}-relation-{joined,changed}')
+ def joined_or_changed(self):
+ ''' Set state so the unit can identify it is connecting '''
+ self.set_state('{relation_name}.connected')
+
+ @hook('{provides:etcd-proxy}-relation-{broken,departed}')
+ def broken_or_departed(self):
+ ''' Set state so the unit can identify it is departing '''
+ self.remove_state('{relation_name}.connected')
+
+ def set_client_credentials(self, key, cert, ca):
+ ''' Set the client credentials on the global conversation for this
+ relation. '''
+ self.set_remote('client_key', key)
+ self.set_remote('client_ca', ca)
+ self.set_remote('client_cert', cert)
+
+ def set_cluster_string(self, cluster_string):
+ ''' Set the cluster string on the conversation '''
+ self.set_remote('cluster', cluster_string)
+
+ # Kept for backwards compatibility
+ def provide_cluster_string(self, cluster_string):
+ '''
+ @params cluster_string - fully formed etcd cluster string.
+ This is akin to the --initial-cluster-string setting to the
+ etcd-daemon. Proxies will need to know each declared member of
+ the cluster to effectively proxy.
+ '''
+ self.set_remote('cluster', cluster_string)
diff --git a/etcd/hooks/relations/etcd-proxy/requires.py b/etcd/hooks/relations/etcd-proxy/requires.py
new file mode 100644
index 0000000..1fdcced
--- /dev/null
+++ b/etcd/hooks/relations/etcd-proxy/requires.py
@@ -0,0 +1,72 @@
+#!/usr/bin/python
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+import os
+
+from charms.reactive import RelationBase
+from charms.reactive import hook
+from charms.reactive import scopes
+
+
+class EtcdClientProxy(RelationBase):
+ scope = scopes.GLOBAL
+
+ @hook('{requires:etcd-proxy}-relation-{joined,changed}')
+ def changed(self):
+ self.set_state('{relation_name}.connected')
+ if self.get_cluster_string():
+ self.set_state('{relation_name}.available')
+ # Get the ca, key, cert from the relation data.
+ cert = self.get_client_credentials()
+ # The tls state depends on the existence of the ca, key and cert.
+ if cert['client_cert'] and cert['client_key'] and cert['client_ca']: # noqa
+ self.set_state('{relation_name}.tls.available')
+
+ @hook('{requires:etcd-proxy}-relation-{broken,departed}')
+ def broken(self):
+ self.remove_state('{relation_name}.available')
+ self.remove_state('{relation_name}.connected')
+ self.remove_state('{relation_name}.tls.available')
+
+ def get_cluster_string(self):
+ ''' Return the connection string, if available, or None. '''
+ return self.get_remote('cluster')
+
+ def get_client_credentials(self):
+ ''' Return a dict with the client certificate, ca and key to
+ communicate with etcd using tls. '''
+ return {'client_cert': self.get_remote('client_cert'),
+ 'client_key': self.get_remote('client_key'),
+ 'client_ca': self.get_remote('client_ca')}
+
+ def cluster_string(self):
+ """
+ Get the cluster string, if available, or None.
+ """
+ return self.get_cluster_string()
+
+ def save_client_credentials(self, key, cert, ca):
+ ''' Save all the client certificates for etcd to local files. '''
+ self._save_remote_data('client_cert', cert)
+ self._save_remote_data('client_key', key)
+ self._save_remote_data('client_ca', ca)
+
+ def _save_remote_data(self, key, path):
+ ''' Save the remote data to a file indicated by path creating the
+ parent directory if needed.'''
+ value = self.get_remote(key)
+ if value:
+ parent = os.path.dirname(path)
+ if not os.path.isdir(parent):
+ os.makedirs(parent)
+ with open(path, 'w') as stream:
+ stream.write(value)
diff --git a/etcd/hooks/relations/etcd/.gitignore b/etcd/hooks/relations/etcd/.gitignore
new file mode 100644
index 0000000..e43b0f9
--- /dev/null
+++ b/etcd/hooks/relations/etcd/.gitignore
@@ -0,0 +1 @@
+.DS_Store
diff --git a/etcd/hooks/relations/etcd/README.md b/etcd/hooks/relations/etcd/README.md
new file mode 100644
index 0000000..9ed51dd
--- /dev/null
+++ b/etcd/hooks/relations/etcd/README.md
@@ -0,0 +1,89 @@
+# Overview
+
+This interface layer handles the communication with Etcd via the `etcd`
+interface.
+
+# Usage
+
+## Requires
+
+This interface layer will set the following states, as appropriate:
+
+ * `{relation_name}.connected` The relation is established, but Etcd may not
+ yet have provided any connection or service information.
+
+ * `{relation_name}.available` Etcd has provided its connection string
+ information, and is ready to serve as a KV store.
+ The provided information can be accessed via the following methods:
+ * `etcd.get_connection_string()`
+ * `etcd.get_version()`
+ * `{relation_name}.tls.available` Etcd has provided the connection string
+ information, and the tls client credentials to communicate with it.
+ The client credentials can be accessed via:
+ * `{relation_name}.get_client_credentials()` returning a dictionary of
+ the client certificate, key and CA.
+ * `{relation_name}.save_client_credentials(key, cert, ca)` is a convenience
+ method to save the client certificate, key and CA to files of your
+ choosing.
+
+
+For example, a common application for this is configuring an applications
+backend key/value storage, like Docker.
+
+```python
+@when('etcd.available', 'docker.available')
+def swarm_etcd_cluster_setup(etcd):
+ con_string = etcd.connection_string().replace('http', 'etcd')
+ opts = {}
+ opts['connection_string'] = con_string
+ render('docker-compose.yml', 'files/swarm/docker-compose.yml', opts)
+
+```
+
+
+## Provides
+
+A charm providing this interface is providing the Etcd rest api service.
+
+This interface layer will set the following states, as appropriate:
+
+ * `{relation_name}.connected` One or more clients of any type have
+ been related. The charm should call the following methods to provide the
+ appropriate information to the clients:
+
+ * `{relation_name}.set_connection_string(string, version)`
+ * `{relation_name}.set_client_credentials(key, cert, ca)`
+
+Example:
+
+```python
+@when('db.connected')
+def send_connection_details(db):
+ cert = leader_get('client_certificate')
+ key = leader_get('client_key')
+ ca = leader_get('certificate_authority')
+ # Set the key, cert, and ca on the db relation
+ db.set_client_credentials(key, cert, ca)
+
+ port = hookenv.config().get('port')
+ # Get all the peers participating in the cluster relation.
+ addresses = cluster.get_peer_addresses()
+ connections = []
+ for address in addresses:
+ connections.append('http://{0}:{1}'.format(address, port))
+ # Set the connection string on the db relation.
+ db.set_connection_string(','.join(connections))
+```
+
+
+# Contact Information
+
+### Maintainer
+- Charles Butler
+
+
+# Etcd
+
+- [Etcd](https://coreos.com/etcd/) home page
+- [Etcd bug trackers](https://github.com/coreos/etcd/issues)
+- [Etcd Juju Charm](http://jujucharms.com/?text=etcd)
diff --git a/etcd/hooks/relations/etcd/__init__.py b/etcd/hooks/relations/etcd/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/etcd/hooks/relations/etcd/interface.yaml b/etcd/hooks/relations/etcd/interface.yaml
new file mode 100644
index 0000000..929b1d5
--- /dev/null
+++ b/etcd/hooks/relations/etcd/interface.yaml
@@ -0,0 +1,4 @@
+name: etcd
+summary: Interface for relating to ETCD
+version: 2
+maintainer: "Charles Butler "
diff --git a/etcd/hooks/relations/etcd/peers.py b/etcd/hooks/relations/etcd/peers.py
new file mode 100644
index 0000000..90980d1
--- /dev/null
+++ b/etcd/hooks/relations/etcd/peers.py
@@ -0,0 +1,70 @@
+#!/usr/bin/python
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from charms.reactive import RelationBase
+from charms.reactive import hook
+from charms.reactive import scopes
+
+
+class EtcdPeer(RelationBase):
+ '''This class handles peer relation communication by setting states that
+ the reactive code can respond to. '''
+
+ scope = scopes.UNIT
+
+ @hook('{peers:etcd}-relation-joined')
+ def peer_joined(self):
+ '''A new peer has joined, set the state on the unit so we can track
+ when they are departed. '''
+ conv = self.conversation()
+ conv.set_state('{relation_name}.joined')
+
+ @hook('{peers:etcd}-relation-departed')
+ def peers_going_away(self):
+ '''Trigger a state on the unit that it is leaving. We can use this
+ state in conjunction with the joined state to determine which unit to
+ unregister from the etcd cluster. '''
+ conv = self.conversation()
+ conv.remove_state('{relation_name}.joined')
+ conv.set_state('{relation_name}.departing')
+
+ def dismiss(self):
+ '''Remove the departing state from all other units in the conversation,
+ and we can resume normal operation.
+ '''
+ for conv in self.conversations():
+ conv.remove_state('{relation_name}.departing')
+
+ def get_peers(self):
+ '''Return a list of names for the peers participating in this
+ conversation scope. '''
+ peers = []
+ # Iterate over all the conversations of this type.
+ for conversation in self.conversations():
+ peers.append(conversation.scope)
+ return peers
+
+ def set_db_ingress_address(self, address):
+ '''Set the ingress address belonging to the db relation.'''
+ for conversation in self.conversations():
+ conversation.set_remote('db-ingress-address', address)
+
+ def get_db_ingress_addresses(self):
+ '''Return a list of db ingress addresses'''
+ addresses = []
+ # Iterate over all the conversations of this type.
+ for conversation in self.conversations():
+ address = conversation.get_remote('db-ingress-address')
+ if address:
+ addresses.append(address)
+ return addresses
diff --git a/etcd/hooks/relations/etcd/provides.py b/etcd/hooks/relations/etcd/provides.py
new file mode 100644
index 0000000..3cfc174
--- /dev/null
+++ b/etcd/hooks/relations/etcd/provides.py
@@ -0,0 +1,47 @@
+#!/usr/bin/python
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from charms.reactive import RelationBase
+from charms.reactive import hook
+from charms.reactive import scopes
+
+
+class EtcdProvider(RelationBase):
+ scope = scopes.GLOBAL
+
+ @hook('{provides:etcd}-relation-{joined,changed}')
+ def joined_or_changed(self):
+ ''' Set the connected state from the provides side of the relation. '''
+ self.set_state('{relation_name}.connected')
+
+ @hook('{provides:etcd}-relation-{broken,departed}')
+ def broken_or_departed(self):
+ '''Remove connected state from the provides side of the relation. '''
+ conv = self.conversation()
+ if len(conv.units) == 1:
+ conv.remove_state('{relation_name}.connected')
+
+ def set_client_credentials(self, key, cert, ca):
+ ''' Set the client credentials on the global conversation for this
+ relation. '''
+ self.set_remote('client_key', key)
+ self.set_remote('client_ca', ca)
+ self.set_remote('client_cert', cert)
+
+ def set_connection_string(self, connection_string, version=''):
+ ''' Set the connection string on the global conversation for this
+ relation. '''
+ # Note: Version added as a late-dependency for 2 => 3 migration
+ # If no version is specified, consumers should presume etcd 2.x
+ self.set_remote('connection_string', connection_string)
+ self.set_remote('version', version)
diff --git a/etcd/hooks/relations/etcd/requires.py b/etcd/hooks/relations/etcd/requires.py
new file mode 100644
index 0000000..435532f
--- /dev/null
+++ b/etcd/hooks/relations/etcd/requires.py
@@ -0,0 +1,80 @@
+#!/usr/bin/python
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import os
+
+from charms.reactive import RelationBase
+from charms.reactive import hook
+from charms.reactive import scopes
+
+
+class EtcdClient(RelationBase):
+ scope = scopes.GLOBAL
+
+ @hook('{requires:etcd}-relation-{joined,changed}')
+ def changed(self):
+ ''' Indicate the relation is connected, and if the relation data is
+ set it is also available. '''
+ self.set_state('{relation_name}.connected')
+
+ if self.get_connection_string():
+ self.set_state('{relation_name}.available')
+ # Get the ca, key, cert from the relation data.
+ cert = self.get_client_credentials()
+ # The tls state depends on the existance of the ca, key and cert.
+ if cert['client_cert'] and cert['client_key'] and cert['client_ca']: # noqa
+ self.set_state('{relation_name}.tls.available')
+
+ @hook('{requires:etcd}-relation-{broken, departed}')
+ def broken(self):
+ ''' Indicate the relation is no longer available and not connected. '''
+ self.remove_state('{relation_name}.available')
+ self.remove_state('{relation_name}.connected')
+ self.remove_state('{relation_name}.tls.available')
+
+ def connection_string(self):
+ ''' This method is depreciated but ensures backward compatibility
+ @see get_connection_string(self). '''
+ return self.get_connection_string()
+
+ def get_connection_string(self):
+ ''' Return the connection string, if available, or None. '''
+ return self.get_remote('connection_string')
+
+ def get_version(self):
+ ''' Return the version of the etd protocol being used, or None. '''
+ return self.get_remote('version')
+
+ def get_client_credentials(self):
+ ''' Return a dict with the client certificate, ca and key to
+ communicate with etcd using tls. '''
+ return {'client_cert': self.get_remote('client_cert'),
+ 'client_key': self.get_remote('client_key'),
+ 'client_ca': self.get_remote('client_ca')}
+
+ def save_client_credentials(self, key, cert, ca):
+ ''' Save all the client certificates for etcd to local files. '''
+ self._save_remote_data('client_cert', cert)
+ self._save_remote_data('client_key', key)
+ self._save_remote_data('client_ca', ca)
+
+ def _save_remote_data(self, key, path):
+ ''' Save the remote data to a file indicated by path creating the
+ parent directory if needed.'''
+ value = self.get_remote(key)
+ if value:
+ parent = os.path.dirname(path)
+ if not os.path.isdir(parent):
+ os.makedirs(parent)
+ with open(path, 'w') as stream:
+ stream.write(value)
diff --git a/etcd/hooks/relations/nrpe-external-master/README.md b/etcd/hooks/relations/nrpe-external-master/README.md
new file mode 100644
index 0000000..e33deb8
--- /dev/null
+++ b/etcd/hooks/relations/nrpe-external-master/README.md
@@ -0,0 +1,66 @@
+# nrpe-external-master interface
+
+Use this interface to register nagios checks in your charm layers.
+
+## Purpose
+
+This interface is designed to interoperate with the
+[nrpe-external-master](https://jujucharms.com/nrpe-external-master) subordinate charm.
+
+## How to use in your layers
+
+The event handler for `nrpe-external-master.available` is called with an object
+through which you can register your own custom nagios checks, when a relation
+is established with `nrpe-external-master:nrpe-external-master`.
+
+This object provides a method,
+
+_add_check_(args, name=_check_name_, description=_description_, context=_context_, unit=_unit_)
+
+which is called to register a nagios plugin check for your service.
+
+All arguments are required.
+
+*args* is a list of nagios plugin command line arguments, starting with the path to the plugin executable.
+
+*name* is the name of the check registered in nagios
+
+*description* is some text that describes what the check is for and what it does
+
+*context* is the nagios context name, something that identifies your application
+
+*unit* is `hookenv.local_unit()`
+
+The nrpe subordinate installs `check_http`, so you can use it like this:
+
+```
+@when('nrpe-external-master.available')
+def setup_nagios(nagios):
+ config = hookenv.config()
+ unit_name = hookenv.local_unit()
+ nagios.add_check(['/usr/lib/nagios/plugins/check_http',
+ '-I', '127.0.0.1', '-p', str(config['port']),
+ '-e', " 200 OK", '-u', '/publickey'],
+ name="check_http",
+ description="Verify my awesome service is responding",
+ context=config["nagios_context"],
+ unit=unit_name,
+ )
+```
+If your `nagios.add_check` defines a custom plugin, you will also need to restart the `nagios-nrpe-server` service.
+
+Consult the nagios documentation for more information on [how to write your own
+plugins](https://assets.nagios.com/downloads/nagioscore/docs/nagioscore/4/en/pluginapi.html)
+or [find one](https://www.nagios.org/projects/nagios-plugins/) that does what you need.
+
+## Example deployment
+
+```
+$ juju deploy your-awesome-charm
+$ juju deploy nrpe-external-master --config site-nagios.yaml
+$ juju add-relation your-awesome-charm nrpe-external-master
+```
+
+where `site-nagios.yaml` has the necessary configuration settings for the
+subordinate to connect to nagios.
+
diff --git a/etcd/hooks/relations/nrpe-external-master/__init__.py b/etcd/hooks/relations/nrpe-external-master/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/etcd/hooks/relations/nrpe-external-master/interface.yaml b/etcd/hooks/relations/nrpe-external-master/interface.yaml
new file mode 100644
index 0000000..859a423
--- /dev/null
+++ b/etcd/hooks/relations/nrpe-external-master/interface.yaml
@@ -0,0 +1,3 @@
+name: nrpe-external-master
+summary: Nagios interface
+version: 1
diff --git a/etcd/hooks/relations/nrpe-external-master/provides.py b/etcd/hooks/relations/nrpe-external-master/provides.py
new file mode 100644
index 0000000..b6c7f0d
--- /dev/null
+++ b/etcd/hooks/relations/nrpe-external-master/provides.py
@@ -0,0 +1,91 @@
+import datetime
+import os
+
+from charmhelpers.core import hookenv
+
+from charms.reactive import hook
+from charms.reactive import RelationBase
+from charms.reactive import scopes
+
+
+class NrpeExternalMasterProvides(RelationBase):
+    '''Provides side of the nrpe-external-master interface.
+
+    Lets a charm register nagios NRPE check definitions and export the
+    matching nagios service definitions.
+    '''
+
+    # One global conversation with the nrpe-external-master subordinate.
+    scope = scopes.GLOBAL
+
+    @hook('{provides:nrpe-external-master}-relation-{joined,changed}')
+    def changed_nrpe(self):
+        # Signal to reactive handlers that checks can now be registered.
+        self.set_state('{relation_name}.available')
+
+    @hook('{provides:nrpe-external-master}-relation-{broken,departed}')
+    def broken_nrpe(self):
+        self.remove_state('{relation_name}.available')
+
+    def add_check(self, args, name=None, description=None, context=None,
+                  servicegroups=None, unit=None):
+        '''Write an NRPE check definition and a nagios service definition.
+
+        :param args: nagios plugin command line (list), starting with the
+            plugin executable path; joined with spaces into the command.
+        :param name: name the check is registered under in nagios.
+        :param description: human-readable text for the service definition.
+        :param context: nagios host context; overridden by the remote
+            'nagios_host_context' relation value when present.
+        :param servicegroups: nagios servicegroups; defaults to context.
+        :param unit: unit name; defaults to hookenv.local_unit(). '/' is
+            replaced by '-' for use in file names.
+        '''
+        # Remember every file written so removed() can clean them up later.
+        nagios_files = self.get_local('nagios.check.files', [])
+
+        if not unit:
+            unit = hookenv.local_unit()
+        unit = unit.replace('/', '-')
+        # Prefer values published by the nrpe subordinate; fall back to the
+        # caller-supplied context and a context-unit derived host name.
+        context = self.get_remote('nagios_host_context', context)
+        host_name = self.get_remote('nagios_hostname',
+                                    '%s-%s' % (context, unit))
+
+        check_tmpl = """
+#---------------------------------------------------
+# This file is Juju managed
+#---------------------------------------------------
+command[%(check_name)s]=%(check_args)s
+"""
+        service_tmpl = """
+#---------------------------------------------------
+# This file is Juju managed
+#---------------------------------------------------
+define service {
+ use active-service
+ host_name %(host_name)s
+ service_description %(description)s
+ check_command check_nrpe!%(check_name)s
+ servicegroups %(servicegroups)s
+}
+"""
+        # NRPE command definition consumed by the local nagios-nrpe-server.
+        check_filename = "/etc/nagios/nrpe.d/check_%s.cfg" % (name)
+        with open(check_filename, "w") as fh:
+            fh.write(check_tmpl % {
+                'check_args': ' '.join(args),
+                'check_name': name,
+            })
+        nagios_files.append(check_filename)
+
+        # Exported service definition collected by the nagios server.
+        service_filename = "/var/lib/nagios/export/service__%s_%s.cfg" % (
+            unit, name)
+        with open(service_filename, "w") as fh:
+            fh.write(service_tmpl % {
+                'servicegroups': servicegroups or context,
+                'context': context,
+                'description': description,
+                'check_name': name,
+                'host_name': host_name,
+                'unit_name': unit,
+            })
+        nagios_files.append(service_filename)
+
+        self.set_local('nagios.check.files', nagios_files)
+
+    def removed(self):
+        '''Delete every file previously written by add_check.'''
+        files = self.get_local('nagios.check.files', [])
+        for f in files:
+            try:
+                os.unlink(f)
+            except Exception as e:
+                # Best-effort cleanup: log the failure and continue.
+                hookenv.log("failed to remove %s: %s" % (f, e))
+        self.set_local('nagios.check.files', [])
+        self.remove_state('{relation_name}.removed')
+
+    def added(self):
+        # Alias: adding a check is announced the same way as an update.
+        self.updated()
+
+    def updated(self):
+        '''Publish a fresh timestamp so the remote side sees a data change.'''
+        relation_info = {
+            'timestamp': datetime.datetime.now().isoformat(),
+        }
+        self.set_remote(**relation_info)
diff --git a/etcd/hooks/relations/nrpe-external-master/requires.py b/etcd/hooks/relations/nrpe-external-master/requires.py
new file mode 100644
index 0000000..e69de29
diff --git a/etcd/hooks/relations/tls-certificates/.gitignore b/etcd/hooks/relations/tls-certificates/.gitignore
new file mode 100644
index 0000000..93813bc
--- /dev/null
+++ b/etcd/hooks/relations/tls-certificates/.gitignore
@@ -0,0 +1,4 @@
+.tox
+__pycache__
+*.pyc
+_build
diff --git a/etcd/hooks/relations/tls-certificates/README.md b/etcd/hooks/relations/tls-certificates/README.md
new file mode 100644
index 0000000..733da6d
--- /dev/null
+++ b/etcd/hooks/relations/tls-certificates/README.md
@@ -0,0 +1,90 @@
+# Interface tls-certificates
+
+This is a [Juju][] interface layer that enables a charm which requires TLS
+certificates to relate to a charm which can provide them, such as [Vault][] or
+[EasyRSA][]
+
+To get started please read the [Introduction to PKI][] which defines some PKI
+terms, concepts and processes used in this document.
+
+# Example Usage
+
+Let's say you have a charm which needs a server certificate for a service it
+provides to other charms and a client certificate for a database it consumes
+from another charm. The charm provides its own service on the `clients`
+relation endpoint, and it consumes the database on the `db` relation endpoint.
+
+First, you must define the relation endpoint in your charm's `metadata.yaml`:
+
+```yaml
+requires:
+ cert-provider:
+ interface: tls-certificates
+```
+
+Next, you must ensure the interface layer is included in your `layer.yaml`:
+
+```yaml
+includes:
+ - interface:tls-certificates
+```
+
+Then, in your reactive code, add the following, changing `update_certs` to
+handle the certificates however your charm needs:
+
+```python
+from charmhelpers.core import hookenv, host
+from charms.reactive import endpoint_from_flag
+
+
+@when('cert-provider.ca.changed')
+def install_root_ca_cert():
+ cert_provider = endpoint_from_flag('cert-provider.ca.available')
+ host.install_ca_cert(cert_provider.root_ca_cert)
+ clear_flag('cert-provider.ca.changed')
+
+
+@when('cert-provider.available')
+def request_certificates():
+ cert_provider = endpoint_from_flag('cert-provider.available')
+
+ # get ingress info
+ ingress_for_clients = hookenv.network_get('clients')['ingress-addresses']
+ ingress_for_db = hookenv.network_get('db')['ingress-addresses']
+
+ # use first ingress address as primary and any additional as SANs
+    server_cn, server_sans = ingress_for_clients[0], ingress_for_clients[1:]
+    client_cn, client_sans = ingress_for_db[0], ingress_for_db[1:]
+
+ # request a single server and single client cert; note that multiple certs
+ # of either type can be requested as long as they have unique common names
+ cert_provider.request_server_cert(server_cn, server_sans)
+ cert_provider.request_client_cert(client_cn, client_sans)
+
+
+@when('cert-provider.certs.changed')
+def update_certs():
+ cert_provider = endpoint_from_flag('cert-provider.available')
+ server_cert = cert_provider.server_certs[0] # only requested one
+ myserver.update_server_cert(server_cert.cert, server_cert.key)
+
+ client_cert = cert_provider.client_certs[0] # only requested one
+ myclient.update_client_cert(client_cert.cert, client_cert.key)
+ clear_flag('cert-provider.certs.changed')
+```
+
+
+# Reference
+
+ * [Requires](docs/requires.md)
+ * [Provides](docs/provides.md)
+
+# Contact Information
+
+Maintainer: Cory Johns <Cory.Johns@canonical.com>
+
+
+[Juju]: https://jujucharms.com
+[Vault]: https://jujucharms.com/u/openstack-charmers/vault
+[EasyRSA]: https://jujucharms.com/u/containers/easyrsa
+[Introduction to PKI]: https://github.com/OpenVPN/easy-rsa/blob/master/doc/Intro-To-PKI.md
diff --git a/etcd/hooks/relations/tls-certificates/__init__.py b/etcd/hooks/relations/tls-certificates/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/etcd/hooks/relations/tls-certificates/docs/common.md b/etcd/hooks/relations/tls-certificates/docs/common.md
new file mode 100644
index 0000000..25d0e08
--- /dev/null
+++ b/etcd/hooks/relations/tls-certificates/docs/common.md
@@ -0,0 +1,51 @@
+
+
+Name of the application which the request came from.
+
+:returns: Name of application
+:rtype: str
+
+
cert
+
+
+The cert published for this request, if any.
+
+
cert_type
+
+
+Type of certificate, 'server' or 'client', being requested.
+
+
resolve_unit_name
+
+```python
+CertificateRequest.resolve_unit_name(unit)
+```
+Return name of unit associated with this request.
+
+unit_name should be provided in the relation data to ensure
+compatibility with cross-model relations. If the unit name
+is absent then fall back to unit_name attribute of the
+unit associated with this request.
+
+:param unit: Unit to extract name from
+:type unit: charms.reactive.endpoints.RelatedUnit
+:returns: Name of unit
+:rtype: str
+
+
Certificate
+
+```python
+Certificate(self, cert_type, common_name, cert, key)
+```
+
+Represents a created certificate and key.
+
+The ``cert_type``, ``common_name``, ``cert``, and ``key`` values can
+be accessed either as properties or as the contents of the dict.
+
diff --git a/etcd/hooks/relations/tls-certificates/docs/provides.md b/etcd/hooks/relations/tls-certificates/docs/provides.md
new file mode 100644
index 0000000..c213546
--- /dev/null
+++ b/etcd/hooks/relations/tls-certificates/docs/provides.md
@@ -0,0 +1,212 @@
+
provides
+
+
+
TlsProvides
+
+```python
+TlsProvides(self, endpoint_name, relation_ids=None)
+```
+
+The provider's side of the interface protocol.
+
+The following flags may be set:
+
+ * `{endpoint_name}.available`
+ Whenever any clients are joined.
+
+ * `{endpoint_name}.certs.requested`
+ When there are new certificate requests of any kind to be processed.
+ The requests can be accessed via [new_requests][].
+
+ * `{endpoint_name}.server.certs.requested`
+ When there are new server certificate requests to be processed.
+ The requests can be accessed via [new_server_requests][].
+
+ * `{endpoint_name}.client.certs.requested`
+ When there are new client certificate requests to be processed.
+ The requests can be accessed via [new_client_requests][].
+
+[Certificate]: common.md#tls_certificates_common.Certificate
+[CertificateRequest]: common.md#tls_certificates_common.CertificateRequest
+[all_requests]: provides.md#provides.TlsProvides.all_requests
+[new_requests]: provides.md#provides.TlsProvides.new_requests
+[new_server_requests]: provides.md#provides.TlsProvides.new_server_requests
+[new_client_requests]: provides.md#provides.TlsProvides.new_client_requests
+
+
all_published_certs
+
+
+List of all [Certificate][] instances that this provider has published
+for all related applications.
+
+
all_requests
+
+
+List of all requests that have been made.
+
+Each will be an instance of [CertificateRequest][].
+
+Example usage:
+
+```python
+@when('certs.regen',
+ 'tls.certs.available')
+def regen_all_certs():
+ tls = endpoint_from_flag('tls.certs.available')
+ for request in tls.all_requests:
+ cert, key = generate_cert(request.cert_type,
+ request.common_name,
+ request.sans)
+ request.set_cert(cert, key)
+```
+
+
new_application_requests
+
+
+Filtered view of [new_requests][] that only includes application cert
+requests.
+
+Each will be an instance of [ApplicationCertificateRequest][].
+
+Example usage:
+
+```python
+@when('tls.application.certs.requested')
+def gen_application_certs():
+ tls = endpoint_from_flag('tls.application.certs.requested')
+ for request in tls.new_application_requests:
+ cert, key = generate_application_cert(request.common_name,
+ request.sans)
+ request.set_cert(cert, key)
+```
+
+
new_client_requests
+
+
+Filtered view of [new_requests][] that only includes client cert
+requests.
+
+Each will be an instance of [CertificateRequest][].
+
+Example usage:
+
+```python
+@when('tls.client.certs.requested')
+def gen_client_certs():
+ tls = endpoint_from_flag('tls.client.certs.requested')
+ for request in tls.new_client_requests:
+ cert, key = generate_client_cert(request.common_name,
+ request.sans)
+ request.set_cert(cert, key)
+```
+
+
new_requests
+
+
+Filtered view of [all_requests][] that only includes requests that
+haven't been handled.
+
+Each will be an instance of [CertificateRequest][].
+
+This collection can also be further filtered by request type using
+[new_server_requests][] or [new_client_requests][].
+
+Example usage:
+
+```python
+@when('tls.certs.requested')
+def gen_certs():
+ tls = endpoint_from_flag('tls.certs.requested')
+ for request in tls.new_requests:
+ cert, key = generate_cert(request.cert_type,
+ request.common_name,
+ request.sans)
+ request.set_cert(cert, key)
+```
+
+
new_server_requests
+
+
+Filtered view of [new_requests][] that only includes server cert
+requests.
+
+Each will be an instance of [CertificateRequest][].
+
+Example usage:
+
+```python
+@when('tls.server.certs.requested')
+def gen_server_certs():
+ tls = endpoint_from_flag('tls.server.certs.requested')
+ for request in tls.new_server_requests:
+ cert, key = generate_server_cert(request.common_name,
+ request.sans)
+ request.set_cert(cert, key)
+```
+
+
set_ca
+
+```python
+TlsProvides.set_ca(certificate_authority)
+```
+
+Publish the CA to all related applications.
+
+
set_chain
+
+```python
+TlsProvides.set_chain(chain)
+```
+
+Publish the chain of trust to all related applications.
+
+
set_client_cert
+
+```python
+TlsProvides.set_client_cert(cert, key)
+```
+
+Deprecated. This is only for backwards compatibility.
+
+Publish a globally shared client cert and key.
+
+
set_server_cert
+
+```python
+TlsProvides.set_server_cert(scope, cert, key)
+```
+
+Deprecated. Use one of the [new_requests][] collections and
+`request.set_cert()` instead.
+
+Set the server cert and key for the request identified by `scope`.
+
+
+
+```python
+TlsProvides.get_server_requests()
+```
+
+Deprecated. Use the [new_requests][] or [server_requests][]
+collections instead.
+
+One provider can have many requests to generate server certificates.
+Return a map of all server request objects indexed by a unique
+identifier.
+
diff --git a/etcd/hooks/relations/tls-certificates/docs/requires.md b/etcd/hooks/relations/tls-certificates/docs/requires.md
new file mode 100644
index 0000000..fdec902
--- /dev/null
+++ b/etcd/hooks/relations/tls-certificates/docs/requires.md
@@ -0,0 +1,207 @@
+
requires
+
+
+
TlsRequires
+
+```python
+TlsRequires(self, endpoint_name, relation_ids=None)
+```
+
+The client's side of the interface protocol.
+
+The following flags may be set:
+
+ * `{endpoint_name}.available`
+ Whenever the relation is joined.
+
+ * `{endpoint_name}.ca.available`
+ When the root CA information is available via the [root_ca_cert][] and
+ [root_ca_chain][] properties.
+
+ * `{endpoint_name}.ca.changed`
+ When the root CA information has changed, whether because
+ they have just become available or if they were regenerated by the CA.
+ Once processed this flag should be removed by the charm.
+
+ * `{endpoint_name}.certs.available`
+ When the requested server or client certs are available.
+
+ * `{endpoint_name}.certs.changed`
+ When the requested server or client certs have changed, whether because
+ they have just become available or if they were regenerated by the CA.
+ Once processed this flag should be removed by the charm.
+
+ * `{endpoint_name}.server.certs.available`
+ When the server certificates requested by [request_server_cert][] are
+ available via the [server_certs][] collection.
+
+ * `{endpoint_name}.server.certs.changed`
+ When the requested server certificates have changed, whether because
+ they have just become available or if they were regenerated by the CA.
+ Once processed this flag should be removed by the charm.
+
+ * `{endpoint_name}.client.certs.available`
+ When the client certificates requested by [request_client_cert][] are
+ available via the [client_certs][] collection.
+
+ * `{endpoint_name}.client.certs.changed`
+ When the requested client certificates have changed, whether because
+ they have just become available or if they were regenerated by the CA.
+ Once processed this flag should be removed by the charm.
+
+The following flags have been deprecated:
+
+ * `{endpoint_name}.server.cert.available`
+ * `{endpoint_name}.client.cert.available`
+ * `{endpoint_name}.batch.cert.available`
+
+[Certificate]: common.md#tls_certificates_common.Certificate
+[CertificateRequest]: common.md#tls_certificates_common.CertificateRequest
+[root_ca_cert]: requires.md#requires.TlsRequires.root_ca_cert
+[root_ca_chain]: requires.md#requires.TlsRequires.root_ca_chain
+[request_server_cert]: requires.md#requires.TlsRequires.request_server_cert
+[request_client_cert]: requires.md#requires.TlsRequires.request_client_cert
+[server_certs]: requires.md#requires.TlsRequires.server_certs
+[server_certs_map]: requires.md#requires.TlsRequires.server_certs_map
+[client_certs]: requires.md#requires.TlsRequires.client_certs
+
+
application_certs
+
+
+List of [Certificate][] instances for all available application certs.
+
+
client_certs
+
+
+List of [Certificate][] instances for all available client certs.
+
+
client_certs_map
+
+
+Mapping of client [Certificate][] instances by their `common_name`.
+
+
root_ca_cert
+
+
+Root CA certificate.
+
+
root_ca_chain
+
+
+The chain of trust for the root CA.
+
+
server_certs
+
+
+List of [Certificate][] instances for all available server certs.
+
+
server_certs_map
+
+
+Mapping of server [Certificate][] instances by their `common_name`.
+
+
get_ca
+
+```python
+TlsRequires.get_ca()
+```
+
+Return the root CA certificate.
+
+Same as [root_ca_cert][].
+
+
get_chain
+
+```python
+TlsRequires.get_chain()
+```
+
+Return the chain of trust for the root CA.
+
+Same as [root_ca_chain][].
+
+
get_client_cert
+
+```python
+TlsRequires.get_client_cert()
+```
+
+Deprecated. Use [request_client_cert][] and the [client_certs][]
+collection instead.
+
+Return a globally shared client certificate and key.
+
+
get_server_cert
+
+```python
+TlsRequires.get_server_cert()
+```
+
+Deprecated. Use the [server_certs][] collection instead.
+
+Return the cert and key of the first server certificate requested.
+
+
get_batch_requests
+
+```python
+TlsRequires.get_batch_requests()
+```
+
+Deprecated. Use [server_certs_map][] instead.
+
+Mapping of server [Certificate][] instances by their `common_name`.
+
+
request_server_cert
+
+```python
+TlsRequires.request_server_cert(cn, sans=None, cert_name=None)
+```
+
+Request a server certificate and key be generated for the given
+common name (`cn`) and optional list of alternative names (`sans`).
+
+The `cert_name` is deprecated and not needed.
+
+This can be called multiple times to request more than one server
+certificate, although the common names must be unique. If called
+again with the same common name, it will be ignored.
+
+
+
+```python
+TlsRequires.request_server_certs()
+```
+
+Deprecated. Just use [request_server_cert][]; this does nothing.
+
+
request_client_cert
+
+```python
+TlsRequires.request_client_cert(cn, sans)
+```
+
+Request a client certificate and key be generated for the given
+common name (`cn`) and list of alternative names (`sans`).
+
+This can be called multiple times to request more than one client
+certificate, although the common names must be unique. If called
+again with the same common name, it will be ignored.
+
+
request_application_cert
+
+```python
+TlsRequires.request_application_cert(cn, sans)
+```
+
+Request an application certificate and key be generated for the given
+common name (`cn`) and list of alternative names (`sans`) of this
+unit and all peer units. All units will share a single certificate.
+
diff --git a/etcd/hooks/relations/tls-certificates/interface.yaml b/etcd/hooks/relations/tls-certificates/interface.yaml
new file mode 100644
index 0000000..beec53b
--- /dev/null
+++ b/etcd/hooks/relations/tls-certificates/interface.yaml
@@ -0,0 +1,6 @@
+name: tls-certificates
+summary: |
+ A Transport Layer Security (TLS) charm layer that uses requires and provides
+  to exchange certificates.
+version: 1
+repo: https://github.com/juju-solutions/interface-tls-certificates
diff --git a/etcd/hooks/relations/tls-certificates/make_docs b/etcd/hooks/relations/tls-certificates/make_docs
new file mode 100644
index 0000000..2f2274a
--- /dev/null
+++ b/etcd/hooks/relations/tls-certificates/make_docs
@@ -0,0 +1,23 @@
+#!.tox/py3/bin/python
+# Build the interface documentation with pydocmd, then remove the
+# intermediate _build tree.
+
+import sys
+import importlib
+from pathlib import Path
+from shutil import rmtree
+from unittest.mock import patch
+
+import pydocmd.__main__
+
+
+# hookenv.metadata normally reads a charm's metadata.yaml; mock it so the
+# interface modules can be imported outside of a deployed charm.
+with patch('charmhelpers.core.hookenv.metadata') as metadata:
+    metadata.return_value = {
+        'requires': {'cert': {'interface': 'tls-certificates'}},
+        'provides': {'cert': {'interface': 'tls-certificates'}},
+    }
+    sys.path.append('..')
+    # Register this interface directory as the anonymous top-level module so
+    # relative imports (e.g. in provides.py) resolve during doc generation.
+    sys.modules[''] = importlib.import_module(Path.cwd().name)
+    print(sys.argv)
+    # Default to pydocmd's 'build' subcommand when none was given.
+    if len(sys.argv) == 1:
+        sys.argv.extend(['build'])
+    pydocmd.__main__.main()
+    rmtree('_build')
diff --git a/etcd/hooks/relations/tls-certificates/provides.py b/etcd/hooks/relations/tls-certificates/provides.py
new file mode 100644
index 0000000..0262baa
--- /dev/null
+++ b/etcd/hooks/relations/tls-certificates/provides.py
@@ -0,0 +1,301 @@
+if not __package__:
+ # fix relative imports when building docs
+ import sys
+ __package__ = sys.modules[''].__name__
+
+from charms.reactive import Endpoint
+from charms.reactive import when, when_not
+from charms.reactive import set_flag, clear_flag, toggle_flag
+
+from .tls_certificates_common import (
+ ApplicationCertificateRequest,
+ CertificateRequest
+)
+
+
+class TlsProvides(Endpoint):
+ """
+ The provider's side of the interface protocol.
+
+ The following flags may be set:
+
+ * `{endpoint_name}.available`
+ Whenever any clients are joined.
+
+ * `{endpoint_name}.certs.requested`
+ When there are new certificate requests of any kind to be processed.
+ The requests can be accessed via [new_requests][].
+
+ * `{endpoint_name}.server.certs.requested`
+ When there are new server certificate requests to be processed.
+ The requests can be accessed via [new_server_requests][].
+
+ * `{endpoint_name}.client.certs.requested`
+ When there are new client certificate requests to be processed.
+ The requests can be accessed via [new_client_requests][].
+
+ [Certificate]: common.md#tls_certificates_common.Certificate
+ [CertificateRequest]: common.md#tls_certificates_common.CertificateRequest
+ [all_requests]: provides.md#provides.TlsProvides.all_requests
+ [new_requests]: provides.md#provides.TlsProvides.new_requests
+ [new_server_requests]: provides.md#provides.TlsProvides.new_server_requests
+ [new_client_requests]: provides.md#provides.TlsProvides.new_client_requests
+ """
+
+ @when('endpoint.{endpoint_name}.joined')
+ def joined(self):
+ set_flag(self.expand_name('{endpoint_name}.available'))
+ toggle_flag(self.expand_name('{endpoint_name}.certs.requested'),
+ self.new_requests)
+ toggle_flag(self.expand_name('{endpoint_name}.server.certs.requested'),
+ self.new_server_requests)
+ toggle_flag(self.expand_name('{endpoint_name}.client.certs.requested'),
+ self.new_client_requests)
+ toggle_flag(
+ self.expand_name('{endpoint_name}.application.certs.requested'),
+ self.new_application_requests)
+ # For backwards compatibility, set the old "cert" flags as well
+ toggle_flag(self.expand_name('{endpoint_name}.server.cert.requested'),
+ self.new_server_requests)
+ toggle_flag(self.expand_name('{endpoint_name}.client.cert.requested'),
+ self.new_client_requests)
+
+ @when_not('endpoint.{endpoint_name}.joined')
+ def broken(self):
+ clear_flag(self.expand_name('{endpoint_name}.available'))
+ clear_flag(self.expand_name('{endpoint_name}.certs.requested'))
+ clear_flag(self.expand_name('{endpoint_name}.server.certs.requested'))
+ clear_flag(self.expand_name('{endpoint_name}.client.certs.requested'))
+ clear_flag(
+ self.expand_name('{endpoint_name}.application.certs.requested'))
+
+ def set_ca(self, certificate_authority):
+ """
+ Publish the CA to all related applications.
+ """
+ for relation in self.relations:
+ # All the clients get the same CA, so send it to them.
+ relation.to_publish_raw['ca'] = certificate_authority
+
+ def set_chain(self, chain):
+ """
+ Publish the chain of trust to all related applications.
+ """
+ for relation in self.relations:
+ # All the clients get the same chain, so send it to them.
+ relation.to_publish_raw['chain'] = chain
+
+ def set_client_cert(self, cert, key):
+ """
+ Deprecated. This is only for backwards compatibility.
+
+ Publish a globally shared client cert and key.
+ """
+ for relation in self.relations:
+ relation.to_publish_raw.update({
+ 'client.cert': cert,
+ 'client.key': key,
+ })
+
+ def set_server_cert(self, scope, cert, key):
+ """
+ Deprecated. Use one of the [new_requests][] collections and
+ `request.set_cert()` instead.
+
+ Set the server cert and key for the request identified by `scope`.
+ """
+ request = self.get_server_requests()[scope]
+ request.set_cert(cert, key)
+
+ def set_server_multicerts(self, scope):
+ """
+ Deprecated. Done automatically.
+ """
+ pass
+
+ def add_server_cert(self, scope, cn, cert, key):
+ '''
+ Deprecated. Use `request.set_cert()` instead.
+ '''
+ self.set_server_cert(scope, cert, key)
+
+ def get_server_requests(self):
+ """
+ Deprecated. Use the [new_requests][] or [server_requests][]
+ collections instead.
+
+ One provider can have many requests to generate server certificates.
+ Return a map of all server request objects indexed by a unique
+ identifier.
+ """
+ return {req._key: req for req in self.new_server_requests}
+
+ @property
+ def all_requests(self):
+ """
+ List of all requests that have been made.
+
+ Each will be an instance of [CertificateRequest][].
+
+ Example usage:
+
+ ```python
+ @when('certs.regen',
+ 'tls.certs.available')
+ def regen_all_certs():
+ tls = endpoint_from_flag('tls.certs.available')
+ for request in tls.all_requests:
+ cert, key = generate_cert(request.cert_type,
+ request.common_name,
+ request.sans)
+ request.set_cert(cert, key)
+ ```
+ """
+ requests = []
+ for unit in self.all_joined_units:
+ # handle older single server cert request
+ if unit.received_raw['common_name']:
+ requests.append(CertificateRequest(
+ unit,
+ 'server',
+ unit.received_raw['certificate_name'],
+ unit.received_raw['common_name'],
+ unit.received['sans'],
+ ))
+
+ # handle multi server cert requests
+ reqs = unit.received['cert_requests'] or {}
+ for common_name, req in reqs.items():
+ requests.append(CertificateRequest(
+ unit,
+ 'server',
+ common_name,
+ common_name,
+ req['sans'],
+ ))
+
+ # handle client cert requests
+ reqs = unit.received['client_cert_requests'] or {}
+ for common_name, req in reqs.items():
+ requests.append(CertificateRequest(
+ unit,
+ 'client',
+ common_name,
+ common_name,
+ req['sans'],
+ ))
+ # handle application cert requests
+ reqs = unit.received['application_cert_requests'] or {}
+ for common_name, req in reqs.items():
+ requests.append(ApplicationCertificateRequest(
+ unit,
+ 'application',
+ common_name,
+ common_name,
+ req['sans']
+ ))
+ return requests
+
+ @property
+ def new_requests(self):
+ """
+ Filtered view of [all_requests][] that only includes requests that
+ haven't been handled.
+
+ Each will be an instance of [CertificateRequest][].
+
+ This collection can also be further filtered by request type using
+ [new_server_requests][] or [new_client_requests][].
+
+ Example usage:
+
+ ```python
+ @when('tls.certs.requested')
+ def gen_certs():
+ tls = endpoint_from_flag('tls.certs.requested')
+ for request in tls.new_requests:
+ cert, key = generate_cert(request.cert_type,
+ request.common_name,
+ request.sans)
+ request.set_cert(cert, key)
+ ```
+ """
+ return [req for req in self.all_requests if not req.is_handled]
+
+ @property
+ def new_server_requests(self):
+ """
+ Filtered view of [new_requests][] that only includes server cert
+ requests.
+
+ Each will be an instance of [CertificateRequest][].
+
+ Example usage:
+
+ ```python
+ @when('tls.server.certs.requested')
+ def gen_server_certs():
+ tls = endpoint_from_flag('tls.server.certs.requested')
+ for request in tls.new_server_requests:
+ cert, key = generate_server_cert(request.common_name,
+ request.sans)
+ request.set_cert(cert, key)
+ ```
+ """
+ return [req for req in self.new_requests if req.cert_type == 'server']
+
+ @property
+ def new_client_requests(self):
+ """
+ Filtered view of [new_requests][] that only includes client cert
+ requests.
+
+ Each will be an instance of [CertificateRequest][].
+
+ Example usage:
+
+ ```python
+ @when('tls.client.certs.requested')
+ def gen_client_certs():
+ tls = endpoint_from_flag('tls.client.certs.requested')
+ for request in tls.new_client_requests:
+ cert, key = generate_client_cert(request.common_name,
+ request.sans)
+ request.set_cert(cert, key)
+ ```
+ """
+ return [req for req in self.new_requests if req.cert_type == 'client']
+
+ @property
+ def new_application_requests(self):
+ """
+ Filtered view of [new_requests][] that only includes application cert
+ requests.
+
+ Each will be an instance of [ApplicationCertificateRequest][].
+
+ Example usage:
+
+ ```python
+ @when('tls.application.certs.requested')
+ def gen_application_certs():
+ tls = endpoint_from_flag('tls.application.certs.requested')
+ for request in tls.new_application_requests:
+ cert, key = generate_application_cert(request.common_name,
+ request.sans)
+ request.set_cert(cert, key)
+ ```
+
+ :returns: List of certificate requests.
+ :rtype: [CertificateRequest, ]
+ """
+ return [req for req in self.new_requests
+ if req.cert_type == 'application']
+
+ @property
+ def all_published_certs(self):
+ """
+ List of all [Certificate][] instances that this provider has published
+ for all related applications.
+ """
+ return [req.cert for req in self.all_requests if req.cert]
diff --git a/etcd/hooks/relations/tls-certificates/pydocmd.yml b/etcd/hooks/relations/tls-certificates/pydocmd.yml
new file mode 100644
index 0000000..c568913
--- /dev/null
+++ b/etcd/hooks/relations/tls-certificates/pydocmd.yml
@@ -0,0 +1,19 @@
+site_name: 'TLS Certificates Interface'
+
+generate:
+ - requires.md:
+ - requires
+ - requires.TlsRequires+
+ - provides.md:
+ - provides
+ - provides.TlsProvides+
+ - common.md:
+ - tls_certificates_common.CertificateRequest+
+ - tls_certificates_common.Certificate+
+
+pages:
+ - Requires: requires.md
+ - Provides: provides.md
+ - Common: common.md
+
+gens_dir: docs
diff --git a/etcd/hooks/relations/tls-certificates/requires.py b/etcd/hooks/relations/tls-certificates/requires.py
new file mode 100644
index 0000000..951f953
--- /dev/null
+++ b/etcd/hooks/relations/tls-certificates/requires.py
@@ -0,0 +1,342 @@
+if not __package__:
+ # fix relative imports when building docs
+ import sys
+ __package__ = sys.modules[''].__name__
+
+import uuid
+
+from charmhelpers.core import hookenv
+
+from charms.reactive import when, when_not
+from charms.reactive import set_flag, clear_flag, toggle_flag
+from charms.reactive import Endpoint
+from charms.reactive import data_changed
+
+from .tls_certificates_common import Certificate
+
+
+class TlsRequires(Endpoint):
+ """
+ The client's side of the interface protocol.
+
+ The following flags may be set:
+
+ * `{endpoint_name}.available`
+ Whenever the relation is joined.
+
+ * `{endpoint_name}.ca.available`
+ When the root CA information is available via the [root_ca_cert][] and
+ [root_ca_chain][] properties.
+
+ * `{endpoint_name}.ca.changed`
+ When the root CA information has changed, whether because
+ they have just become available or if they were regenerated by the CA.
+ Once processed this flag should be removed by the charm.
+
+ * `{endpoint_name}.certs.available`
+ When the requested server or client certs are available.
+
+ * `{endpoint_name}.certs.changed`
+ When the requested server or client certs have changed, whether because
+ they have just become available or if they were regenerated by the CA.
+ Once processed this flag should be removed by the charm.
+
+ * `{endpoint_name}.server.certs.available`
+ When the server certificates requested by [request_server_cert][] are
+ available via the [server_certs][] collection.
+
+ * `{endpoint_name}.server.certs.changed`
+ When the requested server certificates have changed, whether because
+ they have just become available or if they were regenerated by the CA.
+ Once processed this flag should be removed by the charm.
+
+ * `{endpoint_name}.client.certs.available`
+ When the client certificates requested by [request_client_cert][] are
+ available via the [client_certs][] collection.
+
+ * `{endpoint_name}.client.certs.changed`
+ When the requested client certificates have changed, whether because
+ they have just become available or if they were regenerated by the CA.
+ Once processed this flag should be removed by the charm.
+
+ The following flags have been deprecated:
+
+ * `{endpoint_name}.server.cert.available`
+ * `{endpoint_name}.client.cert.available`
+ * `{endpoint_name}.batch.cert.available`
+
+ [Certificate]: common.md#tls_certificates_common.Certificate
+ [CertificateRequest]: common.md#tls_certificates_common.CertificateRequest
+ [root_ca_cert]: requires.md#requires.TlsRequires.root_ca_cert
+ [root_ca_chain]: requires.md#requires.TlsRequires.root_ca_chain
+ [request_server_cert]: requires.md#requires.TlsRequires.request_server_cert
+ [request_client_cert]: requires.md#requires.TlsRequires.request_client_cert
+ [server_certs]: requires.md#requires.TlsRequires.server_certs
+ [server_certs_map]: requires.md#requires.TlsRequires.server_certs_map
+ [client_certs]: requires.md#requires.TlsRequires.client_certs
+ """
+
+ @when('endpoint.{endpoint_name}.joined')
+ def joined(self):
+ self.relations[0].to_publish_raw['unit_name'] = self._unit_name
+ prefix = self.expand_name('{endpoint_name}.')
+ ca_available = self.root_ca_cert
+ ca_changed = ca_available and data_changed(prefix + 'ca',
+ self.root_ca_cert)
+ server_available = self.server_certs
+ server_changed = server_available and data_changed(prefix + 'servers',
+ self.server_certs)
+ client_available = self.client_certs
+ client_changed = client_available and data_changed(prefix + 'clients',
+ self.client_certs)
+ certs_available = server_available or client_available
+ certs_changed = server_changed or client_changed
+
+ set_flag(prefix + 'available')
+ toggle_flag(prefix + 'ca.available', ca_available)
+ toggle_flag(prefix + 'ca.changed', ca_changed)
+ toggle_flag(prefix + 'server.certs.available', server_available)
+ toggle_flag(prefix + 'server.certs.changed', server_changed)
+ toggle_flag(prefix + 'client.certs.available', client_available)
+ toggle_flag(prefix + 'client.certs.changed', client_changed)
+ toggle_flag(prefix + 'certs.available', certs_available)
+ toggle_flag(prefix + 'certs.changed', certs_changed)
+ # deprecated
+ toggle_flag(prefix + 'server.cert.available', self.server_certs)
+ toggle_flag(prefix + 'client.cert.available', self.get_client_cert())
+ toggle_flag(prefix + 'batch.cert.available', self.server_certs)
+
+ @when_not('endpoint.{endpoint_name}.joined')
+ def broken(self):
+ prefix = self.expand_name('{endpoint_name}.')
+ clear_flag(prefix + 'available')
+ clear_flag(prefix + 'ca.available')
+ clear_flag(prefix + 'ca.changed')
+ clear_flag(prefix + 'server.certs.available')
+ clear_flag(prefix + 'server.certs.changed')
+ clear_flag(prefix + 'client.certs.available')
+ clear_flag(prefix + 'client.certs.changed')
+ clear_flag(prefix + 'certs.available')
+ clear_flag(prefix + 'certs.changed')
+ # deprecated
+ clear_flag(prefix + 'server.cert.available')
+ clear_flag(prefix + 'client.cert.available')
+ clear_flag(prefix + 'batch.cert.available')
+
+ @property
+ def _unit_name(self):
+ return hookenv.local_unit().replace('/', '_')
+
+ @property
+ def root_ca_cert(self):
+ """
+ Root CA certificate.
+ """
+ # only the leader of the provider should set the CA, or all units
+ # had better agree
+ return self.all_joined_units.received_raw['ca']
+
+ def get_ca(self):
+ """
+ Return the root CA certificate.
+
+ Same as [root_ca_cert][].
+ """
+ return self.root_ca_cert
+
+ @property
+ def root_ca_chain(self):
+ """
+ The chain of trust for the root CA.
+ """
+ # only the leader of the provider should set the CA, or all units
+ # had better agree
+ return self.all_joined_units.received_raw['chain']
+
+ def get_chain(self):
+ """
+ Return the chain of trust for the root CA.
+
+ Same as [root_ca_chain][].
+ """
+ return self.root_ca_chain
+
+ def get_client_cert(self):
+ """
+ Deprecated. Use [request_client_cert][] and the [client_certs][]
+ collection instead.
+
+ Return a globally shared client certificate and key.
+ """
+ data = self.all_joined_units.received_raw
+ return (data['client.cert'], data['client.key'])
+
+ def get_server_cert(self):
+ """
+ Deprecated. Use the [server_certs][] collection instead.
+
+ Return the cert and key of the first server certificate requested.
+ """
+ if not self.server_certs:
+ return (None, None)
+ cert = self.server_certs[0]
+ return (cert.cert, cert.key)
+
+ @property
+ def server_certs(self):
+ """
+ List of [Certificate][] instances for all available server certs.
+ """
+ certs = []
+ raw_data = self.all_joined_units.received_raw
+ json_data = self.all_joined_units.received
+
+ # for backwards compatibility, the first cert goes in its own fields
+ if self.relations:
+ common_name = self.relations[0].to_publish_raw['common_name']
+ cert = raw_data['{}.server.cert'.format(self._unit_name)]
+ key = raw_data['{}.server.key'.format(self._unit_name)]
+ if cert and key:
+ certs.append(Certificate('server',
+ common_name,
+ cert,
+ key))
+
+ # subsequent requests go in the collection
+ field = '{}.processed_requests'.format(self._unit_name)
+ certs_data = json_data[field] or {}
+ certs.extend(Certificate('server',
+ common_name,
+ cert['cert'],
+ cert['key'])
+ for common_name, cert in certs_data.items())
+ return certs
+
+ @property
+ def application_certs(self):
+ """
+ List containing the application Certificate cert.
+
+ :returns: A list containing one certificate
+ :rtype: [Certificate()]
+ """
+ certs = []
+ json_data = self.all_joined_units.received
+ field = '{}.processed_application_requests'.format(self._unit_name)
+ certs_data = json_data[field] or {}
+ app_cert_data = certs_data.get('app_data')
+ if app_cert_data:
+ certs = [Certificate(
+ 'server',
+ 'app_data',
+ app_cert_data['cert'],
+ app_cert_data['key'])]
+ return certs
+
+ @property
+ def server_certs_map(self):
+ """
+ Mapping of server [Certificate][] instances by their `common_name`.
+ """
+ return {cert.common_name: cert for cert in self.server_certs}
+
+ def get_batch_requests(self):
+ """
+ Deprecated. Use [server_certs_map][] instead.
+
+ Mapping of server [Certificate][] instances by their `common_name`.
+ """
+ return self.server_certs_map
+
+ @property
+ def client_certs(self):
+ """
+ List of [Certificate][] instances for all available client certs.
+ """
+ field = '{}.processed_client_requests'.format(self._unit_name)
+ certs_data = self.all_joined_units.received[field] or {}
+ return [Certificate('client',
+ common_name,
+ cert['cert'],
+ cert['key'])
+ for common_name, cert in certs_data.items()]
+
+ @property
+ def client_certs_map(self):
+ """
+ Mapping of client [Certificate][] instances by their `common_name`.
+ """
+ return {cert.common_name: cert for cert in self.client_certs}
+
+ def request_server_cert(self, cn, sans=None, cert_name=None):
+ """
+ Request a server certificate and key be generated for the given
+ common name (`cn`) and optional list of alternative names (`sans`).
+
+ The `cert_name` is deprecated and not needed.
+
+ This can be called multiple times to request more than one server
+ certificate, although the common names must be unique. If called
+ again with the same common name, it will be ignored.
+ """
+ if not self.relations:
+ return
+ # assume we'll only be connected to one provider
+ to_publish_json = self.relations[0].to_publish
+ to_publish_raw = self.relations[0].to_publish_raw
+ if to_publish_raw['common_name'] in (None, '', cn):
+ # for backwards compatibility, first request goes in its own fields
+ to_publish_raw['common_name'] = cn
+ to_publish_json['sans'] = sans or []
+ cert_name = to_publish_raw.get('certificate_name') or cert_name
+ if cert_name is None:
+ cert_name = str(uuid.uuid4())
+ to_publish_raw['certificate_name'] = cert_name
+ else:
+ # subsequent requests go in the collection
+ requests = to_publish_json.get('cert_requests', {})
+ requests[cn] = {'sans': sans or []}
+ to_publish_json['cert_requests'] = requests
+
+ def add_request_server_cert(self, cn, sans):
+ """
+ Deprecated. Use [request_server_cert][] instead.
+ """
+ self.request_server_cert(cn, sans)
+
+ def request_server_certs(self):
+ """
+ Deprecated. Just use [request_server_cert][]; this does nothing.
+ """
+ pass
+
+ def request_client_cert(self, cn, sans):
+ """
+ Request a client certificate and key be generated for the given
+ common name (`cn`) and list of alternative names (`sans`).
+
+ This can be called multiple times to request more than one client
+ certificate, although the common names must be unique. If called
+ again with the same common name, it will be ignored.
+ """
+ if not self.relations:
+ return
+ # assume we'll only be connected to one provider
+ to_publish_json = self.relations[0].to_publish
+ requests = to_publish_json.get('client_cert_requests', {})
+ requests[cn] = {'sans': sans}
+ to_publish_json['client_cert_requests'] = requests
+
+ def request_application_cert(self, cn, sans):
+ """
+ Request an application certificate and key be generated for the given
+ common name (`cn`) and list of alternative names (`sans`) of this
+ unit and all peer units. All units will share a single certificate.
+ """
+ if not self.relations:
+ return
+ # assume we'll only be connected to one provider
+ to_publish_json = self.relations[0].to_publish
+ requests = to_publish_json.get('application_cert_requests', {})
+ requests[cn] = {'sans': sans}
+ to_publish_json['application_cert_requests'] = requests
diff --git a/etcd/hooks/relations/tls-certificates/tls_certificates_common.py b/etcd/hooks/relations/tls-certificates/tls_certificates_common.py
new file mode 100644
index 0000000..99a2f8c
--- /dev/null
+++ b/etcd/hooks/relations/tls-certificates/tls_certificates_common.py
@@ -0,0 +1,302 @@
+from charms.reactive import clear_flag, is_data_changed, data_changed
+
+
+class CertificateRequest(dict):
+ def __init__(self, unit, cert_type, cert_name, common_name, sans):
+ self._unit = unit
+ self._cert_type = cert_type
+ super().__init__({
+ 'certificate_name': cert_name,
+ 'common_name': common_name,
+ 'sans': sans,
+ })
+
+ @property
+ def _key(self):
+ return '.'.join((self._unit.relation.relation_id,
+ self.unit_name,
+ self.common_name))
+
+ def resolve_unit_name(self, unit):
+ """Return name of unit associated with this request.
+
+ unit_name should be provided in the relation data to ensure
+ compatibility with cross-model relations. If the unit name
+ is absent then fall back to unit_name attribute of the
+ unit associated with this request.
+
+ :param unit: Unit to extract name from
+ :type unit: charms.reactive.endpoints.RelatedUnit
+ :returns: Name of unit
+ :rtype: str
+ """
+ unit_name = unit.received_raw['unit_name']
+ if not unit_name:
+ unit_name = unit.unit_name
+ return unit_name
+
+ @property
+ def unit_name(self):
+ """Name of this unit.
+
+ :returns: Name of unit
+ :rtype: str
+ """
+ return self.resolve_unit_name(unit=self._unit).replace('/', '_')
+
+ @property
+ def application_name(self):
+ """Name of the application which the request came from.
+
+ :returns: Name of application
+ :rtype: str
+ """
+ return self.resolve_unit_name(unit=self._unit).split('/')[0]
+
+ @property
+ def cert_type(self):
+ """
+ Type of certificate, 'server' or 'client', being requested.
+ """
+ return self._cert_type
+
+ @property
+ def cert_name(self):
+ return self['certificate_name']
+
+ @property
+ def common_name(self):
+ return self['common_name']
+
+ @property
+ def sans(self):
+ return self['sans']
+
+ @property
+ def _publish_key(self):
+ if self.cert_type == 'server':
+ return '{}.processed_requests'.format(self.unit_name)
+ elif self.cert_type == 'client':
+ return '{}.processed_client_requests'.format(self.unit_name)
+ raise ValueError('Unknown cert_type: {}'.format(self.cert_type))
+
+ @property
+ def _server_cert_key(self):
+ return '{}.server.cert'.format(self.unit_name)
+
+ @property
+ def _server_key_key(self):
+ return '{}.server.key'.format(self.unit_name)
+
+ @property
+ def _is_top_level_server_cert(self):
+ return (self.cert_type == 'server' and
+ self.common_name == self._unit.received_raw['common_name'])
+
+ @property
+ def cert(self):
+ """
+ The cert published for this request, if any.
+ """
+ cert, key = None, None
+ if self._is_top_level_server_cert:
+ tpr = self._unit.relation.to_publish_raw
+ cert = tpr[self._server_cert_key]
+ key = tpr[self._server_key_key]
+ else:
+ tp = self._unit.relation.to_publish
+ certs_data = tp.get(self._publish_key, {})
+ cert_data = certs_data.get(self.common_name, {})
+ cert = cert_data.get('cert')
+ key = cert_data.get('key')
+ if cert and key:
+ return Certificate(self.cert_type, self.common_name, cert, key)
+ return None
+
+ @property
+ def is_handled(self):
+ has_cert = self.cert is not None
+ same_sans = not is_data_changed(self._key,
+ sorted(set(self.sans or [])))
+ return has_cert and same_sans
+
+ def set_cert(self, cert, key):
+ rel = self._unit.relation
+ if self._is_top_level_server_cert:
+ # backwards compatibility; if this is the cert that was requested
+ # as a single server cert, set it in the response as the single
+ # server cert
+ rel.to_publish_raw.update({
+ self._server_cert_key: cert,
+ self._server_key_key: key,
+ })
+ else:
+ data = rel.to_publish.get(self._publish_key, {})
+ data[self.common_name] = {
+ 'cert': cert,
+ 'key': key,
+ }
+ rel.to_publish[self._publish_key] = data
+ if not rel.endpoint.new_server_requests:
+ clear_flag(rel.endpoint.expand_name('{endpoint_name}.server'
+ '.cert.requested'))
+ if not rel.endpoint.new_requests:
+ clear_flag(rel.endpoint.expand_name('{endpoint_name}.'
+ 'certs.requested'))
+ data_changed(self._key, sorted(set(self.sans or [])))
+
+
+class ApplicationCertificateRequest(CertificateRequest):
+ """
+ A request for an application consistent certificate.
+
+ This is a request for a certificate that works for all units of an
+ application. All sans and cns are added together to produce one
+ certificate and the same certificate and key are sent to all the
+ units of an application. Only one ApplicationCertificateRequest
+ is needed per application.
+ """
+
+ @property
+ def _key(self):
+ """Key to identify this cert.
+
+ :returns: cert key
+ :rtype: str
+ """
+ return '{}.{}'.format(self._unit.relation.relation_id, 'app_cert')
+
+ @property
+ def cert(self):
+ """
+ The cert published for this request, if any.
+
+ :returns: Certificate
+ :rtype: Certificate or None
+ """
+ cert, key = None, None
+ tp = self._unit.relation.to_publish
+ certs_data = tp.get(self._publish_key, {})
+ cert_data = certs_data.get('app_data', {})
+ cert = cert_data.get('cert')
+ key = cert_data.get('key')
+ if cert and key:
+ return Certificate(self.cert_type, self.common_name, cert, key)
+ return None
+
+ @property
+ def is_handled(self):
+ """Whether the certificate has been handled.
+
+ :returns: If the cert has been handled
+ :rtype: bool
+ """
+ has_cert = self.cert is not None
+ same_sans = not is_data_changed(self._key,
+ sorted(set(self.sans or [])))
+ return has_cert and same_sans
+
+ @property
+ def sans(self):
+ """Generate a list of all sans from all units of application
+
+ Examine all units of the application and compile a list of
+ all sans. CNs are treated as additional san entries.
+
+ :returns: List of sans
+ :rtype: List[str]
+ """
+ _sans = []
+ for unit in self._unit.relation.units:
+ reqs = unit.received['application_cert_requests'] or {}
+ for cn, req in reqs.items():
+ _sans.append(cn)
+ _sans.extend(req['sans'])
+ return sorted(list(set(_sans)))
+
+ @property
+ def _request_key(self):
+ """Key used to request cert
+
+ :returns: Key used to request cert
+ :rtype: str
+ """
+ return 'application_cert_requests'
+
+ def derive_publish_key(self, unit=None):
+ """Derive the application cert publish key for a unit.
+
+ :param unit: Unit to extract name from
+ :type unit: charms.reactive.endpoints.RelatedUnit
+ :returns: publish key
+ :rtype: str
+ """
+ if not unit:
+ unit = self._unit
+ unit_name = self.resolve_unit_name(unit).replace('/', '_')
+ return '{}.processed_application_requests'.format(unit_name)
+
+ @property
+ def _publish_key(self):
+ """Key used to publish cert
+
+ :returns: Key used to publish cert
+ :rtype: str
+ """
+ return self.derive_publish_key(unit=self._unit)
+
+ def set_cert(self, cert, key):
+ """Send the cert and key to all units of the application
+
+ :param cert: TLS Certificate
+ :type cert: str
+ :param key: TLS Private Key
+ :type cert: str
+ """
+ rel = self._unit.relation
+ for unit in self._unit.relation.units:
+ pub_key = self.derive_publish_key(unit=unit)
+ data = rel.to_publish.get(
+ pub_key,
+ {})
+ data['app_data'] = {
+ 'cert': cert,
+ 'key': key,
+ }
+ rel.to_publish[pub_key] = data
+ if not rel.endpoint.new_application_requests:
+ clear_flag(rel.endpoint.expand_name(
+ '{endpoint_name}.application.certs.requested'))
+ data_changed(self._key, sorted(set(self.sans or [])))
+
+
+class Certificate(dict):
+ """
+ Represents a created certificate and key.
+
+ The ``cert_type``, ``common_name``, ``cert``, and ``key`` values can
+ be accessed either as properties or as the contents of the dict.
+ """
+ def __init__(self, cert_type, common_name, cert, key):
+ super().__init__({
+ 'cert_type': cert_type,
+ 'common_name': common_name,
+ 'cert': cert,
+ 'key': key,
+ })
+
+ @property
+ def cert_type(self):
+ return self['cert_type']
+
+ @property
+ def common_name(self):
+ return self['common_name']
+
+ @property
+ def cert(self):
+ return self['cert']
+
+ @property
+ def key(self):
+ return self['key']
diff --git a/etcd/hooks/relations/tls-certificates/tox.ini b/etcd/hooks/relations/tls-certificates/tox.ini
new file mode 100644
index 0000000..90de9d3
--- /dev/null
+++ b/etcd/hooks/relations/tls-certificates/tox.ini
@@ -0,0 +1,17 @@
+[tox]
+envlist = py3
+skipsdist = true
+
+[testenv]
+basepython=python3
+envdir={toxworkdir}/py3
+deps=
+ pytest
+ charms.reactive
+ pydoc-markdown
+
+[testenv:docs]
+commands=python make_docs
+
+[flake8]
+ignore=E402
diff --git a/etcd/hooks/start b/etcd/hooks/start
new file mode 100755
index 0000000..9858c6b
--- /dev/null
+++ b/etcd/hooks/start
@@ -0,0 +1,22 @@
+#!/usr/bin/env python3
+
+# Load modules from $JUJU_CHARM_DIR/lib
+import sys
+sys.path.append('lib')
+
+from charms.layer import basic # noqa
+basic.bootstrap_charm_deps()
+
+from charmhelpers.core import hookenv # noqa
+hookenv.atstart(basic.init_config_states)
+hookenv.atexit(basic.clear_config_states)
+
+
+# This will load and run the appropriate @hook and other decorated
+# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive,
+# and $JUJU_CHARM_DIR/hooks/relations.
+#
+# See https://jujucharms.com/docs/stable/authors-charm-building
+# for more information on this pattern.
+from charms.reactive import main # noqa
+main()
diff --git a/etcd/hooks/stop b/etcd/hooks/stop
new file mode 100755
index 0000000..9858c6b
--- /dev/null
+++ b/etcd/hooks/stop
@@ -0,0 +1,22 @@
+#!/usr/bin/env python3
+
+# Load modules from $JUJU_CHARM_DIR/lib
+import sys
+sys.path.append('lib')
+
+from charms.layer import basic # noqa
+basic.bootstrap_charm_deps()
+
+from charmhelpers.core import hookenv # noqa
+hookenv.atstart(basic.init_config_states)
+hookenv.atexit(basic.clear_config_states)
+
+
+# This will load and run the appropriate @hook and other decorated
+# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive,
+# and $JUJU_CHARM_DIR/hooks/relations.
+#
+# See https://jujucharms.com/docs/stable/authors-charm-building
+# for more information on this pattern.
+from charms.reactive import main # noqa
+main()
diff --git a/etcd/hooks/update-status b/etcd/hooks/update-status
new file mode 100755
index 0000000..9858c6b
--- /dev/null
+++ b/etcd/hooks/update-status
@@ -0,0 +1,22 @@
+#!/usr/bin/env python3
+
+# Load modules from $JUJU_CHARM_DIR/lib
+import sys
+sys.path.append('lib')
+
+from charms.layer import basic # noqa
+basic.bootstrap_charm_deps()
+
+from charmhelpers.core import hookenv # noqa
+hookenv.atstart(basic.init_config_states)
+hookenv.atexit(basic.clear_config_states)
+
+
+# This will load and run the appropriate @hook and other decorated
+# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive,
+# and $JUJU_CHARM_DIR/hooks/relations.
+#
+# See https://jujucharms.com/docs/stable/authors-charm-building
+# for more information on this pattern.
+from charms.reactive import main # noqa
+main()
diff --git a/etcd/hooks/upgrade-charm b/etcd/hooks/upgrade-charm
new file mode 100755
index 0000000..9858c6b
--- /dev/null
+++ b/etcd/hooks/upgrade-charm
@@ -0,0 +1,22 @@
+#!/usr/bin/env python3
+
+# Standard charms.reactive hook stub (identical for every hook): it
+# bootstraps the charm's Python dependencies, wires up config-state
+# bookkeeping, and then hands control to the reactive dispatcher.
+
+# Load modules from $JUJU_CHARM_DIR/lib
+import sys
+sys.path.append('lib')
+
+from charms.layer import basic  # noqa
+basic.bootstrap_charm_deps()
+
+from charmhelpers.core import hookenv  # noqa
+# Raise config.* states before handlers run, clear them afterwards.
+hookenv.atstart(basic.init_config_states)
+hookenv.atexit(basic.clear_config_states)
+
+
+# This will load and run the appropriate @hook and other decorated
+# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive,
+# and $JUJU_CHARM_DIR/hooks/relations.
+#
+# See https://jujucharms.com/docs/stable/authors-charm-building
+# for more information on this pattern.
+from charms.reactive import main  # noqa
+main()
diff --git a/etcd/icon.svg b/etcd/icon.svg
new file mode 100644
index 0000000..a6dddda
--- /dev/null
+++ b/etcd/icon.svg
@@ -0,0 +1,343 @@
+
+
+
+
diff --git a/etcd/layer.yaml b/etcd/layer.yaml
new file mode 100644
index 0000000..0706020
--- /dev/null
+++ b/etcd/layer.yaml
@@ -0,0 +1,47 @@
+"includes":
+- "layer:options"
+- "layer:basic"
+- "interface:nrpe-external-master"
+- "layer:debug"
+- "interface:tls-certificates"
+- "layer:cis-benchmark"
+- "layer:leadership"
+- "layer:nagios"
+- "layer:tls-client"
+- "layer:snap"
+- "layer:cdk-service-kicker"
+- "layer:status"
+- "interface:etcd"
+- "interface:etcd-proxy"
+"exclude": [".travis.yml", "tests", "tox.ini", "test-requirements.txt", "unit_tests",
+ ".tox", "__pycache__", "Makefile", ".coverage"]
+"options":
+ "basic":
+ "packages": ["rsync"]
+ "python_packages": []
+ "use_venv": !!bool "true"
+ "include_system_packages": !!bool "false"
+# These options are mirrored in the test suite as hard-coded values.
+# If these cert locations change, please update the test suite accordingly
+ "tls-client":
+ "ca_certificate_path": "/var/snap/etcd/common/ca.crt"
+ "server_certificate_path": "/var/snap/etcd/common/server.crt"
+ "server_key_path": "/var/snap/etcd/common/server.key"
+ "client_certificate_path": "/var/snap/etcd/common/client.crt"
+ "client_key_path": "/var/snap/etcd/common/client.key"
+ "cdk-service-kicker":
+ "services":
+ - "snap.etcd.etcd"
+ "cis-benchmark": {}
+ "debug": {}
+ "leadership": {}
+ "nagios": {}
+ "snap": {}
+ "status":
+ "patch-hookenv": !!bool "true"
+ "etcd":
+ "etcd_conf_dir": "/var/snap/etcd/common"
+ "etcd_data_dir": "/var/snap/etcd/current"
+ "etcd_daemon_process": "snap.etcd.etcd"
+"repo": "https://github.com/juju-solutions/layer-etcd.git"
+"is": "etcd"
diff --git a/etcd/lib/charms/layer/__init__.py b/etcd/lib/charms/layer/__init__.py
new file mode 100644
index 0000000..a8e0c64
--- /dev/null
+++ b/etcd/lib/charms/layer/__init__.py
@@ -0,0 +1,60 @@
+import sys
+from importlib import import_module
+from pathlib import Path
+
+
+def import_layer_libs():
+ """
+ Ensure that all layer libraries are imported.
+
+ This makes it possible to do the following:
+
+ from charms import layer
+
+ layer.foo.do_foo_thing()
+
+ Note: This function must be called after bootstrap.
+ """
+ for module_file in Path('lib/charms/layer').glob('*'):
+ module_name = module_file.stem
+ if module_name in ('__init__', 'basic', 'execd') or not (
+ module_file.suffix == '.py' or module_file.is_dir()
+ ):
+ continue
+ import_module('charms.layer.{}'.format(module_name))
+
+
+# Terrible hack to support the old terrible interface.
+# Try to get people to call layer.options.get() instead so
+# that we can remove this garbage.
+# Cribbed from https://stackoverflow.com/a/48100440/4941864
+class OptionsBackwardsCompatibilityHack(sys.modules[__name__].__class__):
+    # Module subclass that makes the ``charms.layer.options`` module itself
+    # callable, forwarding old-style ``options(section)`` calls to the new
+    # ``options.get()`` API.
+    def __call__(self, section=None, layer_file=None):
+        if layer_file is None:
+            return self.get(section=section)
+        else:
+            # Legacy callers may pass the layer file as a string; the new
+            # API expects a pathlib.Path.
+            return self.get(section=section,
+                            layer_file=Path(layer_file))
+
+
+def patch_options_interface():
+    # Swap in OptionsBackwardsCompatibilityHack as the class of the
+    # charms.layer.options module so that calling the module directly
+    # (the deprecated interface) delegates to options.get().
+    from charms.layer import options
+    # NOTE(review): checks only the minor version and so assumes a 3.x
+    # interpreter -- confirm if Python 4 ever becomes a target.
+    if sys.version_info.minor >= 5:
+        options.__class__ = OptionsBackwardsCompatibilityHack
+    else:
+        # Py 3.4 doesn't support changing the __class__, so we have to do it
+        # another way. The last line is needed because we already have a
+        # reference that doesn't get updated with sys.modules.
+        name = options.__name__
+        hack = OptionsBackwardsCompatibilityHack(name)
+        hack.get = options.get
+        sys.modules[name] = hack
+        sys.modules[__name__].options = hack
+
+
+# Apply the hack eagerly at import time.
+try:
+    patch_options_interface()
+except ImportError:
+    # This may fail if pyyaml hasn't been installed yet. But in that
+    # case, the bootstrap logic will try it again once it has.
+    pass
diff --git a/etcd/lib/charms/layer/basic.py b/etcd/lib/charms/layer/basic.py
new file mode 100644
index 0000000..7507203
--- /dev/null
+++ b/etcd/lib/charms/layer/basic.py
@@ -0,0 +1,446 @@
+import os
+import sys
+import re
+import shutil
+from distutils.version import LooseVersion
+from pkg_resources import Requirement
+from glob import glob
+from subprocess import check_call, check_output, CalledProcessError
+from time import sleep
+
+from charms import layer
+from charms.layer.execd import execd_preinstall
+
+
+def _get_subprocess_env():
+ env = os.environ.copy()
+ env['LANG'] = env.get('LANG', 'C.UTF-8')
+ return env
+
+
+def get_series():
+ """
+ Return series for a few known OS:es.
+ Tested as of 2019 november:
+ * centos6, centos7, rhel6.
+ * bionic
+ """
+ series = ""
+
+ # Looking for content in /etc/os-release
+ # works for ubuntu + some centos
+ if os.path.isfile('/etc/os-release'):
+ d = {}
+ with open('/etc/os-release', 'r') as rel:
+ for l in rel:
+ if not re.match(r'^\s*$', l):
+ k, v = l.split('=')
+ d[k.strip()] = v.strip().replace('"', '')
+ series = "{ID}{VERSION_ID}".format(**d)
+
+ # Looking for content in /etc/redhat-release
+ # works for redhat enterprise systems
+ elif os.path.isfile('/etc/redhat-release'):
+ with open('/etc/redhat-release', 'r') as redhatlsb:
+ # CentOS Linux release 7.7.1908 (Core)
+ line = redhatlsb.readline()
+ release = int(line.split("release")[1].split()[0][0])
+ series = "centos" + str(release)
+
+ # Looking for content in /etc/lsb-release
+ # works for ubuntu
+ elif os.path.isfile('/etc/lsb-release'):
+ d = {}
+ with open('/etc/lsb-release', 'r') as lsb:
+ for l in lsb:
+ k, v = l.split('=')
+ d[k.strip()] = v.strip()
+ series = d['DISTRIB_CODENAME']
+
+ # This is what happens if we cant figure out the OS.
+ else:
+ series = "unknown"
+ return series
+
+
+def bootstrap_charm_deps():
+    """
+    Set up the base charm dependencies so that the reactive system can run.
+
+    Installs OS packages, optionally builds a virtualenv, installs the
+    bundled wheelhouse, and finally re-execs the interpreter so that the
+    newly installed libraries are importable. Idempotent: a successful
+    run drops wheelhouse/.bootstrapped and later calls short-circuit.
+    """
+    # execd must happen first, before any attempt to install packages or
+    # access the network, because sites use this hook to do bespoke
+    # configuration and install secrets so the rest of this bootstrap
+    # and the charm itself can actually succeed. This call does nothing
+    # unless the operator has created and populated $JUJU_CHARM_DIR/exec.d.
+    execd_preinstall()
+    # ensure that $JUJU_CHARM_DIR/bin is on the path, for helper scripts
+
+    series = get_series()
+
+    # OMG?! is build-essentials needed?
+    ubuntu_packages = ['python3-pip',
+                       'python3-setuptools',
+                       'python3-yaml',
+                       'python3-dev',
+                       'python3-wheel',
+                       'build-essential']
+
+    # I'm not going to "yum group info "Development Tools"
+    # omitting above madness
+    centos_packages = ['python3-pip',
+                       'python3-setuptools',
+                       'python3-devel',
+                       'python3-wheel']
+
+    packages_needed = []
+    if 'centos' in series:
+        packages_needed = centos_packages
+    else:
+        packages_needed = ubuntu_packages
+
+    charm_dir = os.environ['JUJU_CHARM_DIR']
+    os.environ['PATH'] += ':%s' % os.path.join(charm_dir, 'bin')
+    # NOTE(review): the venv lives one level above the current working
+    # directory (relative path '../.venv').
+    venv = os.path.abspath('../.venv')
+    vbin = os.path.join(venv, 'bin')
+    vpip = os.path.join(vbin, 'pip')
+    vpy = os.path.join(vbin, 'python')
+    # The hook name (argv[0]) tells us whether we are on an upgrade path.
+    hook_name = os.path.basename(sys.argv[0])
+    is_bootstrapped = os.path.exists('wheelhouse/.bootstrapped')
+    is_charm_upgrade = hook_name == 'upgrade-charm'
+    is_series_upgrade = hook_name == 'post-series-upgrade'
+    is_post_upgrade = os.path.exists('wheelhouse/.upgraded')
+    is_upgrade = (not is_post_upgrade and
+                  (is_charm_upgrade or is_series_upgrade))
+    if is_bootstrapped and not is_upgrade:
+        # older subordinates might have downgraded charm-env, so we should
+        # restore it if necessary
+        install_or_update_charm_env()
+        activate_venv()
+        # the .upgrade file prevents us from getting stuck in a loop
+        # when re-execing to activate the venv; at this point, we've
+        # activated the venv, so it's safe to clear it
+        if is_post_upgrade:
+            os.unlink('wheelhouse/.upgraded')
+        return
+    if os.path.exists(venv):
+        try:
+            # focal installs or upgrades prior to PR 160 could leave the venv
+            # in a broken state which would prevent subsequent charm upgrades
+            _load_installed_versions(vpip)
+        except CalledProcessError:
+            is_broken_venv = True
+        else:
+            is_broken_venv = False
+        if is_upgrade or is_broken_venv:
+            # All upgrades should do a full clear of the venv, rather than
+            # just updating it, to bring in updates to Python itself
+            shutil.rmtree(venv)
+    if is_upgrade:
+        if os.path.exists('wheelhouse/.bootstrapped'):
+            os.unlink('wheelhouse/.bootstrapped')
+    # bootstrap wheelhouse
+    if os.path.exists('wheelhouse'):
+        pre_eoan = series in ('ubuntu12.04', 'precise',
+                              'ubuntu14.04', 'trusty',
+                              'ubuntu16.04', 'xenial',
+                              'ubuntu18.04', 'bionic')
+        pydistutils_lines = [
+            "[easy_install]\n",
+            "find_links = file://{}/wheelhouse/\n".format(charm_dir),
+            "no_index=True\n",
+            "index_url=\n",  # deliberately nothing here; disables it.
+        ]
+        if pre_eoan:
+            pydistutils_lines.append("allow_hosts = ''\n")
+        with open('/root/.pydistutils.cfg', 'w') as fp:
+            # make sure that easy_install also only uses the wheelhouse
+            # (see https://github.com/pypa/pip/issues/410)
+            fp.writelines(pydistutils_lines)
+        if 'centos' in series:
+            yum_install(packages_needed)
+        else:
+            apt_install(packages_needed)
+        from charms.layer import options
+        cfg = options.get('basic')
+        # include packages defined in layer.yaml
+        if 'centos' in series:
+            yum_install(cfg.get('packages', []))
+        else:
+            apt_install(cfg.get('packages', []))
+        # if we're using a venv, set it up
+        if cfg.get('use_venv'):
+            if not os.path.exists(venv):
+                series = get_series()
+                if series in ('ubuntu12.04', 'precise',
+                              'ubuntu14.04', 'trusty'):
+                    apt_install(['python-virtualenv'])
+                elif 'centos' in series:
+                    yum_install(['python-virtualenv'])
+                else:
+                    apt_install(['virtualenv'])
+                cmd = ['virtualenv', '-ppython3', '--never-download', venv]
+                if cfg.get('include_system_packages'):
+                    cmd.append('--system-site-packages')
+                check_call(cmd, env=_get_subprocess_env())
+            os.environ['PATH'] = ':'.join([vbin, os.environ['PATH']])
+            pip = vpip
+        else:
+            pip = 'pip3'
+            # save a copy of system pip to prevent `pip3 install -U pip`
+            # from changing it
+            if os.path.exists('/usr/bin/pip'):
+                shutil.copy2('/usr/bin/pip', '/usr/bin/pip.save')
+        pre_install_pkgs = ['pip', 'setuptools', 'setuptools-scm']
+        # we bundle these packages to work around bugs in older versions (such
+        # as https://github.com/pypa/pip/issues/56), but if the system already
+        # provided a newer version, downgrading it can cause other problems
+        _update_if_newer(pip, pre_install_pkgs)
+        # install the rest of the wheelhouse deps (extract the pkg names into
+        # a set so that we can ignore the pre-install packages and let pip
+        # choose the best version in case there are multiple from layer
+        # conflicts)
+        pkgs = _load_wheelhouse_versions().keys() - set(pre_install_pkgs)
+        reinstall_flag = '--force-reinstall'
+        if not cfg.get('use_venv', True) and pre_eoan:
+            reinstall_flag = '--ignore-installed'
+        check_call([pip, 'install', '-U', reinstall_flag, '--no-index',
+                    '--no-cache-dir', '-f', 'wheelhouse'] + list(pkgs),
+                   env=_get_subprocess_env())
+        # re-enable installation from pypi
+        os.remove('/root/.pydistutils.cfg')
+
+        # install pyyaml for centos7, since, unlike the ubuntu image, the
+        # default image for centos doesn't include pyyaml; see the discussion:
+        # https://discourse.jujucharms.com/t/charms-for-centos-lets-begin
+        if 'centos' in series:
+            check_call([pip, 'install', '-U', 'pyyaml'],
+                       env=_get_subprocess_env())
+
+        # install python packages from layer options
+        if cfg.get('python_packages'):
+            check_call([pip, 'install', '-U'] + cfg.get('python_packages'),
+                       env=_get_subprocess_env())
+        if not cfg.get('use_venv'):
+            # restore system pip to prevent `pip3 install -U pip`
+            # from changing it
+            if os.path.exists('/usr/bin/pip.save'):
+                shutil.copy2('/usr/bin/pip.save', '/usr/bin/pip')
+                os.remove('/usr/bin/pip.save')
+        # setup wrappers to ensure envs are used for scripts
+        install_or_update_charm_env()
+        for wrapper in ('charms.reactive', 'charms.reactive.sh',
+                        'chlp', 'layer_option'):
+            src = os.path.join('/usr/local/sbin', 'charm-env')
+            dst = os.path.join('/usr/local/sbin', wrapper)
+            if not os.path.exists(dst):
+                os.symlink(src, dst)
+        if cfg.get('use_venv'):
+            shutil.copy2('bin/layer_option', vbin)
+        else:
+            shutil.copy2('bin/layer_option', '/usr/local/bin/')
+        # re-link the charm copy to the wrapper in case charms
+        # call bin/layer_option directly (as was the old pattern)
+        os.remove('bin/layer_option')
+        os.symlink('/usr/local/sbin/layer_option', 'bin/layer_option')
+        # flag us as having already bootstrapped so we don't do it again
+        open('wheelhouse/.bootstrapped', 'w').close()
+        if is_upgrade:
+            # flag us as having already upgraded so we don't do it again
+            open('wheelhouse/.upgraded', 'w').close()
+        # Ensure that the newly bootstrapped libs are available.
+        # Note: this only seems to be an issue with namespace packages.
+        # Non-namespace-package libs (e.g., charmhelpers) are available
+        # without having to reload the interpreter. :/
+        reload_interpreter(vpy if cfg.get('use_venv') else sys.argv[0])
+
+
+def _load_installed_versions(pip):
+ pip_freeze = check_output([pip, 'freeze']).decode('utf8')
+ versions = {}
+ for pkg_ver in pip_freeze.splitlines():
+ try:
+ req = Requirement.parse(pkg_ver)
+ except ValueError:
+ continue
+ versions.update({
+ req.project_name: LooseVersion(ver)
+ for op, ver in req.specs if op == '=='
+ })
+ return versions
+
+
+def _load_wheelhouse_versions():
+ versions = {}
+ for wheel in glob('wheelhouse/*'):
+ pkg, ver = os.path.basename(wheel).rsplit('-', 1)
+ # nb: LooseVersion ignores the file extension
+ versions[pkg.replace('_', '-')] = LooseVersion(ver)
+ return versions
+
+
+def _update_if_newer(pip, pkgs):
+    """Install each of ``pkgs`` from the wheelhouse, but only if it is
+    not installed yet or the wheelhouse copy is newer.
+
+    NOTE(review): assumes every installed name in ``pkgs`` also has a
+    wheel in the wheelhouse; an installed package missing from the
+    wheelhouse would raise KeyError -- confirm callers guarantee this.
+    """
+    installed = _load_installed_versions(pip)
+    wheelhouse = _load_wheelhouse_versions()
+    for pkg in pkgs:
+        if pkg not in installed or wheelhouse[pkg] > installed[pkg]:
+            check_call([pip, 'install', '-U', '--no-index', '-f', 'wheelhouse',
+                        pkg], env=_get_subprocess_env())
+
+
+def install_or_update_charm_env():
+    """Copy the bundled bin/charm-env into /usr/local/sbin when it is
+    newer than the currently installed one.
+
+    A missing or non-functional charm-env (no --version) is treated as
+    version 0.0.0 on either side of the comparison.
+    """
+    # On Trusty python3-pkg-resources is not installed
+    try:
+        from pkg_resources import parse_version
+    except ImportError:
+        apt_install(['python3-pkg-resources'])
+        from pkg_resources import parse_version
+
+    try:
+        installed_version = parse_version(
+            check_output(['/usr/local/sbin/charm-env',
+                          '--version']).decode('utf8'))
+    except (CalledProcessError, FileNotFoundError):
+        # Not installed yet, or too old to support --version.
+        installed_version = parse_version('0.0.0')
+    try:
+        bundled_version = parse_version(
+            check_output(['bin/charm-env',
+                          '--version']).decode('utf8'))
+    except (CalledProcessError, FileNotFoundError):
+        bundled_version = parse_version('0.0.0')
+    if installed_version < bundled_version:
+        shutil.copy2('bin/charm-env', '/usr/local/sbin/')
+
+
+def activate_venv():
+    """
+    Activate the venv if enabled in ``layer.yaml``.
+
+    This is handled automatically for normal hooks, but actions might
+    need to invoke this manually, using something like:
+
+        # Load modules from $JUJU_CHARM_DIR/lib
+        import sys
+        sys.path.append('lib')
+
+        from charms.layer.basic import activate_venv
+        activate_venv()
+
+    This will ensure that modules installed in the charm's
+    virtual environment are available to the action.
+    """
+    from charms.layer import options
+    venv = os.path.abspath('../.venv')
+    vbin = os.path.join(venv, 'bin')
+    vpy = os.path.join(vbin, 'python')
+    use_venv = options.get('basic', 'use_venv')
+    # Only re-exec when a venv is wanted and we're not already running
+    # inside it (detected by '.venv' appearing in the interpreter path).
+    if use_venv and '.venv' not in sys.executable:
+        # activate the venv
+        os.environ['PATH'] = ':'.join([vbin, os.environ['PATH']])
+        # reload_interpreter calls os.execve and does not return; the
+        # re-exec'd process will re-enter this function and fall through.
+        reload_interpreter(vpy)
+    layer.patch_options_interface()
+    layer.import_layer_libs()
+
+
+def reload_interpreter(python):
+    """
+    Reload the python interpreter to ensure that all deps are available.
+
+    Newly installed modules in namespace packages sometimes seem to
+    not be picked up by Python 3.
+
+    Note: os.execve replaces the current process image, so this call
+    never returns.
+    """
+    os.execve(python, [python] + list(sys.argv), os.environ)
+
+
+def apt_install(packages):
+ """
+ Install apt packages.
+
+ This ensures a consistent set of options that are often missed but
+ should really be set.
+ """
+ if isinstance(packages, (str, bytes)):
+ packages = [packages]
+
+ env = _get_subprocess_env()
+
+ if 'DEBIAN_FRONTEND' not in env:
+ env['DEBIAN_FRONTEND'] = 'noninteractive'
+
+ cmd = ['apt-get',
+ '--option=Dpkg::Options::=--force-confold',
+ '--assume-yes',
+ 'install']
+ for attempt in range(3):
+ try:
+ check_call(cmd + packages, env=env)
+ except CalledProcessError:
+ if attempt == 2: # third attempt
+ raise
+ try:
+ # sometimes apt-get update needs to be run
+ check_call(['apt-get', 'update'], env=env)
+ except CalledProcessError:
+ # sometimes it's a dpkg lock issue
+ pass
+ sleep(5)
+ else:
+ break
+
+
+def yum_install(packages):
+ """ Installs packages with yum.
+ This function largely mimics the apt_install function for consistency.
+ """
+ if packages:
+ env = os.environ.copy()
+ cmd = ['yum', '-y', 'install']
+ for attempt in range(3):
+ try:
+ check_call(cmd + packages, env=env)
+ except CalledProcessError:
+ if attempt == 2:
+ raise
+ try:
+ check_call(['yum', 'update'], env=env)
+ except CalledProcessError:
+ pass
+ sleep(5)
+ else:
+ break
+ else:
+ pass
+
+
+def init_config_states():
+    """Translate charm config into reactive states (run via atstart).
+
+    For every option declared in config.yaml this sets:
+    * config.changed and config.changed.<opt> when the value changed,
+    * config.set.<opt> toggled on while the value is truthy,
+    * config.default.<opt> toggled on while the value equals its
+      declared default.
+    """
+    import yaml
+    from charmhelpers.core import hookenv
+    from charms.reactive import set_state
+    from charms.reactive import toggle_state
+    config = hookenv.config()
+    config_defaults = {}
+    config_defs = {}
+    config_yaml = os.path.join(hookenv.charm_dir(), 'config.yaml')
+    if os.path.exists(config_yaml):
+        with open(config_yaml) as fp:
+            config_defs = yaml.safe_load(fp).get('options', {})
+            config_defaults = {key: value.get('default')
+                               for key, value in config_defs.items()}
+    for opt in config_defs.keys():
+        if config.changed(opt):
+            set_state('config.changed')
+            set_state('config.changed.{}'.format(opt))
+        toggle_state('config.set.{}'.format(opt), config.get(opt))
+        toggle_state('config.default.{}'.format(opt),
+                     config.get(opt) == config_defaults[opt])
+
+
+def clear_config_states():
+    """Remove the per-option reactive states raised by
+    init_config_states and flush the unit KV store (run via atexit)."""
+    from charmhelpers.core import hookenv, unitdata
+    from charms.reactive import remove_state
+    config = hookenv.config()
+    remove_state('config.changed')
+    for opt in config.keys():
+        remove_state('config.changed.{}'.format(opt))
+        remove_state('config.set.{}'.format(opt))
+        remove_state('config.default.{}'.format(opt))
+    unitdata.kv().flush()
diff --git a/etcd/lib/charms/layer/execd.py b/etcd/lib/charms/layer/execd.py
new file mode 100644
index 0000000..438d9a1
--- /dev/null
+++ b/etcd/lib/charms/layer/execd.py
@@ -0,0 +1,114 @@
+# Copyright 2014-2016 Canonical Limited.
+#
+# This file is part of layer-basic, the reactive base layer for Juju.
+#
+# charm-helpers is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License version 3 as
+# published by the Free Software Foundation.
+#
+# charm-helpers is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with charm-helpers. If not, see .
+
+# This module may only import from the Python standard library.
+import os
+import sys
+import subprocess
+import time
+
+'''
+execd/preinstall
+
+Read the layer-basic docs for more info on how to use this feature.
+https://charmsreactive.readthedocs.io/en/latest/layer-basic.html#exec-d-support
+'''
+
+
+def default_execd_dir():
+ return os.path.join(os.environ['JUJU_CHARM_DIR'], 'exec.d')
+
+
+def execd_module_paths(execd_dir=None):
+ """Generate a list of full paths to modules within execd_dir."""
+ if not execd_dir:
+ execd_dir = default_execd_dir()
+
+ if not os.path.exists(execd_dir):
+ return
+
+ for subpath in os.listdir(execd_dir):
+ module = os.path.join(execd_dir, subpath)
+ if os.path.isdir(module):
+ yield module
+
+
+def execd_submodule_paths(command, execd_dir=None):
+ """Generate a list of full paths to the specified command within exec_dir.
+ """
+ for module_path in execd_module_paths(execd_dir):
+ path = os.path.join(module_path, command)
+ if os.access(path, os.X_OK) and os.path.isfile(path):
+ yield path
+
+
+def execd_sentinel_path(submodule_path):
+ module_path = os.path.dirname(submodule_path)
+ execd_path = os.path.dirname(module_path)
+ module_name = os.path.basename(module_path)
+ submodule_name = os.path.basename(submodule_path)
+ return os.path.join(execd_path,
+ '.{}_{}.done'.format(module_name, submodule_name))
+
+
+def execd_run(command, execd_dir=None, stop_on_error=True, stderr=None):
+ """Run command for each module within execd_dir which defines it."""
+ if stderr is None:
+ stderr = sys.stdout
+ for submodule_path in execd_submodule_paths(command, execd_dir):
+ # Only run each execd once. We cannot simply run them in the
+ # install hook, as potentially storage hooks are run before that.
+ # We cannot rely on them being idempotent.
+ sentinel = execd_sentinel_path(submodule_path)
+ if os.path.exists(sentinel):
+ continue
+
+ try:
+ subprocess.check_call([submodule_path], stderr=stderr,
+ universal_newlines=True)
+ with open(sentinel, 'w') as f:
+ f.write('{} ran successfully {}\n'.format(submodule_path,
+ time.ctime()))
+ f.write('Removing this file will cause it to be run again\n')
+ except subprocess.CalledProcessError as e:
+ # Logs get the details. We can't use juju-log, as the
+ # output may be substantial and exceed command line
+ # length limits.
+ print("ERROR ({}) running {}".format(e.returncode, e.cmd),
+ file=stderr)
+ print("STDOUT<.
+
+from charmhelpers.core import hookenv
+from charmhelpers.core import unitdata
+
+from charms import reactive
+from charms.reactive import not_unless
+
+
+__all__ = ['leader_get', 'leader_set']
+
+
+@not_unless('leadership.is_leader')
+def leader_set(*args, **kw):
+    '''Change leadership settings, per charmhelpers.core.hookenv.leader_set.
+
+    Settings may either be passed in as a single dictionary, or using
+    keyword arguments. All values must be strings.
+
+    The leadership.set.{key} reactive state will be set while the
+    leadership hook environment setting remains set.
+
+    Changed leadership settings will set the leadership.changed.{key}
+    and leadership.changed states. These states will remain set until
+    the following hook.
+
+    These state changes take effect immediately on the leader, and
+    in future hooks run on non-leaders. In this way both leaders and
+    non-leaders can share handlers, waiting on these states.
+    '''
+    # Accept either one positional dict or keyword arguments (mirroring
+    # hookenv.leader_set); both can be combined.
+    if args:
+        if len(args) > 1:
+            raise TypeError('leader_set() takes 1 positional argument but '
+                            '{} were given'.format(len(args)))
+        else:
+            settings = dict(args[0])
+    else:
+        settings = {}
+    settings.update(kw)
+    # Compare against the locally cached copy of the leadership settings
+    # so changed.* states can be raised immediately on the leader.
+    previous = unitdata.kv().getrange('leadership.settings.', strip=True)
+
+    for key, value in settings.items():
+        if value != previous.get(key):
+            reactive.set_state('leadership.changed.{}'.format(key))
+            reactive.set_state('leadership.changed')
+        # set.<key> tracks presence: a None value unsets the key.
+        reactive.helpers.toggle_state('leadership.set.{}'.format(key),
+                                      value is not None)
+    # Publish to Juju and refresh the local cache.
+    hookenv.leader_set(settings)
+    unitdata.kv().update(settings, prefix='leadership.settings.')
+
+
+def leader_get(attribute=None):
+    '''Return leadership settings, per charmhelpers.core.hookenv.leader_get.
+
+    Thin pass-through; unlike leader_set, this works on any unit.'''
+    return hookenv.leader_get(attribute)
diff --git a/etcd/lib/debug_script.py b/etcd/lib/debug_script.py
new file mode 100644
index 0000000..e156924
--- /dev/null
+++ b/etcd/lib/debug_script.py
@@ -0,0 +1,8 @@
+import os
+
+# Directory that debug-script output files live in; presumably exported
+# by the charm's debug tooling before this module is imported -- verify
+# against the caller.
+# NOTE(review): the name `dir` shadows the builtin of the same name; it
+# is kept because external scripts may reference debug_script.dir.
+dir = os.environ["DEBUG_SCRIPT_DIR"]
+
+
+def open_file(path, *args, **kwargs):
+    """ Open a file within the debug script dir """
+    return open(os.path.join(dir, path), *args, **kwargs)
diff --git a/etcd/lib/etcd_databag.py b/etcd/lib/etcd_databag.py
new file mode 100644
index 0000000..40207e8
--- /dev/null
+++ b/etcd/lib/etcd_databag.py
@@ -0,0 +1,123 @@
+from charms import layer
+from charmhelpers.core.hookenv import unit_get
+from charmhelpers.core.hookenv import config
+from charmhelpers.core.hookenv import is_leader
+from charmhelpers.core.hookenv import leader_get, leader_set
+from charmhelpers.core import unitdata
+from charms.reactive import is_state
+from etcd_lib import get_ingress_address
+from etcd_lib import get_bind_address
+
+import string
+import random
+import os
+
+
+class EtcdDatabag:
+    '''
+    This class represents a configuration object to ease configuration of an
+    etcd unit during deployment and reconfiguration. The full dict of data
+    when expanded looks like the following:
+
+    {'public_address': '127.0.0.1',
+     'cluster_bind_address': '127.0.0.1',
+     'db_bind_address': '127.0.0.1',
+     'cluster_address': '127.0.0.1',
+     'db_address': '127.0.0.1',
+     'unit_name': 'etcd0',
+     'port': '2380',
+     'management_port': '2379',
+     'ca_certificate': '/etc/ssl/etcd/ca.crt',
+     'server_certificate': '/etc/ssl/etcd/server.crt',
+     'server_key': '/etc/ssl/etcd/server.key',
+     'token': '8XG27B',
+     'cluster_state': 'existing'}
+    '''
+
+    def __init__(self):
+        # Unit-local KV store; persists cluster membership across hooks.
+        self.db = unitdata.kv()
+        self.cluster_bind_address = self.get_bind_address('cluster')
+        self.db_bind_address = self.get_bind_address('db')
+        self.port = config('port')
+        self.management_port = config('management_port')
+        # Live polled properties
+        self.public_address = unit_get('public-address')
+        self.cluster_address = get_ingress_address('cluster')
+        self.db_address = get_ingress_address('db')
+        # e.g. 'etcd/0' becomes 'etcd0'
+        self.unit_name = os.getenv('JUJU_UNIT_NAME').replace('/', '')
+
+        # Pull the TLS certificate paths from layer data
+        tls_opts = layer.options('tls-client')
+        ca_path = tls_opts['ca_certificate_path']
+        crt_path = tls_opts['server_certificate_path']
+        key_path = tls_opts['server_key_path']
+
+        # Pull the static etcd configuration from layer-data
+        etcd_opts = layer.options('etcd')
+        self.etcd_conf_dir = etcd_opts['etcd_conf_dir']
+        # This getter determines the current context of the storage path
+        # depending on if durable storage is mounted.
+        self.etcd_data_dir = self.storage_path()
+        self.etcd_daemon = etcd_opts['etcd_daemon_process']
+
+        self.ca_certificate = ca_path
+        self.server_certificate = crt_path
+        self.server_key = key_path
+
+        # Cluster concerns
+        self.cluster = self.db.get('etcd.cluster', '')
+        self.token = self.cluster_token()
+        self.cluster_state = self.db.get('etcd.cluster-state', 'existing')
+
+    def set_cluster(self, value):
+        ''' Set the cluster string for peer registration '''
+        self.cluster = value
+        self.db.set('etcd.cluster', value)
+
+    def set_cluster_state(self, value):
+        ''' Set the cluster state '''
+        self.cluster_state = value
+        self.db.set('etcd.cluster-state', value)
+
+    def cluster_token(self):
+        ''' Getter to return the unique cluster token.
+
+        Only the leader mints a new token; other units return whatever
+        the leader has published (possibly None early in deployment). '''
+        token = leader_get('token')
+        if not token and is_leader():
+            token = self.id_generator()
+            leader_set({'token': token})
+        return token
+
+    def id_generator(self, size=6):
+        ''' Return a random 6 character string for use in cluster init.
+
+        @params size - The size of the string to return in characters
+        '''
+        # Uppercase letters and digits only, e.g. '8XG27B'.
+        chars = string.ascii_uppercase + string.digits
+        return ''.join(random.choice(chars) for _ in range(size))
+
+    def storage_path(self):
+        ''' Storage mounts are limited in snap confinement. Default behavior
+        is to version the database files in $SNAP_DATA. However the user can
+        attach durable storage, which is mounted in /media. We need a common
+        method to determine which storage path we are concerned with '''
+
+        etcd_opts = layer.options('etcd')
+
+        if is_state('data.volume.attached'):
+            return "/media/etcd/data"
+        else:
+            return etcd_opts['etcd_data_dir']
+
+    def get_bind_address(self, endpoint_name):
+        ''' Returns the address that the service binds to. If the config
+        parameter 'bind_to_all_interfaces' is set to true, it returns 0.0.0.0
+        If 'bind_to_all_interfaces' is set to false, it returns the
+        bind address of the endpoint_name received as parameter
+
+        @param endpoint_name name the endpoint from where the
+        bind address is obtained
+        '''
+        if bool(config('bind_to_all_interfaces')):
+            return '0.0.0.0'
+
+        return get_bind_address(endpoint_name)
diff --git a/etcd/lib/etcd_lib.py b/etcd/lib/etcd_lib.py
new file mode 100644
index 0000000..12ef193
--- /dev/null
+++ b/etcd/lib/etcd_lib.py
@@ -0,0 +1,68 @@
+from charmhelpers.core.hookenv import network_get, unit_private_ip
+
+
+def get_ingress_addresses(endpoint_name):
+ ''' Returns all ingress-addresses belonging to the named endpoint, if
+ available. Falls back to private-address if necessary. '''
+ try:
+ data = network_get(endpoint_name)
+ except NotImplementedError:
+ return [unit_private_ip()]
+
+ if 'ingress-addresses' in data:
+ return data['ingress-addresses']
+ else:
+ return [unit_private_ip()]
+
+
+def get_ingress_address(endpoint_name):
+ ''' Returns an ingress-address belonging to the named endpoint, if
+ available. Falls back to private-address if necessary. '''
+ return get_ingress_addresses(endpoint_name)[0]
+
+
+def get_bind_address(endpoint_name):
+ ''' Returns the first bind-address found in network info
+ belonging to the named endpoint, if available.
+ Falls back to private-address if necessary.
+
+ @param endpoint_name the endpoint from where taking the
+ bind address
+ '''
+ try:
+ data = network_get(endpoint_name)
+ except NotImplementedError:
+ return unit_private_ip()
+
+ # Consider that network-get returns something like:
+ #
+ # bind-addresses:
+ # - macaddress: 02:d0:9e:31:d9:e0
+ # interfacename: ens5
+ # addresses:
+ # - hostname: ""
+ # address: 172.31.5.4
+ # cidr: 172.31.0.0/20
+ # - hostname: ""
+ # address: 172.31.5.4
+ # cidr: 172.31.0.0/20
+ # - macaddress: 8a:32:d7:8d:f6:9a
+ # interfacename: fan-252
+ # addresses:
+ # - hostname: ""
+ # address: 252.5.4.1
+ # cidr: 252.0.0.0/12
+ # egress-subnets:
+ # - 172.31.5.4/32
+ # ingress-addresses:
+ # - 172.31.5.4
+ # - 172.31.5.4
+ # - 252.5.4.1
+ if 'bind-addresses' in data:
+ bind_addresses = data['bind-addresses']
+ if len(bind_addresses) > 0:
+ if 'addresses' in bind_addresses[0]:
+ if len(bind_addresses[0]['addresses']) > 0:
+ return bind_addresses[0]['addresses'][0]['address']
+
+ return unit_private_ip()
diff --git a/etcd/lib/etcdctl.py b/etcd/lib/etcdctl.py
new file mode 100644
index 0000000..b903217
--- /dev/null
+++ b/etcd/lib/etcdctl.py
@@ -0,0 +1,214 @@
+from charms import layer
+from charmhelpers.core.hookenv import log
+from subprocess import CalledProcessError
+from subprocess import check_output
+import os
+
+
+def etcdctl_command():
+ if os.path.isfile('/snap/bin/etcd.etcdctl'):
+ return '/snap/bin/etcd.etcdctl'
+ return 'etcdctl'
+
+
+class EtcdCtl:
+    ''' etcdctl modeled as a python class. This python wrapper consumes
+    and exposes some of the commands contained in etcdctl. Related to unit
+    registration, cluster health, and other operations '''
+    class CommandFailed(Exception):
+        # Raised by run() when the etcdctl subprocess exits non-zero;
+        # always chained from the underlying CalledProcessError.
+        pass
+
+    def register(self, cluster_data):
+        ''' Perform self registration against the etcd leader and returns the
+        raw output response.
+
+        @params cluster_data - a dict of data to fill out the request to
+        push our registration to the leader
+        requires keys: leader_address, port, unit_name, cluster_address,
+        management_port
+
+        @return dict with key 'cluster' (the ETCD_INITIAL_CLUSTER value
+        parsed from the command output); empty if that line was absent.
+
+        @raises EtcdCtl.CommandFailed when 'member add' fails.
+        '''
+        # Build a connection string for the cluster data.
+        connection = get_connection_string([cluster_data['cluster_address']],
+                                           cluster_data['management_port'])
+
+        command = 'member add {} {}'.format(
+            cluster_data['unit_name'],
+            connection
+        )
+
+        try:
+            # v2 API: 'member add' prints the ETCD_INITIAL_CLUSTER data the
+            # new member needs in order to join.
+            result = self.run(command, endpoints=cluster_data['leader_address'], api=2)
+        except EtcdCtl.CommandFailed:
+            log('Notice: Unit failed self registration', 'WARNING')
+            raise
+
+        # Example output parsed below:
+        # ['Added member named etcd12 with ID b9ab5b5a2e4baec5 to cluster',
+        #  '', 'ETCD_NAME="etcd12"',
+        #  'ETCD_INITIAL_CLUSTER="etcd11=https://10.113.96.26:2380,etcd12=https://10.113.96.206:2380"', # noqa
+        #  'ETCD_INITIAL_CLUSTER_STATE="existing"', '']
+
+        reg = {}
+
+        for line in result.split('\n'):
+            if 'ETCD_INITIAL_CLUSTER=' in line:
+                # Strip the KEY=" prefix and the trailing quote.
+                reg['cluster'] = line.split('="')[-1].rstrip('"')
+        return reg
+
+    def unregister(self, unit_id, leader_address=None):
+        ''' Perform self deregistration during unit teardown
+
+        @params unit_id - the ID for the unit assigned by etcd. Obtainable from
+        member_list method.
+
+        @params leader_address - The endpoint to communicate with the leader in
+        the event of self deregistration. None selects run()'s local v2
+        default endpoint.
+        '''
+        return self.run(['member', 'remove', unit_id], endpoints=leader_address, api=2)
+
+    def member_list(self, leader_address=False):
+        ''' Returns the output from `etcdctl member list` as a python dict
+        organized by unit_name, containing all the data-points in the resulting
+        response.
+
+        @params leader_address - endpoint to query; the default of False
+        runs the command without an explicit endpoint flag.
+
+        NOTE(review): all not-yet-started members share the single dict
+        key 'unstarted', so multiple unstarted members overwrite each
+        other -- confirm callers only ever expect one.
+        '''
+        command = 'member list'
+
+        members = {}
+        out = self.run(command, endpoints=leader_address, api=2)
+        raw_member_list = out.strip('\n').split('\n')
+        # Expect output like this:
+        # 4f24ee16c889f6c1: name=etcd20 peerURLs=https://10.113.96.197:2380 clientURLs=https://10.113.96.197:2379 # noqa
+        # edc04bb81479d7e8: name=etcd21 peerURLs=https://10.113.96.243:2380 clientURLs=https://10.113.96.243:2379 # noqa
+        # edc0dsa81479d7e8[unstarted]: peerURLs=https://10.113.96.124:2380 # noqa
+
+        for unit in raw_member_list:
+            if '[unstarted]' in unit:
+                # Member added but not yet joined: only the GUID and
+                # (optionally) its peer URLs are known.
+                unit_guid = unit.split('[')[0]
+                members['unstarted'] = {'unit_id': unit_guid}
+                if 'peerURLs=' in unit:
+                    peer_urls = unit.split(' ')[1].split("=")[-1]
+                    members['unstarted']['peer_urls'] = peer_urls
+                continue
+            # Started member: 'guid: name=... peerURLs=... clientURLs=...'
+            unit_guid = unit.split(':')[0]
+            unit_name = unit.split(' ')[1].split("=")[-1]
+            peer_urls = unit.split(' ')[2].split("=")[-1]
+            client_urls = unit.split(' ')[3].split("=")[-1]
+
+            members[unit_name] = {'unit_id': unit_guid,
+                                  'name': unit_name,
+                                  'peer_urls': peer_urls,
+                                  'client_urls': client_urls}
+        return members
+
+    def member_update(self, unit_id, uri):
+        ''' Update the etcd cluster member by unit_id with a new uri. This
+        allows us to change protocol, address or port.
+        @params unit_id: The string ID of the unit in the cluster.
+        @params uri: The string universal resource indicator of where to
+        contact the peer.
+        @return raw command output, or '' when the update failed. '''
+        out = ''
+        try:
+            command = 'member update {} {}'.format(unit_id, uri)
+            log(command)
+            # Run the member update command for the existing unit_id.
+            # NOTE(review): uses run()'s v3 API default, unlike the other
+            # member commands above which pass api=2 -- confirm intended.
+            out = self.run(command)
+        except EtcdCtl.CommandFailed:
+            log('Failed to update member {}'.format(unit_id), 'WARNING')
+        return out
+
+    def cluster_health(self, output_only=False):
+        ''' Returns the output of etcdctl cluster-health as a python dict
+        organized by topical information with detailed unit output
+
+        @params output_only - when True, return the raw command output
+        string instead of the parsed dict.
+        '''
+        health = {}
+        try:
+            out = self.run('cluster-health', endpoints=False, api=2)
+            if output_only:
+                return out
+            # The final line is the cluster summary; earlier lines are
+            # per-member details.
+            # NOTE(review): the [0:-2] slice drops the last member line
+            # in addition to the summary -- [0:-1] looks intended;
+            # confirm against real etcdctl output.
+            health_output = out.strip('\n').split('\n')
+            health['status'] = health_output[-1]
+            health['units'] = health_output[0:-2]
+        except EtcdCtl.CommandFailed:
+            log('Notice: Unit failed cluster-health check', 'WARNING')
+            health['status'] = 'cluster is unhealthy see log file for details.'
+            health['units'] = []
+        return health
+
+    def run(self, arguments, endpoints=None, api=3):
+        ''' Wrapper to subprocess calling output. This is a convenience
+        method to clean up the calls to subprocess and append TLS data
+
+        @params arguments - etcdctl arguments as a string, list or tuple.
+        @params endpoints - endpoint(s) to target; None selects a local
+        default, False omits the endpoint flag entirely.
+        @params api - etcd API version (2 or 3); selects the matching TLS
+        environment variable names and endpoint flag style.
+
+        @return decoded stdout of the command.
+        @raises EtcdCtl.CommandFailed on non-zero exit.
+        @raises NotImplementedError for unsupported api values.
+        @raises RuntimeError when arguments has an unsupported type.
+        '''
+        env = {}
+        command = [etcdctl_command()]
+        # TLS material paths come from the tls-client layer options.
+        opts = layer.options('tls-client')
+        ca_path = opts['ca_certificate_path']
+        crt_path = opts['server_certificate_path']
+        key_path = opts['server_key_path']
+
+        if api == 3:
+            env['ETCDCTL_API'] = '3'
+            env['ETCDCTL_CACERT'] = ca_path
+            env['ETCDCTL_CERT'] = crt_path
+            env['ETCDCTL_KEY'] = key_path
+            if endpoints is None:
+                endpoints = 'http://127.0.0.1:4001'
+
+        elif api == 2:
+            # The v2 tool uses *_FILE variable names.
+            env['ETCDCTL_API'] = '2'
+            env['ETCDCTL_CA_FILE'] = ca_path
+            env['ETCDCTL_CERT_FILE'] = crt_path
+            env['ETCDCTL_KEY_FILE'] = key_path
+            if endpoints is None:
+                endpoints = ':4001'
+
+        else:
+            raise NotImplementedError(
+                'etcd api version {} not supported'.format(api))
+
+        if isinstance(arguments, str):
+            command.extend(arguments.split())
+        elif isinstance(arguments, list) or isinstance(arguments, tuple):
+            command.extend(arguments)
+        else:
+            raise RuntimeError(
+                'arguments not correct type; must be string, list or tuple')
+
+        if endpoints is not False:
+            if api == 3:
+                command.extend(['--endpoints', endpoints])
+            elif api == 2:
+                # v2 expects --endpoint directly after the binary name.
+                command.insert(1, '--endpoint')
+                command.insert(2, endpoints)
+
+        try:
+            return check_output(
+                command,
+                env=env
+            ).decode('utf-8')
+        except CalledProcessError as e:
+            # Log the full invocation context before surfacing failure.
+            log(command, 'ERROR')
+            log(env, 'ERROR')
+            log(e.stdout, 'ERROR')
+            log(e.stderr, 'ERROR')
+            raise EtcdCtl.CommandFailed() from e
+
+    def version(self):
+        ''' Return the version of etcdctl, e.g. '3.4.5'. '''
+        out = check_output(
+            [etcdctl_command(), 'version'],
+            env={'ETCDCTL_API': '3'}
+        ).decode('utf-8')
+
+        if out == "No help topic for 'version'\n":
+            # Probably on etcd2, where the spelling is --version.
+            out = check_output(
+                [etcdctl_command(), '--version']
+            ).decode('utf-8')
+
+        # First line looks like 'etcdctl version: 3.4.5'; take the third
+        # whitespace-separated token.
+        return out.split('\n')[0].split()[2]
+
+
+def get_connection_string(members, port, protocol='https'):
+ ''' Return a connection string for the list of members using the provided
+ port and protocol (defaults to https)'''
+ connections = []
+ for address in members:
+ connections.append('{}://{}:{}'.format(protocol, address, port))
+ connection_string = ','.join(connections)
+ return connection_string
diff --git a/etcd/make_docs b/etcd/make_docs
new file mode 100644
index 0000000..dcd4c1f
--- /dev/null
+++ b/etcd/make_docs
@@ -0,0 +1,20 @@
+#!.tox/py3/bin/python
+
+import os
+import sys
+from shutil import rmtree
+from unittest.mock import patch
+
+import pydocmd.__main__
+
+
+# Build the pydocmd documentation for this charm's python modules.
+# charmhelpers.core.hookenv.metadata is mocked so importing charm code
+# does not require a live Juju hook environment.
+with patch('charmhelpers.core.hookenv.metadata') as metadata:
+    sys.path.insert(0, 'lib')
+    sys.path.insert(1, 'reactive')
+    print(sys.argv)
+    # Default to the 'build' subcommand when none was given.
+    if len(sys.argv) == 1:
+        sys.argv.extend(['build'])
+    pydocmd.__main__.main()
+    # Remove pydocmd build artifacts and the unit state database created
+    # as a side effect of importing charm modules.
+    rmtree('_build')
+    if os.path.exists('.unit-state.db'):
+        os.remove('.unit-state.db')
diff --git a/etcd/metadata.yaml b/etcd/metadata.yaml
new file mode 100644
index 0000000..437060b
--- /dev/null
+++ b/etcd/metadata.yaml
@@ -0,0 +1,55 @@
+"name": "etcd"
+"summary": "Deploy a TLS terminated ETCD Cluster"
+"maintainers":
+- "Tim Van Steenburgh "
+- "George Kraft "
+- "Rye Terrell "
+- "Konstantinos Tsakalozos "
+- "Charles Butler "
+"description": |
+ This charm supports deploying Etcd from the upstream binaries with resources.
+ It will also TLS wrap your service, and distribute client keys to any service
+ connecting. Etcd is a highly available key/value store.
+"tags":
+- "misc"
+- "database"
+- "keystore"
+"series":
+- "focal"
+- "bionic"
+- "xenial"
+"requires":
+ "certificates":
+ "interface": "tls-certificates"
+"provides":
+ "nrpe-external-master":
+ "interface": "nrpe-external-master"
+ "scope": "container"
+ "db":
+ "interface": "etcd"
+ "proxy":
+ "interface": "etcd-proxy"
+"peers":
+ "cluster":
+ "interface": "etcd"
+"resources":
+ "snapshot":
+ "type": "file"
+ "filename": "snapshot.tar.gz"
+ "description": "Tarball snapshot of an etcd clusters data."
+ "core":
+ "type": "file"
+ "filename": "core.snap"
+ "description": "Snap package of core"
+ "etcd":
+ "type": "file"
+ "filename": "etcd.snap"
+ "description": "Snap package of etcd"
+"storage":
+ "data":
+ "type": "block"
+ "description": "Data volume to store the Etcd wal file and data."
+ "multiple":
+ "range": "0-1"
+ "minimum-size": "1G"
+"subordinate": !!bool "false"
diff --git a/etcd/pydocmd.yml b/etcd/pydocmd.yml
new file mode 100644
index 0000000..ab3b2ef
--- /dev/null
+++ b/etcd/pydocmd.yml
@@ -0,0 +1,16 @@
+site_name: 'Status Management Layer'
+
+generate:
+ - status.md:
+ - charms.layer.status.WorkloadState
+ - charms.layer.status.maintenance
+ - charms.layer.status.maint
+ - charms.layer.status.blocked
+ - charms.layer.status.waiting
+ - charms.layer.status.active
+ - charms.layer.status.status_set
+
+pages:
+ - Status Management Layer: status.md
+
+gens_dir: docs
diff --git a/etcd/reactive/__init__.py b/etcd/reactive/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/etcd/reactive/cdk_service_kicker.py b/etcd/reactive/cdk_service_kicker.py
new file mode 100644
index 0000000..f7fd33a
--- /dev/null
+++ b/etcd/reactive/cdk_service_kicker.py
@@ -0,0 +1,32 @@
+import os
+import subprocess
+from charms import layer
+from charms.reactive import hook, when_not, remove_state, set_state
+from charmhelpers.core.templating import render
+
+
+@hook('upgrade-charm')
+def upgrade_charm():
+    # Drop the installed flag so install_cdk_service_kicker re-renders
+    # the script and unit files with any templates shipped by the new
+    # charm revision.
+    remove_state('cdk-service-kicker.installed')
+
+
+@when_not('cdk-service-kicker.installed')
+def install_cdk_service_kicker():
+    ''' Installs the cdk-service-kicker service. Workaround for
+    https://github.com/juju-solutions/bundle-canonical-kubernetes/issues/357
+    '''
+    # Render the kicker script with the space-separated service list
+    # taken from this layer's options.
+    source = 'cdk-service-kicker'
+    dest = '/usr/bin/cdk-service-kicker'
+    services = layer.options('cdk-service-kicker').get('services')
+    context = {'services': ' '.join(services)}
+    render(source, dest, context)
+    # 0o775: owner/group rwx, world rx -- the script must be executable.
+    os.chmod('/usr/bin/cdk-service-kicker', 0o775)
+
+    # Install and enable the systemd unit that runs the script.
+    source = 'cdk-service-kicker.service'
+    dest = '/etc/systemd/system/cdk-service-kicker.service'
+    context = {}
+    render(source, dest, context)
+    command = ['systemctl', 'enable', 'cdk-service-kicker']
+    subprocess.check_call(command)
+
+    set_state('cdk-service-kicker.installed')
diff --git a/etcd/reactive/etcd.py b/etcd/reactive/etcd.py
new file mode 100644
index 0000000..4a24d80
--- /dev/null
+++ b/etcd/reactive/etcd.py
@@ -0,0 +1,941 @@
+#!/usr/bin/python3
+
+from charms import layer
+
+from charms.layer import snap
+
+from charms.reactive import endpoint_from_flag
+from charms.reactive import when
+from charms.reactive import when_any
+from charms.reactive import when_not
+from charms.reactive import is_state
+from charms.reactive import set_state
+from charms.reactive import is_flag_set
+from charms.reactive import clear_flag
+from charms.reactive import remove_state
+from charms.reactive import hook
+from charms.reactive.helpers import data_changed
+
+from charms.templating.jinja2 import render
+
+from charmhelpers.core.hookenv import log
+from charmhelpers.core.hookenv import leader_set
+from charmhelpers.core.hookenv import leader_get
+from charmhelpers.core.hookenv import storage_get
+
+from charmhelpers.core.hookenv import application_version_set
+from charmhelpers.core.hookenv import open_port
+from charmhelpers.core.hookenv import close_port
+from charmhelpers.core.host import write_file
+from charmhelpers.core import hookenv
+from charmhelpers.core import host
+from charmhelpers.contrib.charmsupport import nrpe
+
+from charms.layer import status
+
+from etcdctl import EtcdCtl
+from etcdctl import get_connection_string
+from etcd_databag import EtcdDatabag
+from etcd_lib import get_ingress_address, get_ingress_addresses
+
+from shlex import split
+from subprocess import check_call
+from subprocess import check_output
+from subprocess import CalledProcessError
+from shutil import copyfile
+
+import os
+import charms.leadership # noqa
+import socket
+import time
+import traceback
+import yaml
+import shutil
+import random
+
+
+# Layer Note: the @when_not etcd.installed state checks are relating to
+# a boundary that was superimposed by the etcd-24 release which added support
+# for snaps. Snapped etcd is now the only supported mechanism by this charm.
+# References to this state will be wiped sometime within the next 10 releases
+# of the charm.
+
+
+# Override the default nagios shortname regex to allow periods, which we
+# need because our bin names contain them (e.g. 'snap.foo.daemon'). The
+# default regex in charmhelpers doesn't allow periods, but nagios itself does.
+nrpe.Check.shortname_re = r'[\.A-Za-z0-9-_]+$'
+
+
+def get_target_etcd_channel():
+ """
+ Check whether or not etcd is already installed. i.e. we're
+ going through an upgrade. If so, leave the etcd version alone,
+ if we're a new install, we can set the default channel here.
+
+ If the user has specified a version, then just return that.
+
+ :return: String snap channel
+ """
+ channel = hookenv.config('channel')
+ if channel == 'auto':
+ if snap.is_installed('etcd'):
+ return False
+ else:
+ return '3.4/stable'
+ else:
+ return channel
+
+
+@when('etcd.installed')
+def snap_upgrade_notice():
+    # 'etcd.installed' marks a legacy pre-snap install; those units must
+    # be migrated by hand, see the linked documentation.
+    status.blocked('Manual migration required. http://bit.ly/2oznAUZ')
+
+
+@when_any('etcd.registered', 'etcd.leader.configured')
+@when_not('etcd.installed')
+@when_not('upgrade.series.in-progress')
+def check_cluster_health():
+    ''' report on the cluster health every 5 minutes'''
+    # NOTE(review): runs on every hook dispatch; the periodic behavior
+    # comes from Juju's update-status interval (5 minutes by default).
+    etcdctl = EtcdCtl()
+    health = etcdctl.cluster_health()
+
+    # Determine if the unit is healthy or unhealthy
+    if 'unhealthy' in health['status']:
+        unit_health = "UnHealthy"
+    else:
+        unit_health = "Errored"
+
+
+@when('snap.installed.etcd')
+@when_not('etcd.installed')
+def set_app_version():
+    ''' Surface the etcd application version on juju status. '''
+    # note - the snap doesn't place an etcd alias on disk. This shall infer
+    # the version from etcdctl, as the snap distributes both in lockstep.
+    application_version_set(etcd_version())
+
+
+@when_not('certificates.available')
+def missing_relation_notice():
+    ''' Block until a tls-certificates relation (the CA) is present. '''
+    status.blocked('Missing relation to certificate authority.')
+
+
+@when('certificates.available')
+def prepare_tls_certificates(tls):
+    ''' Request a server certificate from the CA, using the public IP as
+    common name and all known unit/peer addresses as SANs.
+
+    @params tls - the tls-certificates relation endpoint. '''
+    common_name = hookenv.unit_public_ip()
+    sans = set()
+    sans.add(hookenv.unit_public_ip())
+    sans.update(get_ingress_addresses('db'))
+    sans.update(get_ingress_addresses('cluster'))
+    sans.add(socket.gethostname())
+
+    # add cluster peers as alt names when present
+    cluster = endpoint_from_flag('cluster.joined')
+    if cluster:
+        for ip in cluster.get_db_ingress_addresses():
+            sans.add(ip)
+
+    # Sort for a deterministic request; the certificate name is the unit
+    # name made filesystem-safe.
+    sans = sorted(sans)
+    certificate_name = hookenv.local_unit().replace('/', '_')
+    tls.request_server_cert(common_name, sans, certificate_name)
+
+
+@hook('upgrade-charm')
+def remove_states():
+    ''' Clear stale reactive states on charm upgrade and force a config
+    re-render. '''
+    # stale state cleanup (pre rev6)
+    remove_state('etcd.tls.secured')
+    remove_state('etcd.ssl.placed')
+    remove_state('etcd.ssl.exported')
+    remove_state('etcd.nrpe.configured')
+    # force a config re-render in case template changed
+    set_state('etcd.rerender-config')
+
+
+@hook('pre-series-upgrade')
+def pre_series_upgrade():
+    # Stop etcd for the duration of the OS series upgrade; most other
+    # handlers are gated on @when_not('upgrade.series.in-progress').
+    bag = EtcdDatabag()
+    host.service_pause(bag.etcd_daemon)
+    status.blocked('Series upgrade in progress')
+
+
+@hook('post-series-upgrade')
+def post_series_upgrade():
+    # Bring etcd back up once the OS series upgrade completes.
+    bag = EtcdDatabag()
+    host.service_resume(bag.etcd_daemon)
+
+
+@when('snap.installed.etcd')
+@when('leadership.is_leader')
+@when_any('config.changed.port', 'config.changed.management_port')
+@when_not('etcd.installed')
+@when_not('upgrade.series.in-progress')
+def leader_config_changed():
+    ''' The leader executes the runtime configuration update for the cluster,
+    as it is the controlling unit. Will render config, close and open ports and
+    restart the etcd service.'''
+    configuration = hookenv.config()
+    previous_port = configuration.previous('port')
+    log('Previous port: {0}'.format(previous_port))
+    previous_mgmt_port = configuration.previous('management_port')
+    log('Previous management port: {0}'.format(previous_mgmt_port))
+
+    # Only migrate when prior values exist; on the first config-changed
+    # previous() returns None and there is nothing to update yet.
+    if previous_port and previous_mgmt_port:
+        bag = EtcdDatabag()
+        etcdctl = EtcdCtl()
+        members = etcdctl.member_list()
+        # Iterate over all the members in the list.
+        for unit_name in members:
+            # Grab the previous peer url and replace the management port.
+            peer_urls = members[unit_name]['peer_urls']
+            log('Previous peer url: {0}'.format(peer_urls))
+            old_port = ':{0}'.format(previous_mgmt_port)
+            new_port = ':{0}'.format(configuration.get('management_port'))
+            url = peer_urls.replace(old_port, new_port)
+            # Update the member's peer_urls with the new ports.
+            log(etcdctl.member_update(members[unit_name]['unit_id'], url))
+        # Render just the leaders configuration with the new values.
+        render_config()
+        address = get_ingress_address('cluster')
+        # Broadcast the refreshed leader address so followers can rejoin.
+        leader_set({'leader_address':
+                    get_connection_string([address],
+                                          bag.management_port)})
+        host.service_restart(bag.etcd_daemon)
+
+
+@when('snap.installed.etcd')
+@when_not('leadership.is_leader')
+@when_any('config.changed.port', 'config.changed.management_port')
+@when_not('etcd.installed')
+def follower_config_changed():
+    ''' Follower units need to render the configuration file, close and open
+    ports, and restart the etcd service. '''
+    # Defer the actual work to the rerender_config handler.
+    set_state('etcd.rerender-config')
+
+
+@when('snap.installed.etcd')
+@when('config.changed.bind_to_all_interfaces')
+@when_not('upgrade.series.in-progress')
+def bind_to_all_interfaces_changed():
+    ''' Re-render config and restart etcd when the bind configuration
+    changes. '''
+    set_state('etcd.rerender-config')
+
+
+@when('etcd.rerender-config')
+@when_not('upgrade.series.in-progress')
+def rerender_config():
+ ''' Config must be updated and service restarted '''
+ bag = EtcdDatabag()
+ log('Rendering config file for {0}'.format(bag.unit_name))
+ render_config()
+ if host.service_running(bag.etcd_daemon):
+ host.service_restart(bag.etcd_daemon)
+ set_app_version()
+
+
+@when('cluster.joined')
+def set_db_ingress_address(cluster):
+    ''' Send db ingress address to peers on the cluster relation
+
+    @params cluster - the etcd peer relation endpoint. '''
+    address = get_ingress_address('db')
+    cluster.set_db_ingress_address(address)
+
+
+@when('db.connected')
+@when('etcd.ssl.placed')
+@when('cluster.joined')
+def send_cluster_connection_details(cluster, db):
+    ''' Need to set the cluster connection string and
+    the client key and certificate on the relation object.
+
+    @params cluster - the etcd peer relation endpoint.
+    @params db - the etcd client relation endpoint. '''
+    cert = read_tls_cert('client.crt')
+    key = read_tls_cert('client.key')
+    ca = read_tls_cert('ca.crt')
+    etcdctl = EtcdCtl()
+
+    # Set the key, cert, and ca on the db relation
+    db.set_client_credentials(key, cert, ca)
+
+    port = hookenv.config().get('port')
+    # Get all the peers participating in the cluster relation.
+    members = cluster.get_db_ingress_addresses()
+    # Append our own address to the membership list, because peers don't
+    # include this unit in the relation data.
+    address = get_ingress_address('db')
+    members.append(address)
+    members.sort()
+    # Create a connection string with all the members on the configured port.
+    connection_string = get_connection_string(members, port)
+    # Set the connection string on the db relation.
+    db.set_connection_string(connection_string, version=etcdctl.version())
+
+
+@when('db.connected')
+@when('etcd.ssl.placed')
+@when_not('cluster.joined')
+def send_single_connection_details(db):
+    ''' Publish client credentials and a single-member connection string
+    on the db relation when this unit has no cluster peers.
+
+    @params db - the etcd client relation endpoint. '''
+    cert = read_tls_cert('client.crt')
+    key = read_tls_cert('client.key')
+    ca = read_tls_cert('ca.crt')
+
+    etcdctl = EtcdCtl()
+
+    # Set the key and cert on the db relation
+    db.set_client_credentials(key, cert, ca)
+
+    bag = EtcdDatabag()
+    # This unit is the only member; advertise just our own address.
+    address = get_ingress_address('db')
+    members = [address]
+    # Create a connection string with this member on the configured port.
+    connection_string = get_connection_string(members, bag.port)
+    # Set the connection string on the db relation.
+    db.set_connection_string(connection_string, version=etcdctl.version())
+
+
+@when('proxy.connected')
+@when('etcd.ssl.placed')
+@when_any('etcd.leader.configured', 'cluster.joined')
+def send_cluster_details(proxy):
+    ''' Sends the peer cluster string to proxy units so they can join and act
+    on behalf of the cluster.
+
+    @params proxy - the etcd-proxy relation endpoint. '''
+    cert = read_tls_cert('client.crt')
+    key = read_tls_cert('client.key')
+    ca = read_tls_cert('ca.crt')
+    proxy.set_client_credentials(key, cert, ca)
+
+    # format a list of cluster participants
+    etcdctl = EtcdCtl()
+    peers = etcdctl.member_list()
+    cluster = []
+    for peer in peers:
+        thispeer = peers[peer]
+        # Potential member doing registration (no peer URLs yet).
+        # Default to skip
+        if 'peer_urls' not in thispeer.keys() or not thispeer['peer_urls']:
+            continue
+        peer_string = "{}={}".format(thispeer['name'], thispeer['peer_urls'])
+        cluster.append(peer_string)
+
+    proxy.set_cluster_string(','.join(cluster))
+
+
+@when('config.changed.channel')
+def channel_changed():
+    ''' Ensure that the config is updated if the channel changes. '''
+    # rerender_config also refreshes the reported app version.
+    set_state('etcd.rerender-config')
+
+
+@when('config.changed.channel')
+@when_not('etcd.installed')
+def snap_install():
+    ''' (Re)install the core and etcd snaps when the channel config
+    changes, and force the client SSL exports to be re-rendered for the
+    possibly different etcd version. '''
+    # get_target_etcd_channel() returns False when channel is 'auto' and
+    # etcd is already installed -- no (re)install in that case.
+    channel = get_target_etcd_channel()
+    snap.install('core')
+    if channel:
+        snap.install('etcd', channel=channel, classic=False)
+    remove_state('etcd.ssl.exported')
+
+
+@when('etcd.ssl.placed')
+@when_not('snap.installed.etcd')
+def install_etcd():
+    ''' Install the etcd snap from the target channel. Legacy (pre-snap)
+    installs marked by 'etcd.installed' are not upgraded automatically;
+    those units are blocked pending the snap-upgrade action. '''
+
+    if is_state('etcd.installed'):
+        msg = 'Manual upgrade required. run-action snap-upgrade.'
+        status.blocked(msg)
+        return
+
+    status.maintenance('Installing etcd.')
+
+    # False means "leave an existing auto-channel install alone".
+    channel = get_target_etcd_channel()
+    if channel:
+        snap.install('etcd', channel=channel, classic=False)
+
+
+@when('snap.installed.etcd')
+@when_not('etcd.service-restart.configured')
+@when_not('upgrade.series.in-progress')
+def add_systemd_restart_always():
+ template = 'templates/service-always-restart.systemd-latest.conf'
+ service = 'snap.etcd.etcd'
+
+ try:
+ # Get the systemd version
+ cmd = ['systemd', '--version']
+ output = check_output(cmd).decode('UTF-8')
+ line = output.splitlines()[0]
+ words = line.split()
+ assert words[0] == 'systemd'
+ systemd_version = int(words[1])
+
+ # Check for old version (for xenial support)
+ if systemd_version < 230:
+ template = 'templates/service-always-restart.systemd-229.conf'
+ except Exception:
+ traceback.print_exc()
+ hookenv.log('Failed to detect systemd version, using latest template',
+ level='ERROR')
+
+ dest_dir = '/etc/systemd/system/{}.service.d'.format(service)
+ os.makedirs(dest_dir, exist_ok=True)
+ copyfile(template, '{}/always-restart.conf'.format(dest_dir))
+ check_call(['systemctl', 'daemon-reload'])
+ host.service_restart('{}.service'.format(service))
+ set_state('etcd.service-restart.configured')
+
+
+@when('snap.installed.etcd')
+@when('etcd.ssl.placed')
+@when('cluster.joined')
+@when_not('leadership.is_leader')
+@when_not('etcd.registered')
+@when_not('etcd.installed')
+@when_not('upgrade.series.in-progress')
+def register_node_with_leader(cluster):
+    '''
+    Control flow mechanism to perform self registration with the leader.
+
+    Before executing self registration, we must adhere to the nature of offline
+    static turnup rules. If we find a GUID in the member list without peering
+    information the unit will enter a race condition and must wait for a clean
+    status output before we can progress to self registration.
+
+    @params cluster - unused; present for the handler signature
+    (force_rejoin calls this function directly with None).
+    '''
+    etcdctl = EtcdCtl()
+    bag = EtcdDatabag()
+    leader_address = leader_get('leader_address')
+    bag.leader_address = leader_address
+
+    try:
+        # Check if we are already registered. Unregister ourselves if we are so
+        # we can register from scratch.
+        peer_url = 'https://%s:%s' % (bag.cluster_address, bag.management_port)
+        members = etcdctl.member_list(leader_address)
+        for _, member in members.items():
+            if member['peer_urls'] == peer_url:
+                log('Found member that matches our peer URL. Unregistering...')
+                etcdctl.unregister(member['unit_id'], leader_address)
+
+        # Now register.
+        # NOTE(review): register() may return {} if its output parse
+        # finds no ETCD_INITIAL_CLUSTER line; the KeyError below would
+        # not be caught by this except clause -- confirm acceptable.
+        resp = etcdctl.register(bag.__dict__)
+        bag.set_cluster(resp['cluster'])
+    except EtcdCtl.CommandFailed:
+        log('etcdctl.register failed, will retry')
+        msg = 'Waiting to retry etcd registration'
+        status.waiting(msg)
+        return
+
+    render_config(bag)
+    host.service_restart(bag.etcd_daemon)
+    open_port(bag.port)
+    set_state('etcd.registered')
+
+
+@when('etcd.ssl.placed')
+@when('leadership.is_leader')
+@when_not('etcd.leader.configured')
+@when_not('etcd.installed')
+@when_not('upgrade.series.in-progress')
+def initialize_new_leader():
+ ''' Create an initial cluster string to bring up a single member cluster of
+ etcd, and set the leadership data so the followers can join this one. '''
+ bag = EtcdDatabag()
+ bag.token = bag.token
+ bag.set_cluster_state('new')
+ address = get_ingress_address('cluster')
+ cluster_connection_string = get_connection_string([address],
+ bag.management_port)
+ bag.set_cluster("{}={}".format(bag.unit_name, cluster_connection_string))
+
+ render_config(bag)
+ host.service_restart(bag.etcd_daemon)
+
+ # sorry, some hosts need this. The charm races with systemd and wins.
+ time.sleep(2)
+
+ # Check health status before we say we are good
+ etcdctl = EtcdCtl()
+ status = etcdctl.cluster_health()
+ if 'unhealthy' in status:
+ status.blocked('Cluster not healthy.')
+ return
+ # We have a healthy leader, broadcast initial data-points for followers
+ open_port(bag.port)
+ leader_connection_string = get_connection_string([address],
+ bag.port)
+ leader_set({'leader_address': leader_connection_string,
+ 'cluster': bag.cluster})
+
+ # set registered state since if we ever become a follower, we will not need
+ # to re-register
+ set_state('etcd.registered')
+
+ # finish bootstrap delta and set configured state
+ set_state('etcd.leader.configured')
+
+
+@when('snap.installed.etcd')
+@when('snap.refresh.set')
+@when('leadership.is_leader')
+def process_snapd_timer():
+    ''' Set the snapd refresh timer on the leader so all cluster members
+    (present and future) will refresh near the same time. '''
+    # Get the current snapd refresh timer; we know layer-snap has set this
+    # when the 'snap.refresh.set' flag is present. snap.get returns
+    # bytes; normalize to a stripped string.
+    timer = snap.get(snapname='core', key='refresh.timer').decode('utf-8').strip()
+    if not timer:
+        # The core snap timer is empty. This likely means a subordinate timer
+        # reset ours. Try to set it back to a previously leader-set value,
+        # falling back to config if needed. Luckily, this should only happen
+        # during subordinate install, so this should remain stable afterward.
+        timer = leader_get('snapd_refresh') or hookenv.config('snapd_refresh')
+        snap.set_refresh_timer(timer)
+
+    # Ensure we have the timer known by snapd (it may differ from config).
+    timer = snap.get(snapname='core', key='refresh.timer').decode('utf-8').strip()
+
+    # The first time through, data_changed will be true. Subsequent calls
+    # should only update leader data if something changed.
+    if data_changed('etcd_snapd_refresh', timer):
+        log('setting snapd_refresh timer to: {}'.format(timer))
+        leader_set({'snapd_refresh': timer})
+
+
+@when('snap.installed.etcd')
+@when('snap.refresh.set')
+@when('leadership.changed.snapd_refresh')
+@when_not('leadership.is_leader')
+def set_snapd_timer():
+    ''' Set the snapd refresh.timer on non-leader cluster members to
+    match the value the leader published. '''
+    # NB: This method should only be run when 'snap.refresh.set' is present.
+    # Layer-snap will always set a core refresh.timer, which may not be the
+    # same as our leader. Gating with 'snap.refresh.set' ensures layer-snap
+    # has finished and we are free to set our config to the leader's timer.
+    timer = leader_get('snapd_refresh') or ''  # None will cause error
+    log('setting snapd_refresh timer to: {}'.format(timer))
+    snap.set_refresh_timer(timer)
+
+
+@when('tls_client.ca.saved', 'tls_client.server.key.saved',
+      'tls_client.server.certificate.saved',
+      'tls_client.client.certificate.saved')
+@when_not('etcd.ssl.placed')
+def tls_state_control():
+    ''' This state represents all the complexity of handling the TLS certs.
+    instead of stacking decorators, this state condenses it into a single
+    state we can gate on before progressing with secure setup. Also handles
+    ensuring users of the system can access the TLS certificates'''
+
+    bag = EtcdDatabag()
+    if not os.path.isdir(bag.etcd_conf_dir):
+        # Config dir not created yet; retry on a later hook.
+        hookenv.log('Waiting for etcd conf creation.')
+        return
+    # Give the ubuntu group read access to the TLS material.
+    cmd = ['chown', '-R', 'root:ubuntu', bag.etcd_conf_dir]
+    check_call(cmd)
+    set_state('etcd.ssl.placed')
+
+
+@when('etcd.ssl.placed')
+@when_any('tls_client.ca.written',
+          'tls_client.server.certificate.written',
+          'tls_client.client.certificate.written')
+@when_not('upgrade.series.in-progress')
+def tls_update():
+    ''' Handle changes to the TLS data by ensuring that the service is
+    restarted.
+    '''
+    # ensure config is updated with new certs and service restarted
+    bag = EtcdDatabag()
+    render_config(bag)
+    host.service_restart(bag.etcd_daemon)
+
+    # ensure that certs are re-echoed to the db relations; dropping
+    # 'etcd.ssl.placed' re-runs tls_state_control and the relation
+    # handlers gated on it.
+    remove_state('etcd.ssl.placed')
+    remove_state('tls_client.ca.written')
+    remove_state('tls_client.server.certificate.written')
+    remove_state('tls_client.client.certificate.written')
+
+
+@when('snap.installed.etcd')
+@when_not('etcd.ssl.exported')
+def render_default_user_ssl_exports():
+ ''' Add secure credentials to default user environment configs,
+ transparently adding TLS '''
+ opts = layer.options('tls-client')
+
+ ca_path = opts['ca_certificate_path']
+ client_crt = opts['client_certificate_path']
+ client_key = opts['client_key_path']
+
+ etcd_ver = etcd_version()
+ if etcd_ver == 'n/a':
+ hookenv.log('Unable to determine version format for etcd SSL config',
+ level=hookenv.ERROR)
+ return
+ major, minor, _ = etcd_ver.split('.')
+
+ if int(major) >= 3 and int(minor) >= 3:
+ evars = [
+ 'export ETCDCTL_KEY={}\n'.format(client_key),
+ 'export ETCDCTL_CERT={}\n'.format(client_crt),
+ 'export ETCDCTL_CACERT={}\n'.format(ca_path)
+ ]
+ else:
+ evars = [
+ 'export ETCDCTL_KEY_FILE={}\n'.format(client_key),
+ 'export ETCDCTL_CERT_FILE={}\n'.format(client_crt),
+ 'export ETCDCTL_CA_FILE={}\n'.format(ca_path)
+ ]
+
+ with open('/home/ubuntu/.bash_aliases', 'w') as fp:
+ fp.writelines(evars)
+ with open('/root/.bash_aliases', 'w') as fp:
+ fp.writelines(evars)
+
+ set_state('etcd.ssl.exported')
+
+
+def force_rejoin():
+    """Wipe local data and rejoin new cluster formed by leader unit
+
+    This action is required if leader unit performed snapshot restore. All
+    other members must remove their local data and previous cluster
+    identities and join newly formed, restored, cluster.
+    """
+    log('Wiping local storage and rejoining cluster')
+    conf = EtcdDatabag()
+    host.service_stop(conf.etcd_daemon)
+    clear_flag('etcd.registered')
+    # Remove the member data directory so etcd forgets its old identity.
+    etcd_data = os.path.join(conf.storage_path(), 'member')
+    if os.path.exists(etcd_data):
+        shutil.rmtree(etcd_data)
+    # Retry registration up to 11 times; success is signalled by
+    # register_node_with_leader() setting the 'etcd.registered' flag.
+    for _ in range(11):
+        # We need randomized back-off timer because only one unit can be
+        # joining at the same time
+        time.sleep(random.randint(1, 10))
+        register_node_with_leader(None)
+        if is_flag_set('etcd.registered'):
+            log('Successfully rejoined the cluster')
+            break
+
+
@when('leadership.changed.force_rejoin')
@when_not('leadership.is_leader')
def force_rejoin_requested():
    '''Non-leader handler: when the leader signals force_rejoin via
    leadership data (e.g. after a snapshot restore), wipe local state,
    rejoin the restored cluster and re-verify cluster health.'''
    force_rejoin()
    check_cluster_health()
+
+
@hook('cluster-relation-broken')
def perform_self_unregistration(cluster=None):
    ''' Attempt self removal during unit teardown. '''
    etcdctl = EtcdCtl()
    leader_address = leader_get('leader_address')
    # Juju unit names look like "etcd/0"; etcd member names drop the slash.
    unit_name = os.getenv('JUJU_UNIT_NAME').replace('/', '')
    members = etcdctl.member_list()
    # Self Unregistration
    # NOTE(review): raises KeyError if this unit is not present in the
    # member list (e.g. it never registered) — TODO confirm whether the
    # hook tolerates that failure.
    etcdctl.unregister(members[unit_name]['unit_id'], leader_address)
+
+
@hook('data-storage-attached')
def format_and_mount_storage():
    ''' This allows users to request persistent volumes from the cloud provider
    for the purposes of disaster recovery.

    Formats the attached block device as ext4, mounts it one level above
    the etcd data directory, migrates any existing data onto it with
    rsync, persists the mount in /etc/fstab, then re-renders config and
    restarts etcd. No-op if the device is already mounted.
    '''
    set_state('data.volume.attached')
    # Query juju for the information about the block storage
    device_info = storage_get()
    block = device_info['location']
    bag = EtcdDatabag()
    bag.cluster = leader_get('cluster')
    # the databag has behavior that keeps the path updated.
    # Reference the default path from layer_options.
    etcd_opts = layer.options('etcd')
    # Split the tail of the path to mount the volume 1 level before
    # the data directory.
    tail = os.path.split(bag.etcd_data_dir)[0]

    if volume_is_mounted(block):
        hookenv.log('Device is already attached to the system.')
        hookenv.log('Refusing to take action against {}'.format(block))
        return

    # Format the device in non-interactive mode
    cmd = ['mkfs.ext4', device_info['location'], '-F']
    hookenv.log('Creating filesystem on {}'.format(device_info['location']))
    hookenv.log('With command: {}'.format(' '.join(cmd)))
    check_call(cmd)

    # halt etcd to perform the data-store migration
    host.service_stop(bag.etcd_daemon)

    os.makedirs(tail, exist_ok=True)
    mount_volume(block, tail)
    # handle first run during early-attach storage, pre-config-changed hook.
    os.makedirs(bag.etcd_data_dir, exist_ok=True)

    # Only attempt migration if directory exists
    if os.path.isdir(etcd_opts['etcd_data_dir']):
        # Trailing slashes make rsync copy directory *contents*.
        migrate_path = "{}/".format(etcd_opts['etcd_data_dir'])
        output_path = "{}/".format(bag.etcd_data_dir)
        cmd = ['rsync', '-azp', migrate_path, output_path]

        hookenv.log('Detected existing data, migrating to new location.')
        hookenv.log('With command: {}'.format(' '.join(cmd)))

        check_call(cmd)

    with open('/etc/fstab', 'r') as fp:
        contents = fp.readlines()

    found = 0
    # scan fstab for the device
    for line in contents:
        if block in line:
            found = found + 1

    # if device not in fstab, append so it persists through reboots
    if not found > 0:
        # Bug fix: include a trailing newline so the appended entry does
        # not run into a subsequent fstab addition on the same line.
        append = "{0} {1} ext4 defaults 0 0\n".format(block, tail)  # noqa
        with open('/etc/fstab', 'a') as fp:
            fp.writelines([append])

    # Finally re-render the configuration and resume operation
    render_config(bag)
    host.service_restart(bag.etcd_daemon)
+
+
def read_tls_cert(cert):
    '''Return the utf-8 text content of a layer-configured TLS file.

    `cert` must be one of the well-known names (ca.crt, server.crt,
    server.key, client.crt, client.key); any other name raises
    ValueError.
    '''
    # Resolve the configured on-disk locations from the tls-client layer.
    opts = layer.options('tls-client')
    cert_paths = {
        'ca.crt': opts['ca_certificate_path'],
        'server.crt': opts['server_certificate_path'],
        'server.key': opts['server_key_path'],
        'client.crt': opts['client_certificate_path'],
        'client.key': opts['client_key_path'],
    }

    if cert not in cert_paths:
        raise ValueError('No known certificate {}'.format(cert))

    with open(cert_paths[cert], 'r') as fp:
        return fp.read()
+
+
@when('nrpe-external-master.available')
@when_not('nrpe-external-master.initial-config')
def initial_nrpe_config(nagios=None):
    '''One-time NRPE setup when the nagios relation first appears; marks
    the initial-config state then delegates to update_nrpe_config().'''
    set_state('nrpe-external-master.initial-config')
    update_nrpe_config(nagios)
+
+
@when_any('config.changed.nagios_context',
          'config.changed.nagios_servicegroups')
def force_update_nrpe_config():
    '''Clear the configured state so update_nrpe_config() re-runs after a
    nagios-related config change.'''
    remove_state('etcd.nrpe.configured')
+
+
@when('etcd.installed')
@when('nrpe-external-master.available')
@when_not('etcd.nrpe.configured')
def update_nrpe_config(unused=None):
    '''Install/refresh the NRPE checks for this unit: a systemd service
    check for the etcd snap and an etcd-alarms check fed by a cron job
    that caches `etcdctl alarm list` output.'''
    # List of systemd services that will be checked
    services = ('snap.etcd.etcd',)

    # The current nrpe-external-master interface doesn't handle a lot of logic,
    # use the charm-helpers code for now.
    hostname = nrpe.get_nagios_hostname()
    current_unit = nrpe.get_nagios_unit_name()
    nrpe_setup = nrpe.NRPE(hostname=hostname, primary=False)
    # add our first check, to alert on service failure
    nrpe.add_init_service_checks(nrpe_setup, services, current_unit)

    # add the cron job to populate the cache for our second check
    # (we cache the output of 'etcdctl alarm list' to minimise overhead)
    with open("templates/check_etcd-alarms.cron") as fp:
        write_file(
            path="/etc/cron.d/check_etcd-alarms",
            content=fp.read().encode(),
            owner="root",
            perms=0o644,
        )

    # create an empty output file for the above
    write_file(
        path="/var/lib/nagios/etcd-alarm-list.txt",
        content="",
        owner="root",
        perms=0o644,
    )

    # install the NRPE script for the above
    with open("templates/check_etcd-alarms.py") as fp:
        write_file(
            path="/usr/lib/nagios/plugins/check_etcd-alarms.py",
            content=fp.read().encode(),
            owner="root",
            perms=0o755,
        )

    # define our second check, to alert on etcd alarm status
    nrpe_setup.add_check(
        "etcd-alarms",
        "Verify etcd has no raised alarms",
        "/usr/lib/nagios/plugins/check_etcd-alarms.py",
    )

    # Persist the checks to the NRPE configuration on disk.
    nrpe_setup.write()
    set_state('etcd.nrpe.configured')
+
+
@when_not('nrpe-external-master.available')
@when('nrpe-external-master.initial-config')
def remove_nrpe_config(nagios=None):
    '''Remove the NRPE service checks once the nagios relation is gone.

    Mirrors update_nrpe_config(): drops the per-service checks and
    persists the change by rewriting the NRPE configuration.
    '''
    remove_state('nrpe-external-master.initial-config')

    # List of systemd services for which the checks will be removed
    services = ('snap.etcd.etcd',)

    # The current nrpe-external-master interface doesn't handle a lot of logic,
    # use the charm-helpers code for now.
    hostname = nrpe.get_nagios_hostname()
    nrpe_setup = nrpe.NRPE(hostname=hostname, primary=False)

    for service in services:
        nrpe_setup.remove_check(shortname=service)

    # Bug fix: remove_check() only updates in-memory state; write() is
    # required for the removal to reach the NRPE config files on disk
    # (update_nrpe_config() above does the same after add_check()).
    nrpe_setup.write()
+
+
def volume_is_mounted(volume):
    '''Return True when the given device path appears in the list of
    mounted ext4 filesystems reported by `df -t ext4`.'''
    df_output = check_output(['df', '-t', 'ext4']).decode('utf-8')
    return volume in df_output
+
+
def mount_volume(volume, location):
    ''' Takes a device path and mounts it to location.

    Raises CalledProcessError if the mount command fails. '''
    cmd = ['mount', volume, location]
    hookenv.log("Mounting {0} to {1}".format(volume, location))
    check_call(cmd)
+
+
def unmount_path(location):
    ''' Unmounts a mounted volume at path.

    Raises CalledProcessError if the umount command fails (e.g. busy). '''
    cmd = ['umount', location]
    hookenv.log("Unmounting {0}".format(location))
    check_call(cmd)
+
+
def close_open_ports():
    ''' Close the previous port and open the port from configuration. '''
    configuration = hookenv.config()
    previous_port = configuration.previous('port')
    port = configuration.get('port')
    # Nothing to do on first run (no previous value) or when unchanged.
    if previous_port is None or previous_port == port:
        return
    log('The port changed; closing {0} opening {1}'.format(previous_port,
                                                           port))
    close_port(previous_port)
    open_port(port)
+
+
def install(src, tgt):
    ''' This method wraps the bash "install" command.

    NOTE(review): `split` appears to be shlex.split — TODO confirm the
    import; paths containing whitespace would be split into separate
    arguments here. Returns the command's exit status (0 on success). '''
    return check_call(split('install {} {}'.format(src, tgt)))
+
+
def render_config(bag=None):
    ''' Render the etcd configuration template for the given version.

    Chooses the v2 or v3 template based on etcd_version(), removes a
    stale v2 config when rendering v3 (etcd 3.x refuses to start with
    both present), adjusts open ports, and clears the
    etcd.rerender-config state. '''
    if not bag:
        bag = EtcdDatabag()

    # Relocate data first so the rendered data-dir matches reality.
    move_etcd_data_to_standard_location()

    v2_conf_path = "{}/etcd.conf".format(bag.etcd_conf_dir)
    v3_conf_path = "{}/etcd.conf.yml".format(bag.etcd_conf_dir)

    # probe for 2.x compatibility
    if etcd_version().startswith('2.'):
        render('etcd2.conf', v2_conf_path, bag.__dict__, owner='root',
               group='root')
    # default to 3.x template behavior
    else:
        render('etcd3.conf', v3_conf_path, bag.__dict__, owner='root',
               group='root')
        if os.path.exists(v2_conf_path):
            # v3 will fail if the v2 config is left in place
            os.remove(v2_conf_path)
    # Close the previous client port and open the new one.
    close_open_ports()
    remove_state('etcd.rerender-config')
+
+
def etcd_version():
    ''' This method surfaces the version from etcdctl.

    Tries the v3 CLI first (`etcdctl version` with ETCDCTL_API=3), falls
    back to the v2 CLI (`etcdctl --version`), and parses the
    "etcdctl version" line. Returns the dotted version string, or 'n/a'
    when the version cannot be determined. '''
    raw_output = None
    try:
        # try v3
        raw_output = check_output(
            ['/snap/bin/etcd.etcdctl', 'version'],
            env={'ETCDCTL_API': '3'}
        ).decode('utf-8').strip()
        if "No help topic for 'version'" in raw_output:
            # handle v2
            raw_output = check_output(
                ['/snap/bin/etcd.etcdctl', '--version']
            ).decode('utf-8').strip()
        for line in raw_output.splitlines():
            if 'etcdctl version' in line:
                # "etcdctl version: 3.0.17" or "etcdctl version 2.3.8"
                version = line.split()[-1]
                return version
        hookenv.log('Unable to find etcd version: {}'.format(raw_output),
                    level=hookenv.ERROR)
        return 'n/a'
    except (ValueError, CalledProcessError):
        hookenv.log('Failed to get etcd version:\n'
                    '{}'.format(traceback.format_exc()), level=hookenv.ERROR)
        return 'n/a'
+
+
def move_etcd_data_to_standard_location():
    ''' Moves etcd data to the standard location if it's not already located
    there. This is necessary when generating new etcd config after etcd has
    been upgraded from version 2.3 to 3.x.
    '''
    bag = EtcdDatabag()
    conf_path = bag.etcd_conf_dir + '/etcd.conf.yml'
    # Only applies to v3-style YAML configs; nothing to do otherwise.
    if not os.path.exists(conf_path):
        return
    with open(conf_path) as f:
        conf = yaml.safe_load(f)
    data_dir = conf['data-dir']
    desired_data_dir = bag.etcd_data_dir
    if data_dir != desired_data_dir:
        log('Moving etcd data from %s to %s' % (data_dir, desired_data_dir))
        # Stop etcd so the files are not written to during the move.
        host.service_stop('snap.etcd.etcd')
        # NOTE(review): os.rename fails across filesystems, and this
        # assumes desired_data_dir already exists — TODO confirm both
        # hold in the snap layout.
        for filename in os.listdir(data_dir):
            os.rename(
                data_dir + '/' + filename,
                desired_data_dir + '/' + filename
            )
        os.rmdir(data_dir)
        # Point the config at the new location and restart the daemon.
        conf['data-dir'] = desired_data_dir
        with open(conf_path, 'w') as f:
            yaml.dump(conf, f)
        host.service_start('snap.etcd.etcd')
diff --git a/etcd/reactive/leadership.py b/etcd/reactive/leadership.py
new file mode 100644
index 0000000..29c6f3a
--- /dev/null
+++ b/etcd/reactive/leadership.py
@@ -0,0 +1,68 @@
+# Copyright 2015-2016 Canonical Ltd.
+#
+# This file is part of the Leadership Layer for Juju.
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 3, as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranties of
+# MERCHANTABILITY, SATISFACTORY QUALITY, or FITNESS FOR A PARTICULAR
+# PURPOSE. See the GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+from charmhelpers.core import hookenv
+from charmhelpers.core import unitdata
+
+from charms import reactive
+from charms.leadership import leader_get, leader_set
+
+
+__all__ = ['leader_get', 'leader_set'] # Backwards compatibility
+
+
def initialize_leadership_state():
    '''Initialize leadership.* states from the hook environment.

    Invoked by hookenv.atstart() so states are available in
    @hook decorated handlers.

    Maintains: leadership.is_leader, per-key leadership.changed.<key> and
    leadership.set.<key> states, an aggregate leadership.changed state,
    and a unitdata cache of the previous leader settings used for change
    detection.
    '''
    is_leader = hookenv.is_leader()
    if is_leader:
        hookenv.log('Initializing Leadership Layer (is leader)')
    else:
        hookenv.log('Initializing Leadership Layer (is follower)')

    reactive.helpers.toggle_state('leadership.is_leader', is_leader)

    # Previous snapshot of leader settings cached in unit-local storage.
    previous = unitdata.kv().getrange('leadership.settings.', strip=True)
    current = hookenv.leader_get()

    # Handle deletions.
    for key in set(previous.keys()) - set(current.keys()):
        current[key] = None

    any_changed = False
    for key, value in current.items():
        reactive.helpers.toggle_state('leadership.changed.{}'.format(key),
                                      value != previous.get(key))
        if value != previous.get(key):
            any_changed = True
        reactive.helpers.toggle_state('leadership.set.{}'.format(key),
                                      value is not None)
    reactive.helpers.toggle_state('leadership.changed', any_changed)

    # Refresh the cache for the next hook invocation.
    unitdata.kv().update(current, prefix='leadership.settings.')
+
+
+# Per https://github.com/juju-solutions/charms.reactive/issues/33,
+# this module may be imported multiple times so ensure the
+# initialization hook is only registered once. I have to piggy back
+# onto the namespace of a module imported before reactive discovery
+# to do this.
+if not hasattr(reactive, '_leadership_registered'):
+ hookenv.atstart(initialize_leadership_state)
+ reactive._leadership_registered = True
diff --git a/etcd/reactive/snap.py b/etcd/reactive/snap.py
new file mode 100644
index 0000000..1fda7b7
--- /dev/null
+++ b/etcd/reactive/snap.py
@@ -0,0 +1,349 @@
+# Copyright 2016-2019 Canonical Ltd.
+#
+# This file is part of the Snap layer for Juju.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""
+charms.reactive helpers for dealing with Snap packages.
+"""
+from collections import OrderedDict
+from distutils.version import LooseVersion
+import os.path
+from os import uname
+import shutil
+import subprocess
+from textwrap import dedent
+import time
+from urllib.request import urlretrieve
+
+from charmhelpers.core import hookenv, host
+from charmhelpers.core.hookenv import ERROR
+from charmhelpers.core.host import write_file
+from charms import layer
+from charms import reactive
+from charms.layer import snap
+from charms.reactive import register_trigger, when, when_not, toggle_flag
+from charms.reactive.helpers import data_changed
+
+
class UnsatisfiedMinimumVersionError(Exception):
    """Raised when snapd could not be upgraded to a required minimum version.

    Attributes:
        desired: the minimum snapd version that was requested.
        actual: the snapd version actually present after the attempt.
    """

    def __init__(self, desired, actual):
        super().__init__()
        self.desired = desired
        self.actual = actual

    def __str__(self):
        return "Could not install snapd >= {}, got {}".format(
            self.desired, self.actual)
+
+
class InvalidBundleError(Exception):
    """Raised when snapd rejects a store assertion bundle, or the store id
    from the proxy does not match the acked assertion."""
    pass
+
+
def sorted_snap_opts():
    """Return the snap layer options as an OrderedDict with the 'core'
    snap (when configured) placed first, so it is installed/refreshed
    before any other snap."""
    snap_options = layer.options("snap")
    # False sorts before True, so the "core" entry floats to the front.
    ordered_items = sorted(snap_options.items(),
                           key=lambda item: item[0] != "core")
    return OrderedDict(ordered_items)
+
+
def install():
    '''Install all layer-configured snaps (core first) that are not
    already installed, skipping snaps whose experimental
    supported-architectures option excludes this machine, then connect
    interfaces when the option set changed.'''
    # Do nothing if we don't have kernel support yet
    if not kernel_supported():
        return

    opts = sorted_snap_opts()
    # supported-architectures is EXPERIMENTAL and undocumented.
    # It probably should live in the base layer, blocking the charm
    # during bootstrap if the arch is unsupported.
    arch = uname().machine
    for snapname, snap_opts in opts.items():
        # pop() also strips the key before snap_opts is passed to snap.install.
        supported_archs = snap_opts.pop("supported-architectures", None)
        if supported_archs and arch not in supported_archs:
            # Note that this does *not* error. The charm will need to
            # cope with the snaps it requested never getting installed,
            # likely by doing its own check on supported-architectures.
            hookenv.log(
                "Snap {} not supported on {!r} architecture" "".format(snapname, arch),
                ERROR,
            )
            continue
        installed_flag = "snap.installed.{}".format(snapname)
        if not reactive.is_flag_set(installed_flag):
            snap.install(snapname, **snap_opts)
    if data_changed("snap.install.opts", opts):
        snap.connect_all()
+
+
def check_refresh_available():
    '''Toggle the per-snap refresh-available flag for every installed
    snap, based on snapd's list of pending refreshes.'''
    # Do nothing if we don't have kernel support yet
    if not kernel_supported():
        return

    available_refreshes = snap.get_available_refreshes()
    for snapname in snap.get_installed_snaps():
        toggle_flag(snap.get_refresh_available_flag(snapname), snapname in available_refreshes)
+
+
def refresh():
    '''Refresh all layer-configured snaps (core first), skipping snaps
    whose experimental supported-architectures option excludes this
    machine, then re-connect interfaces.'''
    # Do nothing if we don't have kernel support yet
    if not kernel_supported():
        return

    opts = sorted_snap_opts()
    # supported-architectures is EXPERIMENTAL and undocumented.
    # It probably should live in the base layer, blocking the charm
    # during bootstrap if the arch is unsupported.
    # Consistency: use uname().machine as install() does; it is the same
    # value as the positional uname()[4].
    arch = uname().machine
    check_refresh_available()
    for snapname, snap_opts in opts.items():
        supported_archs = snap_opts.pop("supported-architectures", None)
        if supported_archs and arch not in supported_archs:
            continue
        snap.refresh(snapname, **snap_opts)
    snap.connect_all()
+
+
@reactive.hook("upgrade-charm")
def upgrade_charm():
    '''Refresh all managed snaps whenever the charm itself is upgraded.'''
    refresh()
+
+
def get_series():
    '''Return the Ubuntu series codename (e.g. "focal") via lsb_release.'''
    return subprocess.check_output(["lsb_release", "-sc"], universal_newlines=True).strip()
+
+
def snapd_supported():
    '''Return False only for trusty LXC containers, where snapd cannot
    run; assume support everywhere else.'''
    # snaps are not supported in trusty lxc containers.
    if get_series() == "trusty" and host.is_container():
        return False
    return True  # For all other cases, assume true.
+
+
def kernel_supported():
    '''Return True when the running kernel (>= 4.4) can support snaps;
    log and return False otherwise.

    NOTE(review): LooseVersion is deprecated in modern Python
    (distutils removal) — consider a tuple-based compare when the
    supported Python baseline allows.
    '''
    kernel_version = uname().release

    if LooseVersion(kernel_version) < LooseVersion("4.4"):
        hookenv.log(
            "Snaps do not work on kernel {}, a reboot "
            "into a supported kernel (>4.4) is required"
            "".format(kernel_version)
        )
        return False
    return True
+
+
def ensure_snapd():
    '''Make sure the snapd daemon is installed, raising if this
    environment cannot run snaps at all. Applies trusty/lxd-specific
    workarounds.'''
    if not snapd_supported():
        hookenv.log("Snaps do not work in this environment", hookenv.ERROR)
        raise Exception("Snaps do not work in this environment")

    # I don't use the apt layer, because that would tie this layer
    # too closely to apt packaging. Perhaps this is a snap-only system.
    if not shutil.which("snap"):
        os.environ["DEBIAN_FRONTEND"] = "noninteractive"
        cmd = ["apt-get", "install", "-y", "snapd"]
        # LP:1699986: Force install of systemd on Trusty.
        if get_series() == "trusty":
            cmd.append("systemd")
        subprocess.check_call(cmd, universal_newlines=True)

    # Work around lp:1628289. Remove this stanza once snapd depends
    # on the necessary package and snaps work in lxd xenial containers
    # without the workaround.
    if host.is_container() and not shutil.which("squashfuse"):
        os.environ["DEBIAN_FRONTEND"] = "noninteractive"
        cmd = ["apt-get", "install", "-y", "squashfuse", "fuse"]
        subprocess.check_call(cmd, universal_newlines=True)
+
+
def proxy_settings():
    """Assemble the proxy environment for snapd.

    Starts from http_proxy/https_proxy in the hook environment; the
    (deprecated) snap_proxy charm config, when set, overrides both.
    Returns a dict suitable for a systemd Environment= override.
    """
    proxy_env = {}
    for var in ("http_proxy", "https_proxy"):
        if var in os.environ:
            proxy_env[var] = os.environ[var]

    snap_proxy = hookenv.config().get("snap_proxy")
    if snap_proxy:
        proxy_env["http_proxy"] = snap_proxy
        proxy_env["https_proxy"] = snap_proxy
    return proxy_env
+
+
def update_snap_proxy():
    '''Install or remove the systemd proxy override for snapd and restart
    it when the proxy settings changed since last run.'''
    # Do nothing if we don't have kernel support yet
    if not kernel_supported():
        return

    # This is a hack based on
    # https://bugs.launchpad.net/layer-snap/+bug/1533899/comments/1
    # Do it properly when Bug #1533899 is addressed.
    # Note we can't do this in a standard reactive handler as we need
    # to ensure proxies are configured before attempting installs or
    # updates.
    proxy = proxy_settings()

    override_dir = "/etc/systemd/system/snapd.service.d"
    path = os.path.join(override_dir, "snap_layer_proxy.conf")
    if not proxy and not os.path.exists(path):
        return  # No proxy asked for and proxy never configured.

    # It seems we cannot rely on this directory existing, so manually
    # create it.
    if not os.path.exists(override_dir):
        host.mkdir(override_dir, perms=0o755)

    if not data_changed("snap.proxy", proxy):
        return  # Short circuit avoids unnecessary restarts.

    if proxy:
        create_snap_proxy_conf(path, proxy)
    else:
        remove_snap_proxy_conf(path)
    # Reload units so systemd picks up the new/removed override, then
    # restart snapd to apply it.
    subprocess.check_call(["systemctl", "daemon-reload"], universal_newlines=True)
    time.sleep(2)
    subprocess.check_call(["systemctl", "restart", "snapd.service"], universal_newlines=True)
+
+
def create_snap_proxy_conf(path, proxy):
    """Write a systemd drop-in at `path` exporting the given proxy
    variables to the snapd service."""
    host.mkdir(os.path.dirname(path))
    lines = ["# Managed by Juju\n", "[Service]\n"]
    for proxy_key, proxy_value in proxy.items():
        lines.append("Environment={}={}\n".format(proxy_key, proxy_value))
    host.write_file(path, "".join(lines).encode())
+
+
def remove_snap_proxy_conf(path):
    '''Delete the snapd proxy drop-in if present; no-op otherwise.'''
    if os.path.exists(path):
        os.remove(path)
+
+
def ensure_path():
    """Guarantee /snap/bin is on $PATH for this process.

    Works around Bug #1662856, where /snap/bin may be missing from the
    hook environment's PATH. Idempotent: appends at most once.
    """
    path_entries = os.environ["PATH"].split(":")
    if "/snap/bin" not in path_entries:
        os.environ["PATH"] = os.environ["PATH"] + ":/snap/bin"
+
+
def _get_snapd_version():
    '''Parse `snap version` output and return the snapd daemon version
    as a LooseVersion.'''
    stdout = subprocess.check_output(["snap", "version"], stdin=subprocess.DEVNULL, universal_newlines=True)
    # Output is "name<whitespace>version" pairs, one per line.
    version_info = dict(line.split(None, 1) for line in stdout.splitlines())
    return LooseVersion(version_info["snapd"])
+
+
# apt preferences template pinning the -proposed pocket at low priority
# (400) so only explicitly requested packages (snapd) are pulled from it.
# The placeholder is filled with the Ubuntu series codename.
PREFERENCES = """\
Package: *
Pin: release a={}-proposed
Pin-Priority: 400
"""
+
+
def ensure_snapd_min_version(min_version):
    '''Ensure snapd is at least `min_version`, upgrading from the
    -proposed pocket if needed.

    Raises UnsatisfiedMinimumVersionError when the upgrade still leaves
    snapd below the requested version.
    '''
    snapd_version = _get_snapd_version()
    if snapd_version < LooseVersion(min_version):
        # Imported lazily so charmhelpers.fetch is only needed on this path.
        from charmhelpers.fetch import add_source, apt_update, apt_install

        # Temporary until LP:1735344 lands
        add_source("distro-proposed", fail_invalid=True)
        distro = get_series()
        # disable proposed by default, needs to explicit
        write_file(
            "/etc/apt/preferences.d/proposed",
            PREFERENCES.format(distro),
        )
        apt_update()
        # explicitly install snapd from proposed
        apt_install("snapd/{}-proposed".format(distro))
        snapd_version = _get_snapd_version()
        if snapd_version < LooseVersion(min_version):
            hookenv.log("Failed to install snapd >= {}".format(min_version), ERROR)
            raise UnsatisfiedMinimumVersionError(min_version, snapd_version)
+
+
def download_assertion_bundle(proxy_url):
    """Download proxy assertion bundle and store id.

    Fetches /v2/auth/store/assertions from the snap store proxy and
    returns (local_bundle_path, store_id), where the store id comes from
    the X-Assertion-Store-Id response header.
    """
    assertions_url = "{}/v2/auth/store/assertions".format(proxy_url)
    # urlretrieve saves to a temp file and returns (path, headers).
    local_bundle, headers = urlretrieve(assertions_url)
    store_id = headers["X-Assertion-Store-Id"]
    return local_bundle, store_id
+
+
def configure_snap_store_proxy():
    '''Apply the (deprecated) snap_proxy_url charm config: ack the store
    proxy's assertion bundle and point snapd at its store id, or reset
    the store id when the config is cleared.

    Raises InvalidBundleError when snapd rejects the assertion or the
    store id.
    '''
    # Do nothing if we don't have kernel support yet
    if not kernel_supported():
        return

    if not reactive.is_flag_set("config.changed.snap_proxy_url"):
        return
    config = hookenv.config()
    if "snap_proxy_url" not in config:
        # The deprecated snap_proxy_url config items have been removed
        # from config.yaml. If the charm author hasn't added them back
        # explicitly, there is nothing to do. Juju is maintaining these
        # settings as model configuration.
        return
    snap_store_proxy_url = config.get("snap_proxy_url")
    if not snap_store_proxy_url and not config.previous("snap_proxy_url"):
        # Proxy url is not set, and was not set previous hook. Do nothing,
        # to avoid overwriting the Juju maintained setting.
        return
    # proxy.store support requires snapd 2.30+.
    ensure_snapd_min_version("2.30")
    if snap_store_proxy_url:
        bundle, store_id = download_assertion_bundle(snap_store_proxy_url)
        try:
            subprocess.check_output(
                ["snap", "ack", bundle],
                stdin=subprocess.DEVNULL,
                universal_newlines=True,
            )
        except subprocess.CalledProcessError as e:
            raise InvalidBundleError("snapd could not ack the proxy assertion: " + e.output)
    else:
        # Empty store id resets snapd to the default store.
        store_id = ""

    try:
        subprocess.check_output(
            ["snap", "set", "core", "proxy.store={}".format(store_id)],
            stdin=subprocess.DEVNULL,
            universal_newlines=True,
        )
    except subprocess.CalledProcessError as e:
        raise InvalidBundleError("Proxy ID from header did not match store assertion: " + e.output)
+
+
+register_trigger(when="config.changed.snapd_refresh", clear_flag="snap.refresh.set")
+
+
@when_not("snap.refresh.set")
@when("snap.installed.core")
def change_snapd_refresh():
    """Set the system refresh.timer option.

    Only writes the timer when a value is configured, or when a
    previously configured value needs clearing (tracked via the
    snap.refresh.was-set flag). Requires snapd 2.31+.
    """
    ensure_snapd_min_version("2.31")
    timer = hookenv.config()["snapd_refresh"]
    was_set = reactive.is_flag_set("snap.refresh.was-set")
    if timer or was_set:
        snap.set_refresh_timer(timer)
    reactive.toggle_flag("snap.refresh.was-set", timer)
    reactive.set_flag("snap.refresh.set")
+
+
# Bootstrap. We don't use standard reactive handlers to ensure that
# everything is bootstrapped before any charm handlers are run.
# Order matters: snapd must exist and proxies must be configured before
# any snap install is attempted.
hookenv.atstart(hookenv.log, "Initializing Snap Layer")
hookenv.atstart(ensure_snapd)
hookenv.atstart(ensure_path)
hookenv.atstart(update_snap_proxy)
hookenv.atstart(configure_snap_store_proxy)
hookenv.atstart(install)
diff --git a/etcd/reactive/status.py b/etcd/reactive/status.py
new file mode 100644
index 0000000..2f33f3f
--- /dev/null
+++ b/etcd/reactive/status.py
@@ -0,0 +1,4 @@
from charms import layer


# Initialize the status layer at import time so its status helpers are
# available to every reactive handler in this charm.
layer.status._initialize()
diff --git a/etcd/reactive/tls_client.py b/etcd/reactive/tls_client.py
new file mode 100644
index 0000000..afa2228
--- /dev/null
+++ b/etcd/reactive/tls_client.py
@@ -0,0 +1,208 @@
+import os
+
+from pathlib import Path
+from subprocess import check_call
+
+from charms import layer
+from charms.reactive import hook
+from charms.reactive import set_state, remove_state
+from charms.reactive import when
+from charms.reactive import set_flag, clear_flag
+from charms.reactive import endpoint_from_flag
+from charms.reactive.helpers import data_changed
+
+from charmhelpers.core import hookenv, unitdata
+from charmhelpers.core.hookenv import log
+
+
@when('certificates.ca.available')
def store_ca(tls):
    '''Read the certificate authority from the relation object and install
    the ca on this system.

    Writes the CA to the layer-configured path (when set/changed), sets
    the tls_client.ca.written/saved states, and refreshes the system
    trust store on change.'''
    # Get the CA from the relationship object.
    certificate_authority = tls.get_ca()
    if certificate_authority:
        layer_options = layer.options('tls-client')
        ca_path = layer_options.get('ca_certificate_path')
        changed = data_changed('certificate_authority', certificate_authority)
        if ca_path:
            if changed or not os.path.exists(ca_path):
                log('Writing CA certificate to {0}'.format(ca_path))
                # ensure we have a newline at the end of the certificate.
                # some things will blow up without one.
                # See https://bugs.launchpad.net/charm-kubernetes-master/+bug/1828034
                if not certificate_authority.endswith('\n'):
                    certificate_authority += '\n'
                _write_file(ca_path, certificate_authority)
            set_state('tls_client.ca.written')
            set_state('tls_client.ca.saved')
        if changed:
            # Update /etc/ssl/certs and generate ca-certificates.crt
            install_ca(certificate_authority)
+
+
@when('certificates.server.cert.available')
def store_server(tls):
    '''Read the server certificate and server key from the relation object
    and save them to the certificate directory.

    Appends the intermediate chain (when provided) to the certificate,
    and only rewrites files when the content changed or the file is
    missing.'''
    server_cert, server_key = tls.get_server_cert()
    chain = tls.get_chain()
    if chain:
        server_cert = server_cert + '\n' + chain
    if server_cert and server_key:
        layer_options = layer.options('tls-client')
        cert_path = layer_options.get('server_certificate_path')
        key_path = layer_options.get('server_key_path')
        cert_changed = data_changed('server_certificate', server_cert)
        key_changed = data_changed('server_key', server_key)
        if cert_path:
            if cert_changed or not os.path.exists(cert_path):
                log('Writing server certificate to {0}'.format(cert_path))
                _write_file(cert_path, server_cert)
                set_state('tls_client.server.certificate.written')
            set_state('tls_client.server.certificate.saved')
        if key_path:
            if key_changed or not os.path.exists(key_path):
                log('Writing server key to {0}'.format(key_path))
                _write_file(key_path, server_key)
            set_state('tls_client.server.key.saved')
+
+
@when('certificates.client.cert.available')
def store_client(tls):
    '''Read the client certificate and client key from the relation object
    and copy them to the certificate directory.

    Mirrors store_server(): appends the chain when provided, and only
    rewrites files when content changed or the file is missing.'''
    client_cert, client_key = tls.get_client_cert()
    chain = tls.get_chain()
    if chain:
        client_cert = client_cert + '\n' + chain
    if client_cert and client_key:
        layer_options = layer.options('tls-client')
        cert_path = layer_options.get('client_certificate_path')
        key_path = layer_options.get('client_key_path')
        cert_changed = data_changed('client_certificate', client_cert)
        key_changed = data_changed('client_key', client_key)
        if cert_path:
            if cert_changed or not os.path.exists(cert_path):
                log('Writing client certificate to {0}'.format(cert_path))
                _write_file(cert_path, client_cert)
                set_state('tls_client.client.certificate.written')
            set_state('tls_client.client.certificate.saved')
        if key_path:
            if key_changed or not os.path.exists(key_path):
                log('Writing client key to {0}'.format(key_path))
                _write_file(key_path, client_key)
            set_state('tls_client.client.key.saved')
+
+
@when('certificates.certs.changed')
def update_certs():
    '''Write any changed server/client certificates to their registered
    paths and pulse the tls_client.*.changed flags so dependent layers
    react.

    Paths are registered per common-name in unitdata under
    layer.tls-client.cert-paths. Flags are cleared then re-set so a
    still-set flag from a previous hook re-triggers watchers.'''
    tls = endpoint_from_flag('certificates.certs.changed')
    certs_paths = unitdata.kv().get('layer.tls-client.cert-paths', {})
    all_ready = True
    any_changed = False
    maps = {
        'server': tls.server_certs_map,
        'client': tls.client_certs_map,
    }

    if maps.get('client') == {}:
        log(
            'No client certs found using maps. Checking for global \
client certificates.',
            'WARNING'
        )
        # Check for global certs,
        # Backwards compatibility https://bugs.launchpad.net/charm-kubernetes-master/+bug/1825819
        cert_pair = tls.get_client_cert()
        if cert_pair is not None:
            # Reuse the single global pair for every registered client name.
            for client_name in certs_paths.get('client', {}).keys():
                maps.get('client').update({
                    client_name: cert_pair
                })

    chain = tls.get_chain()
    for cert_type in ('server', 'client'):
        for common_name, paths in certs_paths.get(cert_type, {}).items():
            cert_pair = maps[cert_type].get(common_name)
            if not cert_pair:
                all_ready = False
                continue
            if not data_changed('layer.tls-client.'
                                '{}.{}'.format(cert_type, common_name), cert_pair):
                continue

            cert = None
            key = None
            # cert_pair is either a Certificate object (with .cert/.key)
            # or a plain (cert, key) tuple, depending on interface version.
            if type(cert_pair) is not tuple:
                if paths['crt']:
                    cert = cert_pair.cert
                if paths['key']:
                    key = cert_pair.key
            else:
                cert, key = cert_pair

            if cert:
                if chain:
                    cert = cert + '\n' + chain
                _ensure_directory(paths['crt'])
                Path(paths['crt']).write_text(cert)

            if key:
                _ensure_directory(paths['key'])
                Path(paths['key']).write_text(key)

            any_changed = True
            # clear flags first to ensure they are re-triggered if left set
            clear_flag('tls_client.{}.certs.changed'.format(cert_type))
            clear_flag('tls_client.{}.cert.{}.changed'.format(cert_type,
                                                              common_name))
            set_flag('tls_client.{}.certs.changed'.format(cert_type))
            set_flag('tls_client.{}.cert.{}.changed'.format(cert_type,
                                                            common_name))
    if all_ready:
        set_flag('tls_client.certs.saved')
    if any_changed:
        clear_flag('tls_client.certs.changed')
        set_flag('tls_client.certs.changed')
    clear_flag('certificates.certs.changed')
+
+
def install_ca(certificate_authority):
    '''Install a certificiate authority on the system by calling the
    update-ca-certificates command.

    Writes the CA under /usr/local/share/ca-certificates named after the
    Juju service, refreshes the trust store and sets
    tls_client.ca_installed.'''
    if certificate_authority:
        name = hookenv.service_name()
        # Create a path to install CAs on Debian systems.
        ca_path = '/usr/local/share/ca-certificates/{0}.crt'.format(name)
        log('Writing CA certificate to {0}'.format(ca_path))
        _write_file(ca_path, certificate_authority)
        # Update the trusted CAs on this system (a time expensive operation).
        check_call(['update-ca-certificates'])
        log('Generated ca-certificates.crt for {0}'.format(name))
        set_state('tls_client.ca_installed')
+
+
@hook('upgrade-charm')
def remove_states():
    '''Clear every tls_client.*.saved state on charm upgrade so the cert
    handlers re-evaluate (and rewrite if needed) all material.'''
    saved_states = (
        'tls_client.ca.saved',
        'tls_client.server.certificate.saved',
        'tls_client.server.key.saved',
        'tls_client.client.certificate.saved',
        'tls_client.client.key.saved',
    )
    for state in saved_states:
        remove_state(state)
+
+
+def _ensure_directory(path):
+ '''Ensure the parent directory exists creating directories if necessary.'''
+ directory = os.path.dirname(path)
+ if not os.path.isdir(directory):
+ os.makedirs(directory)
+ os.chmod(directory, 0o770)
+
+
def _write_file(path, content):
    '''Write text content to path, creating any missing parent
    directories and restricting the file to mode 0o440.

    NOTE(review): rewriting an existing 0o440 file relies on running as
    a privileged user (charm hooks run as root).
    '''
    _ensure_directory(path)
    Path(path).write_text(content)
    os.chmod(path, 0o440)
diff --git a/etcd/requirements.txt b/etcd/requirements.txt
new file mode 100644
index 0000000..55543d9
--- /dev/null
+++ b/etcd/requirements.txt
@@ -0,0 +1,3 @@
+mock
+flake8
+pytest
diff --git a/etcd/revision b/etcd/revision
new file mode 100644
index 0000000..c227083
--- /dev/null
+++ b/etcd/revision
@@ -0,0 +1 @@
+0
\ No newline at end of file
diff --git a/etcd/setup.py b/etcd/setup.py
new file mode 100755
index 0000000..b30bff5
--- /dev/null
+++ b/etcd/setup.py
@@ -0,0 +1,28 @@
+#!/usr/bin/env python
+
+import os
+from setuptools import setup
+
+here = os.path.abspath(os.path.dirname(__file__))
+
+with open(os.path.join(here, "README.md")) as f:
+ README = f.read()
+
+setup(
+ name="layer_snap",
+ version="1.0.0",
+ description="layer_snap",
+ long_description=README,
+ license="Apache License 2.0",
+ classifiers=[
+ "Development Status :: 5 - Production/Stable",
+ "Intended Audience :: Developers",
+ "Programming Language :: Python :: 3",
+ ],
+ url="https://git.launchpad.net/layer-snap",
+ package_dir={"": "lib"},
+ packages=["charms/layer"],
+ include_package_data=True,
+ zip_safe=False,
+ install_requires=["charmhelpers", "charms.reactive"],
+)
diff --git a/etcd/templates/cdk-service-kicker b/etcd/templates/cdk-service-kicker
new file mode 100644
index 0000000..26d3740
--- /dev/null
+++ b/etcd/templates/cdk-service-kicker
@@ -0,0 +1,34 @@
+#!/bin/sh
+set -eu
+
+# This service runs on boot to work around issues relating to LXD and snapd.
+
+# Workaround for https://github.com/conjure-up/conjure-up/issues/1448
+if [ -f '/proc/1/environ' ] && grep -q '^container=lxc' /proc/1/environ; then
+ echo "lxc detected, applying snapd apparmor profiles"
+ (set +e
+ apparmor_parser /var/lib/snapd/apparmor/profiles/*
+ echo "apparmor_parser: exit status $?"
+ )
+else
+ echo "lxc not detected, skipping snapd apparmor profiles"
+fi
+
+# Workaround for https://github.com/juju-solutions/bundle-canonical-kubernetes/issues/357
+services="{{services}}"
+
+deadline="$(expr "$(date +%s)" + 600)"
+
+while [ "$(date +%s)" -lt "$deadline" ]; do
+ for service in $services; do
+ echo "$service: checking"
+ if ! systemctl is-active "$service"; then
+ echo "$service: not active, restarting"
+ systemctl restart "$service" || true
+ fi
+ done
+
+ sleep 10
+done
+
+echo "deadline has passed, exiting gracefully"
diff --git a/etcd/templates/cdk-service-kicker.service b/etcd/templates/cdk-service-kicker.service
new file mode 100644
index 0000000..5c2105e
--- /dev/null
+++ b/etcd/templates/cdk-service-kicker.service
@@ -0,0 +1,10 @@
+[Unit]
+Description=cdk-service-kicker
+
+[Service]
+ExecStart=/usr/bin/cdk-service-kicker
+Restart=on-failure
+Type=simple
+
+[Install]
+WantedBy=multi-user.target
diff --git a/etcd/templates/check_etcd-alarms.cron b/etcd/templates/check_etcd-alarms.cron
new file mode 100644
index 0000000..5fc0365
--- /dev/null
+++ b/etcd/templates/check_etcd-alarms.cron
@@ -0,0 +1,2 @@
+# check_etcd_alarms
+* * * * * root [ -x /snap/bin/etcdctl ] && ETCDCTL_API=3 /snap/bin/etcdctl --endpoints=127.0.0.1:4001 alarm list > /var/lib/nagios/etcd-alarm-list.txt
diff --git a/etcd/templates/check_etcd-alarms.py b/etcd/templates/check_etcd-alarms.py
new file mode 100644
index 0000000..1c7c7b9
--- /dev/null
+++ b/etcd/templates/check_etcd-alarms.py
@@ -0,0 +1,36 @@
+#!/usr/bin/env python3
+
+# Copyright (C) 2020 Canonical Ltd.
+
+import nagios_plugin3
+
+
+def load_alarm_list():
+ """Load the cached status from disk, return it as a string"""
+ alarm_list_path = '/var/lib/nagios/etcd-alarm-list.txt'
+
+ with open(alarm_list_path, 'r') as alarm_list_log:
+ alarm_list = alarm_list_log.read()
+
+ return alarm_list.strip()
+
+
+def check_alarms():
+ """Raise an error if the cached status contains any non-blank lines"""
+ alarms = []
+ alarm_list = load_alarm_list()
+ for line in alarm_list.splitlines():
+ line = line.strip()
+ if line:
+ alarms.append(line)
+ if alarms:
+ raise nagios_plugin3.CriticalError(' '.join(alarms))
+
+
+def main():
+ nagios_plugin3.try_check(check_alarms)
+ print("OK - no active alarms")
+
+
+if __name__ == "__main__":
+ main()
diff --git a/etcd/templates/etcd2.conf b/etcd/templates/etcd2.conf
new file mode 100644
index 0000000..0e97a28
--- /dev/null
+++ b/etcd/templates/etcd2.conf
@@ -0,0 +1,23 @@
+# This file is rendered by Juju, manual edits will not be persisted
+ETCD_DATA_DIR={{ etcd_data_dir }}/{{ unit_name }}.etcd
+ETCD_NAME={{ unit_name }}
+ETCD_ADVERTISE_CLIENT_URLS="https://{{ db_address }}:{{ port }}"
+ETCD_LISTEN_CLIENT_URLS="http://127.0.0.1:4001,https://{{ db_bind_address }}:{{ port }}"
+ETCD_LISTEN_PEER_URLS="https://{{ cluster_bind_address }}:{{ management_port }}"
+ETCD_INITIAL_ADVERTISE_PEER_URLS="https://{{ cluster_address }}:{{ management_port }}"
+{% if cluster %}
+ETCD_INITIAL_CLUSTER="{{ cluster }}"
+ETCD_INITIAL_CLUSTER_STATE={{ cluster_state }}
+ETCD_INITIAL_CLUSTER_TOKEN={{ token }}
+{% endif %}
+# SSL CONFIGURATION
+ETCD_CERT_FILE={{ server_certificate }}
+ETCD_KEY_FILE={{ server_key }}
+ETCD_TRUSTED_CA_FILE={{ ca_certificate }}
+ETCD_PEER_CERT_FILE={{ server_certificate }}
+ETCD_PEER_KEY_FILE={{ server_key }}
+ETCD_PEER_TRUSTED_CA_FILE={{ ca_certificate }}
+# SSL Strict Mode
+ETCD_PEER_CLIENT_CERT_AUTH=true
+ETCD_CLIENT_CERT_AUTH=true
+ETCD_STRICT_RECONFIG_CHECK=true
diff --git a/etcd/templates/etcd3.conf b/etcd/templates/etcd3.conf
new file mode 100644
index 0000000..754c917
--- /dev/null
+++ b/etcd/templates/etcd3.conf
@@ -0,0 +1,141 @@
+# This is the configuration file for the etcd server.
+
+# Human-readable name for this member.
+name: {{ unit_name }}
+
+# Enable API v2 support for flannel and
+# certain charm executions.
+enable-v2: true
+
+# Path to the data directory.
+data-dir: {{ etcd_data_dir }}
+
+{% if wal_path %}
+# Path to the dedicated wal directory (only rendered when wal_path is set).
+wal-dir: {{ wal_path }}
+{% endif %}
+# Number of committed transactions to trigger a snapshot to disk.
+snapshot-count: 10000
+
+# Time (in milliseconds) of a heartbeat interval.
+heartbeat-interval: 100
+
+# Time (in milliseconds) for an election to timeout.
+election-timeout: 1000
+
+# Raise alarms when backend size exceeds the given quota. 0 means use the
+# default quota.
+quota-backend-bytes: 0
+
+# List of comma separated URLs to listen on for peer traffic.
+listen-peer-urls: https://{{ cluster_bind_address }}:{{ management_port }}
+# List of comma separated URLs to listen on for client traffic.
+listen-client-urls: http://127.0.0.1:4001,https://{{ db_bind_address }}:{{ port }}
+
+# Maximum number of snapshot files to retain (0 is unlimited).
+max-snapshots: 5
+
+# Maximum number of wal files to retain (0 is unlimited).
+max-wals: 5
+
+# Comma-separated white list of origins for CORS (cross-origin resource sharing).
+cors:
+
+# List of this member's peer URLs to advertise to the rest of the cluster.
+# The URLs needed to be a comma-separated list.
+initial-advertise-peer-urls: https://{{ cluster_address }}:{{ management_port }}
+
+# List of this member's client URLs to advertise to the public.
+# The URLs needed to be a comma-separated list.
+advertise-client-urls: https://{{ db_address }}:{{ port }}
+
+# Discovery URL used to bootstrap the cluster.
+discovery:
+
+# Valid values include 'exit', 'proxy'
+discovery-fallback: 'proxy'
+
+# HTTP proxy to use for traffic to discovery service.
+discovery-proxy:
+
+# DNS domain used to bootstrap initial cluster.
+discovery-srv:
+
+# Initial cluster configuration for bootstrapping.
+initial-cluster: {{ cluster }}
+
+# Initial cluster token for the etcd cluster during bootstrap.
+initial-cluster-token: '{{ token }}'
+
+# Initial cluster state ('new' or 'existing').
+initial-cluster-state: {{ cluster_state }}
+
+# Reject reconfiguration requests that would cause quorum loss.
+strict-reconfig-check: true
+
+# Valid values include 'on', 'readonly', 'off'
+proxy: 'off'
+
+# Time (in milliseconds) an endpoint will be held in a failed state.
+proxy-failure-wait: 5000
+
+# Time (in milliseconds) of the endpoints refresh interval.
+proxy-refresh-interval: 30000
+
+# Time (in milliseconds) for a dial to timeout.
+proxy-dial-timeout: 1000
+
+# Time (in milliseconds) for a write to timeout.
+proxy-write-timeout: 5000
+
+# Time (in milliseconds) for a read to timeout.
+proxy-read-timeout: 0
+
+client-transport-security:
+ # DEPRECATED: Path to the client server TLS CA file.
+ # ca-file: {{ ca_certificate }}
+
+ # Path to the client server TLS cert file.
+ cert-file: {{ server_certificate }}
+
+ # Path to the client server TLS key file.
+ key-file: {{ server_key }}
+
+ # Enable client cert authentication.
+ client-cert-auth: true
+
+ # Path to the client server TLS trusted CA key file.
+ trusted-ca-file: {{ ca_certificate }}
+
+ # Client TLS using generated certificates
+ auto-tls: false
+
+peer-transport-security:
+ # DEPRECATED: Path to the peer server TLS CA file.
+ # ca-file: {{ ca_certificate }}
+
+ # Path to the peer server TLS cert file.
+ cert-file: {{ server_certificate }}
+
+ # Path to the peer server TLS key file.
+ key-file: {{ server_key }}
+
+ # Enable peer client cert authentication.
+ client-cert-auth: true
+
+ # Path to the peer server TLS trusted CA key file.
+ trusted-ca-file: {{ ca_certificate }}
+
+ # Peer TLS using generated certificates.
+ auto-tls: false
+
+# Enable debug-level logging for etcd.
+debug: false
+
+{% if loglevel %}
+# Specify a particular log level for each etcd package (eg: 'etcdmain=CRITICAL,etcdserver=DEBUG').
+log-package-levels:
+{% endif %}
+
+# Force to create a new one member cluster.
+force-new-cluster: false
diff --git a/etcd/templates/service-always-restart.systemd-229.conf b/etcd/templates/service-always-restart.systemd-229.conf
new file mode 100644
index 0000000..d5cf4b1
--- /dev/null
+++ b/etcd/templates/service-always-restart.systemd-229.conf
@@ -0,0 +1,5 @@
+[Unit]
+StartLimitInterval=0
+
+[Service]
+RestartSec=10
diff --git a/etcd/templates/service-always-restart.systemd-latest.conf b/etcd/templates/service-always-restart.systemd-latest.conf
new file mode 100644
index 0000000..3dd37ab
--- /dev/null
+++ b/etcd/templates/service-always-restart.systemd-latest.conf
@@ -0,0 +1,5 @@
+[Unit]
+StartLimitIntervalSec=0
+
+[Service]
+RestartSec=10
diff --git a/etcd/tests/10-deploy.py b/etcd/tests/10-deploy.py
new file mode 100755
index 0000000..4c336d8
--- /dev/null
+++ b/etcd/tests/10-deploy.py
@@ -0,0 +1,113 @@
+#!/usr/bin/env python3
+
+import amulet
+import unittest
+import re
+
+
+class TestDeployment(unittest.TestCase):
+ @classmethod
+ def setUpClass(cls):
+ cls.d = amulet.Deployment(series='xenial')
+ cls.d.add('etcd')
+ cls.d.add('easyrsa', 'cs:~containers/easyrsa')
+ cls.d.configure('etcd', {'channel': '3.0/stable'})
+ cls.d.relate('easyrsa:client', 'etcd:certificates')
+ cls.d.setup(timeout=1200)
+ cls.d.sentry.wait_for_messages({'etcd':
+ re.compile('Healthy*|Unhealthy*')})
+ # cls.d.sentry.wait()
+ cls.etcd = cls.d.sentry['etcd']
+ # find the leader
+ for unit in cls.etcd:
+ leader_result = unit.run('is-leader')
+ if leader_result[0] == 'True':
+ cls.leader = unit
+
+ def test_leader_status(self):
+ ''' Verify our leader is running the etcd daemon '''
+ status = self.leader.run('systemctl is-active snap.etcd.etcd')
+ self.assertFalse("inactive" in status[0])
+ self.assertTrue("active" in status[0])
+
+ def test_config_snapd_refresh(self):
+ ''' Verify initial snap refresh config is set and can be changed '''
+ # default timer should be some day of the week followed by a number
+ timer = self.leader.run('snap get core refresh.timer')
+ self.assertTrue(len(timer[0]) == len('dayX'))
+
+ # verify a new timer value
+ self.d.configure('etcd', {'snapd_refresh': 'fri5'})
+ self.d.sentry.wait()
+ timer = self.leader.run('snap get core refresh.timer')
+ self.assertTrue(timer[0] == 'fri5')
+
+ def test_node_scale(self):
+ ''' Scale beyond 1 node because etcd supports peering as a standalone
+ application.'''
+ # Ensure we aren't testing a single node
+ if not len(self.etcd) > 1:
+ self.d.add_unit('etcd', timeout=1200)
+ self.d.sentry.wait()
+
+ for unit in self.etcd:
+ status = unit.run('systemctl is-active snap.etcd.etcd')
+ self.assertFalse(status[1] == 1)
+ self.assertFalse("inactive" in status[0])
+ self.assertTrue("active" in status[0])
+
+ def test_cluster_health(self):
+ ''' Iterate all the units and verify we have a clean bill of health
+ from etcd '''
+
+ certs = "ETCDCTL_KEY_FILE=/var/snap/etcd/common/client.key " \
+ "ETCDCTL_CERT_FILE=/var/snap/etcd/common/client.crt " \
+ "ETCDCTL_CA_FILE=/var/snap/etcd/common/ca.crt " \
+ "ETCDCTL_KEY=/var/snap/etcd/common/client.key " \
+ "ETCDCTL_CERT=/var/snap/etcd/common/client.crt " \
+ "ETCDCTL_CACERT=/var/snap/etcd/common/ca.crt"
+
+ for unit in self.etcd:
+ cmd = '{} /snap/bin/etcdctl cluster-health'.format(certs)
+ health = unit.run(cmd)
+ self.assertTrue('unhealthy' not in health[0])  # run() -> (output, rc)
+ self.assertTrue('unavailable' not in health[0])
+
+ def test_leader_knows_all_members(self):
+ ''' Test we have the same number of units deployed and reporting in
+ the etcd cluster as participating'''
+
+ # The spacing here is semi-important as it's a string of ENV exports
+ # also, this is hard coding for the defaults. if the defaults in
+ # layer.yaml change, this will need to change.
+ certs = "ETCDCTL_KEY_FILE=/var/snap/etcd/common/client.key " \
+ "ETCDCTL_CERT_FILE=/var/snap/etcd/common/client.crt " \
+ "ETCDCTL_CA_FILE=/var/snap/etcd/common/ca.crt " \
+ "ETCDCTL_KEY=/var/snap/etcd/common/client.key " \
+ "ETCDCTL_CERT=/var/snap/etcd/common/client.crt " \
+ "ETCDCTL_CACERT=/var/snap/etcd/common/ca.crt"
+
+ # format the command, and execute on the leader
+ cmd = '{} etcdctl member list'.format(certs)
+ out = self.leader.run(cmd)[0]
+ # turn the output into a list so we can iterate
+ members = out.split('\n')
+ for item in members:
+ # this is responded when TLS is enabled and we don't have proper
+ # Keys. This is kind of a "ssl works test" but of the worst
+ # variety... assuming the full stack completed.
+ self.assertTrue('etcd cluster is unavailable' not in item)
+ self.assertTrue(len(members) == len(self.etcd))
+
+ def test_node_scale_down_members(self):
+ ''' Scale the cluster down and ensure the cluster state is still
+ healthy '''
+ # Remove the leader
+ self.d.remove_unit(self.leader.info['unit_name'])
+ self.d.sentry.wait()
+ # re-use the cluster-health test to validate we are still healthy.
+ self.test_cluster_health()
+
+
+if __name__ == '__main__':
+ unittest.main()
diff --git a/etcd/tests/20-actions.py b/etcd/tests/20-actions.py
new file mode 100755
index 0000000..28c651e
--- /dev/null
+++ b/etcd/tests/20-actions.py
@@ -0,0 +1,152 @@
+#!/usr/bin/env python3
+
+import os
+import re
+import unittest
+import subprocess
+
+import amulet
+
+
+class TestActions(unittest.TestCase):
+ @classmethod
+ def setUpClass(cls):
+ cls.d = amulet.Deployment(series='xenial')
+ cls.d.add('etcd')
+ cls.d.add('easyrsa', 'cs:~containers/easyrsa')
+ cls.d.configure('etcd', {'channel': '3.0/stable'})
+ cls.d.relate('easyrsa:client', 'etcd:certificates')
+ cls.d.setup(timeout=1200)
+ cls.d.sentry.wait_for_messages({'etcd':
+ re.compile('Healthy*|Unhealthy*')})
+ # cls.d.sentry.wait()
+ cls.etcd = cls.d.sentry['etcd']
+
+ def test_health_check(self):
+ """
+ Trigger health action
+ """
+ action_id = self.etcd[0].run_action('health')
+ outcome = self.d.action_fetch(action_id,
+ timeout=7200,
+ raise_on_timeout=True,
+ full_output=True)
+ self.assertEqual(outcome['status'], 'completed')
+ self.assertTrue("cluster is healthy" in outcome['results']['result-map']['message'])
+
+ def test_snapshot_restore(self):
+ """
+ Trigger snapshot and restore actions
+ """
+ # Load dummy data
+ self.load_data()
+ self.assertTrue(self.is_data_present('v2'))
+ self.assertTrue(self.is_data_present('v3'))
+
+ filenames = {}
+ for dataset in ['v2', 'v3']:
+ # Take snapshot of data
+ action_id = self.etcd[0].run_action('snapshot', {'keys-version': dataset})
+ outcome = self.d.action_fetch(action_id,
+ timeout=7200,
+ raise_on_timeout=True,
+ full_output=True)
+ self.assertEqual(outcome['status'], 'completed')
+ cpcmd = outcome['results']['copy']['cmd']
+ subprocess.check_call(cpcmd.split())
+ filenames[dataset] = os.path.basename(outcome['results']['snapshot']['path'])
+
+ self.delete_data()
+ self.assertFalse(self.is_data_present('v2'))
+ self.assertFalse(self.is_data_present('v3'))
+
+ # Restore v2 data
+ cmd = 'juju attach etcd snapshot=%s' % filenames['v2']
+ subprocess.check_call(cmd.split())
+ action_id = self.etcd[0].run_action('restore')
+ outcome = self.d.action_fetch(action_id,
+ timeout=7200,
+ raise_on_timeout=True,
+ full_output=True)
+ self.assertEqual(outcome['status'], 'completed')
+ self.assertTrue(self.is_data_present('v2'))
+ self.assertFalse(self.is_data_present('v3'))
+
+ # Restore v3 data
+ cmd = 'juju attach etcd snapshot=%s' % filenames['v3']
+ subprocess.check_call(cmd.split())
+ action_id = self.etcd[0].run_action('restore')
+ outcome = self.d.action_fetch(action_id,
+ timeout=7200,
+ raise_on_timeout=True,
+ full_output=True)
+ self.assertEqual(outcome['status'], 'completed')
+ self.assertFalse(self.is_data_present('v2'))
+ self.assertTrue(self.is_data_present('v3'))
+
+ def load_data(self):
+ """
+ Load dummy data
+
+ """
+ certs = "ETCDCTL_KEY_FILE=/var/snap/etcd/common/client.key " \
+ "ETCDCTL_CERT_FILE=/var/snap/etcd/common/client.crt " \
+ "ETCDCTL_CA_FILE=/var/snap/etcd/common/ca.crt " \
+ "ETCDCTL_KEY=/var/snap/etcd/common/client.key " \
+ "ETCDCTL_CERT=/var/snap/etcd/common/client.crt " \
+ "ETCDCTL_CACERT=/var/snap/etcd/common/ca.crt"
+
+ cmd = '{} ETCDCTL_API=2 /snap/bin/etcdctl set /etcd2key etcd2value'.format(certs)
+ self.etcd[0].run(cmd)
+ cmd = '{} ETCDCTL_API=3 /snap/bin/etcdctl --endpoints=http://localhost:4001 ' \
+ 'put etcd3key etcd3value'.format(certs)
+ self.etcd[0].run(cmd)
+
+ def is_data_present(self, version):
+ '''
+ Check if we have the data present on the datastore of the version
+ Args:
+ version: v2 or v3 etcd datastore
+
+ Returns: True if the data is present
+
+ '''
+ certs = "ETCDCTL_KEY_FILE=/var/snap/etcd/common/client.key " \
+ "ETCDCTL_CERT_FILE=/var/snap/etcd/common/client.crt " \
+ "ETCDCTL_CA_FILE=/var/snap/etcd/common/ca.crt " \
+ "ETCDCTL_KEY=/var/snap/etcd/common/client.key " \
+ "ETCDCTL_CERT=/var/snap/etcd/common/client.crt " \
+ "ETCDCTL_CACERT=/var/snap/etcd/common/ca.crt"
+
+ if version == 'v2':
+ cmd = '{} ETCDCTL_API=2 /snap/bin/etcdctl ls'.format(certs)
+ data = self.etcd[0].run(cmd)
+ return 'etcd2key' in data[0]
+ elif version == 'v3':
+ cmd = '{} ETCDCTL_API=3 /snap/bin/etcdctl --endpoints=http://localhost:4001 ' \
+ 'get "" --prefix --keys-only'.format(certs)
+ data = self.etcd[0].run(cmd)
+ return 'etcd3key' in data[0]
+ else:
+ return False
+
+ def delete_data(self):
+ '''
+ Delete all dummy data on etcd
+ '''
+ certs = "ETCDCTL_KEY_FILE=/var/snap/etcd/common/client.key " \
+ "ETCDCTL_CERT_FILE=/var/snap/etcd/common/client.crt " \
+ "ETCDCTL_CA_FILE=/var/snap/etcd/common/ca.crt " \
+ "ETCDCTL_KEY=/var/snap/etcd/common/client.key " \
+ "ETCDCTL_CERT=/var/snap/etcd/common/client.crt " \
+ "ETCDCTL_CACERT=/var/snap/etcd/common/ca.crt"
+
+ cmd = '{} ETCDCTL_API=2 /snap/bin/etcdctl rm /etcd2key'.format(certs)
+ self.etcd[0].run(cmd)
+ cmd = '{} ETCDCTL_API=3 /snap/bin/etcdctl --endpoints=http://localhost:4001 ' \
+ 'del etcd3key'.format(certs)
+ self.etcd[0].run(cmd)
+
+
+if __name__ == '__main__':
+ unittest.main()
diff --git a/etcd/tests/30-deb-bundle.yml b/etcd/tests/30-deb-bundle.yml
new file mode 100644
index 0000000..7a9c082
--- /dev/null
+++ b/etcd/tests/30-deb-bundle.yml
@@ -0,0 +1,11 @@
+series: xenial
+applications:
+ easyrsa:
+ charm: cs:~containers/easyrsa
+ num_units: 1
+ etcd:
+ charm: cs:etcd-24
+ num_units: 1
+relations:
+ - - "etcd:certificates"
+ - "easyrsa:client"
diff --git a/etcd/tests/30-deb-snap-migrate.py b/etcd/tests/30-deb-snap-migrate.py
new file mode 100755
index 0000000..007e701
--- /dev/null
+++ b/etcd/tests/30-deb-snap-migrate.py
@@ -0,0 +1,94 @@
+#!/usr/bin/env python3
+
+import amulet
+import os
+import re
+import unittest
+import yaml
+
+from amulet.helpers import juju
+
+TEST_TIMEOUT = 600
+
+
+class TestDeployment(unittest.TestCase):
+ bundle_file = os.path.join(os.path.dirname(__file__), '30-deb-bundle.yml')
+
+ @classmethod
+ def setUpClass(cls):
+ cls.d = amulet.Deployment(series='xenial')
+
+ # Deploy the scenario from the bundle
+ with open(cls.bundle_file) as f:
+ bun = f.read()
+ bundle = yaml.safe_load(bun)
+ cls.d.load(bundle)
+ cls.d.setup(timeout=TEST_TIMEOUT)
+
+ cls.etcd = cls.d.sentry['etcd']
+ # This is a hacky work-around to amulet not supporting charm upgrades.
+ juju(['upgrade-charm', 'etcd', '--path', os.getcwd()])
+ # This is kind of a litmus test.
+ cls.d.sentry.wait_for_messages({'etcd':
+ re.compile('snap-upgrade')})
+
+ # this is the legacy location of these TLS certs. As of rev-25 this is
+ # no longer the case, and this is safe to leave as is for the remainder
+ # of this tests lifecycle.
+ certs = "ETCDCTL_KEY_FILE=/etc/ssl/etcd/client.key " \
+ "ETCDCTL_CERT_FILE=/etc/ssl/etcd/client.crt " \
+ "ETCDCTL_CA_FILE=/etc/ssl/etcd/ca.crt"
+
+ # preseed the deployment with some data keys before releasing execution
+ cls.etcd[0].run('{} etcdctl set juju rocks'.format(certs))
+ cls.etcd[0].run('{} etcdctl set nested/data works'.format(certs))
+
+ def test_snap_action(self):
+ ''' When the charm is upgraded, a message should appear requesting the
+ user to run a manual upgrade.'''
+
+ action_id = self.etcd[0].run_action('snap-upgrade')
+ # This by default waits 600 seconds, increase in slower clouds.
+ out = self.d.get_action_output(action_id, full_output=True)
+ # This will be failed if the upgrade didn't work
+ assert 'completed' in out['status']
+ # This will be missing if the operation bailed early
+ assert 'results' in out.keys()
+ self.validate_running_snap_daemon()
+ self.validate_etcd_fixture_data()
+
+ def test_snap_upgrade_to_three_oh(self):
+ ''' Default configured channel is 2.3/stable. Ensure we can jump to
+ 3.0 '''
+ self.d.configure('etcd', {'channel': '3.0/stable'})
+ self.d.sentry.wait()
+ self.validate_running_snap_daemon()
+ self.validate_etcd_fixture_data()
+
+ def validate_etcd_fixture_data(self):
+ ''' Recall data set by set_etcd_fixture_data to ensure it persisted
+ through the upgrade '''
+
+ # The spacing here is semi-important as its a string of ENV exports
+ # also, this is hard coding for the defaults. if the defaults in
+ # layer.yaml change, this will need to change.
+ certs = "ETCDCTL_KEY_FILE=/var/snap/etcd/common/client.key " \
+ "ETCDCTL_CERT_FILE=/var/snap/etcd/common/client.crt " \
+ "ETCDCTL_CA_FILE=/var/snap/etcd/common/ca.crt"
+
+ jcmd = "{} /snap/bin/etcd.etcdctl get juju".format(certs)
+ juju_key = self.etcd[0].run(jcmd)
+ nscmd = "{} /snap/bin/etcd.etcdctl get nested/data".format(certs)
+ nested_key = self.etcd[0].run(nscmd)
+
+ assert 'rocks' in juju_key[0]
+ assert 'works' in nested_key[0]
+
+ def validate_running_snap_daemon(self):
+ ''' Validate the snap based etcd daemon is running after an op '''
+ daemon_status = self.etcd[0].run('systemctl is-active snap.etcd.etcd')
+ assert 'active' in daemon_status[0]
+
+
+if __name__ == '__main__':
+ unittest.main()
diff --git a/etcd/tests/conftest.py b/etcd/tests/conftest.py
new file mode 100644
index 0000000..97d7668
--- /dev/null
+++ b/etcd/tests/conftest.py
@@ -0,0 +1,5 @@
+import charms.unit_test
+
+
+charms.unit_test.patch_reactive()
+charms.unit_test.patch_module('charms.leadership')
diff --git a/etcd/tests/snap-upgrade.yaml b/etcd/tests/snap-upgrade.yaml
new file mode 100644
index 0000000..6fd8a3d
--- /dev/null
+++ b/etcd/tests/snap-upgrade.yaml
@@ -0,0 +1,3 @@
+tests: "30-*"
+reset: true
+makefile: []
diff --git a/etcd/tests/tests.yaml b/etcd/tests/tests.yaml
new file mode 100644
index 0000000..c8e5257
--- /dev/null
+++ b/etcd/tests/tests.yaml
@@ -0,0 +1,7 @@
+tests: "[1-2]0-*"
+reset: false
+makefile:
+ - lint
+packages:
+ - amulet
+ - tox
diff --git a/etcd/tox.ini b/etcd/tox.ini
new file mode 100644
index 0000000..a89395f
--- /dev/null
+++ b/etcd/tox.ini
@@ -0,0 +1,25 @@
+[flake8]
+max-line-length = 120
+
+[tox]
+skipsdist = True
+envlist = lint,py3
+
+[testenv]
+basepython = python3
+setenv =
+ PYTHONPATH={toxinidir}:{toxinidir}/lib
+deps =
+ pyyaml
+ pytest
+ pytest-cov
+ flake8
+ git+https://github.com/juju-solutions/charms.unit_test/#egg=charms.unit_test
+commands =
+ pytest --cov-report term-missing \
+ --cov lib --cov-fail-under 33 \
+ --tb native -s {posargs}
+
+[testenv:lint]
+envdir = {toxworkdir}/py3
+commands = flake8 {toxinidir}/reactive {toxinidir}/lib {toxinidir}/tests {toxinidir}/unit_tests
diff --git a/etcd/unit_tests/test_etcdctl.py b/etcd/unit_tests/test_etcdctl.py
new file mode 100644
index 0000000..5a36be7
--- /dev/null
+++ b/etcd/unit_tests/test_etcdctl.py
@@ -0,0 +1,152 @@
+import pytest
+from unittest.mock import patch
+
+from etcdctl import (
+ EtcdCtl,
+ etcdctl_command,
+ get_connection_string,
+) # noqa
+
+from etcd_databag import EtcdDatabag
+
+from reactive.etcd import (
+ pre_series_upgrade,
+ post_series_upgrade,
+ status,
+ clear_flag,
+ host,
+ force_rejoin_requested,
+ force_rejoin,
+)
+
+
+class TestEtcdCtl:
+
+ @pytest.fixture
+ def etcdctl(self):
+ return EtcdCtl()
+
+ def test_register(self, etcdctl):
+ with patch('etcdctl.EtcdCtl.run') as spcm:
+ etcdctl.register({'cluster_address': '127.0.0.1',
+ 'unit_name': 'etcd0',
+ 'management_port': '1313',
+ 'leader_address': 'http://127.1.1.1:1212'})
+ spcm.assert_called_with('member add etcd0 https://127.0.0.1:1313', api=2, endpoints='http://127.1.1.1:1212')
+
+ def test_unregister(self, etcdctl):
+ with patch('etcdctl.EtcdCtl.run') as spcm:
+ etcdctl.unregister('br1212121212')
+
+ spcm.assert_called_with(['member', 'remove', 'br1212121212'], api=2, endpoints=None)
+
+ def test_member_list(self, etcdctl):
+ with patch('etcdctl.EtcdCtl.run') as comock:
+ comock.return_value = '7dc8404daa2b8ca0: name=etcd22 peerURLs=https://10.113.96.220:2380 clientURLs=https://10.113.96.220:2379\n' # noqa
+ members = etcdctl.member_list()
+ assert(members['etcd22']['unit_id'] == '7dc8404daa2b8ca0')
+ assert(members['etcd22']['peer_urls'] == 'https://10.113.96.220:2380')
+ assert(members['etcd22']['client_urls'] == 'https://10.113.96.220:2379')
+
+ def test_member_list_with_unstarted_member(self, etcdctl):
+ ''' Validate we receive information only about members we can parse
+ from the current status string '''
+ # 57fa5c39949c138e[unstarted]: peerURLs=http://10.113.96.80:2380
+ # bb0f83ebb26386f7: name=etcd9 peerURLs=https://10.113.96.178:2380 clientURLs=https://10.113.96.178:2379
+ with patch('etcdctl.EtcdCtl.run') as comock:
+ comock.return_value = '57fa5c39949c138e[unstarted]: peerURLs=http://10.113.96.80:2380]\nbb0f83ebb26386f7: name=etcd9 peerURLs=https://10.113.96.178:2380 clientURLs=https://10.113.96.178:2379\n' # noqa
+ members = etcdctl.member_list()
+ assert(members['etcd9']['unit_id'] == 'bb0f83ebb26386f7')
+ assert(members['etcd9']['peer_urls'] == 'https://10.113.96.178:2380')
+ assert(members['etcd9']['client_urls'] == 'https://10.113.96.178:2379')
+ assert('unstarted' in members.keys())
+ assert(members['unstarted']['unit_id'] == '57fa5c39949c138e')
+ assert("10.113.96.80:2380" in members['unstarted']['peer_urls'])
+
+ def test_etcd_v2_version(self, etcdctl):
+ ''' Validate that etcdctl can parse versions for both etcd v2 and
+ etcd v3 '''
+ # Define fixtures of what we expect for the version output
+ etcdctl_2_version = b"etcdctl version 2.3.8\n"
+ with patch('etcdctl.check_output') as comock:
+ comock.return_value = etcdctl_2_version
+ ver = etcdctl.version()
+ assert(ver == '2.3.8')
+
+ def test_etcd_v3_version(self, etcdctl):
+ ''' Validate that etcdctl can parse version for etcdctl v3 '''
+ etcdctl_3_version = b"etcdctl version: 3.0.17\nAPI version: 2\n"
+ with patch('etcdctl.check_output') as comock:
+ comock.return_value = etcdctl_3_version
+ ver = etcdctl.version()
+ assert(ver == '3.0.17')
+
+ def test_etcdctl_command(self):
+ ''' Validate sane results from etcdctl_command '''
+ assert(isinstance(etcdctl_command(), str))
+
+ def test_etcdctl_environment_with_version_2(self, etcdctl):
+ ''' Validate that environment gets set correctly
+ spoiler alert; it shouldn't be set when passing --version '''
+ with patch('etcdctl.check_output') as comock:
+ etcdctl.run('member list', api=2)
+ api_version = comock.call_args[1].get('env').get('ETCDCTL_API')
+ assert(api_version == '2')
+
+ def test_etcdctl_environment_with_version_3(self, etcdctl):
+ ''' Validate that environment gets set correctly
+ spoiler alert; it shouldn't be set when passing --version '''
+ with patch('etcdctl.check_output') as comock:
+ etcdctl.run('member list', api=3)
+ api_version = comock.call_args[1].get('env').get('ETCDCTL_API')
+ assert(api_version == '3')
+
+ def test_get_connection_string(self):
+ ''' Validate the get_connection_string function
+ gives a sane return.
+ '''
+ assert(
+ get_connection_string(['1.1.1.1'], '1111') ==
+ 'https://1.1.1.1:1111'
+ )
+
+ def test_series_upgrade(self):
+ assert host.service_pause.call_count == 0
+ assert host.service_resume.call_count == 0
+ assert status.blocked.call_count == 0
+ pre_series_upgrade()
+ assert host.service_pause.call_count == 1
+ assert host.service_resume.call_count == 0
+ assert status.blocked.call_count == 1
+ post_series_upgrade()
+ assert host.service_pause.call_count == 1
+ assert host.service_resume.call_count == 1
+ assert status.blocked.call_count == 1
+
+ @patch('reactive.etcd.force_rejoin')
+ @patch('reactive.etcd.check_cluster_health')
+ def test_rejoin_trigger(self, cluster_health_mock,
+ rejoin_mock):
+ """Test that unit will trigger force_rejoin on new request"""
+ force_rejoin_requested()
+
+ rejoin_mock.assert_called_once()
+ cluster_health_mock.assert_called_once()
+
+ @patch('reactive.etcd.register_node_with_leader')
+ @patch('os.path.exists')
+ @patch('shutil.rmtree')
+ @patch('os.path.join')
+ @patch('time.sleep')
+ def test_force_rejoin(self, sleep, path_join, rmtree, path_exists,
+ register_node):
+ """Test that force_rejoin performs required steps."""
+ data_dir = '/foo/bar'
+ path_exists.return_value = True
+ path_join.return_value = data_dir
+ force_rejoin()
+
+ host.service_stop.assert_called_with(EtcdDatabag().etcd_daemon)
+ clear_flag.assert_called_with('etcd.registered')
+ rmtree.assert_called_with(data_dir)
+ register_node.assert_called()
diff --git a/etcd/version b/etcd/version
new file mode 100644
index 0000000..1dea0b1
--- /dev/null
+++ b/etcd/version
@@ -0,0 +1 @@
+e247aeff
\ No newline at end of file
diff --git a/etcd/wheelhouse.txt b/etcd/wheelhouse.txt
new file mode 100644
index 0000000..15a0aab
--- /dev/null
+++ b/etcd/wheelhouse.txt
@@ -0,0 +1,25 @@
+# layer:basic
+# pip is pinned to <19.0 to avoid https://github.com/pypa/pip/issues/6164
+# even when setuptools is installed before upgrading pip, pip still sees
+# the older setuptools at the system level if include_system_packages is true
+pip>=18.1,<19.0
+# pin Jinja2 and PyYAML to the last versions supporting python 3.4 for trusty
+Jinja2<=2.10.1
+PyYAML<=5.2
+setuptools<42
+setuptools-scm<=1.17.0
+charmhelpers>=0.4.0,<1.0.0
+charms.reactive>=0.1.0,<2.0.0
+wheel<0.34
+# pin netaddr to avoid pulling importlib-resources
+# netaddr<=0.7.19 # overridden by etcd
+
+# layer:snap
+tenacity
+
+# etcd
+charms.templating.jinja2>=1.0.0,<2.0.0
+
+# pin netaddr to avoid pulling importlib-resources from above lib
+netaddr<=0.7.19
+
diff --git a/etcd/wheelhouse/Jinja2-2.10.1.tar.gz b/etcd/wheelhouse/Jinja2-2.10.1.tar.gz
new file mode 100644
index 0000000..ffd1054
Binary files /dev/null and b/etcd/wheelhouse/Jinja2-2.10.1.tar.gz differ
diff --git a/etcd/wheelhouse/MarkupSafe-1.1.1.tar.gz b/etcd/wheelhouse/MarkupSafe-1.1.1.tar.gz
new file mode 100644
index 0000000..a6dad8e
Binary files /dev/null and b/etcd/wheelhouse/MarkupSafe-1.1.1.tar.gz differ
diff --git a/etcd/wheelhouse/PyYAML-5.2.tar.gz b/etcd/wheelhouse/PyYAML-5.2.tar.gz
new file mode 100644
index 0000000..666d12a
Binary files /dev/null and b/etcd/wheelhouse/PyYAML-5.2.tar.gz differ
diff --git a/etcd/wheelhouse/Tempita-0.5.2.tar.gz b/etcd/wheelhouse/Tempita-0.5.2.tar.gz
new file mode 100644
index 0000000..755befc
Binary files /dev/null and b/etcd/wheelhouse/Tempita-0.5.2.tar.gz differ
diff --git a/etcd/wheelhouse/charmhelpers-0.20.21.tar.gz b/etcd/wheelhouse/charmhelpers-0.20.21.tar.gz
new file mode 100644
index 0000000..ca65d07
Binary files /dev/null and b/etcd/wheelhouse/charmhelpers-0.20.21.tar.gz differ
diff --git a/etcd/wheelhouse/charms.reactive-1.4.1.tar.gz b/etcd/wheelhouse/charms.reactive-1.4.1.tar.gz
new file mode 100644
index 0000000..03bc1fe
Binary files /dev/null and b/etcd/wheelhouse/charms.reactive-1.4.1.tar.gz differ
diff --git a/etcd/wheelhouse/charms.templating.jinja2-1.0.2.tar.gz b/etcd/wheelhouse/charms.templating.jinja2-1.0.2.tar.gz
new file mode 100644
index 0000000..5c03a81
Binary files /dev/null and b/etcd/wheelhouse/charms.templating.jinja2-1.0.2.tar.gz differ
diff --git a/etcd/wheelhouse/netaddr-0.7.19.tar.gz b/etcd/wheelhouse/netaddr-0.7.19.tar.gz
new file mode 100644
index 0000000..cc31d9d
Binary files /dev/null and b/etcd/wheelhouse/netaddr-0.7.19.tar.gz differ
diff --git a/etcd/wheelhouse/pbr-5.6.0.tar.gz b/etcd/wheelhouse/pbr-5.6.0.tar.gz
new file mode 100644
index 0000000..0d5c965
Binary files /dev/null and b/etcd/wheelhouse/pbr-5.6.0.tar.gz differ
diff --git a/etcd/wheelhouse/pip-18.1.tar.gz b/etcd/wheelhouse/pip-18.1.tar.gz
new file mode 100644
index 0000000..a18192d
Binary files /dev/null and b/etcd/wheelhouse/pip-18.1.tar.gz differ
diff --git a/etcd/wheelhouse/pyaml-20.4.0.tar.gz b/etcd/wheelhouse/pyaml-20.4.0.tar.gz
new file mode 100644
index 0000000..0d5fd76
Binary files /dev/null and b/etcd/wheelhouse/pyaml-20.4.0.tar.gz differ
diff --git a/etcd/wheelhouse/setuptools-41.6.0.zip b/etcd/wheelhouse/setuptools-41.6.0.zip
new file mode 100644
index 0000000..3345759
Binary files /dev/null and b/etcd/wheelhouse/setuptools-41.6.0.zip differ
diff --git a/etcd/wheelhouse/setuptools_scm-1.17.0.tar.gz b/etcd/wheelhouse/setuptools_scm-1.17.0.tar.gz
new file mode 100644
index 0000000..43b16c7
Binary files /dev/null and b/etcd/wheelhouse/setuptools_scm-1.17.0.tar.gz differ
diff --git a/etcd/wheelhouse/six-1.15.0.tar.gz b/etcd/wheelhouse/six-1.15.0.tar.gz
new file mode 100644
index 0000000..63329e4
Binary files /dev/null and b/etcd/wheelhouse/six-1.15.0.tar.gz differ
diff --git a/etcd/wheelhouse/tenacity-7.0.0.tar.gz b/etcd/wheelhouse/tenacity-7.0.0.tar.gz
new file mode 100644
index 0000000..2050c4d
Binary files /dev/null and b/etcd/wheelhouse/tenacity-7.0.0.tar.gz differ
diff --git a/etcd/wheelhouse/wheel-0.33.6.tar.gz b/etcd/wheelhouse/wheel-0.33.6.tar.gz
new file mode 100644
index 0000000..c922c4e
Binary files /dev/null and b/etcd/wheelhouse/wheel-0.33.6.tar.gz differ
diff --git a/kubeapi-load-balancer/.build.manifest b/kubeapi-load-balancer/.build.manifest
new file mode 100644
index 0000000..0359a9e
--- /dev/null
+++ b/kubeapi-load-balancer/.build.manifest
@@ -0,0 +1,926 @@
+{
+ "layers": [
+ {
+ "branch": "refs/heads/master\nrefs/heads/stable",
+ "rev": "f491ebe32b503c9712d2f8cd602dcce18f4aab46",
+ "url": "layer:metrics"
+ },
+ {
+ "branch": "refs/heads/master\nrefs/heads/stable",
+ "rev": "fcdcea4e5de3e1556c24e6704607862d0ba00a56",
+ "url": "layer:options"
+ },
+ {
+ "branch": "refs/heads/master\nrefs/heads/stable",
+ "rev": "623e69c7b432456fd4364f6e1835424fd6b5425e",
+ "url": "layer:basic"
+ },
+ {
+ "branch": "refs/heads/master\nrefs/heads/stable",
+ "rev": "47dfcd4920ef6317850a4837ef0057ab0092a18e",
+ "url": "layer:nagios"
+ },
+ {
+ "branch": "refs/heads/master\nrefs/heads/stable",
+ "rev": "a7d7b6423db37a47611310039e6ed1929c0a2eab",
+ "url": "layer:status"
+ },
+ {
+ "branch": "refs/heads/master\nrefs/heads/stable",
+ "rev": "76bddfb640ab8767fc7e4a4b73a4a4e781948f34",
+ "url": "layer:apt"
+ },
+ {
+ "branch": "refs/heads/master\nrefs/heads/stable",
+ "rev": "672d27695b512e50f51777b1eb63c5ff157b3d9e",
+ "url": "layer:nginx"
+ },
+ {
+ "branch": "refs/heads/master\nrefs/heads/stable",
+ "rev": "527dd64fc4b9a6b0f8d80a3c2c0b865155050275",
+ "url": "layer:debug"
+ },
+ {
+ "branch": "refs/heads/master\nrefs/heads/stable",
+ "rev": "fb46dec78d390571753d21876bbba689bbbca9e4",
+ "url": "layer:tls-client"
+ },
+ {
+ "branch": "refs/heads/master\nrefs/heads/stable",
+ "rev": "763297a075b3654f261af20c84b940d87f55354e",
+ "url": "layer:kubernetes-common"
+ },
+ {
+ "branch": "refs/heads/master\nrefs/heads/stable",
+ "rev": "a8f88f16bb7771807a0f7fdb17ee16b0e310fc2b",
+ "url": "layer:hacluster"
+ },
+ {
+ "branch": "refs/heads/stable",
+ "rev": "74da66505e2e8470cd47ed0c1d56fcec843da87b",
+ "url": "kubeapi-load-balancer"
+ },
+ {
+ "branch": "refs/heads/master\nrefs/heads/stable",
+ "rev": "2e0e1fdea6d83b55078200aacb537d60013ec5bc",
+ "url": "interface:nrpe-external-master"
+ },
+ {
+ "branch": "refs/heads/master\nrefs/heads/stable",
+ "rev": "632131b1f122daf6fb601fd4c9f1e4dbb1a92e09",
+ "url": "interface:http"
+ },
+ {
+ "branch": "refs/heads/master\nrefs/heads/stable",
+ "rev": "d9850016d930a6d507b9fd45e2598d327922b140",
+ "url": "interface:tls-certificates"
+ },
+ {
+ "branch": "refs/heads/master\nrefs/heads/stable",
+ "rev": "6c611a3c61909fda411f7a79af53908ec7bef2c8",
+ "url": "interface:hacluster"
+ },
+ {
+ "branch": "refs/heads/master\nrefs/heads/stable",
+ "rev": "5021f8a23f6e6e4cc449d2d02f2d8cb99763ec27",
+ "url": "interface:public-address"
+ }
+ ],
+ "signatures": {
+ ".build.manifest": [
+ "build",
+ "dynamic",
+ "unchecked"
+ ],
+ ".github/workflows/build.yml": [
+ "kubeapi-load-balancer",
+ "static",
+ "f6bb08b7b2cffefc0cacdee5bb1c356f30782dbc6be5591f2db186fcd446d43f"
+ ],
+ ".github/workflows/tox.yaml": [
+ "kubeapi-load-balancer",
+ "static",
+ "c323f9ca1fe5bf1369f80d8958be49ad8fd2f6635528865017c357591d31542e"
+ ],
+ ".gitignore": [
+ "kubeapi-load-balancer",
+ "static",
+ "3437c2cd90de443f44766939172b82e750e19fd474df499ffe003bb807e8cef4"
+ ],
+ ".travis/profile-update.yaml": [
+ "layer:basic",
+ "static",
+ "731e20aa59bf61c024d317ad630e478301a9386ccc0afe56e6c1c09db07ac83b"
+ ],
+ "AUTHORS": [
+ "layer:nginx",
+ "static",
+ "5e460cc5d7fe5ce6dc5c4e8eefc13159ee58874667baf9af3b5fa9b597a10fa2"
+ ],
+ "CONTRIBUTING.md": [
+ "kubeapi-load-balancer",
+ "static",
+ "7155516596ae597b0b7065f0463ff69031d689c0fc565998b51c06d999129d5a"
+ ],
+ "LICENSE": [
+ "kubeapi-load-balancer",
+ "static",
+ "58d1e17ffe5109a7ae296caafcadfdbe6a7d176f0bc4ab01e12a689b0499d8bd"
+ ],
+ "Makefile": [
+ "kubeapi-load-balancer",
+ "static",
+ "49ced5fd917cecc5aa65c83ffa2a829de8e02e7c0fb8c0e88163064e7b93f8af"
+ ],
+ "README.md": [
+ "kubeapi-load-balancer",
+ "static",
+ "55d7f0325996fff89a6b4277acd58e0e04c62a977ab83bd45c288789cd29dd9d"
+ ],
+ "actions.yaml": [
+ "layer:debug",
+ "dynamic",
+ "cea290e28bc78458ea4a56dcad39b9a880c67e4ba53b774ac46bd8778618c7b9"
+ ],
+ "actions/debug": [
+ "layer:debug",
+ "static",
+ "db0a42dae4c5045b2c06385bf22209dfe0e2ded55822ef847d84b01d9ff2b046"
+ ],
+ "bin/charm-env": [
+ "layer:basic",
+ "static",
+ "fb6a20fac4102a6a4b6ffe903fcf666998f9a95a3647e6f9af7a1eeb44e58fd5"
+ ],
+ "bin/layer_option": [
+ "layer:options",
+ "static",
+ "e959bf29da4c5edff28b2602c24113c4df9e25cdc9f2aa3b5d46c8577b2a40cc"
+ ],
+ "config.yaml": [
+ "kubeapi-load-balancer",
+ "dynamic",
+ "586a155cd5fb93090f379e3c1ec9d350b89d73c58ebad447b03e36a886010ba7"
+ ],
+ "copyright": [
+ "kubeapi-load-balancer",
+ "static",
+ "badd4492d214890abd07b615f9e1a7a5ff3339b6c44655a826c746a9263ff00d"
+ ],
+ "copyright.layer-apt": [
+ "layer:apt",
+ "static",
+ "5123b2d0220fefb4424a463216fb41a6dd7cfad49c9799ba7037f1e74a2fd6bc"
+ ],
+ "copyright.layer-basic": [
+ "layer:basic",
+ "static",
+ "f6740d66fd60b60f2533d9fcb53907078d1e20920a0219afce7182e2a1c97629"
+ ],
+ "copyright.layer-metrics": [
+ "layer:metrics",
+ "static",
+ "08509dcbade4c20761ba4382ef23c831744dbab1d4a8dd94a1c2b4d4e913334c"
+ ],
+ "copyright.layer-nagios": [
+ "layer:nagios",
+ "static",
+ "47b2363574909e748bcc471d9004780ac084b301c154905654b5b6f088474749"
+ ],
+ "copyright.layer-nginx": [
+ "layer:nginx",
+ "static",
+ "66b7d69f452f9203cbf702c57c58b16b359be9970781deb0e21893620dd52516"
+ ],
+ "copyright.layer-options": [
+ "layer:options",
+ "static",
+ "f6740d66fd60b60f2533d9fcb53907078d1e20920a0219afce7182e2a1c97629"
+ ],
+ "copyright.layer-status": [
+ "layer:status",
+ "static",
+ "7c0e36e618a8544faaaa3f8e0533c2f1f4a18bcacbdd8b99b537742e6b587d58"
+ ],
+ "debug-scripts/charm-unitdata": [
+ "layer:debug",
+ "static",
+ "c952b9d31f3942e4e722cb3e70f5119707b69b8e76cc44e2e906bc6d9aef49b7"
+ ],
+ "debug-scripts/filesystem": [
+ "layer:debug",
+ "static",
+ "d29cc8687f4422d024001c91b1ac756ee6bf8a2a125bc98db1199ba775eb8fd7"
+ ],
+ "debug-scripts/juju-logs": [
+ "layer:debug",
+ "static",
+ "d260b35753a917368cb8c64c1312546a0a40ef49cba84c75bc6369549807c55e"
+ ],
+ "debug-scripts/juju-network-get": [
+ "layer:debug",
+ "static",
+ "6d849a1f8e6569bd0d5ea38299f7937cb8b36a5f505e3532f6c756eabeb8b6c5"
+ ],
+ "debug-scripts/network": [
+ "layer:debug",
+ "static",
+ "714afae5dcb45554ff1f05285501e3b7fcc656c8de51217e263b93dab25a9d2e"
+ ],
+ "debug-scripts/packages": [
+ "layer:debug",
+ "static",
+ "e8177102dc2ca853cb9272c1257cf2cfd5253d2a074e602d07c8bc4ea8e27c75"
+ ],
+ "debug-scripts/sysctl": [
+ "layer:debug",
+ "static",
+ "990035b320e09cc2228e1f2f880e795d51118b2959339eacddff9cbb74349c6a"
+ ],
+ "debug-scripts/systemd": [
+ "layer:debug",
+ "static",
+ "23ddf533198bf5b1ce723acde31ada806aab8539292b514c721d8ec08af74106"
+ ],
+ "debug-scripts/tls-certs": [
+ "layer:tls-client",
+ "static",
+ "ebf7f23ef6e39fb8e664bac2e9429e32aaeb673b4a51751724b835c007e85d3b"
+ ],
+ "docs/status.md": [
+ "layer:status",
+ "static",
+ "975dec9f8c938196e102e954a80226bda293407c4e5ae857c118bf692154702a"
+ ],
+ "hooks/apiserver-relation-broken": [
+ "layer:basic",
+ "dynamic",
+ "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7"
+ ],
+ "hooks/apiserver-relation-changed": [
+ "layer:basic",
+ "dynamic",
+ "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7"
+ ],
+ "hooks/apiserver-relation-created": [
+ "layer:basic",
+ "dynamic",
+ "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7"
+ ],
+ "hooks/apiserver-relation-departed": [
+ "layer:basic",
+ "dynamic",
+ "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7"
+ ],
+ "hooks/apiserver-relation-joined": [
+ "layer:basic",
+ "dynamic",
+ "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7"
+ ],
+ "hooks/certificates-relation-broken": [
+ "layer:basic",
+ "dynamic",
+ "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7"
+ ],
+ "hooks/certificates-relation-changed": [
+ "layer:basic",
+ "dynamic",
+ "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7"
+ ],
+ "hooks/certificates-relation-created": [
+ "layer:basic",
+ "dynamic",
+ "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7"
+ ],
+ "hooks/certificates-relation-departed": [
+ "layer:basic",
+ "dynamic",
+ "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7"
+ ],
+ "hooks/certificates-relation-joined": [
+ "layer:basic",
+ "dynamic",
+ "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7"
+ ],
+ "hooks/collect-metrics": [
+ "layer:metrics",
+ "static",
+ "139fe18ce4cf2bed2155d3d0fce1c3b4cf1bc2598242cda42b3d772ec9bf8558"
+ ],
+ "hooks/config-changed": [
+ "layer:basic",
+ "dynamic",
+ "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7"
+ ],
+ "hooks/ha-relation-broken": [
+ "layer:basic",
+ "dynamic",
+ "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7"
+ ],
+ "hooks/ha-relation-changed": [
+ "layer:basic",
+ "dynamic",
+ "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7"
+ ],
+ "hooks/ha-relation-created": [
+ "layer:basic",
+ "dynamic",
+ "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7"
+ ],
+ "hooks/ha-relation-departed": [
+ "layer:basic",
+ "dynamic",
+ "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7"
+ ],
+ "hooks/ha-relation-joined": [
+ "layer:basic",
+ "dynamic",
+ "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7"
+ ],
+ "hooks/hook.template": [
+ "layer:basic",
+ "static",
+ "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7"
+ ],
+ "hooks/install": [
+ "layer:basic",
+ "dynamic",
+ "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7"
+ ],
+ "hooks/leader-elected": [
+ "layer:basic",
+ "dynamic",
+ "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7"
+ ],
+ "hooks/leader-settings-changed": [
+ "layer:basic",
+ "dynamic",
+ "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7"
+ ],
+ "hooks/loadbalancer-relation-broken": [
+ "layer:basic",
+ "dynamic",
+ "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7"
+ ],
+ "hooks/loadbalancer-relation-changed": [
+ "layer:basic",
+ "dynamic",
+ "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7"
+ ],
+ "hooks/loadbalancer-relation-created": [
+ "layer:basic",
+ "dynamic",
+ "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7"
+ ],
+ "hooks/loadbalancer-relation-departed": [
+ "layer:basic",
+ "dynamic",
+ "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7"
+ ],
+ "hooks/loadbalancer-relation-joined": [
+ "layer:basic",
+ "dynamic",
+ "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7"
+ ],
+ "hooks/nrpe-external-master-relation-broken": [
+ "layer:basic",
+ "dynamic",
+ "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7"
+ ],
+ "hooks/nrpe-external-master-relation-changed": [
+ "layer:basic",
+ "dynamic",
+ "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7"
+ ],
+ "hooks/nrpe-external-master-relation-created": [
+ "layer:basic",
+ "dynamic",
+ "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7"
+ ],
+ "hooks/nrpe-external-master-relation-departed": [
+ "layer:basic",
+ "dynamic",
+ "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7"
+ ],
+ "hooks/nrpe-external-master-relation-joined": [
+ "layer:basic",
+ "dynamic",
+ "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7"
+ ],
+ "hooks/post-series-upgrade": [
+ "layer:basic",
+ "dynamic",
+ "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7"
+ ],
+ "hooks/pre-series-upgrade": [
+ "layer:basic",
+ "dynamic",
+ "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7"
+ ],
+ "hooks/relations/hacluster/.stestr.conf": [
+ "interface:hacluster",
+ "static",
+ "46965969e6df6ac729b7dac68d57bc4e677e9f4d79d445be77f54ca3b9e58774"
+ ],
+ "hooks/relations/hacluster/README.md": [
+ "interface:hacluster",
+ "static",
+ "7fad91e409c6e559cdb76d11c89c325531adc25679049a629a28c4f890755f1f"
+ ],
+ "hooks/relations/hacluster/__init__.py": [
+ "interface:hacluster",
+ "static",
+ "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"
+ ],
+ "hooks/relations/hacluster/common.py": [
+ "interface:hacluster",
+ "static",
+ "cd9f765e2c3ff64a592c8e144a36783e48c1033413cbece2c4f579195cb7ff5e"
+ ],
+ "hooks/relations/hacluster/copyright": [
+ "interface:hacluster",
+ "static",
+ "7a296596102da98cecee289a195e00d6af44241911321699b3d4d4af93f11893"
+ ],
+ "hooks/relations/hacluster/interface.yaml": [
+ "interface:hacluster",
+ "static",
+ "51bcf4e36b973600d567cf96783bdee3eaa6e164275f70b69e2e47e3468c8c8b"
+ ],
+ "hooks/relations/hacluster/requires.py": [
+ "interface:hacluster",
+ "static",
+ "eb752e55844ffbfddf9a98e80ac282ff832ab667c1a33b743940babbd048bb17"
+ ],
+ "hooks/relations/hacluster/test-requirements.txt": [
+ "interface:hacluster",
+ "static",
+ "2c37d84ada8578ba5ed44f99f10470710c91d370052a867541f31b5c6a357b07"
+ ],
+ "hooks/relations/http/.gitignore": [
+ "interface:http",
+ "static",
+ "83b4ca18cc39800b1d260b5633cd0252e21501b21e7c33e718db44f1a68a09b8"
+ ],
+ "hooks/relations/http/README.md": [
+ "interface:http",
+ "static",
+ "9c95320ad040745374fc03e972077f52c27e07eb0386ec93ae19bd50dca24c0d"
+ ],
+ "hooks/relations/http/__init__.py": [
+ "interface:http",
+ "static",
+ "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"
+ ],
+ "hooks/relations/http/interface.yaml": [
+ "interface:http",
+ "static",
+ "d0b64038b85b7791ee4f3a42d73ffc8c208f206f73f899cbf33a519d12f9ad13"
+ ],
+ "hooks/relations/http/provides.py": [
+ "interface:http",
+ "static",
+ "8c72cd8a5a6ea24f53b6dba11f4353c75265bfa7d3ecc2dd096c8963eab8c877"
+ ],
+ "hooks/relations/http/requires.py": [
+ "interface:http",
+ "static",
+ "76cc886368eaf9c2403a6dc46b40531c3f4eaf67b08829f890c57cb645430abd"
+ ],
+ "hooks/relations/nrpe-external-master/README.md": [
+ "interface:nrpe-external-master",
+ "static",
+ "d8ed3bc7334f6581b12b6091923f58e6f5ef62075a095a4e78fb8f434a948636"
+ ],
+ "hooks/relations/nrpe-external-master/__init__.py": [
+ "interface:nrpe-external-master",
+ "static",
+ "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"
+ ],
+ "hooks/relations/nrpe-external-master/interface.yaml": [
+ "interface:nrpe-external-master",
+ "static",
+ "894f24ba56148044dae5b7febf874b427d199239bcbe1f2f55c3db06bb77b5f0"
+ ],
+ "hooks/relations/nrpe-external-master/provides.py": [
+ "interface:nrpe-external-master",
+ "static",
+ "e6ba708d05b227b139a86be59c83ed95a2bad030bc81e5819167ba5e1e67ecd4"
+ ],
+ "hooks/relations/nrpe-external-master/requires.py": [
+ "interface:nrpe-external-master",
+ "static",
+ "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"
+ ],
+ "hooks/relations/public-address/README.md": [
+ "interface:public-address",
+ "static",
+ "7225effe61bfd8571447b8b685a2ecb52be17431b3066a5306330954c4cb064d"
+ ],
+ "hooks/relations/public-address/__init__.py": [
+ "interface:public-address",
+ "static",
+ "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"
+ ],
+ "hooks/relations/public-address/interface.yaml": [
+ "interface:public-address",
+ "static",
+ "49d6777a54aa84c7d3be8d531be237564e90f2e4cb2be05ef5617a372a382340"
+ ],
+ "hooks/relations/public-address/provides.py": [
+ "interface:public-address",
+ "static",
+ "7c99b0fe987d38773ed3e67c0378fdb78748c04d6895489cd4bca40aaeb051b2"
+ ],
+ "hooks/relations/public-address/requires.py": [
+ "interface:public-address",
+ "static",
+ "d6a7c6c0762d29a5db19afb4cf82af50812988d5e19a3a48fcbe8b0f6fec12a5"
+ ],
+ "hooks/relations/tls-certificates/.gitignore": [
+ "interface:tls-certificates",
+ "static",
+ "b485e74def213c534676224e655e9276b62d401ebc643508ddc545dd335cb6dc"
+ ],
+ "hooks/relations/tls-certificates/README.md": [
+ "interface:tls-certificates",
+ "static",
+ "6851227de8fcca7edfd504159dbe3e3af31080af64df46f3d3b345da7630827a"
+ ],
+ "hooks/relations/tls-certificates/__init__.py": [
+ "interface:tls-certificates",
+ "static",
+ "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"
+ ],
+ "hooks/relations/tls-certificates/docs/common.md": [
+ "interface:tls-certificates",
+ "static",
+ "5e91d6637fc0ccc50af2776de9e59a0f8098244b627816b2e18fabb266e980ff"
+ ],
+ "hooks/relations/tls-certificates/docs/provides.md": [
+ "interface:tls-certificates",
+ "static",
+ "5c12dfca99b5c15ba10b4e7f7cff4cb4c9b621b198deba5f2397d3c837d035fe"
+ ],
+ "hooks/relations/tls-certificates/docs/requires.md": [
+ "interface:tls-certificates",
+ "static",
+ "148dd1de163d75253f0a9d3c35e108dcaacbc9bdf97e47186743e6c82a67b62e"
+ ],
+ "hooks/relations/tls-certificates/interface.yaml": [
+ "interface:tls-certificates",
+ "static",
+ "e412e54b1d327bad15a882f7f0bf996212090db576b863cc9cff7a68afc0e4fa"
+ ],
+ "hooks/relations/tls-certificates/make_docs": [
+ "interface:tls-certificates",
+ "static",
+ "3671543bddc9d277171263310e404df3f11660429582cb27b39b7e7ec8757a37"
+ ],
+ "hooks/relations/tls-certificates/provides.py": [
+ "interface:tls-certificates",
+ "static",
+ "be2a4b9a411c770989c529fd887070ad91649481a13f5239cfd8751f234b637c"
+ ],
+ "hooks/relations/tls-certificates/pydocmd.yml": [
+ "interface:tls-certificates",
+ "static",
+ "48a233f60a89f87d56e9bc715e05766f5d39bbea2bc8741ed31f67b30c8cfcb8"
+ ],
+ "hooks/relations/tls-certificates/requires.py": [
+ "interface:tls-certificates",
+ "static",
+ "442d773112079bc674d3e6be75b00323fcad7efd2f03613a1972b575dd438dba"
+ ],
+ "hooks/relations/tls-certificates/tls_certificates_common.py": [
+ "interface:tls-certificates",
+ "static",
+ "068bd32ba69bfa514e1da386919d18b348ee678b40c372f275c9110f2cc4677c"
+ ],
+ "hooks/relations/tls-certificates/tox.ini": [
+ "interface:tls-certificates",
+ "static",
+ "7ab8ab53e5ed98cfa7fb5c1d5009f84077a4bb76640ba64f561ef7ea3a702eab"
+ ],
+ "hooks/start": [
+ "layer:basic",
+ "dynamic",
+ "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7"
+ ],
+ "hooks/stop": [
+ "layer:basic",
+ "dynamic",
+ "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7"
+ ],
+ "hooks/update-status": [
+ "layer:basic",
+ "dynamic",
+ "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7"
+ ],
+ "hooks/upgrade-charm": [
+ "layer:basic",
+ "dynamic",
+ "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7"
+ ],
+ "hooks/website-relation-broken": [
+ "layer:basic",
+ "dynamic",
+ "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7"
+ ],
+ "hooks/website-relation-changed": [
+ "layer:basic",
+ "dynamic",
+ "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7"
+ ],
+ "hooks/website-relation-created": [
+ "layer:basic",
+ "dynamic",
+ "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7"
+ ],
+ "hooks/website-relation-departed": [
+ "layer:basic",
+ "dynamic",
+ "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7"
+ ],
+ "hooks/website-relation-joined": [
+ "layer:basic",
+ "dynamic",
+ "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7"
+ ],
+ "icon.svg": [
+ "kubeapi-load-balancer",
+ "static",
+ "92271bf7063cc3a85a6d0fe2841250cf9bf8cd72697f3655f03ada39f8aee029"
+ ],
+ "layer.yaml": [
+ "kubeapi-load-balancer",
+ "dynamic",
+ "98380972be2b81b4b27449ff197b8bccdfd9c427df3e8792a5fd530365e84d92"
+ ],
+ "lib/.gitkeep": [
+ "layer:nginx",
+ "static",
+ "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"
+ ],
+ "lib/charms/apt.py": [
+ "layer:apt",
+ "static",
+ "c7613992eb33ac94d83fbf02f467b614ea5112eaf561c4715def90989cefa531"
+ ],
+ "lib/charms/layer/__init__.py": [
+ "layer:basic",
+ "static",
+ "dfe0d26c6bf409767de6e2546bc648f150e1b396243619bad3aa0553ab7e0e6f"
+ ],
+ "lib/charms/layer/basic.py": [
+ "layer:basic",
+ "static",
+ "3126b5754ad39402ee27e64527044ddd231ed1cd137fcedaffb51e63a635f108"
+ ],
+ "lib/charms/layer/execd.py": [
+ "layer:basic",
+ "static",
+ "fda8bd491032db1db8ddaf4e99e7cc878c6fb5432efe1f91cadb5b34765d076d"
+ ],
+ "lib/charms/layer/hacluster.py": [
+ "layer:hacluster",
+ "static",
+ "f58e0c1503187247f858ff3c9a1166d59107afd1557ba89e4878ec2e79304f8a"
+ ],
+ "lib/charms/layer/kubernetes_common.py": [
+ "layer:kubernetes-common",
+ "static",
+ "826650823a9af745e8a57defba66d1f2fe1c735f0fe64d282cf528ca65272101"
+ ],
+ "lib/charms/layer/nagios.py": [
+ "layer:nagios",
+ "static",
+ "0246710bdbea844356007a64409907d93e6e94a289d83266e8b7c5d921fb3a6c"
+ ],
+ "lib/charms/layer/nginx.py": [
+ "layer:nginx",
+ "static",
+ "5fea9e756b8e9ad09d0256d9f2a1e8e2169a97741af256653ca85b4412e40174"
+ ],
+ "lib/charms/layer/options.py": [
+ "layer:options",
+ "static",
+ "8ae7a07d22542fc964f2d2bee8219d1c78a68dace70a1b38d36d4aea47b1c3b2"
+ ],
+ "lib/charms/layer/status.py": [
+ "layer:status",
+ "static",
+ "d560a5e07b2e5f2b0f25f30e1f0278b06f3f90c01e4dbad5c83d71efc79018c6"
+ ],
+ "lib/charms/layer/tls_client.py": [
+ "layer:tls-client",
+ "static",
+ "34531c3980777b661b913d77c432fc371ed10425473c2eb365b1dd5540c2ec6e"
+ ],
+ "lib/debug_script.py": [
+ "layer:debug",
+ "static",
+ "a4d56f2d3e712b1b5cadb657c7195c6268d0aac6d228991049fd769e0ddaf453"
+ ],
+ "lib/nginxlib.py": [
+ "layer:nginx",
+ "static",
+ "bae474acba0fbf9da21f1372dcda1dba848757c5e7cebb6fb22c29f04a67c0aa"
+ ],
+ "make_docs": [
+ "layer:status",
+ "static",
+ "c990f55c8e879793a62ed8464ee3d7e0d7d2225fdecaf17af24b0df0e2daa8c1"
+ ],
+ "metadata.yaml": [
+ "kubeapi-load-balancer",
+ "dynamic",
+ "7224029776479946a04ca0237cadd1e16bcc3fa7e138d7732e30e0af07d0cd73"
+ ],
+ "metrics.yaml": [
+ "kubeapi-load-balancer",
+ "static",
+ "94a5eb0b0966f8ba434d91ff1e9b99b1b4c3b3044657b236d4e742d3e0d57c47"
+ ],
+ "pydocmd.yml": [
+ "layer:status",
+ "static",
+ "11d9293901f32f75f4256ae4ac2073b92ce1d7ef7b6c892ba9fbb98690a0b330"
+ ],
+ "pyproject.toml": [
+ "layer:apt",
+ "static",
+ "19689509a5fb9bfc90ed1e873122ac0a90f22533b7f40055c38fdd587fe297de"
+ ],
+ "reactive/__init__.py": [
+ "layer:basic",
+ "static",
+ "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"
+ ],
+ "reactive/apt.py": [
+ "layer:apt",
+ "static",
+ "6fe40f18eb84a910a71a4acb7ec74856128de846de6029b4fc297a875692c837"
+ ],
+ "reactive/hacluster.py": [
+ "layer:hacluster",
+ "static",
+ "0b34980232eec9866c85b55070db7e72a04689f92b338207c5839531abd0eadc"
+ ],
+ "reactive/load_balancer.py": [
+ "kubeapi-load-balancer",
+ "static",
+ "77a41c7fb062e3091abc6e2a57a648722d17ca11a01c88c33069eed7d413296a"
+ ],
+ "reactive/nginx.py": [
+ "layer:nginx",
+ "static",
+ "046769111b72a5a5aa7bfd6362db988361719586bee4e9b40a472f33c0cf09a8"
+ ],
+ "reactive/status.py": [
+ "layer:status",
+ "static",
+ "30207fc206f24e91def5252f1c7f7c8e23c0aed0e93076babf5e03c05296d207"
+ ],
+ "reactive/tls_client.py": [
+ "layer:tls-client",
+ "static",
+ "08e850e401d2004523dca6b5e6bc47c33d558bf575dd55969491e11cd3ed98c8"
+ ],
+ "requirements.txt": [
+ "layer:basic",
+ "static",
+ "a00f75d80849e5b4fc5ad2e7536f947c25b1a4044b341caa8ee87a92d3a4c804"
+ ],
+ "script/bootstrap": [
+ "kubeapi-load-balancer",
+ "static",
+ "e0c77e16a79dcb31cb6378687e3465151a74fd8e6dd2083a662fb8c1fe5168e2"
+ ],
+ "script/build": [
+ "kubeapi-load-balancer",
+ "static",
+ "e78cab1bead2e3c8f7970558f4d08a81f6cc59e5c2903e997644f7e51e7a3633"
+ ],
+ "script/upload": [
+ "kubeapi-load-balancer",
+ "static",
+ "8a13f3dade7374df2250ac04dc82fb3a39a328412ed384721576852a54a34114"
+ ],
+ "templates/.gitkeep": [
+ "layer:nginx",
+ "static",
+ "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"
+ ],
+ "templates/apilb.conf": [
+ "kubeapi-load-balancer",
+ "static",
+ "ec8fc3d9cb4ff7ec8499ad6900e813cfbb2fbe7b802944d3aa0ce1d12963be52"
+ ],
+ "templates/vhost.conf.ex": [
+ "layer:nginx",
+ "static",
+ "f68c366c35a8487acb78da6f1086eeee33a3eccdbe5a524509039c0c41ad5d5a"
+ ],
+ "tests/conftest.py": [
+ "kubeapi-load-balancer",
+ "static",
+ "6b67fae874cf23514acce521237850807e1b45f5ddaac1777237392e66b8ad53"
+ ],
+ "tests/test_kubeapi_load_balancer.py": [
+ "kubeapi-load-balancer",
+ "static",
+ "8c31c2541800259eab3461d0295ed0c76d763596b2a99a5ecdd683d65402517f"
+ ],
+ "tox.ini": [
+ "kubeapi-load-balancer",
+ "static",
+ "85b2e7b5880fe8cc3f0fbbfb3496c2a8718c775aee7b8002929a596d35927073"
+ ],
+ "version": [
+ "kubeapi-load-balancer",
+ "dynamic",
+ "f7b6b97993cc32152f2c110a487f9eac0896218e2292a13c252976d9548e3435"
+ ],
+ "wheelhouse.txt": [
+ "layer:nginx",
+ "dynamic",
+ "27c996e4c9738557fed60f48dc535fbec68415f08303743d23d1ed51675a361d"
+ ],
+ "wheelhouse/Jinja2-2.10.1.tar.gz": [
+ "layer:basic",
+ "dynamic",
+ "065c4f02ebe7f7cf559e49ee5a95fb800a9e4528727aec6f24402a5374c65013"
+ ],
+ "wheelhouse/MarkupSafe-1.1.1.tar.gz": [
+ "__pip__",
+ "dynamic",
+ "29872e92839765e546828bb7754a68c418d927cd064fd4708fab9fe9c8bb116b"
+ ],
+ "wheelhouse/PyYAML-5.2.tar.gz": [
+ "layer:basic",
+ "dynamic",
+ "c0ee8eca2c582d29c3c2ec6e2c4f703d1b7f1fb10bc72317355a746057e7346c"
+ ],
+ "wheelhouse/Tempita-0.5.2.tar.gz": [
+ "__pip__",
+ "dynamic",
+ "cacecf0baa674d356641f1d406b8bff1d756d739c46b869a54de515d08e6fc9c"
+ ],
+ "wheelhouse/charmhelpers-0.20.21.tar.gz": [
+ "layer:basic",
+ "dynamic",
+ "37dd06f9548724d38352d1eaf91216df9167066745774118481d40974599715c"
+ ],
+ "wheelhouse/charms.reactive-1.4.1.tar.gz": [
+ "layer:basic",
+ "dynamic",
+ "bba21b4fd40b26c240c9ef2aa10c6fdf73592031c68591da4e7ccc46ca9cb616"
+ ],
+ "wheelhouse/netaddr-0.7.19.tar.gz": [
+ "layer:basic",
+ "dynamic",
+ "38aeec7cdd035081d3a4c306394b19d677623bf76fa0913f6695127c7753aefd"
+ ],
+ "wheelhouse/pbr-5.6.0.tar.gz": [
+ "__pip__",
+ "dynamic",
+ "42df03e7797b796625b1029c0400279c7c34fd7df24a7d7818a1abb5b38710dd"
+ ],
+ "wheelhouse/pip-18.1.tar.gz": [
+ "layer:basic",
+ "dynamic",
+ "c0a292bd977ef590379a3f05d7b7f65135487b67470f6281289a94e015650ea1"
+ ],
+ "wheelhouse/pyaml-20.4.0.tar.gz": [
+ "__pip__",
+ "dynamic",
+ "29a5c2a68660a799103d6949167bd6c7953d031449d08802386372de1db6ad71"
+ ],
+ "wheelhouse/setuptools-41.6.0.zip": [
+ "layer:basic",
+ "dynamic",
+ "6afa61b391dcd16cb8890ec9f66cc4015a8a31a6e1c2b4e0c464514be1a3d722"
+ ],
+ "wheelhouse/setuptools_scm-1.17.0.tar.gz": [
+ "layer:basic",
+ "dynamic",
+ "70a4cf5584e966ae92f54a764e6437af992ba42ac4bca7eb37cc5d02b98ec40a"
+ ],
+ "wheelhouse/six-1.15.0.tar.gz": [
+ "__pip__",
+ "dynamic",
+ "30639c035cdb23534cd4aa2dd52c3bf48f06e5f4a941509c8bafd8ce11080259"
+ ],
+ "wheelhouse/toml-0.10.2.tar.gz": [
+ "layer:nginx",
+ "dynamic",
+ "b3bda1d108d5dd99f4a20d24d9c348e91c4db7ab1b749200bded2f839ccbe68f"
+ ],
+ "wheelhouse/wheel-0.33.6.tar.gz": [
+ "layer:basic",
+ "dynamic",
+ "10c9da68765315ed98850f8e048347c3eb06dd81822dc2ab1d4fde9dc9702646"
+ ]
+ }
+}
\ No newline at end of file
diff --git a/kubeapi-load-balancer/.github/workflows/build.yml b/kubeapi-load-balancer/.github/workflows/build.yml
new file mode 100644
index 0000000..eb64988
--- /dev/null
+++ b/kubeapi-load-balancer/.github/workflows/build.yml
@@ -0,0 +1,16 @@
+name: Builds kubeapi-load-balancer charm
+on: [push, pull_request]
+
+jobs:
+ build:
+ name: Build charm
+ runs-on: ubuntu-latest
+ steps:
+ - uses: actions/checkout@v2
+ - name: Setup Python 3.8
+ uses: actions/setup-python@v2
+ with:
+ python-version: '3.8'
+ - name: Run build
+ run: |
+ make charm
diff --git a/kubeapi-load-balancer/.github/workflows/tox.yaml b/kubeapi-load-balancer/.github/workflows/tox.yaml
new file mode 100644
index 0000000..b07172d
--- /dev/null
+++ b/kubeapi-load-balancer/.github/workflows/tox.yaml
@@ -0,0 +1,22 @@
+name: Run tests with Tox
+
+on: [push]
+
+jobs:
+ build:
+
+ runs-on: ubuntu-latest
+ strategy:
+ matrix:
+ python: [3.5, 3.6, 3.7, 3.8]
+
+ steps:
+ - uses: actions/checkout@v2
+ - name: Setup Python
+ uses: actions/setup-python@v1
+ with:
+ python-version: ${{ matrix.python }}
+ - name: Install Tox and any other packages
+ run: pip install tox
+ - name: Run Tox
+ run: tox -e py # Run tox using the version of Python in `PATH`
diff --git a/kubeapi-load-balancer/.gitignore b/kubeapi-load-balancer/.gitignore
new file mode 100644
index 0000000..cc02691
--- /dev/null
+++ b/kubeapi-load-balancer/.gitignore
@@ -0,0 +1,3 @@
+.tox/
+__pycache__/
+*.pyc
diff --git a/kubeapi-load-balancer/.travis/profile-update.yaml b/kubeapi-load-balancer/.travis/profile-update.yaml
new file mode 100644
index 0000000..57f96eb
--- /dev/null
+++ b/kubeapi-load-balancer/.travis/profile-update.yaml
@@ -0,0 +1,12 @@
+config: {}
+description: Default LXD profile - updated
+devices:
+ eth0:
+ name: eth0
+ parent: lxdbr0
+ nictype: bridged
+ type: nic
+ root:
+ path: /
+ pool: default
+ type: disk
diff --git a/kubeapi-load-balancer/AUTHORS b/kubeapi-load-balancer/AUTHORS
new file mode 100644
index 0000000..60e3e7d
--- /dev/null
+++ b/kubeapi-load-balancer/AUTHORS
@@ -0,0 +1,2 @@
+Adam Stokes
+Marco Ceppi
diff --git a/kubeapi-load-balancer/CONTRIBUTING.md b/kubeapi-load-balancer/CONTRIBUTING.md
new file mode 100644
index 0000000..f198d7c
--- /dev/null
+++ b/kubeapi-load-balancer/CONTRIBUTING.md
@@ -0,0 +1,37 @@
+# Contributor Guide
+
+This Juju charm is open source ([Apache License 2.0](./LICENSE)) and we actively seek any community contributions
+for code, suggestions and documentation.
+This page details a few notes, workflows and suggestions for how to make contributions most effective and help us
+all build a better charm - please give them a read before working on any contributions.
+
+## Licensing
+
+This charm has been created under the [Apache License 2.0](./LICENSE), which will cover any contributions you may
+make to this project. Please familiarise yourself with the terms of the license.
+
+Additionally, this charm uses the Harmony CLA agreement. It’s the easiest way for you to give us permission to
+use your contributions.
+In effect, you’re giving us a license, but you still own the copyright — so you retain the right to modify your
+code and use it in other projects. Please [sign the CLA here](https://ubuntu.com/legal/contributors/agreement) before
+making any contributions.
+
+## Code of conduct
+
+We have adopted the Ubuntu Code of Conduct. You can read this in full [here](https://ubuntu.com/community/code-of-conduct).
+
+## Contributing code
+
+To contribute code to this project, please use the following workflow:
+
+1. [Submit a bug](https://bugs.launchpad.net/charm-kubeapi-load-balancer/+filebug) to explain the need for and track the change.
+2. Create a branch on your fork of the repo with your changes, including a unit test covering the new or modified code.
+3. Submit a PR. The PR description should include a link to the bug on Launchpad.
+4. Update the Launchpad bug to include a link to the PR and the `review-needed` tag.
+5. Once reviewed and merged, the change will become available on the edge channel and assigned to an appropriate milestone
+ for further release according to priority.
+
+## Documentation
+
+Documentation for this charm is currently maintained as part of the Charmed Kubernetes docs.
+See [this page](https://github.com/charmed-kubernetes/kubernetes-docs/blob/master/pages/k8s/charm-kubeapi-load-balancer.md)
diff --git a/kubeapi-load-balancer/LICENSE b/kubeapi-load-balancer/LICENSE
new file mode 100644
index 0000000..7a4a3ea
--- /dev/null
+++ b/kubeapi-load-balancer/LICENSE
@@ -0,0 +1,202 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
\ No newline at end of file
diff --git a/kubeapi-load-balancer/Makefile b/kubeapi-load-balancer/Makefile
new file mode 100644
index 0000000..36d42c8
--- /dev/null
+++ b/kubeapi-load-balancer/Makefile
@@ -0,0 +1,18 @@
+CHANNEL ?= unpublished
+CHARM := kubeapi-load-balancer
+
+setup-env:
+ bash script/bootstrap
+
+charm: setup-env
+ bash script/build
+
+upload:
+ifndef NAMESPACE
+ $(error NAMESPACE is not set)
+endif
+
+ env CHARM=$(CHARM) NAMESPACE=$(NAMESPACE) CHANNEL=$(CHANNEL) bash script/upload
+
+.PHONY: charm upload setup-env
+all: charm
diff --git a/kubeapi-load-balancer/README.md b/kubeapi-load-balancer/README.md
new file mode 100644
index 0000000..2809e47
--- /dev/null
+++ b/kubeapi-load-balancer/README.md
@@ -0,0 +1,15 @@
+# kubeapi-load-balancer
+
+Simple NGINX reverse proxy to lend a hand in HA kubernetes-master deployments.
+
+
+This charm is a component of Charmed Kubernetes. For full information,
+please visit the [official Charmed Kubernetes docs](https://www.ubuntu.com/kubernetes/docs/charm-kubeapi-load-balancer).
+
+# Developers
+
+## Building the charm
+
+```
+make charm
+```
diff --git a/kubeapi-load-balancer/actions.yaml b/kubeapi-load-balancer/actions.yaml
new file mode 100644
index 0000000..8712b6b
--- /dev/null
+++ b/kubeapi-load-balancer/actions.yaml
@@ -0,0 +1,2 @@
+"debug":
+ "description": "Collect debug data"
diff --git a/kubeapi-load-balancer/actions/debug b/kubeapi-load-balancer/actions/debug
new file mode 100755
index 0000000..8ba160e
--- /dev/null
+++ b/kubeapi-load-balancer/actions/debug
@@ -0,0 +1,102 @@
+#!/usr/local/sbin/charm-env python3
+
+import os
+import subprocess
+import tarfile
+import tempfile
+import traceback
+from contextlib import contextmanager
+from datetime import datetime
+from charmhelpers.core.hookenv import action_set, local_unit
+
+archive_dir = None
+log_file = None
+
+
+@contextmanager
+def archive_context():
+ """ Open a context with a new temporary directory.
+
+ When the context closes, the directory is archived, and the archive
+ location is added to Juju action output. """
+ global archive_dir
+ global log_file
+ with tempfile.TemporaryDirectory() as temp_dir:
+ name = "debug-" + datetime.now().strftime("%Y%m%d%H%M%S")
+ archive_dir = os.path.join(temp_dir, name)
+ os.makedirs(archive_dir)
+ with open("%s/debug.log" % archive_dir, "w") as log_file:
+ yield
+ os.chdir(temp_dir)
+ tar_path = "/home/ubuntu/%s.tar.gz" % name
+ with tarfile.open(tar_path, "w:gz") as f:
+ f.add(name)
+ action_set({
+ "path": tar_path,
+ "command": "juju scp %s:%s ." % (local_unit(), tar_path),
+ "message": " ".join([
+ "Archive has been created on unit %s." % local_unit(),
+ "Use the juju scp command to copy it to your local machine."
+ ])
+ })
+
+
+def log(msg):
+ """ Log a message that will be included in the debug archive.
+
+ Must be run within archive_context """
+ timestamp = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
+ for line in str(msg).splitlines():
+ log_file.write(timestamp + " | " + line.rstrip() + "\n")
+
+
+def run_script(script):
+ """ Run a single script. Must be run within archive_context """
+ log("Running script: " + script)
+ script_dir = os.path.join(archive_dir, script)
+ os.makedirs(script_dir)
+ env = os.environ.copy()
+ env["PYTHONPATH"] = "lib" # allow same imports as reactive code
+ env["DEBUG_SCRIPT_DIR"] = script_dir
+ with open(script_dir + "/stdout", "w") as stdout:
+ with open(script_dir + "/stderr", "w") as stderr:
+ process = subprocess.Popen(
+ "debug-scripts/" + script,
+ stdout=stdout, stderr=stderr, env=env
+ )
+ try:
+ exit_code = process.wait(timeout=300)
+ except subprocess.TimeoutExpired:
+ log("ERROR: still running, terminating")
+ process.terminate()
+ try:
+ exit_code = process.wait(timeout=10)
+ except subprocess.TimeoutExpired:
+ log("ERROR: still running, killing")
+ process.kill()
+ exit_code = process.wait(timeout=10)
+ if exit_code != 0:
+ log("ERROR: %s failed with exit code %d" % (script, exit_code))
+
+
+def run_all_scripts():
+ """ Run all scripts. For the sake of robustness, log and ignore any
+ exceptions that occur.
+
+ Must be run within archive_context """
+ scripts = os.listdir("debug-scripts")
+ for script in scripts:
+ try:
+ run_script(script)
+ except:
+ log(traceback.format_exc())
+
+
+def main():
+ """ Open an archive context and run all scripts. """
+ with archive_context():
+ run_all_scripts()
+
+
+if __name__ == "__main__":
+ main()
diff --git a/kubeapi-load-balancer/bin/charm-env b/kubeapi-load-balancer/bin/charm-env
new file mode 100755
index 0000000..d211ce9
--- /dev/null
+++ b/kubeapi-load-balancer/bin/charm-env
@@ -0,0 +1,107 @@
+#!/bin/bash
+
+VERSION="1.0.0"
+
+
+find_charm_dirs() {
+ # Hopefully, $JUJU_CHARM_DIR is set so which venv to use in unambiguous.
+ if [[ -n "$JUJU_CHARM_DIR" || -n "$CHARM_DIR" ]]; then
+ if [[ -z "$JUJU_CHARM_DIR" ]]; then
+ # accept $CHARM_DIR to be more forgiving
+ export JUJU_CHARM_DIR="$CHARM_DIR"
+ fi
+ if [[ -z "$CHARM_DIR" ]]; then
+ # set CHARM_DIR as well to help with backwards compatibility
+ export CHARM_DIR="$JUJU_CHARM_DIR"
+ fi
+ return
+ fi
+ # Try to guess the value for JUJU_CHARM_DIR by looking for a non-subordinate
+ # (because there's got to be at least one principle) charm directory;
+ # if there are several, pick the first by alpha order.
+ agents_dir="/var/lib/juju/agents"
+ if [[ -d "$agents_dir" ]]; then
+ desired_charm="$1"
+ found_charm_dir=""
+ if [[ -n "$desired_charm" ]]; then
+ for charm_dir in $(/bin/ls -d "$agents_dir"/unit-*/charm); do
+ charm_name="$(grep -o '^['\''"]\?name['\''"]\?:.*' $charm_dir/metadata.yaml 2> /dev/null | sed -e 's/.*: *//' -e 's/['\''"]//g')"
+ if [[ "$charm_name" == "$desired_charm" ]]; then
+ if [[ -n "$found_charm_dir" ]]; then
+ >&2 echo "Ambiguous possibilities for JUJU_CHARM_DIR matching '$desired_charm'; please run within a Juju hook context"
+ exit 1
+ fi
+ found_charm_dir="$charm_dir"
+ fi
+ done
+ if [[ -z "$found_charm_dir" ]]; then
+ >&2 echo "Unable to determine JUJU_CHARM_DIR matching '$desired_charm'; please run within a Juju hook context"
+ exit 1
+ fi
+ export JUJU_CHARM_DIR="$found_charm_dir"
+ export CHARM_DIR="$found_charm_dir"
+ return
+ fi
+ # shellcheck disable=SC2126
+ non_subordinates="$(grep -L 'subordinate"\?:.*true' "$agents_dir"/unit-*/charm/metadata.yaml | wc -l)"
+ if [[ "$non_subordinates" -gt 1 ]]; then
+ >&2 echo 'Ambiguous possibilities for JUJU_CHARM_DIR; please use --charm or run within a Juju hook context'
+ exit 1
+ elif [[ "$non_subordinates" -eq 1 ]]; then
+ for charm_dir in $(/bin/ls -d "$agents_dir"/unit-*/charm); do
+ if grep -q 'subordinate"\?:.*true' "$charm_dir/metadata.yaml"; then
+ continue
+ fi
+ export JUJU_CHARM_DIR="$charm_dir"
+ export CHARM_DIR="$charm_dir"
+ return
+ done
+ fi
+ fi
+ >&2 echo 'Unable to determine JUJU_CHARM_DIR; please run within a Juju hook context'
+ exit 1
+}
+
+try_activate_venv() {
+ if [[ -d "$JUJU_CHARM_DIR/../.venv" ]]; then
+ . "$JUJU_CHARM_DIR/../.venv/bin/activate"
+ fi
+}
+
+find_wrapped() {
+ PATH="${PATH/\/usr\/local\/sbin:}" which "$(basename "$0")"
+}
+
+
+if [[ "$1" == "--version" || "$1" == "-v" ]]; then
+ echo "$VERSION"
+ exit 0
+fi
+
+
+# allow --charm option to hint which JUJU_CHARM_DIR to choose when ambiguous
+# NB: --charm option must come first
+# NB: option must be processed outside find_charm_dirs to modify $@
+charm_name=""
+if [[ "$1" == "--charm" ]]; then
+ charm_name="$2"
+ shift; shift
+fi
+
+find_charm_dirs "$charm_name"
+try_activate_venv
+export PYTHONPATH="$JUJU_CHARM_DIR/lib:$PYTHONPATH"
+
+if [[ "$(basename "$0")" == "charm-env" ]]; then
+ # being used as a shebang
+ exec "$@"
+elif [[ "$0" == "$BASH_SOURCE" ]]; then
+ # being invoked as a symlink wrapping something to find in the venv
+ exec "$(find_wrapped)" "$@"
+elif [[ "$(basename "$BASH_SOURCE")" == "charm-env" ]]; then
+ # being sourced directly; do nothing
+ /bin/true
+else
+ # being sourced for wrapped bash helpers
+ . "$(find_wrapped)"
+fi
diff --git a/kubeapi-load-balancer/bin/layer_option b/kubeapi-load-balancer/bin/layer_option
new file mode 100755
index 0000000..3253ef8
--- /dev/null
+++ b/kubeapi-load-balancer/bin/layer_option
@@ -0,0 +1,22 @@
+#!/usr/bin/env python3
+
+import sys
+import argparse
+from charms import layer
+
+
+parser = argparse.ArgumentParser(description='Access layer options.')
+parser.add_argument('section',
+ help='the section, or layer, the option is from')
+parser.add_argument('option',
+ help='the option to access')
+
+args = parser.parse_args()
+value = layer.options.get(args.section, args.option)
+if isinstance(value, bool):
+ sys.exit(0 if value else 1)
+elif isinstance(value, list):
+ for val in value:
+ print(val)
+else:
+ print(value)
diff --git a/kubeapi-load-balancer/config.yaml b/kubeapi-load-balancer/config.yaml
new file mode 100644
index 0000000..e6a860d
--- /dev/null
+++ b/kubeapi-load-balancer/config.yaml
@@ -0,0 +1,91 @@
+"options":
+ "nagios_context":
+ "default": "juju"
+ "type": "string"
+ "description": |
+ Used by the nrpe subordinate charms.
+ A string that will be prepended to instance name to set the host name
+ in nagios. So for instance the hostname would be something like:
+ juju-myservice-0
+ If you're running multiple environments with the same services in them
+ this allows you to differentiate between them.
+ "nagios_servicegroups":
+ "default": ""
+ "type": "string"
+ "description": |
+ A comma-separated list of nagios servicegroups.
+ If left empty, the nagios_context will be used as the servicegroup
+ "extra_packages":
+ "description": >
+ Space separated list of extra deb packages to install.
+ "type": "string"
+ "default": ""
+ "package_status":
+ "default": "install"
+ "type": "string"
+ "description": >
+ The status of service-affecting packages will be set to this
+ value in the dpkg database. Valid values are "install" and "hold".
+ "install_sources":
+ "description": >
+ List of extra apt sources, per charm-helpers standard
+ format (a yaml list of strings encoded as a string). Each source
+ may be either a line that can be added directly to
+ sources.list(5), or in the form ppa:/ for adding
+ Personal Package Archives, or a distribution component to enable.
+ "type": "string"
+ "default": ""
+ "install_keys":
+ "description": >
+ List of signing keys for install_sources package sources, per
+ charmhelpers standard format (a yaml list of strings encoded as
+ a string). The keys should be the full ASCII armoured GPG public
+ keys. While GPG key ids are also supported and looked up on a
+ keyserver, operators should be aware that this mechanism is
+ insecure. null can be used if a standard package signing key is
+ used that will already be installed on the machine, and for PPA
+ sources where the package signing key is securely retrieved from
+ Launchpad.
+ "type": "string"
+ "default": ""
+ "port":
+ "type": "int"
+ "default": !!int "443"
+ "description": |-
+ The port to run the loadbalancer
+ "host":
+ "type": "string"
+ "default": "127.0.0.1"
+ "description": "listen address"
+ "ha-cluster-vip":
+ "type": "string"
+ "description": |
+ Virtual IP for the charm to use with the HA Cluster subordinate charm
+ Mutually exclusive with ha-cluster-dns. Multiple virtual IPs are
+ separated by spaces.
+ "default": ""
+ "ha-cluster-dns":
+ "type": "string"
+ "description": |
+ DNS entry to use with the HA Cluster subordinate charm.
+ Mutually exclusive with ha-cluster-vip.
+ "default": ""
+ "extra_sans":
+ "type": "string"
+ "default": ""
+ "description": |
+ Space-separated list of extra SAN entries to add to the x509 certificate
+ created for the load balancers.
+ "proxy_read_timeout":
+ "type": "int"
+ "default": !!int "600"
+ "description": "Timeout in seconds for reading a response from proxy server."
+ "loadbalancer-ips":
+ "type": "string"
+ "description": |
+ Space seperated list of IP addresses of loadbalancers in front of control plane.
+ A common case for this is virtual IP addresses that are floated in front of the
+ kubeapi-load-balancer charm. The workers will alternate IP addresses from this
+ list to distribute load. If you have 2 IPs and 4 workers, each IP will be used
+ by 2 workers.
+ "default": ""
diff --git a/kubeapi-load-balancer/copyright b/kubeapi-load-balancer/copyright
new file mode 100644
index 0000000..ac5e525
--- /dev/null
+++ b/kubeapi-load-balancer/copyright
@@ -0,0 +1,13 @@
+Copyright 2016 The Kubernetes Authors.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/kubeapi-load-balancer/copyright.layer-apt b/kubeapi-load-balancer/copyright.layer-apt
new file mode 100644
index 0000000..0814dc1
--- /dev/null
+++ b/kubeapi-load-balancer/copyright.layer-apt
@@ -0,0 +1,15 @@
+Copyright 2015-2016 Canonical Ltd.
+
+This file is part of the Apt layer for Juju.
+
+This program is free software: you can redistribute it and/or modify
+it under the terms of the GNU General Public License version 3, as
+published by the Free Software Foundation.
+
+This program is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranties of
+MERCHANTABILITY, SATISFACTORY QUALITY, or FITNESS FOR A PARTICULAR
+PURPOSE. See the GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with this program. If not, see .
diff --git a/kubeapi-load-balancer/copyright.layer-basic b/kubeapi-load-balancer/copyright.layer-basic
new file mode 100644
index 0000000..d4fdd18
--- /dev/null
+++ b/kubeapi-load-balancer/copyright.layer-basic
@@ -0,0 +1,16 @@
+Format: http://dep.debian.net/deps/dep5/
+
+Files: *
+Copyright: Copyright 2015-2017, Canonical Ltd., All Rights Reserved.
+License: Apache License 2.0
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+ .
+ http://www.apache.org/licenses/LICENSE-2.0
+ .
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/kubeapi-load-balancer/copyright.layer-metrics b/kubeapi-load-balancer/copyright.layer-metrics
new file mode 100644
index 0000000..2df15bd
--- /dev/null
+++ b/kubeapi-load-balancer/copyright.layer-metrics
@@ -0,0 +1,13 @@
+Copyright 2016 Canonical Ltd
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/kubeapi-load-balancer/copyright.layer-nagios b/kubeapi-load-balancer/copyright.layer-nagios
new file mode 100644
index 0000000..c80db95
--- /dev/null
+++ b/kubeapi-load-balancer/copyright.layer-nagios
@@ -0,0 +1,16 @@
+Format: http://dep.debian.net/deps/dep5/
+
+Files: *
+Copyright: Copyright 2016, Canonical Ltd.
+License: GPL-3
+ This program is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License version 3, as
+ published by the Free Software Foundation.
+ .
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranties of
+ MERCHANTABILITY, SATISFACTORY QUALITY, or FITNESS FOR A PARTICULAR
+ PURPOSE. See the GNU General Public License for more details.
+ .
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see .
diff --git a/kubeapi-load-balancer/copyright.layer-nginx b/kubeapi-load-balancer/copyright.layer-nginx
new file mode 100644
index 0000000..953f220
--- /dev/null
+++ b/kubeapi-load-balancer/copyright.layer-nginx
@@ -0,0 +1,21 @@
+The MIT License (MIT)
+
+Copyright (c) 2015 Adam Stokes
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
diff --git a/kubeapi-load-balancer/copyright.layer-options b/kubeapi-load-balancer/copyright.layer-options
new file mode 100644
index 0000000..d4fdd18
--- /dev/null
+++ b/kubeapi-load-balancer/copyright.layer-options
@@ -0,0 +1,16 @@
+Format: http://dep.debian.net/deps/dep5/
+
+Files: *
+Copyright: Copyright 2015-2017, Canonical Ltd., All Rights Reserved.
+License: Apache License 2.0
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+ .
+ http://www.apache.org/licenses/LICENSE-2.0
+ .
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/kubeapi-load-balancer/copyright.layer-status b/kubeapi-load-balancer/copyright.layer-status
new file mode 100644
index 0000000..a91bdf1
--- /dev/null
+++ b/kubeapi-load-balancer/copyright.layer-status
@@ -0,0 +1,16 @@
+Format: http://dep.debian.net/deps/dep5/
+
+Files: *
+Copyright: Copyright 2018, Canonical Ltd., All Rights Reserved.
+License: Apache License 2.0
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+ .
+ http://www.apache.org/licenses/LICENSE-2.0
+ .
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/kubeapi-load-balancer/debug-scripts/charm-unitdata b/kubeapi-load-balancer/debug-scripts/charm-unitdata
new file mode 100755
index 0000000..d2aac60
--- /dev/null
+++ b/kubeapi-load-balancer/debug-scripts/charm-unitdata
@@ -0,0 +1,12 @@
+#!/usr/local/sbin/charm-env python3
+
+import debug_script
+import json
+from charmhelpers.core import unitdata
+
+kv = unitdata.kv()
+data = kv.getrange("")
+
+with debug_script.open_file("unitdata.json", "w") as f:
+ json.dump(data, f, indent=2)
+ f.write("\n")
diff --git a/kubeapi-load-balancer/debug-scripts/filesystem b/kubeapi-load-balancer/debug-scripts/filesystem
new file mode 100755
index 0000000..c5ec6d8
--- /dev/null
+++ b/kubeapi-load-balancer/debug-scripts/filesystem
@@ -0,0 +1,17 @@
+#!/bin/sh
+set -ux
+
+# report file system disk space usage
+df -hT > $DEBUG_SCRIPT_DIR/df-hT
+# estimate file space usage
+du -h / 2>&1 > $DEBUG_SCRIPT_DIR/du-h
+# list the mounted filesystems
+mount > $DEBUG_SCRIPT_DIR/mount
+# list the mounted systems with ascii trees
+findmnt -A > $DEBUG_SCRIPT_DIR/findmnt
+# list block devices
+lsblk > $DEBUG_SCRIPT_DIR/lsblk
+# list open files
+lsof 2>&1 > $DEBUG_SCRIPT_DIR/lsof
+# list local system locks
+lslocks > $DEBUG_SCRIPT_DIR/lslocks
diff --git a/kubeapi-load-balancer/debug-scripts/juju-logs b/kubeapi-load-balancer/debug-scripts/juju-logs
new file mode 100755
index 0000000..d27c458
--- /dev/null
+++ b/kubeapi-load-balancer/debug-scripts/juju-logs
@@ -0,0 +1,4 @@
+#!/bin/sh
+set -ux
+
+cp -v /var/log/juju/* $DEBUG_SCRIPT_DIR
diff --git a/kubeapi-load-balancer/debug-scripts/juju-network-get b/kubeapi-load-balancer/debug-scripts/juju-network-get
new file mode 100755
index 0000000..983c8c4
--- /dev/null
+++ b/kubeapi-load-balancer/debug-scripts/juju-network-get
@@ -0,0 +1,21 @@
+#!/usr/local/sbin/charm-env python3
+
+import os
+import subprocess
+import yaml
+import debug_script
+
+with open('metadata.yaml') as f:
+ metadata = yaml.load(f)
+
+relations = []
+for key in ['requires', 'provides', 'peers']:
+ relations += list(metadata.get(key, {}).keys())
+
+os.mkdir(os.path.join(debug_script.dir, 'relations'))
+
+for relation in relations:
+ path = 'relations/' + relation
+ with debug_script.open_file(path, 'w') as f:
+ cmd = ['network-get', relation]
+ subprocess.call(cmd, stdout=f, stderr=subprocess.STDOUT)
diff --git a/kubeapi-load-balancer/debug-scripts/network b/kubeapi-load-balancer/debug-scripts/network
new file mode 100755
index 0000000..944a355
--- /dev/null
+++ b/kubeapi-load-balancer/debug-scripts/network
@@ -0,0 +1,11 @@
+#!/bin/sh
+set -ux
+
+ifconfig -a > $DEBUG_SCRIPT_DIR/ifconfig
+cp -v /etc/resolv.conf $DEBUG_SCRIPT_DIR/resolv.conf
+cp -v /etc/network/interfaces $DEBUG_SCRIPT_DIR/interfaces
+netstat -planut > $DEBUG_SCRIPT_DIR/netstat
+route -n > $DEBUG_SCRIPT_DIR/route
+iptables-save > $DEBUG_SCRIPT_DIR/iptables-save
+dig google.com > $DEBUG_SCRIPT_DIR/dig-google
+ping -w 2 -i 0.1 google.com > $DEBUG_SCRIPT_DIR/ping-google
diff --git a/kubeapi-load-balancer/debug-scripts/packages b/kubeapi-load-balancer/debug-scripts/packages
new file mode 100755
index 0000000..b60a9cf
--- /dev/null
+++ b/kubeapi-load-balancer/debug-scripts/packages
@@ -0,0 +1,7 @@
+#!/bin/sh
+set -ux
+
+dpkg --list > $DEBUG_SCRIPT_DIR/dpkg-list
+snap list > $DEBUG_SCRIPT_DIR/snap-list
+pip2 list > $DEBUG_SCRIPT_DIR/pip2-list
+pip3 list > $DEBUG_SCRIPT_DIR/pip3-list
diff --git a/kubeapi-load-balancer/debug-scripts/sysctl b/kubeapi-load-balancer/debug-scripts/sysctl
new file mode 100755
index 0000000..a86a6c8
--- /dev/null
+++ b/kubeapi-load-balancer/debug-scripts/sysctl
@@ -0,0 +1,4 @@
+#!/bin/sh
+set -ux
+
+sysctl -a > $DEBUG_SCRIPT_DIR/sysctl
diff --git a/kubeapi-load-balancer/debug-scripts/systemd b/kubeapi-load-balancer/debug-scripts/systemd
new file mode 100755
index 0000000..8bb9b6f
--- /dev/null
+++ b/kubeapi-load-balancer/debug-scripts/systemd
@@ -0,0 +1,9 @@
+#!/bin/sh
+set -ux
+
+systemctl --all > $DEBUG_SCRIPT_DIR/systemctl
+journalctl > $DEBUG_SCRIPT_DIR/journalctl
+systemd-analyze time > $DEBUG_SCRIPT_DIR/systemd-analyze-time
+systemd-analyze blame > $DEBUG_SCRIPT_DIR/systemd-analyze-blame
+systemd-analyze critical-chain > $DEBUG_SCRIPT_DIR/systemd-analyze-critical-chain
+systemd-analyze dump > $DEBUG_SCRIPT_DIR/systemd-analyze-dump
diff --git a/kubeapi-load-balancer/debug-scripts/tls-certs b/kubeapi-load-balancer/debug-scripts/tls-certs
new file mode 100755
index 0000000..2692e51
--- /dev/null
+++ b/kubeapi-load-balancer/debug-scripts/tls-certs
@@ -0,0 +1,21 @@
+#!/usr/local/sbin/charm-env python3
+
+import os
+import shutil
+import traceback
+import debug_script
+from charms import layer
+
+options = layer.options.get('tls-client')
+
+def copy_cert(source_key, name):
+ try:
+ source = options[source_key]
+ dest = os.path.join(debug_script.dir, name)
+ shutil.copy(source, dest)
+ except Exception:
+ traceback.print_exc()
+
+copy_cert('client_certificate_path', 'client.crt')
+copy_cert('server_certificate_path', 'server.crt')
+copy_cert('ca_certificate_path', 'ca.crt')
diff --git a/kubeapi-load-balancer/docs/status.md b/kubeapi-load-balancer/docs/status.md
new file mode 100644
index 0000000..c6cceab
--- /dev/null
+++ b/kubeapi-load-balancer/docs/status.md
@@ -0,0 +1,91 @@
+
+
+```python
+maintenance(message)
+```
+
+Set the status to the `MAINTENANCE` state with the given operator message.
+
+__Parameters__
+
+- __`message` (str)__: Message to convey to the operator.
+
+
+<h2 id="charms.layer.status.maint">maint</h2>
+
+```python
+maint(message)
+```
+
+Shorthand alias for
+[maintenance](status.md#charms.layer.status.maintenance).
+
+__Parameters__
+
+- __`message` (str)__: Message to convey to the operator.
+
+
+<h2 id="charms.layer.status.blocked">blocked</h2>
+
+```python
+blocked(message)
+```
+
+Set the status to the `BLOCKED` state with the given operator message.
+
+__Parameters__
+
+- __`message` (str)__: Message to convey to the operator.
+
+
+<h2 id="charms.layer.status.waiting">waiting</h2>
+
+```python
+waiting(message)
+```
+
+Set the status to the `WAITING` state with the given operator message.
+
+__Parameters__
+
+- __`message` (str)__: Message to convey to the operator.
+
+
+<h2 id="charms.layer.status.active">active</h2>
+
+```python
+active(message)
+```
+
+Set the status to the `ACTIVE` state with the given operator message.
+
+__Parameters__
+
+- __`message` (str)__: Message to convey to the operator.
+
+
+<h2 id="charms.layer.status.status_set">status_set</h2>
+
+```python
+status_set(workload_state, message)
+```
+
+Set the status to the given workload state with a message.
+
+__Parameters__
+
+- __`workload_state` (WorkloadState or str)__: State of the workload. Should be
+ a [WorkloadState](status.md#charms.layer.status.WorkloadState) enum
+ member, or the string value of one of those members.
+- __`message` (str)__: Message to convey to the operator.
+
diff --git a/kubeapi-load-balancer/hooks/apiserver-relation-broken b/kubeapi-load-balancer/hooks/apiserver-relation-broken
new file mode 100755
index 0000000..9858c6b
--- /dev/null
+++ b/kubeapi-load-balancer/hooks/apiserver-relation-broken
@@ -0,0 +1,22 @@
+#!/usr/bin/env python3
+
+# Load modules from $JUJU_CHARM_DIR/lib
+import sys
+sys.path.append('lib')
+
+from charms.layer import basic # noqa
+basic.bootstrap_charm_deps()
+
+from charmhelpers.core import hookenv # noqa
+hookenv.atstart(basic.init_config_states)
+hookenv.atexit(basic.clear_config_states)
+
+
+# This will load and run the appropriate @hook and other decorated
+# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive,
+# and $JUJU_CHARM_DIR/hooks/relations.
+#
+# See https://jujucharms.com/docs/stable/authors-charm-building
+# for more information on this pattern.
+from charms.reactive import main # noqa
+main()
diff --git a/kubeapi-load-balancer/hooks/apiserver-relation-changed b/kubeapi-load-balancer/hooks/apiserver-relation-changed
new file mode 100755
index 0000000..9858c6b
--- /dev/null
+++ b/kubeapi-load-balancer/hooks/apiserver-relation-changed
@@ -0,0 +1,22 @@
+#!/usr/bin/env python3
+
+# Load modules from $JUJU_CHARM_DIR/lib
+import sys
+sys.path.append('lib')
+
+from charms.layer import basic # noqa
+basic.bootstrap_charm_deps()
+
+from charmhelpers.core import hookenv # noqa
+hookenv.atstart(basic.init_config_states)
+hookenv.atexit(basic.clear_config_states)
+
+
+# This will load and run the appropriate @hook and other decorated
+# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive,
+# and $JUJU_CHARM_DIR/hooks/relations.
+#
+# See https://jujucharms.com/docs/stable/authors-charm-building
+# for more information on this pattern.
+from charms.reactive import main # noqa
+main()
diff --git a/kubeapi-load-balancer/hooks/apiserver-relation-created b/kubeapi-load-balancer/hooks/apiserver-relation-created
new file mode 100755
index 0000000..9858c6b
--- /dev/null
+++ b/kubeapi-load-balancer/hooks/apiserver-relation-created
@@ -0,0 +1,22 @@
+#!/usr/bin/env python3
+
+# Load modules from $JUJU_CHARM_DIR/lib
+import sys
+sys.path.append('lib')
+
+from charms.layer import basic # noqa
+basic.bootstrap_charm_deps()
+
+from charmhelpers.core import hookenv # noqa
+hookenv.atstart(basic.init_config_states)
+hookenv.atexit(basic.clear_config_states)
+
+
+# This will load and run the appropriate @hook and other decorated
+# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive,
+# and $JUJU_CHARM_DIR/hooks/relations.
+#
+# See https://jujucharms.com/docs/stable/authors-charm-building
+# for more information on this pattern.
+from charms.reactive import main # noqa
+main()
diff --git a/kubeapi-load-balancer/hooks/apiserver-relation-departed b/kubeapi-load-balancer/hooks/apiserver-relation-departed
new file mode 100755
index 0000000..9858c6b
--- /dev/null
+++ b/kubeapi-load-balancer/hooks/apiserver-relation-departed
@@ -0,0 +1,22 @@
+#!/usr/bin/env python3
+
+# Load modules from $JUJU_CHARM_DIR/lib
+import sys
+sys.path.append('lib')
+
+from charms.layer import basic # noqa
+basic.bootstrap_charm_deps()
+
+from charmhelpers.core import hookenv # noqa
+hookenv.atstart(basic.init_config_states)
+hookenv.atexit(basic.clear_config_states)
+
+
+# This will load and run the appropriate @hook and other decorated
+# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive,
+# and $JUJU_CHARM_DIR/hooks/relations.
+#
+# See https://jujucharms.com/docs/stable/authors-charm-building
+# for more information on this pattern.
+from charms.reactive import main # noqa
+main()
diff --git a/kubeapi-load-balancer/hooks/apiserver-relation-joined b/kubeapi-load-balancer/hooks/apiserver-relation-joined
new file mode 100755
index 0000000..9858c6b
--- /dev/null
+++ b/kubeapi-load-balancer/hooks/apiserver-relation-joined
@@ -0,0 +1,22 @@
+#!/usr/bin/env python3
+
+# Load modules from $JUJU_CHARM_DIR/lib
+import sys
+sys.path.append('lib')
+
+from charms.layer import basic # noqa
+basic.bootstrap_charm_deps()
+
+from charmhelpers.core import hookenv # noqa
+hookenv.atstart(basic.init_config_states)
+hookenv.atexit(basic.clear_config_states)
+
+
+# This will load and run the appropriate @hook and other decorated
+# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive,
+# and $JUJU_CHARM_DIR/hooks/relations.
+#
+# See https://jujucharms.com/docs/stable/authors-charm-building
+# for more information on this pattern.
+from charms.reactive import main # noqa
+main()
diff --git a/kubeapi-load-balancer/hooks/certificates-relation-broken b/kubeapi-load-balancer/hooks/certificates-relation-broken
new file mode 100755
index 0000000..9858c6b
--- /dev/null
+++ b/kubeapi-load-balancer/hooks/certificates-relation-broken
@@ -0,0 +1,22 @@
+#!/usr/bin/env python3
+
+# Load modules from $JUJU_CHARM_DIR/lib
+import sys
+sys.path.append('lib')
+
+from charms.layer import basic # noqa
+basic.bootstrap_charm_deps()
+
+from charmhelpers.core import hookenv # noqa
+hookenv.atstart(basic.init_config_states)
+hookenv.atexit(basic.clear_config_states)
+
+
+# This will load and run the appropriate @hook and other decorated
+# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive,
+# and $JUJU_CHARM_DIR/hooks/relations.
+#
+# See https://jujucharms.com/docs/stable/authors-charm-building
+# for more information on this pattern.
+from charms.reactive import main # noqa
+main()
diff --git a/kubeapi-load-balancer/hooks/certificates-relation-changed b/kubeapi-load-balancer/hooks/certificates-relation-changed
new file mode 100755
index 0000000..9858c6b
--- /dev/null
+++ b/kubeapi-load-balancer/hooks/certificates-relation-changed
@@ -0,0 +1,22 @@
+#!/usr/bin/env python3
+
+# Load modules from $JUJU_CHARM_DIR/lib
+import sys
+sys.path.append('lib')
+
+from charms.layer import basic # noqa
+basic.bootstrap_charm_deps()
+
+from charmhelpers.core import hookenv # noqa
+hookenv.atstart(basic.init_config_states)
+hookenv.atexit(basic.clear_config_states)
+
+
+# This will load and run the appropriate @hook and other decorated
+# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive,
+# and $JUJU_CHARM_DIR/hooks/relations.
+#
+# See https://jujucharms.com/docs/stable/authors-charm-building
+# for more information on this pattern.
+from charms.reactive import main # noqa
+main()
diff --git a/kubeapi-load-balancer/hooks/certificates-relation-created b/kubeapi-load-balancer/hooks/certificates-relation-created
new file mode 100755
index 0000000..9858c6b
--- /dev/null
+++ b/kubeapi-load-balancer/hooks/certificates-relation-created
@@ -0,0 +1,22 @@
+#!/usr/bin/env python3
+
+# Load modules from $JUJU_CHARM_DIR/lib
+import sys
+sys.path.append('lib')
+
+from charms.layer import basic # noqa
+basic.bootstrap_charm_deps()
+
+from charmhelpers.core import hookenv # noqa
+hookenv.atstart(basic.init_config_states)
+hookenv.atexit(basic.clear_config_states)
+
+
+# This will load and run the appropriate @hook and other decorated
+# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive,
+# and $JUJU_CHARM_DIR/hooks/relations.
+#
+# See https://jujucharms.com/docs/stable/authors-charm-building
+# for more information on this pattern.
+from charms.reactive import main # noqa
+main()
diff --git a/kubeapi-load-balancer/hooks/certificates-relation-departed b/kubeapi-load-balancer/hooks/certificates-relation-departed
new file mode 100755
index 0000000..9858c6b
--- /dev/null
+++ b/kubeapi-load-balancer/hooks/certificates-relation-departed
@@ -0,0 +1,22 @@
+#!/usr/bin/env python3
+
+# Load modules from $JUJU_CHARM_DIR/lib
+import sys
+sys.path.append('lib')
+
+from charms.layer import basic # noqa
+basic.bootstrap_charm_deps()
+
+from charmhelpers.core import hookenv # noqa
+hookenv.atstart(basic.init_config_states)
+hookenv.atexit(basic.clear_config_states)
+
+
+# This will load and run the appropriate @hook and other decorated
+# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive,
+# and $JUJU_CHARM_DIR/hooks/relations.
+#
+# See https://jujucharms.com/docs/stable/authors-charm-building
+# for more information on this pattern.
+from charms.reactive import main # noqa
+main()
diff --git a/kubeapi-load-balancer/hooks/certificates-relation-joined b/kubeapi-load-balancer/hooks/certificates-relation-joined
new file mode 100755
index 0000000..9858c6b
--- /dev/null
+++ b/kubeapi-load-balancer/hooks/certificates-relation-joined
@@ -0,0 +1,22 @@
+#!/usr/bin/env python3
+
+# Load modules from $JUJU_CHARM_DIR/lib
+import sys
+sys.path.append('lib')
+
+from charms.layer import basic # noqa
+basic.bootstrap_charm_deps()
+
+from charmhelpers.core import hookenv # noqa
+hookenv.atstart(basic.init_config_states)
+hookenv.atexit(basic.clear_config_states)
+
+
+# This will load and run the appropriate @hook and other decorated
+# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive,
+# and $JUJU_CHARM_DIR/hooks/relations.
+#
+# See https://jujucharms.com/docs/stable/authors-charm-building
+# for more information on this pattern.
+from charms.reactive import main # noqa
+main()
diff --git a/kubeapi-load-balancer/hooks/collect-metrics b/kubeapi-load-balancer/hooks/collect-metrics
new file mode 100755
index 0000000..8a27863
--- /dev/null
+++ b/kubeapi-load-balancer/hooks/collect-metrics
@@ -0,0 +1,46 @@
+#!/usr/bin/env python3
+
+# Load modules from $CHARM_DIR/lib
+import sys
+sys.path.append('lib')
+
+import yaml
+import os
+from subprocess import check_output, check_call, CalledProcessError
+
+
+def build_command(doc):
+ values = {}
+ metrics = doc.get("metrics", {})
+ for metric, mdoc in metrics.items():
+ if not mdoc:
+ continue
+ cmd = mdoc.get("command")
+ if cmd:
+ try:
+ value = check_output(cmd, shell=True, universal_newlines=True)
+ except CalledProcessError as e:
+ check_call(['juju-log', '-lERROR',
+ 'Error collecting metric {}:\n{}'.format(
+ metric, e.output)])
+ continue
+ value = value.strip()
+ if value:
+ values[metric] = value
+
+ if not values:
+ return None
+ command = ["add-metric"]
+ for metric, value in values.items():
+ command.append("%s=%s" % (metric, value))
+ return command
+
+
+if __name__ == '__main__':
+ charm_dir = os.path.dirname(os.path.abspath(os.path.join(__file__, "..")))
+ metrics_yaml = os.path.join(charm_dir, "metrics.yaml")
+ with open(metrics_yaml) as f:
+ doc = yaml.load(f)
+ command = build_command(doc)
+ if command:
+ check_call(command)
diff --git a/kubeapi-load-balancer/hooks/config-changed b/kubeapi-load-balancer/hooks/config-changed
new file mode 100755
index 0000000..9858c6b
--- /dev/null
+++ b/kubeapi-load-balancer/hooks/config-changed
@@ -0,0 +1,22 @@
+#!/usr/bin/env python3
+
+# Load modules from $JUJU_CHARM_DIR/lib
+import sys
+sys.path.append('lib')
+
+from charms.layer import basic # noqa
+basic.bootstrap_charm_deps()
+
+from charmhelpers.core import hookenv # noqa
+hookenv.atstart(basic.init_config_states)
+hookenv.atexit(basic.clear_config_states)
+
+
+# This will load and run the appropriate @hook and other decorated
+# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive,
+# and $JUJU_CHARM_DIR/hooks/relations.
+#
+# See https://jujucharms.com/docs/stable/authors-charm-building
+# for more information on this pattern.
+from charms.reactive import main # noqa
+main()
diff --git a/kubeapi-load-balancer/hooks/ha-relation-broken b/kubeapi-load-balancer/hooks/ha-relation-broken
new file mode 100755
index 0000000..9858c6b
--- /dev/null
+++ b/kubeapi-load-balancer/hooks/ha-relation-broken
@@ -0,0 +1,22 @@
+#!/usr/bin/env python3
+
+# Load modules from $JUJU_CHARM_DIR/lib
+import sys
+sys.path.append('lib')
+
+from charms.layer import basic # noqa
+basic.bootstrap_charm_deps()
+
+from charmhelpers.core import hookenv # noqa
+hookenv.atstart(basic.init_config_states)
+hookenv.atexit(basic.clear_config_states)
+
+
+# This will load and run the appropriate @hook and other decorated
+# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive,
+# and $JUJU_CHARM_DIR/hooks/relations.
+#
+# See https://jujucharms.com/docs/stable/authors-charm-building
+# for more information on this pattern.
+from charms.reactive import main # noqa
+main()
diff --git a/kubeapi-load-balancer/hooks/ha-relation-changed b/kubeapi-load-balancer/hooks/ha-relation-changed
new file mode 100755
index 0000000..9858c6b
--- /dev/null
+++ b/kubeapi-load-balancer/hooks/ha-relation-changed
@@ -0,0 +1,22 @@
+#!/usr/bin/env python3
+
+# Load modules from $JUJU_CHARM_DIR/lib
+import sys
+sys.path.append('lib')
+
+from charms.layer import basic # noqa
+basic.bootstrap_charm_deps()
+
+from charmhelpers.core import hookenv # noqa
+hookenv.atstart(basic.init_config_states)
+hookenv.atexit(basic.clear_config_states)
+
+
+# This will load and run the appropriate @hook and other decorated
+# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive,
+# and $JUJU_CHARM_DIR/hooks/relations.
+#
+# See https://jujucharms.com/docs/stable/authors-charm-building
+# for more information on this pattern.
+from charms.reactive import main # noqa
+main()
diff --git a/kubeapi-load-balancer/hooks/ha-relation-created b/kubeapi-load-balancer/hooks/ha-relation-created
new file mode 100755
index 0000000..9858c6b
--- /dev/null
+++ b/kubeapi-load-balancer/hooks/ha-relation-created
@@ -0,0 +1,22 @@
+#!/usr/bin/env python3
+
+# Load modules from $JUJU_CHARM_DIR/lib
+import sys
+sys.path.append('lib')
+
+from charms.layer import basic # noqa
+basic.bootstrap_charm_deps()
+
+from charmhelpers.core import hookenv # noqa
+hookenv.atstart(basic.init_config_states)
+hookenv.atexit(basic.clear_config_states)
+
+
+# This will load and run the appropriate @hook and other decorated
+# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive,
+# and $JUJU_CHARM_DIR/hooks/relations.
+#
+# See https://jujucharms.com/docs/stable/authors-charm-building
+# for more information on this pattern.
+from charms.reactive import main # noqa
+main()
diff --git a/kubeapi-load-balancer/hooks/ha-relation-departed b/kubeapi-load-balancer/hooks/ha-relation-departed
new file mode 100755
index 0000000..9858c6b
--- /dev/null
+++ b/kubeapi-load-balancer/hooks/ha-relation-departed
@@ -0,0 +1,22 @@
+#!/usr/bin/env python3
+
+# Load modules from $JUJU_CHARM_DIR/lib
+import sys
+sys.path.append('lib')
+
+from charms.layer import basic # noqa
+basic.bootstrap_charm_deps()
+
+from charmhelpers.core import hookenv # noqa
+hookenv.atstart(basic.init_config_states)
+hookenv.atexit(basic.clear_config_states)
+
+
+# This will load and run the appropriate @hook and other decorated
+# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive,
+# and $JUJU_CHARM_DIR/hooks/relations.
+#
+# See https://jujucharms.com/docs/stable/authors-charm-building
+# for more information on this pattern.
+from charms.reactive import main # noqa
+main()
diff --git a/kubeapi-load-balancer/hooks/ha-relation-joined b/kubeapi-load-balancer/hooks/ha-relation-joined
new file mode 100755
index 0000000..9858c6b
--- /dev/null
+++ b/kubeapi-load-balancer/hooks/ha-relation-joined
@@ -0,0 +1,22 @@
+#!/usr/bin/env python3
+
+# Load modules from $JUJU_CHARM_DIR/lib
+import sys
+sys.path.append('lib')
+
+from charms.layer import basic # noqa
+basic.bootstrap_charm_deps()
+
+from charmhelpers.core import hookenv # noqa
+hookenv.atstart(basic.init_config_states)
+hookenv.atexit(basic.clear_config_states)
+
+
+# This will load and run the appropriate @hook and other decorated
+# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive,
+# and $JUJU_CHARM_DIR/hooks/relations.
+#
+# See https://jujucharms.com/docs/stable/authors-charm-building
+# for more information on this pattern.
+from charms.reactive import main # noqa
+main()
diff --git a/kubeapi-load-balancer/hooks/hook.template b/kubeapi-load-balancer/hooks/hook.template
new file mode 100644
index 0000000..9858c6b
--- /dev/null
+++ b/kubeapi-load-balancer/hooks/hook.template
@@ -0,0 +1,22 @@
+#!/usr/bin/env python3
+
+# Load modules from $JUJU_CHARM_DIR/lib
+import sys
+sys.path.append('lib')
+
+from charms.layer import basic # noqa
+basic.bootstrap_charm_deps()
+
+from charmhelpers.core import hookenv # noqa
+hookenv.atstart(basic.init_config_states)
+hookenv.atexit(basic.clear_config_states)
+
+
+# This will load and run the appropriate @hook and other decorated
+# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive,
+# and $JUJU_CHARM_DIR/hooks/relations.
+#
+# See https://jujucharms.com/docs/stable/authors-charm-building
+# for more information on this pattern.
+from charms.reactive import main # noqa
+main()
diff --git a/kubeapi-load-balancer/hooks/install b/kubeapi-load-balancer/hooks/install
new file mode 100755
index 0000000..9858c6b
--- /dev/null
+++ b/kubeapi-load-balancer/hooks/install
@@ -0,0 +1,22 @@
+#!/usr/bin/env python3
+
+# Load modules from $JUJU_CHARM_DIR/lib
+import sys
+sys.path.append('lib')
+
+from charms.layer import basic # noqa
+basic.bootstrap_charm_deps()
+
+from charmhelpers.core import hookenv # noqa
+hookenv.atstart(basic.init_config_states)
+hookenv.atexit(basic.clear_config_states)
+
+
+# This will load and run the appropriate @hook and other decorated
+# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive,
+# and $JUJU_CHARM_DIR/hooks/relations.
+#
+# See https://jujucharms.com/docs/stable/authors-charm-building
+# for more information on this pattern.
+from charms.reactive import main # noqa
+main()
diff --git a/kubeapi-load-balancer/hooks/leader-elected b/kubeapi-load-balancer/hooks/leader-elected
new file mode 100755
index 0000000..9858c6b
--- /dev/null
+++ b/kubeapi-load-balancer/hooks/leader-elected
@@ -0,0 +1,22 @@
+#!/usr/bin/env python3
+
+# Load modules from $JUJU_CHARM_DIR/lib
+import sys
+sys.path.append('lib')
+
+from charms.layer import basic # noqa
+basic.bootstrap_charm_deps()
+
+from charmhelpers.core import hookenv # noqa
+hookenv.atstart(basic.init_config_states)
+hookenv.atexit(basic.clear_config_states)
+
+
+# This will load and run the appropriate @hook and other decorated
+# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive,
+# and $JUJU_CHARM_DIR/hooks/relations.
+#
+# See https://jujucharms.com/docs/stable/authors-charm-building
+# for more information on this pattern.
+from charms.reactive import main # noqa
+main()
diff --git a/kubeapi-load-balancer/hooks/leader-settings-changed b/kubeapi-load-balancer/hooks/leader-settings-changed
new file mode 100755
index 0000000..9858c6b
--- /dev/null
+++ b/kubeapi-load-balancer/hooks/leader-settings-changed
@@ -0,0 +1,22 @@
+#!/usr/bin/env python3
+
+# Load modules from $JUJU_CHARM_DIR/lib
+import sys
+sys.path.append('lib')
+
+from charms.layer import basic # noqa
+basic.bootstrap_charm_deps()
+
+from charmhelpers.core import hookenv # noqa
+hookenv.atstart(basic.init_config_states)
+hookenv.atexit(basic.clear_config_states)
+
+
+# This will load and run the appropriate @hook and other decorated
+# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive,
+# and $JUJU_CHARM_DIR/hooks/relations.
+#
+# See https://jujucharms.com/docs/stable/authors-charm-building
+# for more information on this pattern.
+from charms.reactive import main # noqa
+main()
diff --git a/kubeapi-load-balancer/hooks/loadbalancer-relation-broken b/kubeapi-load-balancer/hooks/loadbalancer-relation-broken
new file mode 100755
index 0000000..9858c6b
--- /dev/null
+++ b/kubeapi-load-balancer/hooks/loadbalancer-relation-broken
@@ -0,0 +1,22 @@
+#!/usr/bin/env python3
+
+# Load modules from $JUJU_CHARM_DIR/lib
+import sys
+sys.path.append('lib')
+
+from charms.layer import basic # noqa
+basic.bootstrap_charm_deps()
+
+from charmhelpers.core import hookenv # noqa
+hookenv.atstart(basic.init_config_states)
+hookenv.atexit(basic.clear_config_states)
+
+
+# This will load and run the appropriate @hook and other decorated
+# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive,
+# and $JUJU_CHARM_DIR/hooks/relations.
+#
+# See https://jujucharms.com/docs/stable/authors-charm-building
+# for more information on this pattern.
+from charms.reactive import main # noqa
+main()
diff --git a/kubeapi-load-balancer/hooks/loadbalancer-relation-changed b/kubeapi-load-balancer/hooks/loadbalancer-relation-changed
new file mode 100755
index 0000000..9858c6b
--- /dev/null
+++ b/kubeapi-load-balancer/hooks/loadbalancer-relation-changed
@@ -0,0 +1,22 @@
+#!/usr/bin/env python3
+
+# Load modules from $JUJU_CHARM_DIR/lib
+import sys
+sys.path.append('lib')
+
+from charms.layer import basic # noqa
+basic.bootstrap_charm_deps()
+
+from charmhelpers.core import hookenv # noqa
+hookenv.atstart(basic.init_config_states)
+hookenv.atexit(basic.clear_config_states)
+
+
+# This will load and run the appropriate @hook and other decorated
+# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive,
+# and $JUJU_CHARM_DIR/hooks/relations.
+#
+# See https://jujucharms.com/docs/stable/authors-charm-building
+# for more information on this pattern.
+from charms.reactive import main # noqa
+main()
diff --git a/kubeapi-load-balancer/hooks/loadbalancer-relation-created b/kubeapi-load-balancer/hooks/loadbalancer-relation-created
new file mode 100755
index 0000000..9858c6b
--- /dev/null
+++ b/kubeapi-load-balancer/hooks/loadbalancer-relation-created
@@ -0,0 +1,22 @@
+#!/usr/bin/env python3
+
+# Load modules from $JUJU_CHARM_DIR/lib
+import sys
+sys.path.append('lib')
+
+from charms.layer import basic # noqa
+basic.bootstrap_charm_deps()
+
+from charmhelpers.core import hookenv # noqa
+hookenv.atstart(basic.init_config_states)
+hookenv.atexit(basic.clear_config_states)
+
+
+# This will load and run the appropriate @hook and other decorated
+# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive,
+# and $JUJU_CHARM_DIR/hooks/relations.
+#
+# See https://jujucharms.com/docs/stable/authors-charm-building
+# for more information on this pattern.
+from charms.reactive import main # noqa
+main()
diff --git a/kubeapi-load-balancer/hooks/loadbalancer-relation-departed b/kubeapi-load-balancer/hooks/loadbalancer-relation-departed
new file mode 100755
index 0000000..9858c6b
--- /dev/null
+++ b/kubeapi-load-balancer/hooks/loadbalancer-relation-departed
@@ -0,0 +1,22 @@
+#!/usr/bin/env python3
+
+# Load modules from $JUJU_CHARM_DIR/lib
+import sys
+sys.path.append('lib')
+
+from charms.layer import basic # noqa
+basic.bootstrap_charm_deps()
+
+from charmhelpers.core import hookenv # noqa
+hookenv.atstart(basic.init_config_states)
+hookenv.atexit(basic.clear_config_states)
+
+
+# This will load and run the appropriate @hook and other decorated
+# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive,
+# and $JUJU_CHARM_DIR/hooks/relations.
+#
+# See https://jujucharms.com/docs/stable/authors-charm-building
+# for more information on this pattern.
+from charms.reactive import main # noqa
+main()
diff --git a/kubeapi-load-balancer/hooks/loadbalancer-relation-joined b/kubeapi-load-balancer/hooks/loadbalancer-relation-joined
new file mode 100755
index 0000000..9858c6b
--- /dev/null
+++ b/kubeapi-load-balancer/hooks/loadbalancer-relation-joined
@@ -0,0 +1,22 @@
+#!/usr/bin/env python3
+
+# Load modules from $JUJU_CHARM_DIR/lib
+import sys
+sys.path.append('lib')
+
+from charms.layer import basic # noqa
+basic.bootstrap_charm_deps()
+
+from charmhelpers.core import hookenv # noqa
+hookenv.atstart(basic.init_config_states)
+hookenv.atexit(basic.clear_config_states)
+
+
+# This will load and run the appropriate @hook and other decorated
+# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive,
+# and $JUJU_CHARM_DIR/hooks/relations.
+#
+# See https://jujucharms.com/docs/stable/authors-charm-building
+# for more information on this pattern.
+from charms.reactive import main # noqa
+main()
diff --git a/kubeapi-load-balancer/hooks/nrpe-external-master-relation-broken b/kubeapi-load-balancer/hooks/nrpe-external-master-relation-broken
new file mode 100755
index 0000000..9858c6b
--- /dev/null
+++ b/kubeapi-load-balancer/hooks/nrpe-external-master-relation-broken
@@ -0,0 +1,22 @@
+#!/usr/bin/env python3
+
+# Load modules from $JUJU_CHARM_DIR/lib
+import sys
+sys.path.append('lib')
+
+from charms.layer import basic # noqa
+basic.bootstrap_charm_deps()
+
+from charmhelpers.core import hookenv # noqa
+hookenv.atstart(basic.init_config_states)
+hookenv.atexit(basic.clear_config_states)
+
+
+# This will load and run the appropriate @hook and other decorated
+# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive,
+# and $JUJU_CHARM_DIR/hooks/relations.
+#
+# See https://jujucharms.com/docs/stable/authors-charm-building
+# for more information on this pattern.
+from charms.reactive import main # noqa
+main()
diff --git a/kubeapi-load-balancer/hooks/nrpe-external-master-relation-changed b/kubeapi-load-balancer/hooks/nrpe-external-master-relation-changed
new file mode 100755
index 0000000..9858c6b
--- /dev/null
+++ b/kubeapi-load-balancer/hooks/nrpe-external-master-relation-changed
@@ -0,0 +1,22 @@
+#!/usr/bin/env python3
+
+# Load modules from $JUJU_CHARM_DIR/lib
+import sys
+sys.path.append('lib')
+
+from charms.layer import basic # noqa
+basic.bootstrap_charm_deps()
+
+from charmhelpers.core import hookenv # noqa
+hookenv.atstart(basic.init_config_states)
+hookenv.atexit(basic.clear_config_states)
+
+
+# This will load and run the appropriate @hook and other decorated
+# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive,
+# and $JUJU_CHARM_DIR/hooks/relations.
+#
+# See https://jujucharms.com/docs/stable/authors-charm-building
+# for more information on this pattern.
+from charms.reactive import main # noqa
+main()
diff --git a/kubeapi-load-balancer/hooks/nrpe-external-master-relation-created b/kubeapi-load-balancer/hooks/nrpe-external-master-relation-created
new file mode 100755
index 0000000..9858c6b
--- /dev/null
+++ b/kubeapi-load-balancer/hooks/nrpe-external-master-relation-created
@@ -0,0 +1,22 @@
+#!/usr/bin/env python3
+
+# Load modules from $JUJU_CHARM_DIR/lib
+import sys
+sys.path.append('lib')
+
+from charms.layer import basic # noqa
+basic.bootstrap_charm_deps()
+
+from charmhelpers.core import hookenv # noqa
+hookenv.atstart(basic.init_config_states)
+hookenv.atexit(basic.clear_config_states)
+
+
+# This will load and run the appropriate @hook and other decorated
+# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive,
+# and $JUJU_CHARM_DIR/hooks/relations.
+#
+# See https://jujucharms.com/docs/stable/authors-charm-building
+# for more information on this pattern.
+from charms.reactive import main # noqa
+main()
diff --git a/kubeapi-load-balancer/hooks/nrpe-external-master-relation-departed b/kubeapi-load-balancer/hooks/nrpe-external-master-relation-departed
new file mode 100755
index 0000000..9858c6b
--- /dev/null
+++ b/kubeapi-load-balancer/hooks/nrpe-external-master-relation-departed
@@ -0,0 +1,22 @@
+#!/usr/bin/env python3
+
+# Load modules from $JUJU_CHARM_DIR/lib
+import sys
+sys.path.append('lib')
+
+from charms.layer import basic # noqa
+basic.bootstrap_charm_deps()
+
+from charmhelpers.core import hookenv # noqa
+hookenv.atstart(basic.init_config_states)
+hookenv.atexit(basic.clear_config_states)
+
+
+# This will load and run the appropriate @hook and other decorated
+# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive,
+# and $JUJU_CHARM_DIR/hooks/relations.
+#
+# See https://jujucharms.com/docs/stable/authors-charm-building
+# for more information on this pattern.
+from charms.reactive import main # noqa
+main()
diff --git a/kubeapi-load-balancer/hooks/nrpe-external-master-relation-joined b/kubeapi-load-balancer/hooks/nrpe-external-master-relation-joined
new file mode 100755
index 0000000..9858c6b
--- /dev/null
+++ b/kubeapi-load-balancer/hooks/nrpe-external-master-relation-joined
@@ -0,0 +1,22 @@
+#!/usr/bin/env python3
+
+# Load modules from $JUJU_CHARM_DIR/lib
+import sys
+sys.path.append('lib')
+
+from charms.layer import basic # noqa
+basic.bootstrap_charm_deps()
+
+from charmhelpers.core import hookenv # noqa
+hookenv.atstart(basic.init_config_states)
+hookenv.atexit(basic.clear_config_states)
+
+
+# This will load and run the appropriate @hook and other decorated
+# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive,
+# and $JUJU_CHARM_DIR/hooks/relations.
+#
+# See https://jujucharms.com/docs/stable/authors-charm-building
+# for more information on this pattern.
+from charms.reactive import main # noqa
+main()
diff --git a/kubeapi-load-balancer/hooks/post-series-upgrade b/kubeapi-load-balancer/hooks/post-series-upgrade
new file mode 100755
index 0000000..9858c6b
--- /dev/null
+++ b/kubeapi-load-balancer/hooks/post-series-upgrade
@@ -0,0 +1,22 @@
+#!/usr/bin/env python3
+
+# Load modules from $JUJU_CHARM_DIR/lib
+import sys
+sys.path.append('lib')
+
+from charms.layer import basic # noqa
+basic.bootstrap_charm_deps()
+
+from charmhelpers.core import hookenv # noqa
+hookenv.atstart(basic.init_config_states)
+hookenv.atexit(basic.clear_config_states)
+
+
+# This will load and run the appropriate @hook and other decorated
+# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive,
+# and $JUJU_CHARM_DIR/hooks/relations.
+#
+# See https://jujucharms.com/docs/stable/authors-charm-building
+# for more information on this pattern.
+from charms.reactive import main # noqa
+main()
diff --git a/kubeapi-load-balancer/hooks/pre-series-upgrade b/kubeapi-load-balancer/hooks/pre-series-upgrade
new file mode 100755
index 0000000..9858c6b
--- /dev/null
+++ b/kubeapi-load-balancer/hooks/pre-series-upgrade
@@ -0,0 +1,22 @@
+#!/usr/bin/env python3
+
+# Load modules from $JUJU_CHARM_DIR/lib
+import sys
+sys.path.append('lib')
+
+from charms.layer import basic # noqa
+basic.bootstrap_charm_deps()
+
+from charmhelpers.core import hookenv # noqa
+hookenv.atstart(basic.init_config_states)
+hookenv.atexit(basic.clear_config_states)
+
+
+# This will load and run the appropriate @hook and other decorated
+# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive,
+# and $JUJU_CHARM_DIR/hooks/relations.
+#
+# See https://jujucharms.com/docs/stable/authors-charm-building
+# for more information on this pattern.
+from charms.reactive import main # noqa
+main()
diff --git a/kubeapi-load-balancer/hooks/relations/hacluster/.stestr.conf b/kubeapi-load-balancer/hooks/relations/hacluster/.stestr.conf
new file mode 100644
index 0000000..5fcccac
--- /dev/null
+++ b/kubeapi-load-balancer/hooks/relations/hacluster/.stestr.conf
@@ -0,0 +1,3 @@
+[DEFAULT]
+test_path=./unit_tests
+top_dir=./
diff --git a/kubeapi-load-balancer/hooks/relations/hacluster/README.md b/kubeapi-load-balancer/hooks/relations/hacluster/README.md
new file mode 100644
index 0000000..e8147ac
--- /dev/null
+++ b/kubeapi-load-balancer/hooks/relations/hacluster/README.md
@@ -0,0 +1,90 @@
+# Overview
+
+This interface handles the communication with the hacluster subordinate
+charm using the `ha` interface protocol.
+
+# Usage
+
+## Requires
+
+The interface layer will set the following reactive states, as appropriate:
+
+ * `{relation_name}.connected` The relation is established and ready for
+ the local charm to configure the hacluster subordinate charm. The
+ configuration of the resources to manage for the hacluster charm
+ can be managed via one of the following methods:
+
+ * `manage_resources` method
+ * `bind_on` method
+
+ Configuration of the managed resources within the hacluster can be
+ managed by passing `common.CRM` object definitions to the
+ `manage_resources` method.
+
+ * `{relation_name}.available` The hacluster is up and ready.
+
+For example:
+```python
+from charms.reactive import when, when_not
+from charms.reactive import set_state, remove_state
+
+from relations.hacluster.common import CRM
+
+
+@when('ha.connected')
+def cluster_connected(hacluster):
+
+ resources = CRM()
+    resources.primitive('res_vip', 'ocf:heartbeat:IPaddr2',
+ params='ip=10.0.3.100 nic=eth0',
+ op='monitor interval="10s"')
+ resources.clone('cl_res_vip', 'res_vip')
+
+ hacluster.bind_on(iface='eth0', mcastport=4430)
+ hacluster.manage_resources(resources)
+```
+
+Additionally, for more code clarity, a custom object implementing the
+interface defined in common.ResourceDescriptor can be used to simplify
+the code for reuse.
+
+For example:
+```python
+import ipaddress
+
+from relation.hacluster.common import CRM
+from relation.hacluster.common import ResourceDescriptor
+
+class VirtualIP(ResourceDescriptor):
+ def __init__(self, vip, nic='eth0'):
+ self.vip = vip
+        self.nic = nic
+
+ def configure_resource(self, crm):
+ ipaddr = ipaddress.ip_address(self.vip)
+ if isinstance(ipaddr, ipaddress.IPv4Address):
+ res_type = 'ocf:heartbeat:IPAddr2'
+            res_params = 'ip={ip} nic={nic}'.format(ip=self.vip,
+                                                    nic=self.nic)
+ else:
+ res_type = 'ocf:heartbeat:IPv6addr'
+ res_params = 'ipv6addr={ip} nic={nic}'.format(ip=self.vip,
+ nic=self.nic)
+
+ crm.primitive('res_vip', res_type, params=res_params,
+ op='monitor interval="10s"')
+ crm.clone('cl_res_vip', 'res_vip')
+```
+
+Once the VirtualIP class above has been defined in charm code, it can make
+the code a bit cleaner. The example above can thusly be written as:
+
+```python
+@when('ha.connected')
+def cluster_connected(hacluster):
+ resources = CRM()
+ resources.add(VirtualIP('10.0.3.100'))
+
+ hacluster.bind_on(iface='eth0', mcastport=4430)
+ hacluster.manage_resources(resources)
+```
diff --git a/kubeapi-load-balancer/hooks/relations/hacluster/__init__.py b/kubeapi-load-balancer/hooks/relations/hacluster/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/kubeapi-load-balancer/hooks/relations/hacluster/common.py b/kubeapi-load-balancer/hooks/relations/hacluster/common.py
new file mode 100644
index 0000000..d896510
--- /dev/null
+++ b/kubeapi-load-balancer/hooks/relations/hacluster/common.py
@@ -0,0 +1,726 @@
+#!/usr/bin/python
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import hashlib
+import ipaddress
+from six import string_types
+
+
+class CRM(dict):
+ """
+ Configuration object for Pacemaker resources for the HACluster
+ interface. This class provides access to the supported resources
+ available in the 'crm configure' within the HACluster.
+
+ See Also
+ --------
+ More documentation is available regarding the definitions of
+ primitives, clones, and other pacemaker resources at the crmsh
+ site at http://crmsh.github.io/man
+ """
+
+ # Constants provided for ordering constraints (e.g. the kind value)
+ MANDATORY = "Mandatory"
+ OPTIONAL = "Optional"
+ SERIALIZE = "Serialize"
+
+ # Constants defining weights of constraints
+ INFINITY = "inf"
+ NEG_INFINITY = "-inf"
+
+    # Constants aliased to their interpretations for constraints
+ ALWAYS = INFINITY
+ NEVER = NEG_INFINITY
+
+ def __init__(self, *args, **kwargs):
+ self['resources'] = {}
+ self['delete_resources'] = []
+ self['resource_params'] = {}
+ self['groups'] = {}
+ self['ms'] = {}
+ self['orders'] = {}
+ self['colocations'] = {}
+ self['clones'] = {}
+ self['locations'] = {}
+ self['init_services'] = []
+ self['systemd_services'] = []
+ super(CRM, self).__init__(*args, **kwargs)
+
+ def primitive(self, name, agent, description=None, **kwargs):
+ """Configures a primitive resource within Pacemaker.
+
+ A primitive is used to describe a resource which should be managed
+ by the cluster. Primitives consist of a name, the agent type, and
+ various configuration options to the primitive. For example:
+
+ crm.primitive('www8', 'apache',
+ params='configfile=/etc/apache/www8.conf',
+ operations='$id-ref=apache_ops')
+
+    will create an apache primitive (resource) for the www8 service
+ hosted by the Apache HTTP server. The parameters specified can either
+ be provided individually (e.g. a string) or as an iterable.
+
+ The following example shows how to specify multiple ops for a drbd
+ volume in a Master/Slave configuration::
+
+ ops = ['monitor role=Master interval=60s',
+ 'monitor role=Slave interval=300s']
+
+ crm.primitive('r0', 'ocf:linbit:drbd',
+ params='drbd_resource=r0',
+ op=ops)
+
+ Additional arguments may be passed in as kwargs in which the key of
+ the kwarg is prepended to the value.
+
+ Parameters
+ ----------
+ name: str
+ the name of the primitive.
+ agent: str
+ the type of agent to use to monitor the primitive resource
+ (e.g. ocf:linbit:drbd).
+ description: str, optional, kwarg
+ a description about the resource
+ params: str or iterable, optional, kwarg
+ parameters which are provided to the resource agent
+ meta: str or iterable, optional, kwarg
+ metadata information for the primitive resource
+ utilization: str or iterable, optional, kwarg
+ utilization information for the primitive resource
+ operations: str or iterable, optional, kwarg
+ operations information for the primitive resource in id_spec
+ format (e.g. $id= or $id-ref=)
+ op: str or iterable, optional, kwarg
+ op information regarding the primitive resource. This takes the
+ form of ' [== ...]'
+
+ Returns
+ -------
+ None
+
+ See Also
+ --------
+ http://crmsh.github.io/man/#cmdhelp_configure_primitive
+ """
+ resources = self['resources']
+ resources[name] = agent
+
+ specs = ''
+ if description:
+ specs = specs + 'description="%s"' % description
+
+ # Use the ordering specified in the crm manual
+ for key in 'params', 'meta', 'utilization', 'operations', 'op':
+ if key not in kwargs:
+ continue
+ specs = specs + (' %s' % self._parse(key, kwargs[key]))
+
+ if specs:
+ self['resource_params'][name] = specs
+
+ def _parse(self, prefix, data):
+ results = ''
+ if isinstance(data, string_types):
+ data = [data]
+
+ first = True
+ for d in data:
+ if first:
+ results = results + ' '
+ first = False
+ results = results + ('%s %s ' % (prefix, d))
+ results = results.rstrip()
+ return results
+
+ def clone(self, name, resource, description=None, **kwargs):
+ """Creates a resource which should run on all nodes.
+
+ Parameters
+ ----------
+ name: str
+ the name of the clone
+ resource: str
+ the name or id of the resource to clone
+ description: str, optional
+ text containing a description for the clone
+ meta: str or list of str, optional, kwarg
+ metadata attributes to assign to the clone
+ params: str or list of str, optional, kwarg
+ parameters to assign to the clone
+
+ Returns
+ -------
+ None
+
+ See Also
+ --------
+ http://crmsh.github.io/man/#cmdhelp_configure_clone
+ """
+ clone_specs = resource
+ if description:
+ clone_specs = clone_specs + (' description="%s"' % description)
+
+ for key in 'meta', 'params':
+ if key not in kwargs:
+ continue
+ value = kwargs[key]
+ if not value:
+ continue
+ clone_specs = clone_specs + (' %s' % self._parse(key, value))
+
+ self['clones'][name] = clone_specs
+
+ def colocation(self, name, score=ALWAYS, *resources, **kwargs):
+ """Configures the colocation constraints of resources.
+
+ Provides placement constraints regarding resources defined within
+ the cluster. Using the colocate function, resource affinity or
+ anti-affinity can be defined.
+
+ For example, the following code ensures that the nova-console service
+ always runs where the cluster vip is running:
+
+ crm.colocation('console_with_vip', ALWAYS,
+ 'nova-console', 'vip')
+
+        The affinity or anti-affinity of resource relationships is
+        expressed in the `score` parameter. A positive score indicates that
+        the resources should run on the same node. A score of INFINITY (or
+        ALWAYS) will ensure the resources are always run on the same node(s)
+        and a score of NEG_INFINITY (or NEVER) ensures that the resources are
+        never run on the same node(s).
+
+ crm.colocation('never_apache_with_dummy', NEVER,
+ 'apache', 'dummy')
+
+ Any *resources values which are provided are treated as resources which
+ the colocation constraint applies to. At least two resources must be
+ defined as part of the ordering constraint.
+
+        The resources take the form of <resource>[:<role>]. If the
+ colocation constraint applies specifically to a role, this information
+        should be included in the resource supplied.
+
+ Parameters
+ ----------
+        name: str
+ id or name of the colocation constraint
+ score: str {ALWAYS, INFINITY, NEVER, NEGATIVE_INFINITY} or int
+ the score or weight of the colocation constraint. A positive value
+ will indicate that the resources should run on the same node. A
+ negative value indicates that the resources should run on separate
+ nodes.
+ resources: str or list
+ the list of resources which the colocation constraint applies to.
+ node_attribute: str, optional, kwarg
+ can be used to run the resources on a set of nodes, not just a
+ single node.
+
+ Returns
+ -------
+ None
+
+ See Also
+ --------
+ http://crmsh.github.io/man/#cmdhelp_configure_colocation
+ """
+ specs = '%s: %s' % (score, ' '.join(resources))
+ if 'node_attribute' in kwargs:
+ specs = specs + (' node-attribute=%s' % kwargs['node_attribute'])
+ self['colocations'][name] = specs
+
+ def group(self, name, *resources, **kwargs):
+ """Creates a group of resources within Pacemaker.
+
+ The created group includes the list of resources provided in the list
+ of resources supplied. For example::
+
+ crm.group('grp_mysql', 'res_mysql_rbd', 'res_mysql_fs',
+ 'res_mysql_vip', 'res_mysqld')
+
+ will create the 'grp_mysql' resource group consisting of the
+ res_mysql_rbd, res_mysql_fs, res_mysql_vip, and res_mysqld resources.
+
+ Parameters
+ ----------
+ name: str
+ the name of the group resource
+ resources: list of str
+ the names or ids of resources to include within the group.
+ description: str, optional, kwarg
+ text to describe the resource
+ meta: str or list of str, optional, kwarg
+ metadata attributes to assign to the group
+ params: str or list of str, optional, kwarg
+ parameters to assign to the group
+
+ Returns
+ -------
+ None
+
+ See Also
+ --------
+ http://crmsh.github.io/man/#cmdhelp_configure_group
+ """
+ specs = ' '.join(resources)
+ if 'description' in kwargs:
+            specs = specs + (' description="%s"' % kwargs['description'])
+
+ for key in 'meta', 'params':
+ if key not in kwargs:
+ continue
+ value = kwargs[key]
+ specs = specs + (' %s' % self._parse(key, value))
+
+ self['groups'][name] = specs
+
+ def remove_deleted_resources(self):
+ """Work through the existing resources and remove any mention of ones
+ which have been marked for deletion."""
+ for res in self['delete_resources']:
+ for key in self.keys():
+ if key == 'delete_resources':
+ continue
+ if isinstance(self[key], dict) and res in self[key].keys():
+ del self[key][res]
+ elif isinstance(self[key], list) and res in self[key]:
+ self[key].remove(res)
+ elif isinstance(self[key], tuple) and res in self[key]:
+ self[key] = tuple(x for x in self[key] if x != res)
+
+ def delete_resource(self, *resources):
+ """Specify objects/resources to be deleted from within Pacemaker. This
+        is not additive, the list of resources is set to exactly what was
+ passed in.
+
+ Parameters
+ ----------
+ resources: str or list
+ the name or id of the specific resource to delete.
+
+ Returns
+ -------
+ None
+
+ See Also
+ --------
+ http://crmsh.github.io/man/#cmdhelp_configure_delete
+ """
+ self['delete_resources'] = resources
+ self.remove_deleted_resources()
+
+ def add_delete_resource(self, resource):
+ """Specify an object/resource to delete from within Pacemaker. It can
+ be called multiple times to add additional resources to the deletion
+ list.
+
+ Parameters
+ ----------
+ resources: str
+ the name or id of the specific resource to delete.
+
+ Returns
+ -------
+ None
+
+ See Also
+ --------
+ http://crmsh.github.io/man/#cmdhelp_configure_delete
+ """
+ if resource not in self['delete_resources']:
+ # NOTE(fnordahl): this unpleasant piece of code is regrettably
+        # necessary for Python3.4 (and trusty) compatibility see LP: #1814218
+ # and LP: #1813982
+ self['delete_resources'] = tuple(
+ self['delete_resources'] or ()) + (resource,)
+ self.remove_deleted_resources()
+
+ def init_services(self, *resources):
+ """Specifies that the service(s) is an init or upstart service.
+
+ Services (resources) which are noted as upstart services are
+ disabled, stopped, and left to pacemaker to manage the resource.
+
+ Parameters
+ ----------
+ resources: str or list of str, varargs
+ The resources which should be noted as init services.
+
+ Returns
+ -------
+ None
+ """
+ self['init_services'] = resources
+
+ def systemd_services(self, *resources):
+ """Specifies that the service(s) is a systemd service.
+
+ Services (resources) which are noted as systemd services are
+ disabled, stopped, and left to pacemaker to manage the resource.
+
+ Parameters
+ ----------
+ resources: str or list of str, varargs
+ The resources which should be noted as systemd services.
+
+ Returns
+ -------
+ None
+ """
+ self['systemd_services'] = resources
+
+ def ms(self, name, resource, description=None, **kwargs):
+ """Create a master/slave resource type.
+
+ The following code provides an example of creating a master/slave
+ resource on drbd disk1::
+
+ crm.ms('disk1', 'drbd1', meta='notify=true globally-unique=false')
+
+ Parameters
+ ----------
+ name: str
+ the name or id of the master resource
+ resource: str
+            the name or id of the resource which now has a master/slave
+            association tied to it.
+ description: str, optional
+ a textual description of the master resource
+ meta: str or list of strs, optional, kwargs
+ strings defining the metadata for the master/slave resource type
+ params: str or list of strs, optional, kwargs
+ parameter strings which should be passed to the master/slave
+ resource creation
+
+ Returns
+ -------
+ None
+
+ See Also
+ --------
+ http://crmsh.github.io/man/#cmdhelp_configure_ms
+ """
+ specs = resource
+ if description:
+ specs = specs + (' description="%s"' % description)
+
+ for key in 'meta', 'params':
+ if key not in kwargs:
+ continue
+ value = kwargs[key]
+ specs = specs + (' %s' % self._parse(key, value))
+
+ self['ms'][name] = specs
+
+ def location(self, name, resource, **kwargs):
+ """Defines the preference of nodes for the given resource.
+
+        The location constraints consist of one or more rules which specify
+ a score to be awarded if the rules match.
+
+ Parameters
+ ----------
+ name: str
+ the name or id of the location constraint
+ resource: str
+            the name, id, resource, set, tag, or resource pattern defining the
+ set of resources which match the location placement constraint.
+ attributes: str or list str, optional, kwarg
+ attributes which should be assigned to the location constraint
+ rule: str or list of str, optional, kwarg
+ the rule(s) which define the location constraint rules when
+ selecting a location to run the resource.
+
+ Returns
+ -------
+ None
+
+ See Also
+ --------
+ http://crmsh.github.io/man/#cmdhelp_configure_location
+ """
+ specs = resource
+
+ # Check if there are attributes assigned to the location and if so,
+ # format the spec string with the attributes
+ if 'attributes' in kwargs:
+ attrs = kwargs['attributes']
+ if isinstance(attrs, string_types):
+ attrs = [attrs]
+ specs = specs + (' %s' % ' '.join(attrs))
+
+ if 'rule' in kwargs:
+ rules = kwargs['rule']
+ specs = specs + (' %s' % self._parse('rule', rules))
+
+ self['locations'][name] = specs
+
+ def order(self, name, score=None, *resources, **kwargs):
+ """Configures the ordering constraints of resources.
+
+ Provides ordering constraints to resources defined in a Pacemaker
+ cluster which affect the way that resources are started, stopped,
+ promoted, etc. Basic ordering is provided by simply specifying the
+ ordering name and an ordered list of the resources which the ordering
+ constraint applies to.
+
+ For example, the following code ensures that the apache resource is
+ started after the ClusterIP is started::
+
+ hacluster.order('apache-after-ip', 'ClusterIP', 'apache')
+
+ By default, the ordering constraint will specify that the ordering
+ constraint is mandatory. The constraint behavior can be specified
+ using the 'score' keyword argument, e.g.::
+
+            hacluster.order('apache-after-ip', hacluster.OPTIONAL,
+                            'ClusterIP', 'apache')
+
+ Any *resources values which are provided are treated as resources which
+ the ordering constraint applies to. At least two resources must be
+ defined as part of the ordering constraint.
+
+        The resources take the form of <resource>[:<action>]. If the
+ ordering constraint applies to a specific action for the resource,
+ this information should be included in the resource supplied.
+
+ Parameters
+ ----------
+ name: str
+ the id or name of the order constraint
+        resources: str or list of strs in varargs format
+ the resources the ordering constraint applies to. The ordering
+ of the list of resources is used to provide the ordering.
+        score: {MANDATORY, OPTIONAL, SERIALIZE}, optional
+ the score of the ordering constraint.
+ symmetrical: boolean, optional, kwarg
+ when True, then the services for the resources will be stopped in
+ the reverse order. The default value for this is True.
+
+ Returns
+ -------
+ None
+
+ See Also
+ --------
+ http://crmsh.github.io/man/#cmdhelp_configure_order
+ """
+ specs = ''
+ if score:
+ specs = '%s:' % score
+
+ specs = specs + (' %s' % ' '.join(resources))
+ if 'symmetrical' in kwargs:
+            specs = specs + (' symmetrical=%s' % kwargs['symmetrical'])
+
+ self['orders'][name] = specs
+
+ def add(self, resource_desc):
+ """Adds a resource descriptor object to the CRM configuration.
+
+ Adds a `ResourceDescriptor` object to the CRM configuration which
+ understands how to configure the resource itself. The
+ `ResourceDescriptor` object needs to know how to interact with this
+ CRM class in order to properly configure the pacemaker resources.
+
+ The minimum viable resource descriptor object will implement a method
+ which takes a reference parameter to this CRM in order to configure
+ itself.
+
+ Parameters
+ ----------
+        resource_desc: ResourceDescriptor
+ an object which provides an abstraction of a monitored resource
+ within pacemaker.
+
+ Returns
+ -------
+ None
+ """
+ method = getattr(resource_desc, 'configure_resource', None)
+ if not callable(method):
+ raise ValueError('Invalid resource_desc. The "configure_resource"'
+ ' function has not been defined.')
+
+ method(self)
+
+
+class ResourceDescriptor(object):
+ """
+ A ResourceDescriptor provides a logical resource or concept and knows
+ how to configure pacemaker.
+ """
+
+ def configure_resource(self, crm):
+ """Configures the logical resource(s) within the CRM.
+
+ This is the callback method which is invoked by the CRM in order
+ to allow this ResourceDescriptor to fully configure the logical
+ resource.
+
+ For example, a Virtual IP may provide a standard abstraction and
+ configure the specific details under the covers.
+ """
+ pass
+
+
+class InitService(ResourceDescriptor):
+ def __init__(self, service_name, init_service_name, clone=True):
+ """Class for managing init resource
+
+ :param service_name: string - Name of service
+ :param init_service_name: string - Name service uses in init system
+ :param clone: bool - clone service across all units
+ :returns: None
+ """
+ self.service_name = service_name
+ self.init_service_name = init_service_name
+ self.clone = clone
+
+ def configure_resource(self, crm):
+        """Configure new init system service resource in crm
+
+ :param crm: CRM() instance - Config object for Pacemaker resources
+ :returns: None
+ """
+ res_key = 'res_{}_{}'.format(
+ self.service_name.replace('-', '_'),
+ self.init_service_name.replace('-', '_'))
+ res_type = 'lsb:{}'.format(self.init_service_name)
+ _meta = 'migration-threshold="INFINITY" failure-timeout="5s"'
+ crm.primitive(
+ res_key, res_type, op='monitor interval="5s"', meta=_meta)
+ crm.init_services(self.init_service_name)
+ if self.clone:
+ clone_key = 'cl_{}'.format(res_key)
+ crm.clone(clone_key, res_key)
+
+
+class VirtualIP(ResourceDescriptor):
+ def __init__(self, service_name, vip, nic=None, cidr=None):
+ """Class for managing VIP resource
+
+ :param service_name: string - Name of service
+ :param vip: string - Virtual IP to be managed
+ :param nic: string - Network interface to bind vip to
+ :param cidr: string - Netmask for vip
+ :returns: None
+ """
+ self.service_name = service_name
+ self.vip = vip
+ self.nic = nic
+ self.cidr = cidr
+
+ def configure_resource(self, crm):
+ """Configure new vip resource in crm
+
+ :param crm: CRM() instance - Config object for Pacemaker resources
+ :returns: None
+ """
+ if self.nic:
+ vip_key = 'res_{}_{}_vip'.format(self.service_name, self.nic)
+ else:
+ vip_key = 'res_{}_{}_vip'.format(
+ self.service_name,
+ hashlib.sha1(self.vip.encode('UTF-8')).hexdigest()[:7])
+ ipaddr = ipaddress.ip_address(self.vip)
+ if isinstance(ipaddr, ipaddress.IPv4Address):
+ res_type = 'ocf:heartbeat:IPaddr2'
+ res_params = 'ip="{}"'.format(self.vip)
+ else:
+ res_type = 'ocf:heartbeat:IPv6addr'
+ res_params = 'ipv6addr="{}"'.format(self.vip)
+ vip_params = 'ipv6addr'
+ vip_key = 'res_{}_{}_{}_vip'.format(self.service_name, self.nic,
+ vip_params)
+
+ if self.nic:
+ res_params = '{} nic="{}"'.format(res_params, self.nic)
+ if self.cidr:
+ res_params = '{} cidr_netmask="{}"'.format(res_params, self.cidr)
+ # Monitor the VIP
+ _op_monitor = 'monitor timeout="20s" interval="10s" depth="0"'
+ _meta = 'migration-threshold="INFINITY" failure-timeout="5s"'
+ crm.primitive(
+ vip_key, res_type, params=res_params, op=_op_monitor, meta=_meta)
+
+
+class DNSEntry(ResourceDescriptor):
+
+ def __init__(self, service_name, ip, fqdn, endpoint_type):
+ """Class for managing DNS entries
+
+ :param service_name: string - Name of service
+ :param ip: string - IP to point DNS entry at
+ :param fqdn: string - DNS Entry
+ :param endpoint_type: string - The type of the endpoint represented by
+ the DNS record eg public, admin etc
+ :returns: None
+ """
+ self.service_name = service_name
+ self.ip = ip
+ self.fqdn = fqdn
+ self.endpoint_type = endpoint_type
+
+ def configure_resource(self, crm, res_type='ocf:maas:dns'):
+ """Configure new DNS resource in crm
+
+ :param crm: CRM() instance - Config object for Pacemaker resources
+ :param res_type: string - Corosync Open Cluster Framework resource
+ agent to use for DNS HA
+ :returns: None
+ """
+ res_key = 'res_{}_{}_hostname'.format(
+ self.service_name.replace('-', '_'),
+ self.endpoint_type)
+ res_params = ''
+ if self.fqdn:
+ res_params = '{} fqdn="{}"'.format(res_params, self.fqdn)
+ if self.ip:
+ res_params = '{} ip_address="{}"'.format(res_params, self.ip)
+ crm.primitive(res_key, res_type, params=res_params)
+
+
+class SystemdService(ResourceDescriptor):
+ def __init__(self, service_name, systemd_service_name, clone=True):
+ """Class for managing systemd resource
+
+ :param service_name: string - Name of service
+ :param systemd_service_name: string - Name service uses in
+ systemd system
+ :param clone: bool - clone service across all units
+ :returns: None
+ """
+ self.service_name = service_name
+ self.systemd_service_name = systemd_service_name
+ self.clone = clone
+
+ def configure_resource(self, crm):
+        """Configure new systemd system service resource in crm
+
+ :param crm: CRM() instance - Config object for Pacemaker resources
+ :returns: None
+ """
+ res_key = 'res_{}_{}'.format(
+ self.service_name.replace('-', '_'),
+ self.systemd_service_name.replace('-', '_'))
+ res_type = 'systemd:{}'.format(self.systemd_service_name)
+ _meta = 'migration-threshold="INFINITY" failure-timeout="5s"'
+ crm.primitive(
+ res_key, res_type, op='monitor interval="5s"', meta=_meta)
+ crm.systemd_services(self.systemd_service_name)
+ if self.clone:
+ clone_key = 'cl_{}'.format(res_key)
+ crm.clone(clone_key, res_key)
diff --git a/kubeapi-load-balancer/hooks/relations/hacluster/copyright b/kubeapi-load-balancer/hooks/relations/hacluster/copyright
new file mode 100644
index 0000000..5a49dcb
--- /dev/null
+++ b/kubeapi-load-balancer/hooks/relations/hacluster/copyright
@@ -0,0 +1,21 @@
+Format: http://www.debian.org/doc/packaging-manuals/copyright-format/1.0
+
+Files: *
+Copyright: 2015, Canonical Ltd.
+License: Apache-2.0
+
+License: Apache-2.0
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+ .
+ http://www.apache.org/licenses/LICENSE-2.0
+ .
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+ .
+ On Debian-based systems the full text of the Apache version 2.0 license
+ can be found in `/usr/share/common-licenses/Apache-2.0'.
diff --git a/kubeapi-load-balancer/hooks/relations/hacluster/interface.yaml b/kubeapi-load-balancer/hooks/relations/hacluster/interface.yaml
new file mode 100644
index 0000000..edd0c90
--- /dev/null
+++ b/kubeapi-load-balancer/hooks/relations/hacluster/interface.yaml
@@ -0,0 +1,13 @@
+name: hacluster
+summary: |
+ Provides the hacluster interface used for configuring Corosync
+ and Pacemaker services.
+maintainer: OpenStack Charmers
+ignore:
+ - '.gitignore'
+ - '.gitreview'
+ - '.testr.conf'
+ - 'test-requirements'
+ - 'tox.ini'
+ - 'unit_tests'
+ - '.zuul.yaml'
diff --git a/kubeapi-load-balancer/hooks/relations/hacluster/requires.py b/kubeapi-load-balancer/hooks/relations/hacluster/requires.py
new file mode 100644
index 0000000..9b72d97
--- /dev/null
+++ b/kubeapi-load-balancer/hooks/relations/hacluster/requires.py
@@ -0,0 +1,285 @@
+#!/usr/bin/python
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import json
+import hashlib
+
+import relations.hacluster.common
+from charms.reactive import hook
+from charms.reactive import RelationBase
+from charms.reactive import scopes
+from charms.reactive.helpers import data_changed
+from charmhelpers.core import hookenv
+
+
+class HAClusterRequires(RelationBase):
+ # The hacluster charm is a subordinate charm and really only works
+ # for a single service to the HA Cluster relation, therefore set the
+ # expected scope to be GLOBAL.
+ scope = scopes.GLOBAL
+
+ @hook('{requires:hacluster}-relation-joined')
+ def joined(self):
+ self.set_state('{relation_name}.connected')
+
+ @hook('{requires:hacluster}-relation-changed')
+ def changed(self):
+ if self.is_clustered():
+ self.set_state('{relation_name}.available')
+ else:
+ self.remove_state('{relation_name}.available')
+
+ @hook('{requires:hacluster}-relation-{broken,departed}')
+ def departed(self):
+ self.remove_state('{relation_name}.available')
+ self.remove_state('{relation_name}.connected')
+
+ def is_clustered(self):
+ """Has the hacluster charm set clustered?
+
+ The hacluster charm sets cluster=True when it determines it is ready.
+ Check the relation data for clustered and force a boolean return.
+
+ :returns: boolean
+ """
+ clustered_values = self.get_remote_all('clustered')
+ if clustered_values:
+ # There is only ever one subordinate hacluster unit
+ clustered = clustered_values[0]
+ # Future versions of hacluster will return a bool
+ # Current versions return a string
+ if type(clustered) is bool:
+ return clustered
+ elif (clustered is not None and
+ (clustered.lower() == 'true' or
+ clustered.lower() == 'yes')):
+ return True
+ return False
+
+    def bind_on(self, iface=None, mcastport=None):
+        """Send the corosync bind interface/port to the hacluster charm.
+
+        Relation data is only written when at least one value was provided
+        and the values differ from the last call (data_changed guard), so
+        repeated hook invocations do not spam the relation.
+
+        :param iface: string - Network interface corosync should bind to
+        :param mcastport: int - Multicast port corosync should use for
+                                cluster management traffic
+        :returns: None
+        """
+        relation_data = {}
+        if iface:
+            relation_data['corosync_bindiface'] = iface
+        if mcastport:
+            relation_data['corosync_mcastport'] = mcastport
+
+        if relation_data and data_changed('hacluster-bind_on', relation_data):
+            self.set_local(**relation_data)
+            self.set_remote(**relation_data)
+
+ def manage_resources(self, crm):
+ """
+ Request for the hacluster to manage the resources defined in the
+ crm object.
+
+ res = CRM()
+ res.primitive('res_neutron_haproxy', 'lsb:haproxy',
+ op='monitor interval="5s"')
+ res.init_services('haproxy')
+ res.clone('cl_nova_haproxy', 'res_neutron_haproxy')
+
+ hacluster.manage_resources(crm)
+
+ :param crm: CRM() instance - Config object for Pacemaker resources
+ :returns: None
+ """
+ relation_data = {
+ 'json_{}'.format(k): json.dumps(v, sort_keys=True)
+ for k, v in crm.items() if v
+ }
+ if data_changed('hacluster-manage_resources', relation_data):
+ self.set_local(**relation_data)
+ self.set_remote(**relation_data)
+
+ def bind_resources(self, iface=None, mcastport=None):
+ """Inform the ha subordinate about each service it should manage. The
+ child class specifies the services via self.ha_resources
+
+ :param iface: string - Network interface to bind to
+ :param mcastport: int - Multicast port corosync should use for cluster
+ management traffic
+ """
+ if mcastport is None:
+ mcastport = 4440
+ resources_dict = self.get_local('resources')
+ self.bind_on(iface=iface, mcastport=mcastport)
+ if resources_dict:
+ resources = relations.hacluster.common.CRM(**resources_dict)
+ self.manage_resources(resources)
+
+    def delete_resource(self, resource_name):
+        """Record a deletion request for a pacemaker resource.
+
+        The request is added to the locally stored CRM object only; it is
+        not pushed to the hacluster subordinate until the next
+        bind_resources() call re-sends the resources.
+
+        :param resource_name: string - Name of the pacemaker resource to
+                                       remove (e.g. 'res_myservice_vip')
+        :returns: None
+        """
+        resource_dict = self.get_local('resources')
+        if resource_dict:
+            resources = relations.hacluster.common.CRM(**resource_dict)
+        else:
+            resources = relations.hacluster.common.CRM()
+        resources.add_delete_resource(resource_name)
+        self.set_local(resources=resources)
+
+ def add_vip(self, name, vip, iface=None, netmask=None):
+ """Add a VirtualIP object for each user specified vip to self.resources
+
+ :param name: string - Name of service
+ :param vip: string - Virtual IP to be managed
+ :param iface: string - Network interface to bind vip to
+ :param netmask: string - Netmask for vip
+ :returns: None
+ """
+ resource_dict = self.get_local('resources')
+ if resource_dict:
+ resources = relations.hacluster.common.CRM(**resource_dict)
+ else:
+ resources = relations.hacluster.common.CRM()
+ resources.add(
+ relations.hacluster.common.VirtualIP(
+ name,
+ vip,
+ nic=iface,
+ cidr=netmask,))
+
+ # Vip Group
+ group = 'grp_{}_vips'.format(name)
+ vip_res_group_members = []
+ if resource_dict:
+ vip_resources = resource_dict.get('resources')
+ if vip_resources:
+ for vip_res in vip_resources:
+ if 'vip' in vip_res:
+ vip_res_group_members.append(vip_res)
+ resources.group(group,
+ *sorted(vip_res_group_members))
+
+ self.set_local(resources=resources)
+
+ def remove_vip(self, name, vip, iface=None):
+ """Remove a virtual IP
+
+ :param name: string - Name of service
+ :param vip: string - Virtual IP
+ :param iface: string - Network interface vip bound to
+ """
+ if iface:
+ nic_name = iface
+ else:
+ nic_name = hashlib.sha1(vip.encode('UTF-8')).hexdigest()[:7]
+ self.delete_resource('res_{}_{}_vip'.format(name, nic_name))
+
+ def add_init_service(self, name, service, clone=True):
+ """Add a InitService object for haproxy to self.resources
+
+ :param name: string - Name of service
+ :param service: string - Name service uses in init system
+ :returns: None
+ """
+ resource_dict = self.get_local('resources')
+ if resource_dict:
+ resources = relations.hacluster.common.CRM(**resource_dict)
+ else:
+ resources = relations.hacluster.common.CRM()
+ resources.add(
+ relations.hacluster.common.InitService(name, service, clone))
+ self.set_local(resources=resources)
+
+ def remove_init_service(self, name, service):
+ """Remove an init service
+
+ :param name: string - Name of service
+ :param service: string - Name of service used in init system
+ """
+ res_key = 'res_{}_{}'.format(
+ name.replace('-', '_'),
+ service.replace('-', '_'))
+ self.delete_resource(res_key)
+
+ def add_systemd_service(self, name, service, clone=True):
+ """Add a SystemdService object to self.resources
+
+ :param name: string - Name of service
+ :param service: string - Name service uses in systemd
+ :returns: None
+ """
+ resource_dict = self.get_local('resources')
+ if resource_dict:
+ resources = relations.hacluster.common.CRM(**resource_dict)
+ else:
+ resources = relations.hacluster.common.CRM()
+ resources.add(
+ relations.hacluster.common.SystemdService(name, service, clone))
+ self.set_local(resources=resources)
+
+ def remove_systemd_service(self, name, service):
+ """Remove a systemd service
+
+ :param name: string - Name of service
+ :param service: string - Name of service used in systemd
+ """
+ res_key = 'res_{}_{}'.format(
+ name.replace('-', '_'),
+ service.replace('-', '_'))
+ self.delete_resource(res_key)
+
+ def add_dnsha(self, name, ip, fqdn, endpoint_type):
+ """Add a DNS entry to self.resources
+
+ :param name: string - Name of service
+ :param ip: string - IP address dns entry should resolve to
+ :param fqdn: string - The DNS entry name
+ :param endpoint_type: string - Public, private, internal etc
+ :returns: None
+ """
+ resource_dict = self.get_local('resources')
+ if resource_dict:
+ resources = relations.hacluster.common.CRM(**resource_dict)
+ else:
+ resources = relations.hacluster.common.CRM()
+ resources.add(
+ relations.hacluster.common.DNSEntry(name, ip, fqdn, endpoint_type))
+
+ # DNS Group
+ group = 'grp_{}_hostnames'.format(name)
+ dns_res_group_members = []
+ if resource_dict:
+ dns_resources = resource_dict.get('resources')
+ if dns_resources:
+ for dns_res in dns_resources:
+ if 'hostname' in dns_res:
+ dns_res_group_members.append(dns_res)
+ resources.group(group,
+ *sorted(dns_res_group_members))
+
+ self.set_local(resources=resources)
+
+ def remove_dnsha(self, name, endpoint_type):
+ """Remove a DNS entry
+
+ :param name: string - Name of service
+ :param endpoint_type: string - Public, private, internal etc
+ :returns: None
+ """
+ res_key = 'res_{}_{}_hostname'.format(
+ self.service_name.replace('-', '_'),
+ self.endpoint_type)
+ self.delete_resource(res_key)
+
+    def get_remote_all(self, key, default=None):
+        """Return a list of all values presented by remote units for key
+
+        Walks every conversation/relation/unit and collects the relation
+        value for ``key``.  Falsy values (including a falsy ``default``)
+        are dropped, and the result is de-duplicated via set(), so the
+        returned order is unspecified.
+
+        :param key: string - Relation data key to collect
+        :param default: Fallback used when a unit has not set the key
+        :returns: list - Unique truthy values presented by remote units
+        """
+        values = []
+        for conversation in self.conversations():
+            for relation_id in conversation.relation_ids:
+                for unit in hookenv.related_units(relation_id):
+                    value = hookenv.relation_get(key,
+                                                 unit,
+                                                 relation_id) or default
+                    if value:
+                        values.append(value)
+        return list(set(values))
diff --git a/kubeapi-load-balancer/hooks/relations/hacluster/test-requirements.txt b/kubeapi-load-balancer/hooks/relations/hacluster/test-requirements.txt
new file mode 100644
index 0000000..6da7df2
--- /dev/null
+++ b/kubeapi-load-balancer/hooks/relations/hacluster/test-requirements.txt
@@ -0,0 +1,6 @@
+# Lint and unit test requirements
+flake8
+stestr>=2.2.0
+charms.reactive
+coverage>=3.6
+netifaces
diff --git a/kubeapi-load-balancer/hooks/relations/http/.gitignore b/kubeapi-load-balancer/hooks/relations/http/.gitignore
new file mode 100644
index 0000000..3374ec2
--- /dev/null
+++ b/kubeapi-load-balancer/hooks/relations/http/.gitignore
@@ -0,0 +1,5 @@
+# Emacs save files
+*~
+\#*\#
+.\#*
+
diff --git a/kubeapi-load-balancer/hooks/relations/http/README.md b/kubeapi-load-balancer/hooks/relations/http/README.md
new file mode 100644
index 0000000..3d7822a
--- /dev/null
+++ b/kubeapi-load-balancer/hooks/relations/http/README.md
@@ -0,0 +1,68 @@
+# Overview
+
+This interface layer implements the basic form of the `http` interface protocol,
+which is used for things such as reverse-proxies, load-balanced servers, REST
+service discovery, et cetera.
+
+# Usage
+
+## Provides
+
+By providing the `http` interface, your charm is providing an HTTP server that
+can be load-balanced, reverse-proxied, used as a REST endpoint, etc.
+
+Your charm need only provide the port on which it is serving its content, as
+soon as the `{relation_name}.available` state is set:
+
+```python
+@when('website.available')
+def configure_website(website):
+ website.configure(port=hookenv.config('port'))
+```
+
+## Requires
+
+By requiring the `http` interface, your charm is consuming one or more HTTP
+servers, as a REST endpoint, to load-balance a set of servers, etc.
+
+Your charm should respond to the `{relation_name}.available` state, which
+indicates that there is at least one HTTP server connected.
+
+The `services()` method returns a list of available HTTP services and their
+associated hosts and ports.
+
+The return value is a list of dicts of the following form:
+
+```python
+[
+ {
+ 'service_name': name_of_service,
+ 'hosts': [
+ {
+ 'hostname': address_of_host,
+ 'port': port_for_host,
+ },
+ # ...
+ ],
+ },
+ # ...
+]
+```
+
+A trivial example of handling this interface would be:
+
+```python
+from charms.reactive.helpers import data_changed
+
+@when('reverseproxy.available')
+def update_reverse_proxy_config(reverseproxy):
+ services = reverseproxy.services()
+ if not data_changed('reverseproxy.services', services):
+ return
+ for service in services:
+ for host in service['hosts']:
+ hookenv.log('{} has a unit {}:{}'.format(
+            service['service_name'],
+ host['hostname'],
+ host['port']))
+```
diff --git a/kubeapi-load-balancer/hooks/relations/http/__init__.py b/kubeapi-load-balancer/hooks/relations/http/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/kubeapi-load-balancer/hooks/relations/http/interface.yaml b/kubeapi-load-balancer/hooks/relations/http/interface.yaml
new file mode 100644
index 0000000..54e7748
--- /dev/null
+++ b/kubeapi-load-balancer/hooks/relations/http/interface.yaml
@@ -0,0 +1,4 @@
+name: http
+summary: Basic HTTP interface
+version: 1
+repo: https://git.launchpad.net/~bcsaller/charms/+source/http
diff --git a/kubeapi-load-balancer/hooks/relations/http/provides.py b/kubeapi-load-balancer/hooks/relations/http/provides.py
new file mode 100644
index 0000000..86fa9b3
--- /dev/null
+++ b/kubeapi-load-balancer/hooks/relations/http/provides.py
@@ -0,0 +1,67 @@
+import json
+
+from charmhelpers.core import hookenv
+from charms.reactive import when, when_not
+from charms.reactive import set_flag, clear_flag
+from charms.reactive import Endpoint
+
+
+class HttpProvides(Endpoint):
+
+ @when('endpoint.{endpoint_name}.joined')
+ def joined(self):
+ set_flag(self.expand_name('{endpoint_name}.available'))
+
+ @when_not('endpoint.{endpoint_name}.joined')
+ def broken(self):
+ clear_flag(self.expand_name('{endpoint_name}.available'))
+
+ def get_ingress_address(self, rel_id=None):
+ # If no rel_id is provided, we fallback to the first one
+ if rel_id is None:
+ rel_id = self.relations[0].relation_id
+ return hookenv.ingress_address(rel_id, hookenv.local_unit())
+
+    def configure(self, port, private_address=None, hostname=None):
+        ''' configure the address(es). private_address and hostname can
+        be None, a single string address/hostname, or a list of addresses
+        and hostnames. Note that if a list is passed, it is assumed both
+        private_address and hostname are either lists or None '''
+        # NOTE(review): despite the docstring, if exactly one of
+        # private_address/hostname is a list and the other is None, the
+        # len() calls below raise TypeError -- callers must pass both as
+        # lists (possibly empty) when using list mode; confirm upstream.
+        for relation in self.relations:
+            ingress_address = self.get_ingress_address(relation.relation_id)
+            if type(private_address) is list or type(hostname) is list:
+                # build 3 lists to zip together that are the same length
+                # (shorter lists are padded with the ingress address)
+                length = max(len(private_address), len(hostname))
+                p = [port] * length
+                a = private_address + [ingress_address] *\
+                    (length - len(private_address))
+                h = hostname + [ingress_address] * (length - len(hostname))
+                zipped_list = zip(p, a, h)
+                # now build an array of dictionaries from that in the desired
+                # format for the interface
+                data_list = [{'hostname': h, 'port': p, 'private-address': a}
+                             for p, a, h in zipped_list]
+                # for backwards compatibility, we just send a single entry
+                # and have an array of dictionaries in a field of that
+                # entry for the other entries.
+                data = data_list.pop(0)
+                data['extended_data'] = json.dumps(data_list)
+
+                relation.to_publish_raw.update(data)
+            else:
+                relation.to_publish_raw.update({
+                    'hostname': hostname or ingress_address,
+                    'private-address': private_address or ingress_address,
+                    'port': port,
+                })
+
+ def set_remote(self, **kwargs):
+ # NB: This method provides backwards compatibility for charms that
+ # called RelationBase.set_remote. Most commonly, this was done by
+ # charms that needed to pass reverse proxy stanzas to http proxies.
+ # This type of interaction with base relation classes is discouraged,
+ # and should be handled with logic encapsulated in appropriate
+ # interfaces. Eventually, this method will be deprecated in favor of
+ # that behavior.
+ for relation in self.relations:
+ relation.to_publish_raw.update(kwargs)
diff --git a/kubeapi-load-balancer/hooks/relations/http/requires.py b/kubeapi-load-balancer/hooks/relations/http/requires.py
new file mode 100644
index 0000000..17ea6b7
--- /dev/null
+++ b/kubeapi-load-balancer/hooks/relations/http/requires.py
@@ -0,0 +1,76 @@
+import json
+
+from charms.reactive import when, when_not
+from charms.reactive import set_flag, clear_flag
+from charms.reactive import Endpoint
+
+
+class HttpRequires(Endpoint):
+
+ @when('endpoint.{endpoint_name}.changed')
+ def changed(self):
+ if any(unit.received_raw['port'] for unit in self.all_joined_units):
+ set_flag(self.expand_name('{endpoint_name}.available'))
+
+ @when_not('endpoint.{endpoint_name}.joined')
+ def broken(self):
+ clear_flag(self.expand_name('{endpoint_name}.available'))
+
+ def services(self):
+ """
+ Returns a list of available HTTP services and their associated hosts
+ and ports.
+
+ The return value is a list of dicts of the following form::
+
+ [
+ {
+ 'service_name': name_of_service,
+ 'hosts': [
+ {
+ 'hostname': address_of_host,
+ 'private-address': private_address_of_host,
+ 'port': port_for_host,
+ },
+ # ...
+ ],
+ },
+ # ...
+ ]
+ """
+ def build_service_host(data):
+ private_address = data['private-address']
+ host = data['hostname'] or private_address
+ if host and data['port']:
+ return (host, private_address, data['port'])
+ else:
+ return None
+
+ services = {}
+ for relation in self.relations:
+ service_name = relation.application_name
+ service = services.setdefault(service_name, {
+ 'service_name': service_name,
+ 'hosts': [],
+ })
+ host_set = set()
+ for unit in relation.joined_units:
+ data = unit.received_raw
+ host = build_service_host(data)
+ if host:
+ host_set.add(host)
+
+ # if we have extended data, add it
+ if 'extended_data' in data:
+ for ed in json.loads(data['extended_data']):
+ host = build_service_host(ed)
+ if host:
+ host_set.add(host)
+
+ service['hosts'] = [
+ {'hostname': h, 'private-address': pa, 'port': p}
+ for h, pa, p in sorted(host_set)
+ ]
+
+ ret = [s for s in services.values() if s['hosts']]
+ return ret
diff --git a/kubeapi-load-balancer/hooks/relations/nrpe-external-master/README.md b/kubeapi-load-balancer/hooks/relations/nrpe-external-master/README.md
new file mode 100644
index 0000000..e33deb8
--- /dev/null
+++ b/kubeapi-load-balancer/hooks/relations/nrpe-external-master/README.md
@@ -0,0 +1,66 @@
+# nrpe-external-master interface
+
+Use this interface to register nagios checks in your charm layers.
+
+## Purpose
+
+This interface is designed to interoperate with the
+[nrpe-external-master](https://jujucharms.com/nrpe-external-master) subordinate charm.
+
+## How to use in your layers
+
+The event handler for `nrpe-external-master.available` is called with an object
+through which you can register your own custom nagios checks, when a relation
+is established with `nrpe-external-master:nrpe-external-master`.
+
+This object provides a method,
+
+_add_check_(args, name=_check_name_, description=_description_, context=_context_, unit=_unit_)
+
+which is called to register a nagios plugin check for your service.
+
+All arguments are required.
+
+*args* is a list of nagios plugin command line arguments, starting with the path to the plugin executable.
+
+*name* is the name of the check registered in nagios
+
+*description* is some text that describes what the check is for and what it does
+
+*context* is the nagios context name, something that identifies your application
+
+*unit* is `hookenv.local_unit()`
+
+The nrpe subordinate installs `check_http`, so you can use it like this:
+
+```
+@when('nrpe-external-master.available')
+def setup_nagios(nagios):
+ config = hookenv.config()
+ unit_name = hookenv.local_unit()
+ nagios.add_check(['/usr/lib/nagios/plugins/check_http',
+ '-I', '127.0.0.1', '-p', str(config['port']),
+ '-e', " 200 OK", '-u', '/publickey'],
+ name="check_http",
+ description="Verify my awesome service is responding",
+ context=config["nagios_context"],
+ unit=unit_name,
+ )
+```
+If your `nagios.add_check` defines a custom plugin, you will also need to restart the `nagios-nrpe-server` service.
+
+Consult the nagios documentation for more information on [how to write your own
+plugins](https://assets.nagios.com/downloads/nagioscore/docs/nagioscore/4/en/pluginapi.html)
+or [find one](https://www.nagios.org/projects/nagios-plugins/) that does what you need.
+
+## Example deployment
+
+```
+$ juju deploy your-awesome-charm
+$ juju deploy nrpe-external-master --config site-nagios.yaml
+$ juju add-relation your-awesome-charm nrpe-external-master
+```
+
+where `site-nagios.yaml` has the necessary configuration settings for the
+subordinate to connect to nagios.
+
diff --git a/kubeapi-load-balancer/hooks/relations/nrpe-external-master/__init__.py b/kubeapi-load-balancer/hooks/relations/nrpe-external-master/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/kubeapi-load-balancer/hooks/relations/nrpe-external-master/interface.yaml b/kubeapi-load-balancer/hooks/relations/nrpe-external-master/interface.yaml
new file mode 100644
index 0000000..859a423
--- /dev/null
+++ b/kubeapi-load-balancer/hooks/relations/nrpe-external-master/interface.yaml
@@ -0,0 +1,3 @@
+name: nrpe-external-master
+summary: Nagios interface
+version: 1
diff --git a/kubeapi-load-balancer/hooks/relations/nrpe-external-master/provides.py b/kubeapi-load-balancer/hooks/relations/nrpe-external-master/provides.py
new file mode 100644
index 0000000..b6c7f0d
--- /dev/null
+++ b/kubeapi-load-balancer/hooks/relations/nrpe-external-master/provides.py
@@ -0,0 +1,91 @@
+import datetime
+import os
+
+from charmhelpers.core import hookenv
+
+from charms.reactive import hook
+from charms.reactive import RelationBase
+from charms.reactive import scopes
+
+
+class NrpeExternalMasterProvides(RelationBase):
+ scope = scopes.GLOBAL
+
+ @hook('{provides:nrpe-external-master}-relation-{joined,changed}')
+ def changed_nrpe(self):
+ self.set_state('{relation_name}.available')
+
+ @hook('{provides:nrpe-external-master}-relation-{broken,departed}')
+ def broken_nrpe(self):
+ self.remove_state('{relation_name}.available')
+
+ def add_check(self, args, name=None, description=None, context=None,
+ servicegroups=None, unit=None):
+ nagios_files = self.get_local('nagios.check.files', [])
+
+ if not unit:
+ unit = hookenv.local_unit()
+ unit = unit.replace('/', '-')
+ context = self.get_remote('nagios_host_context', context)
+ host_name = self.get_remote('nagios_hostname',
+ '%s-%s' % (context, unit))
+
+ check_tmpl = """
+#---------------------------------------------------
+# This file is Juju managed
+#---------------------------------------------------
+command[%(check_name)s]=%(check_args)s
+"""
+ service_tmpl = """
+#---------------------------------------------------
+# This file is Juju managed
+#---------------------------------------------------
+define service {
+ use active-service
+ host_name %(host_name)s
+ service_description %(description)s
+ check_command check_nrpe!%(check_name)s
+ servicegroups %(servicegroups)s
+}
+"""
+ check_filename = "/etc/nagios/nrpe.d/check_%s.cfg" % (name)
+ with open(check_filename, "w") as fh:
+ fh.write(check_tmpl % {
+ 'check_args': ' '.join(args),
+ 'check_name': name,
+ })
+ nagios_files.append(check_filename)
+
+ service_filename = "/var/lib/nagios/export/service__%s_%s.cfg" % (
+ unit, name)
+ with open(service_filename, "w") as fh:
+ fh.write(service_tmpl % {
+ 'servicegroups': servicegroups or context,
+ 'context': context,
+ 'description': description,
+ 'check_name': name,
+ 'host_name': host_name,
+ 'unit_name': unit,
+ })
+ nagios_files.append(service_filename)
+
+ self.set_local('nagios.check.files', nagios_files)
+
+ def removed(self):
+ files = self.get_local('nagios.check.files', [])
+ for f in files:
+ try:
+ os.unlink(f)
+ except Exception as e:
+ hookenv.log("failed to remove %s: %s" % (f, e))
+ self.set_local('nagios.check.files', [])
+ self.remove_state('{relation_name}.removed')
+
+ def added(self):
+ self.updated()
+
+ def updated(self):
+ relation_info = {
+ 'timestamp': datetime.datetime.now().isoformat(),
+ }
+ self.set_remote(**relation_info)
diff --git a/kubeapi-load-balancer/hooks/relations/nrpe-external-master/requires.py b/kubeapi-load-balancer/hooks/relations/nrpe-external-master/requires.py
new file mode 100644
index 0000000..e69de29
diff --git a/kubeapi-load-balancer/hooks/relations/public-address/README.md b/kubeapi-load-balancer/hooks/relations/public-address/README.md
new file mode 100644
index 0000000..06be3ae
--- /dev/null
+++ b/kubeapi-load-balancer/hooks/relations/public-address/README.md
@@ -0,0 +1,59 @@
+# Overview
+
+This interface layer implements a public address protocol useful for load
+balancers and their subordinates. The load balancers (providers) set their
+own public address and port, which is then available to the subordinates
+(requirers).
+
+# Usage
+
+## Provides
+
+By providing the `public-address` interface, your charm is providing an HTTP
+server that can load-balance for another HTTP based service.
+
+Your charm need only provide the address and port on which it is serving its
+content, as soon as the `{relation_name}.available` state is set:
+
+```python
+from charmhelpers.core import hookenv
+@when('website.available')
+def configure_website(website):
+ website.set_address_port(hookenv.unit_get('public-address'), hookenv.config('port'))
+```
+
+## Requires
+
+By requiring the `public-address` interface, your charm is consuming one or
+more HTTP servers, to load-balance a set of servers, etc.
+
+Your charm should respond to the `{relation_name}.available` state, which
+indicates that there is at least one HTTP server connected.
+
+The `get_addresses_ports()` method returns a list of available addresses and
+ports.
+
+The return value is a list of dicts of the following form:
+
+```python
+[
+ {
+ 'public-address': address_of_host,
+ 'port': port_for_host,
+ },
+ # ...
+]
+```
+
+A trivial example of handling this interface would be:
+
+```python
+from charmhelpers.core import hookenv
+@when('loadbalancer.available')
+def update_reverse_proxy_config(loadbalancer):
+ hosts = loadbalancer.get_addresses_ports()
+ for host in hosts:
+ hookenv.log('The loadbalancer for this unit is {}:{}'.format(
+ host['public-address'],
+ host['port']))
+```
diff --git a/kubeapi-load-balancer/hooks/relations/public-address/__init__.py b/kubeapi-load-balancer/hooks/relations/public-address/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/kubeapi-load-balancer/hooks/relations/public-address/interface.yaml b/kubeapi-load-balancer/hooks/relations/public-address/interface.yaml
new file mode 100644
index 0000000..c9849e4
--- /dev/null
+++ b/kubeapi-load-balancer/hooks/relations/public-address/interface.yaml
@@ -0,0 +1,4 @@
+name: public-address
+summary: A basic interface to provide the public address for load balancers.
+version: 1
+repo: https://github.com/juju-solutions/interface-public-address.git
diff --git a/kubeapi-load-balancer/hooks/relations/public-address/provides.py b/kubeapi-load-balancer/hooks/relations/public-address/provides.py
new file mode 100644
index 0000000..09b9915
--- /dev/null
+++ b/kubeapi-load-balancer/hooks/relations/public-address/provides.py
@@ -0,0 +1,60 @@
+import json
+
+from charms.reactive import toggle_flag
+from charms.reactive import Endpoint
+
+
+class PublicAdddressProvides(Endpoint):
+
+ def manage_flags(self):
+ toggle_flag(self.expand_name('{endpoint_name}.available'),
+ self.is_joined)
+
+ def set_address_port(self, address, port, relation=None):
+ if relation is None:
+ # no relation specified, so send the same data to everyone
+ relations = self.relations
+ else:
+ # specific relation given, so only send the data to that one
+ relations = [relation]
+ if type(address) is list:
+ # build 2 lists to zip together that are the same length
+ length = len(address)
+ p = [port] * length
+ combined = zip(address, p)
+ clients = [{'public-address': a, 'port': p}
+ for a, p in combined]
+ # for backwards compatibility, we just send a single entry
+ # and have an array of dictionaries in a field of that
+ # entry for the other entries.
+ first = clients.pop(0)
+ first['extended_data'] = json.dumps(clients)
+ for relation in relations:
+ relation.to_publish_raw.update(first)
+ else:
+ for relation in relations:
+ relation.to_publish_raw.update({'public-address': address,
+ 'port': port})
+
+ @property
+ def requests(self):
+ return [Request(rel) for rel in self.relations]
+
+
+class Request:
+ def __init__(self, rel):
+ self.rel = rel
+
+ @property
+ def application_name(self):
+ return self.rel.application_name
+
+ @property
+ def members(self):
+ return [(u.received_raw.get('ingress-address',
+ u.received_raw['private-address']),
+ u.received_raw.get('port', '6443'))
+ for u in self.rel.joined_units]
+
+ def set_address_port(self, address, port):
+ self.rel.endpoint.set_address_port(address, port, self.rel)
diff --git a/kubeapi-load-balancer/hooks/relations/public-address/requires.py b/kubeapi-load-balancer/hooks/relations/public-address/requires.py
new file mode 100644
index 0000000..467d129
--- /dev/null
+++ b/kubeapi-load-balancer/hooks/relations/public-address/requires.py
@@ -0,0 +1,44 @@
+import json
+
+from charms.reactive import toggle_flag, Endpoint
+
+
+class PublicAddressRequires(Endpoint):
+ def manage_flags(self):
+ toggle_flag(self.expand_name('{endpoint_name}.available'),
+ len(self.get_addresses_ports()) > 0)
+
+ def set_backend_port(self, port):
+ """
+ Set the port that the backend service is listening on.
+
+ Defaults to 6443 if not set.
+ """
+ for rel in self.relations:
+ rel.to_publish_raw['port'] = str(port)
+
+ def get_addresses_ports(self):
+ '''Returns a list of available HTTP providers and their associated
+ public addresses and ports.
+
+ The return value is a list of dicts of the following form::
+ [
+ {
+ 'public-address': address_for_frontend,
+ 'port': port_for_frontend,
+ },
+ # ...
+ ]
+ '''
+ hosts = set()
+ for relation in self.relations:
+ for unit in relation.joined_units:
+ data = unit.received_raw
+ hosts.add((data['public-address'], data['port']))
+ if 'extended_data' in data:
+ for ed in json.loads(data['extended_data']):
+ hosts.add((ed['public-address'], ed['port']))
+
+ return [{'public-address': pa, 'port': p}
+ for pa, p in sorted(host for host in hosts
+ if None not in host)]
diff --git a/kubeapi-load-balancer/hooks/relations/tls-certificates/.gitignore b/kubeapi-load-balancer/hooks/relations/tls-certificates/.gitignore
new file mode 100644
index 0000000..93813bc
--- /dev/null
+++ b/kubeapi-load-balancer/hooks/relations/tls-certificates/.gitignore
@@ -0,0 +1,4 @@
+.tox
+__pycache__
+*.pyc
+_build
diff --git a/kubeapi-load-balancer/hooks/relations/tls-certificates/README.md b/kubeapi-load-balancer/hooks/relations/tls-certificates/README.md
new file mode 100644
index 0000000..733da6d
--- /dev/null
+++ b/kubeapi-load-balancer/hooks/relations/tls-certificates/README.md
@@ -0,0 +1,90 @@
+# Interface tls-certificates
+
+This is a [Juju][] interface layer that enables a charm which requires TLS
+certificates to relate to a charm which can provide them, such as [Vault][] or
+[EasyRSA][]
+
+To get started please read the [Introduction to PKI][] which defines some PKI
+terms, concepts and processes used in this document.
+
+# Example Usage
+
+Let's say you have a charm which needs a server certificate for a service it
+provides to other charms and a client certificate for a database it consumes
+from another charm. The charm provides its own service on the `clients`
+relation endpoint, and it consumes the database on the `db` relation endpoint.
+
+First, you must define the relation endpoint in your charm's `metadata.yaml`:
+
+```yaml
+requires:
+ cert-provider:
+ interface: tls-certificates
+```
+
+Next, you must ensure the interface layer is included in your `layer.yaml`:
+
+```yaml
+includes:
+ - interface:tls-certificates
+```
+
+Then, in your reactive code, add the following, changing `update_certs` to
+handle the certificates however your charm needs:
+
+```python
+from charmhelpers.core import hookenv, host
+from charms.reactive import clear_flag, endpoint_from_flag, when
+
+
+@when('cert-provider.ca.changed')
+def install_root_ca_cert():
+ cert_provider = endpoint_from_flag('cert-provider.ca.available')
+ host.install_ca_cert(cert_provider.root_ca_cert)
+ clear_flag('cert-provider.ca.changed')
+
+
+@when('cert-provider.available')
+def request_certificates():
+ cert_provider = endpoint_from_flag('cert-provider.available')
+
+ # get ingress info
+ ingress_for_clients = hookenv.network_get('clients')['ingress-addresses']
+ ingress_for_db = hookenv.network_get('db')['ingress-addresses']
+
+ # use first ingress address as primary and any additional as SANs
+    server_cn, server_sans = ingress_for_clients[0], ingress_for_clients[1:]
+    client_cn, client_sans = ingress_for_db[0], ingress_for_db[1:]
+
+ # request a single server and single client cert; note that multiple certs
+ # of either type can be requested as long as they have unique common names
+ cert_provider.request_server_cert(server_cn, server_sans)
+ cert_provider.request_client_cert(client_cn, client_sans)
+
+
+@when('cert-provider.certs.changed')
+def update_certs():
+ cert_provider = endpoint_from_flag('cert-provider.available')
+ server_cert = cert_provider.server_certs[0] # only requested one
+ myserver.update_server_cert(server_cert.cert, server_cert.key)
+
+ client_cert = cert_provider.client_certs[0] # only requested one
+ myclient.update_client_cert(client_cert.cert, client_cert.key)
+ clear_flag('cert-provider.certs.changed')
+```
+
+
+# Reference
+
+ * [Requires](docs/requires.md)
+ * [Provides](docs/provides.md)
+
+# Contact Information
+
+Maintainer: Cory Johns <Cory.Johns@canonical.com>
+
+
+[Juju]: https://jujucharms.com
+[Vault]: https://jujucharms.com/u/openstack-charmers/vault
+[EasyRSA]: https://jujucharms.com/u/containers/easyrsa
+[Introduction to PKI]: https://github.com/OpenVPN/easy-rsa/blob/master/doc/Intro-To-PKI.md
diff --git a/kubeapi-load-balancer/hooks/relations/tls-certificates/__init__.py b/kubeapi-load-balancer/hooks/relations/tls-certificates/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/kubeapi-load-balancer/hooks/relations/tls-certificates/docs/common.md b/kubeapi-load-balancer/hooks/relations/tls-certificates/docs/common.md
new file mode 100644
index 0000000..25d0e08
--- /dev/null
+++ b/kubeapi-load-balancer/hooks/relations/tls-certificates/docs/common.md
@@ -0,0 +1,51 @@
+
+
+Name of the application which the request came from.
+
+:returns: Name of application
+:rtype: str
+
+
+## cert
+
+
+The cert published for this request, if any.
+
+
+## cert_type
+
+
+Type of certificate, 'server' or 'client', being requested.
+
+
+## resolve_unit_name
+
+```python
+CertificateRequest.resolve_unit_name(unit)
+```
+Return name of unit associated with this request.
+
+unit_name should be provided in the relation data to ensure
+compatibility with cross-model relations. If the unit name
+is absent then fall back to unit_name attribute of the
+unit associated with this request.
+
+:param unit: Unit to extract name from
+:type unit: charms.reactive.endpoints.RelatedUnit
+:returns: Name of unit
+:rtype: str
+
+
+## Certificate
+
+```python
+Certificate(self, cert_type, common_name, cert, key)
+```
+
+Represents a created certificate and key.
+
+The ``cert_type``, ``common_name``, ``cert``, and ``key`` values can
+be accessed either as properties or as the contents of the dict.
+
diff --git a/kubeapi-load-balancer/hooks/relations/tls-certificates/docs/provides.md b/kubeapi-load-balancer/hooks/relations/tls-certificates/docs/provides.md
new file mode 100644
index 0000000..c213546
--- /dev/null
+++ b/kubeapi-load-balancer/hooks/relations/tls-certificates/docs/provides.md
@@ -0,0 +1,212 @@
+
provides
+
+
+
TlsProvides
+
+```python
+TlsProvides(self, endpoint_name, relation_ids=None)
+```
+
+The provider's side of the interface protocol.
+
+The following flags may be set:
+
+ * `{endpoint_name}.available`
+ Whenever any clients are joined.
+
+ * `{endpoint_name}.certs.requested`
+ When there are new certificate requests of any kind to be processed.
+ The requests can be accessed via [new_requests][].
+
+ * `{endpoint_name}.server.certs.requested`
+ When there are new server certificate requests to be processed.
+ The requests can be accessed via [new_server_requests][].
+
+ * `{endpoint_name}.client.certs.requested`
+ When there are new client certificate requests to be processed.
+ The requests can be accessed via [new_client_requests][].
+
+[Certificate]: common.md#tls_certificates_common.Certificate
+[CertificateRequest]: common.md#tls_certificates_common.CertificateRequest
+[all_requests]: provides.md#provides.TlsProvides.all_requests
+[new_requests]: provides.md#provides.TlsProvides.new_requests
+[new_server_requests]: provides.md#provides.TlsProvides.new_server_requests
+[new_client_requests]: provides.md#provides.TlsProvides.new_client_requests
+
+
all_published_certs
+
+
+List of all [Certificate][] instances that this provider has published
+for all related applications.
+
+
all_requests
+
+
+List of all requests that have been made.
+
+Each will be an instance of [CertificateRequest][].
+
+Example usage:
+
+```python
+@when('certs.regen',
+ 'tls.certs.available')
+def regen_all_certs():
+ tls = endpoint_from_flag('tls.certs.available')
+ for request in tls.all_requests:
+ cert, key = generate_cert(request.cert_type,
+ request.common_name,
+ request.sans)
+ request.set_cert(cert, key)
+```
+
+
new_application_requests
+
+
+Filtered view of [new_requests][] that only includes application cert
+requests.
+
+Each will be an instance of [ApplicationCertificateRequest][].
+
+Example usage:
+
+```python
+@when('tls.application.certs.requested')
+def gen_application_certs():
+ tls = endpoint_from_flag('tls.application.certs.requested')
+ for request in tls.new_application_requests:
+ cert, key = generate_application_cert(request.common_name,
+ request.sans)
+ request.set_cert(cert, key)
+```
+
+
new_client_requests
+
+
+Filtered view of [new_requests][] that only includes client cert
+requests.
+
+Each will be an instance of [CertificateRequest][].
+
+Example usage:
+
+```python
+@when('tls.client.certs.requested')
+def gen_client_certs():
+ tls = endpoint_from_flag('tls.client.certs.requested')
+ for request in tls.new_client_requests:
+ cert, key = generate_client_cert(request.common_name,
+ request.sans)
+ request.set_cert(cert, key)
+```
+
+
new_requests
+
+
+Filtered view of [all_requests][] that only includes requests that
+haven't been handled.
+
+Each will be an instance of [CertificateRequest][].
+
+This collection can also be further filtered by request type using
+[new_server_requests][] or [new_client_requests][].
+
+Example usage:
+
+```python
+@when('tls.certs.requested')
+def gen_certs():
+ tls = endpoint_from_flag('tls.certs.requested')
+ for request in tls.new_requests:
+ cert, key = generate_cert(request.cert_type,
+ request.common_name,
+ request.sans)
+ request.set_cert(cert, key)
+```
+
+
new_server_requests
+
+
+Filtered view of [new_requests][] that only includes server cert
+requests.
+
+Each will be an instance of [CertificateRequest][].
+
+Example usage:
+
+```python
+@when('tls.server.certs.requested')
+def gen_server_certs():
+ tls = endpoint_from_flag('tls.server.certs.requested')
+ for request in tls.new_server_requests:
+ cert, key = generate_server_cert(request.common_name,
+ request.sans)
+ request.set_cert(cert, key)
+```
+
+
set_ca
+
+```python
+TlsProvides.set_ca(certificate_authority)
+```
+
+Publish the CA to all related applications.
+
+
set_chain
+
+```python
+TlsProvides.set_chain(chain)
+```
+
+Publish the chain of trust to all related applications.
+
+
set_client_cert
+
+```python
+TlsProvides.set_client_cert(cert, key)
+```
+
+Deprecated. This is only for backwards compatibility.
+
+Publish a globally shared client cert and key.
+
+
set_server_cert
+
+```python
+TlsProvides.set_server_cert(scope, cert, key)
+```
+
+Deprecated. Use one of the [new_requests][] collections and
+`request.set_cert()` instead.
+
+Set the server cert and key for the request identified by `scope`.
+
+
+
+```python
+TlsProvides.get_server_requests()
+```
+
+Deprecated. Use the [new_requests][] or [server_requests][]
+collections instead.
+
+One provider can have many requests to generate server certificates.
+Return a map of all server request objects indexed by a unique
+identifier.
+
diff --git a/kubeapi-load-balancer/hooks/relations/tls-certificates/docs/requires.md b/kubeapi-load-balancer/hooks/relations/tls-certificates/docs/requires.md
new file mode 100644
index 0000000..fdec902
--- /dev/null
+++ b/kubeapi-load-balancer/hooks/relations/tls-certificates/docs/requires.md
@@ -0,0 +1,207 @@
+
requires
+
+
+
TlsRequires
+
+```python
+TlsRequires(self, endpoint_name, relation_ids=None)
+```
+
+The client's side of the interface protocol.
+
+The following flags may be set:
+
+ * `{endpoint_name}.available`
+ Whenever the relation is joined.
+
+ * `{endpoint_name}.ca.available`
+ When the root CA information is available via the [root_ca_cert][] and
+ [root_ca_chain][] properties.
+
+ * `{endpoint_name}.ca.changed`
+ When the root CA information has changed, whether because
+ they have just become available or if they were regenerated by the CA.
+ Once processed this flag should be removed by the charm.
+
+ * `{endpoint_name}.certs.available`
+ When the requested server or client certs are available.
+
+ * `{endpoint_name}.certs.changed`
+ When the requested server or client certs have changed, whether because
+ they have just become available or if they were regenerated by the CA.
+ Once processed this flag should be removed by the charm.
+
+ * `{endpoint_name}.server.certs.available`
+ When the server certificates requested by [request_server_cert][] are
+ available via the [server_certs][] collection.
+
+ * `{endpoint_name}.server.certs.changed`
+ When the requested server certificates have changed, whether because
+ they have just become available or if they were regenerated by the CA.
+ Once processed this flag should be removed by the charm.
+
+ * `{endpoint_name}.client.certs.available`
+ When the client certificates requested by [request_client_cert][] are
+ available via the [client_certs][] collection.
+
+ * `{endpoint_name}.client.certs.changed`
+ When the requested client certificates have changed, whether because
+ they have just become available or if they were regenerated by the CA.
+ Once processed this flag should be removed by the charm.
+
+The following flags have been deprecated:
+
+ * `{endpoint_name}.server.cert.available`
+ * `{endpoint_name}.client.cert.available`
+ * `{endpoint_name}.batch.cert.available`
+
+[Certificate]: common.md#tls_certificates_common.Certificate
+[CertificateRequest]: common.md#tls_certificates_common.CertificateRequest
+[root_ca_cert]: requires.md#requires.TlsRequires.root_ca_cert
+[root_ca_chain]: requires.md#requires.TlsRequires.root_ca_chain
+[request_server_cert]: requires.md#requires.TlsRequires.request_server_cert
+[request_client_cert]: requires.md#requires.TlsRequires.request_client_cert
+[server_certs]: requires.md#requires.TlsRequires.server_certs
+[server_certs_map]: requires.md#requires.TlsRequires.server_certs_map
+[client_certs]: requires.md#requires.TlsRequires.client_certs
+
+
application_certs
+
+
+List of [Certificate][] instances for all available application certs.
+
+
client_certs
+
+
+List of [Certificate][] instances for all available client certs.
+
+
client_certs_map
+
+
+Mapping of client [Certificate][] instances by their `common_name`.
+
+
root_ca_cert
+
+
+Root CA certificate.
+
+
root_ca_chain
+
+
+The chain of trust for the root CA.
+
+
server_certs
+
+
+List of [Certificate][] instances for all available server certs.
+
+
server_certs_map
+
+
+Mapping of server [Certificate][] instances by their `common_name`.
+
+
get_ca
+
+```python
+TlsRequires.get_ca()
+```
+
+Return the root CA certificate.
+
+Same as [root_ca_cert][].
+
+
get_chain
+
+```python
+TlsRequires.get_chain()
+```
+
+Return the chain of trust for the root CA.
+
+Same as [root_ca_chain][].
+
+
get_client_cert
+
+```python
+TlsRequires.get_client_cert()
+```
+
+Deprecated. Use [request_client_cert][] and the [client_certs][]
+collection instead.
+
+Return a globally shared client certificate and key.
+
+
get_server_cert
+
+```python
+TlsRequires.get_server_cert()
+```
+
+Deprecated. Use the [server_certs][] collection instead.
+
+Return the cert and key of the first server certificate requested.
+
+
get_batch_requests
+
+```python
+TlsRequires.get_batch_requests()
+```
+
+Deprecated. Use [server_certs_map][] instead.
+
+Mapping of server [Certificate][] instances by their `common_name`.
+
+
request_server_cert
+
+```python
+TlsRequires.request_server_cert(cn, sans=None, cert_name=None)
+```
+
+Request a server certificate and key be generated for the given
+common name (`cn`) and optional list of alternative names (`sans`).
+
+The `cert_name` is deprecated and not needed.
+
+This can be called multiple times to request more than one server
+certificate, although the common names must be unique. If called
+again with the same common name, it will be ignored.
+
+
+
+```python
+TlsRequires.request_server_certs()
+```
+
+Deprecated. Just use [request_server_cert][]; this does nothing.
+
+
request_client_cert
+
+```python
+TlsRequires.request_client_cert(cn, sans)
+```
+
+Request a client certificate and key be generated for the given
+common name (`cn`) and list of alternative names (`sans`).
+
+This can be called multiple times to request more than one client
+certificate, although the common names must be unique. If called
+again with the same common name, it will be ignored.
+
+
request_application_cert
+
+```python
+TlsRequires.request_application_cert(cn, sans)
+```
+
+Request an application certificate and key be generated for the given
+common name (`cn`) and list of alternative names (`sans` ) of this
+unit and all peer units. All units will share a single certificate.
+
diff --git a/kubeapi-load-balancer/hooks/relations/tls-certificates/interface.yaml b/kubeapi-load-balancer/hooks/relations/tls-certificates/interface.yaml
new file mode 100644
index 0000000..beec53b
--- /dev/null
+++ b/kubeapi-load-balancer/hooks/relations/tls-certificates/interface.yaml
@@ -0,0 +1,6 @@
+name: tls-certificates
+summary: |
+ A Transport Layer Security (TLS) charm layer that uses requires and provides
+  to exchange certificates.
+version: 1
+repo: https://github.com/juju-solutions/interface-tls-certificates
diff --git a/kubeapi-load-balancer/hooks/relations/tls-certificates/make_docs b/kubeapi-load-balancer/hooks/relations/tls-certificates/make_docs
new file mode 100644
index 0000000..2f2274a
--- /dev/null
+++ b/kubeapi-load-balancer/hooks/relations/tls-certificates/make_docs
@@ -0,0 +1,23 @@
+#!.tox/py3/bin/python
+
+import sys
+import importlib
+from pathlib import Path
+from shutil import rmtree
+from unittest.mock import patch
+
+import pydocmd.__main__
+
+
+with patch('charmhelpers.core.hookenv.metadata') as metadata:
+ metadata.return_value = {
+ 'requires': {'cert': {'interface': 'tls-certificates'}},
+ 'provides': {'cert': {'interface': 'tls-certificates'}},
+ }
+ sys.path.append('..')
+ sys.modules[''] = importlib.import_module(Path.cwd().name)
+ print(sys.argv)
+ if len(sys.argv) == 1:
+ sys.argv.extend(['build'])
+ pydocmd.__main__.main()
+ rmtree('_build')
diff --git a/kubeapi-load-balancer/hooks/relations/tls-certificates/provides.py b/kubeapi-load-balancer/hooks/relations/tls-certificates/provides.py
new file mode 100644
index 0000000..0262baa
--- /dev/null
+++ b/kubeapi-load-balancer/hooks/relations/tls-certificates/provides.py
@@ -0,0 +1,301 @@
+if not __package__:
+ # fix relative imports when building docs
+ import sys
+ __package__ = sys.modules[''].__name__
+
+from charms.reactive import Endpoint
+from charms.reactive import when, when_not
+from charms.reactive import set_flag, clear_flag, toggle_flag
+
+from .tls_certificates_common import (
+ ApplicationCertificateRequest,
+ CertificateRequest
+)
+
+
+class TlsProvides(Endpoint):
+ """
+ The provider's side of the interface protocol.
+
+ The following flags may be set:
+
+ * `{endpoint_name}.available`
+ Whenever any clients are joined.
+
+ * `{endpoint_name}.certs.requested`
+ When there are new certificate requests of any kind to be processed.
+ The requests can be accessed via [new_requests][].
+
+ * `{endpoint_name}.server.certs.requested`
+ When there are new server certificate requests to be processed.
+ The requests can be accessed via [new_server_requests][].
+
+ * `{endpoint_name}.client.certs.requested`
+ When there are new client certificate requests to be processed.
+ The requests can be accessed via [new_client_requests][].
+
+ [Certificate]: common.md#tls_certificates_common.Certificate
+ [CertificateRequest]: common.md#tls_certificates_common.CertificateRequest
+ [all_requests]: provides.md#provides.TlsProvides.all_requests
+ [new_requests]: provides.md#provides.TlsProvides.new_requests
+ [new_server_requests]: provides.md#provides.TlsProvides.new_server_requests
+ [new_client_requests]: provides.md#provides.TlsProvides.new_client_requests
+ """
+
+ @when('endpoint.{endpoint_name}.joined')
+ def joined(self):
+ set_flag(self.expand_name('{endpoint_name}.available'))
+ toggle_flag(self.expand_name('{endpoint_name}.certs.requested'),
+ self.new_requests)
+ toggle_flag(self.expand_name('{endpoint_name}.server.certs.requested'),
+ self.new_server_requests)
+ toggle_flag(self.expand_name('{endpoint_name}.client.certs.requested'),
+ self.new_client_requests)
+ toggle_flag(
+ self.expand_name('{endpoint_name}.application.certs.requested'),
+ self.new_application_requests)
+ # For backwards compatibility, set the old "cert" flags as well
+ toggle_flag(self.expand_name('{endpoint_name}.server.cert.requested'),
+ self.new_server_requests)
+ toggle_flag(self.expand_name('{endpoint_name}.client.cert.requested'),
+ self.new_client_requests)
+
+ @when_not('endpoint.{endpoint_name}.joined')
+ def broken(self):
+ clear_flag(self.expand_name('{endpoint_name}.available'))
+ clear_flag(self.expand_name('{endpoint_name}.certs.requested'))
+ clear_flag(self.expand_name('{endpoint_name}.server.certs.requested'))
+ clear_flag(self.expand_name('{endpoint_name}.client.certs.requested'))
+ clear_flag(
+ self.expand_name('{endpoint_name}.application.certs.requested'))
+
+ def set_ca(self, certificate_authority):
+ """
+ Publish the CA to all related applications.
+ """
+ for relation in self.relations:
+ # All the clients get the same CA, so send it to them.
+ relation.to_publish_raw['ca'] = certificate_authority
+
+ def set_chain(self, chain):
+ """
+ Publish the chain of trust to all related applications.
+ """
+ for relation in self.relations:
+ # All the clients get the same chain, so send it to them.
+ relation.to_publish_raw['chain'] = chain
+
+ def set_client_cert(self, cert, key):
+ """
+ Deprecated. This is only for backwards compatibility.
+
+ Publish a globally shared client cert and key.
+ """
+ for relation in self.relations:
+ relation.to_publish_raw.update({
+ 'client.cert': cert,
+ 'client.key': key,
+ })
+
+ def set_server_cert(self, scope, cert, key):
+ """
+ Deprecated. Use one of the [new_requests][] collections and
+ `request.set_cert()` instead.
+
+ Set the server cert and key for the request identified by `scope`.
+ """
+ request = self.get_server_requests()[scope]
+ request.set_cert(cert, key)
+
+ def set_server_multicerts(self, scope):
+ """
+ Deprecated. Done automatically.
+ """
+ pass
+
+ def add_server_cert(self, scope, cn, cert, key):
+ '''
+ Deprecated. Use `request.set_cert()` instead.
+ '''
+ self.set_server_cert(scope, cert, key)
+
+ def get_server_requests(self):
+ """
+ Deprecated. Use the [new_requests][] or [server_requests][]
+ collections instead.
+
+ One provider can have many requests to generate server certificates.
+ Return a map of all server request objects indexed by a unique
+ identifier.
+ """
+ return {req._key: req for req in self.new_server_requests}
+
+ @property
+ def all_requests(self):
+ """
+ List of all requests that have been made.
+
+ Each will be an instance of [CertificateRequest][].
+
+ Example usage:
+
+ ```python
+ @when('certs.regen',
+ 'tls.certs.available')
+ def regen_all_certs():
+ tls = endpoint_from_flag('tls.certs.available')
+ for request in tls.all_requests:
+ cert, key = generate_cert(request.cert_type,
+ request.common_name,
+ request.sans)
+ request.set_cert(cert, key)
+ ```
+ """
+ requests = []
+ for unit in self.all_joined_units:
+ # handle older single server cert request
+ if unit.received_raw['common_name']:
+ requests.append(CertificateRequest(
+ unit,
+ 'server',
+ unit.received_raw['certificate_name'],
+ unit.received_raw['common_name'],
+ unit.received['sans'],
+ ))
+
+        # handle multi server cert requests
+ reqs = unit.received['cert_requests'] or {}
+ for common_name, req in reqs.items():
+ requests.append(CertificateRequest(
+ unit,
+ 'server',
+ common_name,
+ common_name,
+ req['sans'],
+ ))
+
+ # handle client cert requests
+ reqs = unit.received['client_cert_requests'] or {}
+ for common_name, req in reqs.items():
+ requests.append(CertificateRequest(
+ unit,
+ 'client',
+ common_name,
+ common_name,
+ req['sans'],
+ ))
+ # handle application cert requests
+ reqs = unit.received['application_cert_requests'] or {}
+ for common_name, req in reqs.items():
+ requests.append(ApplicationCertificateRequest(
+ unit,
+ 'application',
+ common_name,
+ common_name,
+ req['sans']
+ ))
+ return requests
+
+ @property
+ def new_requests(self):
+ """
+ Filtered view of [all_requests][] that only includes requests that
+ haven't been handled.
+
+ Each will be an instance of [CertificateRequest][].
+
+ This collection can also be further filtered by request type using
+ [new_server_requests][] or [new_client_requests][].
+
+ Example usage:
+
+ ```python
+ @when('tls.certs.requested')
+ def gen_certs():
+ tls = endpoint_from_flag('tls.certs.requested')
+ for request in tls.new_requests:
+ cert, key = generate_cert(request.cert_type,
+ request.common_name,
+ request.sans)
+ request.set_cert(cert, key)
+ ```
+ """
+ return [req for req in self.all_requests if not req.is_handled]
+
+ @property
+ def new_server_requests(self):
+ """
+ Filtered view of [new_requests][] that only includes server cert
+ requests.
+
+ Each will be an instance of [CertificateRequest][].
+
+ Example usage:
+
+ ```python
+ @when('tls.server.certs.requested')
+ def gen_server_certs():
+ tls = endpoint_from_flag('tls.server.certs.requested')
+ for request in tls.new_server_requests:
+ cert, key = generate_server_cert(request.common_name,
+ request.sans)
+ request.set_cert(cert, key)
+ ```
+ """
+ return [req for req in self.new_requests if req.cert_type == 'server']
+
+ @property
+ def new_client_requests(self):
+ """
+ Filtered view of [new_requests][] that only includes client cert
+ requests.
+
+ Each will be an instance of [CertificateRequest][].
+
+ Example usage:
+
+ ```python
+ @when('tls.client.certs.requested')
+ def gen_client_certs():
+ tls = endpoint_from_flag('tls.client.certs.requested')
+ for request in tls.new_client_requests:
+ cert, key = generate_client_cert(request.common_name,
+ request.sans)
+ request.set_cert(cert, key)
+ ```
+ """
+ return [req for req in self.new_requests if req.cert_type == 'client']
+
+ @property
+ def new_application_requests(self):
+ """
+ Filtered view of [new_requests][] that only includes application cert
+ requests.
+
+ Each will be an instance of [ApplicationCertificateRequest][].
+
+ Example usage:
+
+ ```python
+ @when('tls.application.certs.requested')
+ def gen_application_certs():
+ tls = endpoint_from_flag('tls.application.certs.requested')
+ for request in tls.new_application_requests:
+ cert, key = generate_application_cert(request.common_name,
+ request.sans)
+ request.set_cert(cert, key)
+ ```
+
+ :returns: List of certificate requests.
+ :rtype: [CertificateRequest, ]
+ """
+ return [req for req in self.new_requests
+ if req.cert_type == 'application']
+
+ @property
+ def all_published_certs(self):
+ """
+ List of all [Certificate][] instances that this provider has published
+ for all related applications.
+ """
+ return [req.cert for req in self.all_requests if req.cert]
diff --git a/kubeapi-load-balancer/hooks/relations/tls-certificates/pydocmd.yml b/kubeapi-load-balancer/hooks/relations/tls-certificates/pydocmd.yml
new file mode 100644
index 0000000..c568913
--- /dev/null
+++ b/kubeapi-load-balancer/hooks/relations/tls-certificates/pydocmd.yml
@@ -0,0 +1,19 @@
+site_name: 'TLS Certificates Interface'
+
+generate:
+ - requires.md:
+ - requires
+ - requires.TlsRequires+
+ - provides.md:
+ - provides
+ - provides.TlsProvides+
+ - common.md:
+ - tls_certificates_common.CertificateRequest+
+ - tls_certificates_common.Certificate+
+
+pages:
+ - Requires: requires.md
+ - Provides: provides.md
+ - Common: common.md
+
+gens_dir: docs
diff --git a/kubeapi-load-balancer/hooks/relations/tls-certificates/requires.py b/kubeapi-load-balancer/hooks/relations/tls-certificates/requires.py
new file mode 100644
index 0000000..951f953
--- /dev/null
+++ b/kubeapi-load-balancer/hooks/relations/tls-certificates/requires.py
@@ -0,0 +1,342 @@
+if not __package__:
+ # fix relative imports when building docs
+ import sys
+ __package__ = sys.modules[''].__name__
+
+import uuid
+
+from charmhelpers.core import hookenv
+
+from charms.reactive import when, when_not
+from charms.reactive import set_flag, clear_flag, toggle_flag
+from charms.reactive import Endpoint
+from charms.reactive import data_changed
+
+from .tls_certificates_common import Certificate
+
+
+class TlsRequires(Endpoint):
+ """
+ The client's side of the interface protocol.
+
+ The following flags may be set:
+
+ * `{endpoint_name}.available`
+ Whenever the relation is joined.
+
+ * `{endpoint_name}.ca.available`
+ When the root CA information is available via the [root_ca_cert][] and
+ [root_ca_chain][] properties.
+
+ * `{endpoint_name}.ca.changed`
+ When the root CA information has changed, whether because
+ they have just become available or if they were regenerated by the CA.
+ Once processed this flag should be removed by the charm.
+
+ * `{endpoint_name}.certs.available`
+ When the requested server or client certs are available.
+
+ * `{endpoint_name}.certs.changed`
+ When the requested server or client certs have changed, whether because
+ they have just become available or if they were regenerated by the CA.
+ Once processed this flag should be removed by the charm.
+
+ * `{endpoint_name}.server.certs.available`
+ When the server certificates requested by [request_server_cert][] are
+ available via the [server_certs][] collection.
+
+ * `{endpoint_name}.server.certs.changed`
+ When the requested server certificates have changed, whether because
+ they have just become available or if they were regenerated by the CA.
+ Once processed this flag should be removed by the charm.
+
+ * `{endpoint_name}.client.certs.available`
+ When the client certificates requested by [request_client_cert][] are
+ available via the [client_certs][] collection.
+
+ * `{endpoint_name}.client.certs.changed`
+ When the requested client certificates have changed, whether because
+ they have just become available or if they were regenerated by the CA.
+ Once processed this flag should be removed by the charm.
+
+ The following flags have been deprecated:
+
+ * `{endpoint_name}.server.cert.available`
+ * `{endpoint_name}.client.cert.available`
+ * `{endpoint_name}.batch.cert.available`
+
+ [Certificate]: common.md#tls_certificates_common.Certificate
+ [CertificateRequest]: common.md#tls_certificates_common.CertificateRequest
+ [root_ca_cert]: requires.md#requires.TlsRequires.root_ca_cert
+ [root_ca_chain]: requires.md#requires.TlsRequires.root_ca_chain
+ [request_server_cert]: requires.md#requires.TlsRequires.request_server_cert
+ [request_client_cert]: requires.md#requires.TlsRequires.request_client_cert
+ [server_certs]: requires.md#requires.TlsRequires.server_certs
+ [server_certs_map]: requires.md#requires.TlsRequires.server_certs_map
+ [client_certs]: requires.md#requires.TlsRequires.client_certs
+ """
+
+ @when('endpoint.{endpoint_name}.joined')
+ def joined(self):
+ self.relations[0].to_publish_raw['unit_name'] = self._unit_name
+ prefix = self.expand_name('{endpoint_name}.')
+ ca_available = self.root_ca_cert
+ ca_changed = ca_available and data_changed(prefix + 'ca',
+ self.root_ca_cert)
+ server_available = self.server_certs
+ server_changed = server_available and data_changed(prefix + 'servers',
+ self.server_certs)
+ client_available = self.client_certs
+ client_changed = client_available and data_changed(prefix + 'clients',
+ self.client_certs)
+ certs_available = server_available or client_available
+ certs_changed = server_changed or client_changed
+
+ set_flag(prefix + 'available')
+ toggle_flag(prefix + 'ca.available', ca_available)
+ toggle_flag(prefix + 'ca.changed', ca_changed)
+ toggle_flag(prefix + 'server.certs.available', server_available)
+ toggle_flag(prefix + 'server.certs.changed', server_changed)
+ toggle_flag(prefix + 'client.certs.available', client_available)
+ toggle_flag(prefix + 'client.certs.changed', client_changed)
+ toggle_flag(prefix + 'certs.available', certs_available)
+ toggle_flag(prefix + 'certs.changed', certs_changed)
+ # deprecated
+ toggle_flag(prefix + 'server.cert.available', self.server_certs)
+ toggle_flag(prefix + 'client.cert.available', self.get_client_cert())
+ toggle_flag(prefix + 'batch.cert.available', self.server_certs)
+
+ @when_not('endpoint.{endpoint_name}.joined')
+ def broken(self):
+ prefix = self.expand_name('{endpoint_name}.')
+ clear_flag(prefix + 'available')
+ clear_flag(prefix + 'ca.available')
+ clear_flag(prefix + 'ca.changed')
+ clear_flag(prefix + 'server.certs.available')
+ clear_flag(prefix + 'server.certs.changed')
+ clear_flag(prefix + 'client.certs.available')
+ clear_flag(prefix + 'client.certs.changed')
+ clear_flag(prefix + 'certs.available')
+ clear_flag(prefix + 'certs.changed')
+ # deprecated
+ clear_flag(prefix + 'server.cert.available')
+ clear_flag(prefix + 'client.cert.available')
+ clear_flag(prefix + 'batch.cert.available')
+
+ @property
+ def _unit_name(self):
+ return hookenv.local_unit().replace('/', '_')
+
+ @property
+ def root_ca_cert(self):
+ """
+ Root CA certificate.
+ """
+ # only the leader of the provider should set the CA, or all units
+ # had better agree
+ return self.all_joined_units.received_raw['ca']
+
+ def get_ca(self):
+ """
+ Return the root CA certificate.
+
+ Same as [root_ca_cert][].
+ """
+ return self.root_ca_cert
+
+ @property
+ def root_ca_chain(self):
+ """
+ The chain of trust for the root CA.
+ """
+ # only the leader of the provider should set the CA, or all units
+ # had better agree
+ return self.all_joined_units.received_raw['chain']
+
+ def get_chain(self):
+ """
+ Return the chain of trust for the root CA.
+
+ Same as [root_ca_chain][].
+ """
+ return self.root_ca_chain
+
+ def get_client_cert(self):
+ """
+ Deprecated. Use [request_client_cert][] and the [client_certs][]
+ collection instead.
+
+ Return a globally shared client certificate and key.
+ """
+ data = self.all_joined_units.received_raw
+ return (data['client.cert'], data['client.key'])
+
+ def get_server_cert(self):
+ """
+ Deprecated. Use the [server_certs][] collection instead.
+
+ Return the cert and key of the first server certificate requested.
+ """
+ if not self.server_certs:
+ return (None, None)
+ cert = self.server_certs[0]
+ return (cert.cert, cert.key)
+
+ @property
+ def server_certs(self):
+ """
+ List of [Certificate][] instances for all available server certs.
+ """
+ certs = []
+ raw_data = self.all_joined_units.received_raw
+ json_data = self.all_joined_units.received
+
+ # for backwards compatibility, the first cert goes in its own fields
+ if self.relations:
+ common_name = self.relations[0].to_publish_raw['common_name']
+ cert = raw_data['{}.server.cert'.format(self._unit_name)]
+ key = raw_data['{}.server.key'.format(self._unit_name)]
+ if cert and key:
+ certs.append(Certificate('server',
+ common_name,
+ cert,
+ key))
+
+ # subsequent requests go in the collection
+ field = '{}.processed_requests'.format(self._unit_name)
+ certs_data = json_data[field] or {}
+ certs.extend(Certificate('server',
+ common_name,
+ cert['cert'],
+ cert['key'])
+ for common_name, cert in certs_data.items())
+ return certs
+
+ @property
+ def application_certs(self):
+ """
+ List containing the application Certificate cert.
+
+ :returns: A list containing one certificate
+ :rtype: [Certificate()]
+ """
+ certs = []
+ json_data = self.all_joined_units.received
+ field = '{}.processed_application_requests'.format(self._unit_name)
+ certs_data = json_data[field] or {}
+ app_cert_data = certs_data.get('app_data')
+ if app_cert_data:
+ certs = [Certificate(
+ 'server',
+ 'app_data',
+ app_cert_data['cert'],
+ app_cert_data['key'])]
+ return certs
+
+ @property
+ def server_certs_map(self):
+ """
+ Mapping of server [Certificate][] instances by their `common_name`.
+ """
+ return {cert.common_name: cert for cert in self.server_certs}
+
+ def get_batch_requests(self):
+ """
+ Deprecated. Use [server_certs_map][] instead.
+
+ Mapping of server [Certificate][] instances by their `common_name`.
+ """
+ return self.server_certs_map
+
+ @property
+ def client_certs(self):
+ """
+ List of [Certificate][] instances for all available client certs.
+ """
+ field = '{}.processed_client_requests'.format(self._unit_name)
+ certs_data = self.all_joined_units.received[field] or {}
+ return [Certificate('client',
+ common_name,
+ cert['cert'],
+ cert['key'])
+ for common_name, cert in certs_data.items()]
+
+ @property
+ def client_certs_map(self):
+ """
+ Mapping of client [Certificate][] instances by their `common_name`.
+ """
+ return {cert.common_name: cert for cert in self.client_certs}
+
+ def request_server_cert(self, cn, sans=None, cert_name=None):
+ """
+ Request a server certificate and key be generated for the given
+ common name (`cn`) and optional list of alternative names (`sans`).
+
+ The `cert_name` is deprecated and not needed.
+
+ This can be called multiple times to request more than one server
+ certificate, although the common names must be unique. If called
+ again with the same common name, it will be ignored.
+ """
+ if not self.relations:
+ return
+ # assume we'll only be connected to one provider
+ to_publish_json = self.relations[0].to_publish
+ to_publish_raw = self.relations[0].to_publish_raw
+ if to_publish_raw['common_name'] in (None, '', cn):
+ # for backwards compatibility, first request goes in its own fields
+ to_publish_raw['common_name'] = cn
+ to_publish_json['sans'] = sans or []
+ cert_name = to_publish_raw.get('certificate_name') or cert_name
+ if cert_name is None:
+ cert_name = str(uuid.uuid4())
+ to_publish_raw['certificate_name'] = cert_name
+ else:
+ # subsequent requests go in the collection
+ requests = to_publish_json.get('cert_requests', {})
+ requests[cn] = {'sans': sans or []}
+ to_publish_json['cert_requests'] = requests
+
+ def add_request_server_cert(self, cn, sans):
+ """
+ Deprecated. Use [request_server_cert][] instead.
+ """
+ self.request_server_cert(cn, sans)
+
+ def request_server_certs(self):
+ """
+ Deprecated. Just use [request_server_cert][]; this does nothing.
+ """
+ pass
+
+ def request_client_cert(self, cn, sans):
+ """
+ Request a client certificate and key be generated for the given
+ common name (`cn`) and list of alternative names (`sans`).
+
+ This can be called multiple times to request more than one client
+ certificate, although the common names must be unique. If called
+ again with the same common name, it will be ignored.
+ """
+ if not self.relations:
+ return
+ # assume we'll only be connected to one provider
+ to_publish_json = self.relations[0].to_publish
+ requests = to_publish_json.get('client_cert_requests', {})
+ requests[cn] = {'sans': sans}
+ to_publish_json['client_cert_requests'] = requests
+
+ def request_application_cert(self, cn, sans):
+ """
+ Request an application certificate and key be generated for the given
+ common name (`cn`) and list of alternative names (`sans`) of this
+ unit and all peer units. All units will share a single certificate.
+ """
+ if not self.relations:
+ return
+ # assume we'll only be connected to one provider
+ to_publish_json = self.relations[0].to_publish
+ requests = to_publish_json.get('application_cert_requests', {})
+ requests[cn] = {'sans': sans}
+ to_publish_json['application_cert_requests'] = requests
diff --git a/kubeapi-load-balancer/hooks/relations/tls-certificates/tls_certificates_common.py b/kubeapi-load-balancer/hooks/relations/tls-certificates/tls_certificates_common.py
new file mode 100644
index 0000000..99a2f8c
--- /dev/null
+++ b/kubeapi-load-balancer/hooks/relations/tls-certificates/tls_certificates_common.py
@@ -0,0 +1,302 @@
+from charms.reactive import clear_flag, is_data_changed, data_changed
+
+
+class CertificateRequest(dict):
+ def __init__(self, unit, cert_type, cert_name, common_name, sans):
+ self._unit = unit
+ self._cert_type = cert_type
+ super().__init__({
+ 'certificate_name': cert_name,
+ 'common_name': common_name,
+ 'sans': sans,
+ })
+
+ @property
+ def _key(self):
+ return '.'.join((self._unit.relation.relation_id,
+ self.unit_name,
+ self.common_name))
+
+ def resolve_unit_name(self, unit):
+ """Return name of unit associated with this request.
+
+ unit_name should be provided in the relation data to ensure
+ compatibility with cross-model relations. If the unit name
+ is absent then fall back to unit_name attribute of the
+ unit associated with this request.
+
+ :param unit: Unit to extract name from
+ :type unit: charms.reactive.endpoints.RelatedUnit
+ :returns: Name of unit
+ :rtype: str
+ """
+ unit_name = unit.received_raw['unit_name']
+ if not unit_name:
+ unit_name = unit.unit_name
+ return unit_name
+
+ @property
+ def unit_name(self):
+ """Name of this unit.
+
+ :returns: Name of unit
+ :rtype: str
+ """
+ return self.resolve_unit_name(unit=self._unit).replace('/', '_')
+
+ @property
+ def application_name(self):
+ """Name of the application which the request came from.
+
+ :returns: Name of application
+ :rtype: str
+ """
+ return self.resolve_unit_name(unit=self._unit).split('/')[0]
+
+ @property
+ def cert_type(self):
+ """
+ Type of certificate, 'server' or 'client', being requested.
+ """
+ return self._cert_type
+
+ @property
+ def cert_name(self):
+ return self['certificate_name']
+
+ @property
+ def common_name(self):
+ return self['common_name']
+
+ @property
+ def sans(self):
+ return self['sans']
+
+ @property
+ def _publish_key(self):
+ if self.cert_type == 'server':
+ return '{}.processed_requests'.format(self.unit_name)
+ elif self.cert_type == 'client':
+ return '{}.processed_client_requests'.format(self.unit_name)
+ raise ValueError('Unknown cert_type: {}'.format(self.cert_type))
+
+ @property
+ def _server_cert_key(self):
+ return '{}.server.cert'.format(self.unit_name)
+
+ @property
+ def _server_key_key(self):
+ return '{}.server.key'.format(self.unit_name)
+
+ @property
+ def _is_top_level_server_cert(self):
+ return (self.cert_type == 'server' and
+ self.common_name == self._unit.received_raw['common_name'])
+
+ @property
+ def cert(self):
+ """
+ The cert published for this request, if any.
+ """
+ cert, key = None, None
+ if self._is_top_level_server_cert:
+ tpr = self._unit.relation.to_publish_raw
+ cert = tpr[self._server_cert_key]
+ key = tpr[self._server_key_key]
+ else:
+ tp = self._unit.relation.to_publish
+ certs_data = tp.get(self._publish_key, {})
+ cert_data = certs_data.get(self.common_name, {})
+ cert = cert_data.get('cert')
+ key = cert_data.get('key')
+ if cert and key:
+ return Certificate(self.cert_type, self.common_name, cert, key)
+ return None
+
+ @property
+ def is_handled(self):
+ has_cert = self.cert is not None
+ same_sans = not is_data_changed(self._key,
+ sorted(set(self.sans or [])))
+ return has_cert and same_sans
+
+ def set_cert(self, cert, key):
+ rel = self._unit.relation
+ if self._is_top_level_server_cert:
+ # backwards compatibility; if this is the cert that was requested
+ # as a single server cert, set it in the response as the single
+ # server cert
+ rel.to_publish_raw.update({
+ self._server_cert_key: cert,
+ self._server_key_key: key,
+ })
+ else:
+ data = rel.to_publish.get(self._publish_key, {})
+ data[self.common_name] = {
+ 'cert': cert,
+ 'key': key,
+ }
+ rel.to_publish[self._publish_key] = data
+ if not rel.endpoint.new_server_requests:
+ clear_flag(rel.endpoint.expand_name('{endpoint_name}.server'
+ '.cert.requested'))
+ if not rel.endpoint.new_requests:
+ clear_flag(rel.endpoint.expand_name('{endpoint_name}.'
+ 'certs.requested'))
+ data_changed(self._key, sorted(set(self.sans or [])))
+
+
+class ApplicationCertificateRequest(CertificateRequest):
+ """
+ A request for an application consistent certificate.
+
+ This is a request for a certificate that works for all units of an
+ application. All sans and cns are added together to produce one
+ certificate and the same certificate and key are sent to all the
+ units of an application. Only one ApplicationCertificateRequest
+ is needed per application.
+ """
+
+ @property
+ def _key(self):
+ """Key to identify this cert.
+
+ :returns: cert key
+ :rtype: str
+ """
+ return '{}.{}'.format(self._unit.relation.relation_id, 'app_cert')
+
+ @property
+ def cert(self):
+ """
+ The cert published for this request, if any.
+
+ :returns: Certificate
+ :rtype: Certificate or None
+ """
+ cert, key = None, None
+ tp = self._unit.relation.to_publish
+ certs_data = tp.get(self._publish_key, {})
+ cert_data = certs_data.get('app_data', {})
+ cert = cert_data.get('cert')
+ key = cert_data.get('key')
+ if cert and key:
+ return Certificate(self.cert_type, self.common_name, cert, key)
+ return None
+
+ @property
+ def is_handled(self):
+ """Whether the certificate has been handled.
+
+ :returns: If the cert has been handled
+ :rtype: bool
+ """
+ has_cert = self.cert is not None
+ same_sans = not is_data_changed(self._key,
+ sorted(set(self.sans or [])))
+ return has_cert and same_sans
+
+ @property
+ def sans(self):
+ """Generate a list of all sans from all units of application
+
+ Examine all units of the application and compile a list of
+ all sans. CNs are treated as additional san entries.
+
+ :returns: List of sans
+ :rtype: List[str]
+ """
+ _sans = []
+ for unit in self._unit.relation.units:
+ reqs = unit.received['application_cert_requests'] or {}
+ for cn, req in reqs.items():
+ _sans.append(cn)
+ _sans.extend(req['sans'])
+ return sorted(list(set(_sans)))
+
+ @property
+ def _request_key(self):
+ """Key used to request cert
+
+ :returns: Key used to request cert
+ :rtype: str
+ """
+ return 'application_cert_requests'
+
+ def derive_publish_key(self, unit=None):
+ """Derive the application cert publish key for a unit.
+
+ :param unit: Unit to extract name from
+ :type unit: charms.reactive.endpoints.RelatedUnit
+ :returns: publish key
+ :rtype: str
+ """
+ if not unit:
+ unit = self._unit
+ unit_name = self.resolve_unit_name(unit).replace('/', '_')
+ return '{}.processed_application_requests'.format(unit_name)
+
+ @property
+ def _publish_key(self):
+ """Key used to publish cert
+
+ :returns: Key used to publish cert
+ :rtype: str
+ """
+ return self.derive_publish_key(unit=self._unit)
+
+ def set_cert(self, cert, key):
+ """Send the cert and key to all units of the application
+
+ :param cert: TLS Certificate
+ :type cert: str
+ :param key: TLS Private Key
+ :type key: str
+ """
+ rel = self._unit.relation
+ for unit in self._unit.relation.units:
+ pub_key = self.derive_publish_key(unit=unit)
+ data = rel.to_publish.get(
+ pub_key,
+ {})
+ data['app_data'] = {
+ 'cert': cert,
+ 'key': key,
+ }
+ rel.to_publish[pub_key] = data
+ if not rel.endpoint.new_application_requests:
+ clear_flag(rel.endpoint.expand_name(
+ '{endpoint_name}.application.certs.requested'))
+ data_changed(self._key, sorted(set(self.sans or [])))
+
+
+class Certificate(dict):
+ """
+ Represents a created certificate and key.
+
+ The ``cert_type``, ``common_name``, ``cert``, and ``key`` values can
+ be accessed either as properties or as the contents of the dict.
+ """
+ def __init__(self, cert_type, common_name, cert, key):
+ super().__init__({
+ 'cert_type': cert_type,
+ 'common_name': common_name,
+ 'cert': cert,
+ 'key': key,
+ })
+
+ @property
+ def cert_type(self):
+ return self['cert_type']
+
+ @property
+ def common_name(self):
+ return self['common_name']
+
+ @property
+ def cert(self):
+ return self['cert']
+
+ @property
+ def key(self):
+ return self['key']
diff --git a/kubeapi-load-balancer/hooks/relations/tls-certificates/tox.ini b/kubeapi-load-balancer/hooks/relations/tls-certificates/tox.ini
new file mode 100644
index 0000000..90de9d3
--- /dev/null
+++ b/kubeapi-load-balancer/hooks/relations/tls-certificates/tox.ini
@@ -0,0 +1,17 @@
+[tox]
+envlist = py3
+skipsdist = true
+
+[testenv]
+basepython=python3
+envdir={toxworkdir}/py3
+deps=
+ pytest
+ charms.reactive
+ pydoc-markdown
+
+[testenv:docs]
+commands=python make_docs
+
+[flake8]
+ignore=E402
diff --git a/kubeapi-load-balancer/hooks/start b/kubeapi-load-balancer/hooks/start
new file mode 100755
index 0000000..9858c6b
--- /dev/null
+++ b/kubeapi-load-balancer/hooks/start
@@ -0,0 +1,22 @@
+#!/usr/bin/env python3
+
+# Load modules from $JUJU_CHARM_DIR/lib
+import sys
+sys.path.append('lib')
+
+from charms.layer import basic # noqa
+basic.bootstrap_charm_deps()
+
+from charmhelpers.core import hookenv # noqa
+hookenv.atstart(basic.init_config_states)
+hookenv.atexit(basic.clear_config_states)
+
+
+# This will load and run the appropriate @hook and other decorated
+# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive,
+# and $JUJU_CHARM_DIR/hooks/relations.
+#
+# See https://jujucharms.com/docs/stable/authors-charm-building
+# for more information on this pattern.
+from charms.reactive import main # noqa
+main()
diff --git a/kubeapi-load-balancer/hooks/stop b/kubeapi-load-balancer/hooks/stop
new file mode 100755
index 0000000..9858c6b
--- /dev/null
+++ b/kubeapi-load-balancer/hooks/stop
@@ -0,0 +1,22 @@
+#!/usr/bin/env python3
+
+# Load modules from $JUJU_CHARM_DIR/lib
+import sys
+sys.path.append('lib')
+
+from charms.layer import basic # noqa
+basic.bootstrap_charm_deps()
+
+from charmhelpers.core import hookenv # noqa
+hookenv.atstart(basic.init_config_states)
+hookenv.atexit(basic.clear_config_states)
+
+
+# This will load and run the appropriate @hook and other decorated
+# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive,
+# and $JUJU_CHARM_DIR/hooks/relations.
+#
+# See https://jujucharms.com/docs/stable/authors-charm-building
+# for more information on this pattern.
+from charms.reactive import main # noqa
+main()
diff --git a/kubeapi-load-balancer/hooks/update-status b/kubeapi-load-balancer/hooks/update-status
new file mode 100755
index 0000000..9858c6b
--- /dev/null
+++ b/kubeapi-load-balancer/hooks/update-status
@@ -0,0 +1,22 @@
+#!/usr/bin/env python3
+
+# Load modules from $JUJU_CHARM_DIR/lib
+import sys
+sys.path.append('lib')
+
+from charms.layer import basic # noqa
+basic.bootstrap_charm_deps()
+
+from charmhelpers.core import hookenv # noqa
+hookenv.atstart(basic.init_config_states)
+hookenv.atexit(basic.clear_config_states)
+
+
+# This will load and run the appropriate @hook and other decorated
+# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive,
+# and $JUJU_CHARM_DIR/hooks/relations.
+#
+# See https://jujucharms.com/docs/stable/authors-charm-building
+# for more information on this pattern.
+from charms.reactive import main # noqa
+main()
diff --git a/kubeapi-load-balancer/hooks/upgrade-charm b/kubeapi-load-balancer/hooks/upgrade-charm
new file mode 100755
index 0000000..9858c6b
--- /dev/null
+++ b/kubeapi-load-balancer/hooks/upgrade-charm
@@ -0,0 +1,22 @@
+#!/usr/bin/env python3
+
+# Load modules from $JUJU_CHARM_DIR/lib
+import sys
+sys.path.append('lib')
+
+from charms.layer import basic # noqa
+basic.bootstrap_charm_deps()
+
+from charmhelpers.core import hookenv # noqa
+hookenv.atstart(basic.init_config_states)
+hookenv.atexit(basic.clear_config_states)
+
+
+# This will load and run the appropriate @hook and other decorated
+# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive,
+# and $JUJU_CHARM_DIR/hooks/relations.
+#
+# See https://jujucharms.com/docs/stable/authors-charm-building
+# for more information on this pattern.
+from charms.reactive import main # noqa
+main()
diff --git a/kubeapi-load-balancer/hooks/website-relation-broken b/kubeapi-load-balancer/hooks/website-relation-broken
new file mode 100755
index 0000000..9858c6b
--- /dev/null
+++ b/kubeapi-load-balancer/hooks/website-relation-broken
@@ -0,0 +1,22 @@
+#!/usr/bin/env python3
+
+# Load modules from $JUJU_CHARM_DIR/lib
+import sys
+sys.path.append('lib')
+
+from charms.layer import basic # noqa
+basic.bootstrap_charm_deps()
+
+from charmhelpers.core import hookenv # noqa
+hookenv.atstart(basic.init_config_states)
+hookenv.atexit(basic.clear_config_states)
+
+
+# This will load and run the appropriate @hook and other decorated
+# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive,
+# and $JUJU_CHARM_DIR/hooks/relations.
+#
+# See https://jujucharms.com/docs/stable/authors-charm-building
+# for more information on this pattern.
+from charms.reactive import main # noqa
+main()
diff --git a/kubeapi-load-balancer/hooks/website-relation-changed b/kubeapi-load-balancer/hooks/website-relation-changed
new file mode 100755
index 0000000..9858c6b
--- /dev/null
+++ b/kubeapi-load-balancer/hooks/website-relation-changed
@@ -0,0 +1,22 @@
+#!/usr/bin/env python3
+
+# Load modules from $JUJU_CHARM_DIR/lib
+import sys
+sys.path.append('lib')
+
+from charms.layer import basic # noqa
+basic.bootstrap_charm_deps()
+
+from charmhelpers.core import hookenv # noqa
+hookenv.atstart(basic.init_config_states)
+hookenv.atexit(basic.clear_config_states)
+
+
+# This will load and run the appropriate @hook and other decorated
+# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive,
+# and $JUJU_CHARM_DIR/hooks/relations.
+#
+# See https://jujucharms.com/docs/stable/authors-charm-building
+# for more information on this pattern.
+from charms.reactive import main # noqa
+main()
diff --git a/kubeapi-load-balancer/hooks/website-relation-created b/kubeapi-load-balancer/hooks/website-relation-created
new file mode 100755
index 0000000..9858c6b
--- /dev/null
+++ b/kubeapi-load-balancer/hooks/website-relation-created
@@ -0,0 +1,22 @@
+#!/usr/bin/env python3
+
+# Load modules from $JUJU_CHARM_DIR/lib
+import sys
+sys.path.append('lib')
+
+from charms.layer import basic # noqa
+basic.bootstrap_charm_deps()
+
+from charmhelpers.core import hookenv # noqa
+hookenv.atstart(basic.init_config_states)
+hookenv.atexit(basic.clear_config_states)
+
+
+# This will load and run the appropriate @hook and other decorated
+# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive,
+# and $JUJU_CHARM_DIR/hooks/relations.
+#
+# See https://jujucharms.com/docs/stable/authors-charm-building
+# for more information on this pattern.
+from charms.reactive import main # noqa
+main()
diff --git a/kubeapi-load-balancer/hooks/website-relation-departed b/kubeapi-load-balancer/hooks/website-relation-departed
new file mode 100755
index 0000000..9858c6b
--- /dev/null
+++ b/kubeapi-load-balancer/hooks/website-relation-departed
@@ -0,0 +1,22 @@
+#!/usr/bin/env python3
+
+# Load modules from $JUJU_CHARM_DIR/lib
+import sys
+sys.path.append('lib')
+
+from charms.layer import basic # noqa
+basic.bootstrap_charm_deps()
+
+from charmhelpers.core import hookenv # noqa
+hookenv.atstart(basic.init_config_states)
+hookenv.atexit(basic.clear_config_states)
+
+
+# This will load and run the appropriate @hook and other decorated
+# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive,
+# and $JUJU_CHARM_DIR/hooks/relations.
+#
+# See https://jujucharms.com/docs/stable/authors-charm-building
+# for more information on this pattern.
+from charms.reactive import main # noqa
+main()
diff --git a/kubeapi-load-balancer/hooks/website-relation-joined b/kubeapi-load-balancer/hooks/website-relation-joined
new file mode 100755
index 0000000..9858c6b
--- /dev/null
+++ b/kubeapi-load-balancer/hooks/website-relation-joined
@@ -0,0 +1,22 @@
+#!/usr/bin/env python3
+
+# Load modules from $JUJU_CHARM_DIR/lib
+import sys
+sys.path.append('lib')
+
+from charms.layer import basic # noqa
+basic.bootstrap_charm_deps()
+
+from charmhelpers.core import hookenv # noqa
+hookenv.atstart(basic.init_config_states)
+hookenv.atexit(basic.clear_config_states)
+
+
+# This will load and run the appropriate @hook and other decorated
+# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive,
+# and $JUJU_CHARM_DIR/hooks/relations.
+#
+# See https://jujucharms.com/docs/stable/authors-charm-building
+# for more information on this pattern.
+from charms.reactive import main # noqa
+main()
diff --git a/kubeapi-load-balancer/icon.svg b/kubeapi-load-balancer/icon.svg
new file mode 100644
index 0000000..7f2998e
--- /dev/null
+++ b/kubeapi-load-balancer/icon.svg
@@ -0,0 +1,412 @@
+
+
+
+
diff --git a/kubeapi-load-balancer/layer.yaml b/kubeapi-load-balancer/layer.yaml
new file mode 100644
index 0000000..59f9e9c
--- /dev/null
+++ b/kubeapi-load-balancer/layer.yaml
@@ -0,0 +1,47 @@
+"includes":
+- "layer:options"
+- "layer:basic"
+- "interface:nrpe-external-master"
+- "layer:status"
+- "layer:apt"
+- "interface:http"
+- "layer:debug"
+- "interface:tls-certificates"
+- "interface:hacluster"
+- "layer:kubernetes-common"
+- "layer:metrics"
+- "layer:nagios"
+- "layer:nginx"
+- "layer:tls-client"
+- "layer:hacluster"
+- "interface:public-address"
+"exclude": [".travis.yml", "tests", "tox.ini", "test-requirements.txt", "unit_tests"]
+"options":
+ "apt":
+ "packages":
+ - "nginx-full"
+ "version_package": ""
+ "full_version": !!bool "false"
+ "keys": []
+ "tls-client":
+ "ca_certificate_path": "/srv/kubernetes/ca.crt"
+ "server_certificate_path": ""
+ "server_key_path": ""
+ "client_certificate_path": ""
+ "client_key_path": ""
+ "hacluster":
+ "binding_address": "website"
+ "basic":
+ "packages": []
+ "python_packages": []
+ "use_venv": !!bool "true"
+ "include_system_packages": !!bool "false"
+ "nagios": {}
+ "status":
+ "patch-hookenv": !!bool "true"
+ "nginx": {}
+ "debug": {}
+ "kubernetes-common": {}
+ "kubeapi-load-balancer": {}
+"repo": "https://github.com/kubernetes/kubernetes.git"
+"is": "kubeapi-load-balancer"
diff --git a/kubeapi-load-balancer/lib/.gitkeep b/kubeapi-load-balancer/lib/.gitkeep
new file mode 100644
index 0000000..e69de29
diff --git a/kubeapi-load-balancer/lib/charms/apt.py b/kubeapi-load-balancer/lib/charms/apt.py
new file mode 100644
index 0000000..14508c4
--- /dev/null
+++ b/kubeapi-load-balancer/lib/charms/apt.py
@@ -0,0 +1,209 @@
+# Copyright 2015-2020 Canonical Ltd.
+#
+# This file is part of the Apt layer for Juju.
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 3, as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranties of
+# MERCHANTABILITY, SATISFACTORY QUALITY, or FITNESS FOR A PARTICULAR
+# PURPOSE. See the GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see .
+
+'''
+charms.reactive helpers for dealing with deb packages.
+
+Add apt package sources using add_source(). Queue deb packages for
+installation with install(). Configure and work with your software
+once the apt.installed.{packagename} flag is set.
+'''
+import itertools
+import re
+import subprocess
+
+from charmhelpers import fetch
+from charmhelpers.core import hookenv, unitdata
+from charms import layer, reactive
+from charms.layer import status
+from charms.reactive import flags
+
+
+__all__ = ['add_source', 'update', 'queue_install', 'install_queued', 'installed', 'purge', 'ensure_package_status']
+
+
def add_source(source, key=None):
    '''Add an apt source.

    Sets the apt.needs_update flag.

    A source may be either a line that can be added directly to
    sources.list(5), or in the form ppa:<owner>/<ppa-name> for adding
    Personal Package Archives, or a distribution component to enable.

    The package signing key should be an ASCII armoured GPG key. While
    GPG key ids are also supported, the retrieval mechanism is insecure.
    There is no need to specify the package signing key for PPAs or for
    the main Ubuntu archives.
    '''
    # Maybe we should remember which sources have been added already
    # so we don't waste time re-adding them. Is this time significant?
    fetch.add_source(source, key)
    # Signal handlers that `apt-get update` must run before further installs.
    reactive.set_flag('apt.needs_update')
+
+
def queue_install(packages, options=None):
    """Queue one or more deb packages for install.

    The `apt.installed.{name}` flag is set once the package is installed.

    If a package has already been installed it will not be reinstalled.

    If a package has already been queued it will not be requeued, and
    the install options will not be changed.

    Sets the apt.queued_installs flag.
    """
    if isinstance(packages, str):
        # Accept a single package name as a convenience.
        packages = [packages]
    # Filter installed packages.
    store = unitdata.kv()
    queued_packages = store.getrange('apt.install_queue.', strip=True)
    # Keep only packages that are neither already queued nor installed.
    packages = {
        package: options
        for package in packages
        if not (package in queued_packages or reactive.is_flag_set('apt.installed.' + package))
    }
    if packages:
        unitdata.kv().update(packages, prefix='apt.install_queue.')
        reactive.set_flag('apt.queued_installs')
+
+
def installed():
    '''Return the set of deb packages whose install has completed.'''
    prefix = 'apt.installed.'
    packages = set()
    for flag in flags.get_flags():
        if flag.startswith(prefix):
            # Everything after the second dot is the package name.
            packages.add(flag.split('.', 2)[2])
    return packages
+
+
def purge(packages):
    """Purge one or more deb packages from the system"""
    # Remove the packages, then forget any queued installs and
    # installed-flags that refer to them.
    fetch.apt_purge(packages, fatal=True)
    unitdata.kv().unsetrange(packages, prefix='apt.install_queue.')
    for name in packages:
        reactive.clear_flag('apt.installed.{}'.format(name))
+
+
def update():
    """Update the apt cache.

    Removes the apt.needs_update flag.
    """
    # Surface progress in the workload status while apt runs.
    status.maintenance('Updating apt cache')
    fetch.apt_update(fatal=True)  # Friends don't let friends set fatal=False
    reactive.clear_flag('apt.needs_update')
+
+
def install_queued():
    '''Installs queued deb packages.

    Removes the apt.queued_installs flag and sets the apt.installed flag.

    On failure, sets the unit's workload status to 'blocked' and returns
    False. Package installs remain queued.

    On success, sets the apt.installed.{packagename} flag for each
    installed package and returns True.
    '''
    store = unitdata.kv()
    # Sort by options so packages sharing identical install options are
    # grouped into a single apt invocation by groupby() below.
    queue = sorted((options, package) for package, options in store.getrange('apt.install_queue.', strip=True).items())

    installed = set()
    for options, batch in itertools.groupby(queue, lambda x: x[0]):
        packages = [b[1] for b in batch]
        try:
            status.maintenance('Installing {}'.format(','.join(packages)))
            fetch.apt_install(packages, options, fatal=True)
            # Only dequeue a batch once it actually installed.
            store.unsetrange(packages, prefix='apt.install_queue.')
            installed.update(packages)
        except subprocess.CalledProcessError:
            status.blocked('Unable to install packages {}'.format(','.join(packages)))
            return False  # Without setting reactive flag.

    for package in installed:
        reactive.set_flag('apt.installed.{}'.format(package))
    reactive.clear_flag('apt.queued_installs')

    # Refresh the reported application version now that packages changed.
    reset_application_version()

    return True
+
+
def get_package_version(package, full_version=False):
    '''Return the version of an installed package.

    If `full_version` is True, returns the full Debian package version.
    Otherwise, returns the shorter 'upstream' version number.
    '''
    # Don't use fetch.get_upstream_version, as it depends on python-apt
    # and not available if the basic layer's use_site_packages option is off.
    query = ['dpkg-query', '--show', r'--showformat=${Version}\n', package]
    version = subprocess.check_output(query, universal_newlines=True).strip()
    if full_version:
        return version
    # Attempt to strip off Debian style metadata from the end of the
    # version number.
    match = re.search(r'^([\d.a-z]+)', version, re.I)
    return version if match is None else match.group(1)
+
+
def reset_application_version():
    '''Set the Juju application version, per settings in layer.yaml'''
    # Reset the application version. We call this after installing
    # packages to initialize the version. We also call this every
    # hook, in case the version has changed (eg. Landscape upgraded
    # the package).
    opts = layer.options().get('apt', {})
    pkg = opts.get('version_package')
    # Only report a version once the nominated package is installed.
    if pkg and pkg in installed():
        ver = get_package_version(pkg, opts.get('full_version', False))
        hookenv.application_version_set(ver)
+
+
def ensure_package_status():
    '''Hold or unhold packages per the package_status configuration option.

    All packages installed using this module and handlers are affected.

    A mechanism may be added in the future to override this for a
    subset of installed packages.
    '''
    packages = installed()
    if not packages:
        return
    config = hookenv.config()
    package_status = config.get('package_status') or ''
    # Only invoke apt when either the desired status or the package set
    # actually changed since the last run.
    changed = reactive.data_changed('apt.package_status', (package_status, sorted(packages)))
    if changed:
        if package_status == 'hold':
            hookenv.log('Holding packages {}'.format(','.join(packages)))
            fetch.apt_hold(packages)
        else:
            hookenv.log('Unholding packages {}'.format(','.join(packages)))
            fetch.apt_unhold(packages)
    reactive.clear_flag('apt.needs_hold')
+
+
def status_set(state, message):
    '''DEPRECATED, set the unit's workload status.

    Set state == None to keep the same state and just change the message.
    '''
    if state is None:
        # Keep the current state, falling back to 'maintenance' when the
        # current state is not one we can safely re-assert.
        current = hookenv.status_get()[0]
        state = current if current in ('active', 'waiting', 'blocked') else 'maintenance'
    status.status_set(state, message)
diff --git a/kubeapi-load-balancer/lib/charms/layer/__init__.py b/kubeapi-load-balancer/lib/charms/layer/__init__.py
new file mode 100644
index 0000000..a8e0c64
--- /dev/null
+++ b/kubeapi-load-balancer/lib/charms/layer/__init__.py
@@ -0,0 +1,60 @@
+import sys
+from importlib import import_module
+from pathlib import Path
+
+
def import_layer_libs():
    """
    Ensure that all layer libraries are imported.

    This makes it possible to do the following:

        from charms import layer

        layer.foo.do_foo_thing()

    Note: This function must be called after bootstrap.
    """
    skipped = ('__init__', 'basic', 'execd')
    for entry in Path('lib/charms/layer').glob('*'):
        name = entry.stem
        looks_like_module = entry.suffix == '.py' or entry.is_dir()
        if name in skipped or not looks_like_module:
            continue
        import_module('charms.layer.{}'.format(name))
+
+
# Terrible hack to support the old terrible interface.
# Try to get people to call layer.options.get() instead so
# that we can remove this garbage.
# Cribbed from https://stackoverflow.com/a/48100440/4941864
class OptionsBackwardsCompatibilityHack(sys.modules[__name__].__class__):
    # Module subclass that makes `charms.layer.options` itself callable,
    # forwarding legacy `options(section)` calls to `options.get()`.
    def __call__(self, section=None, layer_file=None):
        if layer_file is None:
            return self.get(section=section)
        else:
            return self.get(section=section,
                            layer_file=Path(layer_file))
+
+
def patch_options_interface():
    """Make the charms.layer.options module callable for legacy callers."""
    from charms.layer import options
    # NOTE(review): checks only the minor version and assumes Python 3 —
    # confirm if this code could ever run under a different major version.
    if sys.version_info.minor >= 5:
        options.__class__ = OptionsBackwardsCompatibilityHack
    else:
        # Py 3.4 doesn't support changing the __class__, so we have to do it
        # another way. The last line is needed because we already have a
        # reference that doesn't get updated with sys.modules.
        name = options.__name__
        hack = OptionsBackwardsCompatibilityHack(name)
        hack.get = options.get
        sys.modules[name] = hack
        sys.modules[__name__].options = hack
+
+
+try:
+ patch_options_interface()
+except ImportError:
+ # This may fail if pyyaml hasn't been installed yet. But in that
+ # case, the bootstrap logic will try it again once it has.
+ pass
diff --git a/kubeapi-load-balancer/lib/charms/layer/basic.py b/kubeapi-load-balancer/lib/charms/layer/basic.py
new file mode 100644
index 0000000..7507203
--- /dev/null
+++ b/kubeapi-load-balancer/lib/charms/layer/basic.py
@@ -0,0 +1,446 @@
+import os
+import sys
+import re
+import shutil
+from distutils.version import LooseVersion
+from pkg_resources import Requirement
+from glob import glob
+from subprocess import check_call, check_output, CalledProcessError
+from time import sleep
+
+from charms import layer
+from charms.layer.execd import execd_preinstall
+
+
+def _get_subprocess_env():
+ env = os.environ.copy()
+ env['LANG'] = env.get('LANG', 'C.UTF-8')
+ return env
+
+
def get_series():
    """
    Return a series identifier for a few known OSes.

    Tested as of 2019 november:
    * centos6, centos7, rhel6.
    * bionic

    Returns e.g. 'ubuntu18.04', 'centos7', 'bionic', or 'unknown' when
    the OS cannot be determined.
    """
    # Looking for content in /etc/os-release
    # works for ubuntu + some centos
    if os.path.isfile('/etc/os-release'):
        d = {}
        with open('/etc/os-release', 'r') as rel:
            for line in rel:
                line = line.strip()
                # Skip blank lines, comments, and anything that is not a
                # KEY=value pair (the original crashed on such lines).
                if not line or line.startswith('#') or '=' not in line:
                    continue
                # Split on the first '=' only so values that themselves
                # contain '=' are preserved intact.
                k, v = line.split('=', 1)
                d[k.strip()] = v.strip().replace('"', '')
        # Some releases omit VERSION_ID (e.g. rolling releases); fall back
        # to the bare ID rather than raising KeyError.
        return "{}{}".format(d.get('ID', ''), d.get('VERSION_ID', '')) or "unknown"

    # Looking for content in /etc/redhat-release
    # works for redhat enterprise systems
    if os.path.isfile('/etc/redhat-release'):
        with open('/etc/redhat-release', 'r') as redhatlsb:
            # e.g. "CentOS Linux release 7.7.1908 (Core)"
            line = redhatlsb.readline()
        release = int(line.split("release")[1].split()[0][0])
        return "centos" + str(release)

    # Looking for content in /etc/lsb-release
    # works for ubuntu
    if os.path.isfile('/etc/lsb-release'):
        d = {}
        with open('/etc/lsb-release', 'r') as lsb:
            for line in lsb:
                # Tolerate stray non KEY=value lines instead of crashing.
                if '=' not in line:
                    continue
                k, v = line.split('=', 1)
                d[k.strip()] = v.strip()
        return d.get('DISTRIB_CODENAME', 'unknown')

    # This is what happens if we can't figure out the OS.
    return "unknown"
+
+
def bootstrap_charm_deps():
    """
    Set up the base charm dependencies so that the reactive system can run.

    Installs system packages, optionally builds a virtualenv, installs the
    bundled wheelhouse, and re-execs the interpreter so the new libraries
    are importable. Returns early when bootstrap already completed.
    """
    # execd must happen first, before any attempt to install packages or
    # access the network, because sites use this hook to do bespoke
    # configuration and install secrets so the rest of this bootstrap
    # and the charm itself can actually succeed. This call does nothing
    # unless the operator has created and populated $JUJU_CHARM_DIR/exec.d.
    execd_preinstall()
    # ensure that $JUJU_CHARM_DIR/bin is on the path, for helper scripts

    series = get_series()

    # OMG?! is build-essentials needed?
    ubuntu_packages = ['python3-pip',
                       'python3-setuptools',
                       'python3-yaml',
                       'python3-dev',
                       'python3-wheel',
                       'build-essential']

    # I'm not going to "yum group info "Development Tools"
    # omitting above madness
    centos_packages = ['python3-pip',
                       'python3-setuptools',
                       'python3-devel',
                       'python3-wheel']

    packages_needed = []
    if 'centos' in series:
        packages_needed = centos_packages
    else:
        packages_needed = ubuntu_packages

    charm_dir = os.environ['JUJU_CHARM_DIR']
    os.environ['PATH'] += ':%s' % os.path.join(charm_dir, 'bin')
    venv = os.path.abspath('../.venv')
    vbin = os.path.join(venv, 'bin')
    vpip = os.path.join(vbin, 'pip')
    vpy = os.path.join(vbin, 'python')
    # The hook name tells us whether this invocation is part of an upgrade.
    hook_name = os.path.basename(sys.argv[0])
    is_bootstrapped = os.path.exists('wheelhouse/.bootstrapped')
    is_charm_upgrade = hook_name == 'upgrade-charm'
    is_series_upgrade = hook_name == 'post-series-upgrade'
    is_post_upgrade = os.path.exists('wheelhouse/.upgraded')
    is_upgrade = (not is_post_upgrade and
                  (is_charm_upgrade or is_series_upgrade))
    if is_bootstrapped and not is_upgrade:
        # older subordinates might have downgraded charm-env, so we should
        # restore it if necessary
        install_or_update_charm_env()
        activate_venv()
        # the .upgrade file prevents us from getting stuck in a loop
        # when re-execing to activate the venv; at this point, we've
        # activated the venv, so it's safe to clear it
        if is_post_upgrade:
            os.unlink('wheelhouse/.upgraded')
        return
    if os.path.exists(venv):
        try:
            # focal installs or upgrades prior to PR 160 could leave the venv
            # in a broken state which would prevent subsequent charm upgrades
            _load_installed_versions(vpip)
        except CalledProcessError:
            is_broken_venv = True
        else:
            is_broken_venv = False
        if is_upgrade or is_broken_venv:
            # All upgrades should do a full clear of the venv, rather than
            # just updating it, to bring in updates to Python itself
            shutil.rmtree(venv)
    if is_upgrade:
        if os.path.exists('wheelhouse/.bootstrapped'):
            os.unlink('wheelhouse/.bootstrapped')
    # bootstrap wheelhouse
    if os.path.exists('wheelhouse'):
        pre_eoan = series in ('ubuntu12.04', 'precise',
                              'ubuntu14.04', 'trusty',
                              'ubuntu16.04', 'xenial',
                              'ubuntu18.04', 'bionic')
        pydistutils_lines = [
            "[easy_install]\n",
            "find_links = file://{}/wheelhouse/\n".format(charm_dir),
            "no_index=True\n",
            "index_url=\n",  # deliberately nothing here; disables it.
        ]
        if pre_eoan:
            pydistutils_lines.append("allow_hosts = ''\n")
        with open('/root/.pydistutils.cfg', 'w') as fp:
            # make sure that easy_install also only uses the wheelhouse
            # (see https://github.com/pypa/pip/issues/410)
            fp.writelines(pydistutils_lines)
        if 'centos' in series:
            yum_install(packages_needed)
        else:
            apt_install(packages_needed)
        from charms.layer import options
        cfg = options.get('basic')
        # include packages defined in layer.yaml
        if 'centos' in series:
            yum_install(cfg.get('packages', []))
        else:
            apt_install(cfg.get('packages', []))
        # if we're using a venv, set it up
        if cfg.get('use_venv'):
            if not os.path.exists(venv):
                series = get_series()
                if series in ('ubuntu12.04', 'precise',
                              'ubuntu14.04', 'trusty'):
                    apt_install(['python-virtualenv'])
                elif 'centos' in series:
                    yum_install(['python-virtualenv'])
                else:
                    apt_install(['virtualenv'])
                cmd = ['virtualenv', '-ppython3', '--never-download', venv]
                if cfg.get('include_system_packages'):
                    cmd.append('--system-site-packages')
                check_call(cmd, env=_get_subprocess_env())
            os.environ['PATH'] = ':'.join([vbin, os.environ['PATH']])
            pip = vpip
        else:
            pip = 'pip3'
            # save a copy of system pip to prevent `pip3 install -U pip`
            # from changing it
            if os.path.exists('/usr/bin/pip'):
                shutil.copy2('/usr/bin/pip', '/usr/bin/pip.save')
        pre_install_pkgs = ['pip', 'setuptools', 'setuptools-scm']
        # we bundle these packages to work around bugs in older versions (such
        # as https://github.com/pypa/pip/issues/56), but if the system already
        # provided a newer version, downgrading it can cause other problems
        _update_if_newer(pip, pre_install_pkgs)
        # install the rest of the wheelhouse deps (extract the pkg names into
        # a set so that we can ignore the pre-install packages and let pip
        # choose the best version in case there are multiple from layer
        # conflicts)
        pkgs = _load_wheelhouse_versions().keys() - set(pre_install_pkgs)
        reinstall_flag = '--force-reinstall'
        if not cfg.get('use_venv', True) and pre_eoan:
            reinstall_flag = '--ignore-installed'
        check_call([pip, 'install', '-U', reinstall_flag, '--no-index',
                    '--no-cache-dir', '-f', 'wheelhouse'] + list(pkgs),
                   env=_get_subprocess_env())
        # re-enable installation from pypi
        os.remove('/root/.pydistutils.cfg')

        # install pyyaml for centos7, since, unlike the ubuntu image, the
        # default image for centos doesn't include pyyaml; see the discussion:
        # https://discourse.jujucharms.com/t/charms-for-centos-lets-begin
        if 'centos' in series:
            check_call([pip, 'install', '-U', 'pyyaml'],
                       env=_get_subprocess_env())

        # install python packages from layer options
        if cfg.get('python_packages'):
            check_call([pip, 'install', '-U'] + cfg.get('python_packages'),
                       env=_get_subprocess_env())
        if not cfg.get('use_venv'):
            # restore system pip to prevent `pip3 install -U pip`
            # from changing it
            if os.path.exists('/usr/bin/pip.save'):
                shutil.copy2('/usr/bin/pip.save', '/usr/bin/pip')
                os.remove('/usr/bin/pip.save')
        # setup wrappers to ensure envs are used for scripts
        install_or_update_charm_env()
        for wrapper in ('charms.reactive', 'charms.reactive.sh',
                        'chlp', 'layer_option'):
            src = os.path.join('/usr/local/sbin', 'charm-env')
            dst = os.path.join('/usr/local/sbin', wrapper)
            if not os.path.exists(dst):
                os.symlink(src, dst)
        if cfg.get('use_venv'):
            shutil.copy2('bin/layer_option', vbin)
        else:
            shutil.copy2('bin/layer_option', '/usr/local/bin/')
        # re-link the charm copy to the wrapper in case charms
        # call bin/layer_option directly (as was the old pattern)
        os.remove('bin/layer_option')
        os.symlink('/usr/local/sbin/layer_option', 'bin/layer_option')
        # flag us as having already bootstrapped so we don't do it again
        open('wheelhouse/.bootstrapped', 'w').close()
        if is_upgrade:
            # flag us as having already upgraded so we don't do it again
            open('wheelhouse/.upgraded', 'w').close()
        # Ensure that the newly bootstrapped libs are available.
        # Note: this only seems to be an issue with namespace packages.
        # Non-namespace-package libs (e.g., charmhelpers) are available
        # without having to reload the interpreter. :/
        reload_interpreter(vpy if cfg.get('use_venv') else sys.argv[0])
+
+
def _load_installed_versions(pip):
    """Return {project_name: LooseVersion} for packages pinned (==) by
    `pip freeze` of the given pip executable."""
    pip_freeze = check_output([pip, 'freeze']).decode('utf8')
    versions = {}
    for pkg_ver in pip_freeze.splitlines():
        try:
            req = Requirement.parse(pkg_ver)
        except ValueError:
            # e.g. editable installs or comment lines are not parseable.
            continue
        versions.update({
            req.project_name: LooseVersion(ver)
            for op, ver in req.specs if op == '=='
        })
    return versions
+
+
def _load_wheelhouse_versions():
    """Return {package-name: LooseVersion} for archives in ./wheelhouse.

    NOTE(review): assumes `name-version.ext` file naming (sdist archives);
    wheel files, which contain extra '-' separated tags, would be mis-split
    by the rsplit below — confirm the wheelhouse holds only sdists.
    """
    versions = {}
    for wheel in glob('wheelhouse/*'):
        pkg, ver = os.path.basename(wheel).rsplit('-', 1)
        # nb: LooseVersion ignores the file extension
        versions[pkg.replace('_', '-')] = LooseVersion(ver)
    return versions
+
+
def _update_if_newer(pip, pkgs):
    """Install each pkg from the wheelhouse when it is absent or older
    than the bundled copy.

    NOTE(review): `wheelhouse[pkg]` raises KeyError if a pkg is installed
    but missing from the wheelhouse; callers only pass bundled packages,
    so this appears to hold — confirm before reusing elsewhere.
    """
    installed = _load_installed_versions(pip)
    wheelhouse = _load_wheelhouse_versions()
    for pkg in pkgs:
        if pkg not in installed or wheelhouse[pkg] > installed[pkg]:
            check_call([pip, 'install', '-U', '--no-index', '-f', 'wheelhouse',
                        pkg], env=_get_subprocess_env())
+
+
def install_or_update_charm_env():
    # Copy bin/charm-env over the system-wide /usr/local/sbin copy when the
    # bundled version is newer (or no system copy exists yet).
    # On Trusty python3-pkg-resources is not installed
    try:
        from pkg_resources import parse_version
    except ImportError:
        apt_install(['python3-pkg-resources'])
        from pkg_resources import parse_version

    try:
        installed_version = parse_version(
            check_output(['/usr/local/sbin/charm-env',
                          '--version']).decode('utf8'))
    except (CalledProcessError, FileNotFoundError):
        # Missing or broken system copy counts as the oldest version.
        installed_version = parse_version('0.0.0')
    try:
        bundled_version = parse_version(
            check_output(['bin/charm-env',
                          '--version']).decode('utf8'))
    except (CalledProcessError, FileNotFoundError):
        bundled_version = parse_version('0.0.0')
    if installed_version < bundled_version:
        shutil.copy2('bin/charm-env', '/usr/local/sbin/')
+
+
def activate_venv():
    """
    Activate the venv if enabled in ``layer.yaml``.

    This is handled automatically for normal hooks, but actions might
    need to invoke this manually, using something like:

        # Load modules from $JUJU_CHARM_DIR/lib
        import sys
        sys.path.append('lib')

        from charms.layer.basic import activate_venv
        activate_venv()

    This will ensure that modules installed in the charm's
    virtual environment are available to the action.
    """
    from charms.layer import options
    venv = os.path.abspath('../.venv')
    vbin = os.path.join(venv, 'bin')
    vpy = os.path.join(vbin, 'python')
    use_venv = options.get('basic', 'use_venv')
    # Only re-exec when the venv is enabled and we are not already
    # running inside it (avoids an exec loop).
    if use_venv and '.venv' not in sys.executable:
        # activate the venv
        os.environ['PATH'] = ':'.join([vbin, os.environ['PATH']])
        reload_interpreter(vpy)
    layer.patch_options_interface()
    layer.import_layer_libs()
+
+
def reload_interpreter(python):
    """
    Reload the python interpreter to ensure that all deps are available.

    Newly installed modules in namespace packages sometimes seem to
    not be picked up by Python 3.

    Note: os.execve replaces the current process and does not return.
    """
    os.execve(python, [python] + list(sys.argv), os.environ)
+
+
def apt_install(packages):
    """
    Install apt packages.

    This ensures a consistent set of options that are often missed but
    should really be set.
    """
    if isinstance(packages, (str, bytes)):
        packages = [packages]

    env = _get_subprocess_env()
    if 'DEBIAN_FRONTEND' not in env:
        env['DEBIAN_FRONTEND'] = 'noninteractive'

    install_cmd = ['apt-get',
                   '--option=Dpkg::Options::=--force-confold',
                   '--assume-yes',
                   'install']
    for attempt in range(3):
        try:
            check_call(install_cmd + packages, env=env)
            return
        except CalledProcessError:
            if attempt == 2:  # third attempt
                raise
            try:
                # sometimes apt-get update needs to be run
                check_call(['apt-get', 'update'], env=env)
            except CalledProcessError:
                # sometimes it's a dpkg lock issue
                pass
            sleep(5)
+
+
def yum_install(packages):
    """Installs packages with yum.

    This function largely mimics the apt_install function for consistency:
    up to three attempts, running `yum update` between failures.

    Args:
        packages: list of package names; a no-op when empty.
    """
    if not packages:
        # Nothing to do (the original fell through a dead `else: pass`).
        return
    env = os.environ.copy()
    cmd = ['yum', '-y', 'install']
    for attempt in range(3):
        try:
            check_call(cmd + packages, env=env)
        except CalledProcessError:
            if attempt == 2:
                raise
            try:
                # Refresh metadata before retrying; stale caches are a
                # common cause of transient install failures.
                check_call(['yum', 'update'], env=env)
            except CalledProcessError:
                pass
            sleep(5)
        else:
            break
+
+
def init_config_states():
    """Set reactive config.* flags reflecting the current charm config.

    Sets config.changed and config.changed.<opt> for options whose value
    changed this hook, and toggles config.set.<opt> / config.default.<opt>
    for every option defined in config.yaml.
    """
    import yaml
    from charmhelpers.core import hookenv
    from charms.reactive import set_state
    from charms.reactive import toggle_state
    config = hookenv.config()
    config_defaults = {}
    config_defs = {}
    config_yaml = os.path.join(hookenv.charm_dir(), 'config.yaml')
    if os.path.exists(config_yaml):
        with open(config_yaml) as fp:
            config_defs = yaml.safe_load(fp).get('options', {})
            config_defaults = {key: value.get('default')
                               for key, value in config_defs.items()}
    for opt in config_defs.keys():
        if config.changed(opt):
            set_state('config.changed')
            set_state('config.changed.{}'.format(opt))
        # These two flags track current value regardless of change.
        toggle_state('config.set.{}'.format(opt), config.get(opt))
        toggle_state('config.default.{}'.format(opt),
                     config.get(opt) == config_defaults[opt])
+
+
def clear_config_states():
    """Remove all config.* reactive states and flush the unit kv store."""
    from charmhelpers.core import hookenv, unitdata
    from charms.reactive import remove_state
    config = hookenv.config()
    remove_state('config.changed')
    for opt in config.keys():
        for template in ('config.changed.{}', 'config.set.{}',
                         'config.default.{}'):
            remove_state(template.format(opt))
    unitdata.kv().flush()
diff --git a/kubeapi-load-balancer/lib/charms/layer/execd.py b/kubeapi-load-balancer/lib/charms/layer/execd.py
new file mode 100644
index 0000000..438d9a1
--- /dev/null
+++ b/kubeapi-load-balancer/lib/charms/layer/execd.py
@@ -0,0 +1,114 @@
+# Copyright 2014-2016 Canonical Limited.
+#
+# This file is part of layer-basic, the reactive base layer for Juju.
+#
+# charm-helpers is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License version 3 as
+# published by the Free Software Foundation.
+#
+# charm-helpers is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with charm-helpers. If not, see .
+
+# This module may only import from the Python standard library.
+import os
+import sys
+import subprocess
+import time
+
+'''
+execd/preinstall
+
+Read the layer-basic docs for more info on how to use this feature.
+https://charmsreactive.readthedocs.io/en/latest/layer-basic.html#exec-d-support
+'''
+
+
def default_execd_dir():
    """Return the default exec.d directory under $JUJU_CHARM_DIR."""
    charm_dir = os.environ['JUJU_CHARM_DIR']
    return os.path.join(charm_dir, 'exec.d')
+
+
def execd_module_paths(execd_dir=None):
    """Generate a list of full paths to modules within execd_dir."""
    target = execd_dir or default_execd_dir()
    if not os.path.exists(target):
        return
    for entry in os.listdir(target):
        candidate = os.path.join(target, entry)
        # Only directories count as modules.
        if os.path.isdir(candidate):
            yield candidate
+
+
def execd_submodule_paths(command, execd_dir=None):
    """Generate a list of full paths to the specified command within exec_dir.
    """
    for module in execd_module_paths(execd_dir):
        candidate = os.path.join(module, command)
        # Yield only regular files that are executable.
        if os.path.isfile(candidate) and os.access(candidate, os.X_OK):
            yield candidate
+
+
def execd_sentinel_path(submodule_path):
    """Return the path of the .done sentinel recording a submodule run."""
    module_path, submodule_name = os.path.split(submodule_path)
    execd_path, module_name = os.path.split(module_path)
    sentinel = '.{}_{}.done'.format(module_name, submodule_name)
    return os.path.join(execd_path, sentinel)
+
+
+def execd_run(command, execd_dir=None, stop_on_error=True, stderr=None):
+ """Run command for each module within execd_dir which defines it."""
+ if stderr is None:
+ stderr = sys.stdout
+ for submodule_path in execd_submodule_paths(command, execd_dir):
+ # Only run each execd once. We cannot simply run them in the
+ # install hook, as potentially storage hooks are run before that.
+ # We cannot rely on them being idempotent.
+ sentinel = execd_sentinel_path(submodule_path)
+ if os.path.exists(sentinel):
+ continue
+
+ try:
+ subprocess.check_call([submodule_path], stderr=stderr,
+ universal_newlines=True)
+ with open(sentinel, 'w') as f:
+ f.write('{} ran successfully {}\n'.format(submodule_path,
+ time.ctime()))
+ f.write('Removing this file will cause it to be run again\n')
+ except subprocess.CalledProcessError as e:
+ # Logs get the details. We can't use juju-log, as the
+ # output may be substantial and exceed command line
+ # length limits.
+ print("ERROR ({}) running {}".format(e.returncode, e.cmd),
+ file=stderr)
+ print("STDOUT<>> `get_version('kubelet')
+ (1, 6, 0)
+
+ """
+ cmd = '{} --version'.format(bin_name).split()
+ version_string = subprocess.check_output(cmd).decode('utf-8')
+ return tuple(int(q) for q in re.findall("[0-9]+", version_string)[:3])
+
+
def retry(times, delay_secs):
    """ Decorator for retrying a method call.

    Args:
        times: How many times should we retry before giving up
        delay_secs: Delay in secs

    Returns: A callable that would return the last call outcome
    """

    def retry_decorator(func):
        """ Decorator to wrap the function provided.

        Args:
            func: Provided function should return either True od False

        Returns: A callable that would return the last call outcome

        """
        def _wrapped(*args, **kwargs):
            # Initial attempt, then up to `times` retries with a delay
            # before each; stop as soon as a truthy result is seen.
            outcome = func(*args, **kwargs)
            for _ in range(times):
                if outcome:
                    break
                sleep(delay_secs)
                outcome = func(*args, **kwargs)
            return outcome
        return _wrapped

    return retry_decorator
+
+
def calculate_resource_checksum(resource):
    ''' Calculate a checksum for a resource '''
    # md5 is used for change detection only, not for security.
    md5 = hashlib.md5()
    path = hookenv.resource_get(resource)
    if path:
        with open(path, 'rb') as f:
            data = f.read()
            md5.update(data)
    # When no resource is attached, this is the empty-input md5 digest.
    return md5.hexdigest()
+
+
def get_resource_checksum_db_key(checksum_prefix, resource):
    ''' Convert a resource name to a resource checksum database key. '''
    return '{}{}'.format(checksum_prefix, resource)
+
+
def migrate_resource_checksums(checksum_prefix, snap_resources):
    ''' Migrate resource checksums from the old schema to the new one '''
    for resource in snap_resources:
        new_key = get_resource_checksum_db_key(checksum_prefix, resource)
        # Only migrate when no checksum is stored under the new key yet.
        if not db.get(new_key):
            path = hookenv.resource_get(resource)
            if path:
                # old key from charms.reactive.helpers.any_file_changed
                old_key = 'reactive.files_changed.' + path
                old_checksum = db.get(old_key)
                db.set(new_key, old_checksum)
            else:
                # No resource is attached. Previously, this meant no checksum
                # would be calculated and stored. But now we calculate it as if
                # it is a 0-byte resource, so let's go ahead and do that.
                zero_checksum = hashlib.md5().hexdigest()
                db.set(new_key, zero_checksum)
+
+
def check_resources_for_upgrade_needed(checksum_prefix, snap_resources):
    """Return True when any resource's checksum differs from the stored one."""
    hookenv.status_set('maintenance', 'Checking resources')

    def _changed(resource):
        # Compare the freshly computed checksum with what the DB recorded.
        key = get_resource_checksum_db_key(checksum_prefix, resource)
        return calculate_resource_checksum(resource) != db.get(key)

    return any(_changed(resource) for resource in snap_resources)
+
+
def calculate_and_store_resource_checksums(checksum_prefix, snap_resources):
    """Record the current checksum of every snap resource in the DB."""
    for resource in snap_resources:
        db.set(get_resource_checksum_db_key(checksum_prefix, resource),
               calculate_resource_checksum(resource))
+
+
def get_ingress_address(endpoint_name):
    """Return the preferred ingress IP for the given endpoint binding.

    Falls back to the unit's private-address on juju versions without
    network-get / spaces support. Fan-network addresses (240.0.0.0/4)
    are sorted last.
    """
    try:
        network_info = hookenv.network_get(endpoint_name)
    except NotImplementedError:
        network_info = {}

    if not network_info or 'ingress-addresses' not in network_info:
        # if they don't have ingress-addresses they are running a juju that
        # doesn't support spaces, so just return the private address
        return hookenv.unit_get('private-address')

    addresses = network_info['ingress-addresses']

    # Need to prefer non-fan IP addresses due to various issues, e.g.
    # https://bugs.launchpad.net/charm-gcp-integrator/+bug/1822997
    # Fan typically likes to use IPs in the 240.0.0.0/4 block, so we'll
    # prioritize those last. Not technically correct, but good enough.
    try:
        sort_key = lambda a: int(a.partition('.')[0]) >= 240  # noqa: E731
        addresses = sorted(addresses, key=sort_key)
    except Exception:
        # e.g. an IPv6 address won't parse as int; keep the original order.
        hookenv.log(traceback.format_exc())

    return addresses[0]
+
+
def get_ingress_address6(endpoint_name):
    """Return the first IPv6 ingress address for endpoint_name, or None."""
    try:
        network_info = hookenv.network_get(endpoint_name)
    except NotImplementedError:
        network_info = {}

    if not network_info or 'ingress-addresses' not in network_info:
        # Older juju without network-get support: no IPv6 info available.
        return None

    for addr in network_info['ingress-addresses']:
        parsed = ipaddress.ip_interface(addr).ip
        if parsed.version == 6:
            return str(parsed)
    return None
+
+
def service_restart(service_name):
    '''Restart a system service, surfacing progress via workload status.'''
    hookenv.status_set('maintenance', 'Restarting {0} service'.format(
        service_name))
    host.service_restart(service_name)
+
+
def service_start(service_name):
    '''Start a system service, logging the action.

    Bug fix: this previously called host.service_stop(), stopping the very
    service it logged it was starting.
    '''
    hookenv.log('Starting {0} service.'.format(service_name))
    host.service_start(service_name)
+
+
def service_stop(service_name):
    '''Stop a system service, logging the action.'''
    hookenv.log('Stopping {0} service.'.format(service_name))
    host.service_stop(service_name)
+
+
def arch():
    '''Return the package architecture as a string (e.g. 'amd64').

    NOTE(review): the original docstring claimed an exception is raised for
    architectures unsupported by kubernetes, but no such check exists here;
    dpkg's output is returned as-is.
    '''
    # Get the package architecture for this system.
    architecture = check_output(['dpkg', '--print-architecture']).rstrip()
    # Convert the binary result into a string.
    architecture = architecture.decode('utf-8')
    return architecture
+
+
+def get_service_ip(service, namespace="kube-system", errors_fatal=True):
+ try:
+ output = kubectl('get', 'service', '--namespace', namespace, service,
+ '--output', 'json')
+ except CalledProcessError:
+ if errors_fatal:
+ raise
+ else:
+ return None
+ else:
+ svc = json.loads(output.decode())
+ return svc['spec']['clusterIP']
+
+
+def kubectl(*args):
+ ''' Run a kubectl cli command with a config file. Returns stdout and throws
+ an error if the command fails. '''
+ command = ['kubectl', '--kubeconfig=' + kubeclientconfig_path] + list(args)
+ hookenv.log('Executing {}'.format(command))
+ return check_output(command)
+
+
+def kubectl_success(*args):
+ ''' Runs kubectl with the given args. Returns True if successful, False if
+ not. '''
+ try:
+ kubectl(*args)
+ return True
+ except CalledProcessError:
+ return False
+
+
+def kubectl_manifest(operation, manifest):
+    ''' Wrap the kubectl command for file-path manifest resources.
+    :param operation - one of get, create, delete, replace
+    :param manifest - filepath to the manifest
+    '''
+    # Deletions are a special case
+    if operation == 'delete':
+        # Ensure we immediately remove requested resources with --now
+        return kubectl_success(operation, '-f', manifest, '--now')
+    else:
+        # Guard against an error re-creating the same manifest multiple times
+        if operation == 'create':
+            # If the resource already exists, it's probably safe to assume
+            # creation already succeeded, so report success without re-creating.
+            if kubectl_success('get', '-f', manifest):
+                hookenv.log('Skipping definition for {}'.format(manifest))
+                return True
+        # Execute the requested command that did not match any of the special
+        # cases above
+        return kubectl_success(operation, '-f', manifest)
+
+
+def get_node_name():
+ kubelet_extra_args = parse_extra_args('kubelet-extra-args')
+ cloud_provider = kubelet_extra_args.get('cloud-provider', '')
+ if is_state('endpoint.aws.ready'):
+ cloud_provider = 'aws'
+ elif is_state('endpoint.gcp.ready'):
+ cloud_provider = 'gce'
+ elif is_state('endpoint.openstack.ready'):
+ cloud_provider = 'openstack'
+ elif is_state('endpoint.vsphere.ready'):
+ cloud_provider = 'vsphere'
+ elif is_state('endpoint.azure.ready'):
+ cloud_provider = 'azure'
+ if cloud_provider == 'aws':
+ return getfqdn().lower()
+ else:
+ return gethostname().lower()
+
+
+def create_kubeconfig(kubeconfig, server, ca, key=None, certificate=None,
+ user='ubuntu', context='juju-context',
+ cluster='juju-cluster', password=None, token=None,
+ keystone=False, aws_iam_cluster_id=None):
+ '''Create a configuration for Kubernetes based on path using the supplied
+ arguments for values of the Kubernetes server, CA, key, certificate, user
+ context and cluster.'''
+ if not key and not certificate and not password and not token:
+ raise ValueError('Missing authentication mechanism.')
+
+ # token and password are mutually exclusive. Error early if both are
+ # present. The developer has requested an impossible situation.
+ # see: kubectl config set-credentials --help
+ if token and password:
+ raise ValueError('Token and Password are mutually exclusive.')
+ # Create the config file with the address of the master server.
+ cmd = 'kubectl config --kubeconfig={0} set-cluster {1} ' \
+ '--server={2} --certificate-authority={3} --embed-certs=true'
+ check_call(split(cmd.format(kubeconfig, cluster, server, ca)))
+ # Delete old users
+ cmd = 'kubectl config --kubeconfig={0} unset users'
+ check_call(split(cmd.format(kubeconfig)))
+ # Create the credentials using the client flags.
+ cmd = 'kubectl config --kubeconfig={0} ' \
+ 'set-credentials {1} '.format(kubeconfig, user)
+
+ if key and certificate:
+ cmd = '{0} --client-key={1} --client-certificate={2} '\
+ '--embed-certs=true'.format(cmd, key, certificate)
+ if password:
+ cmd = "{0} --username={1} --password={2}".format(cmd, user, password)
+ # This is mutually exclusive from password. They will not work together.
+ if token:
+ cmd = "{0} --token={1}".format(cmd, token)
+ check_call(split(cmd))
+ # Create a default context with the cluster.
+ cmd = 'kubectl config --kubeconfig={0} set-context {1} ' \
+ '--cluster={2} --user={3}'
+ check_call(split(cmd.format(kubeconfig, context, cluster, user)))
+ # Make the config use this new context.
+ cmd = 'kubectl config --kubeconfig={0} use-context {1}'
+ check_call(split(cmd.format(kubeconfig, context)))
+ if keystone:
+ # create keystone user
+ cmd = 'kubectl config --kubeconfig={0} ' \
+ 'set-credentials keystone-user'.format(kubeconfig)
+ check_call(split(cmd))
+ # create keystone context
+ cmd = 'kubectl config --kubeconfig={0} ' \
+ 'set-context --cluster={1} ' \
+ '--user=keystone-user keystone'.format(kubeconfig, cluster)
+ check_call(split(cmd))
+ # use keystone context
+ cmd = 'kubectl config --kubeconfig={0} ' \
+ 'use-context keystone'.format(kubeconfig)
+ check_call(split(cmd))
+ # manually add exec command until kubectl can do it for us
+ with open(kubeconfig, "r") as f:
+ content = f.read()
+ content = content.replace("""- name: keystone-user
+ user: {}""", """- name: keystone-user
+ user:
+ exec:
+ command: "/snap/bin/client-keystone-auth"
+ apiVersion: "client.authentication.k8s.io/v1beta1"
+""")
+ with open(kubeconfig, "w") as f:
+ f.write(content)
+ if aws_iam_cluster_id:
+ # create aws-iam context
+ cmd = 'kubectl config --kubeconfig={0} ' \
+ 'set-context --cluster={1} ' \
+ '--user=aws-iam-user aws-iam-authenticator'
+ check_call(split(cmd.format(kubeconfig, cluster)))
+
+ # append a user for aws-iam
+ cmd = 'kubectl --kubeconfig={0} config set-credentials ' \
+ 'aws-iam-user --exec-command=aws-iam-authenticator ' \
+ '--exec-arg="token" --exec-arg="-i" --exec-arg="{1}" ' \
+ '--exec-arg="-r" --exec-arg="<>" ' \
+ '--exec-api-version=client.authentication.k8s.io/v1alpha1'
+ check_call(split(cmd.format(kubeconfig, aws_iam_cluster_id)))
+
+ # not going to use aws-iam context by default since we don't have
+ # the desired arn. This will make the config not usable if copied.
+
+ # cmd = 'kubectl config --kubeconfig={0} ' \
+ # 'use-context aws-iam-authenticator'.format(kubeconfig)
+ # check_call(split(cmd))
+
+
+def parse_extra_args(config_key):
+ elements = hookenv.config().get(config_key, '').split()
+ args = {}
+
+ for element in elements:
+ if '=' in element:
+ key, _, value = element.partition('=')
+ args[key] = value
+ else:
+ args[element] = 'true'
+
+ return args
+
+
+def configure_kubernetes_service(key, service, base_args, extra_args_key):
+ db = unitdata.kv()
+
+ prev_args_key = key + service
+ prev_snap_args = db.get(prev_args_key) or {}
+
+ extra_args = parse_extra_args(extra_args_key)
+
+ args = {}
+ args.update(base_args)
+ args.update(extra_args)
+
+ # CIS benchmark action may inject kv config to pass failing tests. Merge
+ # these after the func args as they should take precedence.
+ cis_args_key = 'cis-' + service
+ cis_args = db.get(cis_args_key) or {}
+ args.update(cis_args)
+
+ # Remove any args with 'None' values (all k8s args are 'k=v') and
+ # construct an arg string for use by 'snap set'.
+ args = {k: v for k, v in args.items() if v is not None}
+ args = ['--%s="%s"' % arg for arg in args.items()]
+ args = ' '.join(args)
+
+ snap_opts = {}
+ for arg in prev_snap_args:
+ # remove previous args by setting to null
+ snap_opts[arg] = 'null'
+ snap_opts['args'] = args
+ snap_opts = ['%s=%s' % opt for opt in snap_opts.items()]
+
+ cmd = ['snap', 'set', service] + snap_opts
+ check_call(cmd)
+
+ # Now that we've started doing snap configuration through the "args"
+ # option, we should never need to clear previous args again.
+ db.set(prev_args_key, {})
+
+
+def _snap_common_path(component):
+ return Path('/var/snap/{}/common'.format(component))
+
+
+def cloud_config_path(component):
+ return _snap_common_path(component) / 'cloud-config.conf'
+
+
+def _gcp_creds_path(component):
+ return _snap_common_path(component) / 'gcp-creds.json'
+
+
+def _daemon_env_path(component):
+ return _snap_common_path(component) / 'environment'
+
+
+def _cloud_endpoint_ca_path(component):
+ return _snap_common_path(component) / 'cloud-endpoint-ca.crt'
+
+
+def encryption_config_path():
+ apiserver_snap_common_path = _snap_common_path('kube-apiserver')
+ encryption_conf_dir = apiserver_snap_common_path / 'encryption'
+ return encryption_conf_dir / 'encryption_config.yaml'
+
+
+def write_gcp_snap_config(component):
+ # gcp requires additional credentials setup
+ gcp = endpoint_from_flag('endpoint.gcp.ready')
+ creds_path = _gcp_creds_path(component)
+ with creds_path.open('w') as fp:
+ os.fchmod(fp.fileno(), 0o600)
+ fp.write(gcp.credentials)
+
+ # create a cloud-config file that sets token-url to nil to make the
+ # services use the creds env var instead of the metadata server, as
+ # well as making the cluster multizone
+ comp_cloud_config_path = cloud_config_path(component)
+ comp_cloud_config_path.write_text('[Global]\n'
+ 'token-url = nil\n'
+ 'multizone = true\n')
+
+ daemon_env_path = _daemon_env_path(component)
+ if daemon_env_path.exists():
+ daemon_env = daemon_env_path.read_text()
+ if not daemon_env.endswith('\n'):
+ daemon_env += '\n'
+ else:
+ daemon_env = ''
+ if gcp_creds_env_key not in daemon_env:
+ daemon_env += '{}={}\n'.format(gcp_creds_env_key, creds_path)
+ daemon_env_path.parent.mkdir(parents=True, exist_ok=True)
+ daemon_env_path.write_text(daemon_env)
+
+
+def generate_openstack_cloud_config():
+ # openstack requires additional credentials setup
+ openstack = endpoint_from_flag('endpoint.openstack.ready')
+
+ lines = [
+ '[Global]',
+ 'auth-url = {}'.format(openstack.auth_url),
+ 'region = {}'.format(openstack.region),
+ 'username = {}'.format(openstack.username),
+ 'password = {}'.format(openstack.password),
+ 'tenant-name = {}'.format(openstack.project_name),
+ 'domain-name = {}'.format(openstack.user_domain_name),
+ 'tenant-domain-name = {}'.format(openstack.project_domain_name),
+ ]
+ if openstack.endpoint_tls_ca:
+ lines.append('ca-file = /etc/config/endpoint-ca.cert')
+
+ lines.extend([
+ '',
+ '[LoadBalancer]',
+ ])
+
+ if openstack.has_octavia in (True, None):
+ # Newer integrator charm will detect whether underlying OpenStack has
+ # Octavia enabled so we can set this intelligently. If we're still
+ # related to an older integrator, though, default to assuming Octavia
+ # is available.
+ lines.append('use-octavia = true')
+ else:
+ lines.append('use-octavia = false')
+ lines.append('lb-provider = haproxy')
+ if openstack.subnet_id:
+ lines.append('subnet-id = {}'.format(openstack.subnet_id))
+ if openstack.floating_network_id:
+ lines.append('floating-network-id = {}'.format(
+ openstack.floating_network_id))
+ if openstack.lb_method:
+ lines.append('lb-method = {}'.format(
+ openstack.lb_method))
+ if openstack.manage_security_groups:
+ lines.append('manage-security-groups = {}'.format(
+ openstack.manage_security_groups))
+ if any([openstack.bs_version,
+ openstack.trust_device_path,
+ openstack.ignore_volume_az]):
+ lines.append('')
+ lines.append('[BlockStorage]')
+ if openstack.bs_version is not None:
+ lines.append('bs-version = {}'.format(openstack.bs_version))
+ if openstack.trust_device_path is not None:
+ lines.append('trust-device-path = {}'.format(
+ openstack.trust_device_path))
+ if openstack.ignore_volume_az is not None:
+ lines.append('ignore-volume-az = {}'.format(
+ openstack.ignore_volume_az))
+ return '\n'.join(lines) + '\n'
+
+
+def write_azure_snap_config(component):
+ azure = endpoint_from_flag('endpoint.azure.ready')
+ comp_cloud_config_path = cloud_config_path(component)
+ comp_cloud_config_path.write_text(json.dumps({
+ 'useInstanceMetadata': True,
+ 'useManagedIdentityExtension': True,
+ 'subscriptionId': azure.subscription_id,
+ 'resourceGroup': azure.resource_group,
+ 'location': azure.resource_group_location,
+ 'vnetName': azure.vnet_name,
+ 'vnetResourceGroup': azure.vnet_resource_group,
+ 'subnetName': azure.subnet_name,
+ 'securityGroupName': azure.security_group_name,
+ 'loadBalancerSku': 'standard'
+ }))
+
+
+def configure_kube_proxy(configure_prefix, api_servers, cluster_cidr,
+ bind_address=None):
+ kube_proxy_opts = {}
+ kube_proxy_opts['cluster-cidr'] = cluster_cidr
+ kube_proxy_opts['kubeconfig'] = kubeproxyconfig_path
+ kube_proxy_opts['logtostderr'] = 'true'
+ kube_proxy_opts['v'] = '0'
+ num_apis = len(api_servers)
+ kube_proxy_opts['master'] = api_servers[get_unit_number() % num_apis]
+ kube_proxy_opts['hostname-override'] = get_node_name()
+ if bind_address:
+ kube_proxy_opts['bind-address'] = bind_address
+ elif is_ipv6(cluster_cidr):
+ kube_proxy_opts['bind-address'] = '::'
+
+ if host.is_container():
+ kube_proxy_opts['conntrack-max-per-core'] = '0'
+
+ if is_dual_stack(cluster_cidr):
+ kube_proxy_opts['feature-gates'] = "IPv6DualStack=true"
+
+ configure_kubernetes_service(configure_prefix, 'kube-proxy',
+ kube_proxy_opts, 'proxy-extra-args')
+
+
+def get_unit_number():
+ return int(hookenv.local_unit().split('/')[1])
+
+
+def cluster_cidr():
+ '''Return the cluster CIDR provided by the CNI'''
+ cni = endpoint_from_flag('cni.available')
+ if not cni:
+ return None
+ config = hookenv.config()
+ if 'default-cni' in config:
+ # master
+ default_cni = config['default-cni']
+ else:
+ # worker
+ kube_control = endpoint_from_flag('kube-control.dns.available')
+ if not kube_control:
+ return None
+ default_cni = kube_control.get_default_cni()
+ return cni.get_config(default=default_cni)['cidr']
+
+
+def is_dual_stack(cidrs):
+ '''Detect IPv4/IPv6 dual stack from CIDRs'''
+ return {net.version for net in get_networks(cidrs)} == {4, 6}
+
+
+def is_ipv4(cidrs):
+    '''Detect IPv4 from CIDRs'''
+    return get_ipv4_network(cidrs) is not None
+
+
+def is_ipv6(cidrs):
+ '''Detect IPv6 from CIDRs'''
+ return get_ipv6_network(cidrs) is not None
+
+
+def is_ipv6_preferred(cidrs):
+    '''Detect if IPv6 is preferred from CIDRs (i.e. the first CIDR is IPv6)'''
+    return get_networks(cidrs)[0].version == 6
+
+
+def get_networks(cidrs):
+ '''Convert a comma-separated list of CIDRs to a list of networks.'''
+ if not cidrs:
+ return []
+ return [ipaddress.ip_interface(cidr).network for cidr in cidrs.split(',')]
+
+
+def get_ipv4_network(cidrs):
+ '''Get the IPv4 network from the given CIDRs or None'''
+ return {net.version: net for net in get_networks(cidrs)}.get(4)
+
+
+def get_ipv6_network(cidrs):
+ '''Get the IPv6 network from the given CIDRs or None'''
+ return {net.version: net for net in get_networks(cidrs)}.get(6)
+
+
+def enable_ipv6_forwarding():
+ '''Enable net.ipv6.conf.all.forwarding in sysctl if it is not already.'''
+ check_call(['sysctl', 'net.ipv6.conf.all.forwarding=1'])
+
+
+def get_bind_addrs(ipv4=True, ipv6=True):
+ '''Get all global-scoped addresses that we might bind to.'''
+ try:
+ output = check_output(["ip", "-br", "addr", "show", "scope", "global"])
+ except CalledProcessError:
+ # stderr will have any details, and go to the log
+ hookenv.log('Unable to determine global addresses', hookenv.ERROR)
+ return []
+
+ ignore_interfaces = ('lxdbr', 'flannel', 'cni', 'virbr', 'docker')
+ accept_versions = set()
+ if ipv4:
+ accept_versions.add(4)
+ if ipv6:
+ accept_versions.add(6)
+
+ addrs = []
+ for line in output.decode('utf8').splitlines():
+ intf, state, *intf_addrs = line.split()
+ if state != 'UP' or any(intf.startswith(prefix)
+ for prefix in ignore_interfaces):
+ continue
+ for addr in intf_addrs:
+ ip_addr = ipaddress.ip_interface(addr).ip
+ if ip_addr.version in accept_versions:
+ addrs.append(str(ip_addr))
+ return addrs
+
+
+class InvalidVMwareHost(Exception):
+ pass
+
+
+def _get_vmware_uuid():
+ serial_id_file = '/sys/class/dmi/id/product_serial'
+ # The serial id from VMWare VMs comes in following format:
+ # VMware-42 28 13 f5 d4 20 71 61-5d b0 7b 96 44 0c cf 54
+ try:
+ with open(serial_id_file, 'r') as f:
+ serial_string = f.read().strip()
+ if "VMware-" not in serial_string:
+ hookenv.log("Unable to find VMware ID in "
+ "product_serial: {}".format(serial_string))
+ raise InvalidVMwareHost
+ serial_string = serial_string.split(
+ "VMware-")[1].replace(" ", "").replace("-", "")
+ uuid = "%s-%s-%s-%s-%s" % (
+ serial_string[0:8], serial_string[8:12], serial_string[12:16],
+ serial_string[16:20], serial_string[20:32])
+ except IOError as err:
+ hookenv.log("Unable to read UUID from sysfs: {}".format(err))
+ uuid = 'UNKNOWN'
+
+ return uuid
+
diff --git a/kubeapi-load-balancer/lib/charms/layer/nagios.py b/kubeapi-load-balancer/lib/charms/layer/nagios.py
new file mode 100644
index 0000000..f6ad998
--- /dev/null
+++ b/kubeapi-load-balancer/lib/charms/layer/nagios.py
@@ -0,0 +1,60 @@
+from pathlib import Path
+
+NAGIOS_PLUGINS_DIR = '/usr/lib/nagios/plugins'
+
+
+def install_nagios_plugin_from_text(text, plugin_name):
+ """ Install a nagios plugin.
+
+ Args:
+ text: Plugin source code (str)
+ plugin_name: Name of the plugin in nagios
+
+ Returns: Full path to installed plugin
+ """
+ dest_path = Path(NAGIOS_PLUGINS_DIR) / plugin_name
+ if dest_path.exists():
+ # we could complain here, test the files are the same contents, or
+ # just bail. Idempotency is a big deal in Juju, so I'd like to be
+ # ok with being called with the same file multiple times, but we
+ # certainly want to catch the case where multiple layers are using
+ # the same filename for their nagios checks.
+ dest = dest_path.read_text()
+ if dest == text:
+ # same file
+ return dest_path
+ # different file contents!
+ # maybe someone changed options or something so we need to write
+ # it again
+
+ dest_path.write_text(text)
+ dest_path.chmod(0o755)
+
+ return dest_path
+
+
+def install_nagios_plugin_from_file(source_file_path, plugin_name):
+ """ Install a nagios plugin.
+
+ Args:
+ source_file_path: Path to plugin source file
+ plugin_name: Name of the plugin in nagios
+
+ Returns: Full path to installed plugin
+ """
+
+ return install_nagios_plugin_from_text(Path(source_file_path).read_text(),
+ plugin_name)
+
+
+def remove_nagios_plugin(plugin_name):
+ """ Remove a nagios plugin.
+
+ Args:
+ plugin_name: Name of the plugin in nagios
+
+ Returns: None
+ """
+ dest_path = Path(NAGIOS_PLUGINS_DIR) / plugin_name
+ if dest_path.exists():
+ dest_path.unlink()
diff --git a/kubeapi-load-balancer/lib/charms/layer/nginx.py b/kubeapi-load-balancer/lib/charms/layer/nginx.py
new file mode 100644
index 0000000..7194400
--- /dev/null
+++ b/kubeapi-load-balancer/lib/charms/layer/nginx.py
@@ -0,0 +1,74 @@
+from charmhelpers.core.templating import render
+from charmhelpers.core import hookenv
+from charmhelpers.core import host
+
+import toml
+import os
+
+
+def load_site():
+ if not os.path.isfile('site.toml'):
+ return {}
+
+ with open('site.toml') as fp:
+ conf = toml.loads(fp.read())
+
+ return conf
+
+
+def get_app_path():
+ site = load_site()
+ if 'app_path' in site:
+ return site['app_path']
+ return '/srv/app'
+
+
+def remove_default_site():
+ """
+ Remove the default enabled
+ site.
+
+ :return: Boolean
+ """
+ site_path = '/etc/nginx/sites-enabled/default'
+ if os.path.isfile(site_path):
+ os.remove(site_path)
+ host.service_reload('nginx', restart_on_failure=True)
+ return True
+
+ return False
+
+
+def configure_site(site, template, **kwargs):
+ """ configures vhost
+
+ Arguments:
+ site: Site name
+ template: template to process in templates/
+ **kwargs: additional dict items to append to template variables exposed
+ through the site.toml
+ """
+ hookenv.status_set('maintenance', 'Configuring site {}'.format(site))
+
+ config = hookenv.config()
+ context = load_site()
+ context['host'] = config['host']
+ context['port'] = config['port']
+ context.update(**kwargs)
+ conf_path = '/etc/nginx/sites-available/{}'.format(site)
+ if os.path.exists(conf_path):
+ os.remove(conf_path)
+ render(source=template,
+ target=conf_path,
+ context=context)
+
+ symlink_path = '/etc/nginx/sites-enabled/{}'.format(site)
+ if os.path.exists(symlink_path):
+ os.unlink(symlink_path)
+ os.symlink(conf_path, symlink_path)
+ hookenv.log('Wrote vhost config {} to {}'.format(context, template),
+ 'info')
+
+ if not remove_default_site():
+ host.service_reload('nginx', restart_on_failure=True)
+ hookenv.status_set('active', '')
diff --git a/kubeapi-load-balancer/lib/charms/layer/options.py b/kubeapi-load-balancer/lib/charms/layer/options.py
new file mode 100644
index 0000000..d3f273f
--- /dev/null
+++ b/kubeapi-load-balancer/lib/charms/layer/options.py
@@ -0,0 +1,26 @@
+import os
+from pathlib import Path
+
+import yaml
+
+
+_CHARM_PATH = Path(os.environ.get('JUJU_CHARM_DIR', '.'))
+_DEFAULT_FILE = _CHARM_PATH / 'layer.yaml'
+_CACHE = {}
+
+
+def get(section=None, option=None, layer_file=_DEFAULT_FILE):
+ if option and not section:
+ raise ValueError('Cannot specify option without section')
+
+ layer_file = (_CHARM_PATH / layer_file).resolve()
+ if layer_file not in _CACHE:
+ with layer_file.open() as fp:
+ _CACHE[layer_file] = yaml.safe_load(fp.read())
+
+ data = _CACHE[layer_file].get('options', {})
+ if section:
+ data = data.get(section, {})
+ if option:
+ data = data.get(option)
+ return data
diff --git a/kubeapi-load-balancer/lib/charms/layer/status.py b/kubeapi-load-balancer/lib/charms/layer/status.py
new file mode 100644
index 0000000..95b2997
--- /dev/null
+++ b/kubeapi-load-balancer/lib/charms/layer/status.py
@@ -0,0 +1,189 @@
+import inspect
+import errno
+import subprocess
+import yaml
+from enum import Enum
+from functools import wraps
+from pathlib import Path
+
+from charmhelpers.core import hookenv
+from charms import layer
+
+
+_orig_call = subprocess.call
+_statuses = {'_initialized': False,
+ '_finalized': False}
+
+
+class WorkloadState(Enum):
+ """
+ Enum of the valid workload states.
+
+ Valid options are:
+
+ * `WorkloadState.MAINTENANCE`
+ * `WorkloadState.BLOCKED`
+ * `WorkloadState.WAITING`
+ * `WorkloadState.ACTIVE`
+ """
+ # note: order here determines precedence of state
+ MAINTENANCE = 'maintenance'
+ BLOCKED = 'blocked'
+ WAITING = 'waiting'
+ ACTIVE = 'active'
+
+
+def maintenance(message):
+ """
+ Set the status to the `MAINTENANCE` state with the given operator message.
+
+ # Parameters
+ `message` (str): Message to convey to the operator.
+ """
+ status_set(WorkloadState.MAINTENANCE, message)
+
+
+def maint(message):
+ """
+ Shorthand alias for
+ [maintenance](status.md#charms.layer.status.maintenance).
+
+ # Parameters
+ `message` (str): Message to convey to the operator.
+ """
+ maintenance(message)
+
+
+def blocked(message):
+ """
+ Set the status to the `BLOCKED` state with the given operator message.
+
+ # Parameters
+ `message` (str): Message to convey to the operator.
+ """
+ status_set(WorkloadState.BLOCKED, message)
+
+
+def waiting(message):
+ """
+ Set the status to the `WAITING` state with the given operator message.
+
+ # Parameters
+ `message` (str): Message to convey to the operator.
+ """
+ status_set(WorkloadState.WAITING, message)
+
+
+def active(message):
+ """
+ Set the status to the `ACTIVE` state with the given operator message.
+
+ # Parameters
+ `message` (str): Message to convey to the operator.
+ """
+ status_set(WorkloadState.ACTIVE, message)
+
+
+def status_set(workload_state, message):
+ """
+ Set the status to the given workload state with a message.
+
+ # Parameters
+ `workload_state` (WorkloadState or str): State of the workload. Should be
+ a [WorkloadState](status.md#charms.layer.status.WorkloadState) enum
+ member, or the string value of one of those members.
+ `message` (str): Message to convey to the operator.
+ """
+ if not isinstance(workload_state, WorkloadState):
+ workload_state = WorkloadState(workload_state)
+ if workload_state is WorkloadState.MAINTENANCE:
+ _status_set_immediate(workload_state, message)
+ return
+ layer = _find_calling_layer()
+ _statuses.setdefault(workload_state, []).append((layer, message))
+ if not _statuses['_initialized'] or _statuses['_finalized']:
+ # We either aren't initialized, so the finalizer may never be run,
+ # or the finalizer has already run, so it won't run again. In either
+ # case, we need to manually invoke it to ensure the status gets set.
+ _finalize()
+
+
+def _find_calling_layer():
+ for frame in inspect.stack():
+ # switch to .filename when trusty (Python 3.4) is EOL
+ fn = Path(frame[1])
+ if fn.parent.stem not in ('reactive', 'layer', 'charms'):
+ continue
+ layer_name = fn.stem
+ if layer_name == 'status':
+ continue # skip our own frames
+ return layer_name
+ return None
+
+
+def _initialize():
+ if not _statuses['_initialized']:
+ if layer.options.get('status', 'patch-hookenv'):
+ _patch_hookenv()
+ hookenv.atexit(_finalize)
+ _statuses['_initialized'] = True
+
+
+def _finalize():
+ if _statuses['_initialized']:
+ # If we haven't been initialized, we can't truly be finalized.
+ # This makes things more efficient if an action sets a status
+ # but subsequently starts the reactive bus.
+ _statuses['_finalized'] = True
+ charm_name = hookenv.charm_name()
+ charm_dir = Path(hookenv.charm_dir())
+ with charm_dir.joinpath('layer.yaml').open() as fp:
+ includes = yaml.safe_load(fp.read()).get('includes', [])
+ layer_order = includes + [charm_name]
+
+ for workload_state in WorkloadState:
+ if workload_state not in _statuses:
+ continue
+ if not _statuses[workload_state]:
+ continue
+
+ def _get_key(record):
+ layer_name, message = record
+ if layer_name in layer_order:
+ return layer_order.index(layer_name)
+ else:
+ return 0
+
+ sorted_statuses = sorted(_statuses[workload_state], key=_get_key)
+ layer_name, message = sorted_statuses[-1]
+ _status_set_immediate(workload_state, message)
+ break
+
+
+def _status_set_immediate(workload_state, message):
+ workload_state = workload_state.value
+ try:
+ hookenv.log('status-set: {}: {}'.format(workload_state, message),
+ hookenv.INFO)
+ ret = _orig_call(['status-set', workload_state, message])
+ if ret == 0:
+ return
+ except OSError as e:
+ # ignore status-set not available on older controllers
+ if e.errno != errno.ENOENT:
+ raise
+
+
+def _patch_hookenv():
+ # we can't patch hookenv.status_set directly because other layers may have
+ # already imported it into their namespace, so we have to patch sp.call
+ subprocess.call = _patched_call
+
+
+@wraps(_orig_call)
+def _patched_call(cmd, *args, **kwargs):
+ if not isinstance(cmd, list) or cmd[0] != 'status-set':
+ return _orig_call(cmd, *args, **kwargs)
+ _, workload_state, message = cmd
+ status_set(workload_state, message)
+ return 0 # make hookenv.status_set not emit spurious failure logs
diff --git a/kubeapi-load-balancer/lib/charms/layer/tls_client.py b/kubeapi-load-balancer/lib/charms/layer/tls_client.py
new file mode 100644
index 0000000..b2980dc
--- /dev/null
+++ b/kubeapi-load-balancer/lib/charms/layer/tls_client.py
@@ -0,0 +1,61 @@
+# Copyright 2016-2017 Canonical Ltd.
+#
+# This file is part of the tls-client layer for Juju.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from charmhelpers.core.hookenv import log
+from charmhelpers.core import unitdata
+
+from charms.reactive import remove_state
+from charms.reactive import endpoint_from_flag
+
+
+def reset_certificate_write_flag(cert_type):
+ """
+ Reset the certificate written flag so notification will work on the next
+ write cert_type must be 'server', 'client', or 'ca' to indicate type of
+ certificate
+ """
+ if cert_type not in ['server', 'client', 'ca']:
+ log('Unknown certificate type!')
+ else:
+ remove_state('tls_client.{0}.certificate.written'.format(cert_type))
+
+
+def request_server_cert(common_name, sans=None, crt_path=None, key_path=None):
+ tls = endpoint_from_flag('certificates.available')
+ tls.request_server_cert(common_name, sans)
+ if not crt_path and not key_path:
+ return
+ kv = unitdata.kv()
+ cert_paths = kv.get('layer.tls-client.cert-paths', {})
+ cert_paths.setdefault('server', {})[common_name] = {
+ 'crt': str(crt_path),
+ 'key': str(key_path),
+ }
+ kv.set('layer.tls-client.cert-paths', cert_paths)
+
+
+def request_client_cert(common_name, sans=None, crt_path=None, key_path=None):
+ tls = endpoint_from_flag('certificates.available')
+ tls.request_client_cert(common_name, sans)
+ if not crt_path and not key_path:
+ return
+ kv = unitdata.kv()
+ cert_paths = kv.get('layer.tls-client.cert-paths', {})
+ cert_paths.setdefault('client', {})[common_name] = {
+ 'crt': str(crt_path),
+ 'key': str(key_path),
+ }
+ kv.set('layer.tls-client.cert-paths', cert_paths)
diff --git a/kubeapi-load-balancer/lib/debug_script.py b/kubeapi-load-balancer/lib/debug_script.py
new file mode 100644
index 0000000..e156924
--- /dev/null
+++ b/kubeapi-load-balancer/lib/debug_script.py
@@ -0,0 +1,8 @@
+import os
+
+dir = os.environ["DEBUG_SCRIPT_DIR"]
+
+
+def open_file(path, *args, **kwargs):
+ """ Open a file within the debug script dir """
+ return open(os.path.join(dir, path), *args, **kwargs)
diff --git a/kubeapi-load-balancer/lib/nginxlib.py b/kubeapi-load-balancer/lib/nginxlib.py
new file mode 100644
index 0000000..1bd5e73
--- /dev/null
+++ b/kubeapi-load-balancer/lib/nginxlib.py
@@ -0,0 +1,4 @@
+from warnings import warn
+from charms.layer.nginx import * # noqa
+
+warn('nginxlib is being deprecated, use charms.layer.nginx instead')
diff --git a/kubeapi-load-balancer/make_docs b/kubeapi-load-balancer/make_docs
new file mode 100644
index 0000000..dcd4c1f
--- /dev/null
+++ b/kubeapi-load-balancer/make_docs
@@ -0,0 +1,20 @@
+#!.tox/py3/bin/python
+
+import os
+import sys
+from shutil import rmtree
+from unittest.mock import patch
+
+import pydocmd.__main__
+
+
+with patch('charmhelpers.core.hookenv.metadata') as metadata:
+ sys.path.insert(0, 'lib')
+ sys.path.insert(1, 'reactive')
+ print(sys.argv)
+ if len(sys.argv) == 1:
+ sys.argv.extend(['build'])
+ pydocmd.__main__.main()
+ rmtree('_build')
+ if os.path.exists('.unit-state.db'):
+ os.remove('.unit-state.db')
diff --git a/kubeapi-load-balancer/metadata.yaml b/kubeapi-load-balancer/metadata.yaml
new file mode 100644
index 0000000..4f96233
--- /dev/null
+++ b/kubeapi-load-balancer/metadata.yaml
@@ -0,0 +1,36 @@
+"name": "kubeapi-load-balancer"
+"summary": |-
+ Nginx Load Balancer
+"maintainers":
+- "Tim Van Steenburgh "
+- "George Kraft "
+- "Rye Terrell "
+- "Konstantinos Tsakalozos "
+- "Charles Butler "
+- "Matthew Bruzek "
+"description": |
+ A round robin Nginx load balancer to distribute traffic for kubernetes apiservers.
+"tags":
+- "application"
+- "nginx"
+- "misc"
+"series":
+- "focal"
+- "bionic"
+- "xenial"
+"requires":
+ "certificates":
+ "interface": "tls-certificates"
+ "ha":
+ "interface": "hacluster"
+ "apiserver":
+ "interface": "http"
+"provides":
+ "nrpe-external-master":
+ "interface": "nrpe-external-master"
+ "scope": "container"
+ "website":
+ "interface": "http"
+ "loadbalancer":
+ "interface": "public-address"
+"subordinate": !!bool "false"
diff --git a/kubeapi-load-balancer/metrics.yaml b/kubeapi-load-balancer/metrics.yaml
new file mode 100644
index 0000000..0fcb3c1
--- /dev/null
+++ b/kubeapi-load-balancer/metrics.yaml
@@ -0,0 +1,2 @@
+metrics:
+ juju-units: {}
diff --git a/kubeapi-load-balancer/pydocmd.yml b/kubeapi-load-balancer/pydocmd.yml
new file mode 100644
index 0000000..ab3b2ef
--- /dev/null
+++ b/kubeapi-load-balancer/pydocmd.yml
@@ -0,0 +1,16 @@
+site_name: 'Status Management Layer'
+
+generate:
+ - status.md:
+ - charms.layer.status.WorkloadState
+ - charms.layer.status.maintenance
+ - charms.layer.status.maint
+ - charms.layer.status.blocked
+ - charms.layer.status.waiting
+ - charms.layer.status.active
+ - charms.layer.status.status_set
+
+pages:
+ - Status Management Layer: status.md
+
+gens_dir: docs
diff --git a/kubeapi-load-balancer/pyproject.toml b/kubeapi-load-balancer/pyproject.toml
new file mode 100644
index 0000000..db0dcd0
--- /dev/null
+++ b/kubeapi-load-balancer/pyproject.toml
@@ -0,0 +1,3 @@
+[tool.black]
+line-length=120
+target-version=['py35']
diff --git a/kubeapi-load-balancer/reactive/__init__.py b/kubeapi-load-balancer/reactive/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/kubeapi-load-balancer/reactive/apt.py b/kubeapi-load-balancer/reactive/apt.py
new file mode 100644
index 0000000..8832296
--- /dev/null
+++ b/kubeapi-load-balancer/reactive/apt.py
@@ -0,0 +1,158 @@
+# Copyright 2015-2020 Canonical Ltd.
+#
+# This file is part of the Apt layer for Juju.
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 3, as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranties of
+# MERCHANTABILITY, SATISFACTORY QUALITY, or FITNESS FOR A PARTICULAR
+# PURPOSE. See the GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+'''
+charms.reactive helpers for dealing with deb packages.
+
+Add apt package sources using add_source(). Queue deb packages for
+installation with install(). Configure and work with your software
+once the apt.installed.{packagename} flag is set.
+'''
+import os.path
+import subprocess
+import re
+
+from charmhelpers import fetch
+from charmhelpers.core import hookenv
+from charmhelpers.core.hookenv import DEBUG, ERROR, WARNING
+from charms import layer
+from charms.layer import status
+from charms import reactive
+from charms.reactive import when, when_not
+
+import charms.apt
+
+
+@when('apt.needs_update')
+def update():
+ charms.apt.update()
+
+
+@when('apt.queued_installs')
+@when_not('apt.needs_update')
+def install_queued():
+ charms.apt.install_queued()
+
+
+@when_not('apt.queued_installs')
+def ensure_package_status():
+ charms.apt.ensure_package_status()
+
+
+def filter_installed_packages(packages):  # returns the subset of `packages` (literal names or regexes) with no installed match
+    # Don't use fetch.filter_installed_packages, as it depends on python-apt
+    # and not available if the basic layer's use_site_packages option is off
+    cmd = ['dpkg-query', '--show', r'--showformat=${Package}\n']
+    installed = set(subprocess.check_output(cmd, universal_newlines=True).split())  # every installed package name
+
+    # list of packages that are not installed
+    not_installed = set(packages) - installed
+
+    # now we want to check for any regex in the installation of the packages
+    not_installed_iterable = not_installed.copy()  # copy: not_installed is mutated inside the loop
+    for pkg in not_installed_iterable:
+        # grab the pattern that we want to match against the packages
+        p = re.compile(pkg)  # NOTE(review): a plain name with regex metachars would be treated as a pattern — assumed intended
+        for pkg2 in installed:
+            matched = p.search(pkg2)
+            if matched:
+                not_installed.remove(pkg)  # a regex match anywhere counts as installed
+                break
+
+    return not_installed
+
+
+def clear_removed_package_flags():
+ """On hook startup, clear install flags for removed packages."""
+ removed = filter_installed_packages(charms.apt.installed())
+ if removed:
+ hookenv.log('{} missing packages ({})'.format(len(removed), ','.join(removed)), WARNING)
+ for package in removed:
+ reactive.clear_flag('apt.installed.{}'.format(package))
+
+
+def add_implicit_signing_keys():
+ """Add keys specified in layer.yaml
+
+ The charm can ship trusted keys, avoiding the need to specify
+ them in config.yaml. We need to add them before we attempt
+ to add any custom sources, or apt will block under Bionic
+ if we attempt to add a source before the key becomes trusted.
+ """
+ opts = layer.options()
+ if 'apt' not in opts or 'keys' not in opts['apt']:
+ return
+ keys = opts['apt']['keys']
+ for p in keys:
+ full_p = os.path.join(hookenv.charm_dir(), p)
+ if os.path.exists(full_p):
+ hookenv.log("Adding key {}".format(p), DEBUG)
+ subprocess.check_call(
+ ['apt-key', 'add', full_p],
+ stdin=subprocess.DEVNULL,
+ stdout=subprocess.DEVNULL,
+ stderr=subprocess.DEVNULL,
+ )
+ else:
+ hookenv.log('Key {!r} does not exist'.format(full_p), ERROR)
+
+
+def configure_sources():
+ """Add user specified package sources from the service configuration.
+
+ See charmhelpers.fetch.configure_sources for details.
+ """
+ config = hookenv.config()
+
+ # We don't have enums, so we need to validate this ourselves.
+ package_status = config.get('package_status') or ''
+ if package_status not in ('hold', 'install'):
+ status.blocked('Unknown package_status {}'.format(package_status))
+ # Die before further hooks are run. This isn't very nice, but
+ # there is no other way to inform the operator that they have
+ # invalid configuration.
+ raise SystemExit(0)
+
+ sources = config.get('install_sources') or ''
+ keys = config.get('install_keys') or ''
+ if reactive.helpers.data_changed('apt.configure_sources', (sources, keys)):
+ fetch.configure_sources(update=False, sources_var='install_sources', keys_var='install_keys')
+ reactive.set_flag('apt.needs_update')
+
+ # Clumsy 'config.get() or' per Bug #1641362
+ extra_packages = sorted((config.get('extra_packages') or '').split())
+ if extra_packages:
+ charms.apt.queue_install(extra_packages)
+
+
+def queue_layer_packages():
+    """Add packages listed in build-time layer options."""
+    # Both basic and apt layer. basic layer will have already installed
+    # its defined packages, but rescheduling it here gets the apt layer
+    # flag set and they will be pinned as any other apt layer installed
+    # package.
+    opts = layer.options()
+    for section in ['basic', 'apt']:
+        if section in opts and 'packages' in opts[section]:
+            charms.apt.queue_install(opts[section]['packages'])
+
+
+hookenv.atstart(hookenv.log, 'Initializing Apt Layer')
+hookenv.atstart(clear_removed_package_flags)
+hookenv.atstart(add_implicit_signing_keys)
+hookenv.atstart(configure_sources)
+hookenv.atstart(queue_layer_packages)
+hookenv.atstart(charms.apt.reset_application_version)
diff --git a/kubeapi-load-balancer/reactive/hacluster.py b/kubeapi-load-balancer/reactive/hacluster.py
new file mode 100644
index 0000000..f921f76
--- /dev/null
+++ b/kubeapi-load-balancer/reactive/hacluster.py
@@ -0,0 +1,110 @@
+from charms import layer
+
+from charms.reactive import hook
+from charms.reactive import when, when_not, clear_flag, set_flag, is_flag_set
+from charms.reactive import endpoint_from_flag
+
+from charms.layer.kubernetes_common import get_ingress_address
+
+from charmhelpers.core import hookenv
+from charmhelpers.core import unitdata
+
+db = unitdata.kv()
+
+
+@hook('upgrade-charm')
+def do_upgrade():
+    # bump the services from upstart to systemd. :-/
+    hacluster = endpoint_from_flag('ha.connected')
+    if not hacluster:  # no ha relation established; nothing to migrate
+        return
+
+    if not is_flag_set('layer-hacluster.upgraded-systemd'):
+        services = db.get('layer-hacluster.services', {'current_services': {},
+                                                       'desired_services': {},
+                                                       'deleted_services': {}})
+        for name, service in services['current_services'].items():
+            hookenv.log("changing service {} to systemd service".format(name))
+            hacluster.remove_init_service(name, service)
+            hacluster.add_systemd_service(name, service)
+
+        # change any pending lsb entries to systemd
+        for name, service in services['desired_services'].items():
+            msg = "changing pending service {} to systemd service"
+            hookenv.log(msg.format(name))
+            hacluster.remove_init_service(name, service)
+            hacluster.add_systemd_service(name, service)
+
+        clear_flag('layer-hacluster.configured')  # force configure_hacluster to re-run
+        set_flag('layer-hacluster.upgraded-systemd')  # one-shot guard: never migrate twice
+
+
+@when('ha.connected', 'layer.hacluster.services_configured')
+@when_not('layer-hacluster.configured')
+def configure_hacluster():
+ """Configure HA resources in corosync"""
+ hacluster = endpoint_from_flag('ha.connected')
+ vips = hookenv.config('ha-cluster-vip').split()
+ dns_record = hookenv.config('ha-cluster-dns')
+ if vips and dns_record:
+ set_flag('layer-hacluster.dns_vip.invalid')
+ msg = "Unsupported configuration. " \
+ "ha-cluster-vip and ha-cluster-dns cannot both be set",
+ hookenv.log(msg)
+ return
+ else:
+ clear_flag('layer-hacluster.dns_vip.invalid')
+ if vips:
+ for vip in vips:
+ hacluster.add_vip(hookenv.application_name(), vip)
+ elif dns_record:
+ layer_options = layer.options('hacluster')
+ binding_address = layer_options.get('binding_address')
+ ip = get_ingress_address(binding_address)
+ hacluster.add_dnsha(hookenv.application_name(), ip, dns_record,
+ 'public')
+
+ services = db.get('layer-hacluster.services', {'current_services': {},
+ 'desired_services': {},
+ 'deleted_services': {}})
+ for name, service in services['deleted_services'].items():
+ hacluster.remove_systemd_service(name, service)
+ for name, service in services['desired_services'].items():
+ hacluster.add_systemd_service(name, service)
+ services['current_services'][name] = service
+
+ services['deleted_services'] = {}
+ services['desired_services'] = {}
+
+ hacluster.bind_resources()
+ set_flag('layer-hacluster.configured')
+
+
+@when('config.changed.ha-cluster-vip',
+ 'ha.connected')
+def update_vips():
+ hacluster = endpoint_from_flag('ha.connected')
+ config = hookenv.config()
+ original_vips = set(config.previous('ha-cluster-vip').split())
+ new_vips = set(config['ha-cluster-vip'].split())
+ old_vips = original_vips - new_vips
+
+ for vip in old_vips:
+ hacluster.remove_vip(hookenv.application_name(), vip)
+
+ clear_flag('layer-hacluster.configured')
+
+
+@when('config.changed.ha-cluster-dns',
+ 'ha.connected')
+def update_dns():
+ hacluster = endpoint_from_flag('ha.connected')
+ config = hookenv.config()
+ original_dns = set(config.previous('ha-cluster-dns').split())
+ new_dns = set(config['ha-cluster-dns'].split())
+ old_dns = original_dns - new_dns
+
+ for dns in old_dns:
+ hacluster.remove_dnsha(hookenv.application_name, 'public')
+
+ clear_flag('layer-hacluster.configured')
diff --git a/kubeapi-load-balancer/reactive/load_balancer.py b/kubeapi-load-balancer/reactive/load_balancer.py
new file mode 100644
index 0000000..e6c35ac
--- /dev/null
+++ b/kubeapi-load-balancer/reactive/load_balancer.py
@@ -0,0 +1,326 @@
+#!/usr/bin/env python
+
+# Copyright 2015 The Kubernetes Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import os
+import socket
+import subprocess
+
+from pathlib import Path
+
+from charms.reactive import when, when_any, when_not
+from charms.reactive import set_flag, is_state
+from charms.reactive import hook
+from charms.reactive import clear_flag, endpoint_from_flag
+from charmhelpers.core import hookenv
+from charmhelpers.core import host
+from charmhelpers.contrib.charmsupport import nrpe
+
+from charms.layer import nginx
+from charms.layer import tls_client
+from charms.layer import status
+from charms.layer import kubernetes_common
+from charms.layer.hacluster import add_service_to_hacluster
+from charms.layer.hacluster import remove_service_from_hacluster
+
+from subprocess import Popen
+from subprocess import PIPE
+from subprocess import STDOUT
+from subprocess import CalledProcessError
+
+
+apilb_nginx = """/var/log/nginx.*.log {
+ daily
+ missingok
+ rotate 14
+ compress
+ delaycompress
+ notifempty
+ create 0640 www-data adm
+ sharedscripts
+ prerotate
+ if [ -d /etc/logrotate.d/httpd-prerotate ]; then \\
+ run-parts /etc/logrotate.d/httpd-prerotate; \\
+ fi \\
+ endscript
+ postrotate
+ invoke-rc.d nginx rotate >/dev/null 2>&1
+ endscript
+}"""
+
+cert_dir = Path('/srv/kubernetes/')
+server_crt_path = cert_dir / 'server.crt'
+server_key_path = cert_dir / 'server.key'
+
+
+@when('certificates.available', 'website.available')
+def request_server_certificates():
+    '''Send the data that is required to create a server certificate for
+    this server.'''
+    website = endpoint_from_flag('website.available')
+    # Use the public ip of this unit as the Common Name for the certificate.
+    common_name = hookenv.unit_public_ip()
+
+    bind_ips = kubernetes_common.get_bind_addrs(ipv4=True, ipv6=True)
+
+    # Create SANs that the tls layer will add to the server cert.
+    sans = [
+        # The CN field is checked as a hostname, so if it's an IP, it
+        # won't match unless also included in the SANs as an IP field.
+        common_name,
+        kubernetes_common.get_ingress_address(website.endpoint_name),
+        socket.gethostname(),
+        socket.getfqdn(),
+    ] + bind_ips
+    forced_lb_ips = hookenv.config('loadbalancer-ips').split()
+    if forced_lb_ips:  # operator-pinned LB IPs take precedence over HA VIP/DNS
+        sans.extend(forced_lb_ips)
+    else:
+        hacluster = endpoint_from_flag('ha.connected')  # None when no ha relation
+        if hacluster:
+            vips = hookenv.config('ha-cluster-vip').split()
+            dns_record = hookenv.config('ha-cluster-dns')
+            if vips:
+                sans.extend(vips)
+            elif dns_record:
+                sans.append(dns_record)
+
+    # maybe they have extra names they want as SANs
+    extra_sans = hookenv.config('extra_sans')
+    if extra_sans and not extra_sans == "":  # second test is redundant ("" is already falsy)
+        sans.extend(extra_sans.split())
+    # Request a server cert with this information.
+    tls_client.request_server_cert(common_name, sorted(set(sans)),  # de-duplicated; sorted for a stable request payload
+                                   crt_path=server_crt_path,
+                                   key_path=server_key_path)
+
+
+@when('config.changed.extra_sans', 'certificates.available',
+ 'website.available')
+def update_certificate():
+ # Using the config.changed.extra_sans flag to catch changes.
+ # IP changes will take ~5 minutes or so to propagate, but
+ # it will update.
+ request_server_certificates()
+
+
+@when('certificates.server.cert.available',
+ 'nginx.available')
+@when_any('tls_client.certs.changed',
+ 'tls_client.ca.written')
+def kick_nginx(tls):
+ # certificate changed, so sighup nginx
+ hookenv.log("Certificate information changed, sending SIGHUP to nginx")
+ host.service_restart('nginx')
+ clear_flag('tls_client.certs.changed')
+ clear_flag('tls_client.ca.written')
+
+
+@when('config.changed.port')
+def close_old_port():
+ config = hookenv.config()
+ old_port = config.previous('port')
+ if not old_port:
+ return
+ try:
+ hookenv.close_port(old_port)
+ except CalledProcessError:
+ hookenv.log('Port %d already closed, skipping.' % old_port)
+
+
+def maybe_write_apilb_logrotate_config():
+ filename = '/etc/logrotate.d/apilb_nginx'
+ if not os.path.exists(filename):
+ # Set log rotation for apilb log file
+ with open(filename, 'w+') as fp:
+ fp.write(apilb_nginx)
+
+
+@when('nginx.available', 'apiserver.available',
+ 'tls_client.certs.saved')
+@when_not('upgrade.series.in-progress')
+def install_load_balancer():
+ ''' Create the default vhost template for load balancing '''
+ apiserver = endpoint_from_flag('apiserver.available')
+ # Do both the key and certificate exist?
+ if server_crt_path.exists() and server_key_path.exists():
+ # At this point the cert and key exist, and they are owned by root.
+ chown = ['chown', 'www-data:www-data', str(server_crt_path)]
+
+ # Change the owner to www-data so the nginx process can read the cert.
+ subprocess.call(chown)
+ chown = ['chown', 'www-data:www-data', str(server_key_path)]
+
+ # Change the owner to www-data so the nginx process can read the key.
+ subprocess.call(chown)
+
+ port = hookenv.config('port')
+ hookenv.open_port(port)
+ services = apiserver.services()
+ nginx.configure_site(
+ 'apilb',
+ 'apilb.conf',
+ server_name='_',
+ services=services,
+ port=port,
+ server_certificate=str(server_crt_path),
+ server_key=str(server_key_path),
+ proxy_read_timeout=hookenv.config('proxy_read_timeout')
+ )
+
+ maybe_write_apilb_logrotate_config()
+ status.active('Loadbalancer ready.')
+
+
+@hook('upgrade-charm')
+def upgrade_charm():
+ if is_state('certificates.available') and is_state('website.available'):
+ request_server_certificates()
+ maybe_write_apilb_logrotate_config()
+
+
+@hook('pre-series-upgrade')
+def pre_series_upgrade():
+ host.service_pause('nginx')
+ status.blocked('Series upgrade in progress')
+
+
+@hook('post-series-upgrade')
+def post_series_upgrade():
+ host.service_resume('nginx')
+
+
+@when('nginx.available')
+def set_nginx_version():
+    ''' Surface the currently deployed version of nginx to Juju '''
+    cmd = 'nginx -v'
+    p = Popen(cmd, shell=True,
+              stdin=PIPE,
+              stdout=PIPE,
+              stderr=STDOUT,  # nginx prints its version banner to stderr; fold into stdout
+              close_fds=True)
+    raw = p.stdout.read()
+    # The version comes back as:
+    # nginx version: nginx/1.10.0 (Ubuntu)
+    version = raw.split(b'/')[-1].split(b' ')[0]  # bytes: b'1.10.0 (Ubuntu)\n' -> b'1.10.0'
+    hookenv.application_version_set(version.rstrip())  # NOTE(review): still bytes, not str — confirm hookenv handles bytes here
+
+
+@when('website.available')
+def provide_application_details():
+ ''' re-use the nginx layer website relation to relay the hostname/port
+ to any consuming kubernetes-workers, or other units that require the
+ kubernetes API '''
+ website = endpoint_from_flag('website.available')
+ hacluster = endpoint_from_flag('ha.connected')
+ forced_lb_ips = hookenv.config('loadbalancer-ips').split()
+ address = None
+ if forced_lb_ips:
+ address = forced_lb_ips
+ elif hacluster:
+ # in the hacluster world, we dump the vip or the dns
+ # on every unit's data. This is because the
+ # kubernetes-master charm just grabs the first
+ # one it sees and uses that ip/dns.
+ vips = hookenv.config('ha-cluster-vip').split()
+ dns_record = hookenv.config('ha-cluster-dns')
+ if vips:
+ address = vips
+ elif dns_record:
+ address = dns_record
+ if address:
+ website.configure(port=hookenv.config('port'),
+ private_address=address,
+ hostname=address)
+ else:
+ website.configure(port=hookenv.config('port'))
+
+
+@when('loadbalancer.available')
+def provide_loadbalancing():
+    '''Send the public address and port to the public-address interface, so
+    the subordinates can get the public address of this loadbalancer.'''
+    loadbalancer = endpoint_from_flag('loadbalancer.available')
+    hacluster = endpoint_from_flag('ha.connected')  # None when no hacluster relation
+    forced_lb_ips = hookenv.config('loadbalancer-ips').split()
+    if forced_lb_ips:
+        address = forced_lb_ips  # NOTE(review): a list here, but a str in the fallback branches — confirm consumers accept both
+    elif hacluster:
+        # in the hacluster world, we dump the vip or the dns
+        # on every unit's data. This is because the
+        # kubernetes-master charm just grabs the first
+        # one it sees and uses that ip/dns.
+        vips = hookenv.config('ha-cluster-vip').split()
+        dns_record = hookenv.config('ha-cluster-dns')
+        if vips:
+            address = vips  # list of VIP strings
+        elif dns_record:
+            address = dns_record  # single DNS name (str)
+        else:
+            address = hookenv.unit_get('public-address')
+    else:
+        address = hookenv.unit_get('public-address')  # no HA, no pinned IPs: plain unit address
+    loadbalancer.set_address_port(address, hookenv.config('port'))
+
+
+@when('nrpe-external-master.available')
+@when_not('nrpe-external-master.initial-config')
+def initial_nrpe_config(nagios=None):
+ set_flag('nrpe-external-master.initial-config')
+ update_nrpe_config(nagios)
+
+
+@when('nginx.available')
+@when('nrpe-external-master.available')
+@when_any('config.changed.nagios_context',
+ 'config.changed.nagios_servicegroups')
+def update_nrpe_config(unused=None):
+ services = ('nginx',)
+
+ hostname = nrpe.get_nagios_hostname()
+ current_unit = nrpe.get_nagios_unit_name()
+ nrpe_setup = nrpe.NRPE(hostname=hostname)
+ nrpe.add_init_service_checks(nrpe_setup, services, current_unit)
+ nrpe_setup.write()
+
+
+@when_not('nrpe-external-master.available')
+@when('nrpe-external-master.initial-config')
+def remove_nrpe_config(nagios=None):
+ clear_flag('nrpe-external-master.initial-config')
+
+ # List of systemd services for which the checks will be removed
+ services = ('nginx',)
+
+ # The current nrpe-external-master interface doesn't handle a lot of logic,
+ # use the charm-helpers code for now.
+ hostname = nrpe.get_nagios_hostname()
+ nrpe_setup = nrpe.NRPE(hostname=hostname)
+
+ for service in services:
+ nrpe_setup.remove_check(shortname=service)
+
+
+@when('nginx.available', 'ha.connected')
+def configure_hacluster():
+ add_service_to_hacluster('nginx', 'nginx')
+ set_flag('hacluster-configured')
+
+
+@when_not('ha.connected')
+@when('hacluster-configured')
+def remove_hacluster():
+ remove_service_from_hacluster('nginx', 'nginx')
+ clear_flag('hacluster-configured')
diff --git a/kubeapi-load-balancer/reactive/nginx.py b/kubeapi-load-balancer/reactive/nginx.py
new file mode 100644
index 0000000..9a93006
--- /dev/null
+++ b/kubeapi-load-balancer/reactive/nginx.py
@@ -0,0 +1,33 @@
+from charms.reactive import (
+ set_state,
+ when_not,
+ when
+)
+
+from charms.layer import nginx
+
+from charmhelpers.core import hookenv
+
+config = hookenv.config()
+
+
+# Handlers --------------------------------------------------------------------
+@when('apt.installed.nginx-full')
+@when_not('nginx.available')
+def nginx_ready():
+ nginx.remove_default_site()
+ hookenv.status_set('active', 'NGINX is ready')
+ set_state('nginx.available')
+
+
+# Example website.available reaction ------------------------------------------
+"""
+This example reaction for an application layer which consumes this nginx layer.
+If left here then this reaction may overwrite your top-level reaction depending
+on service names, ie., both nginx and ghost have the same reaction method,
+however, nginx will execute since it's a higher precedence.
+
+@when('nginx.available', 'website.available')
+def configure_website(website):
+ website.configure(port=config['port'])
+"""
diff --git a/kubeapi-load-balancer/reactive/status.py b/kubeapi-load-balancer/reactive/status.py
new file mode 100644
index 0000000..2f33f3f
--- /dev/null
+++ b/kubeapi-load-balancer/reactive/status.py
@@ -0,0 +1,4 @@
+from charms import layer
+
+
+layer.status._initialize()
diff --git a/kubeapi-load-balancer/reactive/tls_client.py b/kubeapi-load-balancer/reactive/tls_client.py
new file mode 100644
index 0000000..afa2228
--- /dev/null
+++ b/kubeapi-load-balancer/reactive/tls_client.py
@@ -0,0 +1,208 @@
+import os
+
+from pathlib import Path
+from subprocess import check_call
+
+from charms import layer
+from charms.reactive import hook
+from charms.reactive import set_state, remove_state
+from charms.reactive import when
+from charms.reactive import set_flag, clear_flag
+from charms.reactive import endpoint_from_flag
+from charms.reactive.helpers import data_changed
+
+from charmhelpers.core import hookenv, unitdata
+from charmhelpers.core.hookenv import log
+
+
+@when('certificates.ca.available')
+def store_ca(tls):
+ '''Read the certificate authority from the relation object and install
+ the ca on this system.'''
+ # Get the CA from the relationship object.
+ certificate_authority = tls.get_ca()
+ if certificate_authority:
+ layer_options = layer.options('tls-client')
+ ca_path = layer_options.get('ca_certificate_path')
+ changed = data_changed('certificate_authority', certificate_authority)
+ if ca_path:
+ if changed or not os.path.exists(ca_path):
+ log('Writing CA certificate to {0}'.format(ca_path))
+ # ensure we have a newline at the end of the certificate.
+ # some things will blow up without one.
+ # See https://bugs.launchpad.net/charm-kubernetes-master/+bug/1828034
+ if not certificate_authority.endswith('\n'):
+ certificate_authority += '\n'
+ _write_file(ca_path, certificate_authority)
+ set_state('tls_client.ca.written')
+ set_state('tls_client.ca.saved')
+ if changed:
+ # Update /etc/ssl/certs and generate ca-certificates.crt
+ install_ca(certificate_authority)
+
+
+@when('certificates.server.cert.available')
+def store_server(tls):
+ '''Read the server certificate and server key from the relation object
+ and save them to the certificate directory..'''
+ server_cert, server_key = tls.get_server_cert()
+ chain = tls.get_chain()
+ if chain:
+ server_cert = server_cert + '\n' + chain
+ if server_cert and server_key:
+ layer_options = layer.options('tls-client')
+ cert_path = layer_options.get('server_certificate_path')
+ key_path = layer_options.get('server_key_path')
+ cert_changed = data_changed('server_certificate', server_cert)
+ key_changed = data_changed('server_key', server_key)
+ if cert_path:
+ if cert_changed or not os.path.exists(cert_path):
+ log('Writing server certificate to {0}'.format(cert_path))
+ _write_file(cert_path, server_cert)
+ set_state('tls_client.server.certificate.written')
+ set_state('tls_client.server.certificate.saved')
+ if key_path:
+ if key_changed or not os.path.exists(key_path):
+ log('Writing server key to {0}'.format(key_path))
+ _write_file(key_path, server_key)
+ set_state('tls_client.server.key.saved')
+
+
+@when('certificates.client.cert.available')
+def store_client(tls):
+ '''Read the client certificate and client key from the relation object
+ and copy them to the certificate directory.'''
+ client_cert, client_key = tls.get_client_cert()
+ chain = tls.get_chain()
+ if chain:
+ client_cert = client_cert + '\n' + chain
+ if client_cert and client_key:
+ layer_options = layer.options('tls-client')
+ cert_path = layer_options.get('client_certificate_path')
+ key_path = layer_options.get('client_key_path')
+ cert_changed = data_changed('client_certificate', client_cert)
+ key_changed = data_changed('client_key', client_key)
+ if cert_path:
+ if cert_changed or not os.path.exists(cert_path):
+ log('Writing client certificate to {0}'.format(cert_path))
+ _write_file(cert_path, client_cert)
+ set_state('tls_client.client.certificate.written')
+ set_state('tls_client.client.certificate.saved')
+ if key_path:
+ if key_changed or not os.path.exists(key_path):
+ log('Writing client key to {0}'.format(key_path))
+ _write_file(key_path, client_key)
+ set_state('tls_client.client.key.saved')
+
+
+@when('certificates.certs.changed')
+def update_certs():
+    tls = endpoint_from_flag('certificates.certs.changed')
+    certs_paths = unitdata.kv().get('layer.tls-client.cert-paths', {})
+    all_ready = True  # flipped off if any requested cert has not arrived yet
+    any_changed = False  # set when at least one cert/key was (re)written
+    maps = {
+        'server': tls.server_certs_map,
+        'client': tls.client_certs_map,
+    }
+
+    if maps.get('client') == {}:
+        log(
+            'No client certs found using maps. Checking for global \
+            client certificates.',
+            'WARNING'
+        )
+        # Check for global certs,
+        # Backwards compatibility https://bugs.launchpad.net/charm-kubernetes-master/+bug/1825819
+        cert_pair = tls.get_client_cert()
+        if cert_pair is not None:
+            for client_name in certs_paths.get('client', {}).keys():
+                maps.get('client').update({
+                    client_name: cert_pair
+                })
+
+    chain = tls.get_chain()
+    for cert_type in ('server', 'client'):
+        for common_name, paths in certs_paths.get(cert_type, {}).items():
+            cert_pair = maps[cert_type].get(common_name)
+            if not cert_pair:
+                all_ready = False  # requested but not issued yet
+                continue
+            if not data_changed('layer.tls-client.'
+                                '{}.{}'.format(cert_type, common_name), cert_pair):
+                continue
+
+            cert = None
+            key = None
+            if type(cert_pair) is not tuple:  # new-style object with .cert/.key attrs vs legacy (cert, key) tuple
+                if paths['crt']:
+                    cert = cert_pair.cert
+                if paths['key']:
+                    key = cert_pair.key
+            else:
+                cert, key = cert_pair
+
+            if cert:
+                if chain:
+                    cert = cert + '\n' + chain  # append intermediate chain to the leaf cert
+                _ensure_directory(paths['crt'])
+                Path(paths['crt']).write_text(cert)
+
+            if key:
+                _ensure_directory(paths['key'])
+                Path(paths['key']).write_text(key)  # NOTE(review): default perms, unlike _write_file's 0o440 — confirm intended
+
+            any_changed = True
+            # clear flags first to ensure they are re-triggered if left set
+            clear_flag('tls_client.{}.certs.changed'.format(cert_type))
+            clear_flag('tls_client.{}.cert.{}.changed'.format(cert_type,
+                                                              common_name))
+            set_flag('tls_client.{}.certs.changed'.format(cert_type))
+            set_flag('tls_client.{}.cert.{}.changed'.format(cert_type,
+                                                            common_name))
+    if all_ready:
+        set_flag('tls_client.certs.saved')
+    if any_changed:
+        clear_flag('tls_client.certs.changed')  # clear+set to re-trigger any watchers already holding the flag
+        set_flag('tls_client.certs.changed')
+    clear_flag('certificates.certs.changed')
+
+
+def install_ca(certificate_authority):
+ '''Install a certificiate authority on the system by calling the
+ update-ca-certificates command.'''
+ if certificate_authority:
+ name = hookenv.service_name()
+ # Create a path to install CAs on Debian systems.
+ ca_path = '/usr/local/share/ca-certificates/{0}.crt'.format(name)
+ log('Writing CA certificate to {0}'.format(ca_path))
+ _write_file(ca_path, certificate_authority)
+ # Update the trusted CAs on this system (a time expensive operation).
+ check_call(['update-ca-certificates'])
+ log('Generated ca-certificates.crt for {0}'.format(name))
+ set_state('tls_client.ca_installed')
+
+
+@hook('upgrade-charm')
+def remove_states():
+ remove_state('tls_client.ca.saved')
+ remove_state('tls_client.server.certificate.saved')
+ remove_state('tls_client.server.key.saved')
+ remove_state('tls_client.client.certificate.saved')
+ remove_state('tls_client.client.key.saved')
+
+
+def _ensure_directory(path):
+    '''Ensure the parent directory exists creating directories if necessary.'''
+    directory = os.path.dirname(path)
+    if not os.path.isdir(directory):
+        os.makedirs(directory)
+        os.chmod(directory, 0o770)  # rwx for owner/group only; key material lives here
+
+
+def _write_file(path, content):
+    '''Write the path to a file.'''
+    _ensure_directory(path)
+    with open(path, 'w') as stream:
+        stream.write(content)
+    os.chmod(path, 0o440)  # read-only for owner/group: cert/key material
diff --git a/kubeapi-load-balancer/requirements.txt b/kubeapi-load-balancer/requirements.txt
new file mode 100644
index 0000000..55543d9
--- /dev/null
+++ b/kubeapi-load-balancer/requirements.txt
@@ -0,0 +1,3 @@
+mock
+flake8
+pytest
diff --git a/kubeapi-load-balancer/revision b/kubeapi-load-balancer/revision
new file mode 100644
index 0000000..c227083
--- /dev/null
+++ b/kubeapi-load-balancer/revision
@@ -0,0 +1 @@
+0
\ No newline at end of file
diff --git a/kubeapi-load-balancer/script/bootstrap b/kubeapi-load-balancer/script/bootstrap
new file mode 100644
index 0000000..c883e4c
--- /dev/null
+++ b/kubeapi-load-balancer/script/bootstrap
@@ -0,0 +1,7 @@
+#!/bin/bash
+
+set -x
+
+sudo apt update
+sudo snap install charm --classic
+sudo snap install yq
diff --git a/kubeapi-load-balancer/script/build b/kubeapi-load-balancer/script/build
new file mode 100644
index 0000000..6bbbc48
--- /dev/null
+++ b/kubeapi-load-balancer/script/build
@@ -0,0 +1,7 @@
+#!/bin/bash
+set -x
+
+export PATH=/snap/bin:$PATH
+: "${CHARM_BUILD_DIR:=/tmp/charms}"
+
+charm build -r --force -o "$CHARM_BUILD_DIR"
diff --git a/kubeapi-load-balancer/script/upload b/kubeapi-load-balancer/script/upload
new file mode 100644
index 0000000..b8bd049
--- /dev/null
+++ b/kubeapi-load-balancer/script/upload
@@ -0,0 +1,19 @@
+#!/bin/bash
+set -x
+
+export PATH=/snap/bin:$PATH
+
+: "${CHARM_BUILD_DIR:=/tmp/charms}"
+
+charm whoami
+RET=$?
+if ((RET > 0)); then
+ echo "Not logged into charmstore"
+ exit 1
+fi
+
+URL=$(charm push "$CHARM_BUILD_DIR"/builds/"$CHARM"/. cs:~"$NAMESPACE"/"$CHARM" | yq r - url)
+
+if [ "$CHANNEL" != unpublished ]; then
+ charm release "$URL" --channel "$CHANNEL"
+fi
diff --git a/kubeapi-load-balancer/templates/.gitkeep b/kubeapi-load-balancer/templates/.gitkeep
new file mode 100644
index 0000000..e69de29
diff --git a/kubeapi-load-balancer/templates/apilb.conf b/kubeapi-load-balancer/templates/apilb.conf
new file mode 100644
index 0000000..17bc61f
--- /dev/null
+++ b/kubeapi-load-balancer/templates/apilb.conf
@@ -0,0 +1,42 @@
+{% for app in services -%}
+upstream target_service {
+ {% for host in app['hosts'] -%}
+ server {{ host['hostname'] }}:{{ host['port'] }};
+ {% endfor %}
+}
+{% endfor %}
+
+
+server {
+ listen {{ port }} ssl http2;
+ listen [::]:{{ port }} ssl http2 ipv6only=on;
+ server_name {{ server_name }};
+
+ access_log /var/log/nginx.access.log;
+ error_log /var/log/nginx.error.log;
+
+ ssl on;
+ ssl_session_cache builtin:1000 shared:SSL:10m;
+ ssl_certificate {{ server_certificate }};
+ ssl_certificate_key {{ server_key }};
+ ssl_ciphers HIGH:!aNULL:!eNULL:!EXPORT:!CAMELLIA:!DES:!MD5:!PSK:!RC4;
+ ssl_prefer_server_ciphers on;
+
+
+ location / {
+ proxy_buffering off;
+ proxy_set_header Host $host;
+ proxy_set_header X-Real-IP $remote_addr;
+ proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
+ proxy_set_header X-Forwarded-Proto $scheme;
+ proxy_set_header X-Forwarded-Proto-Version $http2;
+ proxy_set_header Upgrade $http_upgrade;
+ proxy_set_header Connection $http_connection;
+ proxy_set_header X-Stream-Protocol-Version $http_x_stream_protocol_version;
+
+ add_header X-Stream-Protocol-Version $upstream_http_x_stream_protocol_version;
+
+ proxy_pass https://target_service;
+ proxy_read_timeout {{ proxy_read_timeout }};
+ }
+}
diff --git a/kubeapi-load-balancer/templates/vhost.conf.ex b/kubeapi-load-balancer/templates/vhost.conf.ex
new file mode 100644
index 0000000..253be36
--- /dev/null
+++ b/kubeapi-load-balancer/templates/vhost.conf.ex
@@ -0,0 +1,18 @@
+server {
+ listen 80;
+
+ server_name {{server_name}};
+
+ location / {
+ proxy_redirect off;
+ proxy_set_header X-Real-IP $remote_addr;
+ proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
+ proxy_set_header X-Forwarded-Proto $scheme;
+ proxy_set_header Host $http_host;
+ proxy_set_header X-NginX-Proxy true;
+ proxy_set_header Connection "";
+ proxy_http_version 1.1;
+ proxy_pass http://{{host}}:{{port}};
+
+ }
+}
diff --git a/kubeapi-load-balancer/tests/conftest.py b/kubeapi-load-balancer/tests/conftest.py
new file mode 100644
index 0000000..a5c275b
--- /dev/null
+++ b/kubeapi-load-balancer/tests/conftest.py
@@ -0,0 +1,4 @@
+import charms.unit_test
+
+charms.unit_test.patch_reactive()
+charms.unit_test.patch_module('subprocess')
diff --git a/kubeapi-load-balancer/tests/test_kubeapi_load_balancer.py b/kubeapi-load-balancer/tests/test_kubeapi_load_balancer.py
new file mode 100644
index 0000000..4ea2763
--- /dev/null
+++ b/kubeapi-load-balancer/tests/test_kubeapi_load_balancer.py
@@ -0,0 +1,17 @@
+from charmhelpers.core import host # patched
+
+from reactive import load_balancer as handlers
+
+
+def test_series_upgrade():
+ assert host.service_pause.call_count == 0
+ assert host.service_resume.call_count == 0
+ assert handlers.status.blocked.call_count == 0
+ handlers.pre_series_upgrade()
+ assert host.service_pause.call_count == 1
+ assert host.service_resume.call_count == 0
+ assert handlers.status.blocked.call_count == 1
+ handlers.post_series_upgrade()
+ assert host.service_pause.call_count == 1
+ assert host.service_resume.call_count == 1
+ assert handlers.status.blocked.call_count == 1
diff --git a/kubeapi-load-balancer/tox.ini b/kubeapi-load-balancer/tox.ini
new file mode 100644
index 0000000..76fa574
--- /dev/null
+++ b/kubeapi-load-balancer/tox.ini
@@ -0,0 +1,18 @@
+[tox]
+skipsdist = True
+envlist = lint,py3
+
+[testenv]
+basepython = python3
+setenv =
+ PYTHONPATH={toxinidir}:{toxinidir}/lib
+deps =
+ pytest
+ flake8
+ ipdb
+ git+https://github.com/juju-solutions/charms.unit_test/#egg=charms.unit_test
+commands = pytest --tb native -s {posargs}
+
+[testenv:lint]
+envdir = {toxworkdir}/py3
+commands = flake8 {toxinidir}/lib {toxinidir}/reactive {toxinidir}/tests
diff --git a/kubeapi-load-balancer/version b/kubeapi-load-balancer/version
new file mode 100644
index 0000000..1dea0b1
--- /dev/null
+++ b/kubeapi-load-balancer/version
@@ -0,0 +1 @@
+e247aeff
\ No newline at end of file
diff --git a/kubeapi-load-balancer/wheelhouse.txt b/kubeapi-load-balancer/wheelhouse.txt
new file mode 100644
index 0000000..f9c2fab
--- /dev/null
+++ b/kubeapi-load-balancer/wheelhouse.txt
@@ -0,0 +1,19 @@
+# layer:basic
+# pip is pinned to <19.0 to avoid https://github.com/pypa/pip/issues/6164
+# even when setuptools is installed before upgrading pip, pip can still see
+# the older system-level setuptools if include_system_packages is true
+pip>=18.1,<19.0
+# pin Jinja2 and PyYAML to the last versions supporting python 3.4 for trusty
+Jinja2<=2.10.1
+PyYAML<=5.2
+setuptools<42
+setuptools-scm<=1.17.0
+charmhelpers>=0.4.0,<1.0.0
+charms.reactive>=0.1.0,<2.0.0
+wheel<0.34
+# pin netaddr to avoid pulling importlib-resources
+netaddr<=0.7.19
+
+# layer:nginx
+toml
+
diff --git a/kubeapi-load-balancer/wheelhouse/Jinja2-2.10.1.tar.gz b/kubeapi-load-balancer/wheelhouse/Jinja2-2.10.1.tar.gz
new file mode 100644
index 0000000..ffd1054
Binary files /dev/null and b/kubeapi-load-balancer/wheelhouse/Jinja2-2.10.1.tar.gz differ
diff --git a/kubeapi-load-balancer/wheelhouse/MarkupSafe-1.1.1.tar.gz b/kubeapi-load-balancer/wheelhouse/MarkupSafe-1.1.1.tar.gz
new file mode 100644
index 0000000..a6dad8e
Binary files /dev/null and b/kubeapi-load-balancer/wheelhouse/MarkupSafe-1.1.1.tar.gz differ
diff --git a/kubeapi-load-balancer/wheelhouse/PyYAML-5.2.tar.gz b/kubeapi-load-balancer/wheelhouse/PyYAML-5.2.tar.gz
new file mode 100644
index 0000000..666d12a
Binary files /dev/null and b/kubeapi-load-balancer/wheelhouse/PyYAML-5.2.tar.gz differ
diff --git a/kubeapi-load-balancer/wheelhouse/Tempita-0.5.2.tar.gz b/kubeapi-load-balancer/wheelhouse/Tempita-0.5.2.tar.gz
new file mode 100644
index 0000000..755befc
Binary files /dev/null and b/kubeapi-load-balancer/wheelhouse/Tempita-0.5.2.tar.gz differ
diff --git a/kubeapi-load-balancer/wheelhouse/charmhelpers-0.20.21.tar.gz b/kubeapi-load-balancer/wheelhouse/charmhelpers-0.20.21.tar.gz
new file mode 100644
index 0000000..ca65d07
Binary files /dev/null and b/kubeapi-load-balancer/wheelhouse/charmhelpers-0.20.21.tar.gz differ
diff --git a/kubeapi-load-balancer/wheelhouse/charms.reactive-1.4.1.tar.gz b/kubeapi-load-balancer/wheelhouse/charms.reactive-1.4.1.tar.gz
new file mode 100644
index 0000000..03bc1fe
Binary files /dev/null and b/kubeapi-load-balancer/wheelhouse/charms.reactive-1.4.1.tar.gz differ
diff --git a/kubeapi-load-balancer/wheelhouse/netaddr-0.7.19.tar.gz b/kubeapi-load-balancer/wheelhouse/netaddr-0.7.19.tar.gz
new file mode 100644
index 0000000..cc31d9d
Binary files /dev/null and b/kubeapi-load-balancer/wheelhouse/netaddr-0.7.19.tar.gz differ
diff --git a/kubeapi-load-balancer/wheelhouse/pbr-5.6.0.tar.gz b/kubeapi-load-balancer/wheelhouse/pbr-5.6.0.tar.gz
new file mode 100644
index 0000000..0d5c965
Binary files /dev/null and b/kubeapi-load-balancer/wheelhouse/pbr-5.6.0.tar.gz differ
diff --git a/kubeapi-load-balancer/wheelhouse/pip-18.1.tar.gz b/kubeapi-load-balancer/wheelhouse/pip-18.1.tar.gz
new file mode 100644
index 0000000..a18192d
Binary files /dev/null and b/kubeapi-load-balancer/wheelhouse/pip-18.1.tar.gz differ
diff --git a/kubeapi-load-balancer/wheelhouse/pyaml-20.4.0.tar.gz b/kubeapi-load-balancer/wheelhouse/pyaml-20.4.0.tar.gz
new file mode 100644
index 0000000..0d5fd76
Binary files /dev/null and b/kubeapi-load-balancer/wheelhouse/pyaml-20.4.0.tar.gz differ
diff --git a/kubeapi-load-balancer/wheelhouse/setuptools-41.6.0.zip b/kubeapi-load-balancer/wheelhouse/setuptools-41.6.0.zip
new file mode 100644
index 0000000..3345759
Binary files /dev/null and b/kubeapi-load-balancer/wheelhouse/setuptools-41.6.0.zip differ
diff --git a/kubeapi-load-balancer/wheelhouse/setuptools_scm-1.17.0.tar.gz b/kubeapi-load-balancer/wheelhouse/setuptools_scm-1.17.0.tar.gz
new file mode 100644
index 0000000..43b16c7
Binary files /dev/null and b/kubeapi-load-balancer/wheelhouse/setuptools_scm-1.17.0.tar.gz differ
diff --git a/kubeapi-load-balancer/wheelhouse/six-1.15.0.tar.gz b/kubeapi-load-balancer/wheelhouse/six-1.15.0.tar.gz
new file mode 100644
index 0000000..63329e4
Binary files /dev/null and b/kubeapi-load-balancer/wheelhouse/six-1.15.0.tar.gz differ
diff --git a/kubeapi-load-balancer/wheelhouse/toml-0.10.2.tar.gz b/kubeapi-load-balancer/wheelhouse/toml-0.10.2.tar.gz
new file mode 100644
index 0000000..41dd278
Binary files /dev/null and b/kubeapi-load-balancer/wheelhouse/toml-0.10.2.tar.gz differ
diff --git a/kubeapi-load-balancer/wheelhouse/wheel-0.33.6.tar.gz b/kubeapi-load-balancer/wheelhouse/wheel-0.33.6.tar.gz
new file mode 100644
index 0000000..c922c4e
Binary files /dev/null and b/kubeapi-load-balancer/wheelhouse/wheel-0.33.6.tar.gz differ
diff --git a/kubernetes-master/.build.manifest b/kubernetes-master/.build.manifest
new file mode 100644
index 0000000..bd4370c
--- /dev/null
+++ b/kubernetes-master/.build.manifest
@@ -0,0 +1,2676 @@
+{
+ "layers": [
+ {
+ "branch": "refs/heads/master\nrefs/heads/stable",
+ "rev": "fcdcea4e5de3e1556c24e6704607862d0ba00a56",
+ "url": "layer:options"
+ },
+ {
+ "branch": "refs/heads/master\nrefs/heads/stable",
+ "rev": "623e69c7b432456fd4364f6e1835424fd6b5425e",
+ "url": "layer:basic"
+ },
+ {
+ "branch": "refs/heads/master\nrefs/heads/stable",
+ "rev": "85d7cc4f7180d19df20e264358e920004cec192b",
+ "url": "layer:snap"
+ },
+ {
+ "branch": "refs/heads/master\nrefs/heads/stable",
+ "rev": "527dd64fc4b9a6b0f8d80a3c2c0b865155050275",
+ "url": "layer:debug"
+ },
+ {
+ "branch": "refs/heads/master\nrefs/heads/stable",
+ "rev": "fb46dec78d390571753d21876bbba689bbbca9e4",
+ "url": "layer:tls-client"
+ },
+ {
+ "branch": "refs/heads/master\nrefs/heads/stable",
+ "rev": "cc5bd3f49b2fa5e6c3ab2336763c313ec8bf083f",
+ "url": "layer:leadership"
+ },
+ {
+ "branch": "refs/heads/master\nrefs/heads/stable",
+ "rev": "f491ebe32b503c9712d2f8cd602dcce18f4aab46",
+ "url": "layer:metrics"
+ },
+ {
+ "branch": "refs/heads/master\nrefs/heads/stable",
+ "rev": "47dfcd4920ef6317850a4837ef0057ab0092a18e",
+ "url": "layer:nagios"
+ },
+ {
+ "branch": "refs/heads/master\nrefs/heads/stable",
+ "rev": "b60102068c6f0ddbeaf8a308549a3e88cfa35688",
+ "url": "layer:cdk-service-kicker"
+ },
+ {
+ "branch": "refs/heads/master\nrefs/heads/stable",
+ "rev": "023c67941e18663a4df49f53edba809f43ba5069",
+ "url": "layer:cis-benchmark"
+ },
+ {
+ "branch": "refs/heads/master\nrefs/heads/stable",
+ "rev": "fa27fc93e0b08000963e83a6bfe49812d890dfcf",
+ "url": "layer:coordinator"
+ },
+ {
+ "branch": "refs/heads/master\nrefs/heads/stable",
+ "rev": "763297a075b3654f261af20c84b940d87f55354e",
+ "url": "layer:kubernetes-common"
+ },
+ {
+ "branch": "refs/heads/master\nrefs/heads/stable",
+ "rev": "809f06c6f6521be59e21859eaebeccd13f4d8c28",
+ "url": "layer:kubernetes-master-worker-base"
+ },
+ {
+ "branch": "refs/heads/master\nrefs/heads/stable",
+ "rev": "60f82079cd9b312d17cb67bf797b6a23d27398f3",
+ "url": "layer:vault-kv"
+ },
+ {
+ "branch": "refs/heads/master\nrefs/heads/stable",
+ "rev": "a7d7b6423db37a47611310039e6ed1929c0a2eab",
+ "url": "layer:status"
+ },
+ {
+ "branch": "refs/heads/master\nrefs/heads/stable",
+ "rev": "76bddfb640ab8767fc7e4a4b73a4a4e781948f34",
+ "url": "layer:apt"
+ },
+ {
+ "branch": "refs/heads/master\nrefs/heads/stable",
+ "rev": "2c4c16cd9e4254494d79aac1d17eacf1620d1b0f",
+ "url": "layer:vaultlocker"
+ },
+ {
+ "branch": "refs/heads/master\nrefs/heads/stable",
+ "rev": "a8f88f16bb7771807a0f7fdb17ee16b0e310fc2b",
+ "url": "layer:hacluster"
+ },
+ {
+ "branch": "refs/heads/stable",
+ "rev": "7946456765a3774e1cab44d124e50cbaa294cf1c",
+ "url": "kubernetes-master"
+ },
+ {
+ "branch": "refs/heads/master\nrefs/heads/stable",
+ "rev": "d9850016d930a6d507b9fd45e2598d327922b140",
+ "url": "interface:tls-certificates"
+ },
+ {
+ "branch": "refs/heads/master\nrefs/heads/stable",
+ "rev": "2e0e1fdea6d83b55078200aacb537d60013ec5bc",
+ "url": "interface:nrpe-external-master"
+ },
+ {
+ "branch": "refs/heads/master\nrefs/heads/stable",
+ "rev": "6f927f10b97f45c566481cf57a29d433f17373e1",
+ "url": "interface:container-runtime"
+ },
+ {
+ "branch": "refs/heads/master\nrefs/heads/stable",
+ "rev": "dceab99ac3739cc7265e386287f100f1bfebc47f",
+ "url": "interface:vault-kv"
+ },
+ {
+ "branch": "refs/heads/master\nrefs/heads/stable",
+ "rev": "6c611a3c61909fda411f7a79af53908ec7bef2c8",
+ "url": "interface:hacluster"
+ },
+ {
+ "branch": "refs/heads/master\nrefs/heads/stable",
+ "rev": "c1061a29297084fa53c2474ba371671186ff3389",
+ "url": "interface:ceph-admin"
+ },
+ {
+ "branch": "(HEAD detached at e247aeff)",
+ "rev": "e247aeff0147756f5c70813d966b3865d0435d20",
+ "url": "interface:ceph-client"
+ },
+ {
+ "branch": "refs/heads/master\nrefs/heads/stable",
+ "rev": "44f244cbd08b86bf2b68bd71c3fb34c7c070c382",
+ "url": "interface:etcd"
+ },
+ {
+ "branch": "refs/heads/master\nrefs/heads/stable",
+ "rev": "632131b1f122daf6fb601fd4c9f1e4dbb1a92e09",
+ "url": "interface:http"
+ },
+ {
+ "branch": "refs/heads/master\nrefs/heads/stable",
+ "rev": "b941b3b542d78ad15aa40937b26c7bf727e1b39b",
+ "url": "interface:kubernetes-cni"
+ },
+ {
+ "branch": "refs/heads/master\nrefs/heads/stable",
+ "rev": "9bc32742b7720a755ada9526424e5d80092e1536",
+ "url": "interface:kube-dns"
+ },
+ {
+ "branch": "refs/heads/master\nrefs/heads/stable",
+ "rev": "2236a52be495a45b8f492bae37bbba50e468ef42",
+ "url": "interface:kube-control"
+ },
+ {
+ "branch": "refs/heads/master\nrefs/heads/stable",
+ "rev": "8e486e329dd12f70c4220874c795c0f0280d99ae",
+ "url": "interface:kube-masters"
+ },
+ {
+ "branch": "refs/heads/master\nrefs/heads/stable",
+ "rev": "5021f8a23f6e6e4cc449d2d02f2d8cb99763ec27",
+ "url": "interface:public-address"
+ },
+ {
+ "branch": "refs/heads/master\nrefs/heads/stable",
+ "rev": "d8d8c7ef17c99ad53383f3cabf4cf5c8191d16f7",
+ "url": "interface:aws-integration"
+ },
+ {
+ "branch": "refs/heads/master\nrefs/heads/stable",
+ "rev": "d8f093cb2930edf5f93678253dca2da70b73b4fb",
+ "url": "interface:gcp-integration"
+ },
+ {
+ "branch": "refs/heads/master\nrefs/heads/stable",
+ "rev": "0d7a994f04b9e92ed847829ce8349b1a9c672e47",
+ "url": "interface:openstack-integration"
+ },
+ {
+ "branch": "refs/heads/master\nrefs/heads/stable",
+ "rev": "d5caea55ced6785f391215ee457c3a964eaf3f4b",
+ "url": "interface:vsphere-integration"
+ },
+ {
+ "branch": "refs/heads/master\nrefs/heads/stable",
+ "rev": "45b79107f7bd5f14b3b956d1f45f659a567b0999",
+ "url": "interface:azure-integration"
+ },
+ {
+ "branch": "refs/heads/master\nrefs/heads/stable",
+ "rev": "53e93b8820899f2251d207ed5d5c3b212ceb64de",
+ "url": "interface:keystone-credentials"
+ },
+ {
+ "branch": "refs/heads/master\nrefs/heads/stable",
+ "rev": "3f775242c16d53243c993d7ba0c896169ad1639e",
+ "url": "interface:prometheus-manual"
+ },
+ {
+ "branch": "refs/heads/master\nrefs/heads/stable",
+ "rev": "e64261e281f012a00d374c6779ec52e488cb8713",
+ "url": "interface:grafana-dashboard"
+ },
+ {
+ "branch": "refs/heads/master\nrefs/heads/stable",
+ "rev": "e9a8c168b81b687790119dd6df2e7a4c1f729c41",
+ "url": "interface:aws-iam"
+ }
+ ],
+ "signatures": {
+ ".build.manifest": [
+ "build",
+ "dynamic",
+ "unchecked"
+ ],
+ ".github/workflows/main.yml": [
+ "kubernetes-master",
+ "static",
+ "26cb9b176329c7e49f6dea57523397f362c2591cbb409bbe099b04ecca0d2401"
+ ],
+ ".gitignore": [
+ "kubernetes-master",
+ "static",
+ "2d275519f0da994b9db61e63c8010917da1f4779199f18f1012f23f9b16b353e"
+ ],
+ ".travis.yml": [
+ "layer:cis-benchmark",
+ "static",
+ "b6dbe144aa288b8a89caf1119b9835b407b234c9b32a1c81013b12a0593a8be2"
+ ],
+ ".travis/profile-update.yaml": [
+ "layer:basic",
+ "static",
+ "731e20aa59bf61c024d317ad630e478301a9386ccc0afe56e6c1c09db07ac83b"
+ ],
+ "CONTRIBUTING.md": [
+ "kubernetes-master",
+ "static",
+ "dc83e4e868d1dbe5b1404faf736d556895a7d4ca9be3bff2d1fdebc0036993d6"
+ ],
+ "LICENSE": [
+ "kubernetes-master",
+ "static",
+ "58d1e17ffe5109a7ae296caafcadfdbe6a7d176f0bc4ab01e12a689b0499d8bd"
+ ],
+ "Makefile": [
+ "layer:basic",
+ "static",
+ "b7ab3a34e5faf79b96a8632039a0ad0aa87f2a9b5f0ba604e007cafb22190301"
+ ],
+ "README.md": [
+ "kubernetes-master",
+ "static",
+ "7ef5c5ab3a462939739fe83c1d7f70154ec763d452e33b7fb4f1af595b67b2cd"
+ ],
+ "actions.yaml": [
+ "kubernetes-master",
+ "dynamic",
+ "18322aaa6c607fb92176d0706335ec94260e4d79d525291b91beba5b689d599d"
+ ],
+ "actions/apply-manifest": [
+ "kubernetes-master",
+ "static",
+ "1e2058489bd361db33cbb5bd66915d2747518d424dad7fac5011bf10c3fe070d"
+ ],
+ "actions/cis-benchmark": [
+ "layer:cis-benchmark",
+ "static",
+ "fd3c1b8ba478b7f933605897ace8ae9f3ee102d9992f46f1e36d95eb1b094b84"
+ ],
+ "actions/create-rbd-pv": [
+ "kubernetes-master",
+ "static",
+ "b962c4e9472c8bc2fb3c86eacdb109293e2b251ae1c80cee29f19549032b73b3"
+ ],
+ "actions/debug": [
+ "layer:debug",
+ "static",
+ "db0a42dae4c5045b2c06385bf22209dfe0e2ded55822ef847d84b01d9ff2b046"
+ ],
+ "actions/get-kubeconfig": [
+ "kubernetes-master",
+ "static",
+ "1e2058489bd361db33cbb5bd66915d2747518d424dad7fac5011bf10c3fe070d"
+ ],
+ "actions/kubectl-actions.py": [
+ "kubernetes-master",
+ "static",
+ "1e2058489bd361db33cbb5bd66915d2747518d424dad7fac5011bf10c3fe070d"
+ ],
+ "actions/namespace-create": [
+ "kubernetes-master",
+ "static",
+ "fc25a90c3bdecc883028f789b5061980591a7bc26398666b8dc3e24e09c9be1c"
+ ],
+ "actions/namespace-delete": [
+ "kubernetes-master",
+ "static",
+ "fc25a90c3bdecc883028f789b5061980591a7bc26398666b8dc3e24e09c9be1c"
+ ],
+ "actions/namespace-list": [
+ "kubernetes-master",
+ "static",
+ "fc25a90c3bdecc883028f789b5061980591a7bc26398666b8dc3e24e09c9be1c"
+ ],
+ "actions/restart": [
+ "kubernetes-master",
+ "static",
+ "72cb46d4971f057fdbbc901599a735a7ce3d61e7ae9b2687c9e9b4cd478e26d0"
+ ],
+ "actions/upgrade": [
+ "kubernetes-master",
+ "static",
+ "a155a181b259f9c70f265a73851f8cecec1174f6b810a3fdd9a0749c2a91e5a5"
+ ],
+ "actions/user-create": [
+ "kubernetes-master",
+ "static",
+ "227d2b783e97fa61bfd33ee5e49487bea1abdaf01d835c6247bddef4ec28c2b7"
+ ],
+ "actions/user-delete": [
+ "kubernetes-master",
+ "static",
+ "227d2b783e97fa61bfd33ee5e49487bea1abdaf01d835c6247bddef4ec28c2b7"
+ ],
+ "actions/user-list": [
+ "kubernetes-master",
+ "static",
+ "227d2b783e97fa61bfd33ee5e49487bea1abdaf01d835c6247bddef4ec28c2b7"
+ ],
+ "actions/user_actions.py": [
+ "kubernetes-master",
+ "static",
+ "227d2b783e97fa61bfd33ee5e49487bea1abdaf01d835c6247bddef4ec28c2b7"
+ ],
+ "bin/charm-env": [
+ "layer:basic",
+ "static",
+ "fb6a20fac4102a6a4b6ffe903fcf666998f9a95a3647e6f9af7a1eeb44e58fd5"
+ ],
+ "bin/layer_option": [
+ "layer:options",
+ "static",
+ "e959bf29da4c5edff28b2602c24113c4df9e25cdc9f2aa3b5d46c8577b2a40cc"
+ ],
+ "config.yaml": [
+ "kubernetes-master",
+ "dynamic",
+ "9bca6cd4212e06f367e3d0b13bfbf86d2e9f6c2e5bc50af19eb3293c6c337f04"
+ ],
+ "copyright": [
+ "kubernetes-master",
+ "static",
+ "1eedc4e165789729bc492abd80e34ac85dcb0ec429eebdf225129b9b0bfc3502"
+ ],
+ "copyright.layer-apt": [
+ "layer:apt",
+ "static",
+ "5123b2d0220fefb4424a463216fb41a6dd7cfad49c9799ba7037f1e74a2fd6bc"
+ ],
+ "copyright.layer-basic": [
+ "layer:basic",
+ "static",
+ "f6740d66fd60b60f2533d9fcb53907078d1e20920a0219afce7182e2a1c97629"
+ ],
+ "copyright.layer-coordinator": [
+ "layer:coordinator",
+ "static",
+ "7d212a095a6143559fb51f26bc40c2ba24b977190f65c7e5c835104f54d5dfc5"
+ ],
+ "copyright.layer-leadership": [
+ "layer:leadership",
+ "static",
+ "8ce407829378fc0f72ce44c7f624e4951c7ccb3db1cfb949bee026b701728cc9"
+ ],
+ "copyright.layer-metrics": [
+ "layer:metrics",
+ "static",
+ "08509dcbade4c20761ba4382ef23c831744dbab1d4a8dd94a1c2b4d4e913334c"
+ ],
+ "copyright.layer-nagios": [
+ "layer:nagios",
+ "static",
+ "47b2363574909e748bcc471d9004780ac084b301c154905654b5b6f088474749"
+ ],
+ "copyright.layer-options": [
+ "layer:options",
+ "static",
+ "f6740d66fd60b60f2533d9fcb53907078d1e20920a0219afce7182e2a1c97629"
+ ],
+ "copyright.layer-snap": [
+ "layer:snap",
+ "static",
+ "c71d239df91726fc519c6eb72d318ec65820627232b2f796219e87dcf35d0ab4"
+ ],
+ "copyright.layer-status": [
+ "layer:status",
+ "static",
+ "7c0e36e618a8544faaaa3f8e0533c2f1f4a18bcacbdd8b99b537742e6b587d58"
+ ],
+ "copyright.layer-vault-kv": [
+ "layer:vault-kv",
+ "static",
+ "7c0e36e618a8544faaaa3f8e0533c2f1f4a18bcacbdd8b99b537742e6b587d58"
+ ],
+ "copyright.layer-vaultlocker": [
+ "layer:vaultlocker",
+ "static",
+ "7c0e36e618a8544faaaa3f8e0533c2f1f4a18bcacbdd8b99b537742e6b587d58"
+ ],
+ "debug-scripts/auth-webhook": [
+ "kubernetes-master",
+ "static",
+ "08185f547fe131bf7ecd7d65fd7cfaa246f613e9ccd6fcf666eb02f6d987a7e8"
+ ],
+ "debug-scripts/charm-unitdata": [
+ "layer:debug",
+ "static",
+ "c952b9d31f3942e4e722cb3e70f5119707b69b8e76cc44e2e906bc6d9aef49b7"
+ ],
+ "debug-scripts/filesystem": [
+ "layer:debug",
+ "static",
+ "d29cc8687f4422d024001c91b1ac756ee6bf8a2a125bc98db1199ba775eb8fd7"
+ ],
+ "debug-scripts/juju-logs": [
+ "layer:debug",
+ "static",
+ "d260b35753a917368cb8c64c1312546a0a40ef49cba84c75bc6369549807c55e"
+ ],
+ "debug-scripts/juju-network-get": [
+ "layer:debug",
+ "static",
+ "6d849a1f8e6569bd0d5ea38299f7937cb8b36a5f505e3532f6c756eabeb8b6c5"
+ ],
+ "debug-scripts/kubectl": [
+ "kubernetes-master",
+ "static",
+ "696848b11b760ab278b02b650ffda2adc8ba75c6701d574bdec0a7a1a75aea7e"
+ ],
+ "debug-scripts/kubernetes-master-services": [
+ "kubernetes-master",
+ "static",
+ "f9930483765f715098c7e6a6b21e08105aff7dea4ecddc68fb8b6480951242d3"
+ ],
+ "debug-scripts/network": [
+ "layer:debug",
+ "static",
+ "714afae5dcb45554ff1f05285501e3b7fcc656c8de51217e263b93dab25a9d2e"
+ ],
+ "debug-scripts/packages": [
+ "layer:debug",
+ "static",
+ "e8177102dc2ca853cb9272c1257cf2cfd5253d2a074e602d07c8bc4ea8e27c75"
+ ],
+ "debug-scripts/sysctl": [
+ "layer:debug",
+ "static",
+ "990035b320e09cc2228e1f2f880e795d51118b2959339eacddff9cbb74349c6a"
+ ],
+ "debug-scripts/systemd": [
+ "layer:debug",
+ "static",
+ "23ddf533198bf5b1ce723acde31ada806aab8539292b514c721d8ec08af74106"
+ ],
+ "debug-scripts/tls-certs": [
+ "layer:tls-client",
+ "static",
+ "ebf7f23ef6e39fb8e664bac2e9429e32aaeb673b4a51751724b835c007e85d3b"
+ ],
+ "docs/status.md": [
+ "layer:status",
+ "static",
+ "975dec9f8c938196e102e954a80226bda293407c4e5ae857c118bf692154702a"
+ ],
+ "docs/vault-kv.md": [
+ "layer:vault-kv",
+ "static",
+ "96d97a5ff204f4ce12efdecea33c1a118deee383c2c067bfcce760b56e00c635"
+ ],
+ "docs/vaultlocker.md": [
+ "layer:vaultlocker",
+ "static",
+ "a4dfe20b9ca14895d3b98658f5848dac61eefa62b2ea6f317ab2c2e65d151372"
+ ],
+ "exec.d/docker-compose/charm-pre-install": [
+ "layer:kubernetes-master-worker-base",
+ "static",
+ "32482c2a88209cbe512990db5fb4deabdcff88282bf7c7dd71a265383139fc77"
+ ],
+ "exec.d/vmware-patch/charm-pre-install": [
+ "kubernetes-master",
+ "static",
+ "9f98f70669ddd949ff83c7b408b678ae170bf41e4faa2828b4d66bd47acca93e"
+ ],
+ "hooks/aws-iam-relation-broken": [
+ "layer:basic",
+ "dynamic",
+ "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7"
+ ],
+ "hooks/aws-iam-relation-changed": [
+ "layer:basic",
+ "dynamic",
+ "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7"
+ ],
+ "hooks/aws-iam-relation-created": [
+ "layer:basic",
+ "dynamic",
+ "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7"
+ ],
+ "hooks/aws-iam-relation-departed": [
+ "layer:basic",
+ "dynamic",
+ "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7"
+ ],
+ "hooks/aws-iam-relation-joined": [
+ "layer:basic",
+ "dynamic",
+ "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7"
+ ],
+ "hooks/aws-relation-broken": [
+ "layer:basic",
+ "dynamic",
+ "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7"
+ ],
+ "hooks/aws-relation-changed": [
+ "layer:basic",
+ "dynamic",
+ "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7"
+ ],
+ "hooks/aws-relation-created": [
+ "layer:basic",
+ "dynamic",
+ "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7"
+ ],
+ "hooks/aws-relation-departed": [
+ "layer:basic",
+ "dynamic",
+ "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7"
+ ],
+ "hooks/aws-relation-joined": [
+ "layer:basic",
+ "dynamic",
+ "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7"
+ ],
+ "hooks/azure-relation-broken": [
+ "layer:basic",
+ "dynamic",
+ "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7"
+ ],
+ "hooks/azure-relation-changed": [
+ "layer:basic",
+ "dynamic",
+ "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7"
+ ],
+ "hooks/azure-relation-created": [
+ "layer:basic",
+ "dynamic",
+ "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7"
+ ],
+ "hooks/azure-relation-departed": [
+ "layer:basic",
+ "dynamic",
+ "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7"
+ ],
+ "hooks/azure-relation-joined": [
+ "layer:basic",
+ "dynamic",
+ "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7"
+ ],
+ "hooks/ceph-client-relation-broken": [
+ "layer:basic",
+ "dynamic",
+ "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7"
+ ],
+ "hooks/ceph-client-relation-changed": [
+ "layer:basic",
+ "dynamic",
+ "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7"
+ ],
+ "hooks/ceph-client-relation-created": [
+ "layer:basic",
+ "dynamic",
+ "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7"
+ ],
+ "hooks/ceph-client-relation-departed": [
+ "layer:basic",
+ "dynamic",
+ "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7"
+ ],
+ "hooks/ceph-client-relation-joined": [
+ "layer:basic",
+ "dynamic",
+ "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7"
+ ],
+ "hooks/ceph-storage-relation-broken": [
+ "layer:basic",
+ "dynamic",
+ "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7"
+ ],
+ "hooks/ceph-storage-relation-changed": [
+ "layer:basic",
+ "dynamic",
+ "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7"
+ ],
+ "hooks/ceph-storage-relation-created": [
+ "layer:basic",
+ "dynamic",
+ "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7"
+ ],
+ "hooks/ceph-storage-relation-departed": [
+ "layer:basic",
+ "dynamic",
+ "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7"
+ ],
+ "hooks/ceph-storage-relation-joined": [
+ "layer:basic",
+ "dynamic",
+ "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7"
+ ],
+ "hooks/certificates-relation-broken": [
+ "layer:basic",
+ "dynamic",
+ "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7"
+ ],
+ "hooks/certificates-relation-changed": [
+ "layer:basic",
+ "dynamic",
+ "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7"
+ ],
+ "hooks/certificates-relation-created": [
+ "layer:basic",
+ "dynamic",
+ "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7"
+ ],
+ "hooks/certificates-relation-departed": [
+ "layer:basic",
+ "dynamic",
+ "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7"
+ ],
+ "hooks/certificates-relation-joined": [
+ "layer:basic",
+ "dynamic",
+ "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7"
+ ],
+ "hooks/cluster-dns-relation-broken": [
+ "layer:basic",
+ "dynamic",
+ "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7"
+ ],
+ "hooks/cluster-dns-relation-changed": [
+ "layer:basic",
+ "dynamic",
+ "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7"
+ ],
+ "hooks/cluster-dns-relation-created": [
+ "layer:basic",
+ "dynamic",
+ "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7"
+ ],
+ "hooks/cluster-dns-relation-departed": [
+ "layer:basic",
+ "dynamic",
+ "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7"
+ ],
+ "hooks/cluster-dns-relation-joined": [
+ "layer:basic",
+ "dynamic",
+ "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7"
+ ],
+ "hooks/cni-relation-broken": [
+ "layer:basic",
+ "dynamic",
+ "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7"
+ ],
+ "hooks/cni-relation-changed": [
+ "layer:basic",
+ "dynamic",
+ "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7"
+ ],
+ "hooks/cni-relation-created": [
+ "layer:basic",
+ "dynamic",
+ "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7"
+ ],
+ "hooks/cni-relation-departed": [
+ "layer:basic",
+ "dynamic",
+ "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7"
+ ],
+ "hooks/cni-relation-joined": [
+ "layer:basic",
+ "dynamic",
+ "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7"
+ ],
+ "hooks/collect-metrics": [
+ "layer:metrics",
+ "static",
+ "139fe18ce4cf2bed2155d3d0fce1c3b4cf1bc2598242cda42b3d772ec9bf8558"
+ ],
+ "hooks/config-changed": [
+ "layer:basic",
+ "dynamic",
+ "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7"
+ ],
+ "hooks/container-runtime-relation-broken": [
+ "layer:basic",
+ "dynamic",
+ "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7"
+ ],
+ "hooks/container-runtime-relation-changed": [
+ "layer:basic",
+ "dynamic",
+ "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7"
+ ],
+ "hooks/container-runtime-relation-created": [
+ "layer:basic",
+ "dynamic",
+ "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7"
+ ],
+ "hooks/container-runtime-relation-departed": [
+ "layer:basic",
+ "dynamic",
+ "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7"
+ ],
+ "hooks/container-runtime-relation-joined": [
+ "layer:basic",
+ "dynamic",
+ "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7"
+ ],
+ "hooks/coordinator-relation-broken": [
+ "layer:basic",
+ "dynamic",
+ "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7"
+ ],
+ "hooks/coordinator-relation-changed": [
+ "layer:coordinator",
+ "static",
+ "e5138d13492aa9a90379e8fce4a85c612481e7bc27a49958edbbfcaaf06f03a6"
+ ],
+ "hooks/coordinator-relation-created": [
+ "layer:basic",
+ "dynamic",
+ "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7"
+ ],
+ "hooks/coordinator-relation-departed": [
+ "layer:coordinator",
+ "static",
+ "e5138d13492aa9a90379e8fce4a85c612481e7bc27a49958edbbfcaaf06f03a6"
+ ],
+ "hooks/coordinator-relation-joined": [
+ "layer:basic",
+ "dynamic",
+ "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7"
+ ],
+ "hooks/dns-provider-relation-broken": [
+ "layer:basic",
+ "dynamic",
+ "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7"
+ ],
+ "hooks/dns-provider-relation-changed": [
+ "layer:basic",
+ "dynamic",
+ "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7"
+ ],
+ "hooks/dns-provider-relation-created": [
+ "layer:basic",
+ "dynamic",
+ "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7"
+ ],
+ "hooks/dns-provider-relation-departed": [
+ "layer:basic",
+ "dynamic",
+ "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7"
+ ],
+ "hooks/dns-provider-relation-joined": [
+ "layer:basic",
+ "dynamic",
+ "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7"
+ ],
+ "hooks/etcd-relation-broken": [
+ "layer:basic",
+ "dynamic",
+ "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7"
+ ],
+ "hooks/etcd-relation-changed": [
+ "layer:basic",
+ "dynamic",
+ "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7"
+ ],
+ "hooks/etcd-relation-created": [
+ "layer:basic",
+ "dynamic",
+ "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7"
+ ],
+ "hooks/etcd-relation-departed": [
+ "layer:basic",
+ "dynamic",
+ "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7"
+ ],
+ "hooks/etcd-relation-joined": [
+ "layer:basic",
+ "dynamic",
+ "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7"
+ ],
+ "hooks/gcp-relation-broken": [
+ "layer:basic",
+ "dynamic",
+ "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7"
+ ],
+ "hooks/gcp-relation-changed": [
+ "layer:basic",
+ "dynamic",
+ "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7"
+ ],
+ "hooks/gcp-relation-created": [
+ "layer:basic",
+ "dynamic",
+ "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7"
+ ],
+ "hooks/gcp-relation-departed": [
+ "layer:basic",
+ "dynamic",
+ "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7"
+ ],
+ "hooks/gcp-relation-joined": [
+ "layer:basic",
+ "dynamic",
+ "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7"
+ ],
+ "hooks/grafana-relation-broken": [
+ "layer:basic",
+ "dynamic",
+ "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7"
+ ],
+ "hooks/grafana-relation-changed": [
+ "layer:basic",
+ "dynamic",
+ "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7"
+ ],
+ "hooks/grafana-relation-created": [
+ "layer:basic",
+ "dynamic",
+ "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7"
+ ],
+ "hooks/grafana-relation-departed": [
+ "layer:basic",
+ "dynamic",
+ "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7"
+ ],
+ "hooks/grafana-relation-joined": [
+ "layer:basic",
+ "dynamic",
+ "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7"
+ ],
+ "hooks/ha-relation-broken": [
+ "layer:basic",
+ "dynamic",
+ "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7"
+ ],
+ "hooks/ha-relation-changed": [
+ "layer:basic",
+ "dynamic",
+ "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7"
+ ],
+ "hooks/ha-relation-created": [
+ "layer:basic",
+ "dynamic",
+ "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7"
+ ],
+ "hooks/ha-relation-departed": [
+ "layer:basic",
+ "dynamic",
+ "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7"
+ ],
+ "hooks/ha-relation-joined": [
+ "layer:basic",
+ "dynamic",
+ "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7"
+ ],
+ "hooks/hook.template": [
+ "layer:basic",
+ "static",
+ "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7"
+ ],
+ "hooks/install": [
+ "layer:basic",
+ "dynamic",
+ "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7"
+ ],
+ "hooks/keystone-credentials-relation-broken": [
+ "layer:basic",
+ "dynamic",
+ "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7"
+ ],
+ "hooks/keystone-credentials-relation-changed": [
+ "layer:basic",
+ "dynamic",
+ "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7"
+ ],
+ "hooks/keystone-credentials-relation-created": [
+ "layer:basic",
+ "dynamic",
+ "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7"
+ ],
+ "hooks/keystone-credentials-relation-departed": [
+ "layer:basic",
+ "dynamic",
+ "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7"
+ ],
+ "hooks/keystone-credentials-relation-joined": [
+ "layer:basic",
+ "dynamic",
+ "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7"
+ ],
+ "hooks/kube-api-endpoint-relation-broken": [
+ "layer:basic",
+ "dynamic",
+ "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7"
+ ],
+ "hooks/kube-api-endpoint-relation-changed": [
+ "layer:basic",
+ "dynamic",
+ "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7"
+ ],
+ "hooks/kube-api-endpoint-relation-created": [
+ "layer:basic",
+ "dynamic",
+ "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7"
+ ],
+ "hooks/kube-api-endpoint-relation-departed": [
+ "layer:basic",
+ "dynamic",
+ "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7"
+ ],
+ "hooks/kube-api-endpoint-relation-joined": [
+ "layer:basic",
+ "dynamic",
+ "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7"
+ ],
+ "hooks/kube-control-relation-broken": [
+ "layer:basic",
+ "dynamic",
+ "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7"
+ ],
+ "hooks/kube-control-relation-changed": [
+ "layer:basic",
+ "dynamic",
+ "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7"
+ ],
+ "hooks/kube-control-relation-created": [
+ "layer:basic",
+ "dynamic",
+ "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7"
+ ],
+ "hooks/kube-control-relation-departed": [
+ "layer:basic",
+ "dynamic",
+ "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7"
+ ],
+ "hooks/kube-control-relation-joined": [
+ "layer:basic",
+ "dynamic",
+ "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7"
+ ],
+ "hooks/kube-masters-relation-broken": [
+ "layer:basic",
+ "dynamic",
+ "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7"
+ ],
+ "hooks/kube-masters-relation-changed": [
+ "layer:basic",
+ "dynamic",
+ "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7"
+ ],
+ "hooks/kube-masters-relation-created": [
+ "layer:basic",
+ "dynamic",
+ "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7"
+ ],
+ "hooks/kube-masters-relation-departed": [
+ "layer:basic",
+ "dynamic",
+ "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7"
+ ],
+ "hooks/kube-masters-relation-joined": [
+ "layer:basic",
+ "dynamic",
+ "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7"
+ ],
+ "hooks/leader-elected": [
+ "layer:coordinator",
+ "static",
+ "e5138d13492aa9a90379e8fce4a85c612481e7bc27a49958edbbfcaaf06f03a6"
+ ],
+ "hooks/leader-settings-changed": [
+ "layer:coordinator",
+ "static",
+ "e5138d13492aa9a90379e8fce4a85c612481e7bc27a49958edbbfcaaf06f03a6"
+ ],
+ "hooks/loadbalancer-relation-broken": [
+ "layer:basic",
+ "dynamic",
+ "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7"
+ ],
+ "hooks/loadbalancer-relation-changed": [
+ "layer:basic",
+ "dynamic",
+ "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7"
+ ],
+ "hooks/loadbalancer-relation-created": [
+ "layer:basic",
+ "dynamic",
+ "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7"
+ ],
+ "hooks/loadbalancer-relation-departed": [
+ "layer:basic",
+ "dynamic",
+ "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7"
+ ],
+ "hooks/loadbalancer-relation-joined": [
+ "layer:basic",
+ "dynamic",
+ "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7"
+ ],
+ "hooks/nrpe-external-master-relation-broken": [
+ "layer:basic",
+ "dynamic",
+ "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7"
+ ],
+ "hooks/nrpe-external-master-relation-changed": [
+ "layer:basic",
+ "dynamic",
+ "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7"
+ ],
+ "hooks/nrpe-external-master-relation-created": [
+ "layer:basic",
+ "dynamic",
+ "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7"
+ ],
+ "hooks/nrpe-external-master-relation-departed": [
+ "layer:basic",
+ "dynamic",
+ "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7"
+ ],
+ "hooks/nrpe-external-master-relation-joined": [
+ "layer:basic",
+ "dynamic",
+ "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7"
+ ],
+ "hooks/openstack-relation-broken": [
+ "layer:basic",
+ "dynamic",
+ "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7"
+ ],
+ "hooks/openstack-relation-changed": [
+ "layer:basic",
+ "dynamic",
+ "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7"
+ ],
+ "hooks/openstack-relation-created": [
+ "layer:basic",
+ "dynamic",
+ "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7"
+ ],
+ "hooks/openstack-relation-departed": [
+ "layer:basic",
+ "dynamic",
+ "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7"
+ ],
+ "hooks/openstack-relation-joined": [
+ "layer:basic",
+ "dynamic",
+ "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7"
+ ],
+ "hooks/post-series-upgrade": [
+ "layer:basic",
+ "dynamic",
+ "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7"
+ ],
+ "hooks/pre-series-upgrade": [
+ "layer:basic",
+ "dynamic",
+ "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7"
+ ],
+ "hooks/prometheus-relation-broken": [
+ "layer:basic",
+ "dynamic",
+ "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7"
+ ],
+ "hooks/prometheus-relation-changed": [
+ "layer:basic",
+ "dynamic",
+ "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7"
+ ],
+ "hooks/prometheus-relation-created": [
+ "layer:basic",
+ "dynamic",
+ "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7"
+ ],
+ "hooks/prometheus-relation-departed": [
+ "layer:basic",
+ "dynamic",
+ "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7"
+ ],
+ "hooks/prometheus-relation-joined": [
+ "layer:basic",
+ "dynamic",
+ "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7"
+ ],
+ "hooks/relations/aws-iam/LICENSE": [
+ "interface:aws-iam",
+ "static",
+ "c71d239df91726fc519c6eb72d318ec65820627232b2f796219e87dcf35d0ab4"
+ ],
+ "hooks/relations/aws-iam/README.md": [
+ "interface:aws-iam",
+ "static",
+ "18f660ddbbfe1f4b27733397391d994b95a5b3d94d75dcaa519482a2af76e9d2"
+ ],
+ "hooks/relations/aws-iam/__init__.py": [
+ "interface:aws-iam",
+ "static",
+ "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"
+ ],
+ "hooks/relations/aws-iam/interface.yaml": [
+ "interface:aws-iam",
+ "static",
+ "d8d9324026cfe5b3a1e2df4d6e8570bd2b1e8279629946170014fee352c25b12"
+ ],
+ "hooks/relations/aws-iam/provides.py": [
+ "interface:aws-iam",
+ "static",
+ "42d6f71a30608e26eccb80743948c45be2ad002bce7fd670d343953bdc639509"
+ ],
+ "hooks/relations/aws-iam/requires.py": [
+ "interface:aws-iam",
+ "static",
+ "65fab5cb68b1ba2d2cee5c11e74a4ed0002321079af4019a8dde00f83d6c8188"
+ ],
+ "hooks/relations/aws-integration/.gitignore": [
+ "interface:aws-integration",
+ "static",
+ "315971ad9cc5d6ada2391f0940e1800149b211a18be3c7a8f396735d7978702b"
+ ],
+ "hooks/relations/aws-integration/LICENSE": [
+ "interface:aws-integration",
+ "static",
+ "cfc7749b96f63bd31c3c42b5c471bf756814053e847c10f3eb003417bc523d30"
+ ],
+ "hooks/relations/aws-integration/README.md": [
+ "interface:aws-integration",
+ "static",
+ "1585d72b136158ce0741fc2ce0d7710c1ec55662f846afe2e768a4708c51057e"
+ ],
+ "hooks/relations/aws-integration/__init__.py": [
+ "interface:aws-integration",
+ "static",
+ "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"
+ ],
+ "hooks/relations/aws-integration/copyright": [
+ "interface:aws-integration",
+ "static",
+ "7c0e36e618a8544faaaa3f8e0533c2f1f4a18bcacbdd8b99b537742e6b587d58"
+ ],
+ "hooks/relations/aws-integration/docs/provides.md": [
+ "interface:aws-integration",
+ "static",
+ "a7669f49156173c27ede87105f6e65a07e1e5e41f3c154a24e1a82f307f65073"
+ ],
+ "hooks/relations/aws-integration/docs/requires.md": [
+ "interface:aws-integration",
+ "static",
+ "09553e5f07f216e5234125fdf38a21af00ab11349cdb788b21703ae72b0aeed1"
+ ],
+ "hooks/relations/aws-integration/interface.yaml": [
+ "interface:aws-integration",
+ "static",
+ "4449f48e5aaa99c0bb3e8e1c9833d11d3b20fc5f81ae1f15b6442af5ec873167"
+ ],
+ "hooks/relations/aws-integration/make_docs": [
+ "interface:aws-integration",
+ "static",
+ "b471fefc7eaa5c377d47b2b63481d6c8f4c5e9d224428efe93c5abbd13a0817d"
+ ],
+ "hooks/relations/aws-integration/provides.py": [
+ "interface:aws-integration",
+ "static",
+ "ee8f91b281d9112999f3d0e1d2ac17964fca3af5102fe5b072f3f3659b932ab7"
+ ],
+ "hooks/relations/aws-integration/pydocmd.yml": [
+ "interface:aws-integration",
+ "static",
+ "8c242cde2b2517c74de8ad6b1b90d2f6d97b2eb86c54edaf2eb8a8f7d32913e8"
+ ],
+ "hooks/relations/aws-integration/requires.py": [
+ "interface:aws-integration",
+ "static",
+ "3006d6a2607bc15507bec3e6144093c6938a51a22eee1f550d714ff702728c39"
+ ],
+ "hooks/relations/azure-integration/.gitignore": [
+ "interface:azure-integration",
+ "static",
+ "9653f2820c79d92ac3518eedd0e1f43ffec128d5df9216c25d906fcba8ee46b8"
+ ],
+ "hooks/relations/azure-integration/LICENSE": [
+ "interface:azure-integration",
+ "static",
+ "cfc7749b96f63bd31c3c42b5c471bf756814053e847c10f3eb003417bc523d30"
+ ],
+ "hooks/relations/azure-integration/README.md": [
+ "interface:azure-integration",
+ "static",
+ "c7799dba9471709e086dcd2ea272ad7a6e33f5058d875ce2bf5b3a6939d4a1e7"
+ ],
+ "hooks/relations/azure-integration/__init__.py": [
+ "interface:azure-integration",
+ "static",
+ "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"
+ ],
+ "hooks/relations/azure-integration/copyright": [
+ "interface:azure-integration",
+ "static",
+ "7c0e36e618a8544faaaa3f8e0533c2f1f4a18bcacbdd8b99b537742e6b587d58"
+ ],
+ "hooks/relations/azure-integration/docs/provides.md": [
+ "interface:azure-integration",
+ "static",
+ "60ae63187cac32c00d9f462f1723c9487960c728beae871f1a409c92196cc1f5"
+ ],
+ "hooks/relations/azure-integration/docs/requires.md": [
+ "interface:azure-integration",
+ "static",
+ "b01e313c8ce3d02093e851bd84d5e8b7ae77b300c4b06b5048bddc78c1ad3eb3"
+ ],
+ "hooks/relations/azure-integration/interface.yaml": [
+ "interface:azure-integration",
+ "static",
+ "cea5bfd87c278bd3f2e8dc00e654930f06d2bd91ef731a063edea14b04d9128a"
+ ],
+ "hooks/relations/azure-integration/make_docs": [
+ "interface:azure-integration",
+ "static",
+ "e76f4a64c2fdc4a9f97a57d6515b4a25f9404d7043f2792db5206bc44213927c"
+ ],
+ "hooks/relations/azure-integration/provides.py": [
+ "interface:azure-integration",
+ "static",
+ "a3a1de7f79c5f2cc37f2dff450d8e9b2ce36c63c0328bb6bedd2ade7519a7442"
+ ],
+ "hooks/relations/azure-integration/pydocmd.yml": [
+ "interface:azure-integration",
+ "static",
+ "4c17085efb4ec328891b49257413eed4d9a552eeea8e589509e48081effe51ed"
+ ],
+ "hooks/relations/azure-integration/requires.py": [
+ "interface:azure-integration",
+ "static",
+ "112bfa057cdcf91a812dea080330e9323f4d7e4b1bcacfd69b3ad95dd2274cbb"
+ ],
+ "hooks/relations/ceph-admin/.gitignore": [
+ "interface:ceph-admin",
+ "static",
+ "38da8f2fbf99eb7b9ec38ea900ed13681803bbfa3482929cfeeaec86c591aa50"
+ ],
+ "hooks/relations/ceph-admin/README.md": [
+ "interface:ceph-admin",
+ "static",
+ "805e4836c511fd78ac54e9377ac20430b736bcb96baf4d1106c6779c7c2ae4f4"
+ ],
+ "hooks/relations/ceph-admin/__init__.py": [
+ "interface:ceph-admin",
+ "static",
+ "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"
+ ],
+ "hooks/relations/ceph-admin/interface.yaml": [
+ "interface:ceph-admin",
+ "static",
+ "c9dc8e16173423a4a13dbfa247c48d587c08097529a7060e7cd64b75ef53e19c"
+ ],
+ "hooks/relations/ceph-admin/requires.py": [
+ "interface:ceph-admin",
+ "static",
+ "3ccb57e3d033b0f281a0ebc60d64e1bc43e6e3fd008ba089c36b40955731a372"
+ ],
+ "hooks/relations/ceph-client/README.md": [
+ "interface:ceph-client",
+ "static",
+ "475c8bff2d3041b7e22f4870bb6c8d73ccd88a53f53471dddae8ec5572b6caa2"
+ ],
+ "hooks/relations/ceph-client/__init__.py": [
+ "interface:ceph-client",
+ "static",
+ "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"
+ ],
+ "hooks/relations/ceph-client/interface.yaml": [
+ "interface:ceph-client",
+ "static",
+ "963f7b670b81d9ef1acc4c54a9ee4593f33c1864e199cfcb6cbf9deb15a0f0c4"
+ ],
+ "hooks/relations/ceph-client/lib/base_provides.py": [
+ "interface:ceph-client",
+ "static",
+ "749435e1ea8794722f72838c97536090bc89f423c852040c2131dfb9dc71e0f8"
+ ],
+ "hooks/relations/ceph-client/lib/base_requires.py": [
+ "interface:ceph-client",
+ "static",
+ "1e7ac024219e39ac3840a913891b17c2e32d69c2a74bad4464b4e67ef5bd80c0"
+ ],
+ "hooks/relations/ceph-client/provides.py": [
+ "interface:ceph-client",
+ "static",
+ "ede8c70822bca0fd8ec5da9586ae390afa7e14878e158081fbe2c7ce8bc2f270"
+ ],
+ "hooks/relations/ceph-client/requires.py": [
+ "interface:ceph-client",
+ "static",
+ "ddeebe898592169ffc8b54f8536ed1387981401cf43e40d90972d46bc5353dc6"
+ ],
+ "hooks/relations/container-runtime/.gitignore": [
+ "interface:container-runtime",
+ "static",
+ "a2ebfecdb6c1b58267fbe97e6e2ac02c2b963df7673fc1047270f0f0cff16732"
+ ],
+ "hooks/relations/container-runtime/LICENSE": [
+ "interface:container-runtime",
+ "static",
+ "c71d239df91726fc519c6eb72d318ec65820627232b2f796219e87dcf35d0ab4"
+ ],
+ "hooks/relations/container-runtime/README.md": [
+ "interface:container-runtime",
+ "static",
+ "44273265818229d2c858c3af0e0eee3a7df05aaa9ab20d28c3872190d4b48611"
+ ],
+ "hooks/relations/container-runtime/__init__.py": [
+ "interface:container-runtime",
+ "static",
+ "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"
+ ],
+ "hooks/relations/container-runtime/interface.yaml": [
+ "interface:container-runtime",
+ "static",
+ "e5343dcb11a6817a6050df4ea1c463eeaa0dd4777098566d4e27b056775426c6"
+ ],
+ "hooks/relations/container-runtime/provides.py": [
+ "interface:container-runtime",
+ "static",
+ "4e818da222f507604179a828629787a1250083c847277f6b5b8e028cfbbb6d06"
+ ],
+ "hooks/relations/container-runtime/requires.py": [
+ "interface:container-runtime",
+ "static",
+ "95285168b02f1f70be15c03098833a85e60fa1658ed72a46acd42e8e85ded761"
+ ],
+ "hooks/relations/coordinator/peers.py": [
+ "layer:coordinator",
+ "static",
+ "d615c442396422a30a0c5f7639750d15bb59247ae5d9362c4f5dc8dd2cc7fff2"
+ ],
+ "hooks/relations/etcd/.gitignore": [
+ "interface:etcd",
+ "static",
+ "cf237c7aff44efbe6e502e645c3e06da03a69d7bdeb43392108ef3348143417e"
+ ],
+ "hooks/relations/etcd/README.md": [
+ "interface:etcd",
+ "static",
+ "93873d073f5f5302d352e09321aaf87458556e9730f89e1c682699c1d0db2386"
+ ],
+ "hooks/relations/etcd/__init__.py": [
+ "interface:etcd",
+ "static",
+ "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"
+ ],
+ "hooks/relations/etcd/interface.yaml": [
+ "interface:etcd",
+ "static",
+ "ba9f723b57a434f7efb2c06abec4167cd412c16da5f496a477dd7691e9a715be"
+ ],
+ "hooks/relations/etcd/peers.py": [
+ "interface:etcd",
+ "static",
+ "99419c3d139fb5bb90021e0482f9e7ac2cfb776fb7af79b46209c6a75b36e834"
+ ],
+ "hooks/relations/etcd/provides.py": [
+ "interface:etcd",
+ "static",
+ "3db1f644ab669e2dec59d59b61de63b721bc05b38fe646e525fff8f0d60982f9"
+ ],
+ "hooks/relations/etcd/requires.py": [
+ "interface:etcd",
+ "static",
+ "8ffc1a094807fd36a1d1428b0a07b2428074134d46086066ecd6c0acd9fcd13e"
+ ],
+ "hooks/relations/gcp-integration/.gitignore": [
+ "interface:gcp-integration",
+ "static",
+ "9653f2820c79d92ac3518eedd0e1f43ffec128d5df9216c25d906fcba8ee46b8"
+ ],
+ "hooks/relations/gcp-integration/LICENSE": [
+ "interface:gcp-integration",
+ "static",
+ "cfc7749b96f63bd31c3c42b5c471bf756814053e847c10f3eb003417bc523d30"
+ ],
+ "hooks/relations/gcp-integration/README.md": [
+ "interface:gcp-integration",
+ "static",
+ "dab3f4a03f02dec0095883054780e3e3f1bf63262b06a9fd499364a3db8b1e97"
+ ],
+ "hooks/relations/gcp-integration/__init__.py": [
+ "interface:gcp-integration",
+ "static",
+ "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"
+ ],
+ "hooks/relations/gcp-integration/copyright": [
+ "interface:gcp-integration",
+ "static",
+ "7c0e36e618a8544faaaa3f8e0533c2f1f4a18bcacbdd8b99b537742e6b587d58"
+ ],
+ "hooks/relations/gcp-integration/docs/provides.md": [
+ "interface:gcp-integration",
+ "static",
+ "a67cda4094b4d601c8de63cf099ba2e83fecf3a8382e88f44e58b98be8872fa6"
+ ],
+ "hooks/relations/gcp-integration/docs/requires.md": [
+ "interface:gcp-integration",
+ "static",
+ "d7e6d7dc90b74d35bf2bd10b00b3ba289ab856dc79ec51046508a85b9dda35a3"
+ ],
+ "hooks/relations/gcp-integration/interface.yaml": [
+ "interface:gcp-integration",
+ "static",
+ "368e8ade9267b905dcb2e6843e7ed61bd6d246f0b0c18942e729f546d5db2260"
+ ],
+ "hooks/relations/gcp-integration/make_docs": [
+ "interface:gcp-integration",
+ "static",
+ "5bf011da5045c31da97a67b8633d30ea90adc6c0d4d823f839fce6e07e5fe222"
+ ],
+ "hooks/relations/gcp-integration/provides.py": [
+ "interface:gcp-integration",
+ "static",
+ "839f15cf978cf94343772889846ad3e2b8375372ef25ed08036207e5608b1f48"
+ ],
+ "hooks/relations/gcp-integration/pydocmd.yml": [
+ "interface:gcp-integration",
+ "static",
+ "2d5a524cbde5ccf732b67382a85deb7c26dfb92315c30d26c2b2d5632a2a8f38"
+ ],
+ "hooks/relations/gcp-integration/requires.py": [
+ "interface:gcp-integration",
+ "static",
+ "79c75c6c76b37bc5ac486ac2e14f853223c4c603850d2f231f187ab255cbdbf0"
+ ],
+ "hooks/relations/grafana-dashboard/.gitignore": [
+ "interface:grafana-dashboard",
+ "static",
+ "5567034242cd31b5fb3a0d7e1f4cee8a2bb7454d4b35d4051f333145b09ff881"
+ ],
+ "hooks/relations/grafana-dashboard/LICENSE": [
+ "interface:grafana-dashboard",
+ "static",
+ "cfc7749b96f63bd31c3c42b5c471bf756814053e847c10f3eb003417bc523d30"
+ ],
+ "hooks/relations/grafana-dashboard/README.md": [
+ "interface:grafana-dashboard",
+ "static",
+ "d46e6c55423b4f0e28f803702632739582f3c0fad5d0427346f210eba8879685"
+ ],
+ "hooks/relations/grafana-dashboard/__init__.py": [
+ "interface:grafana-dashboard",
+ "static",
+ "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"
+ ],
+ "hooks/relations/grafana-dashboard/common.py": [
+ "interface:grafana-dashboard",
+ "static",
+ "965f19c07d3475d7fe5a21235dc0cf1a27f11da9dad498d0cd1a51260b999aa3"
+ ],
+ "hooks/relations/grafana-dashboard/copyright": [
+ "interface:grafana-dashboard",
+ "static",
+ "ee9809231ae81b9efc2b44b52aab2f6c8e4800319fdce5acad537b0eac556de4"
+ ],
+ "hooks/relations/grafana-dashboard/docs/common.md": [
+ "interface:grafana-dashboard",
+ "static",
+ "ab69cc6e293b66175dfeee09707f8d02659ae5ba5b9aa4c441295a1025db12f7"
+ ],
+ "hooks/relations/grafana-dashboard/docs/provides.md": [
+ "interface:grafana-dashboard",
+ "static",
+ "626b5655ce1e9f7733c86379fe67709e840b760046d899e5d761b034f94d939e"
+ ],
+ "hooks/relations/grafana-dashboard/docs/requires.md": [
+ "interface:grafana-dashboard",
+ "static",
+ "4f78cff5a0395aff8477267e925066bfa93654eaeb4ba812c682f968171cca55"
+ ],
+ "hooks/relations/grafana-dashboard/interface.yaml": [
+ "interface:grafana-dashboard",
+ "static",
+ "97e4c9a33360708668aa0330323fe9e9e5e95fa5a1e02d4f6b8e8dc60e155b52"
+ ],
+ "hooks/relations/grafana-dashboard/provides.py": [
+ "interface:grafana-dashboard",
+ "static",
+ "cd63928094e6d34be92944ce65cb5b01ff9ba2bd9646036d006fa743a3c0fdb5"
+ ],
+ "hooks/relations/grafana-dashboard/requires.py": [
+ "interface:grafana-dashboard",
+ "static",
+ "b071b9e66a3206351f563d7a4d160499b13a6af29d80930cb01720b5974e1dd2"
+ ],
+ "hooks/relations/hacluster/.stestr.conf": [
+ "interface:hacluster",
+ "static",
+ "46965969e6df6ac729b7dac68d57bc4e677e9f4d79d445be77f54ca3b9e58774"
+ ],
+ "hooks/relations/hacluster/README.md": [
+ "interface:hacluster",
+ "static",
+ "7fad91e409c6e559cdb76d11c89c325531adc25679049a629a28c4f890755f1f"
+ ],
+ "hooks/relations/hacluster/__init__.py": [
+ "interface:hacluster",
+ "static",
+ "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"
+ ],
+ "hooks/relations/hacluster/common.py": [
+ "interface:hacluster",
+ "static",
+ "cd9f765e2c3ff64a592c8e144a36783e48c1033413cbece2c4f579195cb7ff5e"
+ ],
+ "hooks/relations/hacluster/copyright": [
+ "interface:hacluster",
+ "static",
+ "7a296596102da98cecee289a195e00d6af44241911321699b3d4d4af93f11893"
+ ],
+ "hooks/relations/hacluster/interface.yaml": [
+ "interface:hacluster",
+ "static",
+ "51bcf4e36b973600d567cf96783bdee3eaa6e164275f70b69e2e47e3468c8c8b"
+ ],
+ "hooks/relations/hacluster/requires.py": [
+ "interface:hacluster",
+ "static",
+ "eb752e55844ffbfddf9a98e80ac282ff832ab667c1a33b743940babbd048bb17"
+ ],
+ "hooks/relations/hacluster/test-requirements.txt": [
+ "interface:hacluster",
+ "static",
+ "2c37d84ada8578ba5ed44f99f10470710c91d370052a867541f31b5c6a357b07"
+ ],
+ "hooks/relations/http/.gitignore": [
+ "interface:http",
+ "static",
+ "83b4ca18cc39800b1d260b5633cd0252e21501b21e7c33e718db44f1a68a09b8"
+ ],
+ "hooks/relations/http/README.md": [
+ "interface:http",
+ "static",
+ "9c95320ad040745374fc03e972077f52c27e07eb0386ec93ae19bd50dca24c0d"
+ ],
+ "hooks/relations/http/__init__.py": [
+ "interface:http",
+ "static",
+ "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"
+ ],
+ "hooks/relations/http/interface.yaml": [
+ "interface:http",
+ "static",
+ "d0b64038b85b7791ee4f3a42d73ffc8c208f206f73f899cbf33a519d12f9ad13"
+ ],
+ "hooks/relations/http/provides.py": [
+ "interface:http",
+ "static",
+ "8c72cd8a5a6ea24f53b6dba11f4353c75265bfa7d3ecc2dd096c8963eab8c877"
+ ],
+ "hooks/relations/http/requires.py": [
+ "interface:http",
+ "static",
+ "76cc886368eaf9c2403a6dc46b40531c3f4eaf67b08829f890c57cb645430abd"
+ ],
+ "hooks/relations/keystone-credentials/.gitignore": [
+ "interface:keystone-credentials",
+ "static",
+ "ddc61d479977d318682280fa2b18bcb6cb9a1b0e0e7897cea3d14d5c8d222e68"
+ ],
+ "hooks/relations/keystone-credentials/.gitreview": [
+ "interface:keystone-credentials",
+ "static",
+ "79122a6758c1a504d6caa55ca329e9028caf5d9a52516a4a77be2a1e676d45c8"
+ ],
+ "hooks/relations/keystone-credentials/.stestr.conf": [
+ "interface:keystone-credentials",
+ "static",
+ "46965969e6df6ac729b7dac68d57bc4e677e9f4d79d445be77f54ca3b9e58774"
+ ],
+ "hooks/relations/keystone-credentials/.zuul.yaml": [
+ "interface:keystone-credentials",
+ "static",
+ "c240e43920d05095cf5a0a9aa648685676c12bdcbb3874b79bbec5b5e7b18b7c"
+ ],
+ "hooks/relations/keystone-credentials/__init__.py": [
+ "interface:keystone-credentials",
+ "static",
+ "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"
+ ],
+ "hooks/relations/keystone-credentials/copyright": [
+ "interface:keystone-credentials",
+ "static",
+ "7a296596102da98cecee289a195e00d6af44241911321699b3d4d4af93f11893"
+ ],
+ "hooks/relations/keystone-credentials/interface.yaml": [
+ "interface:keystone-credentials",
+ "static",
+ "daa50ddd8948bdd6d6f8838498aa4251219f3bbe23344a05477764e6fc5ca33f"
+ ],
+ "hooks/relations/keystone-credentials/provides.py": [
+ "interface:keystone-credentials",
+ "static",
+ "67b853e714b2f43cbd671a4d6c1b85330938a6d8e24da9bf88236efcbe033499"
+ ],
+ "hooks/relations/keystone-credentials/requires.py": [
+ "interface:keystone-credentials",
+ "static",
+ "92d591067b288de5336e6228a2c84be5839354bfd050d7ce84df62c03a813785"
+ ],
+ "hooks/relations/keystone-credentials/test-requirements.txt": [
+ "interface:keystone-credentials",
+ "static",
+ "38a6e3c379a0689eb8f95d0107865847d528c020561669aad4287e1108df6ca7"
+ ],
+ "hooks/relations/kube-control/.travis.yml": [
+ "interface:kube-control",
+ "static",
+ "c2bd1b88f26c88b883696cca155c28671359a256ed48b90a9ea724b376f2a829"
+ ],
+ "hooks/relations/kube-control/README.md": [
+ "interface:kube-control",
+ "static",
+ "66ee58f59efceefa21f7f2d7f88c1d75c07a16bbec8d09a83a7fda6373eab421"
+ ],
+ "hooks/relations/kube-control/__init__.py": [
+ "interface:kube-control",
+ "static",
+ "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"
+ ],
+ "hooks/relations/kube-control/interface.yaml": [
+ "interface:kube-control",
+ "static",
+ "07e3d781283ecbb59c780cc8e4aeb9f030f22d2db6c28d731b74a36ab126960d"
+ ],
+ "hooks/relations/kube-control/provides.py": [
+ "interface:kube-control",
+ "static",
+ "5dffb8504d0993ad756b0631fd82ef465dc9127641b448bea76596fc6f3e55c4"
+ ],
+ "hooks/relations/kube-control/requires.py": [
+ "interface:kube-control",
+ "static",
+ "496ed9b2d4f6fef2e1e26b53b8f8c97e67b9a96b4fcfcb40ef671d2469b983e3"
+ ],
+ "hooks/relations/kube-dns/README.md": [
+ "interface:kube-dns",
+ "static",
+ "f02265c0931c5582cbad911050ee1578c370e4ecaffdbf56d11505f97ce44fee"
+ ],
+ "hooks/relations/kube-dns/__init__.py": [
+ "interface:kube-dns",
+ "static",
+ "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"
+ ],
+ "hooks/relations/kube-dns/interface.yaml": [
+ "interface:kube-dns",
+ "static",
+ "e4ca8faafe4cce43eed862d35346780df4cba4eb243baaf5aecd891514deb26d"
+ ],
+ "hooks/relations/kube-dns/provides.py": [
+ "interface:kube-dns",
+ "static",
+ "f0ea4f0610779a70860d5257f0760f62ea2ec682c5f005ba5afff92c9824aa36"
+ ],
+ "hooks/relations/kube-dns/requires.py": [
+ "interface:kube-dns",
+ "static",
+ "38b819b7ee98c3c38142d2cc8122dedd9d8c0f34767c5cc11392a564f38db370"
+ ],
+ "hooks/relations/kube-masters/README.md": [
+ "interface:kube-masters",
+ "static",
+ "37f61924210be49fc9c66595512a307e9112d09ded09c628506571c3b4f6961c"
+ ],
+ "hooks/relations/kube-masters/__init__.py": [
+ "interface:kube-masters",
+ "static",
+ "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"
+ ],
+ "hooks/relations/kube-masters/interface.yaml": [
+ "interface:kube-masters",
+ "static",
+ "55041c3c142523c10e82ece780b178de041f1b5a1d7c2c106370d81b6cad1d42"
+ ],
+ "hooks/relations/kube-masters/peers.py": [
+ "interface:kube-masters",
+ "static",
+ "2237030141571ef6acb1934a724f1620164bb2ddf08450aab23d14b0dc7b84b7"
+ ],
+ "hooks/relations/kubernetes-cni/.gitignore": [
+ "interface:kubernetes-cni",
+ "static",
+ "cf237c7aff44efbe6e502e645c3e06da03a69d7bdeb43392108ef3348143417e"
+ ],
+ "hooks/relations/kubernetes-cni/.travis.yml": [
+ "interface:kubernetes-cni",
+ "static",
+ "c2bd1b88f26c88b883696cca155c28671359a256ed48b90a9ea724b376f2a829"
+ ],
+ "hooks/relations/kubernetes-cni/README.md": [
+ "interface:kubernetes-cni",
+ "static",
+ "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"
+ ],
+ "hooks/relations/kubernetes-cni/__init__.py": [
+ "interface:kubernetes-cni",
+ "static",
+ "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"
+ ],
+ "hooks/relations/kubernetes-cni/interface.yaml": [
+ "interface:kubernetes-cni",
+ "static",
+ "03affdaf7e879adfdf8c434aa31d40faa6d2872faa7dfd93a5d3a1ebae02487d"
+ ],
+ "hooks/relations/kubernetes-cni/provides.py": [
+ "interface:kubernetes-cni",
+ "static",
+ "4c3fc3f06a42a2f67fc03c4bc1b4c617021dc1ebb7111527ce6d9cd523b0c40e"
+ ],
+ "hooks/relations/kubernetes-cni/requires.py": [
+ "interface:kubernetes-cni",
+ "static",
+ "c5fdd7a0eae100833ae6c79474f931803466cd5b206cf8f456cd6f2716d1d2fa"
+ ],
+ "hooks/relations/nrpe-external-master/README.md": [
+ "interface:nrpe-external-master",
+ "static",
+ "d8ed3bc7334f6581b12b6091923f58e6f5ef62075a095a4e78fb8f434a948636"
+ ],
+ "hooks/relations/nrpe-external-master/__init__.py": [
+ "interface:nrpe-external-master",
+ "static",
+ "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"
+ ],
+ "hooks/relations/nrpe-external-master/interface.yaml": [
+ "interface:nrpe-external-master",
+ "static",
+ "894f24ba56148044dae5b7febf874b427d199239bcbe1f2f55c3db06bb77b5f0"
+ ],
+ "hooks/relations/nrpe-external-master/provides.py": [
+ "interface:nrpe-external-master",
+ "static",
+ "e6ba708d05b227b139a86be59c83ed95a2bad030bc81e5819167ba5e1e67ecd4"
+ ],
+ "hooks/relations/nrpe-external-master/requires.py": [
+ "interface:nrpe-external-master",
+ "static",
+ "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"
+ ],
+ "hooks/relations/openstack-integration/.gitignore": [
+ "interface:openstack-integration",
+ "static",
+ "9653f2820c79d92ac3518eedd0e1f43ffec128d5df9216c25d906fcba8ee46b8"
+ ],
+ "hooks/relations/openstack-integration/LICENSE": [
+ "interface:openstack-integration",
+ "static",
+ "cfc7749b96f63bd31c3c42b5c471bf756814053e847c10f3eb003417bc523d30"
+ ],
+ "hooks/relations/openstack-integration/README.md": [
+ "interface:openstack-integration",
+ "static",
+ "ca58e21bd973f6e65f7a8a06b4aeabd50bf137ab6fab9c8defa8789b02df3aa5"
+ ],
+ "hooks/relations/openstack-integration/__init__.py": [
+ "interface:openstack-integration",
+ "static",
+ "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"
+ ],
+ "hooks/relations/openstack-integration/copyright": [
+ "interface:openstack-integration",
+ "static",
+ "7c0e36e618a8544faaaa3f8e0533c2f1f4a18bcacbdd8b99b537742e6b587d58"
+ ],
+ "hooks/relations/openstack-integration/docs/provides.md": [
+ "interface:openstack-integration",
+ "static",
+ "ec4b81da3dfeac892f94053d753b56e504f5fd9c6ec4e743efa40efade3aa651"
+ ],
+ "hooks/relations/openstack-integration/docs/requires.md": [
+ "interface:openstack-integration",
+ "static",
+ "95424fe767a26e3208800b4099f8768212b0a72b989ee145f181b67d678e3bbe"
+ ],
+ "hooks/relations/openstack-integration/interface.yaml": [
+ "interface:openstack-integration",
+ "static",
+ "11b07a41bd2e24765231c4b7c7218da15f2173398d8d73698ecb210e599d02f6"
+ ],
+ "hooks/relations/openstack-integration/make_docs": [
+ "interface:openstack-integration",
+ "static",
+ "a564aac288cc0bf4ff14418a341f11b065988c2b64adf93ec451e09dd92dcea5"
+ ],
+ "hooks/relations/openstack-integration/provides.py": [
+ "interface:openstack-integration",
+ "static",
+ "ad09fc79fa5eb7a142477d5bf7f48b53f6ede389708de0bb297c6d009aba502b"
+ ],
+ "hooks/relations/openstack-integration/pydocmd.yml": [
+ "interface:openstack-integration",
+ "static",
+ "3568f8a3c1446dfd736f31050e2b470bf125cc41717d156a4b866c7ea53861be"
+ ],
+ "hooks/relations/openstack-integration/requires.py": [
+ "interface:openstack-integration",
+ "static",
+ "a15f5a7ffa2391f75da6bde0007700ee75f058e62430924312ff39efc6ecea6b"
+ ],
+ "hooks/relations/prometheus-manual/.gitignore": [
+ "interface:prometheus-manual",
+ "static",
+ "5567034242cd31b5fb3a0d7e1f4cee8a2bb7454d4b35d4051f333145b09ff881"
+ ],
+ "hooks/relations/prometheus-manual/LICENSE": [
+ "interface:prometheus-manual",
+ "static",
+ "cfc7749b96f63bd31c3c42b5c471bf756814053e847c10f3eb003417bc523d30"
+ ],
+ "hooks/relations/prometheus-manual/README.md": [
+ "interface:prometheus-manual",
+ "static",
+ "506d4a334ebbe40905c76fc74e4ab5285d836ac28c7d1087b85b5a304960be2e"
+ ],
+ "hooks/relations/prometheus-manual/__init__.py": [
+ "interface:prometheus-manual",
+ "static",
+ "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"
+ ],
+ "hooks/relations/prometheus-manual/common.py": [
+ "interface:prometheus-manual",
+ "static",
+ "013107b3bc8f148779ada8097db725ac9c3d22c605a5794cb8bae95cace9fa4c"
+ ],
+ "hooks/relations/prometheus-manual/copyright": [
+ "interface:prometheus-manual",
+ "static",
+ "ee9809231ae81b9efc2b44b52aab2f6c8e4800319fdce5acad537b0eac556de4"
+ ],
+ "hooks/relations/prometheus-manual/docs/common.md": [
+ "interface:prometheus-manual",
+ "static",
+ "91b9e9300a2fef2ce1112cdc57a224ee06ab513ea127edc8a59b6ce9c715cd25"
+ ],
+ "hooks/relations/prometheus-manual/docs/provides.md": [
+ "interface:prometheus-manual",
+ "static",
+ "6b226c2587dbf5b304e6466f2b31bbb208512896b2ab057b11b646cf3501e292"
+ ],
+ "hooks/relations/prometheus-manual/docs/requires.md": [
+ "interface:prometheus-manual",
+ "static",
+ "0100bdc38afd892336747eac005260bc9656ffc1a40f9fb0faef824ab07c1021"
+ ],
+ "hooks/relations/prometheus-manual/interface.yaml": [
+ "interface:prometheus-manual",
+ "static",
+ "4a268318ee2adcc8a5a3482d49595d3805f94bf8976bd1ee4a4f7f9db89e472e"
+ ],
+ "hooks/relations/prometheus-manual/provides.py": [
+ "interface:prometheus-manual",
+ "static",
+ "232917934637d8905ddcd448ce51c2c30dcb9217e043592be356d510c09190c4"
+ ],
+ "hooks/relations/prometheus-manual/requires.py": [
+ "interface:prometheus-manual",
+ "static",
+ "0492a9f1037f39479f2e607162aa48ca67451e00124541a7d56f7e0a920903e0"
+ ],
+ "hooks/relations/public-address/README.md": [
+ "interface:public-address",
+ "static",
+ "7225effe61bfd8571447b8b685a2ecb52be17431b3066a5306330954c4cb064d"
+ ],
+ "hooks/relations/public-address/__init__.py": [
+ "interface:public-address",
+ "static",
+ "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"
+ ],
+ "hooks/relations/public-address/interface.yaml": [
+ "interface:public-address",
+ "static",
+ "49d6777a54aa84c7d3be8d531be237564e90f2e4cb2be05ef5617a372a382340"
+ ],
+ "hooks/relations/public-address/provides.py": [
+ "interface:public-address",
+ "static",
+ "7c99b0fe987d38773ed3e67c0378fdb78748c04d6895489cd4bca40aaeb051b2"
+ ],
+ "hooks/relations/public-address/requires.py": [
+ "interface:public-address",
+ "static",
+ "d6a7c6c0762d29a5db19afb4cf82af50812988d5e19a3a48fcbe8b0f6fec12a5"
+ ],
+ "hooks/relations/tls-certificates/.gitignore": [
+ "interface:tls-certificates",
+ "static",
+ "b485e74def213c534676224e655e9276b62d401ebc643508ddc545dd335cb6dc"
+ ],
+ "hooks/relations/tls-certificates/README.md": [
+ "interface:tls-certificates",
+ "static",
+ "6851227de8fcca7edfd504159dbe3e3af31080af64df46f3d3b345da7630827a"
+ ],
+ "hooks/relations/tls-certificates/__init__.py": [
+ "interface:tls-certificates",
+ "static",
+ "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"
+ ],
+ "hooks/relations/tls-certificates/docs/common.md": [
+ "interface:tls-certificates",
+ "static",
+ "5e91d6637fc0ccc50af2776de9e59a0f8098244b627816b2e18fabb266e980ff"
+ ],
+ "hooks/relations/tls-certificates/docs/provides.md": [
+ "interface:tls-certificates",
+ "static",
+ "5c12dfca99b5c15ba10b4e7f7cff4cb4c9b621b198deba5f2397d3c837d035fe"
+ ],
+ "hooks/relations/tls-certificates/docs/requires.md": [
+ "interface:tls-certificates",
+ "static",
+ "148dd1de163d75253f0a9d3c35e108dcaacbc9bdf97e47186743e6c82a67b62e"
+ ],
+ "hooks/relations/tls-certificates/interface.yaml": [
+ "interface:tls-certificates",
+ "static",
+ "e412e54b1d327bad15a882f7f0bf996212090db576b863cc9cff7a68afc0e4fa"
+ ],
+ "hooks/relations/tls-certificates/make_docs": [
+ "interface:tls-certificates",
+ "static",
+ "3671543bddc9d277171263310e404df3f11660429582cb27b39b7e7ec8757a37"
+ ],
+ "hooks/relations/tls-certificates/provides.py": [
+ "interface:tls-certificates",
+ "static",
+ "be2a4b9a411c770989c529fd887070ad91649481a13f5239cfd8751f234b637c"
+ ],
+ "hooks/relations/tls-certificates/pydocmd.yml": [
+ "interface:tls-certificates",
+ "static",
+ "48a233f60a89f87d56e9bc715e05766f5d39bbea2bc8741ed31f67b30c8cfcb8"
+ ],
+ "hooks/relations/tls-certificates/requires.py": [
+ "interface:tls-certificates",
+ "static",
+ "442d773112079bc674d3e6be75b00323fcad7efd2f03613a1972b575dd438dba"
+ ],
+ "hooks/relations/tls-certificates/tls_certificates_common.py": [
+ "interface:tls-certificates",
+ "static",
+ "068bd32ba69bfa514e1da386919d18b348ee678b40c372f275c9110f2cc4677c"
+ ],
+ "hooks/relations/vault-kv/.gitignore": [
+ "interface:vault-kv",
+ "static",
+ "996ad92a4713473baf27997a048901fdfa0039b9497bcc916f3f50b9000c1b96"
+ ],
+ "hooks/relations/vault-kv/README.md": [
+ "interface:vault-kv",
+ "static",
+ "30082282d57b9a7c1d0bc0311ea0a2b9d50dd8f74829696b413524a4bbffb635"
+ ],
+ "hooks/relations/vault-kv/__init__.py": [
+ "interface:vault-kv",
+ "static",
+ "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"
+ ],
+ "hooks/relations/vault-kv/copyright": [
+ "interface:vault-kv",
+ "static",
+ "d72972f963502ab390e2b3cdb72cc0f49afa0ef9e7d6e589607d260e6f9a577f"
+ ],
+ "hooks/relations/vault-kv/interface.yaml": [
+ "interface:vault-kv",
+ "static",
+ "e021758bd6e3536c2cbc30f08354dd23c11e2a7cc4d3b93584d3646fa64c331d"
+ ],
+ "hooks/relations/vault-kv/provides.py": [
+ "interface:vault-kv",
+ "static",
+ "82d6f62f8e92f12fe43a8803b17be29731c7e4e4b94ca53f6f141d2a3f0a5df4"
+ ],
+ "hooks/relations/vault-kv/requires.py": [
+ "interface:vault-kv",
+ "static",
+ "eaa5e8eb962fcf9d3f655d88f3e27958ac3b2b87a16904bca7d426fb6136ac27"
+ ],
+ "hooks/relations/vault-kv/test-requirements.txt": [
+ "interface:vault-kv",
+ "static",
+ "41b5d0f807a3166c534aa01f773dbdfbefcc9af37e369159a9dba6f0a8c75a78"
+ ],
+ "hooks/relations/vsphere-integration/.gitignore": [
+ "interface:vsphere-integration",
+ "static",
+ "9653f2820c79d92ac3518eedd0e1f43ffec128d5df9216c25d906fcba8ee46b8"
+ ],
+ "hooks/relations/vsphere-integration/LICENSE": [
+ "interface:vsphere-integration",
+ "static",
+ "cfc7749b96f63bd31c3c42b5c471bf756814053e847c10f3eb003417bc523d30"
+ ],
+ "hooks/relations/vsphere-integration/README.md": [
+ "interface:vsphere-integration",
+ "static",
+ "8de815f0f938cb8f58c536899ed87e55aac507a782093bd50d50bd3c1d6add1c"
+ ],
+ "hooks/relations/vsphere-integration/__init__.py": [
+ "interface:vsphere-integration",
+ "static",
+ "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"
+ ],
+ "hooks/relations/vsphere-integration/copyright": [
+ "interface:vsphere-integration",
+ "static",
+ "7c0e36e618a8544faaaa3f8e0533c2f1f4a18bcacbdd8b99b537742e6b587d58"
+ ],
+ "hooks/relations/vsphere-integration/docs/provides.md": [
+ "interface:vsphere-integration",
+ "static",
+ "daa3c44a2df6d774adc60bde1160f1e307129be9d696f018eab4a7e713ee775a"
+ ],
+ "hooks/relations/vsphere-integration/docs/requires.md": [
+ "interface:vsphere-integration",
+ "static",
+ "4e79bb1b151f1de63b423d39a6e1831efbb6f767fe5b84963162f62c6bbb9123"
+ ],
+ "hooks/relations/vsphere-integration/interface.yaml": [
+ "interface:vsphere-integration",
+ "static",
+ "20295b882dfb9a1750d8e988eaa3383cd3109fae510785ba4e415d7fa9b118af"
+ ],
+ "hooks/relations/vsphere-integration/make_docs": [
+ "interface:vsphere-integration",
+ "static",
+ "cd9d91049ee3c6e6148f4bd9204a34463dde905ce665cff25be014ffc1b81b89"
+ ],
+ "hooks/relations/vsphere-integration/provides.py": [
+ "interface:vsphere-integration",
+ "static",
+ "8ccb09c4a3009b59caea227ef40395fb063d3e8ce983338060fb59bbe74138c0"
+ ],
+ "hooks/relations/vsphere-integration/pydocmd.yml": [
+ "interface:vsphere-integration",
+ "static",
+ "9f8eb566569977f10955da67def28886737e80914ae000e4acfae1313d08f105"
+ ],
+ "hooks/relations/vsphere-integration/requires.py": [
+ "interface:vsphere-integration",
+ "static",
+ "d56702f60037f06259752d3bd7882f7ee46f60a4ce7b6d1071520d69ec9351f9"
+ ],
+ "hooks/start": [
+ "layer:basic",
+ "dynamic",
+ "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7"
+ ],
+ "hooks/stop": [
+ "layer:basic",
+ "dynamic",
+ "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7"
+ ],
+ "hooks/update-status": [
+ "layer:basic",
+ "dynamic",
+ "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7"
+ ],
+ "hooks/upgrade-charm": [
+ "layer:basic",
+ "dynamic",
+ "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7"
+ ],
+ "hooks/vault-kv-relation-broken": [
+ "layer:basic",
+ "dynamic",
+ "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7"
+ ],
+ "hooks/vault-kv-relation-changed": [
+ "layer:basic",
+ "dynamic",
+ "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7"
+ ],
+ "hooks/vault-kv-relation-created": [
+ "layer:basic",
+ "dynamic",
+ "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7"
+ ],
+ "hooks/vault-kv-relation-departed": [
+ "layer:basic",
+ "dynamic",
+ "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7"
+ ],
+ "hooks/vault-kv-relation-joined": [
+ "layer:basic",
+ "dynamic",
+ "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7"
+ ],
+ "hooks/vsphere-relation-broken": [
+ "layer:basic",
+ "dynamic",
+ "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7"
+ ],
+ "hooks/vsphere-relation-changed": [
+ "layer:basic",
+ "dynamic",
+ "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7"
+ ],
+ "hooks/vsphere-relation-created": [
+ "layer:basic",
+ "dynamic",
+ "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7"
+ ],
+ "hooks/vsphere-relation-departed": [
+ "layer:basic",
+ "dynamic",
+ "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7"
+ ],
+ "hooks/vsphere-relation-joined": [
+ "layer:basic",
+ "dynamic",
+ "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7"
+ ],
+ "icon.svg": [
+ "kubernetes-master",
+ "static",
+ "d543b4638e3efc01d5e79b1bc0a0f8f5d42389470fe16f9cdd843f24a26aa560"
+ ],
+ "layer.yaml": [
+ "kubernetes-master",
+ "dynamic",
+ "787461ca8aa81ec65452cc479f363ee5ad14dfb54ea7333af25f3139c731cdac"
+ ],
+ "lib/charms/apt.py": [
+ "layer:apt",
+ "static",
+ "c7613992eb33ac94d83fbf02f467b614ea5112eaf561c4715def90989cefa531"
+ ],
+ "lib/charms/coordinator.py": [
+ "layer:coordinator",
+ "static",
+ "6dbacc87605be8efcbf19ec05341e4eb210327724495c79998a46947e034dbea"
+ ],
+ "lib/charms/layer/__init__.py": [
+ "layer:basic",
+ "static",
+ "dfe0d26c6bf409767de6e2546bc648f150e1b396243619bad3aa0553ab7e0e6f"
+ ],
+ "lib/charms/layer/basic.py": [
+ "layer:basic",
+ "static",
+ "3126b5754ad39402ee27e64527044ddd231ed1cd137fcedaffb51e63a635f108"
+ ],
+ "lib/charms/layer/execd.py": [
+ "layer:basic",
+ "static",
+ "fda8bd491032db1db8ddaf4e99e7cc878c6fb5432efe1f91cadb5b34765d076d"
+ ],
+ "lib/charms/layer/hacluster.py": [
+ "layer:hacluster",
+ "static",
+ "f58e0c1503187247f858ff3c9a1166d59107afd1557ba89e4878ec2e79304f8a"
+ ],
+ "lib/charms/layer/kubernetes_common.py": [
+ "layer:kubernetes-common",
+ "static",
+ "826650823a9af745e8a57defba66d1f2fe1c735f0fe64d282cf528ca65272101"
+ ],
+ "lib/charms/layer/kubernetes_master.py": [
+ "kubernetes-master",
+ "static",
+ "e270581b23946f18907a178ffd68145629b1df804234acf48f12bf9bed62a173"
+ ],
+ "lib/charms/layer/nagios.py": [
+ "layer:nagios",
+ "static",
+ "0246710bdbea844356007a64409907d93e6e94a289d83266e8b7c5d921fb3a6c"
+ ],
+ "lib/charms/layer/options.py": [
+ "layer:options",
+ "static",
+ "8ae7a07d22542fc964f2d2bee8219d1c78a68dace70a1b38d36d4aea47b1c3b2"
+ ],
+ "lib/charms/layer/snap.py": [
+ "layer:snap",
+ "static",
+ "1a3a2a09bb5f2ea1b557354d09f6968cecb6b4204ded019e704203fb3391f7be"
+ ],
+ "lib/charms/layer/status.py": [
+ "layer:status",
+ "static",
+ "d560a5e07b2e5f2b0f25f30e1f0278b06f3f90c01e4dbad5c83d71efc79018c6"
+ ],
+ "lib/charms/layer/tls_client.py": [
+ "layer:tls-client",
+ "static",
+ "34531c3980777b661b913d77c432fc371ed10425473c2eb365b1dd5540c2ec6e"
+ ],
+ "lib/charms/layer/vault_kv.py": [
+ "layer:vault-kv",
+ "static",
+ "f34f0ae1d6b8f5d2811b1f4d6cd8edc4cdbe6e0aa5d3e9a31bbd8ba69e146fd8"
+ ],
+ "lib/charms/layer/vaultlocker.py": [
+ "layer:vaultlocker",
+ "static",
+ "fc2ae363cc3c8a9b7d46b9ec1b96b53b97c357087a8de9ae90786586584b7eb5"
+ ],
+ "lib/charms/leadership.py": [
+ "layer:leadership",
+ "static",
+ "20ffcbbc08147506759726ad51567420659ffb8a2e0121079240b8706658e332"
+ ],
+ "lib/debug_script.py": [
+ "layer:debug",
+ "static",
+ "a4d56f2d3e712b1b5cadb657c7195c6268d0aac6d228991049fd769e0ddaf453"
+ ],
+ "lxd-profile.yaml": [
+ "kubernetes-master",
+ "static",
+ "e62700f1993721652d83756f89e1f8b33c5d0dec6fb27554f61aaf96ccd4e379"
+ ],
+ "make_docs": [
+ "layer:vaultlocker",
+ "static",
+ "c990f55c8e879793a62ed8464ee3d7e0d7d2225fdecaf17af24b0df0e2daa8c1"
+ ],
+ "metadata.yaml": [
+ "kubernetes-master",
+ "dynamic",
+ "44ac6a138bbab869dc02bc9888bf2e934c9c52af2a90a04ba4c7a4f46ee70bdc"
+ ],
+ "metrics.yaml": [
+ "kubernetes-master",
+ "static",
+ "51805e00187180beb34a06c6c9d08b4a6889e02aec3e9b01043146f0002c8b51"
+ ],
+ "pydocmd.yml": [
+ "layer:vaultlocker",
+ "static",
+ "145103565659638229fec4c2d6ad7161746a75f13167d1aa16c5cb66081faf82"
+ ],
+ "pyproject.toml": [
+ "layer:apt",
+ "static",
+ "19689509a5fb9bfc90ed1e873122ac0a90f22533b7f40055c38fdd587fe297de"
+ ],
+ "reactive/__init__.py": [
+ "layer:coordinator",
+ "static",
+ "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"
+ ],
+ "reactive/apt.py": [
+ "layer:apt",
+ "static",
+ "6fe40f18eb84a910a71a4acb7ec74856128de846de6029b4fc297a875692c837"
+ ],
+ "reactive/cdk_service_kicker.py": [
+ "layer:cdk-service-kicker",
+ "static",
+ "cc2648443016a18324ecb26acb71d69c71610ba23df235f280383552136f7efc"
+ ],
+ "reactive/coordinator.py": [
+ "layer:coordinator",
+ "static",
+ "18cda7ddf00ae0e47578d489fc3ebb376b4428cd0559797a87ddbead54360d02"
+ ],
+ "reactive/hacluster.py": [
+ "layer:hacluster",
+ "static",
+ "0b34980232eec9866c85b55070db7e72a04689f92b338207c5839531abd0eadc"
+ ],
+ "reactive/kubernetes_master.py": [
+ "kubernetes-master",
+ "static",
+ "3e15e7d009b721b0dd6294f7fab4a8f59b4b4a217d6256960956c5c62842807a"
+ ],
+ "reactive/kubernetes_master_worker_base.py": [
+ "layer:kubernetes-master-worker-base",
+ "static",
+ "4a2d098f2e54f96b7ecef19b9485534b2da911a4a62104bd0efa40e4cb8bb519"
+ ],
+ "reactive/leadership.py": [
+ "layer:leadership",
+ "static",
+ "e2b233cf861adc3b2d9e9c062134ce2f104953f03283cdddd88f49efee652e8f"
+ ],
+ "reactive/snap.py": [
+ "layer:snap",
+ "static",
+ "e4625ff4190ed33625f50d94343eda100871052ef133028f5f0ff1edfa5a23c3"
+ ],
+ "reactive/status.py": [
+ "layer:status",
+ "static",
+ "30207fc206f24e91def5252f1c7f7c8e23c0aed0e93076babf5e03c05296d207"
+ ],
+ "reactive/tls_client.py": [
+ "layer:tls-client",
+ "static",
+ "08e850e401d2004523dca6b5e6bc47c33d558bf575dd55969491e11cd3ed98c8"
+ ],
+ "reactive/vault_kv.py": [
+ "layer:vault-kv",
+ "static",
+ "5eaf9dc71ea652dc02a5d4d752c1cbde1541c08b269c7409145b225cc1dbf139"
+ ],
+ "reactive/vaultlocker.py": [
+ "layer:vaultlocker",
+ "static",
+ "28e31d57017933a3b7e44dfd1913bbb3525fa7910b9a43eb6ad320d098160a4f"
+ ],
+ "requirements.txt": [
+ "layer:basic",
+ "static",
+ "a00f75d80849e5b4fc5ad2e7536f947c25b1a4044b341caa8ee87a92d3a4c804"
+ ],
+ "setup.py": [
+ "layer:snap",
+ "static",
+ "b219c8c6cb138a2f70a8ef9136d1cc3fe6210bd1e28c99fccb5e7ae90d547164"
+ ],
+ "templates/cdk-service-kicker": [
+ "layer:cdk-service-kicker",
+ "static",
+ "b17adff995310e14d1b510337efa0af0531b55e2c487210168829e0dc1a6f99b"
+ ],
+ "templates/cdk-service-kicker.service": [
+ "layer:cdk-service-kicker",
+ "static",
+ "c2d3977fa89d453f0f13a8a823621c44bb642ec7392d8b7462b631864f665029"
+ ],
+ "templates/cdk.master.auth-webhook-conf.yaml": [
+ "kubernetes-master",
+ "static",
+ "11df8c0c1a4157e7a552b864188df1dcdc99153a8b359667b640937251bad678"
+ ],
+ "templates/cdk.master.auth-webhook-secret.yaml": [
+ "kubernetes-master",
+ "static",
+ "efaf34c12a5c961fa7843199070945ba05717b3656a0f3acc3327f45334bcaec"
+ ],
+ "templates/cdk.master.auth-webhook.logrotate": [
+ "kubernetes-master",
+ "static",
+ "826249ef09b9a2fe70a1b0c905e987d427d07fb1c4b82b23edee930cf8de2317"
+ ],
+ "templates/cdk.master.auth-webhook.py": [
+ "kubernetes-master",
+ "static",
+ "88c215f457d93edf2870b626ff5d0f9d8bb8d8df59507c42db68cf734f80f5c7"
+ ],
+ "templates/cdk.master.auth-webhook.service": [
+ "kubernetes-master",
+ "static",
+ "db47a820795f2c288e0bffa775b7bc28df9b9157157d8ff5dc8ac69f7911f057"
+ ],
+ "templates/cdk.master.leader.file-watcher.path": [
+ "kubernetes-master",
+ "static",
+ "f5698867fafd661f270c41a7ddb3dccf14f974dff65decc59148888c4bc5a9d3"
+ ],
+ "templates/cdk.master.leader.file-watcher.service": [
+ "kubernetes-master",
+ "static",
+ "aeceb87e99da56744159e36ae1657551b570fcae1c452fb8a28b660134b7ee1b"
+ ],
+ "templates/cdk.master.leader.file-watcher.sh": [
+ "kubernetes-master",
+ "static",
+ "99a5398879580b219664447f58384244688fad9923f5aed7477403ac4cc813c5"
+ ],
+ "templates/ceph-secret.yaml": [
+ "kubernetes-master",
+ "static",
+ "cf38a4f47dd3399f5a07f16e889dfc755fcf63541e0b8e0e6936b5993f360b44"
+ ],
+ "templates/ceph.conf": [
+ "kubernetes-master",
+ "static",
+ "63d2a3751ea00ae8ccce61d642735f949031595e1a170aa1830105f1cd3c7266"
+ ],
+ "templates/create-namespace.yaml.j2": [
+ "kubernetes-master",
+ "static",
+ "8ace952fc7f873b0ec2c0f9843a3e39023e20da5397d799c442a78749aa96239"
+ ],
+ "templates/grafana/autoload/kubernetes.json": [
+ "kubernetes-master",
+ "static",
+ "085ee057337177c9ea31153b9c910eb7eabd6ca3536cc864ab19b047fd4b2e80"
+ ],
+ "templates/grafana/conditional/prometheus.json": [
+ "kubernetes-master",
+ "static",
+ "f066e89b01609616aca4da14439f51afe8bde0046d254273fb746a9ea582a3c5"
+ ],
+ "templates/grafana/conditional/telegraf.json": [
+ "kubernetes-master",
+ "static",
+ "845f3b66e6899693e53b09dca874d36d387e99c6714f525f6823ab27f81ef220"
+ ],
+ "templates/keystone-api-server-webhook.yaml": [
+ "kubernetes-master",
+ "static",
+ "57d856c3e55fbfddf08a3952c3a4864713c345f6a4fe42aa4861429c85af8de8"
+ ],
+ "templates/kube-keystone.sh": [
+ "kubernetes-master",
+ "static",
+ "f346f743809da597a37b6b3531cfb525de7cd7196827e818770ae57781f7f47b"
+ ],
+ "templates/kube-proxy-iptables-fix.sh": [
+ "kubernetes-master",
+ "static",
+ "62313fd28f76cfc6e5f2dd426c3ca7a728c91bf064d532e39d8e8fb51a115bbb"
+ ],
+ "templates/nagios_plugin.py": [
+ "kubernetes-master",
+ "static",
+ "0627dc0fe546a6262a9e8b0ca265d783d76ef18b546bb2966b4ff4114db1b392"
+ ],
+ "templates/prometheus/k8s-api-endpoints.yaml.j2": [
+ "kubernetes-master",
+ "static",
+ "78af8a158956011c8abfb11895fab3e67c2d7d6a092c09fa0b2ddead1ee9549b"
+ ],
+ "templates/prometheus/kube-state-metrics.yaml.j2": [
+ "kubernetes-master",
+ "static",
+ "cd01643061d21fc061fde4dacd28bd5dda9938ce531868c9c5a529a390ad29ec"
+ ],
+ "templates/prometheus/kube-state-telemetry.yaml.j2": [
+ "kubernetes-master",
+ "static",
+ "278e98c6abe9312053c08e72160181c968fcf447b3902bb608c4971deca7b192"
+ ],
+ "templates/prometheus/kubernetes-cadvisor.yaml.j2": [
+ "kubernetes-master",
+ "static",
+ "df55e745681353b08029262e4e806f6deed99add34c19950ccd0aa7dabcd226d"
+ ],
+ "templates/prometheus/kubernetes-nodes.yaml.j2": [
+ "kubernetes-master",
+ "static",
+ "6f22e3b7a6a87d7d50b1ad3099fccf08677acc434273f032952d8bf7f548c612"
+ ],
+ "templates/rbac-pod-security-policy.yaml": [
+ "kubernetes-master",
+ "static",
+ "b4e7b7c0976f1a0175c0e60b458e3e9d8bd486849033c4cff7d2684793aa603a"
+ ],
+ "templates/rbac-proxy.yaml": [
+ "kubernetes-master",
+ "static",
+ "abb77f196e008fc636c254c89672bb889ca34a91103972c11a5e2e59aa608400"
+ ],
+ "templates/rbd-persistent-volume.yaml": [
+ "kubernetes-master",
+ "static",
+ "bdee575ef92912dda50d2e82aafab359168aac32a78de2bd9131bcb554669966"
+ ],
+ "templates/service-always-restart.systemd-229.conf": [
+ "kubernetes-master",
+ "static",
+ "516958fbf8b9a05cc86f6700d0de7bdc6b2ba1847d69fbe1214e23b52e00b064"
+ ],
+ "templates/service-always-restart.systemd-latest.conf": [
+ "kubernetes-master",
+ "static",
+ "37de98817682363d48b3dd2b635f5cfb281533aaa9d3836d1af44f9d6a59984c"
+ ],
+ "templates/service-iptables-fix.service": [
+ "kubernetes-master",
+ "static",
+ "3f8a29c719c175e17a7a69756223babf0e2b56f8a8d69dbbd81e8e0889863669"
+ ],
+ "templates/system-monitoring-rbac-role.yaml": [
+ "kubernetes-master",
+ "static",
+ "a50f45a1e978ffeaf872f961c2f8ff95fbc144462baef42bcdda3c51da03f54f"
+ ],
+ "templates/vaultlocker-loop@.service": [
+ "layer:vaultlocker",
+ "static",
+ "57d81403c04033d382094b3c8a60c4728eb0fad146746921fe7e770b4c49f758"
+ ],
+ "templates/vaultlocker.conf.j2": [
+ "layer:vaultlocker",
+ "static",
+ "7428fcfb91731d37be14a0f8d4c5923cc95a28bd28579c5a013928ab147b0beb"
+ ],
+ "tox.ini": [
+ "layer:vaultlocker",
+ "static",
+ "716854030c843efa3afd32b8742807f1515b5fc705d8ae81aac9b447ffae411f"
+ ],
+ "version": [
+ "kubernetes-master",
+ "dynamic",
+ "b14065491445a78e202c652a03eeb39145fa129707d518a602bd1f66517791a0"
+ ],
+ "wheelhouse.txt": [
+ "kubernetes-master",
+ "dynamic",
+ "5e9ee16ba4e28c49c5c745ae19b6a149a01cca6c22c47feed0954c10f935ca7e"
+ ],
+ "wheelhouse/Flask-1.1.2.tar.gz": [
+ "__pip__",
+ "dynamic",
+ "4efa1ae2d7c9865af48986de8aeb8504bf32c7f3d6fdc9353d34b21f4b127060"
+ ],
+ "wheelhouse/Jinja2-2.10.1.tar.gz": [
+ "layer:basic",
+ "dynamic",
+ "065c4f02ebe7f7cf559e49ee5a95fb800a9e4528727aec6f24402a5374c65013"
+ ],
+ "wheelhouse/MarkupSafe-1.1.1.tar.gz": [
+ "__pip__",
+ "dynamic",
+ "29872e92839765e546828bb7754a68c418d927cd064fd4708fab9fe9c8bb116b"
+ ],
+ "wheelhouse/PyYAML-5.2.tar.gz": [
+ "layer:basic",
+ "dynamic",
+ "c0ee8eca2c582d29c3c2ec6e2c4f703d1b7f1fb10bc72317355a746057e7346c"
+ ],
+ "wheelhouse/Tempita-0.5.2.tar.gz": [
+ "__pip__",
+ "dynamic",
+ "cacecf0baa674d356641f1d406b8bff1d756d739c46b869a54de515d08e6fc9c"
+ ],
+ "wheelhouse/Werkzeug-1.0.1.tar.gz": [
+ "__pip__",
+ "dynamic",
+ "6c80b1e5ad3665290ea39320b91e1be1e0d5f60652b964a3070216de83d2e47c"
+ ],
+ "wheelhouse/certifi-2020.12.5.tar.gz": [
+ "__pip__",
+ "dynamic",
+ "1a4995114262bffbc2413b159f2a1a480c969de6e6eb13ee966d470af86af59c"
+ ],
+ "wheelhouse/chardet-4.0.0.tar.gz": [
+ "__pip__",
+ "dynamic",
+ "0d6f53a15db4120f2b08c94f11e7d93d2c911ee118b6b30a04ec3ee8310179fa"
+ ],
+ "wheelhouse/charmhelpers-0.20.21.tar.gz": [
+ "layer:basic",
+ "dynamic",
+ "37dd06f9548724d38352d1eaf91216df9167066745774118481d40974599715c"
+ ],
+ "wheelhouse/charms.reactive-1.4.1.tar.gz": [
+ "layer:basic",
+ "dynamic",
+ "bba21b4fd40b26c240c9ef2aa10c6fdf73592031c68591da4e7ccc46ca9cb616"
+ ],
+ "wheelhouse/click-7.1.2.tar.gz": [
+ "__pip__",
+ "dynamic",
+ "d2b5255c7c6349bc1bd1e59e08cd12acbbd63ce649f2588755783aa94dfb6b1a"
+ ],
+ "wheelhouse/gunicorn-20.1.0.tar.gz": [
+ "kubernetes-master",
+ "dynamic",
+ "e0a968b5ba15f8a328fdfd7ab1fcb5af4470c28aaf7e55df02a99bc13138e6e8"
+ ],
+ "wheelhouse/hvac-0.10.10.tar.gz": [
+ "layer:vault-kv",
+ "dynamic",
+ "80888c009c7e310a34d480ce45fb33a44b479cb9b8a3f3c467b6ffcfff0569f4"
+ ],
+ "wheelhouse/idna-2.10.tar.gz": [
+ "__pip__",
+ "dynamic",
+ "b307872f855b18632ce0c21c5e45be78c0ea7ae4c15c828c20788b26921eb3f6"
+ ],
+ "wheelhouse/itsdangerous-1.1.0.tar.gz": [
+ "__pip__",
+ "dynamic",
+ "321b033d07f2a4136d3ec762eac9f16a10ccd60f53c0c91af90217ace7ba1f19"
+ ],
+ "wheelhouse/netaddr-0.7.19.tar.gz": [
+ "layer:basic",
+ "dynamic",
+ "38aeec7cdd035081d3a4c306394b19d677623bf76fa0913f6695127c7753aefd"
+ ],
+ "wheelhouse/netifaces-0.10.9.tar.gz": [
+ "layer:vault-kv",
+ "dynamic",
+ "2dee9ffdd16292878336a58d04a20f0ffe95555465fee7c9bd23b3490ef2abf3"
+ ],
+ "wheelhouse/pbr-5.6.0.tar.gz": [
+ "__pip__",
+ "dynamic",
+ "42df03e7797b796625b1029c0400279c7c34fd7df24a7d7818a1abb5b38710dd"
+ ],
+ "wheelhouse/pip-18.1.tar.gz": [
+ "layer:basic",
+ "dynamic",
+ "c0a292bd977ef590379a3f05d7b7f65135487b67470f6281289a94e015650ea1"
+ ],
+ "wheelhouse/psutil-5.8.0.tar.gz": [
+ "layer:vault-kv",
+ "dynamic",
+ "0c9ccb99ab76025f2f0bbecf341d4656e9c1351db8cc8a03ccd62e318ab4b5c6"
+ ],
+ "wheelhouse/pyaml-20.4.0.tar.gz": [
+ "__pip__",
+ "dynamic",
+ "29a5c2a68660a799103d6949167bd6c7953d031449d08802386372de1db6ad71"
+ ],
+ "wheelhouse/requests-2.25.1.tar.gz": [
+ "__pip__",
+ "dynamic",
+ "27973dd4a904a4f13b263a19c866c13b92a39ed1c964655f025f3f8d3d75b804"
+ ],
+ "wheelhouse/setuptools-41.6.0.zip": [
+ "layer:basic",
+ "dynamic",
+ "6afa61b391dcd16cb8890ec9f66cc4015a8a31a6e1c2b4e0c464514be1a3d722"
+ ],
+ "wheelhouse/setuptools_scm-1.17.0.tar.gz": [
+ "layer:basic",
+ "dynamic",
+ "70a4cf5584e966ae92f54a764e6437af992ba42ac4bca7eb37cc5d02b98ec40a"
+ ],
+ "wheelhouse/six-1.15.0.tar.gz": [
+ "__pip__",
+ "dynamic",
+ "30639c035cdb23534cd4aa2dd52c3bf48f06e5f4a941509c8bafd8ce11080259"
+ ],
+ "wheelhouse/tenacity-7.0.0.tar.gz": [
+ "layer:snap",
+ "dynamic",
+ "5bd16ef5d3b985647fe28dfa6f695d343aa26479a04e8792b9d3c8f49e361ae1"
+ ],
+ "wheelhouse/urllib3-1.26.4.tar.gz": [
+ "__pip__",
+ "dynamic",
+ "e7b021f7241115872f92f43c6508082facffbd1c048e3c6e2bb9c2a157e28937"
+ ],
+ "wheelhouse/wheel-0.33.6.tar.gz": [
+ "layer:basic",
+ "dynamic",
+ "10c9da68765315ed98850f8e048347c3eb06dd81822dc2ab1d4fde9dc9702646"
+ ]
+ }
+}
\ No newline at end of file
diff --git a/kubernetes-master/.github/workflows/main.yml b/kubernetes-master/.github/workflows/main.yml
new file mode 100644
index 0000000..ded79fa
--- /dev/null
+++ b/kubernetes-master/.github/workflows/main.yml
@@ -0,0 +1,56 @@
+name: Test Suite
+on: [pull_request]
+
+jobs:
+ lint:
+ name: Lint
+ runs-on: ubuntu-latest
+ strategy:
+ matrix:
+ python: [3.6, 3.7, 3.8, 3.9]
+ steps:
+ - name: Check out code
+ uses: actions/checkout@v2
+ - name: Setup Python
+ uses: actions/setup-python@v2
+ with:
+ python-version: ${{ matrix.python }}
+ - name: Install Dependencies
+ run: |
+ pip install tox
+ - name: Run lint
+ run: tox -vve lint
+ unit-test:
+ name: Unit Tests
+ runs-on: ubuntu-latest
+ strategy:
+ matrix:
+ python: [3.6, 3.7, 3.8, 3.9]
+ steps:
+ - name: Check out code
+ uses: actions/checkout@v2
+ - name: Setup Python
+ uses: actions/setup-python@v2
+ with:
+ python-version: ${{ matrix.python }}
+ - name: Install Dependencies
+ run: |
+ pip install tox
+ - name: Run test
+ run: tox -vve unit
+ integration-test:
+ name: Integration test with VMWare
+ runs-on: self-hosted
+ timeout-minutes: 360
+ steps:
+ - name: Check out code
+ uses: actions/checkout@v2
+ - name: Setup Python
+ uses: actions/setup-python@v2
+ with:
+ python-version: 3.8
+ - name: Install Dependencies
+ run: |
+ pip install tox
+ - name: Run test
+ run: tox -e integration
diff --git a/kubernetes-master/.gitignore b/kubernetes-master/.gitignore
new file mode 100644
index 0000000..4701d96
--- /dev/null
+++ b/kubernetes-master/.gitignore
@@ -0,0 +1,5 @@
+.tox/
+__pycache__/
+*.pyc
+placeholders/
+*.charm
diff --git a/kubernetes-master/.travis.yml b/kubernetes-master/.travis.yml
new file mode 100644
index 0000000..66d8e1f
--- /dev/null
+++ b/kubernetes-master/.travis.yml
@@ -0,0 +1,7 @@
+language: python
+python:
+ - "3.5"
+install:
+ - pip install tox-travis
+script:
+ - tox
diff --git a/kubernetes-master/.travis/profile-update.yaml b/kubernetes-master/.travis/profile-update.yaml
new file mode 100644
index 0000000..57f96eb
--- /dev/null
+++ b/kubernetes-master/.travis/profile-update.yaml
@@ -0,0 +1,12 @@
+config: {}
+description: Default LXD profile - updated
+devices:
+ eth0:
+ name: eth0
+ parent: lxdbr0
+ nictype: bridged
+ type: nic
+ root:
+ path: /
+ pool: default
+ type: disk
diff --git a/kubernetes-master/CONTRIBUTING.md b/kubernetes-master/CONTRIBUTING.md
new file mode 100644
index 0000000..f0d8d31
--- /dev/null
+++ b/kubernetes-master/CONTRIBUTING.md
@@ -0,0 +1,37 @@
+# Contributor Guide
+
+This Juju charm is open source ([Apache License 2.0](./LICENSE)) and we actively seek any community contributions
+for code, suggestions and documentation.
+This page details a few notes, workflows and suggestions for how to make contributions most effective and help us
+all build a better charm - please give them a read before working on any contributions.
+
+## Licensing
+
+This charm has been created under the [Apache License 2.0](./LICENSE), which will cover any contributions you may
+make to this project. Please familiarise yourself with the terms of the license.
+
+Additionally, this charm uses the Harmony CLA agreement. It’s the easiest way for you to give us permission to
+use your contributions.
+In effect, you’re giving us a license, but you still own the copyright — so you retain the right to modify your
+code and use it in other projects. Please [sign the CLA here](https://ubuntu.com/legal/contributors/agreement) before
+making any contributions.
+
+## Code of conduct
+
+We have adopted the Ubuntu Code of Conduct. You can read this in full [here](https://ubuntu.com/community/code-of-conduct).
+
+## Contributing code
+
+To contribute code to this project, please use the following workflow:
+
+1. [Submit a bug](https://bugs.launchpad.net/charm-etcd/+filebug) to explain the need for and track the change.
+2. Create a branch on your fork of the repo with your changes, including a unit test covering the new or modified code.
+3. Submit a PR. The PR description should include a link to the bug on Launchpad.
+4. Update the Launchpad bug to include a link to the PR and the `review-needed` tag.
+5. Once reviewed and merged, the change will become available on the edge channel and assigned to an appropriate milestone
+ for further release according to priority.
+
+## Documentation
+
+Documentation for this charm is currently maintained as part of the Charmed Kubernetes docs.
+See [this page](https://github.com/charmed-kubernetes/kubernetes-docs/blob/master/pages/k8s/charm-etcd.md)
diff --git a/kubernetes-master/LICENSE b/kubernetes-master/LICENSE
new file mode 100644
index 0000000..7a4a3ea
--- /dev/null
+++ b/kubernetes-master/LICENSE
@@ -0,0 +1,202 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
\ No newline at end of file
diff --git a/kubernetes-master/Makefile b/kubernetes-master/Makefile
new file mode 100644
index 0000000..a1ad3a5
--- /dev/null
+++ b/kubernetes-master/Makefile
@@ -0,0 +1,24 @@
+#!/usr/bin/make
+
+all: lint unit_test
+
+
+.PHONY: clean
+clean:
+ @rm -rf .tox
+
+.PHONY: apt_prereqs
+apt_prereqs:
+ @# Need tox, but don't install the apt version unless we have to (don't want to conflict with pip)
+ @which tox >/dev/null || (sudo apt-get install -y python-pip && sudo pip install tox)
+
+.PHONY: lint
+lint: apt_prereqs
+ @tox --notest
+ @PATH=.tox/py34/bin:.tox/py35/bin flake8 $(wildcard hooks reactive lib unit_tests tests)
+ @charm proof
+
+.PHONY: unit_test
+unit_test: apt_prereqs
+ @echo Starting tests...
+ tox
diff --git a/kubernetes-master/README.md b/kubernetes-master/README.md
new file mode 100644
index 0000000..02f24a6
--- /dev/null
+++ b/kubernetes-master/README.md
@@ -0,0 +1,53 @@
+# Kubernetes-master
+
+[Kubernetes](http://kubernetes.io/) is an open source system for managing
+application containers across a cluster of hosts. The Kubernetes project was
+started by Google in 2014, combining the experience of running production
+workloads with best practices from the community.
+
+The Kubernetes project defines some new terms that may be unfamiliar to users
+or operators. For more information please refer to the concept guide in the
+[getting started guide](https://kubernetes.io/docs/home/).
+
+This charm is an encapsulation of the Kubernetes master processes and the
+operations to run on any cloud for the entire lifecycle of the cluster.
+
+This charm is built from other charm layers using the Juju reactive framework.
+The other layers focus on specific subset of operations making this layer
+specific to operations of Kubernetes master processes.
+
+# Charmed Kubernetes
+
+This charm is not fully functional when deployed by itself. It requires other
+charms to model a complete Kubernetes cluster. A Kubernetes cluster needs a
+distributed key value store such as [Etcd](https://coreos.com/etcd/) and the
+kubernetes-worker charm which delivers the Kubernetes node services. A cluster
+also requires a Software Defined Network (SDN), a Container Runtime such as
+[containerd](https://jaas.ai/u/containers/containerd), and Transport Layer
+Security (TLS) so the components in a cluster communicate securely.
+
+Please take a look at the [Charmed Kubernetes](https://jaas.ai/charmed-kubernetes)
+or the [Kubernetes core](https://jaas.ai/kubernetes-core) bundles for
+examples of complete models of Kubernetes clusters.
+
+For full install instructions, please see the [Charmed Kubernetes documentation](https://ubuntu.com/kubernetes/docs/quickstart).
+
+For details on configuring and operating this charm, see the [kubernetes-master documentation](https://ubuntu.com/kubernetes/docs/charm-kubernetes-master) on the same site.
+
+# Developers
+
+## Building the charm
+
+```
+make charm
+```
+
+## Testing the charm
+
+```
+tox
+```
+
+Note that the unit tests use [`charms.unit_test`](https://pypi.org/project/charms.unit-test/)
+so all charms.reactive helpers are automatically patched with fakes and little manual
+patching needs to be done. Things like `set_flag` and `is_flag_set` can be used directly.
diff --git a/kubernetes-master/actions.yaml b/kubernetes-master/actions.yaml
new file mode 100644
index 0000000..8abf0cb
--- /dev/null
+++ b/kubernetes-master/actions.yaml
@@ -0,0 +1,137 @@
+"debug":
+ "description": "Collect debug data"
+"cis-benchmark":
+ "description": |
+ Run the CIS Kubernetes Benchmark against snap-based components.
+ "params":
+ "apply":
+ "type": "string"
+ "default": "none"
+ "description": |
+ Apply remediations to address benchmark failures. The default, 'none',
+ will not attempt to fix any reported failures. Set to 'conservative'
+ to resolve simple failures. Set to 'dangerous' to attempt to resolve
+ all failures.
+
+ Note: Applying any remediation may result in an unusable cluster.
+ "config":
+ "type": "string"
+ "default": "https://github.com/charmed-kubernetes/kube-bench-config/archive/cis-1.5.zip#sha1=811f21dbf6c841bafdbfbd8a21f912ad67582f46"
+ "description": |
+ Archive containing configuration files to use when running kube-bench.
+ The default value is known to be compatible with snap components. When
+      using a custom URL, append '#<hash_type>=<checksum>' to verify the
+ archive integrity when downloaded.
+ "release":
+ "type": "string"
+ "default": "https://github.com/aquasecurity/kube-bench/releases/download/v0.3.1/kube-bench_0.3.1_linux_amd64.tar.gz#sha256=6616f1373987259285e2f676a225d4a3885cd62b7e7a116102ff2fb445724281"
+ "description": |
+ Archive containing the 'kube-bench' binary to run. The default value
+ points to a stable upstream release. When using a custom URL, append
+      '#<hash_type>=<checksum>' to verify the archive integrity when
+ downloaded.
+
+ This may also be set to the special keyword 'upstream'. In this case,
+ the action will compile and use a local kube-bench binary built from
+ the master branch of the upstream repository:
+ https://github.com/aquasecurity/kube-bench
+
+"restart":
+ "description": "Restart the Kubernetes master services on demand."
+"create-rbd-pv":
+  "description": "Create RADOS Block Device (RBD) volume in Ceph and creates PersistentVolume.\
+ \ Note this is deprecated on Kubernetes >= 1.10 in favor of CSI, where PersistentVolumes\
+ \ are created dynamically to back PersistentVolumeClaims."
+ "params":
+ "name":
+ "type": "string"
+ "description": "Name the persistent volume."
+ "minLength": !!int "1"
+ "size":
+ "type": "integer"
+ "description": "Size in MB of the RBD volume."
+ "minimum": !!int "1"
+ "mode":
+ "type": "string"
+ "default": "ReadWriteOnce"
+ "description": "Access mode for the persistent volume."
+ "filesystem":
+ "type": "string"
+ "default": "xfs"
+ "description": "File system type to format the volume."
+ "skip-size-check":
+ "type": "boolean"
+ "default": !!bool "false"
+ "description": "Allow creation of overprovisioned RBD."
+ "required":
+ - "name"
+ - "size"
+"namespace-list":
+ "description": "List existing k8s namespaces"
+"namespace-create":
+ "description": "Create new namespace"
+ "params":
+ "name":
+ "type": "string"
+ "description": "Namespace name eg. staging"
+ "minLength": !!int "2"
+ "required":
+ - "name"
+"namespace-delete":
+ "description": "Delete namespace"
+ "params":
+ "name":
+ "type": "string"
+ "description": "Namespace name eg. staging"
+ "minLength": !!int "2"
+ "required":
+ - "name"
+"upgrade":
+ "description": "Upgrade the kubernetes snaps"
+ "params":
+ "fix-cluster-name":
+ "type": "boolean"
+ "default": !!bool "true"
+ "description": >-
+ If using the OpenStack cloud provider, whether to fix the cluster
+ name sent to it to include the cluster tag. This fixes an issue
+ with load balancers conflicting with other clusters in the same
+ project but will cause new load balancers to be created which will
+ require manual intervention to resolve.
+"user-create":
+ "description": "Create a new user"
+ "params":
+ "name":
+ "type": "string"
+ "description": |
+ Username for the new user. This value must only contain alphanumeric
+ characters, ':', '@', '-' or '.'.
+ "minLength": !!int "2"
+ "groups":
+ "type": "string"
+ "description": |
+ Optional comma-separated list of groups eg. 'system:masters,managers'
+ "required":
+ - "name"
+"user-delete":
+ "description": "Delete an existing user"
+ "params":
+ "name":
+ "type": "string"
+ "description": "Username of the user to delete"
+ "minLength": !!int "2"
+ "required":
+ - "name"
+"user-list":
+ "description": "List existing users"
+"get-kubeconfig":
+ "description": "Retrieve Kubernetes cluster config, including credentials"
+"apply-manifest":
+ "description": "Apply JSON formatted Kubernetes manifest to cluster"
+ "params":
+ "json":
+ "type": "string"
+ "description": "The content of the manifest to deploy in JSON format"
+ "minLength": !!int "2"
+ "required":
+ - "json"
diff --git a/kubernetes-master/actions/apply-manifest b/kubernetes-master/actions/apply-manifest
new file mode 100755
index 0000000..60e67eb
--- /dev/null
+++ b/kubernetes-master/actions/apply-manifest
@@ -0,0 +1,75 @@
+#!/usr/local/sbin/charm-env python3
+import os
+import json
+import tempfile
+import subprocess
+from charmhelpers.core.hookenv import action_get, action_set, action_fail, action_name
+
+
+def _kubectl(args):
+ """
+ Executes kubectl with args as arguments
+ """
+ snap_bin = os.path.join(os.sep, "snap", "bin")
+ env = os.environ.copy()
+ env["PATH"] = os.pathsep.join([snap_bin, env["PATH"]])
+ cmd = ["kubectl", "--kubeconfig=/home/ubuntu/config"]
+ cmd.extend(args)
+ return subprocess.check_output(
+ cmd,
+ env=env,
+ stderr=subprocess.STDOUT,
+ )
+
+
+def get_kubeconfig():
+ """
+ Read the kubeconfig on the master and return it as JSON
+ """
+ try:
+ result = _kubectl(["config", "view", "-o", "json", "--raw"])
+ # JSON format verification
+ kubeconfig = json.dumps(json.loads(result))
+ action_set({"kubeconfig": kubeconfig})
+ except json.JSONDecodeError as e:
+ action_fail("Failed to parse kubeconfig: {}".format(str(e)))
+ except Exception as e:
+ action_fail("Failed to retrieve kubeconfig: {}".format(str(e)))
+
+
+def apply_manifest():
+ """
+ Applies a user defined manifest with kubectl
+ """
+ _, apply_path = tempfile.mkstemp(suffix=".json")
+ try:
+ manifest = json.loads(action_get("json"))
+ with open(apply_path, "w") as manifest_file:
+ json.dump(manifest, manifest_file)
+ output = _kubectl(["apply", "-f", apply_path])
+
+ action_set(
+ {
+ "summary": "Manifest applied.",
+ "output": output.decode("utf-8"),
+ }
+ )
+ except subprocess.CalledProcessError as e:
+ action_fail(
+ "kubectl failed with exit code {} and message: {}".format(
+ e.returncode, e.output
+ )
+ )
+ except json.JSONDecodeError as e:
+ action_fail("Failed to parse JSON manifest: {}".format(str(e)))
+ except Exception as e:
+ action_fail("Failed to apply manifest: {}".format(str(e)))
+ finally:
+ os.unlink(apply_path)
+
+
+action = action_name()
+if action == "get-kubeconfig":
+ get_kubeconfig()
+elif action == "apply-manifest":
+ apply_manifest()
diff --git a/kubernetes-master/actions/cis-benchmark b/kubernetes-master/actions/cis-benchmark
new file mode 100755
index 0000000..3f91dea
--- /dev/null
+++ b/kubernetes-master/actions/cis-benchmark
@@ -0,0 +1,371 @@
+#!/usr/local/sbin/charm-env python3
+import os
+import json
+import shlex
+import shutil
+import subprocess
+import sys
+import tempfile
+from pathlib import Path
+
+import charms.layer
+import charms.reactive
+from charmhelpers.core import hookenv, unitdata
+from charmhelpers.fetch.archiveurl import ArchiveUrlFetchHandler
+from charms.layer import snap
+from charms.reactive import clear_flag, is_flag_set, set_flag
+
+
+BENCH_HOME = '/home/ubuntu/kube-bench'
+BENCH_BIN = '{}/kube-bench'.format(BENCH_HOME)
+BENCH_CFG = '{}/cfg-ck'.format(BENCH_HOME)
+GO_PKG = 'github.com/aquasecurity/kube-bench'
+RESULTS_DIR = '/home/ubuntu/kube-bench-results'
+
+# Remediation dicts associate a failing test with a tuple to fix it.
+# Conservative fixes will probably leave the cluster in a good state.
+# Dangerous fixes will likely break the cluster.
+# Tuple examples:
+# {'1.2.3': ('manual -- we don't know how to auto fix this', None, None)}
+# {'1.2.3': ('cli', 'command to run', None)}
+# {'1.2.3': ('kv', 'snap', {cfg_key: value})}
+CONSERVATIVE = {
+ '0.0.0': ('cli', 'echo "this is fine"', None),
+
+ # etcd (no known failures with a default install)
+
+ # k8s-master
+ '1.2.21': ('kv', 'kube-apiserver', {'profiling': 'false'}),
+ '1.2.23': ('kv', 'kube-apiserver', {'audit-log-maxage': '30'}),
+ '1.2.24': ('kv', 'kube-apiserver', {'audit-log-maxbackup': '10'}),
+ '1.3.1': ('kv', 'kube-controller-manager', {'terminated-pod-gc-threshold': '500'}),
+ '1.3.2': ('kv', 'kube-controller-manager', {'profiling': 'false'}),
+ '1.4.1': ('kv', 'kube-scheduler', {'profiling': 'false'}),
+
+ # k8s-worker
+ '4.2.2': ('kv', 'kubelet', {'authorization-mode': 'Webhook'}),
+ '4.2.4': ('kv', 'kubelet', {'read-only-port': '0'}),
+ '4.2.6': ('kv', 'kubelet', {'protect-kernel-defaults': 'true'}),
+}
+ADMISSION_PLUGINS = {'enable-admission-plugins': ('PersistentVolumeLabel',
+ 'PodSecurityPolicy,'
+ 'ServiceAccount,'
+ 'NodeRestriction')}
+DANGEROUS = {
+ '0.0.0': ('cli', 'echo "this is fine"', None),
+
+ # etcd (no known failures with a default install)
+
+ # k8s-master
+ '1.2.2': ('kv', 'kube-apiserver', {'basic-auth-file': None}),
+ '1.2.3': ('kv', 'kube-apiserver', {'token-auth-file': None}),
+ '1.2.7': ('kv', 'kube-apiserver', {'authorization-mode': 'RBAC,Node'}),
+ '1.2.8': ('kv', 'kube-apiserver', {'authorization-mode': 'RBAC,Node'}),
+ '1.2.9': ('kv', 'kube-apiserver', {'authorization-mode': 'RBAC,Node'}),
+ '1.2.14': ('kv', 'kube-apiserver', ADMISSION_PLUGINS),
+ '1.2.16': ('kv', 'kube-apiserver', ADMISSION_PLUGINS),
+ '1.2.17': ('kv', 'kube-apiserver', ADMISSION_PLUGINS),
+ '1.2.18': ('kv', 'kube-apiserver', {'insecure-bind-address': None}),
+ '1.2.19': ('kv', 'kube-apiserver', {'insecure-port': '0'}),
+ '1.2.33': ('manual', None, None),
+ '1.3.6': ('kv', 'kube-controller-manager',
+ {'feature-gates': 'RotateKubeletServerCertificate=true'}),
+
+ # k8s-worker
+ '4.2.12': ('kv', 'kubelet',
+ {'feature-gates': 'RotateKubeletServerCertificate=true'}),
+}
+
+
+def _fail(msg):
+ '''Fail the action with a given message.'''
+ hookenv.action_fail(msg)
+ sys.exit()
+
+
+def _move_matching_parent(dirpath, filename, dest):
+ '''Move a parent directory that contains a specific file.
+
+ Helper function that walks a directory looking for a given file. If found,
+ the file's parent directory is moved to the given destination.
+
+ :param: dirpath: String path to search
+ :param: filename: String file to find
+ :param: dest: String destination of the found parent directory
+ '''
+ for root, _, files in os.walk(dirpath):
+ for name in files:
+ if name == filename:
+ hookenv.log('Moving {} to {}'.format(root, dest))
+ shutil.move(root, dest)
+ return
+ else:
+ _fail('Could not find {} in {}'.format(filename, dirpath))
+
+
+def _restart_charm():
+ '''Set charm-specific flags and call reactive.main().'''
+ app = hookenv.charm_name() or 'unknown'
+ if 'master' in app:
+ hookenv.log('Restarting master')
+ clear_flag('kubernetes-master.components.started')
+ elif 'worker' in app:
+ hookenv.log('Restarting worker')
+ set_flag('kubernetes-worker.restart-needed')
+ elif 'etcd' in app:
+ hookenv.log('No-op: etcd does not need to be restarted')
+ return
+ else:
+ _fail('Unable to determine the charm to restart: {}'.format(app))
+
+ # Invoke reactive so the charm will react to the flags we just managed
+ charms.layer.import_layer_libs()
+ charms.reactive.main()
+
+
+def install(release, config):
+ '''Install kube-bench and related configuration.
+
+ Release and configuration are set via action params. If installing an
+ upstream release, this method will also install 'go' if needed.
+
+ :param: release: Archive URI or 'upstream'
+ :param: config: Archive URI of configuration files
+ '''
+ if Path(BENCH_HOME).exists():
+ shutil.rmtree(BENCH_HOME)
+ fetcher = ArchiveUrlFetchHandler()
+
+ if release == 'upstream':
+ Path(BENCH_HOME).mkdir(parents=True, exist_ok=True)
+
+ # Setup the 'go' environment
+ env = os.environ.copy()
+ go_bin = shutil.which('go', path='{}:/snap/bin'.format(env['PATH']))
+ if not go_bin:
+ snap.install('go', channel='stable', classic=True)
+ go_bin = '/snap/bin/go'
+ go_cache = os.getenv('GOCACHE', '/var/snap/go/common/cache')
+ go_path = os.getenv('GOPATH', '/var/snap/go/common')
+ env['GOCACHE'] = go_cache
+ env['GOPATH'] = go_path
+ Path(go_path).mkdir(parents=True, exist_ok=True)
+
+ # From https://github.com/aquasecurity/kube-bench#installing-from-sources
+ go_cmd = ('{bin} get {pkg} '
+ 'github.com/golang/dep/cmd/dep'.format(bin=go_bin, pkg=GO_PKG))
+ try:
+ subprocess.check_call(shlex.split(go_cmd), cwd=go_path, env=env)
+ except subprocess.CalledProcessError:
+ _fail('Failed to run: {}'.format(go_cmd))
+
+ go_cmd = ('{bin} build -o {out} {base}/src/{pkg}'.format(
+ bin=go_bin, out=BENCH_BIN, base=go_path, pkg=GO_PKG))
+ try:
+ subprocess.check_call(shlex.split(go_cmd), cwd=go_path, env=env)
+ except subprocess.CalledProcessError:
+ _fail('Failed to run: {}'.format(go_cmd))
+ else:
+ # Fetch the release URI and put it in the right place.
+ archive_path = fetcher.install(source=release)
+ # NB: We may not know the structure of the archive, but we know the
+ # directory containing 'kube-bench' belongs in our BENCH_HOME.
+ _move_matching_parent(
+ dirpath=archive_path, filename='kube-bench', dest=BENCH_HOME)
+
+ # Fetch the config URI and put it in the right place.
+ archive_dir = fetcher.install(source=config)
+ # NB: We may not know the structure of the archive, but we know the
+ # directory containing 'config.yaml' belongs in our BENCH_CFG.
+ _move_matching_parent(
+ dirpath=archive_dir, filename='config.yaml', dest=BENCH_CFG)
+
+
+def apply(remediations=None):
+ '''Apply remediations to address benchmark failures.
+
+ :param: remediations: either 'conservative' or 'dangerous'
+ '''
+ applied_fixes = 0
+ danger = True if remediations == 'dangerous' else False
+ db = unitdata.kv()
+
+ json_log = report(log_format='json')
+ hookenv.log('Loading JSON from: {}'.format(json_log))
+ try:
+ with open(json_log, 'r') as f:
+ full_json = json.load(f)
+ except Exception:
+ _fail('Failed to load: {}'.format(json_log))
+
+ for test in full_json.get('tests', {}):
+ for result in test.get('results', {}):
+ test_num = result.get('test_number')
+ test_remediation = result.get('remediation')
+ test_status = result.get('status', '')
+
+ if test_status.lower() == 'fail':
+ test_remedy = CONSERVATIVE.get(test_num)
+ if not test_remedy and danger:
+ # no conservative remedy, check dangerous if user wants
+ test_remedy = DANGEROUS.get(test_num)
+ if isinstance(test_remedy, tuple):
+ if test_remedy[0] == 'manual':
+ # we don't know how to autofix; log remediation text
+ hookenv.log('Test {}: unable to auto-apply remedy.\n'
+ 'Manual steps:\n{}'.format(test_num,
+ test_remediation))
+ elif test_remedy[0] == 'cli':
+ cmd = shlex.split(test_remedy[1])
+ try:
+ out = subprocess.check_output(cmd)
+ except subprocess.CalledProcessError:
+ _fail('Test {}: failed to run: {}'.format(test_num, cmd))
+ else:
+ hookenv.log('Test {}: applied remedy: {}\n'
+ 'Output: {}'.format(test_num, cmd, out))
+ applied_fixes += 1
+ elif test_remedy[0] == 'kv':
+ cfg_key = 'cis-' + test_remedy[1]
+ cfg = db.get(cfg_key) or {}
+ cfg.update(test_remedy[2])
+ db.set(cfg_key, cfg)
+
+ hookenv.log('Test {}: updated configuration: {}\n'.format(
+ test_num, cfg))
+ applied_fixes += 1
+ else:
+ hookenv.log('Test {}: remediation is missing'.format(test_num))
+
+ # CLI and KV changes will require a charm restart; do it.
+ if applied_fixes > 0:
+ _restart_charm()
+
+ msg = ('Applied {} remediations. Re-run with "apply=none" to generate a '
+ 'new report.').format(applied_fixes)
+ hookenv.action_set({'summary': msg})
+
+
+def reset():
+ '''Reset any remediations we applied to unitdata.kv().
+
+ This action does not track individual remediations to reset. Therefore,
+ this function unconditionally unsets all 'cis-' prefixed arguments that
+ this action may have set and restarts the relevant charm.
+ '''
+ db = unitdata.kv()
+
+ db.unset('cis-kube-apiserver')
+ db.unset('cis-kube-scheduler')
+ db.unset('cis-kube-controller-manager')
+ db.unset('cis-kubelet')
+ _restart_charm()
+
+ hookenv.action_set({'summary': ('Reset is complete. Re-run with '
+ '"apply=none" to generate a new report.')})
+
+
+def report(log_format='text'):
+ '''Run kube-bench and report results.
+
+ By default, save the full plain-text results to our RESULTS_DIR and set
+ action output with a summary. This function can also save full results in
+ a machine-friendly json format.
+
+ :param: log_format: String determines if output is text or json
+ :returns: Path to results log
+ '''
+ Path(RESULTS_DIR).mkdir(parents=True, exist_ok=True)
+
+ # Node type is different depending on the charm
+ app = hookenv.charm_name() or 'unknown'
+ version = 'cis-1.5'
+ if 'master' in app:
+ target = 'master'
+ elif 'worker' in app:
+ target = 'node'
+ elif 'etcd' in app:
+ target = 'etcd'
+ else:
+ _fail('Unable to determine the target to benchmark: {}'.format(app))
+
+ # Commands and log names are different depending on the format
+ if log_format == 'json':
+ log_prefix = 'results-json-'
+ verbose_cmd = ('{bin} -D {cfg} --benchmark {ver} --json run '
+ '--targets {target}').format(
+ bin=BENCH_BIN, cfg=BENCH_CFG, ver=version, target=target)
+ else:
+ log_prefix = 'results-text-'
+ verbose_cmd = ('{bin} -D {cfg} --benchmark {ver} run '
+ '--targets {target}').format(
+ bin=BENCH_BIN, cfg=BENCH_CFG, ver=version, target=target)
+
+ summary_cmd = ('{bin} -D {cfg} --benchmark {ver} '
+ '--noremediations --noresults run --targets {target}').format(
+ bin=BENCH_BIN, cfg=BENCH_CFG, ver=version, target=target)
+
+ # Store full results for future consumption
+ with tempfile.NamedTemporaryFile(mode='w+b', prefix=log_prefix,
+ dir=RESULTS_DIR, delete=False) as res_file:
+ try:
+ subprocess.call(shlex.split(verbose_cmd), stdout=res_file)
+ except subprocess.CalledProcessError:
+ _fail('Failed to run: {}'.format(verbose_cmd))
+ else:
+ # remember the filename for later (and make it readable, why not?)
+ Path(res_file.name).chmod(0o644)
+ log = res_file.name
+
+ # When making a summary, we also have a verbose report. Set action output
+ # so operators can see everything related to this run.
+ try:
+ out = subprocess.check_output(shlex.split(summary_cmd),
+ universal_newlines=True)
+ except subprocess.CalledProcessError:
+ _fail('Failed to run: {}'.format(summary_cmd))
+ else:
+ fetch_cmd = 'juju scp {unit}:{file} .'.format(unit=hookenv.local_unit(),
+ file=log)
+ hookenv.action_set({'cmd': summary_cmd,
+ 'report': fetch_cmd,
+ 'summary': out})
+
+ return log or None
+
+
+if __name__ == '__main__':
+ if not (is_flag_set('snap.installed.etcd') or
+ is_flag_set('kubernetes-master.snaps.installed') or
+ is_flag_set('kubernetes-worker.snaps.installed')):
+ msg = 'Snaps are not yet installed on this unit.'
+ _fail(msg)
+
+ # Validate action params
+ release = hookenv.action_get('release') or 'upstream'
+ config = hookenv.action_get('config')
+ if not config:
+ msg = 'Missing "config" parameter'
+ _fail(msg)
+ remediations = hookenv.action_get('apply')
+ if remediations not in ['none', 'conservative', 'dangerous', 'reset']:
+ msg = 'Invalid "apply" parameter: {}'.format(remediations)
+ _fail(msg)
+
+ # TODO: may want an option to overwrite an existing install
+ if Path(BENCH_BIN).exists() and Path(BENCH_CFG).exists():
+ hookenv.log('{} exists; skipping install'.format(BENCH_HOME))
+ else:
+ hookenv.log('Installing benchmark from: {}'.format(release))
+ install(release, config)
+
+ # Reset, remediate, or report
+ if remediations == 'reset':
+ hookenv.log('Attempting to remove all remediations')
+ reset()
+ elif remediations != 'none':
+ hookenv.log('Applying "{}" remediations'.format(remediations))
+ apply(remediations)
+ else:
+ hookenv.log('Report only; no remediations were requested')
+ report(log_format='text')
diff --git a/kubernetes-master/actions/create-rbd-pv b/kubernetes-master/actions/create-rbd-pv
new file mode 100755
index 0000000..22a0c88
--- /dev/null
+++ b/kubernetes-master/actions/create-rbd-pv
@@ -0,0 +1,330 @@
+#!/usr/local/sbin/charm-env python3
+
+# Copyright 2015 The Kubernetes Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from charms.reactive import is_state
+from charmhelpers.core.templating import render
+from charmhelpers.core.hookenv import action_get
+from charmhelpers.core.hookenv import action_set
+from charmhelpers.core.hookenv import action_fail
+from subprocess import check_call
+from subprocess import check_output
+from subprocess import CalledProcessError
+from tempfile import TemporaryDirectory
+import json
+import re
+import os
+import sys
+
+from charms.layer.kubernetes_master import install_ceph_common
+
+
+os.environ['PATH'] += os.pathsep + os.path.join(os.sep, 'snap', 'bin')
+
+
+def main():
+    """Control logic to enlist Ceph RBD volumes as PersistentVolumes in
+    Kubernetes. This will invoke the validation steps, and only execute if
+    this script thinks the environment is 'sane' enough to provision volumes.
+
+    :return: None
+    """
+    # k8s >= 1.10 uses CSI and doesn't directly create persistent volumes.
+    if get_version('kube-apiserver') >= (1, 10):
+        print('This action is deprecated in favor of CSI creation of')
+        print('persistent volumes in Kubernetes >= 1.10. Just create the PVC')
+        print('and a PV will be created for you.')
+        action_fail('Deprecated, just create PVC.')
+        return
+
+    # validate relationship pre-reqs before additional steps can be taken.
+    if not validate_relation():
+        print('Failed ceph relationship check')
+        action_fail('Failed ceph relationship check')
+        return
+
+    if not is_ceph_healthy():
+        print('Ceph was not healthy.')
+        action_fail('Ceph was not healthy.')
+        return
+
+    # Ensure the ceph CLI tooling is present before shelling out to it.
+    install_ceph_common()
+
+    # Template context for the PersistentVolume manifest rendered below.
+    context = {}
+
+    context['RBD_NAME'] = action_get_or_default('name').strip()
+    context['RBD_SIZE'] = action_get_or_default('size')
+    context['RBD_FS'] = action_get_or_default('filesystem').strip()
+    context['PV_MODE'] = action_get_or_default('mode').strip()
+
+    # Ensure we're not exceeding available space in the pool
+    if not validate_space(context['RBD_SIZE']):
+        return
+
+    # Ensure our parameters match
+    param_validation = validate_parameters(context['RBD_NAME'],
+                                           context['RBD_FS'],
+                                           context['PV_MODE'])
+    # validate_parameters returns the number of failed checks; 0 == OK.
+    if not param_validation == 0:
+        return
+
+    if not validate_unique_volume_name(context['RBD_NAME']):
+        action_fail('Volume name collision detected. Volume creation aborted.')
+        return
+
+    context['monitors'] = get_monitors()
+
+    # Invoke creation and format the mount device
+    create_rbd_volume(context['RBD_NAME'],
+                      context['RBD_SIZE'],
+                      context['RBD_FS'])
+
+    # Create a temporary workspace to render our persistentVolume template, and
+    # enlist the RDB based PV we've just created
+    with TemporaryDirectory() as active_working_path:
+        temp_template = '{}/pv.yaml'.format(active_working_path)
+        render('rbd-persistent-volume.yaml', temp_template, context)
+
+        cmd = ['kubectl', 'create', '-f', temp_template]
+        debug_command(cmd)
+        check_call(cmd)
+
+
+def get_version(bin_name):
+    """Get the version of an installed Kubernetes binary.
+
+    :param str bin_name: Name of binary
+    :return: 3-tuple version (maj, min, patch)
+
+    Example::
+
+        >>> get_version('kubelet')
+        (1, 6, 0)
+
+    """
+    cmd = '{} --version'.format(bin_name).split()
+    version_string = check_output(cmd).decode('utf-8')
+    # Take the first three integer groups from the version banner.
+    return tuple(int(q) for q in re.findall("[0-9]+", version_string)[:3])
+
+
+def action_get_or_default(key):
+ ''' Convenience method to manage defaults since actions dont appear to
+ properly support defaults '''
+
+ value = action_get(key)
+ if value:
+ return value
+ elif key == 'filesystem':
+ return 'xfs'
+ elif key == 'size':
+ return 0
+ elif key == 'mode':
+ return "ReadWriteOnce"
+ elif key == 'skip-size-check':
+ return False
+ else:
+ return ''
+
+
+def create_rbd_volume(name, size, filesystem):
+ ''' Create the RBD volume in Ceph. Then mount it locally to format it for
+ the requested filesystem.
+
+ :param name - The name of the RBD volume
+ :param size - The size in MB of the volume
+ :param filesystem - The type of filesystem to format the block device
+ '''
+
+ # Create the rbd volume
+ # $ rbd create foo --size 50 --image-feature layering
+ command = ['rbd', 'create', '--size', '{}'.format(size), '--image-feature',
+ 'layering', name]
+ debug_command(command)
+ check_call(command)
+
+ # Lift the validation sequence to determine if we actually created the
+ # rbd volume
+ if validate_unique_volume_name(name):
+ # we failed to create the RBD volume. whoops
+ action_fail('RBD Volume not listed after creation.')
+ print('Ceph RBD volume {} not found in rbd list'.format(name))
+ # hack, needs love if we're killing the process thread this deep in
+ # the call stack.
+ sys.exit(0)
+
+ mount = ['rbd', 'map', name]
+ debug_command(mount)
+ device_path = check_output(mount).strip()
+
+ try:
+ format_command = ['mkfs.{}'.format(filesystem), device_path]
+ debug_command(format_command)
+ check_call(format_command)
+ unmount = ['rbd', 'unmap', name]
+ debug_command(unmount)
+ check_call(unmount)
+ except CalledProcessError:
+ print('Failed to format filesystem and unmount. RBD created but not'
+ ' enlisted.')
+ action_fail('Failed to format filesystem and unmount.'
+ ' RDB created but not enlisted.')
+
+
+def is_ceph_healthy():
+ ''' Probe the remote ceph cluster for health status '''
+ command = ['ceph', 'health']
+ debug_command(command)
+ health_output = check_output(command)
+ if b'HEALTH_OK' in health_output:
+ return True
+ else:
+ return False
+
+
+def get_monitors():
+ ''' Parse the monitors out of /etc/ceph/ceph.conf '''
+ found_hosts = []
+ # This is kind of hacky. We should be piping this in from juju relations
+ with open('/etc/ceph/ceph.conf', 'r') as ceph_conf:
+ for line in ceph_conf.readlines():
+ if 'mon host' in line:
+ # strip out the key definition
+ hosts = line.lstrip('mon host = ').split(' ')
+ for host in hosts:
+ found_hosts.append(host)
+ return found_hosts
+
+
+def get_available_space():
+ ''' Determine the space available in the RBD pool. Throw an exception if
+ the RBD pool ('rbd') isn't found. '''
+ command = 'ceph df -f json'.split()
+ debug_command(command)
+ out = check_output(command).decode('utf-8')
+ data = json.loads(out)
+ for pool in data['pools']:
+ if pool['name'] == 'rbd':
+ return int(pool['stats']['max_avail'] / (1024 * 1024))
+ raise UnknownAvailableSpaceException('Unable to determine available space.') # noqa
+
+
+def validate_unique_volume_name(name):
+ ''' Poll the CEPH-MON services to determine if we have a unique rbd volume
+ name to use. If there is naming collisions, block the request for volume
+ provisioning.
+
+ :param name - The name of the RBD volume
+ '''
+
+ command = ['rbd', 'list']
+ debug_command(command)
+ raw_out = check_output(command)
+
+ # Split the output on newlines
+ # output spec:
+ # $ rbd list
+ # foo
+ # foobar
+ volume_list = raw_out.decode('utf-8').splitlines()
+
+ for volume in volume_list:
+ if volume.strip() == name:
+ return False
+
+ return True
+
+
+def validate_relation():
+    ''' Determine if we are related to ceph. If we are not, we should
+    note this in the action output and fail this action run. We are relying
+    on specific files in specific paths to be placed in order for this function
+    to work. This method verifies those files are placed. '''
+
+    # TODO: Validate that the ceph-common package is installed
+    # The reactive flag is set by the ceph-storage relation.
+    if not is_state('ceph-storage.available'):
+        message = 'Failed to detect connected ceph-mon'
+        print(message)
+        action_set({'pre-req.ceph-relation': message})
+        return False
+
+    # The ceph CLI below needs a rendered config file to reach the cluster.
+    if not os.path.isfile('/etc/ceph/ceph.conf'):
+        message = 'No Ceph configuration found in /etc/ceph/ceph.conf'
+        print(message)
+        action_set({'pre-req.ceph-configuration': message})
+        return False
+
+    # TODO: Validate ceph key
+
+    return True
+
+
+def validate_space(size):
+    ''' Return True when the rbd pool has at least `size` MB free, or when
+    the skip-size-check parameter was set. Fails the action otherwise. '''
+    if action_get_or_default('skip-size-check'):
+        return True
+    available_space = get_available_space()
+    if available_space < size:
+        msg = 'Unable to allocate RBD of size {}MB, only {}MB are available.'
+        action_fail(msg.format(size, available_space))
+        return False
+    return True
+
+
+def validate_parameters(name, fs, mode):
+ ''' Validate the user inputs to ensure they conform to what the
+ action expects. This method will check the naming characters used
+ for the rbd volume, ensure they have selected a fstype we are expecting
+ and the mode against our whitelist '''
+ name_regex = '^[a-zA-z0-9][a-zA-Z0-9|-]'
+
+ fs_whitelist = ['xfs', 'ext4']
+
+ # see http://kubernetes.io/docs/user-guide/persistent-volumes/#access-modes
+ # for supported operations on RBD volumes.
+ mode_whitelist = ['ReadWriteOnce', 'ReadOnlyMany']
+
+ fails = 0
+
+ if not re.match(name_regex, name):
+ message = 'Validation failed for RBD volume-name'
+ action_fail(message)
+ fails = fails + 1
+ action_set({'validation.name': message})
+
+ if fs not in fs_whitelist:
+ message = 'Validation failed for file system'
+ action_fail(message)
+ fails = fails + 1
+ action_set({'validation.filesystem': message})
+
+ if mode not in mode_whitelist:
+ message = "Validation failed for mode"
+ action_fail(message)
+ fails = fails + 1
+ action_set({'validation.mode': message})
+
+ return fails
+
+
+def debug_command(cmd):
+ ''' Print a debug statement of the command invoked '''
+ print("Invoking {}".format(cmd))
+
+
+class UnknownAvailableSpaceException(Exception):
+    """Raised when the 'rbd' pool is absent from the `ceph df` report."""
+    pass
+
+
+if __name__ == '__main__':
+ main()
diff --git a/kubernetes-master/actions/debug b/kubernetes-master/actions/debug
new file mode 100755
index 0000000..8ba160e
--- /dev/null
+++ b/kubernetes-master/actions/debug
@@ -0,0 +1,102 @@
+#!/usr/local/sbin/charm-env python3
+
+import os
+import subprocess
+import tarfile
+import tempfile
+import traceback
+from contextlib import contextmanager
+from datetime import datetime
+from charmhelpers.core.hookenv import action_set, local_unit
+
+archive_dir = None
+log_file = None
+
+
+@contextmanager
+def archive_context():
+    """ Open a context with a new temporary directory.
+
+    When the context closes, the directory is archived, and the archive
+    location is added to Juju action output. """
+    # Publish the workspace and log handle via module globals so log() and
+    # run_script() can use them while the context is open.
+    global archive_dir
+    global log_file
+    with tempfile.TemporaryDirectory() as temp_dir:
+        name = "debug-" + datetime.now().strftime("%Y%m%d%H%M%S")
+        archive_dir = os.path.join(temp_dir, name)
+        os.makedirs(archive_dir)
+        with open("%s/debug.log" % archive_dir, "w") as log_file:
+            yield
+        # chdir so tar stores paths relative to temp_dir (just "name/...").
+        os.chdir(temp_dir)
+        tar_path = "/home/ubuntu/%s.tar.gz" % name
+        with tarfile.open(tar_path, "w:gz") as f:
+            f.add(name)
+        action_set({
+            "path": tar_path,
+            "command": "juju scp %s:%s ." % (local_unit(), tar_path),
+            "message": " ".join([
+                "Archive has been created on unit %s." % local_unit(),
+                "Use the juju scp command to copy it to your local machine."
+            ])
+        })
+
+
+def log(msg):
+    """ Log a message that will be included in the debug archive.
+
+    Must be run within archive_context """
+    timestamp = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
+    # Prefix every line of a multi-line message with the same timestamp.
+    for line in str(msg).splitlines():
+        log_file.write(timestamp + " | " + line.rstrip() + "\n")
+
+
+def run_script(script):
+    """ Run a single script. Must be run within archive_context """
+    log("Running script: " + script)
+    # Each script gets its own subdirectory for stdout/stderr and any
+    # extra files it wants to drop (via DEBUG_SCRIPT_DIR).
+    script_dir = os.path.join(archive_dir, script)
+    os.makedirs(script_dir)
+    env = os.environ.copy()
+    env["PYTHONPATH"] = "lib"  # allow same imports as reactive code
+    env["DEBUG_SCRIPT_DIR"] = script_dir
+    with open(script_dir + "/stdout", "w") as stdout:
+        with open(script_dir + "/stderr", "w") as stderr:
+            process = subprocess.Popen(
+                "debug-scripts/" + script,
+                stdout=stdout, stderr=stderr, env=env
+            )
+            # Escalate: 5 min to finish, then terminate, then kill.
+            try:
+                exit_code = process.wait(timeout=300)
+            except subprocess.TimeoutExpired:
+                log("ERROR: still running, terminating")
+                process.terminate()
+                try:
+                    exit_code = process.wait(timeout=10)
+                except subprocess.TimeoutExpired:
+                    log("ERROR: still running, killing")
+                    process.kill()
+                    exit_code = process.wait(timeout=10)
+            if exit_code != 0:
+                log("ERROR: %s failed with exit code %d" % (script, exit_code))
+
+
+def run_all_scripts():
+ """ Run all scripts. For the sake of robustness, log and ignore any
+ exceptions that occur.
+
+ Must be run within archive_context """
+ scripts = os.listdir("debug-scripts")
+ for script in scripts:
+ try:
+ run_script(script)
+ except:
+ log(traceback.format_exc())
+
+
+def main():
+ """ Open an archive context and run all scripts. """
+ with archive_context():
+ run_all_scripts()
+
+
+if __name__ == "__main__":
+ main()
diff --git a/kubernetes-master/actions/get-kubeconfig b/kubernetes-master/actions/get-kubeconfig
new file mode 100755
index 0000000..60e67eb
--- /dev/null
+++ b/kubernetes-master/actions/get-kubeconfig
@@ -0,0 +1,75 @@
+#!/usr/local/sbin/charm-env python3
+import os
+import json
+import tempfile
+import subprocess
+from charmhelpers.core.hookenv import action_get, action_set, action_fail, action_name
+
+
+def _kubectl(args):
+ """
+ Executes kubectl with args as arguments
+ """
+ snap_bin = os.path.join(os.sep, "snap", "bin")
+ env = os.environ.copy()
+ env["PATH"] = os.pathsep.join([snap_bin, env["PATH"]])
+ cmd = ["kubectl", "--kubeconfig=/home/ubuntu/config"]
+ cmd.extend(args)
+ return subprocess.check_output(
+ cmd,
+ env=env,
+ stderr=subprocess.STDOUT,
+ )
+
+
+def get_kubeconfig():
+    """
+    Read the kubeconfig on the master and return it as JSON
+    """
+    try:
+        result = _kubectl(["config", "view", "-o", "json", "--raw"])
+        # JSON format verification: round-trip through the parser so a
+        # malformed kubeconfig fails here rather than at the consumer.
+        kubeconfig = json.dumps(json.loads(result))
+        action_set({"kubeconfig": kubeconfig})
+    except json.JSONDecodeError as e:
+        action_fail("Failed to parse kubeconfig: {}".format(str(e)))
+    except Exception as e:
+        action_fail("Failed to retrieve kubeconfig: {}".format(str(e)))
+
+
+def apply_manifest():
+ """
+ Applies a user defined manifest with kubectl
+ """
+ _, apply_path = tempfile.mkstemp(suffix=".json")
+ try:
+ manifest = json.loads(action_get("json"))
+ with open(apply_path, "w") as manifest_file:
+ json.dump(manifest, manifest_file)
+ output = _kubectl(["apply", "-f", apply_path])
+
+ action_set(
+ {
+ "summary": "Manifest applied.",
+ "output": output.decode("utf-8"),
+ }
+ )
+ except subprocess.CalledProcessError as e:
+ action_fail(
+ "kubectl failed with exit code {} and message: {}".format(
+ e.returncode, e.output
+ )
+ )
+ except json.JSONDecodeError as e:
+ action_fail("Failed to parse JSON manifest: {}".format(str(e)))
+ except Exception as e:
+ action_fail("Failed to apply manifest: {}".format(str(e)))
+ finally:
+ os.unlink(apply_path)
+
+
+action = action_name()
+if action == "get-kubeconfig":
+ get_kubeconfig()
+elif action == "apply-manifest":
+ apply_manifest()
diff --git a/kubernetes-master/actions/kubectl-actions.py b/kubernetes-master/actions/kubectl-actions.py
new file mode 100755
index 0000000..60e67eb
--- /dev/null
+++ b/kubernetes-master/actions/kubectl-actions.py
@@ -0,0 +1,75 @@
+#!/usr/local/sbin/charm-env python3
+import os
+import json
+import tempfile
+import subprocess
+from charmhelpers.core.hookenv import action_get, action_set, action_fail, action_name
+
+
+def _kubectl(args):
+ """
+ Executes kubectl with args as arguments
+ """
+ snap_bin = os.path.join(os.sep, "snap", "bin")
+ env = os.environ.copy()
+ env["PATH"] = os.pathsep.join([snap_bin, env["PATH"]])
+ cmd = ["kubectl", "--kubeconfig=/home/ubuntu/config"]
+ cmd.extend(args)
+ return subprocess.check_output(
+ cmd,
+ env=env,
+ stderr=subprocess.STDOUT,
+ )
+
+
+def get_kubeconfig():
+ """
+ Read the kubeconfig on the master and return it as JSON
+ """
+ try:
+ result = _kubectl(["config", "view", "-o", "json", "--raw"])
+ # JSON format verification
+ kubeconfig = json.dumps(json.loads(result))
+ action_set({"kubeconfig": kubeconfig})
+ except json.JSONDecodeError as e:
+ action_fail("Failed to parse kubeconfig: {}".format(str(e)))
+ except Exception as e:
+ action_fail("Failed to retrieve kubeconfig: {}".format(str(e)))
+
+
+def apply_manifest():
+ """
+ Applies a user defined manifest with kubectl
+ """
+ _, apply_path = tempfile.mkstemp(suffix=".json")
+ try:
+ manifest = json.loads(action_get("json"))
+ with open(apply_path, "w") as manifest_file:
+ json.dump(manifest, manifest_file)
+ output = _kubectl(["apply", "-f", apply_path])
+
+ action_set(
+ {
+ "summary": "Manifest applied.",
+ "output": output.decode("utf-8"),
+ }
+ )
+ except subprocess.CalledProcessError as e:
+ action_fail(
+ "kubectl failed with exit code {} and message: {}".format(
+ e.returncode, e.output
+ )
+ )
+ except json.JSONDecodeError as e:
+ action_fail("Failed to parse JSON manifest: {}".format(str(e)))
+ except Exception as e:
+ action_fail("Failed to apply manifest: {}".format(str(e)))
+ finally:
+ os.unlink(apply_path)
+
+
+action = action_name()
+if action == "get-kubeconfig":
+ get_kubeconfig()
+elif action == "apply-manifest":
+ apply_manifest()
diff --git a/kubernetes-master/actions/namespace-create b/kubernetes-master/actions/namespace-create
new file mode 100755
index 0000000..50e8650
--- /dev/null
+++ b/kubernetes-master/actions/namespace-create
@@ -0,0 +1,60 @@
+#!/usr/local/sbin/charm-env python3
+import os
+from yaml import safe_load as load
+from charmhelpers.core.hookenv import (
+ action_get,
+ action_set,
+ action_fail,
+ action_name
+)
+from charmhelpers.core.templating import render
+from subprocess import check_output
+
+
+os.environ['PATH'] += os.pathsep + os.path.join(os.sep, 'snap', 'bin')
+
+
+def kubectl(args):
+ cmd = ["kubectl", "--kubeconfig=/home/ubuntu/config"]
+ cmd.extend(args)
+ return check_output(cmd)
+
+
+def namespace_list():
+    ''' Publish the cluster's namespaces in the action output and return
+    them as a list for reuse by the create/delete actions. '''
+    y = load(kubectl(['get', 'namespaces', '-o', 'yaml']))
+    ns = [i['metadata']['name'] for i in y['items']]
+    action_set({'namespaces': ', '.join(ns)+'.'})
+    return ns
+
+
+def namespace_create():
+    ''' Create the namespace named by the "name" action parameter; fail if
+    it already exists. '''
+    name = action_get('name')
+    if name in namespace_list():
+        action_fail('Namespace "{}" already exists.'.format(name))
+        return
+
+    # Render the manifest into the addons dir, then create it with kubectl.
+    render('create-namespace.yaml.j2', '/etc/kubernetes/addons/create-namespace.yaml',
+           context={'name': name})
+    kubectl(['create', '-f', '/etc/kubernetes/addons/create-namespace.yaml'])
+    action_set({'msg': 'Namespace "{}" created.'.format(name)})
+
+
+def namespace_delete():
+    ''' Delete the namespace named by the "name" action parameter, refusing
+    to touch the protected default/kube-system namespaces. '''
+    name = action_get('name')
+    if name in ['default', 'kube-system']:
+        action_fail('Not allowed to delete "{}".'.format(name))
+        return
+    if name not in namespace_list():
+        action_fail('Namespace "{}" does not exist.'.format(name))
+        return
+    kubectl(['delete', 'ns/'+name])
+    action_set({'msg': 'Namespace "{}" deleted.'.format(name)})
+
+
+action = action_name().replace('namespace-', '')
+if action == 'create':
+ namespace_create()
+elif action == 'list':
+ namespace_list()
+elif action == 'delete':
+ namespace_delete()
diff --git a/kubernetes-master/actions/namespace-delete b/kubernetes-master/actions/namespace-delete
new file mode 100755
index 0000000..50e8650
--- /dev/null
+++ b/kubernetes-master/actions/namespace-delete
@@ -0,0 +1,60 @@
+#!/usr/local/sbin/charm-env python3
+import os
+from yaml import safe_load as load
+from charmhelpers.core.hookenv import (
+ action_get,
+ action_set,
+ action_fail,
+ action_name
+)
+from charmhelpers.core.templating import render
+from subprocess import check_output
+
+
+os.environ['PATH'] += os.pathsep + os.path.join(os.sep, 'snap', 'bin')
+
+
+def kubectl(args):
+ cmd = ["kubectl", "--kubeconfig=/home/ubuntu/config"]
+ cmd.extend(args)
+ return check_output(cmd)
+
+
+def namespace_list():
+ y = load(kubectl(['get', 'namespaces', '-o', 'yaml']))
+ ns = [i['metadata']['name'] for i in y['items']]
+ action_set({'namespaces': ', '.join(ns)+'.'})
+ return ns
+
+
+def namespace_create():
+ name = action_get('name')
+ if name in namespace_list():
+ action_fail('Namespace "{}" already exists.'.format(name))
+ return
+
+ render('create-namespace.yaml.j2', '/etc/kubernetes/addons/create-namespace.yaml',
+ context={'name': name})
+ kubectl(['create', '-f', '/etc/kubernetes/addons/create-namespace.yaml'])
+ action_set({'msg': 'Namespace "{}" created.'.format(name)})
+
+
+def namespace_delete():
+ name = action_get('name')
+ if name in ['default', 'kube-system']:
+ action_fail('Not allowed to delete "{}".'.format(name))
+ return
+ if name not in namespace_list():
+ action_fail('Namespace "{}" does not exist.'.format(name))
+ return
+ kubectl(['delete', 'ns/'+name])
+ action_set({'msg': 'Namespace "{}" deleted.'.format(name)})
+
+
+action = action_name().replace('namespace-', '')
+if action == 'create':
+ namespace_create()
+elif action == 'list':
+ namespace_list()
+elif action == 'delete':
+ namespace_delete()
diff --git a/kubernetes-master/actions/namespace-list b/kubernetes-master/actions/namespace-list
new file mode 100755
index 0000000..50e8650
--- /dev/null
+++ b/kubernetes-master/actions/namespace-list
@@ -0,0 +1,60 @@
+#!/usr/local/sbin/charm-env python3
+import os
+from yaml import safe_load as load
+from charmhelpers.core.hookenv import (
+ action_get,
+ action_set,
+ action_fail,
+ action_name
+)
+from charmhelpers.core.templating import render
+from subprocess import check_output
+
+
+os.environ['PATH'] += os.pathsep + os.path.join(os.sep, 'snap', 'bin')
+
+
+def kubectl(args):
+ cmd = ["kubectl", "--kubeconfig=/home/ubuntu/config"]
+ cmd.extend(args)
+ return check_output(cmd)
+
+
+def namespace_list():
+ y = load(kubectl(['get', 'namespaces', '-o', 'yaml']))
+ ns = [i['metadata']['name'] for i in y['items']]
+ action_set({'namespaces': ', '.join(ns)+'.'})
+ return ns
+
+
+def namespace_create():
+ name = action_get('name')
+ if name in namespace_list():
+ action_fail('Namespace "{}" already exists.'.format(name))
+ return
+
+ render('create-namespace.yaml.j2', '/etc/kubernetes/addons/create-namespace.yaml',
+ context={'name': name})
+ kubectl(['create', '-f', '/etc/kubernetes/addons/create-namespace.yaml'])
+ action_set({'msg': 'Namespace "{}" created.'.format(name)})
+
+
+def namespace_delete():
+ name = action_get('name')
+ if name in ['default', 'kube-system']:
+ action_fail('Not allowed to delete "{}".'.format(name))
+ return
+ if name not in namespace_list():
+ action_fail('Namespace "{}" does not exist.'.format(name))
+ return
+ kubectl(['delete', 'ns/'+name])
+ action_set({'msg': 'Namespace "{}" deleted.'.format(name)})
+
+
+action = action_name().replace('namespace-', '')
+if action == 'create':
+ namespace_create()
+elif action == 'list':
+ namespace_list()
+elif action == 'delete':
+ namespace_delete()
diff --git a/kubernetes-master/actions/restart b/kubernetes-master/actions/restart
new file mode 100755
index 0000000..d130733
--- /dev/null
+++ b/kubernetes-master/actions/restart
@@ -0,0 +1,14 @@
+#!/usr/bin/env bash
+
+set +ex
+
+# Restart the apiserver, controller-manager, and scheduler
+
+systemctl restart snap.kube-apiserver.daemon
+action-set apiserver.status='restarted'
+
+systemctl restart snap.kube-controller-manager.daemon
+action-set controller-manager.status='restarted'
+
+systemctl restart snap.kube-scheduler.daemon
+action-set kube-scheduler.status='restarted'
diff --git a/kubernetes-master/actions/upgrade b/kubernetes-master/actions/upgrade
new file mode 100755
index 0000000..ecff71f
--- /dev/null
+++ b/kubernetes-master/actions/upgrade
@@ -0,0 +1,9 @@
+#!/bin/bash
+set -eux
+
+# When requested, also set the flag that makes cdk-addons generate a unique
+# cluster tag during the upgrade.
+if [[ "$(action-get fix-cluster-name)" == "true" ]]; then
+    charms.reactive set_state 'kubernetes-master.cdk-addons.unique-cluster-tag'
+fi
+
+# Mark the upgrade as operator-approved, then re-run config-changed to
+# perform the actual upgrade.
+charms.reactive set_state kubernetes-master.upgrade-specified
+exec hooks/config-changed
diff --git a/kubernetes-master/actions/user-create b/kubernetes-master/actions/user-create
new file mode 100755
index 0000000..3e6828c
--- /dev/null
+++ b/kubernetes-master/actions/user-create
@@ -0,0 +1,105 @@
+#!/usr/local/sbin/charm-env python3
+import os
+import re
+import sys
+from charmhelpers.core import hookenv
+from charmhelpers.core.hookenv import action_get, action_set, action_fail, action_name
+from charms import layer
+
+os.environ["PATH"] += os.pathsep + os.path.join(os.sep, "snap", "bin")
+
+# Import charm layers and start reactive
+layer.import_layer_libs()
+hookenv._run_atstart()
+
+
+def protect_resources(name):
+    """Do not allow the action to operate on names used by Charmed Kubernetes."""
+    protected_names = [
+        "admin",
+        "system:kube-controller-manager",
+        "kube-controller-manager",
+        "system:kube-proxy",
+        "kube-proxy",
+        "system:kube-scheduler",
+        "kube-scheduler",
+        "system:monitoring",
+    ]
+    # NOTE: `action` is the module-level action name set near the bottom of
+    # this file; exit code 0 is used because action_fail already marks the
+    # action as failed.
+    if name.startswith("kubelet") or name in protected_names:
+        action_fail('Not allowed to {} "{}".'.format(action, name))
+        sys.exit(0)
+
+
+def user_list():
+ """Return a dict of 'username: secret_id' for Charmed Kubernetes users."""
+ secrets = layer.kubernetes_master.get_secret_names()
+ action_set({"users": ", ".join(list(secrets))})
+ return secrets
+
+
+def user_create():
+    """Create a new Kubernetes user: a token secret plus a ready-to-fetch
+    kubeconfig under /home/ubuntu. Fails on duplicates, protected names,
+    or invalid characters."""
+    user = action_get("name")
+    groups = action_get("groups") or ""
+    protect_resources(user)
+
+    users = user_list()
+    if user in list(users):
+        action_fail('User "{}" already exists.'.format(user))
+        return
+
+    # Validate the name
+    if re.search("[^0-9A-Za-z:@.-]+", user):
+        msg = "User name may only contain alphanumeric characters, ':', '@', '-' or '.'"
+        action_fail(msg)
+        return
+
+    # Create the secret
+    # TODO: make the token format less magical so it doesn't get out of
+    # sync with the function that creates secrets in k8s-master.py.
+    token = "{}::{}".format(user, layer.kubernetes_master.token_generator())
+    if not layer.kubernetes_master.create_secret(token, user, user, groups):
+        action_fail("Failed to create secret for: {}".format(user))
+        return
+
+    # Create a kubeconfig
+    ca_crt = layer.kubernetes_common.ca_crt_path
+    kubeconfig_path = "/home/ubuntu/{}-kubeconfig".format(user)
+    public_address, public_port = layer.kubernetes_master.get_api_endpoint()
+    public_server = "https://{0}:{1}".format(public_address, public_port)
+
+    layer.kubernetes_common.create_kubeconfig(
+        kubeconfig_path, public_server, ca_crt, token=token, user=user
+    )
+    # World-readable so the fetch command below works for the operator.
+    os.chmod(kubeconfig_path, 0o644)
+
+    # Tell the people what they've won
+    fetch_cmd = "juju scp {}:{} .".format(hookenv.local_unit(), kubeconfig_path)
+    action_set({"msg": 'User "{}" created.'.format(user)})
+    action_set({"users": ", ".join(list(users) + [user])})
+    action_set({"kubeconfig": fetch_cmd})
+
+
+def user_delete():
+    """Delete an existing Kubernetes user by removing its token secret.
+    Protected and unknown names fail the action."""
+    user = action_get("name")
+    protect_resources(user)
+
+    users = user_list()
+    if user not in list(users):
+        action_fail('User "{}" does not exist.'.format(user))
+        return
+
+    # Delete the secret
+    secret_id = users[user]
+    layer.kubernetes_master.delete_secret(secret_id)
+
+    action_set({"msg": 'User "{}" deleted.'.format(user)})
+    action_set({"users": ", ".join(u for u in list(users) if u != user)})
+
+
+action = action_name().replace("user-", "")
+if action == "create":
+ user_create()
+elif action == "list":
+ user_list()
+elif action == "delete":
+ user_delete()
diff --git a/kubernetes-master/actions/user-delete b/kubernetes-master/actions/user-delete
new file mode 100755
index 0000000..3e6828c
--- /dev/null
+++ b/kubernetes-master/actions/user-delete
@@ -0,0 +1,105 @@
+#!/usr/local/sbin/charm-env python3
+import os
+import re
+import sys
+from charmhelpers.core import hookenv
+from charmhelpers.core.hookenv import action_get, action_set, action_fail, action_name
+from charms import layer
+
+os.environ["PATH"] += os.pathsep + os.path.join(os.sep, "snap", "bin")
+
+# Import charm layers and start reactive
+layer.import_layer_libs()
+hookenv._run_atstart()
+
+
+def protect_resources(name):
+ """Do not allow the action to operate on names used by Charmed Kubernetes."""
+ protected_names = [
+ "admin",
+ "system:kube-controller-manager",
+ "kube-controller-manager",
+ "system:kube-proxy",
+ "kube-proxy",
+ "system:kube-scheduler",
+ "kube-scheduler",
+ "system:monitoring",
+ ]
+ if name.startswith("kubelet") or name in protected_names:
+ action_fail('Not allowed to {} "{}".'.format(action, name))
+ sys.exit(0)
+
+
+def user_list():
+ """Return a dict of 'username: secret_id' for Charmed Kubernetes users."""
+ secrets = layer.kubernetes_master.get_secret_names()
+ action_set({"users": ", ".join(list(secrets))})
+ return secrets
+
+
+def user_create():
+ user = action_get("name")
+ groups = action_get("groups") or ""
+ protect_resources(user)
+
+ users = user_list()
+ if user in list(users):
+ action_fail('User "{}" already exists.'.format(user))
+ return
+
+ # Validate the name
+ if re.search("[^0-9A-Za-z:@.-]+", user):
+ msg = "User name may only contain alphanumeric characters, ':', '@', '-' or '.'"
+ action_fail(msg)
+ return
+
+ # Create the secret
+ # TODO: make the token format less magical so it doesn't get out of
+ # sync with the function that creates secrets in k8s-master.py.
+ token = "{}::{}".format(user, layer.kubernetes_master.token_generator())
+ if not layer.kubernetes_master.create_secret(token, user, user, groups):
+ action_fail("Failed to create secret for: {}".format(user))
+ return
+
+ # Create a kubeconfig
+ ca_crt = layer.kubernetes_common.ca_crt_path
+ kubeconfig_path = "/home/ubuntu/{}-kubeconfig".format(user)
+ public_address, public_port = layer.kubernetes_master.get_api_endpoint()
+ public_server = "https://{0}:{1}".format(public_address, public_port)
+
+ layer.kubernetes_common.create_kubeconfig(
+ kubeconfig_path, public_server, ca_crt, token=token, user=user
+ )
+ os.chmod(kubeconfig_path, 0o644)
+
+ # Tell the people what they've won
+ fetch_cmd = "juju scp {}:{} .".format(hookenv.local_unit(), kubeconfig_path)
+ action_set({"msg": 'User "{}" created.'.format(user)})
+ action_set({"users": ", ".join(list(users) + [user])})
+ action_set({"kubeconfig": fetch_cmd})
+
+
+def user_delete():
+ user = action_get("name")
+ protect_resources(user)
+
+ users = user_list()
+ if user not in list(users):
+ action_fail('User "{}" does not exist.'.format(user))
+ return
+
+ # Delete the secret
+ secret_id = users[user]
+ layer.kubernetes_master.delete_secret(secret_id)
+
+ action_set({"msg": 'User "{}" deleted.'.format(user)})
+ action_set({"users": ", ".join(u for u in list(users) if u != user)})
+
+
+action = action_name().replace("user-", "")
+if action == "create":
+ user_create()
+elif action == "list":
+ user_list()
+elif action == "delete":
+ user_delete()
diff --git a/kubernetes-master/actions/user-list b/kubernetes-master/actions/user-list
new file mode 100755
index 0000000..3e6828c
--- /dev/null
+++ b/kubernetes-master/actions/user-list
@@ -0,0 +1,105 @@
+#!/usr/local/sbin/charm-env python3
+import os
+import re
+import sys
+from charmhelpers.core import hookenv
+from charmhelpers.core.hookenv import action_get, action_set, action_fail, action_name
+from charms import layer
+
+os.environ["PATH"] += os.pathsep + os.path.join(os.sep, "snap", "bin")
+
+# Import charm layers and start reactive
+layer.import_layer_libs()
+hookenv._run_atstart()
+
+
+def protect_resources(name):
+ """Do not allow the action to operate on names used by Charmed Kubernetes."""
+ protected_names = [
+ "admin",
+ "system:kube-controller-manager",
+ "kube-controller-manager",
+ "system:kube-proxy",
+ "kube-proxy",
+ "system:kube-scheduler",
+ "kube-scheduler",
+ "system:monitoring",
+ ]
+ if name.startswith("kubelet") or name in protected_names:
+ action_fail('Not allowed to {} "{}".'.format(action, name))
+ sys.exit(0)
+
+
+def user_list():
+ """Return a dict of 'username: secret_id' for Charmed Kubernetes users."""
+ secrets = layer.kubernetes_master.get_secret_names()
+ action_set({"users": ", ".join(list(secrets))})
+ return secrets
+
+
+def user_create():
+ user = action_get("name")
+ groups = action_get("groups") or ""
+ protect_resources(user)
+
+ users = user_list()
+ if user in list(users):
+ action_fail('User "{}" already exists.'.format(user))
+ return
+
+ # Validate the name
+ if re.search("[^0-9A-Za-z:@.-]+", user):
+ msg = "User name may only contain alphanumeric characters, ':', '@', '-' or '.'"
+ action_fail(msg)
+ return
+
+ # Create the secret
+ # TODO: make the token format less magical so it doesn't get out of
+ # sync with the function that creates secrets in k8s-master.py.
+ token = "{}::{}".format(user, layer.kubernetes_master.token_generator())
+ if not layer.kubernetes_master.create_secret(token, user, user, groups):
+ action_fail("Failed to create secret for: {}".format(user))
+ return
+
+ # Create a kubeconfig
+ ca_crt = layer.kubernetes_common.ca_crt_path
+ kubeconfig_path = "/home/ubuntu/{}-kubeconfig".format(user)
+ public_address, public_port = layer.kubernetes_master.get_api_endpoint()
+ public_server = "https://{0}:{1}".format(public_address, public_port)
+
+ layer.kubernetes_common.create_kubeconfig(
+ kubeconfig_path, public_server, ca_crt, token=token, user=user
+ )
+ os.chmod(kubeconfig_path, 0o644)
+
+ # Tell the people what they've won
+ fetch_cmd = "juju scp {}:{} .".format(hookenv.local_unit(), kubeconfig_path)
+ action_set({"msg": 'User "{}" created.'.format(user)})
+ action_set({"users": ", ".join(list(users) + [user])})
+ action_set({"kubeconfig": fetch_cmd})
+
+
+def user_delete():
+ user = action_get("name")
+ protect_resources(user)
+
+ users = user_list()
+ if user not in list(users):
+ action_fail('User "{}" does not exist.'.format(user))
+ return
+
+ # Delete the secret
+ secret_id = users[user]
+ layer.kubernetes_master.delete_secret(secret_id)
+
+ action_set({"msg": 'User "{}" deleted.'.format(user)})
+ action_set({"users": ", ".join(u for u in list(users) if u != user)})
+
+
+action = action_name().replace("user-", "")
+if action == "create":
+ user_create()
+elif action == "list":
+ user_list()
+elif action == "delete":
+ user_delete()
diff --git a/kubernetes-master/actions/user_actions.py b/kubernetes-master/actions/user_actions.py
new file mode 100755
index 0000000..3e6828c
--- /dev/null
+++ b/kubernetes-master/actions/user_actions.py
@@ -0,0 +1,105 @@
+#!/usr/local/sbin/charm-env python3
+import os
+import re
+import sys
+from charmhelpers.core import hookenv
+from charmhelpers.core.hookenv import action_get, action_set, action_fail, action_name
+from charms import layer
+
+os.environ["PATH"] += os.pathsep + os.path.join(os.sep, "snap", "bin")
+
+# Import charm layers and start reactive
+layer.import_layer_libs()
+hookenv._run_atstart()
+
+
+def protect_resources(name):
+ """Do not allow the action to operate on names used by Charmed Kubernetes."""
+ protected_names = [
+ "admin",
+ "system:kube-controller-manager",
+ "kube-controller-manager",
+ "system:kube-proxy",
+ "kube-proxy",
+ "system:kube-scheduler",
+ "kube-scheduler",
+ "system:monitoring",
+ ]
+ if name.startswith("kubelet") or name in protected_names:
+ action_fail('Not allowed to {} "{}".'.format(action, name))
+ sys.exit(0)
+
+
+def user_list():
+ """Return a dict of 'username: secret_id' for Charmed Kubernetes users."""
+ secrets = layer.kubernetes_master.get_secret_names()
+ action_set({"users": ", ".join(list(secrets))})
+ return secrets
+
+
+def user_create():
+ user = action_get("name")
+ groups = action_get("groups") or ""
+ protect_resources(user)
+
+ users = user_list()
+ if user in list(users):
+ action_fail('User "{}" already exists.'.format(user))
+ return
+
+ # Validate the name
+ if re.search("[^0-9A-Za-z:@.-]+", user):
+ msg = "User name may only contain alphanumeric characters, ':', '@', '-' or '.'"
+ action_fail(msg)
+ return
+
+ # Create the secret
+ # TODO: make the token format less magical so it doesn't get out of
+ # sync with the function that creates secrets in k8s-master.py.
+ token = "{}::{}".format(user, layer.kubernetes_master.token_generator())
+ if not layer.kubernetes_master.create_secret(token, user, user, groups):
+ action_fail("Failed to create secret for: {}".format(user))
+ return
+
+ # Create a kubeconfig
+ ca_crt = layer.kubernetes_common.ca_crt_path
+ kubeconfig_path = "/home/ubuntu/{}-kubeconfig".format(user)
+ public_address, public_port = layer.kubernetes_master.get_api_endpoint()
+ public_server = "https://{0}:{1}".format(public_address, public_port)
+
+ layer.kubernetes_common.create_kubeconfig(
+ kubeconfig_path, public_server, ca_crt, token=token, user=user
+ )
+ os.chmod(kubeconfig_path, 0o644)
+
+ # Tell the people what they've won
+ fetch_cmd = "juju scp {}:{} .".format(hookenv.local_unit(), kubeconfig_path)
+ action_set({"msg": 'User "{}" created.'.format(user)})
+ action_set({"users": ", ".join(list(users) + [user])})
+ action_set({"kubeconfig": fetch_cmd})
+
+
+def user_delete():
+ user = action_get("name")
+ protect_resources(user)
+
+ users = user_list()
+ if user not in list(users):
+ action_fail('User "{}" does not exist.'.format(user))
+ return
+
+ # Delete the secret
+ secret_id = users[user]
+ layer.kubernetes_master.delete_secret(secret_id)
+
+ action_set({"msg": 'User "{}" deleted.'.format(user)})
+ action_set({"users": ", ".join(u for u in list(users) if u != user)})
+
+
+action = action_name().replace("user-", "")
+if action == "create":
+ user_create()
+elif action == "list":
+ user_list()
+elif action == "delete":
+ user_delete()
diff --git a/kubernetes-master/bin/charm-env b/kubernetes-master/bin/charm-env
new file mode 100755
index 0000000..d211ce9
--- /dev/null
+++ b/kubernetes-master/bin/charm-env
@@ -0,0 +1,107 @@
+#!/bin/bash
+
+VERSION="1.0.0"
+
+
+find_charm_dirs() {
+ # Hopefully, $JUJU_CHARM_DIR is set so which venv to use is unambiguous.
+ if [[ -n "$JUJU_CHARM_DIR" || -n "$CHARM_DIR" ]]; then
+ if [[ -z "$JUJU_CHARM_DIR" ]]; then
+ # accept $CHARM_DIR to be more forgiving
+ export JUJU_CHARM_DIR="$CHARM_DIR"
+ fi
+ if [[ -z "$CHARM_DIR" ]]; then
+ # set CHARM_DIR as well to help with backwards compatibility
+ export CHARM_DIR="$JUJU_CHARM_DIR"
+ fi
+ return
+ fi
+ # Try to guess the value for JUJU_CHARM_DIR by looking for a non-subordinate
+ # (because there's got to be at least one principal) charm directory;
+ # if there are several, pick the first by alpha order.
+ agents_dir="/var/lib/juju/agents"
+ if [[ -d "$agents_dir" ]]; then
+ desired_charm="$1"
+ found_charm_dir=""
+ if [[ -n "$desired_charm" ]]; then
+ for charm_dir in $(/bin/ls -d "$agents_dir"/unit-*/charm); do
+ charm_name="$(grep -o '^['\''"]\?name['\''"]\?:.*' $charm_dir/metadata.yaml 2> /dev/null | sed -e 's/.*: *//' -e 's/['\''"]//g')"
+ if [[ "$charm_name" == "$desired_charm" ]]; then
+ if [[ -n "$found_charm_dir" ]]; then
+ >&2 echo "Ambiguous possibilities for JUJU_CHARM_DIR matching '$desired_charm'; please run within a Juju hook context"
+ exit 1
+ fi
+ found_charm_dir="$charm_dir"
+ fi
+ done
+ if [[ -z "$found_charm_dir" ]]; then
+ >&2 echo "Unable to determine JUJU_CHARM_DIR matching '$desired_charm'; please run within a Juju hook context"
+ exit 1
+ fi
+ export JUJU_CHARM_DIR="$found_charm_dir"
+ export CHARM_DIR="$found_charm_dir"
+ return
+ fi
+ # shellcheck disable=SC2126
+ non_subordinates="$(grep -L 'subordinate"\?:.*true' "$agents_dir"/unit-*/charm/metadata.yaml | wc -l)"
+ if [[ "$non_subordinates" -gt 1 ]]; then
+ >&2 echo 'Ambiguous possibilities for JUJU_CHARM_DIR; please use --charm or run within a Juju hook context'
+ exit 1
+ elif [[ "$non_subordinates" -eq 1 ]]; then
+ for charm_dir in $(/bin/ls -d "$agents_dir"/unit-*/charm); do
+ if grep -q 'subordinate"\?:.*true' "$charm_dir/metadata.yaml"; then
+ continue
+ fi
+ export JUJU_CHARM_DIR="$charm_dir"
+ export CHARM_DIR="$charm_dir"
+ return
+ done
+ fi
+ fi
+ >&2 echo 'Unable to determine JUJU_CHARM_DIR; please run within a Juju hook context'
+ exit 1
+}
+
+try_activate_venv() {
+ if [[ -d "$JUJU_CHARM_DIR/../.venv" ]]; then
+ . "$JUJU_CHARM_DIR/../.venv/bin/activate"
+ fi
+}
+
+find_wrapped() {
+ PATH="${PATH/\/usr\/local\/sbin:}" which "$(basename "$0")"
+}
+
+
+if [[ "$1" == "--version" || "$1" == "-v" ]]; then
+ echo "$VERSION"
+ exit 0
+fi
+
+
+# allow --charm option to hint which JUJU_CHARM_DIR to choose when ambiguous
+# NB: --charm option must come first
+# NB: option must be processed outside find_charm_dirs to modify $@
+charm_name=""
+if [[ "$1" == "--charm" ]]; then
+ charm_name="$2"
+ shift; shift
+fi
+
+find_charm_dirs "$charm_name"
+try_activate_venv
+export PYTHONPATH="$JUJU_CHARM_DIR/lib:$PYTHONPATH"
+
+if [[ "$(basename "$0")" == "charm-env" ]]; then
+ # being used as a shebang
+ exec "$@"
+elif [[ "$0" == "$BASH_SOURCE" ]]; then
+ # being invoked as a symlink wrapping something to find in the venv
+ exec "$(find_wrapped)" "$@"
+elif [[ "$(basename "$BASH_SOURCE")" == "charm-env" ]]; then
+ # being sourced directly; do nothing
+ /bin/true
+else
+ # being sourced for wrapped bash helpers
+ . "$(find_wrapped)"
+fi
diff --git a/kubernetes-master/bin/layer_option b/kubernetes-master/bin/layer_option
new file mode 100755
index 0000000..3253ef8
--- /dev/null
+++ b/kubernetes-master/bin/layer_option
@@ -0,0 +1,22 @@
+#!/usr/bin/env python3
+
+import sys
+import argparse
+from charms import layer
+
+
+parser = argparse.ArgumentParser(description='Access layer options.')
+parser.add_argument('section',
+ help='the section, or layer, the option is from')
+parser.add_argument('option',
+ help='the option to access')
+
+args = parser.parse_args()
+value = layer.options.get(args.section, args.option)
+if isinstance(value, bool):
+ sys.exit(0 if value else 1)
+elif isinstance(value, list):
+ for val in value:
+ print(val)
+else:
+ print(value)
diff --git a/kubernetes-master/config.yaml b/kubernetes-master/config.yaml
new file mode 100644
index 0000000..ba7cf05
--- /dev/null
+++ b/kubernetes-master/config.yaml
@@ -0,0 +1,465 @@
+# Copyright 2016 Canonical Ltd.
+#
+# This file is part of the Snap layer for Juju.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"options":
+ # snap_proxy and snap_proxy_url have been deprecated for some time.
+ # If your charm still needs them, add these config items manually
+ # to your charm's config.yaml.
+ # snap_proxy:
+ # description: >
+ # DEPRECATED. Use snap-http-proxy and snap-https-proxy model configuration settings.
+ # HTTP/HTTPS web proxy for Snappy to use when accessing the snap store.
+ # type: string
+ # default: ""
+ # snap_proxy_url:
+ # default: ""
+ # type: string
+ # description: >
+ # DEPRECATED. Use snap-store-proxy model configuration setting.
+ # The address of a Snap Store Proxy to use for snaps e.g. http://snap-proxy.example.com
+ "snapd_refresh":
+ "default": "max"
+ "type": "string"
+ "description": |
+ How often snapd handles updates for installed snaps. Setting an empty
+ string will check 4x per day. Set to "max" to delay the refresh as long
+ as possible. You may also set a custom string as described in the
+ 'refresh.timer' section here:
+ https://forum.snapcraft.io/t/system-options/87
+
+ DEPRECATED in 1.19: Manage installed snap versions with the snap-store-proxy model config.
+ See: https://snapcraft.io/snap-store-proxy and https://juju.is/docs/offline-mode-strategies#heading--snap-specific-proxy
+ "nagios_context":
+ "default": "juju"
+ "type": "string"
+ "description": |
+ Used by the nrpe subordinate charms.
+ A string that will be prepended to instance name to set the host name
+ in nagios. So for instance the hostname would be something like:
+ juju-myservice-0
+ If you're running multiple environments with the same services in them
+ this allows you to differentiate between them.
+ "nagios_servicegroups":
+ "default": ""
+ "type": "string"
+ "description": |
+ A comma-separated list of nagios servicegroups.
+ If left empty, the nagios_context will be used as the servicegroup
+ "sysctl":
+ "type": "string"
+ "default": "{ net.ipv4.conf.all.forwarding : 1, net.ipv4.neigh.default.gc_thresh1\
+ \ : 128, net.ipv4.neigh.default.gc_thresh2 : 28672, net.ipv4.neigh.default.gc_thresh3\
+ \ : 32768, net.ipv6.neigh.default.gc_thresh1 : 128, net.ipv6.neigh.default.gc_thresh2\
+ \ : 28672, net.ipv6.neigh.default.gc_thresh3 : 32768, fs.inotify.max_user_instances\
+ \ : 8192, fs.inotify.max_user_watches : 1048576, kernel.panic : 10, kernel.panic_on_oops:\
+ \ 1, vm.overcommit_memory : 1 }"
+ "description": |
+ YAML formatted associative array of sysctl values, e.g.:
+ '{kernel.pid_max : 4194303 }'. Note that kube-proxy handles
+ the conntrack settings. The proper way to alter them is to
+ use the proxy-extra-args config to set them, e.g.:
+ juju config kubernetes-master proxy-extra-args="conntrack-min=1000000 conntrack-max-per-core=250000"
+ juju config kubernetes-worker proxy-extra-args="conntrack-min=1000000 conntrack-max-per-core=250000"
+ The proxy-extra-args conntrack-min and conntrack-max-per-core can be set to 0 to ignore
+ kube-proxy's settings and use the sysctl settings instead. Note the fundamental difference between
+ the setting of conntrack-max-per-core vs nf_conntrack_max.
+ "proxy-extra-args":
+ "type": "string"
+ "default": ""
+ "description": |
+ Space separated list of flags and key=value pairs that will be passed as arguments to
+ kube-proxy. For example a value like this:
+ runtime-config=batch/v2alpha1=true profiling=true
+ will result in kube-proxy being run with the following options:
+ --runtime-config=batch/v2alpha1=true --profiling=true
+ "extra_packages":
+ "description": >
+ Space separated list of extra deb packages to install.
+ "type": "string"
+ "default": ""
+ "package_status":
+ "default": "install"
+ "type": "string"
+ "description": >
+ The status of service-affecting packages will be set to this
+ value in the dpkg database. Valid values are "install" and "hold".
+ "install_sources":
+ "description": >
+ List of extra apt sources, per charm-helpers standard
+ format (a yaml list of strings encoded as a string). Each source
+ may be either a line that can be added directly to
+ sources.list(5), or in the form ppa:<user>/<ppa-name> for adding
+ Personal Package Archives, or a distribution component to enable.
+ "type": "string"
+ "default": ""
+ "install_keys":
+ "description": >
+ List of signing keys for install_sources package sources, per
+ charmhelpers standard format (a yaml list of strings encoded as
+ a string). The keys should be the full ASCII armoured GPG public
+ keys. While GPG key ids are also supported and looked up on a
+ keyserver, operators should be aware that this mechanism is
+ insecure. null can be used if a standard package signing key is
+ used that will already be installed on the machine, and for PPA
+ sources where the package signing key is securely retrieved from
+ Launchpad.
+ "type": "string"
+ "default": ""
+ "ha-cluster-vip":
+ "type": "string"
+ "description": |
+ Virtual IP for the charm to use with the HA Cluster subordinate charm
+ Mutually exclusive with ha-cluster-dns. Multiple virtual IPs are
+ separated by spaces.
+ "default": ""
+ "ha-cluster-dns":
+ "type": "string"
+ "description": |
+ DNS entry to use with the HA Cluster subordinate charm.
+ Mutually exclusive with ha-cluster-vip.
+ "default": ""
+ "audit-policy":
+ "type": "string"
+ "default": |
+ apiVersion: audit.k8s.io/v1
+ kind: Policy
+ rules:
+ # Don't log read-only requests from the apiserver
+ - level: None
+ users: ["system:apiserver"]
+ verbs: ["get", "list", "watch"]
+ # Don't log kube-proxy watches
+ - level: None
+ users: ["system:kube-proxy"]
+ verbs: ["watch"]
+ resources:
+ - resources: ["endpoints", "services"]
+ # Don't log nodes getting their own status
+ - level: None
+ userGroups: ["system:nodes"]
+ verbs: ["get"]
+ resources:
+ - resources: ["nodes"]
+ # Don't log kube-controller-manager and kube-scheduler getting endpoints
+ - level: None
+ users: ["system:unsecured"]
+ namespaces: ["kube-system"]
+ verbs: ["get"]
+ resources:
+ - resources: ["endpoints"]
+ # Log everything else at the Request level.
+ - level: Request
+ omitStages:
+ - RequestReceived
+ "description": |
+ Audit policy passed to kube-apiserver via --audit-policy-file.
+ For more info, please refer to the upstream documentation at
+ https://kubernetes.io/docs/tasks/debug-application-cluster/audit/
+ "audit-webhook-config":
+ "type": "string"
+ "default": ""
+ "description": |
+ Audit webhook config passed to kube-apiserver via --audit-webhook-config-file.
+ For more info, please refer to the upstream documentation at
+ https://kubernetes.io/docs/tasks/debug-application-cluster/audit/
+ "addons-registry":
+ "type": "string"
+ "default": ""
+ "description": |
+ Specify the docker registry to use when applying addons.
+
+ DEPRECATED in 1.15: Use the broader 'image-registry' config option instead. If both
+ options are set, 'addons-registry' will be used to configure the cdk-addons snap until
+ v1.17 is released. After that, the 'addons-registry' option will have no effect.
+ "image-registry":
+ "type": "string"
+ "default": "rocks.canonical.com:443/cdk"
+ "description": |
+ Container image registry to use for CDK. This includes addons like the Kubernetes dashboard,
+ metrics server, ingress, and dns along with non-addon images including the pause
+ container and default backend image.
+ "enable-dashboard-addons":
+ "type": "boolean"
+ "default": !!bool "true"
+ "description": "Deploy the Kubernetes Dashboard"
+ "dns-provider":
+ "type": "string"
+ "default": "auto"
+ "description": |
+ DNS provider addon to use. Can be "auto", "core-dns", "kube-dns", or
+ "none".
+
+ CoreDNS is only supported on Kubernetes 1.14+.
+
+ When set to "auto", the behavior is as follows:
+ - New deployments of Kubernetes 1.14+ will use CoreDNS
+ - New deployments of Kubernetes 1.13 or older will use KubeDNS
+ - Upgraded deployments will continue to use whichever provider was
+ previously used.
+ "dns_domain":
+ "type": "string"
+ "default": "cluster.local"
+ "description": "The local domain for cluster dns"
+ "extra_sans":
+ "type": "string"
+ "default": ""
+ "description": |
+ Space-separated list of extra SAN entries to add to the x509 certificate
+ created for the master nodes.
+ "service-cidr":
+ "type": "string"
+ "default": "10.152.183.0/24"
+ "description": "CIDR to use for Kubernetes services. Cannot be changed after\
+ \ deployment."
+ "allow-privileged":
+ "type": "string"
+ "default": "auto"
+ "description": |
+ Allow kube-apiserver to run in privileged mode. Supported values are
+ "true", "false", and "auto". If "true", kube-apiserver will run in
+ privileged mode by default. If "false", kube-apiserver will never run in
+ privileged mode. If "auto", kube-apiserver will not run in privileged
+ mode by default, but will switch to privileged mode if gpu hardware is
+ detected on a worker node.
+ "enable-nvidia-plugin":
+ "type": "string"
+ "default": "auto"
+ "description": |
+ Load the nvidia device plugin daemonset. Supported values are
+ "auto" and "false". When "auto", the daemonset will be loaded
+ only if GPUs are detected. When "false" the nvidia device plugin
+ will not be loaded.
+ "channel":
+ "type": "string"
+ "default": "1.21/stable"
+ "description": |
+ Snap channel to install Kubernetes master services from
+ "client_password":
+ "type": "string"
+ "default": ""
+ "description": |
+ Password to be used for admin user (leave empty for random password).
+ "api-extra-args":
+ "type": "string"
+ "default": ""
+ "description": |
+ Space separated list of flags and key=value pairs that will be passed as arguments to
+ kube-apiserver. For example a value like this:
+ runtime-config=batch/v2alpha1=true profiling=true
+ will result in kube-apiserver being run with the following options:
+ --runtime-config=batch/v2alpha1=true --profiling=true
+ "controller-manager-extra-args":
+ "type": "string"
+ "default": ""
+ "description": |
+ Space separated list of flags and key=value pairs that will be passed as arguments to
+ kube-controller-manager. For example a value like this:
+ runtime-config=batch/v2alpha1=true profiling=true
+ will result in kube-controller-manager being run with the following options:
+ --runtime-config=batch/v2alpha1=true --profiling=true
+ "scheduler-extra-args":
+ "type": "string"
+ "default": ""
+ "description": |
+ Space separated list of flags and key=value pairs that will be passed as arguments to
+ kube-scheduler. For example a value like this:
+ runtime-config=batch/v2alpha1=true profiling=true
+ will result in kube-scheduler being run with the following options:
+ --runtime-config=batch/v2alpha1=true --profiling=true
+ "authorization-mode":
+ "type": "string"
+ "default": "Node,RBAC"
+ "description": |
+ Comma separated authorization modes. Allowed values are
+ "RBAC", "Node", "Webhook", "ABAC", "AlwaysDeny" and "AlwaysAllow".
+ "require-manual-upgrade":
+ "type": "boolean"
+ "default": !!bool "true"
+ "description": |
+ When true, master nodes will not be upgraded until the user triggers
+ it manually by running the upgrade action.
+ "storage-backend":
+ "type": "string"
+ "default": "auto"
+ "description": |
+ The storage backend for kube-apiserver persistence. Can be "etcd2", "etcd3", or
+ "auto". Auto mode will select etcd3 on new installations, or etcd2 on upgrades.
+ "enable-metrics":
+ "type": "boolean"
+ "default": !!bool "true"
+ "description": |
+ If true the metrics server for Kubernetes will be deployed onto the cluster.
+ "default-storage":
+ "type": "string"
+ "default": "auto"
+ "description": |
+ The storage class to make the default storage class. Allowed values are "auto",
+ "none", "ceph-xfs", "ceph-ext4", "cephfs". Note: Only works in Kubernetes >= 1.10
+ "cephfs-mounter":
+ "type": "string"
+ "default": "default"
+ "description": |
+ The client driver used for cephfs based storage. Options are "fuse", "kernel" and "default".
+ "keystone-policy":
+ "default": |
+ apiVersion: v1
+ kind: ConfigMap
+ metadata:
+ name: k8s-auth-policy
+ namespace: kube-system
+ labels:
+ k8s-app: k8s-keystone-auth
+ data:
+ policies: |
+ [
+ {
+ "resource": {
+ "verbs": ["get", "list", "watch"],
+ "resources": ["*"],
+ "version": "*",
+ "namespace": "*"
+ },
+ "match": [
+ {
+ "type": "role",
+ "values": ["k8s-viewers"]
+ },
+ {
+ "type": "project",
+ "values": ["k8s"]
+ }
+ ]
+ },
+ {
+ "resource": {
+ "verbs": ["*"],
+ "resources": ["*"],
+ "version": "*",
+ "namespace": "default"
+ },
+ "match": [
+ {
+ "type": "role",
+ "values": ["k8s-users"]
+ },
+ {
+ "type": "project",
+ "values": ["k8s"]
+ }
+ ]
+ },
+ {
+ "resource": {
+ "verbs": ["*"],
+ "resources": ["*"],
+ "version": "*",
+ "namespace": "*"
+ },
+ "match": [
+ {
+ "type": "role",
+ "values": ["k8s-admins"]
+ },
+ {
+ "type": "project",
+ "values": ["k8s"]
+ }
+ ]
+ }
+ ]
+ "type": "string"
+ "description": |
+ Policy for Keystone authorization. This is used when a Keystone charm is
+ related to kubernetes-master in order to provide authorization
+ for Keystone users on the Kubernetes cluster.
+ "enable-keystone-authorization":
+ "type": "boolean"
+ "default": !!bool "false"
+ "description": |
+ If true and the Keystone charm is related, users will authorize against
+ the Keystone server. Note that if related, users will always authenticate
+ against Keystone.
+ "keystone-ssl-ca":
+ "type": "string"
+ "description": |
+ Keystone certificate authority encoded in base64 for securing communications to Keystone.
+ For example: `juju config kubernetes-master keystone-ssl-ca=$(base64 /path/to/ca.crt)`
+ "default": ""
+ "dashboard-auth":
+ "type": "string"
+ "description": |
+ Method of authentication for the Kubernetes dashboard. Allowed values are "auto",
+ "basic", and "token". If set to "auto", basic auth is used unless Keystone is
+ related to kubernetes-master, in which case token auth is used.
+
+ DEPRECATED: this option has no effect on Kubernetes 1.19 and above.
+ "default": "auto"
+ "loadbalancer-ips":
+ "type": "string"
+ "description": |
+ Space separated list of IP addresses of loadbalancers in front of the control plane.
+ These can be either virtual IP addresses that have been floated in front of the control
+ plane or the IP of a loadbalancer appliance such as an F5. Workers will alternate IP
+ addresses from this list to distribute load - for example If you have 2 IPs and 4 workers,
+ each IP will be used by 2 workers. Note that this will only work if kubeapi-load-balancer
+ is not in use and there is a relation between kubernetes-master:kube-api-endpoint and
+ kubernetes-worker:kube-api-endpoint. If using the kubeapi-load-balancer, see the
+ loadbalancer-ips configuration variable on the kubeapi-load-balancer charm.
+ "default": ""
+ "monitoring-storage":
+ "type": "string"
+ "description": |
+ Configuration to set up volume for influxdb/grafana.
+ e.g
+ influxdb:
+ hostPath:
+ path: /influxdb
+ type: Directory
+ grafana:
+ hostPath:
+ path: /grafana
+ type: Directory
+
+ DEPRECATED: this option has no effect on Kubernetes 1.18 and above.
+ "default": |
+ influxdb:
+ emptyDir: {}
+ grafana:
+ emptyDir: {}
+ "default-cni":
+ "type": "string"
+ "description": |
+ Default CNI network to use when multiple CNI subordinates are related.
+
+ The value of this config should be the application name of a related CNI
+ subordinate. For example:
+
+ juju config kubernetes-master default-cni=flannel
+
+ If unspecified, then the default CNI network is chosen alphabetically.
+ "default": ""
+ "authn-webhook-endpoint":
+ "type": "string"
+ "default": ""
+ "description": |
+ Custom endpoint to check when authenticating kube-apiserver requests.
+ This must be an https url accessible by the k8s-master units. For example:
+
+ https://your.server:8443/authenticate
+
+ When a JSON-serialized TokenReview object is POSTed to this endpoint, it must
+ respond with appropriate authentication details. For more info, please refer
+ to the upstream documentation at
+ https://kubernetes.io/docs/reference/access-authn-authz/authentication/#webhook-token-authentication
diff --git a/kubernetes-master/copyright b/kubernetes-master/copyright
new file mode 100644
index 0000000..8aec8ec
--- /dev/null
+++ b/kubernetes-master/copyright
@@ -0,0 +1,13 @@
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
diff --git a/kubernetes-master/copyright.layer-apt b/kubernetes-master/copyright.layer-apt
new file mode 100644
index 0000000..0814dc1
--- /dev/null
+++ b/kubernetes-master/copyright.layer-apt
@@ -0,0 +1,15 @@
+Copyright 2015-2016 Canonical Ltd.
+
+This file is part of the Apt layer for Juju.
+
+This program is free software: you can redistribute it and/or modify
+it under the terms of the GNU General Public License version 3, as
+published by the Free Software Foundation.
+
+This program is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranties of
+MERCHANTABILITY, SATISFACTORY QUALITY, or FITNESS FOR A PARTICULAR
+PURPOSE. See the GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with this program. If not, see <http://www.gnu.org/licenses/>.
diff --git a/kubernetes-master/copyright.layer-basic b/kubernetes-master/copyright.layer-basic
new file mode 100644
index 0000000..d4fdd18
--- /dev/null
+++ b/kubernetes-master/copyright.layer-basic
@@ -0,0 +1,16 @@
+Format: http://dep.debian.net/deps/dep5/
+
+Files: *
+Copyright: Copyright 2015-2017, Canonical Ltd., All Rights Reserved.
+License: Apache License 2.0
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+ .
+ http://www.apache.org/licenses/LICENSE-2.0
+ .
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/kubernetes-master/copyright.layer-coordinator b/kubernetes-master/copyright.layer-coordinator
new file mode 100644
index 0000000..b8518aa
--- /dev/null
+++ b/kubernetes-master/copyright.layer-coordinator
@@ -0,0 +1,15 @@
+Copyright 2015-2016 Canonical Ltd.
+
+This file is part of the Coordinator Layer for Juju.
+
+This program is free software: you can redistribute it and/or modify
+it under the terms of the GNU General Public License version 3, as
+published by the Free Software Foundation.
+
+This program is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranties of
+MERCHANTABILITY, SATISFACTORY QUALITY, or FITNESS FOR A PARTICULAR
+PURPOSE. See the GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with this program. If not, see <http://www.gnu.org/licenses/>.
diff --git a/kubernetes-master/copyright.layer-leadership b/kubernetes-master/copyright.layer-leadership
new file mode 100644
index 0000000..08b983f
--- /dev/null
+++ b/kubernetes-master/copyright.layer-leadership
@@ -0,0 +1,15 @@
+Copyright 2015-2016 Canonical Ltd.
+
+This file is part of the Leadership Layer for Juju.
+
+This program is free software: you can redistribute it and/or modify
+it under the terms of the GNU General Public License version 3, as
+published by the Free Software Foundation.
+
+This program is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranties of
+MERCHANTABILITY, SATISFACTORY QUALITY, or FITNESS FOR A PARTICULAR
+PURPOSE. See the GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with this program. If not, see <http://www.gnu.org/licenses/>.
diff --git a/kubernetes-master/copyright.layer-metrics b/kubernetes-master/copyright.layer-metrics
new file mode 100644
index 0000000..2df15bd
--- /dev/null
+++ b/kubernetes-master/copyright.layer-metrics
@@ -0,0 +1,13 @@
+Copyright 2016 Canonical Ltd
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/kubernetes-master/copyright.layer-nagios b/kubernetes-master/copyright.layer-nagios
new file mode 100644
index 0000000..c80db95
--- /dev/null
+++ b/kubernetes-master/copyright.layer-nagios
@@ -0,0 +1,16 @@
+Format: http://dep.debian.net/deps/dep5/
+
+Files: *
+Copyright: Copyright 2016, Canonical Ltd.
+License: GPL-3
+ This program is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License version 3, as
+ published by the Free Software Foundation.
+ .
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranties of
+ MERCHANTABILITY, SATISFACTORY QUALITY, or FITNESS FOR A PARTICULAR
+ PURPOSE. See the GNU General Public License for more details.
+ .
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see <http://www.gnu.org/licenses/>.
diff --git a/kubernetes-master/copyright.layer-options b/kubernetes-master/copyright.layer-options
new file mode 100644
index 0000000..d4fdd18
--- /dev/null
+++ b/kubernetes-master/copyright.layer-options
@@ -0,0 +1,16 @@
+Format: http://dep.debian.net/deps/dep5/
+
+Files: *
+Copyright: Copyright 2015-2017, Canonical Ltd., All Rights Reserved.
+License: Apache License 2.0
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+ .
+ http://www.apache.org/licenses/LICENSE-2.0
+ .
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/kubernetes-master/copyright.layer-snap b/kubernetes-master/copyright.layer-snap
new file mode 100644
index 0000000..261eeb9
--- /dev/null
+++ b/kubernetes-master/copyright.layer-snap
@@ -0,0 +1,201 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/kubernetes-master/copyright.layer-status b/kubernetes-master/copyright.layer-status
new file mode 100644
index 0000000..a91bdf1
--- /dev/null
+++ b/kubernetes-master/copyright.layer-status
@@ -0,0 +1,16 @@
+Format: http://dep.debian.net/deps/dep5/
+
+Files: *
+Copyright: Copyright 2018, Canonical Ltd., All Rights Reserved.
+License: Apache License 2.0
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+ .
+ http://www.apache.org/licenses/LICENSE-2.0
+ .
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/kubernetes-master/copyright.layer-vault-kv b/kubernetes-master/copyright.layer-vault-kv
new file mode 100644
index 0000000..a91bdf1
--- /dev/null
+++ b/kubernetes-master/copyright.layer-vault-kv
@@ -0,0 +1,16 @@
+Format: http://dep.debian.net/deps/dep5/
+
+Files: *
+Copyright: Copyright 2018, Canonical Ltd., All Rights Reserved.
+License: Apache License 2.0
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+ .
+ http://www.apache.org/licenses/LICENSE-2.0
+ .
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/kubernetes-master/copyright.layer-vaultlocker b/kubernetes-master/copyright.layer-vaultlocker
new file mode 100644
index 0000000..a91bdf1
--- /dev/null
+++ b/kubernetes-master/copyright.layer-vaultlocker
@@ -0,0 +1,16 @@
+Format: http://dep.debian.net/deps/dep5/
+
+Files: *
+Copyright: Copyright 2018, Canonical Ltd., All Rights Reserved.
+License: Apache License 2.0
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+ .
+ http://www.apache.org/licenses/LICENSE-2.0
+ .
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/kubernetes-master/debug-scripts/auth-webhook b/kubernetes-master/debug-scripts/auth-webhook
new file mode 100755
index 0000000..befa79e
--- /dev/null
+++ b/kubernetes-master/debug-scripts/auth-webhook
@@ -0,0 +1,7 @@
+#!/bin/sh
+set -ux
+
+systemctl status cdk.master.auth-webhook.service > $DEBUG_SCRIPT_DIR/auth-webhook-systemctl-status
+
+AUTH_LOG=/root/cdk/auth-webhook/auth-webhook.log
+test -f $AUTH_LOG && cp $AUTH_LOG $DEBUG_SCRIPT_DIR
diff --git a/kubernetes-master/debug-scripts/charm-unitdata b/kubernetes-master/debug-scripts/charm-unitdata
new file mode 100755
index 0000000..d2aac60
--- /dev/null
+++ b/kubernetes-master/debug-scripts/charm-unitdata
@@ -0,0 +1,15 @@
+#!/usr/local/sbin/charm-env python3
+
+# Dump the charm's entire unitdata key-value store to unitdata.json in the
+# debug-script output directory.
+import debug_script
+import json
+from charmhelpers.core import unitdata
+
+kv = unitdata.kv()
+# getrange("") matches every key (empty-string prefix), i.e. the whole store.
+data = kv.getrange("")
+
+with debug_script.open_file("unitdata.json", "w") as f:
+    json.dump(data, f, indent=2)
+    f.write("\n")
diff --git a/kubernetes-master/debug-scripts/filesystem b/kubernetes-master/debug-scripts/filesystem
new file mode 100755
index 0000000..c5ec6d8
--- /dev/null
+++ b/kubernetes-master/debug-scripts/filesystem
@@ -0,0 +1,17 @@
+#!/bin/sh
+set -ux
+
+# report file system disk space usage
+df -hT > $DEBUG_SCRIPT_DIR/df-hT
+# estimate file space usage (redirect stdout first so stderr follows it into the file)
+du -h / > $DEBUG_SCRIPT_DIR/du-h 2>&1
+# list the mounted filesystems
+mount > $DEBUG_SCRIPT_DIR/mount
+# list the mounted systems with ascii trees
+findmnt -A > $DEBUG_SCRIPT_DIR/findmnt
+# list block devices
+lsblk > $DEBUG_SCRIPT_DIR/lsblk
+# list open files
+lsof > $DEBUG_SCRIPT_DIR/lsof 2>&1
+# list local system locks
+lslocks > $DEBUG_SCRIPT_DIR/lslocks
diff --git a/kubernetes-master/debug-scripts/juju-logs b/kubernetes-master/debug-scripts/juju-logs
new file mode 100755
index 0000000..d27c458
--- /dev/null
+++ b/kubernetes-master/debug-scripts/juju-logs
@@ -0,0 +1,4 @@
+#!/bin/sh
+set -ux
+
+cp -v /var/log/juju/* $DEBUG_SCRIPT_DIR
diff --git a/kubernetes-master/debug-scripts/juju-network-get b/kubernetes-master/debug-scripts/juju-network-get
new file mode 100755
index 0000000..983c8c4
--- /dev/null
+++ b/kubernetes-master/debug-scripts/juju-network-get
@@ -0,0 +1,24 @@
+#!/usr/local/sbin/charm-env python3
+
+# Collect `network-get` output for every relation declared in metadata.yaml
+# into a relations/ subdirectory of the debug-script output directory.
+import os
+import subprocess
+import yaml
+import debug_script
+
+# safe_load: metadata.yaml is plain data; avoid arbitrary object construction
+# (yaml.load without a Loader is deprecated and unsafe in PyYAML >= 5.1).
+with open('metadata.yaml') as f:
+    metadata = yaml.safe_load(f)
+
+relations = []
+for key in ['requires', 'provides', 'peers']:
+    relations += list(metadata.get(key, {}).keys())
+
+os.mkdir(os.path.join(debug_script.dir, 'relations'))
+
+for relation in relations:
+    path = 'relations/' + relation
+    with debug_script.open_file(path, 'w') as f:
+        cmd = ['network-get', relation]
+        subprocess.call(cmd, stdout=f, stderr=subprocess.STDOUT)
diff --git a/kubernetes-master/debug-scripts/kubectl b/kubernetes-master/debug-scripts/kubectl
new file mode 100755
index 0000000..216231d
--- /dev/null
+++ b/kubernetes-master/debug-scripts/kubectl
@@ -0,0 +1,17 @@
+#!/bin/sh
+set -ux
+
+export PATH=$PATH:/snap/bin
+
+# Aliases are not expanded in non-interactive POSIX shells, so an alias here
+# would silently be ignored; use a function so --kubeconfig always applies.
+kubectl() { command kubectl --kubeconfig=/root/.kube/config "$@"; }
+
+kubectl cluster-info > $DEBUG_SCRIPT_DIR/cluster-info
+kubectl cluster-info dump > $DEBUG_SCRIPT_DIR/cluster-info-dump
+for obj in pods svc ingress secrets pv pvc rc; do
+    kubectl describe $obj --all-namespaces > $DEBUG_SCRIPT_DIR/describe-$obj
+done
+for obj in nodes; do
+    kubectl describe $obj > $DEBUG_SCRIPT_DIR/describe-$obj
+done
diff --git a/kubernetes-master/debug-scripts/kubernetes-master-services b/kubernetes-master/debug-scripts/kubernetes-master-services
new file mode 100755
index 0000000..59d646b
--- /dev/null
+++ b/kubernetes-master/debug-scripts/kubernetes-master-services
@@ -0,0 +1,9 @@
+#!/bin/sh
+set -ux
+
+for service in kube-apiserver kube-controller-manager kube-scheduler kube-proxy; do
+ systemctl status snap.$service.daemon > $DEBUG_SCRIPT_DIR/$service-systemctl-status
+ journalctl -u snap.$service.daemon > $DEBUG_SCRIPT_DIR/$service-journal
+done
+
+# FIXME: grab snap config or something
diff --git a/kubernetes-master/debug-scripts/network b/kubernetes-master/debug-scripts/network
new file mode 100755
index 0000000..944a355
--- /dev/null
+++ b/kubernetes-master/debug-scripts/network
@@ -0,0 +1,11 @@
+#!/bin/sh
+set -ux
+
+ifconfig -a > $DEBUG_SCRIPT_DIR/ifconfig
+cp -v /etc/resolv.conf $DEBUG_SCRIPT_DIR/resolv.conf
+cp -v /etc/network/interfaces $DEBUG_SCRIPT_DIR/interfaces
+netstat -planut > $DEBUG_SCRIPT_DIR/netstat
+route -n > $DEBUG_SCRIPT_DIR/route
+iptables-save > $DEBUG_SCRIPT_DIR/iptables-save
+dig google.com > $DEBUG_SCRIPT_DIR/dig-google
+ping -w 2 -i 0.1 google.com > $DEBUG_SCRIPT_DIR/ping-google
diff --git a/kubernetes-master/debug-scripts/packages b/kubernetes-master/debug-scripts/packages
new file mode 100755
index 0000000..b60a9cf
--- /dev/null
+++ b/kubernetes-master/debug-scripts/packages
@@ -0,0 +1,7 @@
+#!/bin/sh
+set -ux
+
+dpkg --list > $DEBUG_SCRIPT_DIR/dpkg-list
+snap list > $DEBUG_SCRIPT_DIR/snap-list
+pip2 list > $DEBUG_SCRIPT_DIR/pip2-list
+pip3 list > $DEBUG_SCRIPT_DIR/pip3-list
diff --git a/kubernetes-master/debug-scripts/sysctl b/kubernetes-master/debug-scripts/sysctl
new file mode 100755
index 0000000..a86a6c8
--- /dev/null
+++ b/kubernetes-master/debug-scripts/sysctl
@@ -0,0 +1,4 @@
+#!/bin/sh
+set -ux
+
+sysctl -a > $DEBUG_SCRIPT_DIR/sysctl
diff --git a/kubernetes-master/debug-scripts/systemd b/kubernetes-master/debug-scripts/systemd
new file mode 100755
index 0000000..8bb9b6f
--- /dev/null
+++ b/kubernetes-master/debug-scripts/systemd
@@ -0,0 +1,9 @@
+#!/bin/sh
+set -ux
+
+systemctl --all > $DEBUG_SCRIPT_DIR/systemctl
+journalctl > $DEBUG_SCRIPT_DIR/journalctl
+systemd-analyze time > $DEBUG_SCRIPT_DIR/systemd-analyze-time
+systemd-analyze blame > $DEBUG_SCRIPT_DIR/systemd-analyze-blame
+systemd-analyze critical-chain > $DEBUG_SCRIPT_DIR/systemd-analyze-critical-chain
+systemd-analyze dump > $DEBUG_SCRIPT_DIR/systemd-analyze-dump
diff --git a/kubernetes-master/debug-scripts/tls-certs b/kubernetes-master/debug-scripts/tls-certs
new file mode 100755
index 0000000..2692e51
--- /dev/null
+++ b/kubernetes-master/debug-scripts/tls-certs
@@ -0,0 +1,25 @@
+#!/usr/local/sbin/charm-env python3
+
+# Copy the TLS certificates configured for the tls-client layer into the
+# debug-script output directory so they are captured in debug archives.
+import os
+import shutil
+import traceback
+import debug_script
+from charms import layer
+
+options = layer.options.get('tls-client')
+
+def copy_cert(source_key, name):
+    # Best effort: a missing option or unreadable file only prints a
+    # traceback so the remaining certificates are still collected.
+    try:
+        source = options[source_key]
+        dest = os.path.join(debug_script.dir, name)
+        shutil.copy(source, dest)
+    except Exception:
+        traceback.print_exc()
+
+copy_cert('client_certificate_path', 'client.crt')
+copy_cert('server_certificate_path', 'server.crt')
+copy_cert('ca_certificate_path', 'ca.crt')
diff --git a/kubernetes-master/docs/status.md b/kubernetes-master/docs/status.md
new file mode 100644
index 0000000..c6cceab
--- /dev/null
+++ b/kubernetes-master/docs/status.md
@@ -0,0 +1,91 @@
+
+
+```python
+maintenance(message)
+```
+
+Set the status to the `MAINTENANCE` state with the given operator message.
+
+__Parameters__
+
+- __`message` (str)__: Message to convey to the operator.
+
+
+## maint
+
+```python
+maint(message)
+```
+
+Shorthand alias for
+[maintenance](status.md#charms.layer.status.maintenance).
+
+__Parameters__
+
+- __`message` (str)__: Message to convey to the operator.
+
+
+## blocked
+
+```python
+blocked(message)
+```
+
+Set the status to the `BLOCKED` state with the given operator message.
+
+__Parameters__
+
+- __`message` (str)__: Message to convey to the operator.
+
+
+## waiting
+
+```python
+waiting(message)
+```
+
+Set the status to the `WAITING` state with the given operator message.
+
+__Parameters__
+
+- __`message` (str)__: Message to convey to the operator.
+
+
+## active
+
+```python
+active(message)
+```
+
+Set the status to the `ACTIVE` state with the given operator message.
+
+__Parameters__
+
+- __`message` (str)__: Message to convey to the operator.
+
+
+## status_set
+
+```python
+status_set(workload_state, message)
+```
+
+Set the status to the given workload state with a message.
+
+__Parameters__
+
+- __`workload_state` (WorkloadState or str)__: State of the workload. Should be
+ a [WorkloadState](status.md#charms.layer.status.WorkloadState) enum
+ member, or the string value of one of those members.
+- __`message` (str)__: Message to convey to the operator.
+
diff --git a/kubernetes-master/docs/vault-kv.md b/kubernetes-master/docs/vault-kv.md
new file mode 100644
index 0000000..8408256
--- /dev/null
+++ b/kubernetes-master/docs/vault-kv.md
@@ -0,0 +1,98 @@
+
+# charms.layer.vault_kv
+
+
+
+## VaultNotReady
+
+```python
+VaultNotReady(self, /, *args, **kwargs)
+```
+
+Exception indicating that Vault was accessed before it was ready.
+
+
+## VaultUnitKV
+
+```python
+VaultUnitKV(self)
+```
+
+A simplified interface for storing data in Vault, with the data scoped to
+the current unit.
+
+Keys must be strings, but data can be structured as long as it is
+JSON-serializable.
+
+This class can be used as a dict, or you can use `self.get` and `self.set`
+for a more KV-like interface. When values are set, via either style, they
+are immediately persisted to Vault. Values are also cached in memory.
+
+Note: This class is a singleton.
+
+
+## VaultAppKV
+
+```python
+VaultAppKV(self)
+```
+
+A simplified interface for storing data in Vault, with data shared by every
+unit of the application.
+
+Keys must be strings, but data can be structured as long as it is
+JSON-serializable.
+
+This class can be used as a dict, or you can use `self.get` and `self.set`
+for a more KV-like interface. When values are set, via either style, they
+are immediately persisted to Vault. Values are also cached in memory.
+
+Note: This class is a singleton.
+
+
+### is_changed
+
+```python
+VaultAppKV.is_changed(self, key)
+```
+
+Determine if the value for the given key has changed since the last
+time `self.update_hashes()` has been called.
+
+In order to detect changes, hashes of the values are also stored
+in Vault.
+
+
+### update_hashes
+
+```python
+VaultAppKV.update_hashes(self)
+```
+
+Update the hashes in Vault, thus marking all fields as unchanged.
+
+This is done automatically at exit.
+
+
+## get_vault_config
+
+```python
+get_vault_config()
+```
+
+Get the config data needed for this application to access Vault.
+
+This is only needed if you're using another application, such as
+VaultLocker, using the secrets backend provided by this layer.
+
+Returns a dictionary containing the following keys:
+
+ * vault_url
+ * secret_backend
+ * role_id
+ * secret_id
+
+Note: This data is cached in [UnitData][] so anything with access to that
+could access Vault as this application.
+
+If any of this data changes (such as the secret_id being rotated), this
+layer will set the `layer.vault-kv.config.changed` flag.
+
+If this is called before the Vault relation is available, it will raise
+`VaultNotReady`.
+
+[UnitData]: https://charm-helpers.readthedocs.io/en/latest/api/charmhelpers.core.unitdata.html
+
diff --git a/kubernetes-master/docs/vaultlocker.md b/kubernetes-master/docs/vaultlocker.md
new file mode 100644
index 0000000..e30f255
--- /dev/null
+++ b/kubernetes-master/docs/vaultlocker.md
@@ -0,0 +1,49 @@
+
+# charms.layer.vaultlocker
+
+
+
+## encrypt_storage
+
+```python
+encrypt_storage(storage_name, mountbase=None)
+```
+
+Set up encryption for the given Juju storage entry, and optionally create
+and mount XFS filesystems on the encrypted storage entry location(s).
+
+Note that the storage entry **must** be defined with ``type: block``.
+
+If ``mountbase`` is not given, the location(s) will not be formatted or
+mounted. When interacting with or mounting the location(s) manually, the
+name returned by :func:`decrypted_device` called on the storage entry's
+location should be used in place of the raw location.
+
+If the storage is defined as ``multiple``, the individual locations
+will be mounted at ``{mountbase}/{storage_name}/{num}`` where ``{num}``
+is based on the storage ID. Otherwise, the storage will be mounted at
+``{mountbase}/{storage_name}``.
+
+
+## encrypt_device
+
+```python
+encrypt_device(device, mountpoint=None)
+```
+
+Set up encryption for the given block device, and optionally create and
+mount an XFS filesystem on the encrypted device.
+
+If ``mountpoint`` is not given, the device will not be formatted or
+mounted. When interacting with or mounting the device manually, the
+name returned by :func:`decrypted_device` called on the device name
+should be used in place of the raw device name.
+
+
+## decrypted_device
+
+```python
+decrypted_device(device)
+```
+
+Returns the mapped device name for the decrypted version of the encrypted
+device.
+
+This mapped device name is what should be used for mounting the device.
+
diff --git a/kubernetes-master/exec.d/docker-compose/charm-pre-install b/kubernetes-master/exec.d/docker-compose/charm-pre-install
new file mode 100644
index 0000000..f0202c5
--- /dev/null
+++ b/kubernetes-master/exec.d/docker-compose/charm-pre-install
@@ -0,0 +1,4 @@
+#!/usr/bin/env bash
+
+# This stubs out charm-pre-install coming from layer-docker as a workaround for
+# offline installs until https://github.com/juju/charm-tools/issues/301 is fixed.
diff --git a/kubernetes-master/exec.d/vmware-patch/charm-pre-install b/kubernetes-master/exec.d/vmware-patch/charm-pre-install
new file mode 100755
index 0000000..b5e6d97
--- /dev/null
+++ b/kubernetes-master/exec.d/vmware-patch/charm-pre-install
@@ -0,0 +1,17 @@
+#!/usr/bin/env bash
+MY_HOSTNAME=$(hostname)
+
+: ${JUJU_UNIT_NAME:=`uuidgen`}
+
+
+if [ "${MY_HOSTNAME}" == "ubuntuguest" ]; then
+ juju-log "Detected broken vsphere integration. Applying hostname override"
+
+ FRIENDLY_HOSTNAME=$(echo $JUJU_UNIT_NAME | tr / -)
+ juju-log "Setting hostname to $FRIENDLY_HOSTNAME"
+ if [ ! -f /etc/hostname.orig ]; then
+ mv /etc/hostname /etc/hostname.orig
+ fi
+ echo "${FRIENDLY_HOSTNAME}" > /etc/hostname
+ hostname $FRIENDLY_HOSTNAME
+fi
diff --git a/kubernetes-master/hooks/aws-iam-relation-broken b/kubernetes-master/hooks/aws-iam-relation-broken
new file mode 100755
index 0000000..9858c6b
--- /dev/null
+++ b/kubernetes-master/hooks/aws-iam-relation-broken
@@ -0,0 +1,22 @@
+#!/usr/bin/env python3
+
+# Load modules from $JUJU_CHARM_DIR/lib
+import sys
+sys.path.append('lib')
+
+from charms.layer import basic # noqa
+basic.bootstrap_charm_deps()
+
+from charmhelpers.core import hookenv # noqa
+hookenv.atstart(basic.init_config_states)
+hookenv.atexit(basic.clear_config_states)
+
+
+# This will load and run the appropriate @hook and other decorated
+# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive,
+# and $JUJU_CHARM_DIR/hooks/relations.
+#
+# See https://jujucharms.com/docs/stable/authors-charm-building
+# for more information on this pattern.
+from charms.reactive import main # noqa
+main()
diff --git a/kubernetes-master/hooks/aws-iam-relation-changed b/kubernetes-master/hooks/aws-iam-relation-changed
new file mode 100755
index 0000000..9858c6b
--- /dev/null
+++ b/kubernetes-master/hooks/aws-iam-relation-changed
@@ -0,0 +1,22 @@
+#!/usr/bin/env python3
+
+# Load modules from $JUJU_CHARM_DIR/lib
+import sys
+sys.path.append('lib')
+
+from charms.layer import basic # noqa
+basic.bootstrap_charm_deps()
+
+from charmhelpers.core import hookenv # noqa
+hookenv.atstart(basic.init_config_states)
+hookenv.atexit(basic.clear_config_states)
+
+
+# This will load and run the appropriate @hook and other decorated
+# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive,
+# and $JUJU_CHARM_DIR/hooks/relations.
+#
+# See https://jujucharms.com/docs/stable/authors-charm-building
+# for more information on this pattern.
+from charms.reactive import main # noqa
+main()
diff --git a/kubernetes-master/hooks/aws-iam-relation-created b/kubernetes-master/hooks/aws-iam-relation-created
new file mode 100755
index 0000000..9858c6b
--- /dev/null
+++ b/kubernetes-master/hooks/aws-iam-relation-created
@@ -0,0 +1,22 @@
+#!/usr/bin/env python3
+
+# Load modules from $JUJU_CHARM_DIR/lib
+import sys
+sys.path.append('lib')
+
+from charms.layer import basic # noqa
+basic.bootstrap_charm_deps()
+
+from charmhelpers.core import hookenv # noqa
+hookenv.atstart(basic.init_config_states)
+hookenv.atexit(basic.clear_config_states)
+
+
+# This will load and run the appropriate @hook and other decorated
+# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive,
+# and $JUJU_CHARM_DIR/hooks/relations.
+#
+# See https://jujucharms.com/docs/stable/authors-charm-building
+# for more information on this pattern.
+from charms.reactive import main # noqa
+main()
diff --git a/kubernetes-master/hooks/aws-iam-relation-departed b/kubernetes-master/hooks/aws-iam-relation-departed
new file mode 100755
index 0000000..9858c6b
--- /dev/null
+++ b/kubernetes-master/hooks/aws-iam-relation-departed
@@ -0,0 +1,22 @@
+#!/usr/bin/env python3
+
+# Load modules from $JUJU_CHARM_DIR/lib
+import sys
+sys.path.append('lib')
+
+from charms.layer import basic # noqa
+basic.bootstrap_charm_deps()
+
+from charmhelpers.core import hookenv # noqa
+hookenv.atstart(basic.init_config_states)
+hookenv.atexit(basic.clear_config_states)
+
+
+# This will load and run the appropriate @hook and other decorated
+# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive,
+# and $JUJU_CHARM_DIR/hooks/relations.
+#
+# See https://jujucharms.com/docs/stable/authors-charm-building
+# for more information on this pattern.
+from charms.reactive import main # noqa
+main()
diff --git a/kubernetes-master/hooks/aws-iam-relation-joined b/kubernetes-master/hooks/aws-iam-relation-joined
new file mode 100755
index 0000000..9858c6b
--- /dev/null
+++ b/kubernetes-master/hooks/aws-iam-relation-joined
@@ -0,0 +1,22 @@
+#!/usr/bin/env python3
+
+# Load modules from $JUJU_CHARM_DIR/lib
+import sys
+sys.path.append('lib')
+
+from charms.layer import basic # noqa
+basic.bootstrap_charm_deps()
+
+from charmhelpers.core import hookenv # noqa
+hookenv.atstart(basic.init_config_states)
+hookenv.atexit(basic.clear_config_states)
+
+
+# This will load and run the appropriate @hook and other decorated
+# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive,
+# and $JUJU_CHARM_DIR/hooks/relations.
+#
+# See https://jujucharms.com/docs/stable/authors-charm-building
+# for more information on this pattern.
+from charms.reactive import main # noqa
+main()
diff --git a/kubernetes-master/hooks/aws-relation-broken b/kubernetes-master/hooks/aws-relation-broken
new file mode 100755
index 0000000..9858c6b
--- /dev/null
+++ b/kubernetes-master/hooks/aws-relation-broken
@@ -0,0 +1,22 @@
+#!/usr/bin/env python3
+
+# Load modules from $JUJU_CHARM_DIR/lib
+import sys
+sys.path.append('lib')
+
+from charms.layer import basic # noqa
+basic.bootstrap_charm_deps()
+
+from charmhelpers.core import hookenv # noqa
+hookenv.atstart(basic.init_config_states)
+hookenv.atexit(basic.clear_config_states)
+
+
+# This will load and run the appropriate @hook and other decorated
+# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive,
+# and $JUJU_CHARM_DIR/hooks/relations.
+#
+# See https://jujucharms.com/docs/stable/authors-charm-building
+# for more information on this pattern.
+from charms.reactive import main # noqa
+main()
diff --git a/kubernetes-master/hooks/aws-relation-changed b/kubernetes-master/hooks/aws-relation-changed
new file mode 100755
index 0000000..9858c6b
--- /dev/null
+++ b/kubernetes-master/hooks/aws-relation-changed
@@ -0,0 +1,22 @@
+#!/usr/bin/env python3
+
+# Load modules from $JUJU_CHARM_DIR/lib
+import sys
+sys.path.append('lib')
+
+from charms.layer import basic # noqa
+basic.bootstrap_charm_deps()
+
+from charmhelpers.core import hookenv # noqa
+hookenv.atstart(basic.init_config_states)
+hookenv.atexit(basic.clear_config_states)
+
+
+# This will load and run the appropriate @hook and other decorated
+# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive,
+# and $JUJU_CHARM_DIR/hooks/relations.
+#
+# See https://jujucharms.com/docs/stable/authors-charm-building
+# for more information on this pattern.
+from charms.reactive import main # noqa
+main()
diff --git a/kubernetes-master/hooks/aws-relation-created b/kubernetes-master/hooks/aws-relation-created
new file mode 100755
index 0000000..9858c6b
--- /dev/null
+++ b/kubernetes-master/hooks/aws-relation-created
@@ -0,0 +1,22 @@
+#!/usr/bin/env python3
+
+# Load modules from $JUJU_CHARM_DIR/lib
+import sys
+sys.path.append('lib')
+
+from charms.layer import basic # noqa
+basic.bootstrap_charm_deps()
+
+from charmhelpers.core import hookenv # noqa
+hookenv.atstart(basic.init_config_states)
+hookenv.atexit(basic.clear_config_states)
+
+
+# This will load and run the appropriate @hook and other decorated
+# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive,
+# and $JUJU_CHARM_DIR/hooks/relations.
+#
+# See https://jujucharms.com/docs/stable/authors-charm-building
+# for more information on this pattern.
+from charms.reactive import main # noqa
+main()
diff --git a/kubernetes-master/hooks/aws-relation-departed b/kubernetes-master/hooks/aws-relation-departed
new file mode 100755
index 0000000..9858c6b
--- /dev/null
+++ b/kubernetes-master/hooks/aws-relation-departed
@@ -0,0 +1,22 @@
+#!/usr/bin/env python3
+
+# Load modules from $JUJU_CHARM_DIR/lib
+import sys
+sys.path.append('lib')
+
+from charms.layer import basic # noqa
+basic.bootstrap_charm_deps()
+
+from charmhelpers.core import hookenv # noqa
+hookenv.atstart(basic.init_config_states)
+hookenv.atexit(basic.clear_config_states)
+
+
+# This will load and run the appropriate @hook and other decorated
+# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive,
+# and $JUJU_CHARM_DIR/hooks/relations.
+#
+# See https://jujucharms.com/docs/stable/authors-charm-building
+# for more information on this pattern.
+from charms.reactive import main # noqa
+main()
diff --git a/kubernetes-master/hooks/aws-relation-joined b/kubernetes-master/hooks/aws-relation-joined
new file mode 100755
index 0000000..9858c6b
--- /dev/null
+++ b/kubernetes-master/hooks/aws-relation-joined
@@ -0,0 +1,22 @@
+#!/usr/bin/env python3
+
+# Load modules from $JUJU_CHARM_DIR/lib
+import sys
+sys.path.append('lib')
+
+from charms.layer import basic # noqa
+basic.bootstrap_charm_deps()
+
+from charmhelpers.core import hookenv # noqa
+hookenv.atstart(basic.init_config_states)
+hookenv.atexit(basic.clear_config_states)
+
+
+# This will load and run the appropriate @hook and other decorated
+# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive,
+# and $JUJU_CHARM_DIR/hooks/relations.
+#
+# See https://jujucharms.com/docs/stable/authors-charm-building
+# for more information on this pattern.
+from charms.reactive import main # noqa
+main()
diff --git a/kubernetes-master/hooks/azure-relation-broken b/kubernetes-master/hooks/azure-relation-broken
new file mode 100755
index 0000000..9858c6b
--- /dev/null
+++ b/kubernetes-master/hooks/azure-relation-broken
@@ -0,0 +1,22 @@
+#!/usr/bin/env python3
+
+# Load modules from $JUJU_CHARM_DIR/lib
+import sys
+sys.path.append('lib')
+
+from charms.layer import basic # noqa
+basic.bootstrap_charm_deps()
+
+from charmhelpers.core import hookenv # noqa
+hookenv.atstart(basic.init_config_states)
+hookenv.atexit(basic.clear_config_states)
+
+
+# This will load and run the appropriate @hook and other decorated
+# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive,
+# and $JUJU_CHARM_DIR/hooks/relations.
+#
+# See https://jujucharms.com/docs/stable/authors-charm-building
+# for more information on this pattern.
+from charms.reactive import main # noqa
+main()
diff --git a/kubernetes-master/hooks/azure-relation-changed b/kubernetes-master/hooks/azure-relation-changed
new file mode 100755
index 0000000..9858c6b
--- /dev/null
+++ b/kubernetes-master/hooks/azure-relation-changed
@@ -0,0 +1,22 @@
+#!/usr/bin/env python3
+
+# Load modules from $JUJU_CHARM_DIR/lib
+import sys
+sys.path.append('lib')
+
+from charms.layer import basic # noqa
+basic.bootstrap_charm_deps()
+
+from charmhelpers.core import hookenv # noqa
+hookenv.atstart(basic.init_config_states)
+hookenv.atexit(basic.clear_config_states)
+
+
+# This will load and run the appropriate @hook and other decorated
+# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive,
+# and $JUJU_CHARM_DIR/hooks/relations.
+#
+# See https://jujucharms.com/docs/stable/authors-charm-building
+# for more information on this pattern.
+from charms.reactive import main # noqa
+main()
diff --git a/kubernetes-master/hooks/azure-relation-created b/kubernetes-master/hooks/azure-relation-created
new file mode 100755
index 0000000..9858c6b
--- /dev/null
+++ b/kubernetes-master/hooks/azure-relation-created
@@ -0,0 +1,22 @@
+#!/usr/bin/env python3
+
+# Load modules from $JUJU_CHARM_DIR/lib
+import sys
+sys.path.append('lib')
+
+from charms.layer import basic # noqa
+basic.bootstrap_charm_deps()
+
+from charmhelpers.core import hookenv # noqa
+hookenv.atstart(basic.init_config_states)
+hookenv.atexit(basic.clear_config_states)
+
+
+# This will load and run the appropriate @hook and other decorated
+# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive,
+# and $JUJU_CHARM_DIR/hooks/relations.
+#
+# See https://jujucharms.com/docs/stable/authors-charm-building
+# for more information on this pattern.
+from charms.reactive import main # noqa
+main()
diff --git a/kubernetes-master/hooks/azure-relation-departed b/kubernetes-master/hooks/azure-relation-departed
new file mode 100755
index 0000000..9858c6b
--- /dev/null
+++ b/kubernetes-master/hooks/azure-relation-departed
@@ -0,0 +1,22 @@
+#!/usr/bin/env python3
+
+# Load modules from $JUJU_CHARM_DIR/lib
+import sys
+sys.path.append('lib')
+
+from charms.layer import basic # noqa
+basic.bootstrap_charm_deps()
+
+from charmhelpers.core import hookenv # noqa
+hookenv.atstart(basic.init_config_states)
+hookenv.atexit(basic.clear_config_states)
+
+
+# This will load and run the appropriate @hook and other decorated
+# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive,
+# and $JUJU_CHARM_DIR/hooks/relations.
+#
+# See https://jujucharms.com/docs/stable/authors-charm-building
+# for more information on this pattern.
+from charms.reactive import main # noqa
+main()
diff --git a/kubernetes-master/hooks/azure-relation-joined b/kubernetes-master/hooks/azure-relation-joined
new file mode 100755
index 0000000..9858c6b
--- /dev/null
+++ b/kubernetes-master/hooks/azure-relation-joined
@@ -0,0 +1,22 @@
+#!/usr/bin/env python3
+
+# Load modules from $JUJU_CHARM_DIR/lib
+import sys
+sys.path.append('lib')
+
+from charms.layer import basic # noqa
+basic.bootstrap_charm_deps()
+
+from charmhelpers.core import hookenv # noqa
+hookenv.atstart(basic.init_config_states)
+hookenv.atexit(basic.clear_config_states)
+
+
+# This will load and run the appropriate @hook and other decorated
+# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive,
+# and $JUJU_CHARM_DIR/hooks/relations.
+#
+# See https://jujucharms.com/docs/stable/authors-charm-building
+# for more information on this pattern.
+from charms.reactive import main # noqa
+main()
diff --git a/kubernetes-master/hooks/ceph-client-relation-broken b/kubernetes-master/hooks/ceph-client-relation-broken
new file mode 100755
index 0000000..9858c6b
--- /dev/null
+++ b/kubernetes-master/hooks/ceph-client-relation-broken
@@ -0,0 +1,22 @@
+#!/usr/bin/env python3
+
+# Load modules from $JUJU_CHARM_DIR/lib
+import sys
+sys.path.append('lib')
+
+from charms.layer import basic # noqa
+basic.bootstrap_charm_deps()
+
+from charmhelpers.core import hookenv # noqa
+hookenv.atstart(basic.init_config_states)
+hookenv.atexit(basic.clear_config_states)
+
+
+# This will load and run the appropriate @hook and other decorated
+# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive,
+# and $JUJU_CHARM_DIR/hooks/relations.
+#
+# See https://jujucharms.com/docs/stable/authors-charm-building
+# for more information on this pattern.
+from charms.reactive import main # noqa
+main()
diff --git a/kubernetes-master/hooks/ceph-client-relation-changed b/kubernetes-master/hooks/ceph-client-relation-changed
new file mode 100755
index 0000000..9858c6b
--- /dev/null
+++ b/kubernetes-master/hooks/ceph-client-relation-changed
@@ -0,0 +1,22 @@
+#!/usr/bin/env python3
+
+# Load modules from $JUJU_CHARM_DIR/lib
+import sys
+sys.path.append('lib')
+
+from charms.layer import basic # noqa
+basic.bootstrap_charm_deps()
+
+from charmhelpers.core import hookenv # noqa
+hookenv.atstart(basic.init_config_states)
+hookenv.atexit(basic.clear_config_states)
+
+
+# This will load and run the appropriate @hook and other decorated
+# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive,
+# and $JUJU_CHARM_DIR/hooks/relations.
+#
+# See https://jujucharms.com/docs/stable/authors-charm-building
+# for more information on this pattern.
+from charms.reactive import main # noqa
+main()
diff --git a/kubernetes-master/hooks/ceph-client-relation-created b/kubernetes-master/hooks/ceph-client-relation-created
new file mode 100755
index 0000000..9858c6b
--- /dev/null
+++ b/kubernetes-master/hooks/ceph-client-relation-created
@@ -0,0 +1,22 @@
+#!/usr/bin/env python3
+
+# Load modules from $JUJU_CHARM_DIR/lib
+import sys
+sys.path.append('lib')
+
+from charms.layer import basic # noqa
+basic.bootstrap_charm_deps()
+
+from charmhelpers.core import hookenv # noqa
+hookenv.atstart(basic.init_config_states)
+hookenv.atexit(basic.clear_config_states)
+
+
+# This will load and run the appropriate @hook and other decorated
+# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive,
+# and $JUJU_CHARM_DIR/hooks/relations.
+#
+# See https://jujucharms.com/docs/stable/authors-charm-building
+# for more information on this pattern.
+from charms.reactive import main # noqa
+main()
diff --git a/kubernetes-master/hooks/ceph-client-relation-departed b/kubernetes-master/hooks/ceph-client-relation-departed
new file mode 100755
index 0000000..9858c6b
--- /dev/null
+++ b/kubernetes-master/hooks/ceph-client-relation-departed
@@ -0,0 +1,22 @@
+#!/usr/bin/env python3
+
+# Load modules from $JUJU_CHARM_DIR/lib
+import sys
+sys.path.append('lib')
+
+from charms.layer import basic # noqa
+basic.bootstrap_charm_deps()
+
+from charmhelpers.core import hookenv # noqa
+hookenv.atstart(basic.init_config_states)
+hookenv.atexit(basic.clear_config_states)
+
+
+# This will load and run the appropriate @hook and other decorated
+# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive,
+# and $JUJU_CHARM_DIR/hooks/relations.
+#
+# See https://jujucharms.com/docs/stable/authors-charm-building
+# for more information on this pattern.
+from charms.reactive import main # noqa
+main()
diff --git a/kubernetes-master/hooks/ceph-client-relation-joined b/kubernetes-master/hooks/ceph-client-relation-joined
new file mode 100755
index 0000000..9858c6b
--- /dev/null
+++ b/kubernetes-master/hooks/ceph-client-relation-joined
@@ -0,0 +1,22 @@
+#!/usr/bin/env python3
+
+# Load modules from $JUJU_CHARM_DIR/lib
+import sys
+sys.path.append('lib')
+
+from charms.layer import basic # noqa
+basic.bootstrap_charm_deps()
+
+from charmhelpers.core import hookenv # noqa
+hookenv.atstart(basic.init_config_states)
+hookenv.atexit(basic.clear_config_states)
+
+
+# This will load and run the appropriate @hook and other decorated
+# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive,
+# and $JUJU_CHARM_DIR/hooks/relations.
+#
+# See https://jujucharms.com/docs/stable/authors-charm-building
+# for more information on this pattern.
+from charms.reactive import main # noqa
+main()
diff --git a/kubernetes-master/hooks/ceph-storage-relation-broken b/kubernetes-master/hooks/ceph-storage-relation-broken
new file mode 100755
index 0000000..9858c6b
--- /dev/null
+++ b/kubernetes-master/hooks/ceph-storage-relation-broken
@@ -0,0 +1,22 @@
+#!/usr/bin/env python3
+
+# Load modules from $JUJU_CHARM_DIR/lib
+import sys
+sys.path.append('lib')
+
+from charms.layer import basic # noqa
+basic.bootstrap_charm_deps()
+
+from charmhelpers.core import hookenv # noqa
+hookenv.atstart(basic.init_config_states)
+hookenv.atexit(basic.clear_config_states)
+
+
+# This will load and run the appropriate @hook and other decorated
+# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive,
+# and $JUJU_CHARM_DIR/hooks/relations.
+#
+# See https://jujucharms.com/docs/stable/authors-charm-building
+# for more information on this pattern.
+from charms.reactive import main # noqa
+main()
diff --git a/kubernetes-master/hooks/ceph-storage-relation-changed b/kubernetes-master/hooks/ceph-storage-relation-changed
new file mode 100755
index 0000000..9858c6b
--- /dev/null
+++ b/kubernetes-master/hooks/ceph-storage-relation-changed
@@ -0,0 +1,22 @@
+#!/usr/bin/env python3
+
+# Load modules from $JUJU_CHARM_DIR/lib
+import sys
+sys.path.append('lib')
+
+from charms.layer import basic # noqa
+basic.bootstrap_charm_deps()
+
+from charmhelpers.core import hookenv # noqa
+hookenv.atstart(basic.init_config_states)
+hookenv.atexit(basic.clear_config_states)
+
+
+# This will load and run the appropriate @hook and other decorated
+# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive,
+# and $JUJU_CHARM_DIR/hooks/relations.
+#
+# See https://jujucharms.com/docs/stable/authors-charm-building
+# for more information on this pattern.
+from charms.reactive import main # noqa
+main()
diff --git a/kubernetes-master/hooks/ceph-storage-relation-created b/kubernetes-master/hooks/ceph-storage-relation-created
new file mode 100755
index 0000000..9858c6b
--- /dev/null
+++ b/kubernetes-master/hooks/ceph-storage-relation-created
@@ -0,0 +1,22 @@
+#!/usr/bin/env python3
+
+# Load modules from $JUJU_CHARM_DIR/lib
+import sys
+sys.path.append('lib')
+
+from charms.layer import basic # noqa
+basic.bootstrap_charm_deps()
+
+from charmhelpers.core import hookenv # noqa
+hookenv.atstart(basic.init_config_states)
+hookenv.atexit(basic.clear_config_states)
+
+
+# This will load and run the appropriate @hook and other decorated
+# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive,
+# and $JUJU_CHARM_DIR/hooks/relations.
+#
+# See https://jujucharms.com/docs/stable/authors-charm-building
+# for more information on this pattern.
+from charms.reactive import main # noqa
+main()
diff --git a/kubernetes-master/hooks/ceph-storage-relation-departed b/kubernetes-master/hooks/ceph-storage-relation-departed
new file mode 100755
index 0000000..9858c6b
--- /dev/null
+++ b/kubernetes-master/hooks/ceph-storage-relation-departed
@@ -0,0 +1,22 @@
+#!/usr/bin/env python3
+
+# Load modules from $JUJU_CHARM_DIR/lib
+import sys
+sys.path.append('lib')
+
+from charms.layer import basic # noqa
+basic.bootstrap_charm_deps()
+
+from charmhelpers.core import hookenv # noqa
+hookenv.atstart(basic.init_config_states)
+hookenv.atexit(basic.clear_config_states)
+
+
+# This will load and run the appropriate @hook and other decorated
+# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive,
+# and $JUJU_CHARM_DIR/hooks/relations.
+#
+# See https://jujucharms.com/docs/stable/authors-charm-building
+# for more information on this pattern.
+from charms.reactive import main # noqa
+main()
diff --git a/kubernetes-master/hooks/ceph-storage-relation-joined b/kubernetes-master/hooks/ceph-storage-relation-joined
new file mode 100755
index 0000000..9858c6b
--- /dev/null
+++ b/kubernetes-master/hooks/ceph-storage-relation-joined
@@ -0,0 +1,22 @@
+#!/usr/bin/env python3
+
+# Load modules from $JUJU_CHARM_DIR/lib
+import sys
+sys.path.append('lib')
+
+from charms.layer import basic # noqa
+basic.bootstrap_charm_deps()
+
+from charmhelpers.core import hookenv # noqa
+hookenv.atstart(basic.init_config_states)
+hookenv.atexit(basic.clear_config_states)
+
+
+# This will load and run the appropriate @hook and other decorated
+# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive,
+# and $JUJU_CHARM_DIR/hooks/relations.
+#
+# See https://jujucharms.com/docs/stable/authors-charm-building
+# for more information on this pattern.
+from charms.reactive import main # noqa
+main()
diff --git a/kubernetes-master/hooks/certificates-relation-broken b/kubernetes-master/hooks/certificates-relation-broken
new file mode 100755
index 0000000..9858c6b
--- /dev/null
+++ b/kubernetes-master/hooks/certificates-relation-broken
@@ -0,0 +1,22 @@
+#!/usr/bin/env python3
+
+# Load modules from $JUJU_CHARM_DIR/lib
+import sys
+sys.path.append('lib')
+
+from charms.layer import basic # noqa
+basic.bootstrap_charm_deps()
+
+from charmhelpers.core import hookenv # noqa
+hookenv.atstart(basic.init_config_states)
+hookenv.atexit(basic.clear_config_states)
+
+
+# This will load and run the appropriate @hook and other decorated
+# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive,
+# and $JUJU_CHARM_DIR/hooks/relations.
+#
+# See https://jujucharms.com/docs/stable/authors-charm-building
+# for more information on this pattern.
+from charms.reactive import main # noqa
+main()
diff --git a/kubernetes-master/hooks/certificates-relation-changed b/kubernetes-master/hooks/certificates-relation-changed
new file mode 100755
index 0000000..9858c6b
--- /dev/null
+++ b/kubernetes-master/hooks/certificates-relation-changed
@@ -0,0 +1,22 @@
+#!/usr/bin/env python3
+
+# Load modules from $JUJU_CHARM_DIR/lib
+import sys
+sys.path.append('lib')
+
+from charms.layer import basic # noqa
+basic.bootstrap_charm_deps()
+
+from charmhelpers.core import hookenv # noqa
+hookenv.atstart(basic.init_config_states)
+hookenv.atexit(basic.clear_config_states)
+
+
+# This will load and run the appropriate @hook and other decorated
+# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive,
+# and $JUJU_CHARM_DIR/hooks/relations.
+#
+# See https://jujucharms.com/docs/stable/authors-charm-building
+# for more information on this pattern.
+from charms.reactive import main # noqa
+main()
diff --git a/kubernetes-master/hooks/certificates-relation-created b/kubernetes-master/hooks/certificates-relation-created
new file mode 100755
index 0000000..9858c6b
--- /dev/null
+++ b/kubernetes-master/hooks/certificates-relation-created
@@ -0,0 +1,22 @@
+#!/usr/bin/env python3
+
+# Load modules from $JUJU_CHARM_DIR/lib
+import sys
+sys.path.append('lib')
+
+from charms.layer import basic # noqa
+basic.bootstrap_charm_deps()
+
+from charmhelpers.core import hookenv # noqa
+hookenv.atstart(basic.init_config_states)
+hookenv.atexit(basic.clear_config_states)
+
+
+# This will load and run the appropriate @hook and other decorated
+# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive,
+# and $JUJU_CHARM_DIR/hooks/relations.
+#
+# See https://jujucharms.com/docs/stable/authors-charm-building
+# for more information on this pattern.
+from charms.reactive import main # noqa
+main()
diff --git a/kubernetes-master/hooks/certificates-relation-departed b/kubernetes-master/hooks/certificates-relation-departed
new file mode 100755
index 0000000..9858c6b
--- /dev/null
+++ b/kubernetes-master/hooks/certificates-relation-departed
@@ -0,0 +1,22 @@
+#!/usr/bin/env python3
+
+# Load modules from $JUJU_CHARM_DIR/lib
+import sys
+sys.path.append('lib')
+
+from charms.layer import basic # noqa
+basic.bootstrap_charm_deps()
+
+from charmhelpers.core import hookenv # noqa
+hookenv.atstart(basic.init_config_states)
+hookenv.atexit(basic.clear_config_states)
+
+
+# This will load and run the appropriate @hook and other decorated
+# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive,
+# and $JUJU_CHARM_DIR/hooks/relations.
+#
+# See https://jujucharms.com/docs/stable/authors-charm-building
+# for more information on this pattern.
+from charms.reactive import main # noqa
+main()
diff --git a/kubernetes-master/hooks/certificates-relation-joined b/kubernetes-master/hooks/certificates-relation-joined
new file mode 100755
index 0000000..9858c6b
--- /dev/null
+++ b/kubernetes-master/hooks/certificates-relation-joined
@@ -0,0 +1,22 @@
+#!/usr/bin/env python3
+
+# Load modules from $JUJU_CHARM_DIR/lib
+import sys
+sys.path.append('lib')
+
+from charms.layer import basic # noqa
+basic.bootstrap_charm_deps()
+
+from charmhelpers.core import hookenv # noqa
+hookenv.atstart(basic.init_config_states)
+hookenv.atexit(basic.clear_config_states)
+
+
+# This will load and run the appropriate @hook and other decorated
+# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive,
+# and $JUJU_CHARM_DIR/hooks/relations.
+#
+# See https://jujucharms.com/docs/stable/authors-charm-building
+# for more information on this pattern.
+from charms.reactive import main # noqa
+main()
diff --git a/kubernetes-master/hooks/cluster-dns-relation-broken b/kubernetes-master/hooks/cluster-dns-relation-broken
new file mode 100755
index 0000000..9858c6b
--- /dev/null
+++ b/kubernetes-master/hooks/cluster-dns-relation-broken
@@ -0,0 +1,22 @@
+#!/usr/bin/env python3
+
+# Load modules from $JUJU_CHARM_DIR/lib
+import sys
+sys.path.append('lib')
+
+from charms.layer import basic # noqa
+basic.bootstrap_charm_deps()
+
+from charmhelpers.core import hookenv # noqa
+hookenv.atstart(basic.init_config_states)
+hookenv.atexit(basic.clear_config_states)
+
+
+# This will load and run the appropriate @hook and other decorated
+# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive,
+# and $JUJU_CHARM_DIR/hooks/relations.
+#
+# See https://jujucharms.com/docs/stable/authors-charm-building
+# for more information on this pattern.
+from charms.reactive import main # noqa
+main()
diff --git a/kubernetes-master/hooks/cluster-dns-relation-changed b/kubernetes-master/hooks/cluster-dns-relation-changed
new file mode 100755
index 0000000..9858c6b
--- /dev/null
+++ b/kubernetes-master/hooks/cluster-dns-relation-changed
@@ -0,0 +1,22 @@
+#!/usr/bin/env python3
+
+# Load modules from $JUJU_CHARM_DIR/lib
+import sys
+sys.path.append('lib')
+
+from charms.layer import basic # noqa
+basic.bootstrap_charm_deps()
+
+from charmhelpers.core import hookenv # noqa
+hookenv.atstart(basic.init_config_states)
+hookenv.atexit(basic.clear_config_states)
+
+
+# This will load and run the appropriate @hook and other decorated
+# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive,
+# and $JUJU_CHARM_DIR/hooks/relations.
+#
+# See https://jujucharms.com/docs/stable/authors-charm-building
+# for more information on this pattern.
+from charms.reactive import main # noqa
+main()
diff --git a/kubernetes-master/hooks/cluster-dns-relation-created b/kubernetes-master/hooks/cluster-dns-relation-created
new file mode 100755
index 0000000..9858c6b
--- /dev/null
+++ b/kubernetes-master/hooks/cluster-dns-relation-created
@@ -0,0 +1,22 @@
+#!/usr/bin/env python3
+
+# Load modules from $JUJU_CHARM_DIR/lib
+import sys
+sys.path.append('lib')
+
+from charms.layer import basic # noqa
+basic.bootstrap_charm_deps()
+
+from charmhelpers.core import hookenv # noqa
+hookenv.atstart(basic.init_config_states)
+hookenv.atexit(basic.clear_config_states)
+
+
+# This will load and run the appropriate @hook and other decorated
+# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive,
+# and $JUJU_CHARM_DIR/hooks/relations.
+#
+# See https://jujucharms.com/docs/stable/authors-charm-building
+# for more information on this pattern.
+from charms.reactive import main # noqa
+main()
diff --git a/kubernetes-master/hooks/cluster-dns-relation-departed b/kubernetes-master/hooks/cluster-dns-relation-departed
new file mode 100755
index 0000000..9858c6b
--- /dev/null
+++ b/kubernetes-master/hooks/cluster-dns-relation-departed
@@ -0,0 +1,22 @@
+#!/usr/bin/env python3
+
+# Load modules from $JUJU_CHARM_DIR/lib
+import sys
+sys.path.append('lib')
+
+from charms.layer import basic # noqa
+basic.bootstrap_charm_deps()
+
+from charmhelpers.core import hookenv # noqa
+hookenv.atstart(basic.init_config_states)
+hookenv.atexit(basic.clear_config_states)
+
+
+# This will load and run the appropriate @hook and other decorated
+# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive,
+# and $JUJU_CHARM_DIR/hooks/relations.
+#
+# See https://jujucharms.com/docs/stable/authors-charm-building
+# for more information on this pattern.
+from charms.reactive import main # noqa
+main()
diff --git a/kubernetes-master/hooks/cluster-dns-relation-joined b/kubernetes-master/hooks/cluster-dns-relation-joined
new file mode 100755
index 0000000..9858c6b
--- /dev/null
+++ b/kubernetes-master/hooks/cluster-dns-relation-joined
@@ -0,0 +1,22 @@
+#!/usr/bin/env python3
+
+# Load modules from $JUJU_CHARM_DIR/lib
+import sys
+sys.path.append('lib')
+
+from charms.layer import basic # noqa
+basic.bootstrap_charm_deps()
+
+from charmhelpers.core import hookenv # noqa
+hookenv.atstart(basic.init_config_states)
+hookenv.atexit(basic.clear_config_states)
+
+
+# This will load and run the appropriate @hook and other decorated
+# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive,
+# and $JUJU_CHARM_DIR/hooks/relations.
+#
+# See https://jujucharms.com/docs/stable/authors-charm-building
+# for more information on this pattern.
+from charms.reactive import main # noqa
+main()
diff --git a/kubernetes-master/hooks/cni-relation-broken b/kubernetes-master/hooks/cni-relation-broken
new file mode 100755
index 0000000..9858c6b
--- /dev/null
+++ b/kubernetes-master/hooks/cni-relation-broken
@@ -0,0 +1,22 @@
+#!/usr/bin/env python3
+
+# Load modules from $JUJU_CHARM_DIR/lib
+import sys
+sys.path.append('lib')
+
+from charms.layer import basic # noqa
+basic.bootstrap_charm_deps()
+
+from charmhelpers.core import hookenv # noqa
+hookenv.atstart(basic.init_config_states)
+hookenv.atexit(basic.clear_config_states)
+
+
+# This will load and run the appropriate @hook and other decorated
+# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive,
+# and $JUJU_CHARM_DIR/hooks/relations.
+#
+# See https://jujucharms.com/docs/stable/authors-charm-building
+# for more information on this pattern.
+from charms.reactive import main # noqa
+main()
diff --git a/kubernetes-master/hooks/cni-relation-changed b/kubernetes-master/hooks/cni-relation-changed
new file mode 100755
index 0000000..9858c6b
--- /dev/null
+++ b/kubernetes-master/hooks/cni-relation-changed
@@ -0,0 +1,22 @@
+#!/usr/bin/env python3
+
+# Load modules from $JUJU_CHARM_DIR/lib
+import sys
+sys.path.append('lib')
+
+from charms.layer import basic # noqa
+basic.bootstrap_charm_deps()
+
+from charmhelpers.core import hookenv # noqa
+hookenv.atstart(basic.init_config_states)
+hookenv.atexit(basic.clear_config_states)
+
+
+# This will load and run the appropriate @hook and other decorated
+# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive,
+# and $JUJU_CHARM_DIR/hooks/relations.
+#
+# See https://jujucharms.com/docs/stable/authors-charm-building
+# for more information on this pattern.
+from charms.reactive import main # noqa
+main()
diff --git a/kubernetes-master/hooks/cni-relation-created b/kubernetes-master/hooks/cni-relation-created
new file mode 100755
index 0000000..9858c6b
--- /dev/null
+++ b/kubernetes-master/hooks/cni-relation-created
@@ -0,0 +1,22 @@
+#!/usr/bin/env python3
+
+# Load modules from $JUJU_CHARM_DIR/lib
+import sys
+sys.path.append('lib')
+
+from charms.layer import basic # noqa
+basic.bootstrap_charm_deps()
+
+from charmhelpers.core import hookenv # noqa
+hookenv.atstart(basic.init_config_states)
+hookenv.atexit(basic.clear_config_states)
+
+
+# This will load and run the appropriate @hook and other decorated
+# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive,
+# and $JUJU_CHARM_DIR/hooks/relations.
+#
+# See https://jujucharms.com/docs/stable/authors-charm-building
+# for more information on this pattern.
+from charms.reactive import main # noqa
+main()
diff --git a/kubernetes-master/hooks/cni-relation-departed b/kubernetes-master/hooks/cni-relation-departed
new file mode 100755
index 0000000..9858c6b
--- /dev/null
+++ b/kubernetes-master/hooks/cni-relation-departed
@@ -0,0 +1,22 @@
+#!/usr/bin/env python3
+
+# Load modules from $JUJU_CHARM_DIR/lib
+import sys
+sys.path.append('lib')
+
+from charms.layer import basic # noqa
+basic.bootstrap_charm_deps()
+
+from charmhelpers.core import hookenv # noqa
+hookenv.atstart(basic.init_config_states)
+hookenv.atexit(basic.clear_config_states)
+
+
+# This will load and run the appropriate @hook and other decorated
+# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive,
+# and $JUJU_CHARM_DIR/hooks/relations.
+#
+# See https://jujucharms.com/docs/stable/authors-charm-building
+# for more information on this pattern.
+from charms.reactive import main # noqa
+main()
diff --git a/kubernetes-master/hooks/cni-relation-joined b/kubernetes-master/hooks/cni-relation-joined
new file mode 100755
index 0000000..9858c6b
--- /dev/null
+++ b/kubernetes-master/hooks/cni-relation-joined
@@ -0,0 +1,22 @@
+#!/usr/bin/env python3
+
+# Load modules from $JUJU_CHARM_DIR/lib
+import sys
+sys.path.append('lib')
+
+from charms.layer import basic # noqa
+basic.bootstrap_charm_deps()
+
+from charmhelpers.core import hookenv # noqa
+hookenv.atstart(basic.init_config_states)
+hookenv.atexit(basic.clear_config_states)
+
+
+# This will load and run the appropriate @hook and other decorated
+# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive,
+# and $JUJU_CHARM_DIR/hooks/relations.
+#
+# See https://jujucharms.com/docs/stable/authors-charm-building
+# for more information on this pattern.
+from charms.reactive import main # noqa
+main()
diff --git a/kubernetes-master/hooks/collect-metrics b/kubernetes-master/hooks/collect-metrics
new file mode 100755
index 0000000..8a27863
--- /dev/null
+++ b/kubernetes-master/hooks/collect-metrics
@@ -0,0 +1,46 @@
+#!/usr/bin/env python3
+
+# Load modules from $CHARM_DIR/lib
+import sys
+sys.path.append('lib')
+
+import yaml
+import os
+from subprocess import check_output, check_call, CalledProcessError
+
+
+def build_command(doc):
+ values = {}
+ metrics = doc.get("metrics", {})
+ for metric, mdoc in metrics.items():
+ if not mdoc:
+ continue
+ cmd = mdoc.get("command")
+ if cmd:
+ try:
+ value = check_output(cmd, shell=True, universal_newlines=True)
+ except CalledProcessError as e:
+ check_call(['juju-log', '-lERROR',
+ 'Error collecting metric {}:\n{}'.format(
+ metric, e.output)])
+ continue
+ value = value.strip()
+ if value:
+ values[metric] = value
+
+ if not values:
+ return None
+ command = ["add-metric"]
+ for metric, value in values.items():
+ command.append("%s=%s" % (metric, value))
+ return command
+
+
+if __name__ == '__main__':
+ charm_dir = os.path.dirname(os.path.abspath(os.path.join(__file__, "..")))
+ metrics_yaml = os.path.join(charm_dir, "metrics.yaml")
+ with open(metrics_yaml) as f:
+        doc = yaml.safe_load(f)
+ command = build_command(doc)
+ if command:
+ check_call(command)
diff --git a/kubernetes-master/hooks/config-changed b/kubernetes-master/hooks/config-changed
new file mode 100755
index 0000000..9858c6b
--- /dev/null
+++ b/kubernetes-master/hooks/config-changed
@@ -0,0 +1,22 @@
+#!/usr/bin/env python3
+
+# Load modules from $JUJU_CHARM_DIR/lib
+import sys
+sys.path.append('lib')
+
+from charms.layer import basic # noqa
+basic.bootstrap_charm_deps()
+
+from charmhelpers.core import hookenv # noqa
+hookenv.atstart(basic.init_config_states)
+hookenv.atexit(basic.clear_config_states)
+
+
+# This will load and run the appropriate @hook and other decorated
+# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive,
+# and $JUJU_CHARM_DIR/hooks/relations.
+#
+# See https://jujucharms.com/docs/stable/authors-charm-building
+# for more information on this pattern.
+from charms.reactive import main # noqa
+main()
diff --git a/kubernetes-master/hooks/container-runtime-relation-broken b/kubernetes-master/hooks/container-runtime-relation-broken
new file mode 100755
index 0000000..9858c6b
--- /dev/null
+++ b/kubernetes-master/hooks/container-runtime-relation-broken
@@ -0,0 +1,22 @@
+#!/usr/bin/env python3
+
+# Load modules from $JUJU_CHARM_DIR/lib
+import sys
+sys.path.append('lib')
+
+from charms.layer import basic # noqa
+basic.bootstrap_charm_deps()
+
+from charmhelpers.core import hookenv # noqa
+hookenv.atstart(basic.init_config_states)
+hookenv.atexit(basic.clear_config_states)
+
+
+# This will load and run the appropriate @hook and other decorated
+# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive,
+# and $JUJU_CHARM_DIR/hooks/relations.
+#
+# See https://jujucharms.com/docs/stable/authors-charm-building
+# for more information on this pattern.
+from charms.reactive import main # noqa
+main()
diff --git a/kubernetes-master/hooks/container-runtime-relation-changed b/kubernetes-master/hooks/container-runtime-relation-changed
new file mode 100755
index 0000000..9858c6b
--- /dev/null
+++ b/kubernetes-master/hooks/container-runtime-relation-changed
@@ -0,0 +1,22 @@
+#!/usr/bin/env python3
+
+# Load modules from $JUJU_CHARM_DIR/lib
+import sys
+sys.path.append('lib')
+
+from charms.layer import basic # noqa
+basic.bootstrap_charm_deps()
+
+from charmhelpers.core import hookenv # noqa
+hookenv.atstart(basic.init_config_states)
+hookenv.atexit(basic.clear_config_states)
+
+
+# This will load and run the appropriate @hook and other decorated
+# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive,
+# and $JUJU_CHARM_DIR/hooks/relations.
+#
+# See https://jujucharms.com/docs/stable/authors-charm-building
+# for more information on this pattern.
+from charms.reactive import main # noqa
+main()
diff --git a/kubernetes-master/hooks/container-runtime-relation-created b/kubernetes-master/hooks/container-runtime-relation-created
new file mode 100755
index 0000000..9858c6b
--- /dev/null
+++ b/kubernetes-master/hooks/container-runtime-relation-created
@@ -0,0 +1,22 @@
+#!/usr/bin/env python3
+
+# Load modules from $JUJU_CHARM_DIR/lib
+import sys
+sys.path.append('lib')
+
+from charms.layer import basic # noqa
+basic.bootstrap_charm_deps()
+
+from charmhelpers.core import hookenv # noqa
+hookenv.atstart(basic.init_config_states)
+hookenv.atexit(basic.clear_config_states)
+
+
+# This will load and run the appropriate @hook and other decorated
+# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive,
+# and $JUJU_CHARM_DIR/hooks/relations.
+#
+# See https://jujucharms.com/docs/stable/authors-charm-building
+# for more information on this pattern.
+from charms.reactive import main # noqa
+main()
diff --git a/kubernetes-master/hooks/container-runtime-relation-departed b/kubernetes-master/hooks/container-runtime-relation-departed
new file mode 100755
index 0000000..9858c6b
--- /dev/null
+++ b/kubernetes-master/hooks/container-runtime-relation-departed
@@ -0,0 +1,22 @@
+#!/usr/bin/env python3
+
+# Load modules from $JUJU_CHARM_DIR/lib
+import sys
+sys.path.append('lib')
+
+from charms.layer import basic # noqa
+basic.bootstrap_charm_deps()
+
+from charmhelpers.core import hookenv # noqa
+hookenv.atstart(basic.init_config_states)
+hookenv.atexit(basic.clear_config_states)
+
+
+# This will load and run the appropriate @hook and other decorated
+# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive,
+# and $JUJU_CHARM_DIR/hooks/relations.
+#
+# See https://jujucharms.com/docs/stable/authors-charm-building
+# for more information on this pattern.
+from charms.reactive import main # noqa
+main()
diff --git a/kubernetes-master/hooks/container-runtime-relation-joined b/kubernetes-master/hooks/container-runtime-relation-joined
new file mode 100755
index 0000000..9858c6b
--- /dev/null
+++ b/kubernetes-master/hooks/container-runtime-relation-joined
@@ -0,0 +1,22 @@
+#!/usr/bin/env python3
+
+# Load modules from $JUJU_CHARM_DIR/lib
+import sys
+sys.path.append('lib')
+
+from charms.layer import basic # noqa
+basic.bootstrap_charm_deps()
+
+from charmhelpers.core import hookenv # noqa
+hookenv.atstart(basic.init_config_states)
+hookenv.atexit(basic.clear_config_states)
+
+
+# This will load and run the appropriate @hook and other decorated
+# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive,
+# and $JUJU_CHARM_DIR/hooks/relations.
+#
+# See https://jujucharms.com/docs/stable/authors-charm-building
+# for more information on this pattern.
+from charms.reactive import main # noqa
+main()
diff --git a/kubernetes-master/hooks/coordinator-relation-broken b/kubernetes-master/hooks/coordinator-relation-broken
new file mode 100755
index 0000000..9858c6b
--- /dev/null
+++ b/kubernetes-master/hooks/coordinator-relation-broken
@@ -0,0 +1,22 @@
+#!/usr/bin/env python3
+
+# Load modules from $JUJU_CHARM_DIR/lib
+import sys
+sys.path.append('lib')
+
+from charms.layer import basic # noqa
+basic.bootstrap_charm_deps()
+
+from charmhelpers.core import hookenv # noqa
+hookenv.atstart(basic.init_config_states)
+hookenv.atexit(basic.clear_config_states)
+
+
+# This will load and run the appropriate @hook and other decorated
+# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive,
+# and $JUJU_CHARM_DIR/hooks/relations.
+#
+# See https://jujucharms.com/docs/stable/authors-charm-building
+# for more information on this pattern.
+from charms.reactive import main # noqa
+main()
diff --git a/kubernetes-master/hooks/coordinator-relation-changed b/kubernetes-master/hooks/coordinator-relation-changed
new file mode 100755
index 0000000..fe39f65
--- /dev/null
+++ b/kubernetes-master/hooks/coordinator-relation-changed
@@ -0,0 +1,18 @@
+#!/usr/bin/env python3
+
+# Load modules from $CHARM_DIR/lib
+import sys
+sys.path.append('lib')
+
+from charms.layer.basic import bootstrap_charm_deps
+bootstrap_charm_deps()
+
+
+# This will load and run the appropriate @hook and other decorated
+# handlers from $CHARM_DIR/reactive, $CHARM_DIR/hooks/reactive,
+# and $CHARM_DIR/hooks/relations.
+#
+# See https://jujucharms.com/docs/stable/authors-charm-building
+# for more information on this pattern.
+from charms.reactive import main
+main()
diff --git a/kubernetes-master/hooks/coordinator-relation-created b/kubernetes-master/hooks/coordinator-relation-created
new file mode 100755
index 0000000..9858c6b
--- /dev/null
+++ b/kubernetes-master/hooks/coordinator-relation-created
@@ -0,0 +1,22 @@
+#!/usr/bin/env python3
+
+# Load modules from $JUJU_CHARM_DIR/lib
+import sys
+sys.path.append('lib')
+
+from charms.layer import basic # noqa
+basic.bootstrap_charm_deps()
+
+from charmhelpers.core import hookenv # noqa
+hookenv.atstart(basic.init_config_states)
+hookenv.atexit(basic.clear_config_states)
+
+
+# This will load and run the appropriate @hook and other decorated
+# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive,
+# and $JUJU_CHARM_DIR/hooks/relations.
+#
+# See https://jujucharms.com/docs/stable/authors-charm-building
+# for more information on this pattern.
+from charms.reactive import main # noqa
+main()
diff --git a/kubernetes-master/hooks/coordinator-relation-departed b/kubernetes-master/hooks/coordinator-relation-departed
new file mode 100755
index 0000000..fe39f65
--- /dev/null
+++ b/kubernetes-master/hooks/coordinator-relation-departed
@@ -0,0 +1,18 @@
+#!/usr/bin/env python3
+
+# Load modules from $CHARM_DIR/lib
+import sys
+sys.path.append('lib')
+
+from charms.layer.basic import bootstrap_charm_deps
+bootstrap_charm_deps()
+
+
+# This will load and run the appropriate @hook and other decorated
+# handlers from $CHARM_DIR/reactive, $CHARM_DIR/hooks/reactive,
+# and $CHARM_DIR/hooks/relations.
+#
+# See https://jujucharms.com/docs/stable/authors-charm-building
+# for more information on this pattern.
+from charms.reactive import main
+main()
diff --git a/kubernetes-master/hooks/coordinator-relation-joined b/kubernetes-master/hooks/coordinator-relation-joined
new file mode 100755
index 0000000..9858c6b
--- /dev/null
+++ b/kubernetes-master/hooks/coordinator-relation-joined
@@ -0,0 +1,22 @@
+#!/usr/bin/env python3
+
+# Load modules from $JUJU_CHARM_DIR/lib
+import sys
+sys.path.append('lib')
+
+from charms.layer import basic # noqa
+basic.bootstrap_charm_deps()
+
+from charmhelpers.core import hookenv # noqa
+hookenv.atstart(basic.init_config_states)
+hookenv.atexit(basic.clear_config_states)
+
+
+# This will load and run the appropriate @hook and other decorated
+# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive,
+# and $JUJU_CHARM_DIR/hooks/relations.
+#
+# See https://jujucharms.com/docs/stable/authors-charm-building
+# for more information on this pattern.
+from charms.reactive import main # noqa
+main()
diff --git a/kubernetes-master/hooks/dns-provider-relation-broken b/kubernetes-master/hooks/dns-provider-relation-broken
new file mode 100755
index 0000000..9858c6b
--- /dev/null
+++ b/kubernetes-master/hooks/dns-provider-relation-broken
@@ -0,0 +1,22 @@
+#!/usr/bin/env python3
+
+# Load modules from $JUJU_CHARM_DIR/lib
+import sys
+sys.path.append('lib')
+
+from charms.layer import basic # noqa
+basic.bootstrap_charm_deps()
+
+from charmhelpers.core import hookenv # noqa
+hookenv.atstart(basic.init_config_states)
+hookenv.atexit(basic.clear_config_states)
+
+
+# This will load and run the appropriate @hook and other decorated
+# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive,
+# and $JUJU_CHARM_DIR/hooks/relations.
+#
+# See https://jujucharms.com/docs/stable/authors-charm-building
+# for more information on this pattern.
+from charms.reactive import main # noqa
+main()
diff --git a/kubernetes-master/hooks/dns-provider-relation-changed b/kubernetes-master/hooks/dns-provider-relation-changed
new file mode 100755
index 0000000..9858c6b
--- /dev/null
+++ b/kubernetes-master/hooks/dns-provider-relation-changed
@@ -0,0 +1,22 @@
+#!/usr/bin/env python3
+
+# Load modules from $JUJU_CHARM_DIR/lib
+import sys
+sys.path.append('lib')
+
+from charms.layer import basic # noqa
+basic.bootstrap_charm_deps()
+
+from charmhelpers.core import hookenv # noqa
+hookenv.atstart(basic.init_config_states)
+hookenv.atexit(basic.clear_config_states)
+
+
+# This will load and run the appropriate @hook and other decorated
+# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive,
+# and $JUJU_CHARM_DIR/hooks/relations.
+#
+# See https://jujucharms.com/docs/stable/authors-charm-building
+# for more information on this pattern.
+from charms.reactive import main # noqa
+main()
diff --git a/kubernetes-master/hooks/dns-provider-relation-created b/kubernetes-master/hooks/dns-provider-relation-created
new file mode 100755
index 0000000..9858c6b
--- /dev/null
+++ b/kubernetes-master/hooks/dns-provider-relation-created
@@ -0,0 +1,22 @@
+#!/usr/bin/env python3
+
+# Load modules from $JUJU_CHARM_DIR/lib
+import sys
+sys.path.append('lib')
+
+from charms.layer import basic # noqa
+basic.bootstrap_charm_deps()
+
+from charmhelpers.core import hookenv # noqa
+hookenv.atstart(basic.init_config_states)
+hookenv.atexit(basic.clear_config_states)
+
+
+# This will load and run the appropriate @hook and other decorated
+# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive,
+# and $JUJU_CHARM_DIR/hooks/relations.
+#
+# See https://jujucharms.com/docs/stable/authors-charm-building
+# for more information on this pattern.
+from charms.reactive import main # noqa
+main()
diff --git a/kubernetes-master/hooks/dns-provider-relation-departed b/kubernetes-master/hooks/dns-provider-relation-departed
new file mode 100755
index 0000000..9858c6b
--- /dev/null
+++ b/kubernetes-master/hooks/dns-provider-relation-departed
@@ -0,0 +1,22 @@
+#!/usr/bin/env python3
+
+# Load modules from $JUJU_CHARM_DIR/lib
+import sys
+sys.path.append('lib')
+
+from charms.layer import basic # noqa
+basic.bootstrap_charm_deps()
+
+from charmhelpers.core import hookenv # noqa
+hookenv.atstart(basic.init_config_states)
+hookenv.atexit(basic.clear_config_states)
+
+
+# This will load and run the appropriate @hook and other decorated
+# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive,
+# and $JUJU_CHARM_DIR/hooks/relations.
+#
+# See https://jujucharms.com/docs/stable/authors-charm-building
+# for more information on this pattern.
+from charms.reactive import main # noqa
+main()
diff --git a/kubernetes-master/hooks/dns-provider-relation-joined b/kubernetes-master/hooks/dns-provider-relation-joined
new file mode 100755
index 0000000..9858c6b
--- /dev/null
+++ b/kubernetes-master/hooks/dns-provider-relation-joined
@@ -0,0 +1,22 @@
+#!/usr/bin/env python3
+
+# Load modules from $JUJU_CHARM_DIR/lib
+import sys
+sys.path.append('lib')
+
+from charms.layer import basic # noqa
+basic.bootstrap_charm_deps()
+
+from charmhelpers.core import hookenv # noqa
+hookenv.atstart(basic.init_config_states)
+hookenv.atexit(basic.clear_config_states)
+
+
+# This will load and run the appropriate @hook and other decorated
+# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive,
+# and $JUJU_CHARM_DIR/hooks/relations.
+#
+# See https://jujucharms.com/docs/stable/authors-charm-building
+# for more information on this pattern.
+from charms.reactive import main # noqa
+main()
diff --git a/kubernetes-master/hooks/etcd-relation-broken b/kubernetes-master/hooks/etcd-relation-broken
new file mode 100755
index 0000000..9858c6b
--- /dev/null
+++ b/kubernetes-master/hooks/etcd-relation-broken
@@ -0,0 +1,22 @@
+#!/usr/bin/env python3
+
+# Load modules from $JUJU_CHARM_DIR/lib
+import sys
+sys.path.append('lib')
+
+from charms.layer import basic # noqa
+basic.bootstrap_charm_deps()
+
+from charmhelpers.core import hookenv # noqa
+hookenv.atstart(basic.init_config_states)
+hookenv.atexit(basic.clear_config_states)
+
+
+# This will load and run the appropriate @hook and other decorated
+# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive,
+# and $JUJU_CHARM_DIR/hooks/relations.
+#
+# See https://jujucharms.com/docs/stable/authors-charm-building
+# for more information on this pattern.
+from charms.reactive import main # noqa
+main()
diff --git a/kubernetes-master/hooks/etcd-relation-changed b/kubernetes-master/hooks/etcd-relation-changed
new file mode 100755
index 0000000..9858c6b
--- /dev/null
+++ b/kubernetes-master/hooks/etcd-relation-changed
@@ -0,0 +1,22 @@
+#!/usr/bin/env python3
+
+# Load modules from $JUJU_CHARM_DIR/lib
+import sys
+sys.path.append('lib')
+
+from charms.layer import basic # noqa
+basic.bootstrap_charm_deps()
+
+from charmhelpers.core import hookenv # noqa
+hookenv.atstart(basic.init_config_states)
+hookenv.atexit(basic.clear_config_states)
+
+
+# This will load and run the appropriate @hook and other decorated
+# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive,
+# and $JUJU_CHARM_DIR/hooks/relations.
+#
+# See https://jujucharms.com/docs/stable/authors-charm-building
+# for more information on this pattern.
+from charms.reactive import main # noqa
+main()
diff --git a/kubernetes-master/hooks/etcd-relation-created b/kubernetes-master/hooks/etcd-relation-created
new file mode 100755
index 0000000..9858c6b
--- /dev/null
+++ b/kubernetes-master/hooks/etcd-relation-created
@@ -0,0 +1,22 @@
+#!/usr/bin/env python3
+
+# Load modules from $JUJU_CHARM_DIR/lib
+import sys
+sys.path.append('lib')
+
+from charms.layer import basic # noqa
+basic.bootstrap_charm_deps()
+
+from charmhelpers.core import hookenv # noqa
+hookenv.atstart(basic.init_config_states)
+hookenv.atexit(basic.clear_config_states)
+
+
+# This will load and run the appropriate @hook and other decorated
+# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive,
+# and $JUJU_CHARM_DIR/hooks/relations.
+#
+# See https://jujucharms.com/docs/stable/authors-charm-building
+# for more information on this pattern.
+from charms.reactive import main # noqa
+main()
diff --git a/kubernetes-master/hooks/etcd-relation-departed b/kubernetes-master/hooks/etcd-relation-departed
new file mode 100755
index 0000000..9858c6b
--- /dev/null
+++ b/kubernetes-master/hooks/etcd-relation-departed
@@ -0,0 +1,22 @@
+#!/usr/bin/env python3
+
+# Load modules from $JUJU_CHARM_DIR/lib
+import sys
+sys.path.append('lib')
+
+from charms.layer import basic # noqa
+basic.bootstrap_charm_deps()
+
+from charmhelpers.core import hookenv # noqa
+hookenv.atstart(basic.init_config_states)
+hookenv.atexit(basic.clear_config_states)
+
+
+# This will load and run the appropriate @hook and other decorated
+# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive,
+# and $JUJU_CHARM_DIR/hooks/relations.
+#
+# See https://jujucharms.com/docs/stable/authors-charm-building
+# for more information on this pattern.
+from charms.reactive import main # noqa
+main()
diff --git a/kubernetes-master/hooks/etcd-relation-joined b/kubernetes-master/hooks/etcd-relation-joined
new file mode 100755
index 0000000..9858c6b
--- /dev/null
+++ b/kubernetes-master/hooks/etcd-relation-joined
@@ -0,0 +1,22 @@
+#!/usr/bin/env python3
+
+# Load modules from $JUJU_CHARM_DIR/lib
+import sys
+sys.path.append('lib')
+
+from charms.layer import basic # noqa
+basic.bootstrap_charm_deps()
+
+from charmhelpers.core import hookenv # noqa
+hookenv.atstart(basic.init_config_states)
+hookenv.atexit(basic.clear_config_states)
+
+
+# This will load and run the appropriate @hook and other decorated
+# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive,
+# and $JUJU_CHARM_DIR/hooks/relations.
+#
+# See https://jujucharms.com/docs/stable/authors-charm-building
+# for more information on this pattern.
+from charms.reactive import main # noqa
+main()
diff --git a/kubernetes-master/hooks/gcp-relation-broken b/kubernetes-master/hooks/gcp-relation-broken
new file mode 100755
index 0000000..9858c6b
--- /dev/null
+++ b/kubernetes-master/hooks/gcp-relation-broken
@@ -0,0 +1,22 @@
+#!/usr/bin/env python3
+
+# Load modules from $JUJU_CHARM_DIR/lib
+import sys
+sys.path.append('lib')
+
+from charms.layer import basic # noqa
+basic.bootstrap_charm_deps()
+
+from charmhelpers.core import hookenv # noqa
+hookenv.atstart(basic.init_config_states)
+hookenv.atexit(basic.clear_config_states)
+
+
+# This will load and run the appropriate @hook and other decorated
+# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive,
+# and $JUJU_CHARM_DIR/hooks/relations.
+#
+# See https://jujucharms.com/docs/stable/authors-charm-building
+# for more information on this pattern.
+from charms.reactive import main # noqa
+main()
diff --git a/kubernetes-master/hooks/gcp-relation-changed b/kubernetes-master/hooks/gcp-relation-changed
new file mode 100755
index 0000000..9858c6b
--- /dev/null
+++ b/kubernetes-master/hooks/gcp-relation-changed
@@ -0,0 +1,22 @@
+#!/usr/bin/env python3
+
+# Load modules from $JUJU_CHARM_DIR/lib
+import sys
+sys.path.append('lib')
+
+from charms.layer import basic # noqa
+basic.bootstrap_charm_deps()
+
+from charmhelpers.core import hookenv # noqa
+hookenv.atstart(basic.init_config_states)
+hookenv.atexit(basic.clear_config_states)
+
+
+# This will load and run the appropriate @hook and other decorated
+# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive,
+# and $JUJU_CHARM_DIR/hooks/relations.
+#
+# See https://jujucharms.com/docs/stable/authors-charm-building
+# for more information on this pattern.
+from charms.reactive import main # noqa
+main()
diff --git a/kubernetes-master/hooks/gcp-relation-created b/kubernetes-master/hooks/gcp-relation-created
new file mode 100755
index 0000000..9858c6b
--- /dev/null
+++ b/kubernetes-master/hooks/gcp-relation-created
@@ -0,0 +1,22 @@
+#!/usr/bin/env python3
+
+# Load modules from $JUJU_CHARM_DIR/lib
+import sys
+sys.path.append('lib')
+
+from charms.layer import basic # noqa
+basic.bootstrap_charm_deps()
+
+from charmhelpers.core import hookenv # noqa
+hookenv.atstart(basic.init_config_states)
+hookenv.atexit(basic.clear_config_states)
+
+
+# This will load and run the appropriate @hook and other decorated
+# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive,
+# and $JUJU_CHARM_DIR/hooks/relations.
+#
+# See https://jujucharms.com/docs/stable/authors-charm-building
+# for more information on this pattern.
+from charms.reactive import main # noqa
+main()
diff --git a/kubernetes-master/hooks/gcp-relation-departed b/kubernetes-master/hooks/gcp-relation-departed
new file mode 100755
index 0000000..9858c6b
--- /dev/null
+++ b/kubernetes-master/hooks/gcp-relation-departed
@@ -0,0 +1,22 @@
+#!/usr/bin/env python3
+
+# Load modules from $JUJU_CHARM_DIR/lib
+import sys
+sys.path.append('lib')
+
+from charms.layer import basic # noqa
+basic.bootstrap_charm_deps()
+
+from charmhelpers.core import hookenv # noqa
+hookenv.atstart(basic.init_config_states)
+hookenv.atexit(basic.clear_config_states)
+
+
+# This will load and run the appropriate @hook and other decorated
+# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive,
+# and $JUJU_CHARM_DIR/hooks/relations.
+#
+# See https://jujucharms.com/docs/stable/authors-charm-building
+# for more information on this pattern.
+from charms.reactive import main # noqa
+main()
diff --git a/kubernetes-master/hooks/gcp-relation-joined b/kubernetes-master/hooks/gcp-relation-joined
new file mode 100755
index 0000000..9858c6b
--- /dev/null
+++ b/kubernetes-master/hooks/gcp-relation-joined
@@ -0,0 +1,22 @@
+#!/usr/bin/env python3
+
+# Load modules from $JUJU_CHARM_DIR/lib
+import sys
+sys.path.append('lib')
+
+from charms.layer import basic # noqa
+basic.bootstrap_charm_deps()
+
+from charmhelpers.core import hookenv # noqa
+hookenv.atstart(basic.init_config_states)
+hookenv.atexit(basic.clear_config_states)
+
+
+# This will load and run the appropriate @hook and other decorated
+# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive,
+# and $JUJU_CHARM_DIR/hooks/relations.
+#
+# See https://jujucharms.com/docs/stable/authors-charm-building
+# for more information on this pattern.
+from charms.reactive import main # noqa
+main()
diff --git a/kubernetes-master/hooks/grafana-relation-broken b/kubernetes-master/hooks/grafana-relation-broken
new file mode 100755
index 0000000..9858c6b
--- /dev/null
+++ b/kubernetes-master/hooks/grafana-relation-broken
@@ -0,0 +1,22 @@
+#!/usr/bin/env python3
+
+# Load modules from $JUJU_CHARM_DIR/lib
+import sys
+sys.path.append('lib')
+
+from charms.layer import basic # noqa
+basic.bootstrap_charm_deps()
+
+from charmhelpers.core import hookenv # noqa
+hookenv.atstart(basic.init_config_states)
+hookenv.atexit(basic.clear_config_states)
+
+
+# This will load and run the appropriate @hook and other decorated
+# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive,
+# and $JUJU_CHARM_DIR/hooks/relations.
+#
+# See https://jujucharms.com/docs/stable/authors-charm-building
+# for more information on this pattern.
+from charms.reactive import main # noqa
+main()
diff --git a/kubernetes-master/hooks/grafana-relation-changed b/kubernetes-master/hooks/grafana-relation-changed
new file mode 100755
index 0000000..9858c6b
--- /dev/null
+++ b/kubernetes-master/hooks/grafana-relation-changed
@@ -0,0 +1,22 @@
+#!/usr/bin/env python3
+
+# Load modules from $JUJU_CHARM_DIR/lib
+import sys
+sys.path.append('lib')
+
+from charms.layer import basic # noqa
+basic.bootstrap_charm_deps()
+
+from charmhelpers.core import hookenv # noqa
+hookenv.atstart(basic.init_config_states)
+hookenv.atexit(basic.clear_config_states)
+
+
+# This will load and run the appropriate @hook and other decorated
+# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive,
+# and $JUJU_CHARM_DIR/hooks/relations.
+#
+# See https://jujucharms.com/docs/stable/authors-charm-building
+# for more information on this pattern.
+from charms.reactive import main # noqa
+main()
diff --git a/kubernetes-master/hooks/grafana-relation-created b/kubernetes-master/hooks/grafana-relation-created
new file mode 100755
index 0000000..9858c6b
--- /dev/null
+++ b/kubernetes-master/hooks/grafana-relation-created
@@ -0,0 +1,22 @@
+#!/usr/bin/env python3
+
+# Load modules from $JUJU_CHARM_DIR/lib
+import sys
+sys.path.append('lib')
+
+from charms.layer import basic # noqa
+basic.bootstrap_charm_deps()
+
+from charmhelpers.core import hookenv # noqa
+hookenv.atstart(basic.init_config_states)
+hookenv.atexit(basic.clear_config_states)
+
+
+# This will load and run the appropriate @hook and other decorated
+# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive,
+# and $JUJU_CHARM_DIR/hooks/relations.
+#
+# See https://jujucharms.com/docs/stable/authors-charm-building
+# for more information on this pattern.
+from charms.reactive import main # noqa
+main()
diff --git a/kubernetes-master/hooks/grafana-relation-departed b/kubernetes-master/hooks/grafana-relation-departed
new file mode 100755
index 0000000..9858c6b
--- /dev/null
+++ b/kubernetes-master/hooks/grafana-relation-departed
@@ -0,0 +1,22 @@
+#!/usr/bin/env python3
+
+# Load modules from $JUJU_CHARM_DIR/lib
+import sys
+sys.path.append('lib')
+
+from charms.layer import basic # noqa
+basic.bootstrap_charm_deps()
+
+from charmhelpers.core import hookenv # noqa
+hookenv.atstart(basic.init_config_states)
+hookenv.atexit(basic.clear_config_states)
+
+
+# This will load and run the appropriate @hook and other decorated
+# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive,
+# and $JUJU_CHARM_DIR/hooks/relations.
+#
+# See https://jujucharms.com/docs/stable/authors-charm-building
+# for more information on this pattern.
+from charms.reactive import main # noqa
+main()
diff --git a/kubernetes-master/hooks/grafana-relation-joined b/kubernetes-master/hooks/grafana-relation-joined
new file mode 100755
index 0000000..9858c6b
--- /dev/null
+++ b/kubernetes-master/hooks/grafana-relation-joined
@@ -0,0 +1,22 @@
+#!/usr/bin/env python3
+
+# Load modules from $JUJU_CHARM_DIR/lib
+import sys
+sys.path.append('lib')
+
+from charms.layer import basic # noqa
+basic.bootstrap_charm_deps()
+
+from charmhelpers.core import hookenv # noqa
+hookenv.atstart(basic.init_config_states)
+hookenv.atexit(basic.clear_config_states)
+
+
+# This will load and run the appropriate @hook and other decorated
+# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive,
+# and $JUJU_CHARM_DIR/hooks/relations.
+#
+# See https://jujucharms.com/docs/stable/authors-charm-building
+# for more information on this pattern.
+from charms.reactive import main # noqa
+main()
diff --git a/kubernetes-master/hooks/ha-relation-broken b/kubernetes-master/hooks/ha-relation-broken
new file mode 100755
index 0000000..9858c6b
--- /dev/null
+++ b/kubernetes-master/hooks/ha-relation-broken
@@ -0,0 +1,22 @@
+#!/usr/bin/env python3
+
+# Load modules from $JUJU_CHARM_DIR/lib
+import sys
+sys.path.append('lib')
+
+from charms.layer import basic # noqa
+basic.bootstrap_charm_deps()
+
+from charmhelpers.core import hookenv # noqa
+hookenv.atstart(basic.init_config_states)
+hookenv.atexit(basic.clear_config_states)
+
+
+# This will load and run the appropriate @hook and other decorated
+# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive,
+# and $JUJU_CHARM_DIR/hooks/relations.
+#
+# See https://jujucharms.com/docs/stable/authors-charm-building
+# for more information on this pattern.
+from charms.reactive import main # noqa
+main()
diff --git a/kubernetes-master/hooks/ha-relation-changed b/kubernetes-master/hooks/ha-relation-changed
new file mode 100755
index 0000000..9858c6b
--- /dev/null
+++ b/kubernetes-master/hooks/ha-relation-changed
@@ -0,0 +1,22 @@
+#!/usr/bin/env python3
+
+# Load modules from $JUJU_CHARM_DIR/lib
+import sys
+sys.path.append('lib')
+
+from charms.layer import basic # noqa
+basic.bootstrap_charm_deps()
+
+from charmhelpers.core import hookenv # noqa
+hookenv.atstart(basic.init_config_states)
+hookenv.atexit(basic.clear_config_states)
+
+
+# This will load and run the appropriate @hook and other decorated
+# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive,
+# and $JUJU_CHARM_DIR/hooks/relations.
+#
+# See https://jujucharms.com/docs/stable/authors-charm-building
+# for more information on this pattern.
+from charms.reactive import main # noqa
+main()
diff --git a/kubernetes-master/hooks/ha-relation-created b/kubernetes-master/hooks/ha-relation-created
new file mode 100755
index 0000000..9858c6b
--- /dev/null
+++ b/kubernetes-master/hooks/ha-relation-created
@@ -0,0 +1,22 @@
+#!/usr/bin/env python3
+
+# Load modules from $JUJU_CHARM_DIR/lib
+import sys
+sys.path.append('lib')
+
+from charms.layer import basic # noqa
+basic.bootstrap_charm_deps()
+
+from charmhelpers.core import hookenv # noqa
+hookenv.atstart(basic.init_config_states)
+hookenv.atexit(basic.clear_config_states)
+
+
+# This will load and run the appropriate @hook and other decorated
+# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive,
+# and $JUJU_CHARM_DIR/hooks/relations.
+#
+# See https://jujucharms.com/docs/stable/authors-charm-building
+# for more information on this pattern.
+from charms.reactive import main # noqa
+main()
diff --git a/kubernetes-master/hooks/ha-relation-departed b/kubernetes-master/hooks/ha-relation-departed
new file mode 100755
index 0000000..9858c6b
--- /dev/null
+++ b/kubernetes-master/hooks/ha-relation-departed
@@ -0,0 +1,22 @@
+#!/usr/bin/env python3
+
+# Load modules from $JUJU_CHARM_DIR/lib
+import sys
+sys.path.append('lib')
+
+from charms.layer import basic # noqa
+basic.bootstrap_charm_deps()
+
+from charmhelpers.core import hookenv # noqa
+hookenv.atstart(basic.init_config_states)
+hookenv.atexit(basic.clear_config_states)
+
+
+# This will load and run the appropriate @hook and other decorated
+# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive,
+# and $JUJU_CHARM_DIR/hooks/relations.
+#
+# See https://jujucharms.com/docs/stable/authors-charm-building
+# for more information on this pattern.
+from charms.reactive import main # noqa
+main()
diff --git a/kubernetes-master/hooks/ha-relation-joined b/kubernetes-master/hooks/ha-relation-joined
new file mode 100755
index 0000000..9858c6b
--- /dev/null
+++ b/kubernetes-master/hooks/ha-relation-joined
@@ -0,0 +1,22 @@
+#!/usr/bin/env python3
+
+# Load modules from $JUJU_CHARM_DIR/lib
+import sys
+sys.path.append('lib')
+
+from charms.layer import basic # noqa
+basic.bootstrap_charm_deps()
+
+from charmhelpers.core import hookenv # noqa
+hookenv.atstart(basic.init_config_states)
+hookenv.atexit(basic.clear_config_states)
+
+
+# This will load and run the appropriate @hook and other decorated
+# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive,
+# and $JUJU_CHARM_DIR/hooks/relations.
+#
+# See https://jujucharms.com/docs/stable/authors-charm-building
+# for more information on this pattern.
+from charms.reactive import main # noqa
+main()
diff --git a/kubernetes-master/hooks/hook.template b/kubernetes-master/hooks/hook.template
new file mode 100644
index 0000000..9858c6b
--- /dev/null
+++ b/kubernetes-master/hooks/hook.template
@@ -0,0 +1,22 @@
+#!/usr/bin/env python3
+
+# Load modules from $JUJU_CHARM_DIR/lib
+import sys
+sys.path.append('lib')
+
+from charms.layer import basic # noqa
+basic.bootstrap_charm_deps()
+
+from charmhelpers.core import hookenv # noqa
+hookenv.atstart(basic.init_config_states)
+hookenv.atexit(basic.clear_config_states)
+
+
+# This will load and run the appropriate @hook and other decorated
+# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive,
+# and $JUJU_CHARM_DIR/hooks/relations.
+#
+# See https://jujucharms.com/docs/stable/authors-charm-building
+# for more information on this pattern.
+from charms.reactive import main # noqa
+main()
diff --git a/kubernetes-master/hooks/install b/kubernetes-master/hooks/install
new file mode 100755
index 0000000..9858c6b
--- /dev/null
+++ b/kubernetes-master/hooks/install
@@ -0,0 +1,22 @@
+#!/usr/bin/env python3
+
+# Load modules from $JUJU_CHARM_DIR/lib
+import sys
+sys.path.append('lib')
+
+from charms.layer import basic # noqa
+basic.bootstrap_charm_deps()
+
+from charmhelpers.core import hookenv # noqa
+hookenv.atstart(basic.init_config_states)
+hookenv.atexit(basic.clear_config_states)
+
+
+# This will load and run the appropriate @hook and other decorated
+# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive,
+# and $JUJU_CHARM_DIR/hooks/relations.
+#
+# See https://jujucharms.com/docs/stable/authors-charm-building
+# for more information on this pattern.
+from charms.reactive import main # noqa
+main()
diff --git a/kubernetes-master/hooks/keystone-credentials-relation-broken b/kubernetes-master/hooks/keystone-credentials-relation-broken
new file mode 100755
index 0000000..9858c6b
--- /dev/null
+++ b/kubernetes-master/hooks/keystone-credentials-relation-broken
@@ -0,0 +1,22 @@
+#!/usr/bin/env python3
+
+# Load modules from $JUJU_CHARM_DIR/lib
+import sys
+sys.path.append('lib')
+
+from charms.layer import basic # noqa
+basic.bootstrap_charm_deps()
+
+from charmhelpers.core import hookenv # noqa
+hookenv.atstart(basic.init_config_states)
+hookenv.atexit(basic.clear_config_states)
+
+
+# This will load and run the appropriate @hook and other decorated
+# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive,
+# and $JUJU_CHARM_DIR/hooks/relations.
+#
+# See https://jujucharms.com/docs/stable/authors-charm-building
+# for more information on this pattern.
+from charms.reactive import main # noqa
+main()
diff --git a/kubernetes-master/hooks/keystone-credentials-relation-changed b/kubernetes-master/hooks/keystone-credentials-relation-changed
new file mode 100755
index 0000000..9858c6b
--- /dev/null
+++ b/kubernetes-master/hooks/keystone-credentials-relation-changed
@@ -0,0 +1,22 @@
+#!/usr/bin/env python3
+
+# Load modules from $JUJU_CHARM_DIR/lib
+import sys
+sys.path.append('lib')
+
+from charms.layer import basic # noqa
+basic.bootstrap_charm_deps()
+
+from charmhelpers.core import hookenv # noqa
+hookenv.atstart(basic.init_config_states)
+hookenv.atexit(basic.clear_config_states)
+
+
+# This will load and run the appropriate @hook and other decorated
+# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive,
+# and $JUJU_CHARM_DIR/hooks/relations.
+#
+# See https://jujucharms.com/docs/stable/authors-charm-building
+# for more information on this pattern.
+from charms.reactive import main # noqa
+main()
diff --git a/kubernetes-master/hooks/keystone-credentials-relation-created b/kubernetes-master/hooks/keystone-credentials-relation-created
new file mode 100755
index 0000000..9858c6b
--- /dev/null
+++ b/kubernetes-master/hooks/keystone-credentials-relation-created
@@ -0,0 +1,22 @@
+#!/usr/bin/env python3
+
+# Load modules from $JUJU_CHARM_DIR/lib
+import sys
+sys.path.append('lib')
+
+from charms.layer import basic # noqa
+basic.bootstrap_charm_deps()
+
+from charmhelpers.core import hookenv # noqa
+hookenv.atstart(basic.init_config_states)
+hookenv.atexit(basic.clear_config_states)
+
+
+# This will load and run the appropriate @hook and other decorated
+# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive,
+# and $JUJU_CHARM_DIR/hooks/relations.
+#
+# See https://jujucharms.com/docs/stable/authors-charm-building
+# for more information on this pattern.
+from charms.reactive import main # noqa
+main()
diff --git a/kubernetes-master/hooks/keystone-credentials-relation-departed b/kubernetes-master/hooks/keystone-credentials-relation-departed
new file mode 100755
index 0000000..9858c6b
--- /dev/null
+++ b/kubernetes-master/hooks/keystone-credentials-relation-departed
@@ -0,0 +1,22 @@
+#!/usr/bin/env python3
+
+# Load modules from $JUJU_CHARM_DIR/lib
+import sys
+sys.path.append('lib')
+
+from charms.layer import basic # noqa
+basic.bootstrap_charm_deps()
+
+from charmhelpers.core import hookenv # noqa
+hookenv.atstart(basic.init_config_states)
+hookenv.atexit(basic.clear_config_states)
+
+
+# This will load and run the appropriate @hook and other decorated
+# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive,
+# and $JUJU_CHARM_DIR/hooks/relations.
+#
+# See https://jujucharms.com/docs/stable/authors-charm-building
+# for more information on this pattern.
+from charms.reactive import main # noqa
+main()
diff --git a/kubernetes-master/hooks/keystone-credentials-relation-joined b/kubernetes-master/hooks/keystone-credentials-relation-joined
new file mode 100755
index 0000000..9858c6b
--- /dev/null
+++ b/kubernetes-master/hooks/keystone-credentials-relation-joined
@@ -0,0 +1,22 @@
+#!/usr/bin/env python3
+
+# Load modules from $JUJU_CHARM_DIR/lib
+import sys
+sys.path.append('lib')
+
+from charms.layer import basic # noqa
+basic.bootstrap_charm_deps()
+
+from charmhelpers.core import hookenv # noqa
+hookenv.atstart(basic.init_config_states)
+hookenv.atexit(basic.clear_config_states)
+
+
+# This will load and run the appropriate @hook and other decorated
+# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive,
+# and $JUJU_CHARM_DIR/hooks/relations.
+#
+# See https://jujucharms.com/docs/stable/authors-charm-building
+# for more information on this pattern.
+from charms.reactive import main # noqa
+main()
diff --git a/kubernetes-master/hooks/kube-api-endpoint-relation-broken b/kubernetes-master/hooks/kube-api-endpoint-relation-broken
new file mode 100755
index 0000000..9858c6b
--- /dev/null
+++ b/kubernetes-master/hooks/kube-api-endpoint-relation-broken
@@ -0,0 +1,22 @@
+#!/usr/bin/env python3
+
+# Load modules from $JUJU_CHARM_DIR/lib
+import sys
+sys.path.append('lib')
+
+from charms.layer import basic # noqa
+basic.bootstrap_charm_deps()
+
+from charmhelpers.core import hookenv # noqa
+hookenv.atstart(basic.init_config_states)
+hookenv.atexit(basic.clear_config_states)
+
+
+# This will load and run the appropriate @hook and other decorated
+# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive,
+# and $JUJU_CHARM_DIR/hooks/relations.
+#
+# See https://jujucharms.com/docs/stable/authors-charm-building
+# for more information on this pattern.
+from charms.reactive import main # noqa
+main()
diff --git a/kubernetes-master/hooks/kube-api-endpoint-relation-changed b/kubernetes-master/hooks/kube-api-endpoint-relation-changed
new file mode 100755
index 0000000..9858c6b
--- /dev/null
+++ b/kubernetes-master/hooks/kube-api-endpoint-relation-changed
@@ -0,0 +1,22 @@
+#!/usr/bin/env python3
+
+# Load modules from $JUJU_CHARM_DIR/lib
+import sys
+sys.path.append('lib')
+
+from charms.layer import basic # noqa
+basic.bootstrap_charm_deps()
+
+from charmhelpers.core import hookenv # noqa
+hookenv.atstart(basic.init_config_states)
+hookenv.atexit(basic.clear_config_states)
+
+
+# This will load and run the appropriate @hook and other decorated
+# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive,
+# and $JUJU_CHARM_DIR/hooks/relations.
+#
+# See https://jujucharms.com/docs/stable/authors-charm-building
+# for more information on this pattern.
+from charms.reactive import main # noqa
+main()
diff --git a/kubernetes-master/hooks/kube-api-endpoint-relation-created b/kubernetes-master/hooks/kube-api-endpoint-relation-created
new file mode 100755
index 0000000..9858c6b
--- /dev/null
+++ b/kubernetes-master/hooks/kube-api-endpoint-relation-created
@@ -0,0 +1,22 @@
+#!/usr/bin/env python3
+
+# Load modules from $JUJU_CHARM_DIR/lib
+import sys
+sys.path.append('lib')
+
+from charms.layer import basic # noqa
+basic.bootstrap_charm_deps()
+
+from charmhelpers.core import hookenv # noqa
+hookenv.atstart(basic.init_config_states)
+hookenv.atexit(basic.clear_config_states)
+
+
+# This will load and run the appropriate @hook and other decorated
+# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive,
+# and $JUJU_CHARM_DIR/hooks/relations.
+#
+# See https://jujucharms.com/docs/stable/authors-charm-building
+# for more information on this pattern.
+from charms.reactive import main # noqa
+main()
diff --git a/kubernetes-master/hooks/kube-api-endpoint-relation-departed b/kubernetes-master/hooks/kube-api-endpoint-relation-departed
new file mode 100755
index 0000000..9858c6b
--- /dev/null
+++ b/kubernetes-master/hooks/kube-api-endpoint-relation-departed
@@ -0,0 +1,22 @@
+#!/usr/bin/env python3
+
+# Load modules from $JUJU_CHARM_DIR/lib
+import sys
+sys.path.append('lib')
+
+from charms.layer import basic # noqa
+basic.bootstrap_charm_deps()
+
+from charmhelpers.core import hookenv # noqa
+hookenv.atstart(basic.init_config_states)
+hookenv.atexit(basic.clear_config_states)
+
+
+# This will load and run the appropriate @hook and other decorated
+# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive,
+# and $JUJU_CHARM_DIR/hooks/relations.
+#
+# See https://jujucharms.com/docs/stable/authors-charm-building
+# for more information on this pattern.
+from charms.reactive import main # noqa
+main()
diff --git a/kubernetes-master/hooks/kube-api-endpoint-relation-joined b/kubernetes-master/hooks/kube-api-endpoint-relation-joined
new file mode 100755
index 0000000..9858c6b
--- /dev/null
+++ b/kubernetes-master/hooks/kube-api-endpoint-relation-joined
@@ -0,0 +1,22 @@
+#!/usr/bin/env python3
+
+# Load modules from $JUJU_CHARM_DIR/lib
+import sys
+sys.path.append('lib')
+
+from charms.layer import basic # noqa
+basic.bootstrap_charm_deps()
+
+from charmhelpers.core import hookenv # noqa
+hookenv.atstart(basic.init_config_states)
+hookenv.atexit(basic.clear_config_states)
+
+
+# This will load and run the appropriate @hook and other decorated
+# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive,
+# and $JUJU_CHARM_DIR/hooks/relations.
+#
+# See https://jujucharms.com/docs/stable/authors-charm-building
+# for more information on this pattern.
+from charms.reactive import main # noqa
+main()
diff --git a/kubernetes-master/hooks/kube-control-relation-broken b/kubernetes-master/hooks/kube-control-relation-broken
new file mode 100755
index 0000000..9858c6b
--- /dev/null
+++ b/kubernetes-master/hooks/kube-control-relation-broken
@@ -0,0 +1,22 @@
+#!/usr/bin/env python3
+
+# Load modules from $JUJU_CHARM_DIR/lib
+import sys
+sys.path.append('lib')
+
+from charms.layer import basic # noqa
+basic.bootstrap_charm_deps()
+
+from charmhelpers.core import hookenv # noqa
+hookenv.atstart(basic.init_config_states)
+hookenv.atexit(basic.clear_config_states)
+
+
+# This will load and run the appropriate @hook and other decorated
+# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive,
+# and $JUJU_CHARM_DIR/hooks/relations.
+#
+# See https://jujucharms.com/docs/stable/authors-charm-building
+# for more information on this pattern.
+from charms.reactive import main # noqa
+main()
diff --git a/kubernetes-master/hooks/kube-control-relation-changed b/kubernetes-master/hooks/kube-control-relation-changed
new file mode 100755
index 0000000..9858c6b
--- /dev/null
+++ b/kubernetes-master/hooks/kube-control-relation-changed
@@ -0,0 +1,22 @@
+#!/usr/bin/env python3
+
+# Load modules from $JUJU_CHARM_DIR/lib
+import sys
+sys.path.append('lib')
+
+from charms.layer import basic # noqa
+basic.bootstrap_charm_deps()
+
+from charmhelpers.core import hookenv # noqa
+hookenv.atstart(basic.init_config_states)
+hookenv.atexit(basic.clear_config_states)
+
+
+# This will load and run the appropriate @hook and other decorated
+# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive,
+# and $JUJU_CHARM_DIR/hooks/relations.
+#
+# See https://jujucharms.com/docs/stable/authors-charm-building
+# for more information on this pattern.
+from charms.reactive import main # noqa
+main()
diff --git a/kubernetes-master/hooks/kube-control-relation-created b/kubernetes-master/hooks/kube-control-relation-created
new file mode 100755
index 0000000..9858c6b
--- /dev/null
+++ b/kubernetes-master/hooks/kube-control-relation-created
@@ -0,0 +1,22 @@
+#!/usr/bin/env python3
+
+# Load modules from $JUJU_CHARM_DIR/lib
+import sys
+sys.path.append('lib')
+
+from charms.layer import basic # noqa
+basic.bootstrap_charm_deps()
+
+from charmhelpers.core import hookenv # noqa
+hookenv.atstart(basic.init_config_states)
+hookenv.atexit(basic.clear_config_states)
+
+
+# This will load and run the appropriate @hook and other decorated
+# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive,
+# and $JUJU_CHARM_DIR/hooks/relations.
+#
+# See https://jujucharms.com/docs/stable/authors-charm-building
+# for more information on this pattern.
+from charms.reactive import main # noqa
+main()
diff --git a/kubernetes-master/hooks/kube-control-relation-departed b/kubernetes-master/hooks/kube-control-relation-departed
new file mode 100755
index 0000000..9858c6b
--- /dev/null
+++ b/kubernetes-master/hooks/kube-control-relation-departed
@@ -0,0 +1,22 @@
+#!/usr/bin/env python3
+
+# Load modules from $JUJU_CHARM_DIR/lib
+import sys
+sys.path.append('lib')
+
+from charms.layer import basic # noqa
+basic.bootstrap_charm_deps()
+
+from charmhelpers.core import hookenv # noqa
+hookenv.atstart(basic.init_config_states)
+hookenv.atexit(basic.clear_config_states)
+
+
+# This will load and run the appropriate @hook and other decorated
+# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive,
+# and $JUJU_CHARM_DIR/hooks/relations.
+#
+# See https://jujucharms.com/docs/stable/authors-charm-building
+# for more information on this pattern.
+from charms.reactive import main # noqa
+main()
diff --git a/kubernetes-master/hooks/kube-control-relation-joined b/kubernetes-master/hooks/kube-control-relation-joined
new file mode 100755
index 0000000..9858c6b
--- /dev/null
+++ b/kubernetes-master/hooks/kube-control-relation-joined
@@ -0,0 +1,22 @@
+#!/usr/bin/env python3
+
+# Load modules from $JUJU_CHARM_DIR/lib
+import sys
+sys.path.append('lib')
+
+from charms.layer import basic # noqa
+basic.bootstrap_charm_deps()
+
+from charmhelpers.core import hookenv # noqa
+hookenv.atstart(basic.init_config_states)
+hookenv.atexit(basic.clear_config_states)
+
+
+# This will load and run the appropriate @hook and other decorated
+# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive,
+# and $JUJU_CHARM_DIR/hooks/relations.
+#
+# See https://jujucharms.com/docs/stable/authors-charm-building
+# for more information on this pattern.
+from charms.reactive import main # noqa
+main()
diff --git a/kubernetes-master/hooks/kube-masters-relation-broken b/kubernetes-master/hooks/kube-masters-relation-broken
new file mode 100755
index 0000000..9858c6b
--- /dev/null
+++ b/kubernetes-master/hooks/kube-masters-relation-broken
@@ -0,0 +1,22 @@
+#!/usr/bin/env python3
+
+# Load modules from $JUJU_CHARM_DIR/lib
+import sys
+sys.path.append('lib')
+
+from charms.layer import basic # noqa
+basic.bootstrap_charm_deps()
+
+from charmhelpers.core import hookenv # noqa
+hookenv.atstart(basic.init_config_states)
+hookenv.atexit(basic.clear_config_states)
+
+
+# This will load and run the appropriate @hook and other decorated
+# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive,
+# and $JUJU_CHARM_DIR/hooks/relations.
+#
+# See https://jujucharms.com/docs/stable/authors-charm-building
+# for more information on this pattern.
+from charms.reactive import main # noqa
+main()
diff --git a/kubernetes-master/hooks/kube-masters-relation-changed b/kubernetes-master/hooks/kube-masters-relation-changed
new file mode 100755
index 0000000..9858c6b
--- /dev/null
+++ b/kubernetes-master/hooks/kube-masters-relation-changed
@@ -0,0 +1,22 @@
+#!/usr/bin/env python3
+
+# Load modules from $JUJU_CHARM_DIR/lib
+import sys
+sys.path.append('lib')
+
+from charms.layer import basic # noqa
+basic.bootstrap_charm_deps()
+
+from charmhelpers.core import hookenv # noqa
+hookenv.atstart(basic.init_config_states)
+hookenv.atexit(basic.clear_config_states)
+
+
+# This will load and run the appropriate @hook and other decorated
+# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive,
+# and $JUJU_CHARM_DIR/hooks/relations.
+#
+# See https://jujucharms.com/docs/stable/authors-charm-building
+# for more information on this pattern.
+from charms.reactive import main # noqa
+main()
diff --git a/kubernetes-master/hooks/kube-masters-relation-created b/kubernetes-master/hooks/kube-masters-relation-created
new file mode 100755
index 0000000..9858c6b
--- /dev/null
+++ b/kubernetes-master/hooks/kube-masters-relation-created
@@ -0,0 +1,22 @@
+#!/usr/bin/env python3
+
+# Load modules from $JUJU_CHARM_DIR/lib
+import sys
+sys.path.append('lib')
+
+from charms.layer import basic # noqa
+basic.bootstrap_charm_deps()
+
+from charmhelpers.core import hookenv # noqa
+hookenv.atstart(basic.init_config_states)
+hookenv.atexit(basic.clear_config_states)
+
+
+# This will load and run the appropriate @hook and other decorated
+# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive,
+# and $JUJU_CHARM_DIR/hooks/relations.
+#
+# See https://jujucharms.com/docs/stable/authors-charm-building
+# for more information on this pattern.
+from charms.reactive import main # noqa
+main()
diff --git a/kubernetes-master/hooks/kube-masters-relation-departed b/kubernetes-master/hooks/kube-masters-relation-departed
new file mode 100755
index 0000000..9858c6b
--- /dev/null
+++ b/kubernetes-master/hooks/kube-masters-relation-departed
@@ -0,0 +1,22 @@
+#!/usr/bin/env python3
+
+# Load modules from $JUJU_CHARM_DIR/lib
+import sys
+sys.path.append('lib')
+
+from charms.layer import basic # noqa
+basic.bootstrap_charm_deps()
+
+from charmhelpers.core import hookenv # noqa
+hookenv.atstart(basic.init_config_states)
+hookenv.atexit(basic.clear_config_states)
+
+
+# This will load and run the appropriate @hook and other decorated
+# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive,
+# and $JUJU_CHARM_DIR/hooks/relations.
+#
+# See https://jujucharms.com/docs/stable/authors-charm-building
+# for more information on this pattern.
+from charms.reactive import main # noqa
+main()
diff --git a/kubernetes-master/hooks/kube-masters-relation-joined b/kubernetes-master/hooks/kube-masters-relation-joined
new file mode 100755
index 0000000..9858c6b
--- /dev/null
+++ b/kubernetes-master/hooks/kube-masters-relation-joined
@@ -0,0 +1,22 @@
+#!/usr/bin/env python3
+
+# Load modules from $JUJU_CHARM_DIR/lib
+import sys
+sys.path.append('lib')
+
+from charms.layer import basic # noqa
+basic.bootstrap_charm_deps()
+
+from charmhelpers.core import hookenv # noqa
+hookenv.atstart(basic.init_config_states)
+hookenv.atexit(basic.clear_config_states)
+
+
+# This will load and run the appropriate @hook and other decorated
+# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive,
+# and $JUJU_CHARM_DIR/hooks/relations.
+#
+# See https://jujucharms.com/docs/stable/authors-charm-building
+# for more information on this pattern.
+from charms.reactive import main # noqa
+main()
diff --git a/kubernetes-master/hooks/leader-elected b/kubernetes-master/hooks/leader-elected
new file mode 100755
index 0000000..fe39f65
--- /dev/null
+++ b/kubernetes-master/hooks/leader-elected
@@ -0,0 +1,18 @@
+#!/usr/bin/env python3
+
+# Load modules from $CHARM_DIR/lib
+import sys
+sys.path.append('lib')
+
+from charms.layer.basic import bootstrap_charm_deps
+bootstrap_charm_deps()
+
+
+# This will load and run the appropriate @hook and other decorated
+# handlers from $CHARM_DIR/reactive, $CHARM_DIR/hooks/reactive,
+# and $CHARM_DIR/hooks/relations.
+#
+# See https://jujucharms.com/docs/stable/authors-charm-building
+# for more information on this pattern.
+from charms.reactive import main
+main()
diff --git a/kubernetes-master/hooks/leader-settings-changed b/kubernetes-master/hooks/leader-settings-changed
new file mode 100755
index 0000000..fe39f65
--- /dev/null
+++ b/kubernetes-master/hooks/leader-settings-changed
@@ -0,0 +1,18 @@
+#!/usr/bin/env python3
+
+# Load modules from $CHARM_DIR/lib
+import sys
+sys.path.append('lib')
+
+from charms.layer.basic import bootstrap_charm_deps
+bootstrap_charm_deps()
+
+
+# This will load and run the appropriate @hook and other decorated
+# handlers from $CHARM_DIR/reactive, $CHARM_DIR/hooks/reactive,
+# and $CHARM_DIR/hooks/relations.
+#
+# See https://jujucharms.com/docs/stable/authors-charm-building
+# for more information on this pattern.
+from charms.reactive import main
+main()
diff --git a/kubernetes-master/hooks/loadbalancer-relation-broken b/kubernetes-master/hooks/loadbalancer-relation-broken
new file mode 100755
index 0000000..9858c6b
--- /dev/null
+++ b/kubernetes-master/hooks/loadbalancer-relation-broken
@@ -0,0 +1,22 @@
+#!/usr/bin/env python3
+
+# Load modules from $JUJU_CHARM_DIR/lib
+import sys
+sys.path.append('lib')
+
+from charms.layer import basic # noqa
+basic.bootstrap_charm_deps()
+
+from charmhelpers.core import hookenv # noqa
+hookenv.atstart(basic.init_config_states)
+hookenv.atexit(basic.clear_config_states)
+
+
+# This will load and run the appropriate @hook and other decorated
+# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive,
+# and $JUJU_CHARM_DIR/hooks/relations.
+#
+# See https://jujucharms.com/docs/stable/authors-charm-building
+# for more information on this pattern.
+from charms.reactive import main # noqa
+main()
diff --git a/kubernetes-master/hooks/loadbalancer-relation-changed b/kubernetes-master/hooks/loadbalancer-relation-changed
new file mode 100755
index 0000000..9858c6b
--- /dev/null
+++ b/kubernetes-master/hooks/loadbalancer-relation-changed
@@ -0,0 +1,22 @@
+#!/usr/bin/env python3
+
+# Load modules from $JUJU_CHARM_DIR/lib
+import sys
+sys.path.append('lib')
+
+from charms.layer import basic # noqa
+basic.bootstrap_charm_deps()
+
+from charmhelpers.core import hookenv # noqa
+hookenv.atstart(basic.init_config_states)
+hookenv.atexit(basic.clear_config_states)
+
+
+# This will load and run the appropriate @hook and other decorated
+# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive,
+# and $JUJU_CHARM_DIR/hooks/relations.
+#
+# See https://jujucharms.com/docs/stable/authors-charm-building
+# for more information on this pattern.
+from charms.reactive import main # noqa
+main()
diff --git a/kubernetes-master/hooks/loadbalancer-relation-created b/kubernetes-master/hooks/loadbalancer-relation-created
new file mode 100755
index 0000000..9858c6b
--- /dev/null
+++ b/kubernetes-master/hooks/loadbalancer-relation-created
@@ -0,0 +1,22 @@
+#!/usr/bin/env python3
+
+# Load modules from $JUJU_CHARM_DIR/lib
+import sys
+sys.path.append('lib')
+
+from charms.layer import basic # noqa
+basic.bootstrap_charm_deps()
+
+from charmhelpers.core import hookenv # noqa
+hookenv.atstart(basic.init_config_states)
+hookenv.atexit(basic.clear_config_states)
+
+
+# This will load and run the appropriate @hook and other decorated
+# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive,
+# and $JUJU_CHARM_DIR/hooks/relations.
+#
+# See https://jujucharms.com/docs/stable/authors-charm-building
+# for more information on this pattern.
+from charms.reactive import main # noqa
+main()
diff --git a/kubernetes-master/hooks/loadbalancer-relation-departed b/kubernetes-master/hooks/loadbalancer-relation-departed
new file mode 100755
index 0000000..9858c6b
--- /dev/null
+++ b/kubernetes-master/hooks/loadbalancer-relation-departed
@@ -0,0 +1,22 @@
+#!/usr/bin/env python3
+
+# Load modules from $JUJU_CHARM_DIR/lib
+import sys
+sys.path.append('lib')
+
+from charms.layer import basic # noqa
+basic.bootstrap_charm_deps()
+
+from charmhelpers.core import hookenv # noqa
+hookenv.atstart(basic.init_config_states)
+hookenv.atexit(basic.clear_config_states)
+
+
+# This will load and run the appropriate @hook and other decorated
+# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive,
+# and $JUJU_CHARM_DIR/hooks/relations.
+#
+# See https://jujucharms.com/docs/stable/authors-charm-building
+# for more information on this pattern.
+from charms.reactive import main # noqa
+main()
diff --git a/kubernetes-master/hooks/loadbalancer-relation-joined b/kubernetes-master/hooks/loadbalancer-relation-joined
new file mode 100755
index 0000000..9858c6b
--- /dev/null
+++ b/kubernetes-master/hooks/loadbalancer-relation-joined
@@ -0,0 +1,22 @@
+#!/usr/bin/env python3
+
+# Load modules from $JUJU_CHARM_DIR/lib
+import sys
+sys.path.append('lib')
+
+from charms.layer import basic # noqa
+basic.bootstrap_charm_deps()
+
+from charmhelpers.core import hookenv # noqa
+hookenv.atstart(basic.init_config_states)
+hookenv.atexit(basic.clear_config_states)
+
+
+# This will load and run the appropriate @hook and other decorated
+# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive,
+# and $JUJU_CHARM_DIR/hooks/relations.
+#
+# See https://jujucharms.com/docs/stable/authors-charm-building
+# for more information on this pattern.
+from charms.reactive import main # noqa
+main()
diff --git a/kubernetes-master/hooks/nrpe-external-master-relation-broken b/kubernetes-master/hooks/nrpe-external-master-relation-broken
new file mode 100755
index 0000000..9858c6b
--- /dev/null
+++ b/kubernetes-master/hooks/nrpe-external-master-relation-broken
@@ -0,0 +1,22 @@
+#!/usr/bin/env python3
+
+# Load modules from $JUJU_CHARM_DIR/lib
+import sys
+sys.path.append('lib')
+
+from charms.layer import basic # noqa
+basic.bootstrap_charm_deps()
+
+from charmhelpers.core import hookenv # noqa
+hookenv.atstart(basic.init_config_states)
+hookenv.atexit(basic.clear_config_states)
+
+
+# This will load and run the appropriate @hook and other decorated
+# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive,
+# and $JUJU_CHARM_DIR/hooks/relations.
+#
+# See https://jujucharms.com/docs/stable/authors-charm-building
+# for more information on this pattern.
+from charms.reactive import main # noqa
+main()
diff --git a/kubernetes-master/hooks/nrpe-external-master-relation-changed b/kubernetes-master/hooks/nrpe-external-master-relation-changed
new file mode 100755
index 0000000..9858c6b
--- /dev/null
+++ b/kubernetes-master/hooks/nrpe-external-master-relation-changed
@@ -0,0 +1,22 @@
+#!/usr/bin/env python3
+
+# Load modules from $JUJU_CHARM_DIR/lib
+import sys
+sys.path.append('lib')
+
+from charms.layer import basic # noqa
+basic.bootstrap_charm_deps()
+
+from charmhelpers.core import hookenv # noqa
+hookenv.atstart(basic.init_config_states)
+hookenv.atexit(basic.clear_config_states)
+
+
+# This will load and run the appropriate @hook and other decorated
+# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive,
+# and $JUJU_CHARM_DIR/hooks/relations.
+#
+# See https://jujucharms.com/docs/stable/authors-charm-building
+# for more information on this pattern.
+from charms.reactive import main # noqa
+main()
diff --git a/kubernetes-master/hooks/nrpe-external-master-relation-created b/kubernetes-master/hooks/nrpe-external-master-relation-created
new file mode 100755
index 0000000..9858c6b
--- /dev/null
+++ b/kubernetes-master/hooks/nrpe-external-master-relation-created
@@ -0,0 +1,22 @@
+#!/usr/bin/env python3
+
+# Load modules from $JUJU_CHARM_DIR/lib
+import sys
+sys.path.append('lib')
+
+from charms.layer import basic # noqa
+basic.bootstrap_charm_deps()
+
+from charmhelpers.core import hookenv # noqa
+hookenv.atstart(basic.init_config_states)
+hookenv.atexit(basic.clear_config_states)
+
+
+# This will load and run the appropriate @hook and other decorated
+# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive,
+# and $JUJU_CHARM_DIR/hooks/relations.
+#
+# See https://jujucharms.com/docs/stable/authors-charm-building
+# for more information on this pattern.
+from charms.reactive import main # noqa
+main()
diff --git a/kubernetes-master/hooks/nrpe-external-master-relation-departed b/kubernetes-master/hooks/nrpe-external-master-relation-departed
new file mode 100755
index 0000000..9858c6b
--- /dev/null
+++ b/kubernetes-master/hooks/nrpe-external-master-relation-departed
@@ -0,0 +1,22 @@
+#!/usr/bin/env python3
+
+# Load modules from $JUJU_CHARM_DIR/lib
+import sys
+sys.path.append('lib')
+
+from charms.layer import basic # noqa
+basic.bootstrap_charm_deps()
+
+from charmhelpers.core import hookenv # noqa
+hookenv.atstart(basic.init_config_states)
+hookenv.atexit(basic.clear_config_states)
+
+
+# This will load and run the appropriate @hook and other decorated
+# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive,
+# and $JUJU_CHARM_DIR/hooks/relations.
+#
+# See https://jujucharms.com/docs/stable/authors-charm-building
+# for more information on this pattern.
+from charms.reactive import main # noqa
+main()
diff --git a/kubernetes-master/hooks/nrpe-external-master-relation-joined b/kubernetes-master/hooks/nrpe-external-master-relation-joined
new file mode 100755
index 0000000..9858c6b
--- /dev/null
+++ b/kubernetes-master/hooks/nrpe-external-master-relation-joined
@@ -0,0 +1,22 @@
+#!/usr/bin/env python3
+
+# Load modules from $JUJU_CHARM_DIR/lib
+import sys
+sys.path.append('lib')
+
+from charms.layer import basic # noqa
+basic.bootstrap_charm_deps()
+
+from charmhelpers.core import hookenv # noqa
+hookenv.atstart(basic.init_config_states)
+hookenv.atexit(basic.clear_config_states)
+
+
+# This will load and run the appropriate @hook and other decorated
+# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive,
+# and $JUJU_CHARM_DIR/hooks/relations.
+#
+# See https://jujucharms.com/docs/stable/authors-charm-building
+# for more information on this pattern.
+from charms.reactive import main # noqa
+main()
diff --git a/kubernetes-master/hooks/openstack-relation-broken b/kubernetes-master/hooks/openstack-relation-broken
new file mode 100755
index 0000000..9858c6b
--- /dev/null
+++ b/kubernetes-master/hooks/openstack-relation-broken
@@ -0,0 +1,22 @@
+#!/usr/bin/env python3
+
+# Load modules from $JUJU_CHARM_DIR/lib
+import sys
+sys.path.append('lib')
+
+from charms.layer import basic # noqa
+basic.bootstrap_charm_deps()
+
+from charmhelpers.core import hookenv # noqa
+hookenv.atstart(basic.init_config_states)
+hookenv.atexit(basic.clear_config_states)
+
+
+# This will load and run the appropriate @hook and other decorated
+# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive,
+# and $JUJU_CHARM_DIR/hooks/relations.
+#
+# See https://jujucharms.com/docs/stable/authors-charm-building
+# for more information on this pattern.
+from charms.reactive import main # noqa
+main()
diff --git a/kubernetes-master/hooks/openstack-relation-changed b/kubernetes-master/hooks/openstack-relation-changed
new file mode 100755
index 0000000..9858c6b
--- /dev/null
+++ b/kubernetes-master/hooks/openstack-relation-changed
@@ -0,0 +1,22 @@
+#!/usr/bin/env python3
+
+# Load modules from $JUJU_CHARM_DIR/lib
+import sys
+sys.path.append('lib')
+
+from charms.layer import basic # noqa
+basic.bootstrap_charm_deps()
+
+from charmhelpers.core import hookenv # noqa
+hookenv.atstart(basic.init_config_states)
+hookenv.atexit(basic.clear_config_states)
+
+
+# This will load and run the appropriate @hook and other decorated
+# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive,
+# and $JUJU_CHARM_DIR/hooks/relations.
+#
+# See https://jujucharms.com/docs/stable/authors-charm-building
+# for more information on this pattern.
+from charms.reactive import main # noqa
+main()
diff --git a/kubernetes-master/hooks/openstack-relation-created b/kubernetes-master/hooks/openstack-relation-created
new file mode 100755
index 0000000..9858c6b
--- /dev/null
+++ b/kubernetes-master/hooks/openstack-relation-created
@@ -0,0 +1,22 @@
+#!/usr/bin/env python3
+
+# Load modules from $JUJU_CHARM_DIR/lib
+import sys
+sys.path.append('lib')
+
+from charms.layer import basic # noqa
+basic.bootstrap_charm_deps()
+
+from charmhelpers.core import hookenv # noqa
+hookenv.atstart(basic.init_config_states)
+hookenv.atexit(basic.clear_config_states)
+
+
+# This will load and run the appropriate @hook and other decorated
+# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive,
+# and $JUJU_CHARM_DIR/hooks/relations.
+#
+# See https://jujucharms.com/docs/stable/authors-charm-building
+# for more information on this pattern.
+from charms.reactive import main # noqa
+main()
diff --git a/kubernetes-master/hooks/openstack-relation-departed b/kubernetes-master/hooks/openstack-relation-departed
new file mode 100755
index 0000000..9858c6b
--- /dev/null
+++ b/kubernetes-master/hooks/openstack-relation-departed
@@ -0,0 +1,22 @@
+#!/usr/bin/env python3
+
+# Load modules from $JUJU_CHARM_DIR/lib
+import sys
+sys.path.append('lib')
+
+from charms.layer import basic # noqa
+basic.bootstrap_charm_deps()
+
+from charmhelpers.core import hookenv # noqa
+hookenv.atstart(basic.init_config_states)
+hookenv.atexit(basic.clear_config_states)
+
+
+# This will load and run the appropriate @hook and other decorated
+# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive,
+# and $JUJU_CHARM_DIR/hooks/relations.
+#
+# See https://jujucharms.com/docs/stable/authors-charm-building
+# for more information on this pattern.
+from charms.reactive import main # noqa
+main()
diff --git a/kubernetes-master/hooks/openstack-relation-joined b/kubernetes-master/hooks/openstack-relation-joined
new file mode 100755
index 0000000..9858c6b
--- /dev/null
+++ b/kubernetes-master/hooks/openstack-relation-joined
@@ -0,0 +1,22 @@
+#!/usr/bin/env python3
+
+# Load modules from $JUJU_CHARM_DIR/lib
+import sys
+sys.path.append('lib')
+
+from charms.layer import basic # noqa
+basic.bootstrap_charm_deps()
+
+from charmhelpers.core import hookenv # noqa
+hookenv.atstart(basic.init_config_states)
+hookenv.atexit(basic.clear_config_states)
+
+
+# This will load and run the appropriate @hook and other decorated
+# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive,
+# and $JUJU_CHARM_DIR/hooks/relations.
+#
+# See https://jujucharms.com/docs/stable/authors-charm-building
+# for more information on this pattern.
+from charms.reactive import main # noqa
+main()
diff --git a/kubernetes-master/hooks/post-series-upgrade b/kubernetes-master/hooks/post-series-upgrade
new file mode 100755
index 0000000..9858c6b
--- /dev/null
+++ b/kubernetes-master/hooks/post-series-upgrade
@@ -0,0 +1,22 @@
+#!/usr/bin/env python3
+
+# Load modules from $JUJU_CHARM_DIR/lib
+import sys
+sys.path.append('lib')
+
+from charms.layer import basic # noqa
+basic.bootstrap_charm_deps()
+
+from charmhelpers.core import hookenv # noqa
+hookenv.atstart(basic.init_config_states)
+hookenv.atexit(basic.clear_config_states)
+
+
+# This will load and run the appropriate @hook and other decorated
+# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive,
+# and $JUJU_CHARM_DIR/hooks/relations.
+#
+# See https://jujucharms.com/docs/stable/authors-charm-building
+# for more information on this pattern.
+from charms.reactive import main # noqa
+main()
diff --git a/kubernetes-master/hooks/pre-series-upgrade b/kubernetes-master/hooks/pre-series-upgrade
new file mode 100755
index 0000000..9858c6b
--- /dev/null
+++ b/kubernetes-master/hooks/pre-series-upgrade
@@ -0,0 +1,22 @@
+#!/usr/bin/env python3
+
+# Load modules from $JUJU_CHARM_DIR/lib
+import sys
+sys.path.append('lib')
+
+from charms.layer import basic # noqa
+basic.bootstrap_charm_deps()
+
+from charmhelpers.core import hookenv # noqa
+hookenv.atstart(basic.init_config_states)
+hookenv.atexit(basic.clear_config_states)
+
+
+# This will load and run the appropriate @hook and other decorated
+# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive,
+# and $JUJU_CHARM_DIR/hooks/relations.
+#
+# See https://jujucharms.com/docs/stable/authors-charm-building
+# for more information on this pattern.
+from charms.reactive import main # noqa
+main()
diff --git a/kubernetes-master/hooks/prometheus-relation-broken b/kubernetes-master/hooks/prometheus-relation-broken
new file mode 100755
index 0000000..9858c6b
--- /dev/null
+++ b/kubernetes-master/hooks/prometheus-relation-broken
@@ -0,0 +1,22 @@
+#!/usr/bin/env python3
+
+# Load modules from $JUJU_CHARM_DIR/lib
+import sys
+sys.path.append('lib')
+
+from charms.layer import basic # noqa
+basic.bootstrap_charm_deps()
+
+from charmhelpers.core import hookenv # noqa
+hookenv.atstart(basic.init_config_states)
+hookenv.atexit(basic.clear_config_states)
+
+
+# This will load and run the appropriate @hook and other decorated
+# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive,
+# and $JUJU_CHARM_DIR/hooks/relations.
+#
+# See https://jujucharms.com/docs/stable/authors-charm-building
+# for more information on this pattern.
+from charms.reactive import main # noqa
+main()
diff --git a/kubernetes-master/hooks/prometheus-relation-changed b/kubernetes-master/hooks/prometheus-relation-changed
new file mode 100755
index 0000000..9858c6b
--- /dev/null
+++ b/kubernetes-master/hooks/prometheus-relation-changed
@@ -0,0 +1,22 @@
+#!/usr/bin/env python3
+
+# Load modules from $JUJU_CHARM_DIR/lib
+import sys
+sys.path.append('lib')
+
+from charms.layer import basic # noqa
+basic.bootstrap_charm_deps()
+
+from charmhelpers.core import hookenv # noqa
+hookenv.atstart(basic.init_config_states)
+hookenv.atexit(basic.clear_config_states)
+
+
+# This will load and run the appropriate @hook and other decorated
+# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive,
+# and $JUJU_CHARM_DIR/hooks/relations.
+#
+# See https://jujucharms.com/docs/stable/authors-charm-building
+# for more information on this pattern.
+from charms.reactive import main # noqa
+main()
diff --git a/kubernetes-master/hooks/prometheus-relation-created b/kubernetes-master/hooks/prometheus-relation-created
new file mode 100755
index 0000000..9858c6b
--- /dev/null
+++ b/kubernetes-master/hooks/prometheus-relation-created
@@ -0,0 +1,22 @@
+#!/usr/bin/env python3
+
+# Load modules from $JUJU_CHARM_DIR/lib
+import sys
+sys.path.append('lib')
+
+from charms.layer import basic # noqa
+basic.bootstrap_charm_deps()
+
+from charmhelpers.core import hookenv # noqa
+hookenv.atstart(basic.init_config_states)
+hookenv.atexit(basic.clear_config_states)
+
+
+# This will load and run the appropriate @hook and other decorated
+# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive,
+# and $JUJU_CHARM_DIR/hooks/relations.
+#
+# See https://jujucharms.com/docs/stable/authors-charm-building
+# for more information on this pattern.
+from charms.reactive import main # noqa
+main()
diff --git a/kubernetes-master/hooks/prometheus-relation-departed b/kubernetes-master/hooks/prometheus-relation-departed
new file mode 100755
index 0000000..9858c6b
--- /dev/null
+++ b/kubernetes-master/hooks/prometheus-relation-departed
@@ -0,0 +1,22 @@
+#!/usr/bin/env python3
+
+# Load modules from $JUJU_CHARM_DIR/lib
+import sys
+sys.path.append('lib')
+
+from charms.layer import basic # noqa
+basic.bootstrap_charm_deps()
+
+from charmhelpers.core import hookenv # noqa
+hookenv.atstart(basic.init_config_states)
+hookenv.atexit(basic.clear_config_states)
+
+
+# This will load and run the appropriate @hook and other decorated
+# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive,
+# and $JUJU_CHARM_DIR/hooks/relations.
+#
+# See https://jujucharms.com/docs/stable/authors-charm-building
+# for more information on this pattern.
+from charms.reactive import main # noqa
+main()
diff --git a/kubernetes-master/hooks/prometheus-relation-joined b/kubernetes-master/hooks/prometheus-relation-joined
new file mode 100755
index 0000000..9858c6b
--- /dev/null
+++ b/kubernetes-master/hooks/prometheus-relation-joined
@@ -0,0 +1,22 @@
+#!/usr/bin/env python3
+
+# Load modules from $JUJU_CHARM_DIR/lib
+import sys
+sys.path.append('lib')
+
+from charms.layer import basic # noqa
+basic.bootstrap_charm_deps()
+
+from charmhelpers.core import hookenv # noqa
+hookenv.atstart(basic.init_config_states)
+hookenv.atexit(basic.clear_config_states)
+
+
+# This will load and run the appropriate @hook and other decorated
+# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive,
+# and $JUJU_CHARM_DIR/hooks/relations.
+#
+# See https://jujucharms.com/docs/stable/authors-charm-building
+# for more information on this pattern.
+from charms.reactive import main # noqa
+main()
diff --git a/kubernetes-master/hooks/relations/aws-iam/LICENSE b/kubernetes-master/hooks/relations/aws-iam/LICENSE
new file mode 100644
index 0000000..261eeb9
--- /dev/null
+++ b/kubernetes-master/hooks/relations/aws-iam/LICENSE
@@ -0,0 +1,201 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/kubernetes-master/hooks/relations/aws-iam/README.md b/kubernetes-master/hooks/relations/aws-iam/README.md
new file mode 100644
index 0000000..5a54cd8
--- /dev/null
+++ b/kubernetes-master/hooks/relations/aws-iam/README.md
@@ -0,0 +1,47 @@
+# aws-iam interface
+
+This interface provides communication between
+[kubernetes-master](https://github.com/charmed-kubernetes/charm-kubernetes-master)
+and [aws-iam](https://github.com/charmed-kubernetes/charm-aws-iam)
+subordinate.
+
+It allows the requires side, aws-iam, to know when the api server is
+up and available and to tell the api server when the webhook.yaml
+file is written so that it may restart and use the webhook.
+
+## Provides (kubernetes-master side)
+
+### States
+ * `aws-iam.available`
+ Indicates that there are one or more units on the other side
+ of the relation
+ * `aws-iam.ready`
+ Indicates that the webhook status has been set. This is used
+ to indicate it is time to restart the API server to pick up
+ the webhook config on the Kubernetes side.
+### Methods
+ * `get_cluster_id`
+ The AWS-IAM charm generates a random cluster ID for the cluster
+ that is needed in the kubectl configuration file. This is
+ retrieved from the relation here.
+ * `set_api_server_status`
+ This is set to indicate if the Kubernetes API server is up and
+ ready for connections. This is needed because the aws-iam charm
+ needs to set up the service it will use in order to add the IP
+ to the extra sans in the ssl certificate used to secure
+ communication between the master and the service.
+
+## Requires (aws-iam side)
+
+### States
+ * `aws-iam.available`
+ Indicates that there are one or more units on the other
+ side of the relation
+### Methods
+ * `set_cluster_id`
+ The AWS-IAM charm generates a random cluster ID for the
+ cluster that is needed in the kubectl configuration file.
+ This is passed over the relation here.
+ * `set_webhook_status`
+ Called to set that the webhook configuration has been written
+ to disk.
\ No newline at end of file
diff --git a/kubernetes-master/hooks/relations/aws-iam/__init__.py b/kubernetes-master/hooks/relations/aws-iam/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/kubernetes-master/hooks/relations/aws-iam/interface.yaml b/kubernetes-master/hooks/relations/aws-iam/interface.yaml
new file mode 100644
index 0000000..d824eaa
--- /dev/null
+++ b/kubernetes-master/hooks/relations/aws-iam/interface.yaml
@@ -0,0 +1,4 @@
+name: aws-iam
+summary: Used to integrate AWS IAM into kubernetes-master charm
+version: 1
+maintainer: "Mike Wilson "
diff --git a/kubernetes-master/hooks/relations/aws-iam/provides.py b/kubernetes-master/hooks/relations/aws-iam/provides.py
new file mode 100644
index 0000000..2cffae7
--- /dev/null
+++ b/kubernetes-master/hooks/relations/aws-iam/provides.py
@@ -0,0 +1,35 @@
+from charms.reactive import Endpoint
+from charms.reactive import toggle_flag
+
+
+# kubernetes-master side
+class AWSIAMProvides(Endpoint):
+
+ # called automagically before any decorated handlers, but after
+ # flags are set
+ def manage_flags(self):
+ # we want to make sure all the templates and stuff are written
+ # and pods started before we switch the API server over to
+ # use the webhook. This is critical for the webhook template
+ # since the API server will crash if the file isn't there.
+ toggle_flag(self.expand_name('endpoint.{endpoint_name}.available'),
+ self.is_joined)
+ toggle_flag(self.expand_name('endpoint.{endpoint_name}.ready'),
+ self.is_joined and all(unit.received['webhook_status']
+ for unit in self.all_joined_units))
+
+ def get_cluster_id(self):
+ """ Gets randomly generated cluster ID. """
+
+ return self.all_joined_units.received['cluster_id']
+
+ def set_api_server_status(self, status):
+ """ Sets the status of the Kubernetes API server.
+
+ Args:
+ status: Boolean value. True when API server is started
+ and ready to receive requests.
+ """
+
+ for relation in self.relations:
+ relation.to_publish['api_server_state'] = status
diff --git a/kubernetes-master/hooks/relations/aws-iam/requires.py b/kubernetes-master/hooks/relations/aws-iam/requires.py
new file mode 100644
index 0000000..960c265
--- /dev/null
+++ b/kubernetes-master/hooks/relations/aws-iam/requires.py
@@ -0,0 +1,36 @@
+from charms.reactive import Endpoint
+from charms.reactive import toggle_flag
+
+
+# aws-iam side
+class AWSIAMRequires(Endpoint):
+
+ # called automagically before any decorated handlers, but after
+ # flags are set
+ def manage_flags(self):
+ # kubectl is used to deploy the webhook pod. This means that
+ # the api server needs to be up in order to do that. So we
+ # wait until the cluster is up before trying.
+ toggle_flag(self.expand_name('endpoint.{endpoint_name}.available'),
+ self.is_joined and all(unit.received['api_server_state']
+ for unit in self.all_joined_units))
+
+ def set_webhook_status(self, status):
+ """ Sets the status of the webhook configuration file.
+
+ Args:
+ status: Boolean value. True when webhook configuration has been
+ written to disk and the API server can be configured to
+ pick that up and restart.
+ """
+ for relation in self.relations:
+ relation.to_publish['webhook_status'] = status
+
+ def set_cluster_id(self, id):
+ """ Sets the randomly generated cluster id. The cluster ID is just
+ a unique value to identify this cluster for AWS-IAM. It is needed
+ by the API server for the kubectl configuration file.
+ """
+
+ for relation in self.relations:
+ relation.to_publish['cluster_id'] = id
diff --git a/kubernetes-master/hooks/relations/aws-integration/.gitignore b/kubernetes-master/hooks/relations/aws-integration/.gitignore
new file mode 100644
index 0000000..ba1431e
--- /dev/null
+++ b/kubernetes-master/hooks/relations/aws-integration/.gitignore
@@ -0,0 +1,2 @@
+.tox
+__pycache__
diff --git a/kubernetes-master/hooks/relations/aws-integration/LICENSE b/kubernetes-master/hooks/relations/aws-integration/LICENSE
new file mode 100644
index 0000000..d645695
--- /dev/null
+++ b/kubernetes-master/hooks/relations/aws-integration/LICENSE
@@ -0,0 +1,202 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/kubernetes-master/hooks/relations/aws-integration/README.md b/kubernetes-master/hooks/relations/aws-integration/README.md
new file mode 100644
index 0000000..59abfcf
--- /dev/null
+++ b/kubernetes-master/hooks/relations/aws-integration/README.md
@@ -0,0 +1,28 @@
+# Overview
+
+This layer encapsulates the `aws-integration` interface communication protocol
+and provides an API for charms on either side of relations using this
+interface.
+
+## Usage
+
+In your charm's `layer.yaml`, ensure that `interface:aws-integration` is
+included in the `includes` section:
+
+```yaml
+includes: ['layer:basic', 'interface:aws-integration']
+```
+
+And in your charm's `metadata.yaml`, ensure that a relation endpoint is defined
+using the `aws-integration` interface protocol:
+
+```yaml
+requires:
+ aws:
+ interface: aws-integration
+```
+
+For documentation on how to use the API for this interface, see:
+
+* [Requires API documentation](docs/requires.md)
+* [Provides API documentation](docs/provides.md) (this will only be used by the aws-integrator charm)
diff --git a/kubernetes-master/hooks/relations/aws-integration/__init__.py b/kubernetes-master/hooks/relations/aws-integration/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/kubernetes-master/hooks/relations/aws-integration/copyright b/kubernetes-master/hooks/relations/aws-integration/copyright
new file mode 100644
index 0000000..a91bdf1
--- /dev/null
+++ b/kubernetes-master/hooks/relations/aws-integration/copyright
@@ -0,0 +1,16 @@
+Format: http://dep.debian.net/deps/dep5/
+
+Files: *
+Copyright: Copyright 2018, Canonical Ltd., All Rights Reserved.
+License: Apache License 2.0
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+ .
+ http://www.apache.org/licenses/LICENSE-2.0
+ .
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/kubernetes-master/hooks/relations/aws-integration/docs/provides.md b/kubernetes-master/hooks/relations/aws-integration/docs/provides.md
new file mode 100644
index 0000000..57ecb25
--- /dev/null
+++ b/kubernetes-master/hooks/relations/aws-integration/docs/provides.md
@@ -0,0 +1,179 @@
+
+<h1 id="provides">provides</h1>
+
+
+This is the provides side of the interface layer, for use only by the AWS
+integrator charm itself.
+
+The flags that are set by the provides side of this interface are:
+
+* **`endpoint.{endpoint_name}.requested`** This flag is set when there is
+ a new or updated request by a remote unit for AWS integration features.
+ The AWS integration charm should then iterate over each request, perform
+ whatever actions are necessary to satisfy those requests, and then mark
+ them as complete.
+
+
+
+```python
+IntegrationRequest.mark_completed(self)
+```
+
+Mark this request as having been completed.
+
+
+<h2 id="provides.IntegrationRequest.clear">clear</h2>
+
+```python
+IntegrationRequest.clear(self)
+```
+
+Clear this request's cached data.
+
diff --git a/kubernetes-master/hooks/relations/aws-integration/docs/requires.md b/kubernetes-master/hooks/relations/aws-integration/docs/requires.md
new file mode 100644
index 0000000..41607f4
--- /dev/null
+++ b/kubernetes-master/hooks/relations/aws-integration/docs/requires.md
@@ -0,0 +1,178 @@
+
+<h1 id="requires">requires</h1>
+
+
+This is the requires side of the interface layer, for use in charms that
+wish to request integration with AWS native features. The integration will
+be provided by the AWS integration charm, which allows the requiring charm
+to not require cloud credentials itself and not have a lot of AWS specific
+API code.
+
+The flags that are set by the requires side of this interface are:
+
+* **`endpoint.{endpoint_name}.joined`** This flag is set when the relation
+ has been joined, and the charm should then use the methods documented below
+ to request specific AWS features. This flag is automatically removed if
+ the relation is broken. It should not be removed by the charm.
+
+* **`endpoint.{endpoint_name}.ready`** This flag is set once the requested
+ features have been enabled for the AWS instance on which the charm is
+ running. This flag is automatically removed if new integration features
+ are requested. It should not be removed by the charm.
+
+
+
+```python
+AWSIntegrationRequires.tag_instance(self, tags)
+```
+
+Request that the given tags be applied to this instance.
+
+__Parameters__
+
+- __`tags` (dict)__: Mapping of tag names to values (or `None`).
+
+
+<h2 id="requires.AWSIntegrationRequires.tag_instance_security_group">tag_instance_security_group</h2>
+
+```python
+AWSIntegrationRequires.tag_instance_security_group(self, tags)
+```
+
+Request that the given tags be applied to this instance's
+machine-specific security group (firewall) created by Juju.
+
+__Parameters__
+
+- __`tags` (dict)__: Mapping of tag names to values (or `None`).
+
+
+<h2 id="requires.AWSIntegrationRequires.tag_instance_subnet">tag_instance_subnet</h2>
+
+```python
+AWSIntegrationRequires.tag_instance_subnet(self, tags)
+```
+
+Request that the given tags be applied to this instance's subnet.
+
+__Parameters__
+
+- __`tags` (dict)__: Mapping of tag names to values (or `None`).
+
+
+
+```python
+AWSIntegrationRequires.enable_instance_inspection(self)
+```
+
+Request the ability to inspect instances.
+
+
+<h2 id="requires.AWSIntegrationRequires.enable_network_management">enable_network_management</h2>
+
+```python
+AWSIntegrationRequires.enable_network_management(self)
+```
+
+Request the ability to manage networking (firewalls, subnets, etc).
+
+
+<h2 id="requires.AWSIntegrationRequires.enable_load_balancer_management">enable_load_balancer_management</h2>
+
+```python
+AWSIntegrationRequires.enable_load_balancer_management(self)
+```
+
+Request the ability to manage load balancers.
+
+
+<h2 id="requires.AWSIntegrationRequires.enable_block_storage_management">enable_block_storage_management</h2>
+
+```python
+AWSIntegrationRequires.enable_block_storage_management(self)
+```
+
+Request the ability to manage block storage.
+
+
+<h2 id="requires.AWSIntegrationRequires.enable_dns_management">enable_dns_management</h2>
+
+```python
+AWSIntegrationRequires.enable_dns_management(self)
+```
+
+Request the ability to manage DNS.
+
+
+<h2 id="requires.AWSIntegrationRequires.enable_object_storage_access">enable_object_storage_access</h2>
+
+```python
+AWSIntegrationRequires.enable_object_storage_access(self, patterns=None)
+```
+
+Request the ability to access object storage.
+
+__Parameters__
+
+- __`patterns` (list)__: If given, restrict access to the resources matching
+ the patterns. If patterns do not start with the S3 ARN prefix
+- __(`arn__:aws:s3:::`), it will be prepended.
+
+
+<h2 id="requires.AWSIntegrationRequires.enable_object_storage_management">enable_object_storage_management</h2>
+
+```python
+AWSIntegrationRequires.enable_object_storage_management(self, patterns=None)
+```
+
+Request the ability to manage object storage.
+
+__Parameters__
+
+- __`patterns` (list)__: If given, restrict management to the resources
+ matching the patterns. If patterns do not start with the S3 ARN
+- __prefix (`arn__:aws:s3:::`), it will be prepended.
+
diff --git a/kubernetes-master/hooks/relations/aws-integration/interface.yaml b/kubernetes-master/hooks/relations/aws-integration/interface.yaml
new file mode 100644
index 0000000..fe3da6d
--- /dev/null
+++ b/kubernetes-master/hooks/relations/aws-integration/interface.yaml
@@ -0,0 +1,4 @@
+name: aws-integration
+summary: Interface for connecting to the AWS integrator charm.
+version: 1
+maintainer: Cory Johns
diff --git a/kubernetes-master/hooks/relations/aws-integration/make_docs b/kubernetes-master/hooks/relations/aws-integration/make_docs
new file mode 100644
index 0000000..72b69c2
--- /dev/null
+++ b/kubernetes-master/hooks/relations/aws-integration/make_docs
@@ -0,0 +1,20 @@
+#!.tox/py3/bin/python
+
+import sys
+from shutil import rmtree
+from unittest.mock import patch
+
+import pydocmd.__main__
+
+
+with patch('charmhelpers.core.hookenv.metadata') as metadata:
+ metadata.return_value = {
+ 'requires': {'aws': {'interface': 'aws-integration'}},
+ 'provides': {'aws': {'interface': 'aws-integration'}},
+ }
+ sys.path.insert(0, '.')
+ print(sys.argv)
+ if len(sys.argv) == 1:
+ sys.argv.extend(['build'])
+ pydocmd.__main__.main()
+ rmtree('_build')
diff --git a/kubernetes-master/hooks/relations/aws-integration/provides.py b/kubernetes-master/hooks/relations/aws-integration/provides.py
new file mode 100644
index 0000000..ae94211
--- /dev/null
+++ b/kubernetes-master/hooks/relations/aws-integration/provides.py
@@ -0,0 +1,288 @@
+"""
+This is the provides side of the interface layer, for use only by the AWS
+integrator charm itself.
+
+The flags that are set by the provides side of this interface are:
+
+* **`endpoint.{endpoint_name}.requested`** This flag is set when there is
+ a new or updated request by a remote unit for AWS integration features.
+ The AWS integration charm should then iterate over each request, perform
+ whatever actions are necessary to satisfy those requests, and then mark
+ them as complete.
+"""
+
+import json
+from hashlib import sha256
+
+from charmhelpers.core import unitdata
+
+from charms.reactive import Endpoint
+from charms.reactive import when
+from charms.reactive import toggle_flag, clear_flag
+
+
+class AWSIntegrationProvides(Endpoint):
+ """
+ Example usage:
+
+ ```python
+ from charms.reactive import when, endpoint_from_flag
+ from charms import layer
+
+ @when('endpoint.aws.requested')
+ def handle_requests():
+ aws = endpoint_from_flag('endpoint.aws.requested')
+ for request in aws.requests:
+ if request.instance_tags:
+ tag_instance(
+ request.instance_id,
+ request.region,
+ request.instance_tags)
+ if request.requested_load_balancer_management:
+ layer.aws.enable_load_balancer_management(
+ request.application_name,
+ request.instance_id,
+ request.region,
+ )
+ # ...
+ request.mark_completed()
+ ```
+ """
+
+ @when('endpoint.{endpoint_name}.changed')
+ def check_requests(self):
+ requests = self.requests
+ toggle_flag(self.expand_name('requested'), len(requests) > 0)
+ clear_flag(self.expand_name('changed'))
+
+ @when('endpoint.{endpoint_name}.departed')
+ def cleanup(self):
+ for unit in self.all_departed_units:
+ request = IntegrationRequest(unit)
+ request.clear()
+ self.all_departed_units.clear()
+ clear_flag(self.expand_name('departed'))
+
+ @property
+ def requests(self):
+ """
+ A list of the new or updated #IntegrationRequests that
+ have been made.
+ """
+ return [request for request in self.all_requests if request.changed]
+
+ @property
+ def all_requests(self):
+ """
+ A list of all the #IntegrationRequests that have been made,
+ even if unchanged.
+ """
+ return [IntegrationRequest(unit) for unit in self.all_joined_units]
+
+ @property
+ def application_names(self):
+ """
+ Set of names of all applications that are still joined.
+ """
+ return {unit.application_name for unit in self.all_joined_units}
+
+ @property
+ def unit_instances(self):
+ """
+ Mapping of unit names to instance IDs and regions for all joined units.
+ """
+ return {
+ unit.unit_name: {
+ 'instance-id': unit.received['instance-id'],
+ 'region': unit.received['region'],
+ } for unit in self.all_joined_units
+ }
+
+
+class IntegrationRequest:
+ """
+ A request for integration from a single remote unit.
+ """
+ def __init__(self, unit):
+ self._unit = unit
+ self._hash = sha256(json.dumps(dict(unit.received),
+ sort_keys=True).encode('utf8')
+ ).hexdigest()
+
+ @property
+ def hash(self):
+ """
+ SHA hash of the data for this request.
+ """
+ return self._hash
+
+ @property
+ def _hash_key(self):
+ endpoint = self._unit.relation.endpoint
+ return endpoint.expand_name('request.{}'.format(self.instance_id))
+
+ @property
+ def changed(self):
+ """
+ Whether this request has changed since the last time it was
+ marked completed.
+ """
+ if not (self.instance_id and self._requested):
+ return False
+ saved_hash = unitdata.kv().get(self._hash_key)
+ result = saved_hash != self.hash
+ return result
+
+ def mark_completed(self):
+ """
+ Mark this request as having been completed.
+ """
+ completed = self._unit.relation.to_publish.get('completed', {})
+ completed[self.instance_id] = self.hash
+ unitdata.kv().set(self._hash_key, self.hash)
+ self._unit.relation.to_publish['completed'] = completed
+
+ def clear(self):
+ """
+ Clear this request's cached data.
+ """
+ unitdata.kv().unset(self._hash_key)
+
+ @property
+ def unit_name(self):
+ """
+ The name of the unit making the request.
+ """
+ return self._unit.unit_name
+
+ @property
+ def application_name(self):
+ """
+ The name of the application making the request.
+ """
+ return self._unit.application_name
+
+ @property
+ def _requested(self):
+ return self._unit.received['requested']
+
+ @property
+ def instance_id(self):
+ """
+ The instance ID reported for this request.
+ """
+ return self._unit.received['instance-id']
+
+ @property
+ def region(self):
+ """
+ The region reported for this request.
+ """
+ return self._unit.received['region']
+
+ @property
+ def instance_tags(self):
+ """
+ Mapping of tag names to values (or `None`) to apply to this instance.
+ """
+ # uses dict() here to make a copy, just to be safe
+ return dict(self._unit.received.get('instance-tags', {}))
+
+ @property
+ def instance_security_group_tags(self):
+ """
+ Mapping of tag names to values (or `None`) to apply to this instance's
+ machine-specific security group (firewall).
+ """
+ # uses dict() here to make a copy, just to be safe
+ return dict(self._unit.received.get('instance-security-group-tags',
+ {}))
+
+ @property
+ def instance_subnet_tags(self):
+ """
+ Mapping of tag names to values (or `None`) to apply to this instance's
+ subnet.
+ """
+ # uses dict() here to make a copy, just to be safe
+ return dict(self._unit.received.get('instance-subnet-tags', {}))
+
+ @property
+ def requested_instance_inspection(self):
+ """
+ Flag indicating whether the ability to inspect instances was requested.
+ """
+ return bool(self._unit.received['enable-instance-inspection'])
+
+ @property
+ def requested_acm_readonly(self):
+ """
+ Flag indicating whether acm readonly was requested.
+ """
+ return bool(self._unit.received['enable-acm-readonly'])
+
+ @property
+ def requested_acm_fullaccess(self):
+ """
+ Flag indicating whether acm fullaccess was requested.
+ """
+ return bool(self._unit.received['enable-acm-fullaccess'])
+
+ @property
+ def requested_network_management(self):
+ """
+ Flag indicating whether the ability to manage networking (firewalls,
+ subnets, etc) was requested.
+ """
+ return bool(self._unit.received['enable-network-management'])
+
+ @property
+ def requested_load_balancer_management(self):
+ """
+ Flag indicating whether load balancer management was requested.
+ """
+ return bool(self._unit.received['enable-load-balancer-management'])
+
+ @property
+ def requested_block_storage_management(self):
+ """
+ Flag indicating whether block storage management was requested.
+ """
+ return bool(self._unit.received['enable-block-storage-management'])
+
+ @property
+ def requested_dns_management(self):
+ """
+ Flag indicating whether DNS management was requested.
+ """
+ return bool(self._unit.received['enable-dns-management'])
+
+ @property
+ def requested_object_storage_access(self):
+ """
+ Flag indicating whether object storage access was requested.
+ """
+ return bool(self._unit.received['enable-object-storage-access'])
+
+ @property
+ def object_storage_access_patterns(self):
+ """
+ List of patterns to which to restrict object storage access.
+ """
+ return list(
+ self._unit.received['object-storage-access-patterns'] or [])
+
+ @property
+ def requested_object_storage_management(self):
+ """
+ Flag indicating whether object storage management was requested.
+ """
+ return bool(self._unit.received['enable-object-storage-management'])
+
+ @property
+ def object_storage_management_patterns(self):
+ """
+ List of patterns to which to restrict object storage management.
+ """
+ return list(
+ self._unit.received['object-storage-management-patterns'] or [])
diff --git a/kubernetes-master/hooks/relations/aws-integration/pydocmd.yml b/kubernetes-master/hooks/relations/aws-integration/pydocmd.yml
new file mode 100644
index 0000000..70a2e75
--- /dev/null
+++ b/kubernetes-master/hooks/relations/aws-integration/pydocmd.yml
@@ -0,0 +1,16 @@
+site_name: 'AWS Integration Interface'
+
+generate:
+ - requires.md:
+ - requires
+ - requires.AWSIntegrationRequires+
+ - provides.md:
+ - provides
+ - provides.AWSIntegrationProvides+
+ - provides.IntegrationRequest+
+
+pages:
+ - Requires: requires.md
+ - Provides: provides.md
+
+gens_dir: docs
diff --git a/kubernetes-master/hooks/relations/aws-integration/requires.py b/kubernetes-master/hooks/relations/aws-integration/requires.py
new file mode 100644
index 0000000..c457e02
--- /dev/null
+++ b/kubernetes-master/hooks/relations/aws-integration/requires.py
@@ -0,0 +1,262 @@
+"""
+This is the requires side of the interface layer, for use in charms that
+wish to request integration with AWS native features. The integration will
+be provided by the AWS integration charm, which allows the requiring charm
+to not require cloud credentials itself and not have a lot of AWS specific
+API code.
+
+The flags that are set by the requires side of this interface are:
+
+* **`endpoint.{endpoint_name}.joined`** This flag is set when the relation
+ has been joined, and the charm should then use the methods documented below
+ to request specific AWS features. This flag is automatically removed if
+ the relation is broken. It should not be removed by the charm.
+
+* **`endpoint.{endpoint_name}.ready`** This flag is set once the requested
+ features have been enabled for the AWS instance on which the charm is
+ running. This flag is automatically removed if new integration features
+ are requested. It should not be removed by the charm.
+"""
+
+
+import json
+import string
+from hashlib import sha256
+from urllib.parse import urljoin
+from urllib.request import urlopen
+
+from charmhelpers.core import unitdata
+
+from charms.reactive import Endpoint
+from charms.reactive import when, when_not
+from charms.reactive import clear_flag, toggle_flag
+
+
+# block size to read data from AWS metadata service
+# (realistically, just needs to be bigger than ~20 chars)
+READ_BLOCK_SIZE = 2048
+
+
+class AWSIntegrationRequires(Endpoint):
+ """
+ Example usage:
+
+ ```python
+ from charms.reactive import when, endpoint_from_flag
+
+ @when('endpoint.aws.joined')
+ def request_aws_integration():
+ aws = endpoint_from_flag('endpoint.aws.joined')
+ aws.request_instance_tags({
+ 'tag1': 'value1',
+ 'tag2': None,
+ })
+ aws.request_load_balancer_management()
+ # ...
+
+ @when('endpoint.aws.ready')
+ def aws_integration_ready():
+ update_config_enable_aws()
+ ```
+ """
+ # the IP is the AWS metadata service, documented here:
+ # https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-instance-metadata.html
+ _metadata_url = 'http://169.254.169.254/latest/meta-data/'
+ _instance_id_url = urljoin(_metadata_url, 'instance-id')
+ _az_url = urljoin(_metadata_url, 'placement/availability-zone')
+
+ def __init__(self, *args, **kwargs):
+ super().__init__(*args, **kwargs)
+ self._instance_id = None
+ self._region = None
+
+ @property
+ def _received(self):
+ """
+ Helper to streamline access to received data since we expect to only
+ ever be connected to a single AWS integration application with a
+ single unit.
+ """
+ return self.relations[0].joined_units.received
+
+ @property
+ def _to_publish(self):
+ """
+        Helper to streamline access to published data since we expect to only
+ ever be connected to a single AWS integration application with a
+ single unit.
+ """
+ return self.relations[0].to_publish
+
+ @when('endpoint.{endpoint_name}.joined')
+ def send_instance_info(self):
+ self._to_publish['instance-id'] = self.instance_id
+ self._to_publish['region'] = self.region
+
+ @when('endpoint.{endpoint_name}.changed')
+ def check_ready(self):
+ completed = self._received.get('completed', {})
+ actual_hash = completed.get(self.instance_id)
+ # My middle name is ready. No, that doesn't sound right.
+ # I eat ready for breakfast.
+ toggle_flag(self.expand_name('ready'),
+ self._requested and actual_hash == self._expected_hash)
+ clear_flag(self.expand_name('changed'))
+
+ @when_not('endpoint.{endpoint_name}.joined')
+ def remove_ready(self):
+ clear_flag(self.expand_name('ready'))
+
+ @property
+ def instance_id(self):
+ """
+ This unit's instance-id.
+ """
+ if self._instance_id is None:
+ cache_key = self.expand_name('instance-id')
+ cached = unitdata.kv().get(cache_key)
+ if cached:
+ self._instance_id = cached
+ else:
+ with urlopen(self._instance_id_url) as fd:
+ self._instance_id = fd.read(READ_BLOCK_SIZE).decode('utf8')
+ unitdata.kv().set(cache_key, self._instance_id)
+ return self._instance_id
+
+ @property
+ def region(self):
+ """
+ The region this unit is in.
+ """
+ if self._region is None:
+ cache_key = self.expand_name('region')
+ cached = unitdata.kv().get(cache_key)
+ if cached:
+ self._region = cached
+ else:
+ with urlopen(self._az_url) as fd:
+ az = fd.read(READ_BLOCK_SIZE).decode('utf8')
+ self._region = az.rstrip(string.ascii_lowercase)
+ unitdata.kv().set(cache_key, self._region)
+ return self._region
+
+ @property
+ def _expected_hash(self):
+ return sha256(json.dumps(dict(self._to_publish),
+ sort_keys=True).encode('utf8')).hexdigest()
+
+ @property
+ def _requested(self):
+ # whether or not a request has been issued
+ return self._to_publish['requested']
+
+ def _request(self, keyvals):
+ self._to_publish.update(keyvals)
+ self._to_publish['requested'] = True
+ clear_flag(self.expand_name('ready'))
+
+ def tag_instance(self, tags):
+ """
+ Request that the given tags be applied to this instance.
+
+ # Parameters
+ `tags` (dict): Mapping of tag names to values (or `None`).
+ """
+ self._request({'instance-tags': dict(tags)})
+
+ def tag_instance_security_group(self, tags):
+ """
+ Request that the given tags be applied to this instance's
+ machine-specific security group (firewall) created by Juju.
+
+ # Parameters
+ `tags` (dict): Mapping of tag names to values (or `None`).
+ """
+ self._request({'instance-security-group-tags': dict(tags)})
+
+ def tag_instance_subnet(self, tags):
+ """
+ Request that the given tags be applied to this instance's subnet.
+
+ # Parameters
+ `tags` (dict): Mapping of tag names to values (or `None`).
+ """
+ self._request({'instance-subnet-tags': dict(tags)})
+
+ def enable_acm_readonly(self):
+ """
+ Request readonly for ACM.
+ """
+ self._request({'enable-acm-readonly': True})
+
+ def enable_acm_fullaccess(self):
+ """
+ Request fullaccess for ACM.
+ """
+ self._request({'enable-acm-fullaccess': True})
+
+ def enable_instance_inspection(self):
+ """
+ Request the ability to inspect instances.
+ """
+ self._request({'enable-instance-inspection': True})
+
+ def enable_network_management(self):
+ """
+ Request the ability to manage networking (firewalls, subnets, etc).
+ """
+ self._request({'enable-network-management': True})
+
+ def enable_load_balancer_management(self):
+ """
+ Request the ability to manage load balancers.
+ """
+ self._request({'enable-load-balancer-management': True})
+
+ def enable_block_storage_management(self):
+ """
+ Request the ability to manage block storage.
+ """
+ self._request({'enable-block-storage-management': True})
+
+ def enable_dns_management(self):
+ """
+ Request the ability to manage DNS.
+ """
+ self._request({'enable-dns-management': True})
+
+ def enable_object_storage_access(self, patterns=None):
+ """
+ Request the ability to access object storage.
+
+ # Parameters
+ `patterns` (list): If given, restrict access to the resources matching
+ the patterns. If patterns do not start with the S3 ARN prefix
+ (`arn:aws:s3:::`), it will be prepended.
+ """
+ if patterns:
+ for i, pattern in enumerate(patterns):
+ if not pattern.startswith('arn:aws:s3:::'):
+ patterns[i] = 'arn:aws:s3:::{}'.format(pattern)
+ self._request({
+ 'enable-object-storage-access': True,
+ 'object-storage-access-patterns': patterns,
+ })
+
+ def enable_object_storage_management(self, patterns=None):
+ """
+ Request the ability to manage object storage.
+
+ # Parameters
+ `patterns` (list): If given, restrict management to the resources
+ matching the patterns. If patterns do not start with the S3 ARN
+ prefix (`arn:aws:s3:::`), it will be prepended.
+ """
+ if patterns:
+ for i, pattern in enumerate(patterns):
+ if not pattern.startswith('arn:aws:s3:::'):
+ patterns[i] = 'arn:aws:s3:::{}'.format(pattern)
+ self._request({
+ 'enable-object-storage-management': True,
+ 'object-storage-management-patterns': patterns,
+ })
diff --git a/kubernetes-master/hooks/relations/azure-integration/.gitignore b/kubernetes-master/hooks/relations/azure-integration/.gitignore
new file mode 100644
index 0000000..5f9f2c5
--- /dev/null
+++ b/kubernetes-master/hooks/relations/azure-integration/.gitignore
@@ -0,0 +1,3 @@
+.tox
+__pycache__
+*.pyc
diff --git a/kubernetes-master/hooks/relations/azure-integration/LICENSE b/kubernetes-master/hooks/relations/azure-integration/LICENSE
new file mode 100644
index 0000000..d645695
--- /dev/null
+++ b/kubernetes-master/hooks/relations/azure-integration/LICENSE
@@ -0,0 +1,202 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/kubernetes-master/hooks/relations/azure-integration/README.md b/kubernetes-master/hooks/relations/azure-integration/README.md
new file mode 100644
index 0000000..ddcae26
--- /dev/null
+++ b/kubernetes-master/hooks/relations/azure-integration/README.md
@@ -0,0 +1,28 @@
+# Overview
+
+This layer encapsulates the `azure-integration` interface communication
+protocol and provides an API for charms on either side of relations using this
+interface.
+
+## Usage
+
+In your charm's `layer.yaml`, ensure that `interface:azure-integration` is
+included in the `includes` section:
+
+```yaml
+includes: ['layer:basic', 'interface:azure-integration']
+```
+
+And in your charm's `metadata.yaml`, ensure that a relation endpoint is defined
+using the `azure-integration` interface protocol:
+
+```yaml
+requires:
+ azure:
+ interface: azure-integration
+```
+
+For documentation on how to use the API for this interface, see:
+
+* [Requires API documentation](docs/requires.md)
+* [Provides API documentation](docs/provides.md) (this will only be used by the azure-integrator charm)
diff --git a/kubernetes-master/hooks/relations/azure-integration/__init__.py b/kubernetes-master/hooks/relations/azure-integration/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/kubernetes-master/hooks/relations/azure-integration/copyright b/kubernetes-master/hooks/relations/azure-integration/copyright
new file mode 100644
index 0000000..a91bdf1
--- /dev/null
+++ b/kubernetes-master/hooks/relations/azure-integration/copyright
@@ -0,0 +1,16 @@
+Format: http://dep.debian.net/deps/dep5/
+
+Files: *
+Copyright: Copyright 2018, Canonical Ltd., All Rights Reserved.
+License: Apache License 2.0
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+ .
+ http://www.apache.org/licenses/LICENSE-2.0
+ .
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/kubernetes-master/hooks/relations/azure-integration/docs/provides.md b/kubernetes-master/hooks/relations/azure-integration/docs/provides.md
new file mode 100644
index 0000000..4348dff
--- /dev/null
+++ b/kubernetes-master/hooks/relations/azure-integration/docs/provides.md
@@ -0,0 +1,175 @@
+
+# provides
+
+
+This is the provides side of the interface layer, for use only by the Azure
+integrator charm itself.
+
+The flags that are set by the provides side of this interface are:
+
+* **`endpoint.{endpoint_name}.requested`** This flag is set when there is
+ a new or updated request by a remote unit for Azure integration features.
+ The Azure integration charm should then iterate over each request, perform
+ whatever actions are necessary to satisfy those requests, and then mark
+ them as complete.
+
+
+
+
+A list of the IDs of all established relations.
+
+
+### requests
+
+
+A list of the new or updated `IntegrationRequests` that
+have been made.
+
+
+### get_departed_charms
+
+```python
+AzureIntegrationProvides.get_departed_charms(self)
+```
+
+Get a list of all charms that have had all units depart since the
+last time this was called.
+
+
+### mark_completed
+
+```python
+AzureIntegrationProvides.mark_completed(self)
+```
+
+Mark all requests as completed and remove the `requests-pending` flag.
+
+
+## IntegrationRequest
+
+```python
+IntegrationRequest(self, unit)
+```
+
+A request for integration from a single remote unit.
+
+
+### application_name
+
+
+The name of the application making the request.
+
+
+### charm
+
+
+The charm name reported for this request.
+
+
+### instance_tags
+
+
+Mapping of tag names to values to apply to this instance.
+
+
+### is_changed
+
+
+Whether this request has changed since the last time it was
+marked completed (if ever).
+
+
+### model_uuid
+
+
+The UUID of the model containing the application making this request.
+
+
+### relation_id
+
+
+The ID of the relation for the unit making the request.
+
+
+
+
+The resource group reported for this request.
+
+
+### unit_name
+
+
+The name of the unit making the request.
+
+
+### vm_id
+
+
+The instance ID reported for this request.
+
+
+### vm_name
+
+
+The instance name reported for this request.
+
+
+### mark_completed
+
+```python
+IntegrationRequest.mark_completed(self)
+```
+
+Mark this request as having been completed.
+
diff --git a/kubernetes-master/hooks/relations/azure-integration/docs/requires.md b/kubernetes-master/hooks/relations/azure-integration/docs/requires.md
new file mode 100644
index 0000000..608b4ee
--- /dev/null
+++ b/kubernetes-master/hooks/relations/azure-integration/docs/requires.md
@@ -0,0 +1,145 @@
+
+# requires
+
+
+This is the requires side of the interface layer, for use in charms that
+wish to request integration with Azure native features. The integration will
+be provided by the Azure integrator charm, which allows the requiring charm
+to not require cloud credentials itself and not have a lot of Azure specific
+API code.
+
+The flags that are set by the requires side of this interface are:
+
+* **`endpoint.{endpoint_name}.joined`** This flag is set when the relation
+ has been joined, and the charm should then use the methods documented below
+ to request specific Azure features. This flag is automatically removed if
+ the relation is broken. It should not be removed by the charm.
+
+* **`endpoint.{endpoint_name}.ready`** This flag is set once the requested
+ features have been enabled for the Azure instance on which the charm is
+ running. This flag is automatically removed if new integration features
+ are requested. It should not be removed by the charm.
+
+
+## AzureIntegrationRequires
+
+```python
+AzureIntegrationRequires(self, *args, **kwargs)
+```
+
+Interface to request integration access.
+
+Note that due to resource limits and permissions granularity, policies are
+limited to being applied at the charm level. That means that, if any
+permissions are requested (i.e., any of the enable methods are called),
+what is granted will be the sum of those ever requested by any instance of
+the charm on this cloud.
+
+Labels, on the other hand, will be instance specific.
+
+Example usage:
+
+```python
+from charms.reactive import when, endpoint_from_flag
+
+@when('endpoint.azure.joined')
+def request_azure_integration():
+ azure = endpoint_from_flag('endpoint.azure.joined')
+ azure.tag_instance({
+ 'tag1': 'value1',
+ 'tag2': None,
+ })
+ azure.request_load_balancer_management()
+ # ...
+
+@when('endpoint.azure.ready')
+def azure_integration_ready():
+ update_config_enable_azure()
+```
+
+
+### is_ready
+
+
+Whether or not the request for this instance has been completed.
+
+
+### resource_group
+
+
+The resource group this unit is in.
+
+
+### vm_id
+
+
+This unit's instance ID.
+
+
+### vm_name
+
+
+This unit's instance name.
+
+
+### tag_instance
+
+```python
+AzureIntegrationRequires.tag_instance(self, tags)
+```
+
+Request that the given tags be applied to this instance.
+
+__Parameters__
+
+- __`tags` (dict)__: Mapping of tags names to values.
+
+
+### enable_instance_inspection
+
+```python
+AzureIntegrationRequires.enable_instance_inspection(self)
+```
+
+Request the ability to inspect instances.
+
+
+### enable_network_management
+
+```python
+AzureIntegrationRequires.enable_network_management(self)
+```
+
+Request the ability to manage networking.
+
+
+### enable_security_management
+
+```python
+AzureIntegrationRequires.enable_security_management(self)
+```
+
+Request the ability to manage security (e.g., firewalls).
+
+
+### enable_block_storage_management
+
+```python
+AzureIntegrationRequires.enable_block_storage_management(self)
+```
+
+Request the ability to manage block storage.
+
+
+### enable_dns_management
+
+```python
+AzureIntegrationRequires.enable_dns_management(self)
+```
+
+Request the ability to manage DNS.
+
+
+### enable_object_storage_access
+
+```python
+AzureIntegrationRequires.enable_object_storage_access(self)
+```
+
+Request the ability to access object storage.
+
+
+### enable_object_storage_management
+
+```python
+AzureIntegrationRequires.enable_object_storage_management(self)
+```
+
+Request the ability to manage object storage.
+
diff --git a/kubernetes-master/hooks/relations/azure-integration/interface.yaml b/kubernetes-master/hooks/relations/azure-integration/interface.yaml
new file mode 100644
index 0000000..a77a7cb
--- /dev/null
+++ b/kubernetes-master/hooks/relations/azure-integration/interface.yaml
@@ -0,0 +1,4 @@
+name: azure-integration
+summary: Interface for connecting to the Azure integrator charm.
+version: 1
+maintainer: Cory Johns
diff --git a/kubernetes-master/hooks/relations/azure-integration/make_docs b/kubernetes-master/hooks/relations/azure-integration/make_docs
new file mode 100644
index 0000000..84df5ee
--- /dev/null
+++ b/kubernetes-master/hooks/relations/azure-integration/make_docs
@@ -0,0 +1,20 @@
#!.tox/py3/bin/python
"""Generate the interface API docs (docs/requires.md, docs/provides.md).

Wraps pydocmd so requires.py/provides.py can be imported outside of a real
charm environment. Run from the layer root via the tox-managed interpreter.
"""

import sys
from shutil import rmtree
from unittest.mock import patch

import pydocmd.__main__


# charmhelpers.core.hookenv.metadata() normally reads a charm's metadata.yaml;
# stub it out so importing the interface modules does not need a charm env.
with patch('charmhelpers.core.hookenv.metadata') as metadata:
    metadata.return_value = {
        'requires': {'azure': {'interface': 'azure-integration'}},
        'provides': {'azure': {'interface': 'azure-integration'}},
    }
    sys.path.insert(0, '.')
    # Default to the 'build' subcommand when none was given on the CLI.
    if len(sys.argv) == 1:
        sys.argv.append('build')
    pydocmd.__main__.main()
    # pydocmd leaves an intermediate _build directory behind; clean it up.
    # ignore_errors avoids a crash if the build step didn't create it.
    rmtree('_build', ignore_errors=True)
diff --git a/kubernetes-master/hooks/relations/azure-integration/provides.py b/kubernetes-master/hooks/relations/azure-integration/provides.py
new file mode 100644
index 0000000..e0d596e
--- /dev/null
+++ b/kubernetes-master/hooks/relations/azure-integration/provides.py
@@ -0,0 +1,267 @@
+"""
+This is the provides side of the interface layer, for use only by the Azure
+integrator charm itself.
+
+The flags that are set by the provides side of this interface are:
+
+* **`endpoint.{endpoint_name}.requested`** This flag is set when there is
+ a new or updated request by a remote unit for Azure integration features.
+ The Azure integration charm should then iterate over each request, perform
+ whatever actions are necessary to satisfy those requests, and then mark
+ them as complete.
+"""
+
+from operator import attrgetter
+
+from charms.reactive import Endpoint
+from charms.reactive import when
+from charms.reactive import toggle_flag, clear_flag
+
+
class AzureIntegrationProvides(Endpoint):
    """
    Provides side of the ``azure-integration`` interface, for use only by
    the Azure integrator charm.

    Example usage:

    ```python
    from charms.reactive import when, endpoint_from_flag
    from charms import layer

    @when('endpoint.azure.requests-pending')
    def handle_requests():
        azure = endpoint_from_flag('endpoint.azure.requests-pending')
        for request in azure.requests:
            if request.instance_tags:
                layer.azure.tag_instance(
                    request.vm_name,
                    request.resource_group,
                    request.instance_tags)
            if request.requested_loadbalancer_management:
                layer.azure.enable_load_balancer_management(
                    request.charm,
                    request.vm_name,
                    request.resource_group,
                )
            # ...
        azure.mark_completed()
    ```
    """

    @when('endpoint.{endpoint_name}.changed')
    def check_requests(self):
        # Raise/lower the requests-pending flag depending on whether any
        # remote unit has a new or updated request, then clear the change
        # notification so this handler only re-fires on the next update.
        toggle_flag(self.expand_name('requests-pending'),
                    len(self.requests) > 0)
        clear_flag(self.expand_name('changed'))

    @property
    def requests(self):
        """
        A list of the new or updated #IntegrationRequests that
        have been made.

        The result is cached on the instance; `mark_completed()` resets
        the cache.
        """
        if not hasattr(self, '_requests'):
            all_requests = [IntegrationRequest(unit)
                            for unit in self.all_joined_units]
            # Only surface requests that differ from what was last completed.
            is_changed = attrgetter('is_changed')
            self._requests = list(filter(is_changed, all_requests))
        return self._requests

    @property
    def relation_ids(self):
        """
        A list of the IDs of all established relations.
        """
        return [relation.relation_id for relation in self.relations]

    def get_departed_charms(self):
        """
        Get a list of all charms that have had all units depart since the
        last time this was called.

        Clears the departed-units record as a side effect, so each departure
        is only reported once.
        """
        joined_charms = {unit.received['charm']
                        for unit in self.all_joined_units
                        if unit.received['charm']}
        departed_charms = [unit.received['charm']
                          for unit in self.all_departed_units
                          if unit.received['charm'] not in joined_charms]
        self.all_departed_units.clear()
        return departed_charms

    def mark_completed(self):
        """
        Mark all requests as completed and remove the `requests-pending` flag.
        """
        for request in self.requests:
            request.mark_completed()
        clear_flag(self.expand_name('requests-pending'))
        self._requests = []  # reset the cache used by the requests property
+
+
class IntegrationRequest:
    """
    A request for integration from a single remote unit.

    Wraps the relation data received from one unit and exposes the request
    fields as read-only properties.
    """
    def __init__(self, unit):
        # The remote unit whose received relation data backs this request.
        self._unit = unit

    @property
    def _to_publish(self):
        # Relation data this (provides) side publishes back to the remote app.
        return self._unit.relation.to_publish

    @property
    def _completed(self):
        # Mapping of vm-id -> the request payload that was last satisfied.
        return self._to_publish.get('completed', {})

    @property
    def _requested(self):
        # The raw request payload sent by the remote unit.
        return self._unit.received['requested']

    @property
    def is_changed(self):
        """
        Whether this request has changed since the last time it was
        marked completed (if ever).
        """
        # A request missing any identity field or payload is treated as
        # not-yet-complete input rather than a change.
        if not all([self.charm, self.vm_id, self.vm_name,
                    self.resource_group, self._requested]):
            return False
        return self._completed.get(self.vm_id) != self._requested

    def mark_completed(self):
        """
        Mark this request as having been completed.
        """
        completed = self._completed
        completed[self.vm_id] = self._requested
        self._to_publish['completed'] = completed  # have to explicitly update

    def send_additional_metadata(self, resource_group_location,
                                 vnet_name, vnet_resource_group,
                                 subnet_name, security_group_name):
        # Publish extra Azure environment details back to the requesting
        # charm on the relation.
        self._to_publish.update({
            'resource-group-location': resource_group_location,
            'vnet-name': vnet_name,
            'vnet-resource-group': vnet_resource_group,
            'subnet-name': subnet_name,
            'security-group-name': security_group_name,
        })

    @property
    def relation_id(self):
        """
        The ID of the relation for the unit making the request.
        """
        return self._unit.relation.relation_id

    @property
    def unit_name(self):
        """
        The name of the unit making the request.
        """
        return self._unit.unit_name

    @property
    def application_name(self):
        """
        The name of the application making the request.
        """
        return self._unit.application_name

    @property
    def charm(self):
        """
        The charm name reported for this request.
        """
        return self._unit.received['charm']

    @property
    def vm_id(self):
        """
        The instance ID reported for this request.
        """
        return self._unit.received['vm-id']

    @property
    def vm_name(self):
        """
        The instance name reported for this request.
        """
        return self._unit.received['vm-name']

    @property
    def resource_group(self):
        """
        The resource group reported for this request.
        """
        return self._unit.received['res-group']

    @property
    def model_uuid(self):
        """
        The UUID of the model containing the application making this request.
        """
        return self._unit.received['model-uuid']

    @property
    def instance_tags(self):
        """
        Mapping of tag names to values to apply to this instance.
        """
        # uses dict() here to make a copy, just to be safe
        return dict(self._unit.received.get('instance-tags', {}))

    @property
    def requested_instance_inspection(self):
        """
        Flag indicating whether the ability to inspect instances was requested.
        """
        return bool(self._unit.received['enable-instance-inspection'])

    @property
    def requested_network_management(self):
        """
        Flag indicating whether the ability to manage networking was requested.
        """
        return bool(self._unit.received['enable-network-management'])

    @property
    def requested_loadbalancer_management(self):
        """
        Flag indicating whether the ability to manage load balancers was
        requested.
        """
        # NOTE(review): the relation key here is 'enable-loadbalancer-management'
        # (no hyphen between 'load' and 'balancer') — confirm the requires side
        # publishes this exact spelling, or the flag will always read as False.
        return bool(self._unit.received['enable-loadbalancer-management'])


    @property
    def requested_security_management(self):
        """
        Flag indicating whether security management was requested.
        """
        return bool(self._unit.received['enable-security-management'])

    @property
    def requested_block_storage_management(self):
        """
        Flag indicating whether block storage management was requested.
        """
        return bool(self._unit.received['enable-block-storage-management'])

    @property
    def requested_dns_management(self):
        """
        Flag indicating whether DNS management was requested.
        """
        return bool(self._unit.received['enable-dns-management'])

    @property
    def requested_object_storage_access(self):
        """
        Flag indicating whether object storage access was requested.
        """
        return bool(self._unit.received['enable-object-storage-access'])

    @property
    def requested_object_storage_management(self):
        """
        Flag indicating whether object storage management was requested.
        """
        return bool(self._unit.received['enable-object-storage-management'])
diff --git a/kubernetes-master/hooks/relations/azure-integration/pydocmd.yml b/kubernetes-master/hooks/relations/azure-integration/pydocmd.yml
new file mode 100644
index 0000000..6414c29
--- /dev/null
+++ b/kubernetes-master/hooks/relations/azure-integration/pydocmd.yml
@@ -0,0 +1,16 @@
+site_name: 'Azure Integration Interface'
+
+generate:
+ - requires.md:
+ - requires
+ - requires.AzureIntegrationRequires+
+ - provides.md:
+ - provides
+ - provides.AzureIntegrationProvides+
+ - provides.IntegrationRequest+
+
+pages:
+ - Requires: requires.md
+ - Provides: provides.md
+
+gens_dir: docs
diff --git a/kubernetes-master/hooks/relations/azure-integration/requires.py b/kubernetes-master/hooks/relations/azure-integration/requires.py
new file mode 100644
index 0000000..62f2b01
--- /dev/null
+++ b/kubernetes-master/hooks/relations/azure-integration/requires.py
@@ -0,0 +1,282 @@
+"""
+This is the requires side of the interface layer, for use in charms that
+wish to request integration with Azure native features. The integration will
+be provided by the Azure integrator charm, which allows the requiring charm
+to not require cloud credentials itself and not have a lot of Azure specific
+API code.
+
+The flags that are set by the requires side of this interface are:
+
+* **`endpoint.{endpoint_name}.joined`** This flag is set when the relation
+ has been joined, and the charm should then use the methods documented below
+ to request specific Azure features. This flag is automatically removed if
+ the relation is broken. It should not be removed by the charm.
+
+* **`endpoint.{endpoint_name}.ready`** This flag is set once the requested
+ features have been enabled for the Azure instance on which the charm is
+ running. This flag is automatically removed if new integration features
+ are requested. It should not be removed by the charm.
+"""
+
+
+import json
+import os
+import random
+import string
+from urllib.request import urlopen, Request
+
+from charmhelpers.core import hookenv
+from charmhelpers.core import unitdata
+
+from charms.reactive import Endpoint
+from charms.reactive import when, when_not
+from charms.reactive import clear_flag, toggle_flag
+
+
+# block size to read data from Azure metadata service
+# (realistically, just needs to be bigger than ~20 chars)
+READ_BLOCK_SIZE = 2048
+
+
+class AzureIntegrationRequires(Endpoint):
+ """
+ Interface to request integration access.
+
+ Note that due to resource limits and permissions granularity, policies are
+ limited to being applied at the charm level. That means that, if any
+ permissions are requested (i.e., any of the enable methods are called),
+ what is granted will be the sum of those ever requested by any instance of
+ the charm on this cloud.
+
+ Labels, on the other hand, will be instance specific.
+
+ Example usage:
+
+ ```python
+ from charms.reactive import when, endpoint_from_flag
+
+ @when('endpoint.azure.joined')
+ def request_azure_integration():
+ azure = endpoint_from_flag('endpoint.azure.joined')
+ azure.tag_instance({
+ 'tag1': 'value1',
+ 'tag2': None,
+ })
+ azure.request_load_balancer_management()
+ # ...
+
+ @when('endpoint.azure.ready')
+ def azure_integration_ready():
+ update_config_enable_azure()
+ ```
+ """
+ # https://docs.microsoft.com/en-us/azure/virtual-machines/windows/instance-metadata-service
+ _metadata_url = 'http://169.254.169.254/metadata/instance?api-version=2017-12-01' # noqa
+ _metadata_headers = {'Metadata': 'true'}
+
+ def __init__(self, *args, **kwargs):
+ super().__init__(*args, **kwargs)
+ self._vm_metadata = None
+
+ @property
+ def _received(self):
+ """
+ Helper to streamline access to received data since we expect to only
+ ever be connected to a single Azure integration application with a
+ single unit.
+ """
+ return self.relations[0].joined_units.received
+
+ @property
+ def _to_publish(self):
+ """
+ Helper to streamline access to received data since we expect to only
+ ever be connected to a single Azure integration application with a
+ single unit.
+ """
+ return self.relations[0].to_publish
+
+ @when('endpoint.{endpoint_name}.joined')
+ def send_instance_info(self):
+ self._to_publish['charm'] = hookenv.charm_name()
+ self._to_publish['vm-id'] = self.vm_id
+ self._to_publish['vm-name'] = self.vm_name
+ self._to_publish['res-group'] = self.resource_group
+ self._to_publish['model-uuid'] = os.environ['JUJU_MODEL_UUID']
+
+ @when('endpoint.{endpoint_name}.changed')
+ def check_ready(self):
+ # My middle name is ready. No, that doesn't sound right.
+ # I eat ready for breakfast.
+ toggle_flag(self.expand_name('ready'), self.is_ready)
+ clear_flag(self.expand_name('changed'))
+
+ @when_not('endpoint.{endpoint_name}.joined')
+ def remove_ready(self):
+ clear_flag(self.expand_name('ready'))
+
+ @property
+ def vm_metadata(self):
+ if self._vm_metadata is None:
+ cache_key = self.expand_name('vm-metadata')
+ cached = unitdata.kv().get(cache_key)
+ if cached:
+ self._vm_metadata = cached
+ else:
+ req = Request(self._metadata_url,
+ headers=self._metadata_headers)
+ with urlopen(req) as fd:
+ metadata = fd.read(READ_BLOCK_SIZE).decode('utf8').strip()
+ self._vm_metadata = json.loads(metadata)
+ unitdata.kv().set(cache_key, self._vm_metadata)
+ return self._vm_metadata
+
+ @property
+ def vm_id(self):
+ """
+ This unit's instance ID.
+ """
+ return self.vm_metadata['compute']['vmId']
+
+ @property
+ def vm_name(self):
+ """
+ This unit's instance name.
+ """
+ return self.vm_metadata['compute']['name']
+
+ @property
+ def vm_location(self):
+ """
+ The location (region) the instance is running in.
+ """
+ return self.vm_metadata['compute']['location']
+
+ @property
+ def resource_group(self):
+ """
+ The resource group this unit is in.
+ """
+ return self.vm_metadata['compute']['resourceGroupName']
+
+ @property
+ def resource_group_location(self):
+ """
+ The location (region) the resource group is in.
+ """
+ return self._received['resource-group-location']
+
+ @property
+ def subscription_id(self):
+ """
+ The ID of the Azure Subscription this unit is in.
+ """
+ return self.vm_metadata['compute']['subscriptionId']
+
+ @property
+ def vnet_name(self):
+ """
+ The name of the virtual network the instance is in.
+ """
+ return self._received['vnet-name']
+
+ @property
+ def vnet_resource_group(self):
+ """
+ The name of the virtual network the instance is in.
+ """
+ return self._received['vnet-resource-group']
+
+ @property
+ def subnet_name(self):
+ """
+ The name of the subnet the instance is in.
+ """
+ return self._received['subnet-name']
+
+ @property
+ def security_group_name(self):
+ """
+ The name of the security group attached to the cluster's subnet.
+ """
+ return self._received['security-group-name']
+
+ @property
+ def is_ready(self):
+ """
+ Whether or not the request for this instance has been completed.
+ """
+ requested = self._to_publish['requested']
+ completed = self._received.get('completed', {}).get(self.vm_id)
+ return requested and requested == completed
+
+ @property
+ def credentials(self):
+ return self._received['credentials']
+
+ def _request(self, keyvals):
+ alphabet = string.ascii_letters + string.digits
+ nonce = ''.join(random.choice(alphabet) for _ in range(8))
+ self._to_publish.update(keyvals)
+ self._to_publish['requested'] = nonce
+ clear_flag(self.expand_name('ready'))
+
+ def tag_instance(self, tags):
+ """
+ Request that the given tags be applied to this instance.
+
+ # Parameters
+ `tags` (dict): Mapping of tags names to values.
+ """
+ self._request({'instance-tags': dict(tags)})
+
+ def enable_instance_inspection(self):
+ """
+ Request the ability to inspect instances.
+ """
+ self._request({'enable-instance-inspection': True})
+
+ def enable_network_management(self):
+ """
+ Request the ability to manage networking.
+ """
+ self._request({'enable-network-management': True})
+
+ def enable_loadbalancer_management(self):
+ """
+ Request the ability to manage networking.
+ """
+ self._request({'enable-loadbalancer-management': True})
+
+
+ def enable_security_management(self):
+ """
+ Request the ability to manage security (e.g., firewalls).
+ """
+ self._request({'enable-security-management': True})
+
+ def enable_block_storage_management(self):
+ """
+ Request the ability to manage block storage.
+ """
+ self._request({'enable-block-storage-management': True})
+
+ def enable_dns_management(self):
+ """
+ Request the ability to manage DNS.
+ """
+ self._request({'enable-dns': True})
+
+ def enable_object_storage_access(self):
+ """
+ Request the ability to access object storage.
+ """
+ self._request({'enable-object-storage-access': True})
+
+ def enable_object_storage_management(self):
+ """
+ Request the ability to manage object storage.
+ """
+ self._request({'enable-object-storage-management': True})
+
+
diff --git a/kubernetes-master/hooks/relations/ceph-admin/.gitignore b/kubernetes-master/hooks/relations/ceph-admin/.gitignore
new file mode 100644
index 0000000..ca3c9ea
--- /dev/null
+++ b/kubernetes-master/hooks/relations/ceph-admin/.gitignore
@@ -0,0 +1,2 @@
+.idea
+*.swp
diff --git a/kubernetes-master/hooks/relations/ceph-admin/README.md b/kubernetes-master/hooks/relations/ceph-admin/README.md
new file mode 100644
index 0000000..b0c89db
--- /dev/null
+++ b/kubernetes-master/hooks/relations/ceph-admin/README.md
@@ -0,0 +1,41 @@
+# Overview
+
+**WARNING**: This is an unofficial, untested, and experimental layer from
+the community.
+
+This interface layer handles the communication between the Ceph Monitor
+and a client that requires an admin key.
+
+# Usage
+
+## Requires
+
+This interface layer will set the following states, as appropriate:
+
+ * `{relation_name}.available` The ceph client has been related to a provider.
+ The following accessors will be available:
+ - key - The admin cephx key
+ - auth - Whether or not strict auth is supported
+ - mon_hosts - The public addresses list of the monitor cluster
+
+
+Client example:
+
+```python
+@when('ceph-admin.available')
+def ceph_connected(ceph_client):
+ charm_ceph_conf = os.path.join(os.sep, 'etc', 'ceph', 'ceph.conf')
+ cephx_key = os.path.join(os.sep, 'etc', 'ceph', 'ceph.client.admin.keyring')
+
+ ceph_context = {
+ 'auth_supported': ceph_client.auth,
+ 'mon_hosts': ceph_client.mon_hosts,
+ }
+
+ with open(charm_ceph_conf, 'w') as cephconf:
+ cephconf.write(render_template('ceph.conf', ceph_context))
+
+ # Write out the cephx_key also
+ with open(cephx_key, 'w') as cephconf:
+ cephconf.write(ceph_client.key)
+```
diff --git a/kubernetes-master/hooks/relations/ceph-admin/__init__.py b/kubernetes-master/hooks/relations/ceph-admin/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/kubernetes-master/hooks/relations/ceph-admin/interface.yaml b/kubernetes-master/hooks/relations/ceph-admin/interface.yaml
new file mode 100644
index 0000000..45dd6f4
--- /dev/null
+++ b/kubernetes-master/hooks/relations/ceph-admin/interface.yaml
@@ -0,0 +1,3 @@
+name: ceph-admin
+summary: Ceph Admin Client Interface
+version: 1
diff --git a/kubernetes-master/hooks/relations/ceph-admin/requires.py b/kubernetes-master/hooks/relations/ceph-admin/requires.py
new file mode 100644
index 0000000..ba07b51
--- /dev/null
+++ b/kubernetes-master/hooks/relations/ceph-admin/requires.py
@@ -0,0 +1,23 @@
+from charms.reactive import Endpoint
+from charms.reactive import toggle_flag
+
+
+class CephClient(Endpoint):
+ def manage_flags(self):
+ toggle_flag(self.expand_name('{endpoint_name}.available'),
+ all([self.key(),
+ self.fsid(),
+ self.auth(),
+ self.mon_hosts()]))
+
+ def key(self):
+ return self.all_joined_units.received_raw['key']
+
+ def fsid(self):
+ return self.all_joined_units.received_raw['fsid']
+
+ def auth(self):
+ return self.all_joined_units.received_raw['auth']
+
+ def mon_hosts(self):
+ return self.all_joined_units.received_raw['mon_hosts']
diff --git a/kubernetes-master/hooks/relations/ceph-client/README.md b/kubernetes-master/hooks/relations/ceph-client/README.md
new file mode 100644
index 0000000..18076bd
--- /dev/null
+++ b/kubernetes-master/hooks/relations/ceph-client/README.md
@@ -0,0 +1,43 @@
+# Overview
+
+This interface layer handles the communication between the Ceph Monitor
+cluster and a client that requires an access key and a pool to use.
+
+# Usage
+
+## Requires
+
+This interface layer will set the following states, as appropriate:
+
+ * `{relation_name}.available` The ceph client has been related to a provider.
+
+The following accessors will be available:
+
+ - key - The cephx access key
+ - auth - Whether or not strict auth is supported
+ - mon_hosts - The public addresses list of the monitor cluster
+
+Client example:
+
+```python
+@when('ceph-client.connected')
+def ceph_connected(ceph_client):
+ ceph_client.create_pool('newpool')
+
+@when('ceph-client.available')
+def ceph_ready(ceph_client):
+    charm_ceph_conf = os.path.join(os.sep, 'etc', 'ceph', 'ceph.conf')
+ cephx_key = os.path.join(os.sep, 'etc', 'ceph', 'ceph.client.charm.keyring')
+
+ ceph_context = {
+ 'auth_supported': ceph_client.auth,
+ 'mon_hosts': ceph_client.mon_hosts,
+ }
+
+ with open(charm_ceph_conf, 'w') as cephconf:
+ cephconf.write(render_template('ceph.conf', ceph_context))
+
+ # Write out the cephx_key also
+ with open(cephx_key, 'w') as cephconf:
+ cephconf.write(ceph_client.key)
+```
diff --git a/kubernetes-master/hooks/relations/ceph-client/__init__.py b/kubernetes-master/hooks/relations/ceph-client/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/kubernetes-master/hooks/relations/ceph-client/interface.yaml b/kubernetes-master/hooks/relations/ceph-client/interface.yaml
new file mode 100644
index 0000000..8578859
--- /dev/null
+++ b/kubernetes-master/hooks/relations/ceph-client/interface.yaml
@@ -0,0 +1,13 @@
+name: ceph-client
+summary: Ceph Client Interface
+version: 1
+maintainer: OpenStack Charmers
+ignore:
+ - 'unit_tests'
+ - 'Makefile'
+ - '.testr.conf'
+ - 'test-requirements.txt'
+ - 'tox.ini'
+ - '.gitignore'
+ - '.gitreview'
+ - '.unit-state.db'
\ No newline at end of file
diff --git a/kubernetes-master/hooks/relations/ceph-client/lib/base_provides.py b/kubernetes-master/hooks/relations/ceph-client/lib/base_provides.py
new file mode 100644
index 0000000..32ebfd8
--- /dev/null
+++ b/kubernetes-master/hooks/relations/ceph-client/lib/base_provides.py
@@ -0,0 +1,97 @@
+# from charmhelpers.core import hookenv
+from charmhelpers.core.hookenv import (
+ relation_set,
+)
+from charms.reactive import RelationBase
+from charms.reactive import hook
+from charms.reactive import scopes
+# from charms.reactive import is_state
+# from charms.reactive import not_unless
+
+
+class CephProvides(RelationBase):
+    """
+    Provides side of the ceph-client interface, using per-unit
+    conversations.  Sets '{relation_name}.connected' on join/change and
+    '{relation_name}.broker_requested' once a remote unit has published
+    a 'broker_req'.
+    """
+    scope = scopes.UNIT
+
+    @hook('{provides:ceph-client}-relation-{joined,changed}')
+    def changed(self):
+        # Runs for both the -joined and -changed hooks of this relation.
+        self.set_state('{relation_name}.connected')
+        # service = hookenv.remote_service_name()
+        conversation = self.conversation()
+        if conversation.get_remote('broker_req'):
+            self.set_state('{relation_name}.broker_requested')
+
+    def provide_auth(self, service, key, auth_supported, public_address):
+        """
+        Provide a token to a requesting service.
+        :param str service: The service which requested the key
+        :param str key: The key to access Ceph
+        :param str auth_supported: Supported auth methods
+        :param str public_address: Ceph's public address
+        """
+        conversation = self.conversation(scope=service)
+        # print("Conversation is ", conversation)
+        # key is a keyword argument to the set_remote function so we have to
+        # set it separately.
+        relation_set(
+            relation_id=conversation.namespace,
+            relation_settings={'key': key})
+        opts = {
+            'auth': auth_supported,
+            'ceph-public-address': public_address,
+        }
+        conversation.set_remote(**opts)
+
+    def requested_keys(self):
+        """
+        Yield the names of services whose conversation has no 'key' value
+        yet (i.e. requested_key() returned None).
+
+        NOTE(review): this generator yields only the bare service name, not
+        (service, key) tuples as older examples suggested -- callers must
+        not tuple-unpack the yielded values.
+        """
+        for conversation in self.conversations():
+            service = conversation.scope
+            key = self.requested_key(service)
+            if key is None:
+                yield service
+
+    def requested_key(self, service):
+        """
+        Return the remote 'key' value for the service's conversation, or
+        None when it has not been set.
+        """
+        return self.conversation(scope=service).get_remote('key')
+
+    def provide_broker_token(self, service, unit_response_key, token):
+        """
+        Provide a broker token to a requesting service.
+        :param str service: The service which requested the key
+        :param str unit_response_key: The unique key for the unit
+        :param str token: Broker token to provide
+        """
+        conversation = self.conversation(scope=service)
+
+        # broker_rsp is being left for backward compatibility,
+        # unit_response_key supersedes it
+        conversation.set_remote(**{
+            'broker_rsp': token,
+            unit_response_key: token,
+        })
+
+    def requested_tokens(self):
+        """
+        Yield (service, token) pairs for every conversation, where token is
+        the remote 'broker_req' value (possibly None).
+        Example usage::
+            for service, token in ceph.requested_tokens():
+                ceph.provide_broker_token(service, unit_response_key, token)
+        """
+        for conversation in self.conversations():
+            service = conversation.scope
+            token = self.requested_token(service)
+            yield service, token
+
+    def requested_token(self, service):
+        """
+        Return the remote 'broker_req' value for the service's conversation.
+        """
+        return self.conversation(scope=service).get_remote('broker_req')
diff --git a/kubernetes-master/hooks/relations/ceph-client/lib/base_requires.py b/kubernetes-master/hooks/relations/ceph-client/lib/base_requires.py
new file mode 100644
index 0000000..c442c85
--- /dev/null
+++ b/kubernetes-master/hooks/relations/ceph-client/lib/base_requires.py
@@ -0,0 +1,324 @@
+# Copyright 2017 Canonical Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import json
+
+import charms.reactive as reactive
+
+from charmhelpers.core.hookenv import log
+from charmhelpers.contrib.network.ip import format_ipv6_addr
+
+from charmhelpers.contrib.storage.linux.ceph import (
+ CephBrokerRq,
+ is_request_complete,
+ is_request_sent,
+)
+
+
+class CephRequires(reactive.Endpoint):
+    """
+    Base requires-side implementation for Ceph client interfaces.
+
+    Tracks relation lifecycle via reactive flags ('connected', 'available',
+    'pools.available') and builds/sends Ceph broker requests (pool
+    creation, erasure profiles, group access) using charmhelpers'
+    CephBrokerRq machinery.  Subclasses wire these handlers to endpoint
+    flags and implement initial_ceph_response().
+    """
+
+    def joined(self):
+        # A unit has joined; expose the relation as 'connected'.
+        reactive.set_flag(self.expand_name('{endpoint_name}.connected'))
+
+    @property
+    def key(self):
+        # cephx key published by the monitors (None until available).
+        return self._key()
+
+    def _key(self):
+        return self.all_joined_units.received.get('key')
+
+    @property
+    def auth(self):
+        # Supported auth mode as published by the monitors.
+        return self._auth()
+
+    def _auth(self):
+        return self.all_joined_units.received.get('auth')
+
+    @property
+    def relation_name(self):
+        # Concrete endpoint name, used to target broker-request helpers.
+        return self.expand_name('{endpoint_name}')
+
+    def initial_ceph_response(self):
+        # Subclasses must return the dict of initial data (key/auth/...)
+        # whose completeness gates the 'available' flag.
+        raise NotImplementedError
+
+    def changed(self):
+        data = self.initial_ceph_response()
+        if all(data.values()):
+            reactive.set_flag(self.expand_name('{endpoint_name}.available'))
+
+        rq = self.get_current_request()
+        if rq:
+            log("changed broker_req: {}".format(rq.ops))
+
+        if rq and is_request_complete(rq, relation=self.relation_name):
+            log("Setting ceph-client.pools.available")
+            reactive.set_flag(
+                self.expand_name('{endpoint_name}.pools.available'))
+        else:
+            log("incomplete request. broker_req not found")
+
+    def broken(self):
+        # Relation is gone; withdraw every flag this endpoint manages.
+        reactive.clear_flag(
+            self.expand_name('{endpoint_name}.available'))
+        reactive.clear_flag(
+            self.expand_name('{endpoint_name}.connected'))
+        reactive.clear_flag(
+            self.expand_name('{endpoint_name}.pools.available'))
+
+    def create_replicated_pool(self, name, replicas=3, weight=None,
+                               pg_num=None, group=None, namespace=None,
+                               app_name=None, **kwargs):
+        """
+        Request pool setup
+
+        :param name: Name of pool to create
+        :type name: str
+        :param replicas: Number of replicas for supporting pools
+        :type replicas: int
+        :param weight: The percentage of data the pool makes up
+        :type weight: Optional[float]
+        :param pg_num: If not provided, this value will be calculated by the
+                       broker based on how many OSDs are in the cluster at the
+                       time of creation. Note that, if provided, this value
+                       will be capped at the current available maximum.
+        :type pg_num: Optional[int]
+        :param group: Group to add pool to.
+        :type group: Optional[str]
+        :param namespace: A group can optionally have a namespace defined that
+                          will be used to further restrict pool access.
+        :type namespace: Optional[str]
+        :param app_name: (Optional) Tag pool with application name. Note that
+                         there is certain protocols emerging upstream with
+                         regard to meaningful application names to use.
+                         Examples are ``rbd`` and ``rgw``.
+        :type app_name: Optional[str]
+        :param kwargs: Additional keyword arguments subject to validation.
+                       Refer to CephBrokerRq.add_op_create_replicated_pool
+                       method for documentation.
+        :type kwargs: Dict[str,any]
+        """
+        # Extend the in-flight request if there is one, else start fresh.
+        rq = self.get_current_request() or CephBrokerRq()
+        kwargs.update({
+            'name': name,
+            'replica_count': replicas,
+            'pg_num': pg_num,
+            'weight': weight,
+            'group': group,
+            'namespace': namespace,
+            'app_name': app_name,
+        })
+        rq.add_op_create_replicated_pool(**kwargs)
+        self.send_request_if_needed(rq)
+        # New op pending: pools are no longer known to be available.
+        reactive.clear_flag(
+            self.expand_name('{endpoint_name}.pools.available'))
+
+    def create_pool(self, name, replicas=3, weight=None, pg_num=None,
+                    group=None, namespace=None):
+        """
+        Request pool setup -- deprecated. Please use create_replicated_pool
+        or create_erasure_pool(which doesn't exist yet)
+
+        @param name: Name of pool to create
+        @param replicas: Number of replicas for supporting pools
+        @param weight: The percentage of data the pool makes up
+        @param pg_num: If not provided, this value will be calculated by the
+                       broker based on how many OSDs are in the cluster at the
+                       time of creation. Note that, if provided, this value
+                       will be capped at the current available maximum.
+        @param group: Group to add pool to.
+        @param namespace: A group can optionally have a namespace defined that
+                          will be used to further restrict pool access.
+        """
+        self.create_replicated_pool(name, replicas, weight, pg_num, group,
+                                    namespace)
+
+    def create_erasure_pool(self, name, erasure_profile=None,
+                            weight=None, group=None, app_name=None,
+                            max_bytes=None, max_objects=None,
+                            allow_ec_overwrites=False,
+                            **kwargs):
+        """
+        Request erasure coded pool setup
+
+        :param name: Name of pool to create
+        :type name: str
+        :param erasure_profile: Name of erasure profile for pool
+        :type erasure_profile: str
+        :param weight: The percentage of data the pool makes up
+        :type weight: Optional[float]
+        :param group: Group to add pool to.
+        :type group: Optional[str]
+        :param app_name: Name of application using pool
+        :type app_name: Optional[str]
+        :param max_bytes: Maximum bytes of quota to apply
+        :type max_bytes: Optional[int]
+        :param max_objects: Maximum object quota to apply
+        :type max_objects: Optional[int]
+        :param allow_ec_overwrites: Allow EC pools to be overwritten
+        :type allow_ec_overwrites: bool
+        :param kwargs: Additional keyword arguments subject to validation.
+                       Refer to CephBrokerRq.add_op_create_replicated_pool
+                       method for documentation.
+        :type kwargs: Dict[str,any]
+        """
+        rq = self.get_current_request() or CephBrokerRq()
+        kwargs.update({
+            'name': name,
+            'erasure_profile': erasure_profile,
+            'weight': weight,
+            'group': group,
+            'app_name': app_name,
+            'max_bytes': max_bytes,
+            'max_objects': max_objects,
+            'allow_ec_overwrites': allow_ec_overwrites,
+        })
+        rq.add_op_create_erasure_pool(**kwargs)
+        self.send_request_if_needed(rq)
+        reactive.clear_flag(
+            self.expand_name('{endpoint_name}.pools.available'))
+
+    def create_erasure_profile(self, name,
+                               erasure_type='jerasure',
+                               erasure_technique=None,
+                               k=None, m=None,
+                               failure_domain=None,
+                               lrc_locality=None,
+                               shec_durability_estimator=None,
+                               clay_helper_chunks=None,
+                               device_class=None,
+                               clay_scalar_mds=None,
+                               lrc_crush_locality=None):
+        """
+        Create erasure coding profile
+
+        @param name: Name of erasure coding profile
+        @param erasure_type: Erasure coding plugin to use
+        @param erasure_technique: Erasure coding technique to use
+        @param k: Number of data chunks
+        @param m: Number of coding chunks
+        @param failure_domain: Failure domain to use for PG placement
+        @param lrc_locality:
+            Group the coding and data chunks into sets
+            of size locality (lrc plugin)
+        @param shec_durability_estimator:
+            The number of parity chunks each of which includes
+            a data chunk in its calculation range (shec plugin)
+        @param clay_helper_chunks:
+            The number of helper chunks to use for recovery operations
+            (clay plugin)
+        @param device_class:
+            Device class to use for profile (ssd, hdd, nvme)
+        @param clay_scalar_mds:
+            Plugin to use for CLAY layered construction
+            (jerasure|isa|shec)
+        @param lrc_crush_locality:
+            Type of crush bucket in which set of chunks
+            defined by lrc_locality will be stored.
+        """
+        rq = self.get_current_request() or CephBrokerRq()
+        rq.add_op_create_erasure_profile(
+            name=name,
+            erasure_type=erasure_type,
+            erasure_technique=erasure_technique,
+            k=k, m=m,
+            failure_domain=failure_domain,
+            lrc_locality=lrc_locality,
+            shec_durability_estimator=shec_durability_estimator,
+            clay_helper_chunks=clay_helper_chunks,
+            device_class=device_class,
+            clay_scalar_mds=clay_scalar_mds,
+            lrc_crush_locality=lrc_crush_locality
+        )
+        self.send_request_if_needed(rq)
+        reactive.clear_flag(
+            self.expand_name('{endpoint_name}.pools.available'))
+
+    def request_access_to_group(self, name, namespace=None, permission=None,
+                                key_name=None,
+                                object_prefix_permissions=None):
+        """
+        Adds the requested permissions to service's Ceph key
+
+        Adds the requested permissions to the current service's Ceph key,
+        allowing the key to access only the specified pools or
+        object prefixes. object_prefix_permissions should be a dictionary
+        keyed on the permission with the corresponding value being a list
+        of prefixes to apply that permission to.
+            {
+                'rwx': ['prefix1', 'prefix2'],
+                'class-read': ['prefix3']}
+        @param name: Target group name for permissions request.
+        @param namespace: namespace to further restrict pool access.
+        @param permission: Permission to be requested against pool
+        @param key_name: userid to grant permission to
+        @param object_prefix_permissions: Add object_prefix permissions.
+        """
+        current_request = self.get_current_request() or CephBrokerRq()
+        current_request.add_op_request_access_to_group(
+            name,
+            namespace=namespace,
+            permission=permission,
+            key_name=key_name,
+            object_prefix_permissions=object_prefix_permissions)
+        self.send_request_if_needed(current_request)
+
+    def send_request_if_needed(self, request):
+        """Send broker request if an equivalent request has not been sent
+
+        @param request: A CephBrokerRq object
+        """
+        if is_request_sent(request, relation=self.relation_name):
+            log('Request already sent but not complete, '
+                'not sending new request')
+        else:
+            # Publish the serialized broker request on every relation.
+            for relation in self.relations:
+                relation.to_publish['broker_req'] = json.loads(
+                    request.request)
+
+    def get_current_request(self):
+        """
+        Return the CephBrokerRq already published on the relation(s), or
+        None when nothing has been published yet.
+        """
+        broker_reqs = []
+        for relation in self.relations:
+            broker_req = relation.to_publish.get('broker_req', {})
+            if broker_req:
+                rq = CephBrokerRq()
+                rq.set_ops(broker_req['ops'])
+                broker_reqs.append(rq)
+        # Check that if there are multiple requests then they are the same.
+        assert all(x == broker_reqs[0] for x in broker_reqs)
+        if broker_reqs:
+            return broker_reqs[0]
+
+    def get_remote_all(self, key, default=None):
+        """Return a list of all values presented by remote units for key"""
+        # NOTE(review): iterates relation.units -- confirm the installed
+        # charms.reactive release exposes .units (newer APIs use
+        # joined_units).
+        values = []
+        for relation in self.relations:
+            for unit in relation.units:
+                value = unit.received.get(key, default)
+                if value:
+                    values.append(value)
+        return list(set(values))
+
+    def mon_hosts(self):
+        """List of all monitor host public addresses"""
+        hosts = []
+        addrs = self.get_remote_all('ceph-public-address')
+        for ceph_addrs in addrs:
+            # NOTE(jamespage): This looks odd but deals with
+            #                  use with ceph-proxy which
+            #                  presents all monitors in
+            #                  a single space delimited field.
+            for addr in ceph_addrs.split(' '):
+                hosts.append(format_ipv6_addr(addr) or addr)
+        hosts.sort()
+        return hosts
diff --git a/kubernetes-master/hooks/relations/ceph-client/provides.py b/kubernetes-master/hooks/relations/ceph-client/provides.py
new file mode 100644
index 0000000..a8fbd05
--- /dev/null
+++ b/kubernetes-master/hooks/relations/ceph-client/provides.py
@@ -0,0 +1,20 @@
+# Copyright 2020 Canonical Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from .lib import base_provides
+
+
+class CephClientProvider(base_provides.CephProvides):
+    """
+    Concrete provides side of the ceph-client interface; all behaviour
+    lives in base_provides.CephProvides.
+    """
+
+    pass
diff --git a/kubernetes-master/hooks/relations/ceph-client/requires.py b/kubernetes-master/hooks/relations/ceph-client/requires.py
new file mode 100644
index 0000000..f542246
--- /dev/null
+++ b/kubernetes-master/hooks/relations/ceph-client/requires.py
@@ -0,0 +1,46 @@
+# Copyright 2020 Canonical Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from .lib import base_requires
+
+from charms.reactive import (
+ when,
+)
+
+
+class CephClientRequires(base_requires.CephRequires):
+    """
+    Concrete requires side of the ceph-client interface: wires the
+    reactive endpoint lifecycle flags to the base-class handlers.
+    """
+
+    @when('endpoint.{endpoint_name}.joined')
+    def joined(self):
+        super().joined()
+
+    @when('endpoint.{endpoint_name}.changed')
+    def changed(self):
+        super().changed()
+
+    @when('endpoint.{endpoint_name}.departed')
+    def departed(self):
+        # Re-runs the 'changed' logic so flags are recomputed when units
+        # depart -- presumably intentional (not broken()); TODO confirm.
+        super().changed()
+
+    @when('endpoint.{endpoint_name}.broken')
+    def broken(self):
+        super().broken()
+
+    def initial_ceph_response(self):
+        # The minimum data set that gates '{endpoint_name}.available'.
+        data = {
+            'key': self.key,
+            'auth': self.auth,
+            'mon_hosts': self.mon_hosts()
+        }
+        return data
diff --git a/kubernetes-master/hooks/relations/container-runtime/.gitignore b/kubernetes-master/hooks/relations/container-runtime/.gitignore
new file mode 100644
index 0000000..894a44c
--- /dev/null
+++ b/kubernetes-master/hooks/relations/container-runtime/.gitignore
@@ -0,0 +1,104 @@
+# Byte-compiled / optimized / DLL files
+__pycache__/
+*.py[cod]
+*$py.class
+
+# C extensions
+*.so
+
+# Distribution / packaging
+.Python
+build/
+develop-eggs/
+dist/
+downloads/
+eggs/
+.eggs/
+lib/
+lib64/
+parts/
+sdist/
+var/
+wheels/
+*.egg-info/
+.installed.cfg
+*.egg
+MANIFEST
+
+# PyInstaller
+# Usually these files are written by a python script from a template
+# before PyInstaller builds the exe, so as to inject date/other infos into it.
+*.manifest
+*.spec
+
+# Installer logs
+pip-log.txt
+pip-delete-this-directory.txt
+
+# Unit test / coverage reports
+htmlcov/
+.tox/
+.coverage
+.coverage.*
+.cache
+nosetests.xml
+coverage.xml
+*.cover
+.hypothesis/
+.pytest_cache/
+
+# Translations
+*.mo
+*.pot
+
+# Django stuff:
+*.log
+local_settings.py
+db.sqlite3
+
+# Flask stuff:
+instance/
+.webassets-cache
+
+# Scrapy stuff:
+.scrapy
+
+# Sphinx documentation
+docs/_build/
+
+# PyBuilder
+target/
+
+# Jupyter Notebook
+.ipynb_checkpoints
+
+# pyenv
+.python-version
+
+# celery beat schedule file
+celerybeat-schedule
+
+# SageMath parsed files
+*.sage.py
+
+# Environments
+.env
+.venv
+env/
+venv/
+ENV/
+env.bak/
+venv.bak/
+
+# Spyder project settings
+.spyderproject
+.spyproject
+
+# Rope project settings
+.ropeproject
+
+# mkdocs documentation
+/site
+
+# mypy
+.mypy_cache/
diff --git a/kubernetes-master/hooks/relations/container-runtime/LICENSE b/kubernetes-master/hooks/relations/container-runtime/LICENSE
new file mode 100644
index 0000000..261eeb9
--- /dev/null
+++ b/kubernetes-master/hooks/relations/container-runtime/LICENSE
@@ -0,0 +1,201 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/kubernetes-master/hooks/relations/container-runtime/README.md b/kubernetes-master/hooks/relations/container-runtime/README.md
new file mode 100644
index 0000000..4620013
--- /dev/null
+++ b/kubernetes-master/hooks/relations/container-runtime/README.md
@@ -0,0 +1,45 @@
+# interface-container-runtime
+
+## Overview
+
+This interface handles communication between subordinate charms that provide a container runtime and charms that require one.
+
+## Usage
+
+### Provides
+
+The providing side of the container interface provides a place for a container runtime to connect to.
+
+Your charm should respond to the `endpoint.{endpoint_name}.available` state,
+which indicates that there is a container runtime connected.
+
+A trivial example of handling this interface would be:
+
+```python
+@when('endpoint.containerd.joined')
+def update_kubelet_config(containerd):
+ endpoint = endpoint_from_flag('endpoint.containerd.joined')
+ config = endpoint.get_config()
+ kubelet.config['container-runtime'] = \
+ config['runtime']
+```
+
+### Requires
+
+The requiring side of the container interface requires a place for a container runtime to connect to.
+
+Your charm should set `{endpoint_name}.available` state,
+which indicates that the container runtime is connected.
+
+A trivial example of handling this interface would be:
+
+```python
+@when('endpoint.containerd.joined')
+def publish_config():
+ endpoint = endpoint_from_flag('endpoint.containerd.joined')
+ endpoint.set_config(
+ socket='unix:///var/run/containerd/containerd.sock',
+ runtime='remote',
+ nvidia_enabled=False
+ )
+```
diff --git a/kubernetes-master/hooks/relations/container-runtime/__init__.py b/kubernetes-master/hooks/relations/container-runtime/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/kubernetes-master/hooks/relations/container-runtime/interface.yaml b/kubernetes-master/hooks/relations/container-runtime/interface.yaml
new file mode 100644
index 0000000..294be1e
--- /dev/null
+++ b/kubernetes-master/hooks/relations/container-runtime/interface.yaml
@@ -0,0 +1,4 @@
+name: container-runtime
+summary: Interface for relating to container runtimes
+version: 1
+maintainer: "Joe Borg "
diff --git a/kubernetes-master/hooks/relations/container-runtime/provides.py b/kubernetes-master/hooks/relations/container-runtime/provides.py
new file mode 100644
index 0000000..a9768a8
--- /dev/null
+++ b/kubernetes-master/hooks/relations/container-runtime/provides.py
@@ -0,0 +1,55 @@
+from charms.reactive import (
+ Endpoint,
+ toggle_flag
+)
+
+
+class ContainerRuntimeProvides(Endpoint):
+ def manage_flags(self):
+ toggle_flag(self.expand_name('endpoint.{endpoint_name}.available'),
+ self.is_joined)
+
+ def _get_config(self, key):
+ """
+ Get the published configuration for a given key.
+
+ :param key: String dict key
+ :return: String value for given key
+ """
+ return self.all_joined_units.received.get(key)
+
+ def get_nvidia_enabled(self):
+ """
+ Get the published nvidia config.
+
+ :return: String
+ """
+ return self._get_config(key='nvidia_enabled')
+
+ def get_runtime(self):
+ """
+ Get the published runtime config.
+
+ :return: String
+ """
+ return self._get_config(key='runtime')
+
+ def get_socket(self):
+ """
+ Get the published socket config.
+
+ :return: String
+ """
+ return self._get_config(key='socket')
+
+ def set_config(self, sandbox_image=None):
+ """
+ Set the configuration to be published.
+
+ :param sandbox_image: String to optionally override the sandbox image
+ :return: None
+ """
+ for relation in self.relations:
+ relation.to_publish.update({
+ 'sandbox_image': sandbox_image
+ })
diff --git a/kubernetes-master/hooks/relations/container-runtime/requires.py b/kubernetes-master/hooks/relations/container-runtime/requires.py
new file mode 100644
index 0000000..c461b68
--- /dev/null
+++ b/kubernetes-master/hooks/relations/container-runtime/requires.py
@@ -0,0 +1,61 @@
+from charms.reactive import (
+ Endpoint,
+ clear_flag,
+ data_changed,
+ is_data_changed,
+ toggle_flag
+)
+
+
+class ContainerRuntimeRequires(Endpoint):
+ def manage_flags(self):
+ toggle_flag(self.expand_name('endpoint.{endpoint_name}.available'),
+ self.is_joined)
+ toggle_flag(self.expand_name('endpoint.{endpoint_name}.reconfigure'),
+ self.is_joined and self._config_changed())
+
+ def _config_changed(self):
+ """
+ Determine if our received data has changed.
+
+ :return: Boolean
+ """
+ # NB: this call should match whatever we're tracking in handle_remote_config
+ return is_data_changed('containerd.remote_config',
+ [self.get_sandbox_image()])
+
+ def handle_remote_config(self):
+ """
+ Keep track of received data so we can know if it changes.
+
+ :return: None
+ """
+ clear_flag(self.expand_name('endpoint.{endpoint_name}.reconfigure'))
+ # Presently, we only care about one piece of remote config. Expand
+ # the list as needed.
+ data_changed('containerd.remote_config',
+ [self.get_sandbox_image()])
+
+ def get_sandbox_image(self):
+ """
+ Get the sandbox image URI if a remote has published one.
+
+ :return: String: remotely configured sandbox image
+ """
+ return self.all_joined_units.received.get('sandbox_image')
+
+ def set_config(self, socket, runtime, nvidia_enabled):
+ """
+ Set the configuration to be published.
+
+ :param socket: String uri to runtime socket
+ :param runtime: String runtime executable
+ :param nvidia_enabled: Boolean nvidia runtime enabled
+ :return: None
+ """
+ for relation in self.relations:
+ relation.to_publish.update({
+ 'socket': socket,
+ 'runtime': runtime,
+ 'nvidia_enabled': nvidia_enabled
+ })
diff --git a/kubernetes-master/hooks/relations/coordinator/peers.py b/kubernetes-master/hooks/relations/coordinator/peers.py
new file mode 100644
index 0000000..f443bf6
--- /dev/null
+++ b/kubernetes-master/hooks/relations/coordinator/peers.py
@@ -0,0 +1,21 @@
+# Copyright 2016-2018 Canonical Ltd.
+#
+# This file is part of the Coordinator Layer for Juju charms.
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 3, as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranties of
+# MERCHANTABILITY, SATISFACTORY QUALITY, or FITNESS FOR A PARTICULAR
+# PURPOSE. See the GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program.  If not, see <http://www.gnu.org/licenses/>.
+
+from charms import reactive
+
+
+class CoordinatorPeer(reactive.Endpoint):
+ pass
diff --git a/kubernetes-master/hooks/relations/etcd/.gitignore b/kubernetes-master/hooks/relations/etcd/.gitignore
new file mode 100644
index 0000000..e43b0f9
--- /dev/null
+++ b/kubernetes-master/hooks/relations/etcd/.gitignore
@@ -0,0 +1 @@
+.DS_Store
diff --git a/kubernetes-master/hooks/relations/etcd/README.md b/kubernetes-master/hooks/relations/etcd/README.md
new file mode 100644
index 0000000..9ed51dd
--- /dev/null
+++ b/kubernetes-master/hooks/relations/etcd/README.md
@@ -0,0 +1,89 @@
+# Overview
+
+This interface layer handles the communication with Etcd via the `etcd`
+interface.
+
+# Usage
+
+## Requires
+
+This interface layer will set the following states, as appropriate:
+
+ * `{relation_name}.connected` The relation is established, but Etcd may not
+ yet have provided any connection or service information.
+
+ * `{relation_name}.available` Etcd has provided its connection string
+ information, and is ready to serve as a KV store.
+ The provided information can be accessed via the following methods:
+ * `etcd.get_connection_string()`
+ * `etcd.get_version()`
+ * `{relation_name}.tls.available` Etcd has provided the connection string
+ information, and the tls client credentials to communicate with it.
+ The client credentials can be accessed via:
+ * `{relation_name}.get_client_credentials()` returning a dictionary of
+     the client certificate, key and CA.
+ * `{relation_name}.save_client_credentials(key, cert, ca)` is a convenience
+ method to save the client certificate, key and CA to files of your
+ choosing.
+
+
+For example, a common application for this is configuring an applications
+backend key/value storage, like Docker.
+
+```python
+@when('etcd.available', 'docker.available')
+def swarm_etcd_cluster_setup(etcd):
+ con_string = etcd.connection_string().replace('http', 'etcd')
+ opts = {}
+ opts['connection_string'] = con_string
+ render('docker-compose.yml', 'files/swarm/docker-compose.yml', opts)
+
+```
+
+
+## Provides
+
+A charm providing this interface is providing the Etcd rest api service.
+
+This interface layer will set the following states, as appropriate:
+
+ * `{relation_name}.connected` One or more clients of any type have
+ been related. The charm should call the following methods to provide the
+ appropriate information to the clients:
+
+ * `{relation_name}.set_connection_string(string, version)`
+ * `{relation_name}.set_client_credentials(key, cert, ca)`
+
+Example:
+
+```python
+@when('db.connected')
+def send_connection_details(db):
+ cert = leader_get('client_certificate')
+ key = leader_get('client_key')
+ ca = leader_get('certificate_authority')
+ # Set the key, cert, and ca on the db relation
+ db.set_client_credentials(key, cert, ca)
+
+ port = hookenv.config().get('port')
+ # Get all the peers participating in the cluster relation.
+ addresses = cluster.get_peer_addresses()
+ connections = []
+ for address in addresses:
+ connections.append('http://{0}:{1}'.format(address, port))
+ # Set the connection string on the db relation.
+    db.set_connection_string(','.join(connections))
+```
+
+
+# Contact Information
+
+### Maintainer
+- Charles Butler
+
+
+# Etcd
+
+- [Etcd](https://coreos.com/etcd/) home page
+- [Etcd bug trackers](https://github.com/coreos/etcd/issues)
+- [Etcd Juju Charm](http://jujucharms.com/?text=etcd)
diff --git a/kubernetes-master/hooks/relations/etcd/__init__.py b/kubernetes-master/hooks/relations/etcd/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/kubernetes-master/hooks/relations/etcd/interface.yaml b/kubernetes-master/hooks/relations/etcd/interface.yaml
new file mode 100644
index 0000000..929b1d5
--- /dev/null
+++ b/kubernetes-master/hooks/relations/etcd/interface.yaml
@@ -0,0 +1,4 @@
+name: etcd
+summary: Interface for relating to ETCD
+version: 2
+maintainer: "Charles Butler "
diff --git a/kubernetes-master/hooks/relations/etcd/peers.py b/kubernetes-master/hooks/relations/etcd/peers.py
new file mode 100644
index 0000000..90980d1
--- /dev/null
+++ b/kubernetes-master/hooks/relations/etcd/peers.py
@@ -0,0 +1,70 @@
+#!/usr/bin/python
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from charms.reactive import RelationBase
+from charms.reactive import hook
+from charms.reactive import scopes
+
+
+class EtcdPeer(RelationBase):
+ '''This class handles peer relation communication by setting states that
+ the reactive code can respond to. '''
+
+ scope = scopes.UNIT
+
+ @hook('{peers:etcd}-relation-joined')
+ def peer_joined(self):
+ '''A new peer has joined, set the state on the unit so we can track
+ when they are departed. '''
+ conv = self.conversation()
+ conv.set_state('{relation_name}.joined')
+
+ @hook('{peers:etcd}-relation-departed')
+ def peers_going_away(self):
+ '''Trigger a state on the unit that it is leaving. We can use this
+ state in conjunction with the joined state to determine which unit to
+ unregister from the etcd cluster. '''
+ conv = self.conversation()
+ conv.remove_state('{relation_name}.joined')
+ conv.set_state('{relation_name}.departing')
+
+ def dismiss(self):
+ '''Remove the departing state from all other units in the conversation,
+ and we can resume normal operation.
+ '''
+ for conv in self.conversations():
+ conv.remove_state('{relation_name}.departing')
+
+ def get_peers(self):
+ '''Return a list of names for the peers participating in this
+ conversation scope. '''
+ peers = []
+ # Iterate over all the conversations of this type.
+ for conversation in self.conversations():
+ peers.append(conversation.scope)
+ return peers
+
+ def set_db_ingress_address(self, address):
+ '''Set the ingress address belonging to the db relation.'''
+ for conversation in self.conversations():
+ conversation.set_remote('db-ingress-address', address)
+
+ def get_db_ingress_addresses(self):
+ '''Return a list of db ingress addresses'''
+ addresses = []
+ # Iterate over all the conversations of this type.
+ for conversation in self.conversations():
+ address = conversation.get_remote('db-ingress-address')
+ if address:
+ addresses.append(address)
+ return addresses
diff --git a/kubernetes-master/hooks/relations/etcd/provides.py b/kubernetes-master/hooks/relations/etcd/provides.py
new file mode 100644
index 0000000..3cfc174
--- /dev/null
+++ b/kubernetes-master/hooks/relations/etcd/provides.py
@@ -0,0 +1,47 @@
+#!/usr/bin/python
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from charms.reactive import RelationBase
+from charms.reactive import hook
+from charms.reactive import scopes
+
+
+class EtcdProvider(RelationBase):
+ scope = scopes.GLOBAL
+
+ @hook('{provides:etcd}-relation-{joined,changed}')
+ def joined_or_changed(self):
+ ''' Set the connected state from the provides side of the relation. '''
+ self.set_state('{relation_name}.connected')
+
+ @hook('{provides:etcd}-relation-{broken,departed}')
+ def broken_or_departed(self):
+ '''Remove connected state from the provides side of the relation. '''
+ conv = self.conversation()
+ if len(conv.units) == 1:
+ conv.remove_state('{relation_name}.connected')
+
+ def set_client_credentials(self, key, cert, ca):
+ ''' Set the client credentials on the global conversation for this
+ relation. '''
+ self.set_remote('client_key', key)
+ self.set_remote('client_ca', ca)
+ self.set_remote('client_cert', cert)
+
+ def set_connection_string(self, connection_string, version=''):
+ ''' Set the connection string on the global conversation for this
+ relation. '''
+ # Note: Version added as a late-dependency for 2 => 3 migration
+ # If no version is specified, consumers should presume etcd 2.x
+ self.set_remote('connection_string', connection_string)
+ self.set_remote('version', version)
diff --git a/kubernetes-master/hooks/relations/etcd/requires.py b/kubernetes-master/hooks/relations/etcd/requires.py
new file mode 100644
index 0000000..435532f
--- /dev/null
+++ b/kubernetes-master/hooks/relations/etcd/requires.py
@@ -0,0 +1,80 @@
+#!/usr/bin/python
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import os
+
+from charms.reactive import RelationBase
+from charms.reactive import hook
+from charms.reactive import scopes
+
+
+class EtcdClient(RelationBase):
+ scope = scopes.GLOBAL
+
+ @hook('{requires:etcd}-relation-{joined,changed}')
+ def changed(self):
+ ''' Indicate the relation is connected, and if the relation data is
+ set it is also available. '''
+ self.set_state('{relation_name}.connected')
+
+ if self.get_connection_string():
+ self.set_state('{relation_name}.available')
+ # Get the ca, key, cert from the relation data.
+ cert = self.get_client_credentials()
+            # The tls state depends on the existence of the ca, key and cert.
+ if cert['client_cert'] and cert['client_key'] and cert['client_ca']: # noqa
+ self.set_state('{relation_name}.tls.available')
+
+ @hook('{requires:etcd}-relation-{broken, departed}')
+ def broken(self):
+ ''' Indicate the relation is no longer available and not connected. '''
+ self.remove_state('{relation_name}.available')
+ self.remove_state('{relation_name}.connected')
+ self.remove_state('{relation_name}.tls.available')
+
+ def connection_string(self):
+        ''' This method is deprecated but ensures backward compatibility
+ @see get_connection_string(self). '''
+ return self.get_connection_string()
+
+ def get_connection_string(self):
+ ''' Return the connection string, if available, or None. '''
+ return self.get_remote('connection_string')
+
+ def get_version(self):
+        ''' Return the version of the etcd protocol being used, or None. '''
+ return self.get_remote('version')
+
+ def get_client_credentials(self):
+ ''' Return a dict with the client certificate, ca and key to
+ communicate with etcd using tls. '''
+ return {'client_cert': self.get_remote('client_cert'),
+ 'client_key': self.get_remote('client_key'),
+ 'client_ca': self.get_remote('client_ca')}
+
+ def save_client_credentials(self, key, cert, ca):
+ ''' Save all the client certificates for etcd to local files. '''
+ self._save_remote_data('client_cert', cert)
+ self._save_remote_data('client_key', key)
+ self._save_remote_data('client_ca', ca)
+
+ def _save_remote_data(self, key, path):
+ ''' Save the remote data to a file indicated by path creating the
+ parent directory if needed.'''
+ value = self.get_remote(key)
+ if value:
+ parent = os.path.dirname(path)
+ if not os.path.isdir(parent):
+ os.makedirs(parent)
+ with open(path, 'w') as stream:
+ stream.write(value)
diff --git a/kubernetes-master/hooks/relations/gcp-integration/.gitignore b/kubernetes-master/hooks/relations/gcp-integration/.gitignore
new file mode 100644
index 0000000..5f9f2c5
--- /dev/null
+++ b/kubernetes-master/hooks/relations/gcp-integration/.gitignore
@@ -0,0 +1,3 @@
+.tox
+__pycache__
+*.pyc
diff --git a/kubernetes-master/hooks/relations/gcp-integration/LICENSE b/kubernetes-master/hooks/relations/gcp-integration/LICENSE
new file mode 100644
index 0000000..d645695
--- /dev/null
+++ b/kubernetes-master/hooks/relations/gcp-integration/LICENSE
@@ -0,0 +1,202 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/kubernetes-master/hooks/relations/gcp-integration/README.md b/kubernetes-master/hooks/relations/gcp-integration/README.md
new file mode 100644
index 0000000..42861fb
--- /dev/null
+++ b/kubernetes-master/hooks/relations/gcp-integration/README.md
@@ -0,0 +1,28 @@
+# Overview
+
+This layer encapsulates the `gcp-integration` interface communication protocol
+and provides an API for charms on either side of relations using this
+interface.
+
+## Usage
+
+In your charm's `layer.yaml`, ensure that `interface:gcp-integration` is
+included in the `includes` section:
+
+```yaml
+includes: ['layer:basic', 'interface:gcp-integration']
+```
+
+And in your charm's `metadata.yaml`, ensure that a relation endpoint is defined
+using the `gcp-integration` interface protocol:
+
+```yaml
+requires:
+ gcp:
+ interface: gcp-integration
+```
+
+For documentation on how to use the API for this interface, see:
+
+* [Requires API documentation](docs/requires.md)
+* [Provides API documentation](docs/provides.md) (this will only be used by the gcp-integrator charm)
diff --git a/kubernetes-master/hooks/relations/gcp-integration/__init__.py b/kubernetes-master/hooks/relations/gcp-integration/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/kubernetes-master/hooks/relations/gcp-integration/copyright b/kubernetes-master/hooks/relations/gcp-integration/copyright
new file mode 100644
index 0000000..a91bdf1
--- /dev/null
+++ b/kubernetes-master/hooks/relations/gcp-integration/copyright
@@ -0,0 +1,16 @@
+Format: http://dep.debian.net/deps/dep5/
+
+Files: *
+Copyright: Copyright 2018, Canonical Ltd., All Rights Reserved.
+License: Apache License 2.0
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+ .
+ http://www.apache.org/licenses/LICENSE-2.0
+ .
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/kubernetes-master/hooks/relations/gcp-integration/docs/provides.md b/kubernetes-master/hooks/relations/gcp-integration/docs/provides.md
new file mode 100644
index 0000000..6f29a39
--- /dev/null
+++ b/kubernetes-master/hooks/relations/gcp-integration/docs/provides.md
@@ -0,0 +1,183 @@
+
+# provides
+
+
+This is the provides side of the interface layer, for use only by the GCP
+integration charm itself.
+
+The flags that are set by the provides side of this interface are:
+
+* **`endpoint.{endpoint_name}.requested`** This flag is set when there is
+ a new or updated request by a remote unit for GCP integration features.
+ The GCP integration charm should then iterate over each request, perform
+ whatever actions are necessary to satisfy those requests, and then mark
+ them as complete.
+
+
+
+
+A list of the IDs of all established relations.
+
+
+### requests
+
+
+A list of the new or updated `IntegrationRequests` that
+have been made.
+
+
+### get_departed_charms
+
+```python
+GCPIntegrationProvides.get_departed_charms(self)
+```
+
+Get a list of all charms that have had all units depart since the
+last time this was called.
+
+
+### mark_completed
+
+```python
+GCPIntegrationProvides.mark_completed(self)
+```
+
+Mark all requests as completed and remove the `requests-pending` flag.
+
+
+## IntegrationRequest
+
+```python
+IntegrationRequest(self, unit)
+```
+
+A request for integration from a single remote unit.
+
+
+### application_name
+
+
+The name of the application making the request.
+
+
+### charm
+
+
+The charm name reported for this request.
+
+
+### has_credentials
+
+
+Whether or not credentials have been set via `set_credentials`.
+
+
+### instance
+
+
+The instance name reported for this request.
+
+
+### instance_labels
+
+
+Mapping of label names to values to apply to this instance.
+
+
+### is_changed
+
+
+Whether this request has changed since the last time it was
+marked completed (if ever).
+
+
+### model_uuid
+
+
+The UUID of the model containing the application making this request.
+
+
+### relation_id
+
+
+The ID of the relation for the unit making the request.
+
+
+
+```python
+IntegrationRequest.mark_completed(self)
+```
+
+Mark this request as having been completed.
+
+
+### set_credentials
+
+```python
+IntegrationRequest.set_credentials(self, credentials)
+```
+
+Set the credentials for this request.
+
diff --git a/kubernetes-master/hooks/relations/gcp-integration/docs/requires.md b/kubernetes-master/hooks/relations/gcp-integration/docs/requires.md
new file mode 100644
index 0000000..36e23c2
--- /dev/null
+++ b/kubernetes-master/hooks/relations/gcp-integration/docs/requires.md
@@ -0,0 +1,140 @@
+
+# requires
+
+
+This is the requires side of the interface layer, for use in charms that
+wish to request integration with GCP native features. The integration will
+be provided by the GCP integration charm, which allows the requiring charm
+to not require cloud credentials itself and not have a lot of GCP specific
+API code.
+
+The flags that are set by the requires side of this interface are:
+
+* **`endpoint.{endpoint_name}.joined`** This flag is set when the relation
+ has been joined, and the charm should then use the methods documented below
+ to request specific GCP features. This flag is automatically removed if
+ the relation is broken. It should not be removed by the charm.
+
+* **`endpoint.{endpoint_name}.ready`** This flag is set once the requested
+ features have been enabled for the GCP instance on which the charm is
+ running. This flag is automatically removed if new integration features
+ are requested. It should not be removed by the charm.
+
+
+## GCPIntegrationRequires
+
+```python
+GCPIntegrationRequires(self, *args, **kwargs)
+```
+
+Interface to request integration access.
+
+Note that due to resource limits and permissions granularity, policies are
+limited to being applied at the charm level. That means that, if any
+permissions are requested (i.e., any of the enable methods are called),
+what is granted will be the sum of those ever requested by any instance of
+the charm on this cloud.
+
+Labels, on the other hand, will be instance specific.
+
+Example usage:
+
+```python
+from charms.reactive import when, endpoint_from_flag
+
+@when('endpoint.gcp.joined')
+def request_gcp_integration():
+ gcp = endpoint_from_flag('endpoint.gcp.joined')
+ gcp.label_instance({
+ 'tag1': 'value1',
+ 'tag2': None,
+ })
+ gcp.request_load_balancer_management()
+ # ...
+
+@when('endpoint.gcp.ready')
+def gcp_integration_ready():
+ update_config_enable_gcp()
+```
+
+
+### instance
+
+
+This unit's instance name.
+
+
+### is_ready
+
+
+Whether or not the request for this instance has been completed.
+
+
+### zone
+
+
+The zone this unit is in.
+
+
+### label_instance
+
+```python
+GCPIntegrationRequires.label_instance(self, labels)
+```
+
+Request that the given labels be applied to this instance.
+
+__Parameters__
+
+- __`labels` (dict)__: Mapping of labels names to values.
+
+
+### enable_instance_inspection
+
+```python
+GCPIntegrationRequires.enable_instance_inspection(self)
+```
+
+Request the ability to inspect instances.
+
+
+### enable_network_management
+
+```python
+GCPIntegrationRequires.enable_network_management(self)
+```
+
+Request the ability to manage networking.
+
+
+### enable_security_management
+
+```python
+GCPIntegrationRequires.enable_security_management(self)
+```
+
+Request the ability to manage security (e.g., firewalls).
+
+
+### enable_block_storage_management
+
+```python
+GCPIntegrationRequires.enable_block_storage_management(self)
+```
+
+Request the ability to manage block storage.
+
+
+### enable_dns_management
+
+```python
+GCPIntegrationRequires.enable_dns_management(self)
+```
+
+Request the ability to manage DNS.
+
+
+### enable_object_storage_access
+
+```python
+GCPIntegrationRequires.enable_object_storage_access(self)
+```
+
+Request the ability to access object storage.
+
+
+### enable_object_storage_management
+
+```python
+GCPIntegrationRequires.enable_object_storage_management(self)
+```
+
+Request the ability to manage object storage.
+
diff --git a/kubernetes-master/hooks/relations/gcp-integration/interface.yaml b/kubernetes-master/hooks/relations/gcp-integration/interface.yaml
new file mode 100644
index 0000000..9966e3f
--- /dev/null
+++ b/kubernetes-master/hooks/relations/gcp-integration/interface.yaml
@@ -0,0 +1,4 @@
+name: gcp-integration
+summary: Interface for connecting to the GCP integrator charm.
+version: 1
+maintainer: Cory Johns
diff --git a/kubernetes-master/hooks/relations/gcp-integration/make_docs b/kubernetes-master/hooks/relations/gcp-integration/make_docs
new file mode 100644
index 0000000..bd4e54e
--- /dev/null
+++ b/kubernetes-master/hooks/relations/gcp-integration/make_docs
@@ -0,0 +1,20 @@
+#!.tox/py3/bin/python
+
+import sys
+from shutil import rmtree
+from unittest.mock import patch
+
+import pydocmd.__main__
+
+
+with patch('charmhelpers.core.hookenv.metadata') as metadata:
+ metadata.return_value = {
+ 'requires': {'gcp': {'interface': 'gcp-integration'}},
+ 'provides': {'gcp': {'interface': 'gcp-integration'}},
+ }
+ sys.path.insert(0, '.')
+ print(sys.argv)
+ if len(sys.argv) == 1:
+ sys.argv.extend(['build'])
+ pydocmd.__main__.main()
+ rmtree('_build')
diff --git a/kubernetes-master/hooks/relations/gcp-integration/provides.py b/kubernetes-master/hooks/relations/gcp-integration/provides.py
new file mode 100644
index 0000000..ba34b0d
--- /dev/null
+++ b/kubernetes-master/hooks/relations/gcp-integration/provides.py
@@ -0,0 +1,253 @@
+"""
+This is the provides side of the interface layer, for use only by the GCP
+integration charm itself.
+
+The flags that are set by the provides side of this interface are:
+
+* **`endpoint.{endpoint_name}.requested`** This flag is set when there is
+ a new or updated request by a remote unit for GCP integration features.
+ The GCP integration charm should then iterate over each request, perform
+ whatever actions are necessary to satisfy those requests, and then mark
+ them as complete.
+"""
+
+from operator import attrgetter
+
+from charms.reactive import Endpoint
+from charms.reactive import when
+from charms.reactive import toggle_flag, clear_flag
+
+
+class GCPIntegrationProvides(Endpoint):
+ """
+ Example usage:
+
+ ```python
+ from charms.reactive import when, endpoint_from_flag
+ from charms import layer
+
+ @when('endpoint.gcp.requests-pending')
+ def handle_requests():
+ gcp = endpoint_from_flag('endpoint.gcp.requests-pending')
+ for request in gcp.requests:
+ if request.instance_labels:
+ layer.gcp.label_instance(
+ request.instance,
+ request.zone,
+ request.instance_labels)
+ if request.requested_load_balancer_management:
+ layer.gcp.enable_load_balancer_management(
+ request.charm,
+ request.instance,
+ request.zone,
+ )
+ # ...
+ gcp.mark_completed()
+ ```
+ """
+
+ @when('endpoint.{endpoint_name}.changed')
+ def check_requests(self):
+ toggle_flag(self.expand_name('requests-pending'),
+ len(self.requests) > 0)
+ clear_flag(self.expand_name('changed'))
+
+ @property
+ def requests(self):
+ """
+ A list of the new or updated #IntegrationRequests that
+ have been made.
+ """
+ if not hasattr(self, '_requests'):
+ all_requests = [IntegrationRequest(unit)
+ for unit in self.all_joined_units]
+ is_changed = attrgetter('is_changed')
+ self._requests = list(filter(is_changed, all_requests))
+ return self._requests
+
+ @property
+ def relation_ids(self):
+ """
+ A list of the IDs of all established relations.
+ """
+ return [relation.relation_id for relation in self.relations]
+
+ def get_departed_charms(self):
+ """
+ Get a list of all charms that have had all units depart since the
+ last time this was called.
+ """
+ joined_charms = {unit.received['charm']
+ for unit in self.all_joined_units
+ if unit.received['charm']}
+ departed_charms = [unit.received['charm']
+ for unit in self.all_departed_units
+ if unit.received['charm'] not in joined_charms]
+ self.all_departed_units.clear()
+ return departed_charms
+
+ def mark_completed(self):
+ """
+ Mark all requests as completed and remove the `requests-pending` flag.
+ """
+ for request in self.requests:
+ request.mark_completed()
+ clear_flag(self.expand_name('requests-pending'))
+ self._requests = []
+
+
+class IntegrationRequest:
+ """
+ A request for integration from a single remote unit.
+ """
+ def __init__(self, unit):
+ self._unit = unit
+
+ @property
+ def _to_publish(self):
+ return self._unit.relation.to_publish
+
+ @property
+ def _completed(self):
+ return self._to_publish.get('completed', {})
+
+ @property
+ def _requested(self):
+ return self._unit.received['requested']
+
+ @property
+ def is_changed(self):
+ """
+ Whether this request has changed since the last time it was
+ marked completed (if ever).
+ """
+ if not all([self.charm, self.instance, self.zone, self._requested]):
+ return False
+ return self._completed.get(self.instance) != self._requested
+
+ def mark_completed(self):
+ """
+ Mark this request as having been completed.
+ """
+ completed = self._completed
+ completed[self.instance] = self._requested
+ self._to_publish['completed'] = completed # have to explicitly update
+
+ def set_credentials(self, credentials):
+ """
+ Set the credentials for this request.
+ """
+ self._unit.relation.to_publish['credentials'] = credentials
+
+ @property
+ def has_credentials(self):
+ """
+ Whether or not credentials have been set via `set_credentials`.
+ """
+ return 'credentials' in self._unit.relation.to_publish
+
+ @property
+ def relation_id(self):
+ """
+ The ID of the relation for the unit making the request.
+ """
+ return self._unit.relation.relation_id
+
+ @property
+ def unit_name(self):
+ """
+ The name of the unit making the request.
+ """
+ return self._unit.unit_name
+
+ @property
+ def application_name(self):
+ """
+ The name of the application making the request.
+ """
+ return self._unit.application_name
+
+ @property
+ def charm(self):
+ """
+ The charm name reported for this request.
+ """
+ return self._unit.received['charm']
+
+ @property
+ def instance(self):
+ """
+ The instance name reported for this request.
+ """
+ return self._unit.received['instance']
+
+ @property
+ def zone(self):
+ """
+ The zone reported for this request.
+ """
+ return self._unit.received['zone']
+
+ @property
+ def model_uuid(self):
+ """
+ The UUID of the model containing the application making this request.
+ """
+ return self._unit.received['model-uuid']
+
+ @property
+ def instance_labels(self):
+ """
+ Mapping of label names to values to apply to this instance.
+ """
+ # uses dict() here to make a copy, just to be safe
+ return dict(self._unit.received.get('instance-labels', {}))
+
+ @property
+ def requested_instance_inspection(self):
+ """
+ Flag indicating whether the ability to inspect instances was requested.
+ """
+ return bool(self._unit.received['enable-instance-inspection'])
+
+ @property
+ def requested_network_management(self):
+ """
+ Flag indicating whether the ability to manage networking was requested.
+ """
+ return bool(self._unit.received['enable-network-management'])
+
+ @property
+ def requested_security_management(self):
+ """
+ Flag indicating whether security management was requested.
+ """
+ return bool(self._unit.received['enable-security-management'])
+
+ @property
+ def requested_block_storage_management(self):
+ """
+ Flag indicating whether block storage management was requested.
+ """
+ return bool(self._unit.received['enable-block-storage-management'])
+
+ @property
+ def requested_dns_management(self):
+ """
+ Flag indicating whether DNS management was requested.
+ """
+ return bool(self._unit.received['enable-dns-management'])
+
+ @property
+ def requested_object_storage_access(self):
+ """
+ Flag indicating whether object storage access was requested.
+ """
+ return bool(self._unit.received['enable-object-storage-access'])
+
+ @property
+ def requested_object_storage_management(self):
+ """
+ Flag indicating whether object storage management was requested.
+ """
+ return bool(self._unit.received['enable-object-storage-management'])
diff --git a/kubernetes-master/hooks/relations/gcp-integration/pydocmd.yml b/kubernetes-master/hooks/relations/gcp-integration/pydocmd.yml
new file mode 100644
index 0000000..9ef5e78
--- /dev/null
+++ b/kubernetes-master/hooks/relations/gcp-integration/pydocmd.yml
@@ -0,0 +1,16 @@
+site_name: 'GCP Integration Interface'
+
+generate:
+ - requires.md:
+ - requires
+ - requires.GCPIntegrationRequires+
+ - provides.md:
+ - provides
+ - provides.GCPIntegrationProvides+
+ - provides.IntegrationRequest+
+
+pages:
+ - Requires: requires.md
+ - Provides: provides.md
+
+gens_dir: docs
diff --git a/kubernetes-master/hooks/relations/gcp-integration/requires.py b/kubernetes-master/hooks/relations/gcp-integration/requires.py
new file mode 100644
index 0000000..bbd191f
--- /dev/null
+++ b/kubernetes-master/hooks/relations/gcp-integration/requires.py
@@ -0,0 +1,227 @@
+"""
+This is the requires side of the interface layer, for use in charms that
+wish to request integration with GCP native features. The integration will
+be provided by the GCP integration charm, which allows the requiring charm
+to not require cloud credentials itself and not have a lot of GCP specific
+API code.
+
+The flags that are set by the requires side of this interface are:
+
+* **`endpoint.{endpoint_name}.joined`** This flag is set when the relation
+ has been joined, and the charm should then use the methods documented below
+ to request specific GCP features. This flag is automatically removed if
+ the relation is broken. It should not be removed by the charm.
+
+* **`endpoint.{endpoint_name}.ready`** This flag is set once the requested
+ features have been enabled for the GCP instance on which the charm is
+ running. This flag is automatically removed if new integration features
+ are requested. It should not be removed by the charm.
+"""
+
+
+import os
+import random
+import string
+from urllib.parse import urljoin
+from urllib.request import urlopen, Request
+
+from charmhelpers.core import hookenv
+from charmhelpers.core import unitdata
+
+from charms.reactive import Endpoint
+from charms.reactive import when, when_not
+from charms.reactive import clear_flag, toggle_flag
+
+
+# block size to read data from GCP metadata service
+# (realistically, just needs to be bigger than ~20 chars)
+READ_BLOCK_SIZE = 2048
+
+
+class GCPIntegrationRequires(Endpoint):
+ """
+ Interface to request integration access.
+
+ Note that due to resource limits and permissions granularity, policies are
+ limited to being applied at the charm level. That means that, if any
+ permissions are requested (i.e., any of the enable methods are called),
+ what is granted will be the sum of those ever requested by any instance of
+ the charm on this cloud.
+
+ Labels, on the other hand, will be instance specific.
+
+ Example usage:
+
+ ```python
+ from charms.reactive import when, endpoint_from_flag
+
+ @when('endpoint.gcp.joined')
+ def request_gcp_integration():
+ gcp = endpoint_from_flag('endpoint.gcp.joined')
+ gcp.label_instance({
+ 'tag1': 'value1',
+ 'tag2': None,
+ })
+ gcp.request_load_balancer_management()
+ # ...
+
+ @when('endpoint.gcp.ready')
+ def gcp_integration_ready():
+ update_config_enable_gcp()
+ ```
+ """
+ # https://cloud.google.com/compute/docs/storing-retrieving-metadata
+ _metadata_url = 'http://metadata.google.internal/computeMetadata/v1/'
+ _instance_url = urljoin(_metadata_url, 'instance/name')
+ _zone_url = urljoin(_metadata_url, 'instance/zone')
+ _metadata_headers = {'Metadata-Flavor': 'Google'}
+
+ def __init__(self, *args, **kwargs):
+ super().__init__(*args, **kwargs)
+ self._instance = None
+ self._zone = None
+
+ @property
+ def _received(self):
+ """
+ Helper to streamline access to received data since we expect to only
+ ever be connected to a single GCP integration application with a
+ single unit.
+ """
+ return self.relations[0].joined_units.received
+
+ @property
+ def _to_publish(self):
+ """
+ Helper to streamline access to received data since we expect to only
+ ever be connected to a single GCP integration application with a
+ single unit.
+ """
+ return self.relations[0].to_publish
+
+ @when('endpoint.{endpoint_name}.joined')
+ def send_instance_info(self):
+ self._to_publish['charm'] = hookenv.charm_name()
+ self._to_publish['instance'] = self.instance
+ self._to_publish['zone'] = self.zone
+ self._to_publish['model-uuid'] = os.environ['JUJU_MODEL_UUID']
+
+ @when('endpoint.{endpoint_name}.changed')
+ def check_ready(self):
+ # My middle name is ready. No, that doesn't sound right.
+ # I eat ready for breakfast.
+ toggle_flag(self.expand_name('ready'), self.is_ready)
+ clear_flag(self.expand_name('changed'))
+
+ @when_not('endpoint.{endpoint_name}.joined')
+ def remove_ready(self):
+ clear_flag(self.expand_name('ready'))
+
+ @property
+ def instance(self):
+ """
+ This unit's instance name.
+ """
+ if self._instance is None:
+ cache_key = self.expand_name('instance')
+ cached = unitdata.kv().get(cache_key)
+ if cached:
+ self._instance = cached
+ else:
+ req = Request(self._instance_url,
+ headers=self._metadata_headers)
+ with urlopen(req) as fd:
+ instance = fd.read(READ_BLOCK_SIZE).decode('utf8').strip()
+ self._instance = instance
+ unitdata.kv().set(cache_key, self._instance)
+ return self._instance
+
+ @property
+ def zone(self):
+ """
+ The zone this unit is in.
+ """
+ if self._zone is None:
+ cache_key = self.expand_name('zone')
+ cached = unitdata.kv().get(cache_key)
+ if cached:
+ self._zone = cached
+ else:
+ req = Request(self._zone_url,
+ headers=self._metadata_headers)
+ with urlopen(req) as fd:
+ zone = fd.read(READ_BLOCK_SIZE).decode('utf8').strip()
+ self._zone = zone.split('/')[-1]
+ unitdata.kv().set(cache_key, self._zone)
+ return self._zone
+
+ @property
+ def is_ready(self):
+ """
+ Whether or not the request for this instance has been completed.
+ """
+ requested = self._to_publish['requested']
+ completed = self._received.get('completed', {}).get(self.instance)
+ return requested and requested == completed
+
+ @property
+ def credentials(self):
+ return self._received['credentials']
+
+ def _request(self, keyvals):
+ alphabet = string.ascii_letters + string.digits
+ nonce = ''.join(random.choice(alphabet) for _ in range(8))
+ self._to_publish.update(keyvals)
+ self._to_publish['requested'] = nonce
+ clear_flag(self.expand_name('ready'))
+
+ def label_instance(self, labels):
+ """
+ Request that the given labels be applied to this instance.
+
+ # Parameters
+ `labels` (dict): Mapping of labels names to values.
+ """
+ self._request({'instance-labels': dict(labels)})
+
+ def enable_instance_inspection(self):
+ """
+ Request the ability to inspect instances.
+ """
+ self._request({'enable-instance-inspection': True})
+
+ def enable_network_management(self):
+ """
+ Request the ability to manage networking.
+ """
+ self._request({'enable-network-management': True})
+
+ def enable_security_management(self):
+ """
+ Request the ability to manage security (e.g., firewalls).
+ """
+ self._request({'enable-security-management': True})
+
+ def enable_block_storage_management(self):
+ """
+ Request the ability to manage block storage.
+ """
+ self._request({'enable-block-storage-management': True})
+
+ def enable_dns_management(self):
+ """
+ Request the ability to manage DNS.
+ """
+ self._request({'enable-dns-management': True})  # key must match provides side's 'enable-dns-management'
+
+ def enable_object_storage_access(self):
+ """
+ Request the ability to access object storage.
+ """
+ self._request({'enable-object-storage-access': True})
+
+ def enable_object_storage_management(self):
+ """
+ Request the ability to manage object storage.
+ """
+ self._request({'enable-object-storage-management': True})
diff --git a/kubernetes-master/hooks/relations/grafana-dashboard/.gitignore b/kubernetes-master/hooks/relations/grafana-dashboard/.gitignore
new file mode 100644
index 0000000..01a6a44
--- /dev/null
+++ b/kubernetes-master/hooks/relations/grafana-dashboard/.gitignore
@@ -0,0 +1,3 @@
+.docs
+__pycache__
+*.pyc
diff --git a/kubernetes-master/hooks/relations/grafana-dashboard/LICENSE b/kubernetes-master/hooks/relations/grafana-dashboard/LICENSE
new file mode 100644
index 0000000..d645695
--- /dev/null
+++ b/kubernetes-master/hooks/relations/grafana-dashboard/LICENSE
@@ -0,0 +1,202 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/kubernetes-master/hooks/relations/grafana-dashboard/README.md b/kubernetes-master/hooks/relations/grafana-dashboard/README.md
new file mode 100644
index 0000000..b111350
--- /dev/null
+++ b/kubernetes-master/hooks/relations/grafana-dashboard/README.md
@@ -0,0 +1,92 @@
+# Interface grafana-dashboard
+
+This is a [Juju][] interface layer that enables a charm which provides
+dashboards to be imported into Grafana.
+
+You can download existing [Grafana Dashboards][] or use the [Grafana Dashboard
+Reference][] to create your own.
+
+# Example Usage
+
+First, you must define the relation endpoint in your charm's `metadata.yaml`:
+
+```yaml
+provides:
+ grafana:
+ interface: grafana-dashboard
+```
+
+Next, you must ensure the interface layer is included in your `layer.yaml`:
+
+```yaml
+includes:
+ - interface:grafana-dashboard
+```
+
+Then, in your reactive code, add the following, modifying the dashboard data as
+your charm needs:
+
+```python
+import json
+from charms.reactive import endpoint_from_flag
+
+
+@when('endpoint.grafana.joined')
+def register_grafana_dashboards():
+ grafana = endpoint_from_flag('endpoint.grafana.joined')
+ for dashboard_file in Path('files/grafana').glob('*.json'):
+ dashboard = json.loads(dashboard_file.read_text())
+ grafana.register_dashboard(name=dashboard_file.stem,
+ dashboard=dashboard)
+```
+
+
+
+# Reference
+
+* [common.md](docs/common.md)
+ * [ImportRequest](docs/common.md#importrequest)
+ * [egress_subnets](docs/common.md#importrequest-egress_subnets)
+ * [ingress_address](docs/common.md#importrequest-ingress_address)
+ * [is_created](docs/common.md#importrequest-is_created)
+ * [is_received](docs/common.md#importrequest-is_received)
+ * [respond](docs/common.md#importrequest-respond)
+ * [ImportResponse](docs/common.md#importresponse)
+ * [name](docs/common.md#importresponse-name)
+* [provides.md](docs/provides.md)
+ * [GrafanaDashboardProvides](docs/provides.md#grafanadashboardprovides)
+ * [all_departed_units](docs/provides.md#grafanadashboardprovides-all_departed_units)
+ * [all_joined_units](docs/provides.md#grafanadashboardprovides-all_joined_units)
+ * [all_units](docs/provides.md#grafanadashboardprovides-all_units)
+ * [endpoint_name](docs/provides.md#grafanadashboardprovides-endpoint_name)
+ * [failed_imports](docs/provides.md#grafanadashboardprovides-failed_imports)
+ * [is_joined](docs/provides.md#grafanadashboardprovides-is_joined)
+ * [joined](docs/provides.md#grafanadashboardprovides-joined)
+ * [manage_flags](docs/provides.md#grafanadashboardprovides-manage_flags)
+ * [register_dashboard](docs/provides.md#grafanadashboardprovides-register_dashboard)
+ * [relations](docs/provides.md#grafanadashboardprovides-relations)
+ * [requests](docs/provides.md#grafanadashboardprovides-requests)
+ * [responses](docs/provides.md#grafanadashboardprovides-responses)
+* [requires.md](docs/requires.md)
+ * [GrafanaDashboardRequires](docs/requires.md#grafanadashboardrequires)
+ * [all_departed_units](docs/requires.md#grafanadashboardrequires-all_departed_units)
+ * [all_joined_units](docs/requires.md#grafanadashboardrequires-all_joined_units)
+ * [all_requests](docs/requires.md#grafanadashboardrequires-all_requests)
+ * [all_units](docs/requires.md#grafanadashboardrequires-all_units)
+ * [endpoint_name](docs/requires.md#grafanadashboardrequires-endpoint_name)
+ * [is_joined](docs/requires.md#grafanadashboardrequires-is_joined)
+ * [joined](docs/requires.md#grafanadashboardrequires-joined)
+ * [manage_flags](docs/requires.md#grafanadashboardrequires-manage_flags)
+ * [new_requests](docs/requires.md#grafanadashboardrequires-new_requests)
+ * [relations](docs/requires.md#grafanadashboardrequires-relations)
+
+
+
+# Contact Information
+
+Maintainer: Cory Johns <Cory.Johns@canonical.com>
+
+
+[Juju]: https://jujucharms.com
+[Grafana Dashboards]: https://grafana.com/grafana/dashboards
+[Grafana Dashboard Reference]: https://grafana.com/docs/reference/dashboard/
diff --git a/kubernetes-master/hooks/relations/grafana-dashboard/__init__.py b/kubernetes-master/hooks/relations/grafana-dashboard/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/kubernetes-master/hooks/relations/grafana-dashboard/common.py b/kubernetes-master/hooks/relations/grafana-dashboard/common.py
new file mode 100644
index 0000000..99db2d8
--- /dev/null
+++ b/kubernetes-master/hooks/relations/grafana-dashboard/common.py
@@ -0,0 +1,38 @@
+from charms.reactive import BaseRequest, BaseResponse, Field
+
+
+class ImportResponse(BaseResponse):
+    """
+    Response half of the dashboard import request / response pair.
+
+    Carries whether the import succeeded and, on failure, a reason.
+    """
+
+    success = Field(description='Whether or not the import succeeded')
+    reason = Field(description='If failed, a description of why')
+
+    @property
+    def name(self):
+        """
+        The name given when the import was requested.
+        """
+        return self.request.name
+
+
+class ImportRequest(BaseRequest):
+    """
+    Request half of the dashboard import request / response pair.
+
+    Created by the providing side (see provides.py register_dashboard) and
+    answered by the requiring side via `respond`.
+    """
+
+    RESPONSE_CLASS = ImportResponse
+
+    name = Field(description="""
+                             Name of the dashboard to import. Informational only, so that
+                             you can tell which dashboard request this was, e.g. to check
+                             for success or failure.
+                             """)
+
+    dashboard = Field(description="""
+                                  Data structure defining the dashboard. Must be JSON
+                                  serializable. (Note: This should *not* be pre-serialized
+                                  JSON.)
+                                  """)
+
+    def respond(self, success, reason=None):
+        """
+        Acknowledge this request, and indicate success or failure with an
+        optional explanation.
+
+        # Parameters
+        `success` (bool): Whether the import succeeded.
+        `reason` (str): Optional description of why it failed.
+        """
+        # wrap the base respond method to make the success field required and
+        # positional, as well as to provide a better doc string
+        super().respond(success=success, reason=reason)
diff --git a/kubernetes-master/hooks/relations/grafana-dashboard/copyright b/kubernetes-master/hooks/relations/grafana-dashboard/copyright
new file mode 100644
index 0000000..69768db
--- /dev/null
+++ b/kubernetes-master/hooks/relations/grafana-dashboard/copyright
@@ -0,0 +1,16 @@
+Format: http://dep.debian.net/deps/dep5/
+
+Files: *
+Copyright: Copyright 2019, Canonical Ltd., All Rights Reserved.
+License: Apache License 2.0
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+ .
+ http://www.apache.org/licenses/LICENSE-2.0
+ .
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/kubernetes-master/hooks/relations/grafana-dashboard/docs/common.md b/kubernetes-master/hooks/relations/grafana-dashboard/docs/common.md
new file mode 100644
index 0000000..ab7de1d
--- /dev/null
+++ b/kubernetes-master/hooks/relations/grafana-dashboard/docs/common.md
@@ -0,0 +1,50 @@
+# `class ImportRequest(BaseRequest)`
+
+Base class for requests using the request / response pattern.
+
+Subclasses **must** set the ``RESPONSE_CLASS`` attribute to a subclass of
+the :class:`BaseResponse` which defines the fields that the response will
+use. They must also define additional attributes as :class:`Field`s.
+
+For example::
+
+ class TLSResponse(BaseResponse):
+ key = Field('Private key for the cert')
+ cert = Field('Public cert info')
+
+
+ class TLSRequest(BaseRequest):
+ RESPONSE_CLASS = TLSResponse
+
+ common_name = Field('Common Name (CN) for the cert to be created')
+ sans = Field('List of Subject Alternative Names (SANs)')
+
+## `egress_subnets`
+
+Subnets over which network traffic to the requester will flow.
+
+## `ingress_address`
+
+Address to use if a connection to the requester is required.
+
+## `is_created`
+
+Whether this request was created by this side of the relation.
+
+## `is_received`
+
+Whether this request was received by the other side of the relation.
+
+## `def respond(self, success, reason=None)`
+
+Acknowledge this request, and indicate success or failure with an
+optional explanation.
+
+# `class ImportResponse(BaseResponse)`
+
+Base class for responses using the request / response pattern.
+
+## `name`
+
+The name given when the import was requested.
+
diff --git a/kubernetes-master/hooks/relations/grafana-dashboard/docs/provides.md b/kubernetes-master/hooks/relations/grafana-dashboard/docs/provides.md
new file mode 100644
index 0000000..cc1f3dc
--- /dev/null
+++ b/kubernetes-master/hooks/relations/grafana-dashboard/docs/provides.md
@@ -0,0 +1,120 @@
+# `class GrafanaDashboardProvides(RequesterEndpoint)`
+
+Base class for Endpoints that create requests in the request / response
+pattern.
+
+Subclasses **must** set the ``REQUEST_CLASS`` attribute to a subclass
+of :class:`BaseRequest` which defines the fields the request will use.
+
+## `all_departed_units`
+
+Collection of all units that were previously part of any relation on
+this endpoint but which have since departed.
+
+This collection is persistent and mutable. The departed units will
+be kept until they are explicitly removed, to allow for reasonable
+cleanup of units that have left.
+
+Example: You need to run a command each time a unit departs the relation.
+
+.. code-block:: python
+
+ @when('endpoint.{endpoint_name}.departed')
+ def handle_departed_unit(self):
+ for name, unit in self.all_departed_units.items():
+ # run the command to remove `unit` from the cluster
+ # ..
+ self.all_departed_units.clear()
+ clear_flag(self.expand_name('departed'))
+
+Once a unit is departed, it will no longer show up in
+:attr:`all_joined_units`. Note that units are considered departed as
+soon as the departed hook is entered, which differs slightly from how
+the Juju primitives behave (departing units are still returned from
+``related-units`` until after the departed hook is complete).
+
+This collection is a :class:`KeyList`, so can be used as a mapping to
+look up units by their unit name, or iterated or accessed by index.
+
+## `all_joined_units`
+
+A list view of all the units of all relations attached to this
+:class:`~charms.reactive.endpoints.Endpoint`.
+
+This is actually a
+:class:`~charms.reactive.endpoints.CombinedUnitsView`, so the units
+will be in order by relation ID and then unit name, and you can access a
+merged view of all the units' data as a single mapping. You should be
+very careful when using the merged data collections, however, and
+consider carefully what will happen when the endpoint has multiple
+relations and multiple remote units on each. It is probably better to
+iterate over each unit and handle its data individually. See
+:class:`~charms.reactive.endpoints.CombinedUnitsView` for an
+explanation of how the merged data collections work.
+
+Note that, because a given application might be related multiple times
+on a given endpoint, units may show up in this collection more than
+once.
+
+## `all_units`
+
+.. deprecated:: 0.6.1
+ Use :attr:`all_joined_units` instead
+
+## `endpoint_name`
+
+Relation name of this endpoint.
+
+## `failed_imports`
+
+A list of requests that failed to import.
+
+## `is_joined`
+
+Whether this endpoint has remote applications attached to it.
+
+## `joined`
+
+.. deprecated:: 0.6.3
+ Use :attr:`is_joined` instead
+
+## `def manage_flags(self)`
+
+Method that subclasses can override to perform any flag management
+needed during startup.
+
+This will be called automatically after the framework-managed automatic
+flags have been updated.
+
+## `def register_dashboard(self, name, dashboard)`
+
+Request a dashboard to be imported.
+
+:param name: Name of dashboard. Informational only, so that you can
+ tell which dashboard request this was, e.g. to check for success or
+ failure.
+:param dashboard: Data structure defining the dashboard. Must be JSON
+ serializable. (Note: This should *not* be pre-serialized JSON.)
+
+## `relations`
+
+Collection of :class:`Relation` instances that are established for
+this :class:`Endpoint`.
+
+This is a :class:`KeyList`, so it can be iterated and indexed as a list,
+or you can look up relations by their ID. For example::
+
+ rel0 = endpoint.relations[0]
+ assert rel0 is endpoint.relations[rel0.relation_id]
+ assert all(rel is endpoint.relations[rel.relation_id]
+ for rel in endpoint.relations)
+ print(', '.join(endpoint.relations.keys()))
+
+## `requests`
+
+A list of all requests which have been submitted.
+
+## `responses`
+
+A list of all responses which have been received.
+
diff --git a/kubernetes-master/hooks/relations/grafana-dashboard/docs/requires.md b/kubernetes-master/hooks/relations/grafana-dashboard/docs/requires.md
new file mode 100644
index 0000000..c84f1bc
--- /dev/null
+++ b/kubernetes-master/hooks/relations/grafana-dashboard/docs/requires.md
@@ -0,0 +1,109 @@
+# `class GrafanaDashboardRequires(ResponderEndpoint)`
+
+Base class for Endpoints that respond to requests in the request / response
+pattern.
+
+Subclasses **must** set the ``REQUEST_CLASS`` attribute to a subclass
+of :class:`BaseRequest` which defines the fields the request will use.
+
+## `all_departed_units`
+
+Collection of all units that were previously part of any relation on
+this endpoint but which have since departed.
+
+This collection is persistent and mutable. The departed units will
+be kept until they are explicitly removed, to allow for reasonable
+cleanup of units that have left.
+
+Example: You need to run a command each time a unit departs the relation.
+
+.. code-block:: python
+
+ @when('endpoint.{endpoint_name}.departed')
+ def handle_departed_unit(self):
+ for name, unit in self.all_departed_units.items():
+ # run the command to remove `unit` from the cluster
+ # ..
+ self.all_departed_units.clear()
+ clear_flag(self.expand_name('departed'))
+
+Once a unit is departed, it will no longer show up in
+:attr:`all_joined_units`. Note that units are considered departed as
+soon as the departed hook is entered, which differs slightly from how
+the Juju primitives behave (departing units are still returned from
+``related-units`` until after the departed hook is complete).
+
+This collection is a :class:`KeyList`, so can be used as a mapping to
+look up units by their unit name, or iterated or accessed by index.
+
+## `all_joined_units`
+
+A list view of all the units of all relations attached to this
+:class:`~charms.reactive.endpoints.Endpoint`.
+
+This is actually a
+:class:`~charms.reactive.endpoints.CombinedUnitsView`, so the units
+will be in order by relation ID and then unit name, and you can access a
+merged view of all the units' data as a single mapping. You should be
+very careful when using the merged data collections, however, and
+consider carefully what will happen when the endpoint has multiple
+relations and multiple remote units on each. It is probably better to
+iterate over each unit and handle its data individually. See
+:class:`~charms.reactive.endpoints.CombinedUnitsView` for an
+explanation of how the merged data collections work.
+
+Note that, because a given application might be related multiple times
+on a given endpoint, units may show up in this collection more than
+once.
+
+## `all_requests`
+
+A list of all requests, including ones which have been responded to.
+
+## `all_units`
+
+.. deprecated:: 0.6.1
+ Use :attr:`all_joined_units` instead
+
+## `endpoint_name`
+
+Relation name of this endpoint.
+
+## `is_joined`
+
+Whether this endpoint has remote applications attached to it.
+
+## `joined`
+
+.. deprecated:: 0.6.3
+ Use :attr:`is_joined` instead
+
+## `def manage_flags(self)`
+
+Method that subclasses can override to perform any flag management
+needed during startup.
+
+This will be called automatically after the framework-managed automatic
+flags have been updated.
+
+## `new_requests`
+
+A list of requests which have not been responded.
+
+Requests should be handled by the charm and then responded to by
+calling ``request.respond(...)``.
+
+## `relations`
+
+Collection of :class:`Relation` instances that are established for
+this :class:`Endpoint`.
+
+This is a :class:`KeyList`, so it can be iterated and indexed as a list,
+or you can look up relations by their ID. For example::
+
+ rel0 = endpoint.relations[0]
+ assert rel0 is endpoint.relations[rel0.relation_id]
+ assert all(rel is endpoint.relations[rel.relation_id]
+ for rel in endpoint.relations)
+ print(', '.join(endpoint.relations.keys()))
+
diff --git a/kubernetes-master/hooks/relations/grafana-dashboard/interface.yaml b/kubernetes-master/hooks/relations/grafana-dashboard/interface.yaml
new file mode 100644
index 0000000..0ee9ef8
--- /dev/null
+++ b/kubernetes-master/hooks/relations/grafana-dashboard/interface.yaml
@@ -0,0 +1,6 @@
+name: grafana-dashboard
+summary: Interface for importing dashboards into Grafana
+version: 1
+maintainer: "Cory Johns "
+exclude:
+ - .docs
diff --git a/kubernetes-master/hooks/relations/grafana-dashboard/provides.py b/kubernetes-master/hooks/relations/grafana-dashboard/provides.py
new file mode 100644
index 0000000..670ded9
--- /dev/null
+++ b/kubernetes-master/hooks/relations/grafana-dashboard/provides.py
@@ -0,0 +1,42 @@
+from charms.reactive import (
+ toggle_flag,
+ RequesterEndpoint,
+)
+
+from .common import ImportRequest
+
+
+class GrafanaDashboardProvides(RequesterEndpoint):
+ REQUEST_CLASS = ImportRequest
+
+ def manage_flags(self):
+ super().manage_flags()
+ toggle_flag(self.expand_name('endpoint.{endpoint_name}.failed'),
+ self.is_joined and self.failed_imports)
+
+ @property
+ def failed_imports(self):
+ """
+ A list of requests that failed to import.
+ """
+ return [response
+ for response in self.responses
+ if not response.success]
+
+ def register_dashboard(self, name, dashboard):
+ """
+ Request a dashboard to be imported.
+
+ :param name: Name of dashboard. Informational only, so that you can
+ tell which dashboard request this was, e.g. to check for success or
+ failure.
+ :param dashboard: Data structure defining the dashboard. Must be JSON
+ serializable. (Note: This should *not* be pre-serialized JSON.)
+ """
+ # we might be connected to multiple grafanas for some strange
+ # reason, so just send the dashboard to all of them
+ for relation in self.relations:
+ ImportRequest.create_or_update(match_fields=['name'],
+ relation=relation,
+ name=name,
+ dashboard=dashboard)
diff --git a/kubernetes-master/hooks/relations/grafana-dashboard/requires.py b/kubernetes-master/hooks/relations/grafana-dashboard/requires.py
new file mode 100644
index 0000000..de696c1
--- /dev/null
+++ b/kubernetes-master/hooks/relations/grafana-dashboard/requires.py
@@ -0,0 +1,15 @@
+from charms.reactive import (
+ toggle_flag,
+ ResponderEndpoint,
+)
+
+from .common import ImportRequest
+
+
+class GrafanaDashboardRequires(ResponderEndpoint):
+    """
+    Endpoint for the Grafana charm to receive dashboard import requests.
+
+    Raises the 'endpoint.{endpoint_name}.requests' flag while the relation
+    is joined and there are new (unanswered) requests.
+    """
+
+    REQUEST_CLASS = ImportRequest
+
+    def manage_flags(self):
+        super().manage_flags()
+        toggle_flag(self.expand_name('endpoint.{endpoint_name}.requests'),
+                    self.is_joined and self.new_requests)
diff --git a/kubernetes-master/hooks/relations/hacluster/.stestr.conf b/kubernetes-master/hooks/relations/hacluster/.stestr.conf
new file mode 100644
index 0000000..5fcccac
--- /dev/null
+++ b/kubernetes-master/hooks/relations/hacluster/.stestr.conf
@@ -0,0 +1,3 @@
+[DEFAULT]
+test_path=./unit_tests
+top_dir=./
diff --git a/kubernetes-master/hooks/relations/hacluster/README.md b/kubernetes-master/hooks/relations/hacluster/README.md
new file mode 100644
index 0000000..e8147ac
--- /dev/null
+++ b/kubernetes-master/hooks/relations/hacluster/README.md
@@ -0,0 +1,90 @@
+# Overview
+
+This interface handles the communication with the hacluster subordinate
+charm using the `ha` interface protocol.
+
+# Usage
+
+## Requires
+
+The interface layer will set the following reactive states, as appropriate:
+
+ * `{relation_name}.connected` The relation is established and ready for
+ the local charm to configure the hacluster subordinate charm. The
+ configuration of the resources to manage for the hacluster charm
+ can be managed via one of the following methods:
+
+ * `manage_resources` method
+ * `bind_on` method
+
+ Configuration of the managed resources within the hacluster can be
+ managed by passing `common.CRM` object definitions to the
+ `manage_resources` method.
+
+ * `{relation_name}.available` The hacluster is up and ready.
+
+For example:
+```python
+from charms.reactive import when, when_not
+from charms.reactive import set_state, remove_state
+
+from relations.hacluster.common import CRM
+
+
+@when('ha.connected')
+def cluster_connected(hacluster):
+
+ resources = CRM()
+    resources.primitive('res_vip', 'ocf:heartbeat:IPAddr2',
+ params='ip=10.0.3.100 nic=eth0',
+ op='monitor interval="10s"')
+ resources.clone('cl_res_vip', 'res_vip')
+
+ hacluster.bind_on(iface='eth0', mcastport=4430)
+ hacluster.manage_resources(resources)
+```
+
+Additionally, for more code clarity, a custom object implementing the
+interface defined in common.ResourceDescriptor can be used to simplify
+the code and encourage reuse.
+
+For example:
+```python
+import ipaddress
+
+from relation.hacluster.common import CRM
+from relation.hacluster.common import ResourceDescriptor
+
+class VirtualIP(ResourceDescriptor):
+ def __init__(self, vip, nic='eth0'):
+ self.vip = vip
+        self.nic = nic
+
+ def configure_resource(self, crm):
+ ipaddr = ipaddress.ip_address(self.vip)
+ if isinstance(ipaddr, ipaddress.IPv4Address):
+ res_type = 'ocf:heartbeat:IPAddr2'
+            res_params = 'ip={ip} nic={nic}'.format(ip=self.vip,
+ nic=self.nic)
+ else:
+ res_type = 'ocf:heartbeat:IPv6addr'
+ res_params = 'ipv6addr={ip} nic={nic}'.format(ip=self.vip,
+ nic=self.nic)
+
+ crm.primitive('res_vip', res_type, params=res_params,
+ op='monitor interval="10s"')
+ crm.clone('cl_res_vip', 'res_vip')
+```
+
+Once the VirtualIP class above has been defined in charm code, it can make
+the code a bit cleaner. The example above can thusly be written as:
+
+```python
+@when('ha.connected')
+def cluster_connected(hacluster):
+ resources = CRM()
+ resources.add(VirtualIP('10.0.3.100'))
+
+ hacluster.bind_on(iface='eth0', mcastport=4430)
+ hacluster.manage_resources(resources)
+```
diff --git a/kubernetes-master/hooks/relations/hacluster/__init__.py b/kubernetes-master/hooks/relations/hacluster/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/kubernetes-master/hooks/relations/hacluster/common.py b/kubernetes-master/hooks/relations/hacluster/common.py
new file mode 100644
index 0000000..d896510
--- /dev/null
+++ b/kubernetes-master/hooks/relations/hacluster/common.py
@@ -0,0 +1,726 @@
+#!/usr/bin/python
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import hashlib
+import ipaddress
+from six import string_types
+
+
class CRM(dict):
    """
    Configuration object for Pacemaker resources for the HACluster
    interface. This class provides access to the supported resources
    available in the 'crm configure' within the HACluster.

    See Also
    --------
    More documentation is available regarding the definitions of
    primitives, clones, and other pacemaker resources at the crmsh
    site at http://crmsh.github.io/man
    """

    # Constants provided for ordering constraints (e.g. the kind value)
    MANDATORY = "Mandatory"
    OPTIONAL = "Optional"
    SERIALIZE = "Serialize"

    # Constants defining weights of constraints
    INFINITY = "inf"
    NEG_INFINITY = "-inf"

    # Constants aliased to their interpretations for constraints
    ALWAYS = INFINITY
    NEVER = NEG_INFINITY

    def __init__(self, *args, **kwargs):
        # Pre-seed every section the hacluster charm consumes so that
        # callers can read/iterate them unconditionally.
        self['resources'] = {}
        self['delete_resources'] = []
        self['resource_params'] = {}
        self['groups'] = {}
        self['ms'] = {}
        self['orders'] = {}
        self['colocations'] = {}
        self['clones'] = {}
        self['locations'] = {}
        self['init_services'] = []
        self['systemd_services'] = []
        super(CRM, self).__init__(*args, **kwargs)

    def primitive(self, name, agent, description=None, **kwargs):
        """Configures a primitive resource within Pacemaker.

        A primitive is used to describe a resource which should be managed
        by the cluster. Primitives consist of a name, the agent type, and
        various configuration options to the primitive. For example:

            crm.primitive('www8', 'apache',
                          params='configfile=/etc/apache/www8.conf',
                          operations='$id-ref=apache_ops')

        will create an apache primitive (resource) for the www8 service
        hosted by the Apache HTTP server. The parameters specified can either
        be provided individually (e.g. a string) or as an iterable.

        The following example shows how to specify multiple ops for a drbd
        volume in a Master/Slave configuration::

            ops = ['monitor role=Master interval=60s',
                   'monitor role=Slave interval=300s']

            crm.primitive('r0', 'ocf:linbit:drbd',
                          params='drbd_resource=r0',
                          op=ops)

        Additional arguments may be passed in as kwargs in which the key of
        the kwarg is prepended to the value.

        Parameters
        ----------
        name: str
            the name of the primitive.
        agent: str
            the type of agent to use to monitor the primitive resource
            (e.g. ocf:linbit:drbd).
        description: str, optional, kwarg
            a description about the resource
        params: str or iterable, optional, kwarg
            parameters which are provided to the resource agent
        meta: str or iterable, optional, kwarg
            metadata information for the primitive resource
        utilization: str or iterable, optional, kwarg
            utilization information for the primitive resource
        operations: str or iterable, optional, kwarg
            operations information for the primitive resource in id_spec
            format (e.g. $id= or $id-ref=)
        op: str or iterable, optional, kwarg
            op information regarding the primitive resource. This takes the
            form of '<op_type> <op_params> [<op_type> <op_params> ...]'

        Returns
        -------
        None

        See Also
        --------
        http://crmsh.github.io/man/#cmdhelp_configure_primitive
        """
        resources = self['resources']
        resources[name] = agent

        specs = ''
        if description:
            specs = specs + 'description="%s"' % description

        # Use the ordering specified in the crm manual
        for key in 'params', 'meta', 'utilization', 'operations', 'op':
            if key not in kwargs:
                continue
            specs = specs + (' %s' % self._parse(key, kwargs[key]))

        if specs:
            self['resource_params'][name] = specs

    def _parse(self, prefix, data):
        """Render data as '<prefix> <value>' pairs, e.g.
        ('op', 'monitor interval=5s') -> ' op monitor interval=5s'.

        NOTE: the result intentionally carries a single leading space when
        data is non-empty; callers concatenate it directly after the
        preceding spec text, which is why specs strings contain double
        spaces between sections.
        """
        results = ''
        # six.string_types is (str,) on Python 3, so a plain isinstance
        # check against str is equivalent here.
        if isinstance(data, str):
            data = [data]

        first = True
        for d in data:
            if first:
                results = results + ' '
                first = False
            results = results + ('%s %s ' % (prefix, d))
        results = results.rstrip()
        return results

    def clone(self, name, resource, description=None, **kwargs):
        """Creates a resource which should run on all nodes.

        Parameters
        ----------
        name: str
            the name of the clone
        resource: str
            the name or id of the resource to clone
        description: str, optional
            text containing a description for the clone
        meta: str or list of str, optional, kwarg
            metadata attributes to assign to the clone
        params: str or list of str, optional, kwarg
            parameters to assign to the clone

        Returns
        -------
        None

        See Also
        --------
        http://crmsh.github.io/man/#cmdhelp_configure_clone
        """
        clone_specs = resource
        if description:
            clone_specs = clone_specs + (' description="%s"' % description)

        for key in 'meta', 'params':
            if key not in kwargs:
                continue
            value = kwargs[key]
            if not value:
                continue
            clone_specs = clone_specs + (' %s' % self._parse(key, value))

        self['clones'][name] = clone_specs

    def colocation(self, name, score=ALWAYS, *resources, **kwargs):
        """Configures the colocation constraints of resources.

        Provides placement constraints regarding resources defined within
        the cluster. Using the colocate function, resource affinity or
        anti-affinity can be defined.

        For example, the following code ensures that the nova-console service
        always runs where the cluster vip is running:

            crm.colocation('console_with_vip', ALWAYS,
                           'nova-console', 'vip')

        The affinity or anti-affinity of resource relationships is
        expressed in the `score` parameter. A positive score indicates that
        the resources should run on the same node. A score of INFINITY (or
        ALWAYS) will ensure the resources are always run on the same node(s)
        and a score of NEG_INFINITY (or NEVER) ensures that the resources are
        never run on the same node(s).

            crm.colocation('never_apache_with_dummy', NEVER,
                           'apache', 'dummy')

        Any *resources values which are provided are treated as resources
        which the colocation constraint applies to. At least two resources
        must be defined as part of the ordering constraint.

        The resources take the form of <resource>[:<role>]. If the
        colocation constraint applies specifically to a role, this
        information should be included in the resource supplied.

        Parameters
        ----------
        name: str
            id or name of the colocation constraint
        score: str {ALWAYS, INFINITY, NEVER, NEG_INFINITY} or int
            the score or weight of the colocation constraint. A positive
            value will indicate that the resources should run on the same
            node. A negative value indicates that the resources should run
            on separate nodes.
        resources: str or list
            the list of resources which the colocation constraint applies to.
        node_attribute: str, optional, kwarg
            can be used to run the resources on a set of nodes, not just a
            single node.

        Returns
        -------
        None

        See Also
        --------
        http://crmsh.github.io/man/#cmdhelp_configure_colocation
        """
        specs = '%s: %s' % (score, ' '.join(resources))
        if 'node_attribute' in kwargs:
            specs = specs + (' node-attribute=%s' % kwargs['node_attribute'])
        self['colocations'][name] = specs

    def group(self, name, *resources, **kwargs):
        """Creates a group of resources within Pacemaker.

        The created group includes the list of resources provided in the
        list of resources supplied. For example::

            crm.group('grp_mysql', 'res_mysql_rbd', 'res_mysql_fs',
                      'res_mysql_vip', 'res_mysqld')

        will create the 'grp_mysql' resource group consisting of the
        res_mysql_rbd, res_mysql_fs, res_mysql_vip, and res_mysqld resources.

        Parameters
        ----------
        name: str
            the name of the group resource
        resources: list of str
            the names or ids of resources to include within the group.
        description: str, optional, kwarg
            text to describe the resource
        meta: str or list of str, optional, kwarg
            metadata attributes to assign to the group
        params: str or list of str, optional, kwarg
            parameters to assign to the group

        Returns
        -------
        None

        See Also
        --------
        http://crmsh.github.io/man/#cmdhelp_configure_group
        """
        specs = ' '.join(resources)
        if 'description' in kwargs:
            # BUGFIX: was ' description=%s"' (mismatched quotes); match the
            # 'description="%s"' form used by clone() and ms().
            specs = specs + (' description="%s"' % kwargs['description'])

        for key in 'meta', 'params':
            if key not in kwargs:
                continue
            value = kwargs[key]
            specs = specs + (' %s' % self._parse(key, value))

        self['groups'][name] = specs

    def remove_deleted_resources(self):
        """Work through the existing resources and remove any mention of
        ones which have been marked for deletion."""
        for res in self['delete_resources']:
            for key in self.keys():
                if key == 'delete_resources':
                    continue
                if isinstance(self[key], dict) and res in self[key].keys():
                    del self[key][res]
                elif isinstance(self[key], list) and res in self[key]:
                    self[key].remove(res)
                elif isinstance(self[key], tuple) and res in self[key]:
                    self[key] = tuple(x for x in self[key] if x != res)

    def delete_resource(self, *resources):
        """Specify objects/resources to be deleted from within Pacemaker.
        This is not additive, the list of resources is set to exactly what
        was passed in.

        Parameters
        ----------
        resources: str or list
            the name or id of the specific resource to delete.

        Returns
        -------
        None

        See Also
        --------
        http://crmsh.github.io/man/#cmdhelp_configure_delete
        """
        self['delete_resources'] = resources
        self.remove_deleted_resources()

    def add_delete_resource(self, resource):
        """Specify an object/resource to delete from within Pacemaker. It
        can be called multiple times to add additional resources to the
        deletion list.

        Parameters
        ----------
        resource: str
            the name or id of the specific resource to delete.

        Returns
        -------
        None

        See Also
        --------
        http://crmsh.github.io/man/#cmdhelp_configure_delete
        """
        if resource not in self['delete_resources']:
            # NOTE(fnordahl): this unpleasant piece of code is regrettably
            # necessary for Python3.4 (and trusty) compatibility
            # see LP: #1814218 and LP: #1813982
            self['delete_resources'] = tuple(
                self['delete_resources'] or ()) + (resource,)
        self.remove_deleted_resources()

    def init_services(self, *resources):
        """Specifies that the service(s) is an init or upstart service.

        Services (resources) which are noted as upstart services are
        disabled, stopped, and left to pacemaker to manage the resource.

        Parameters
        ----------
        resources: str or list of str, varargs
            The resources which should be noted as init services.

        Returns
        -------
        None
        """
        self['init_services'] = resources

    def systemd_services(self, *resources):
        """Specifies that the service(s) is a systemd service.

        Services (resources) which are noted as systemd services are
        disabled, stopped, and left to pacemaker to manage the resource.

        Parameters
        ----------
        resources: str or list of str, varargs
            The resources which should be noted as systemd services.

        Returns
        -------
        None
        """
        self['systemd_services'] = resources

    def ms(self, name, resource, description=None, **kwargs):
        """Create a master/slave resource type.

        The following code provides an example of creating a master/slave
        resource on drbd disk1::

            crm.ms('disk1', 'drbd1', meta='notify=true globally-unique=false')

        Parameters
        ----------
        name: str
            the name or id of the master resource
        resource: str
            the name or id of the resource which now has a master/slave
            association tied to it.
        description: str, optional
            a textual description of the master resource
        meta: str or list of strs, optional, kwargs
            strings defining the metadata for the master/slave resource type
        params: str or list of strs, optional, kwargs
            parameter strings which should be passed to the master/slave
            resource creation

        Returns
        -------
        None

        See Also
        --------
        http://crmsh.github.io/man/#cmdhelp_configure_ms
        """
        specs = resource
        if description:
            specs = specs + (' description="%s"' % description)

        for key in 'meta', 'params':
            if key not in kwargs:
                continue
            value = kwargs[key]
            specs = specs + (' %s' % self._parse(key, value))

        self['ms'][name] = specs

    def location(self, name, resource, **kwargs):
        """Defines the preference of nodes for the given resource.

        The location constraints consist of one or more rules which specify
        a score to be awarded if the rules match.

        Parameters
        ----------
        name: str
            the name or id of the location constraint
        resource: str
            the name, id, resource, set, tag, or resource pattern defining
            the set of resources which match the location placement
            constraint.
        attributes: str or list str, optional, kwarg
            attributes which should be assigned to the location constraint
        rule: str or list of str, optional, kwarg
            the rule(s) which define the location constraint rules when
            selecting a location to run the resource.

        Returns
        -------
        None

        See Also
        --------
        http://crmsh.github.io/man/#cmdhelp_configure_location
        """
        specs = resource

        # Check if there are attributes assigned to the location and if so,
        # format the spec string with the attributes
        if 'attributes' in kwargs:
            attrs = kwargs['attributes']
            if isinstance(attrs, str):
                attrs = [attrs]
            specs = specs + (' %s' % ' '.join(attrs))

        if 'rule' in kwargs:
            rules = kwargs['rule']
            specs = specs + (' %s' % self._parse('rule', rules))

        self['locations'][name] = specs

    def order(self, name, score=None, *resources, **kwargs):
        """Configures the ordering constraints of resources.

        Provides ordering constraints to resources defined in a Pacemaker
        cluster which affect the way that resources are started, stopped,
        promoted, etc. Basic ordering is provided by simply specifying the
        ordering name and an ordered list of the resources which the
        ordering constraint applies to.

        For example, the following code ensures that the apache resource is
        started after the ClusterIP is started::

            hacluster.order('apache-after-ip', 'ClusterIP', 'apache')

        By default, the ordering constraint will specify that the ordering
        constraint is mandatory. The constraint behavior can be specified
        using the 'score' positional argument, e.g.::

            hacluster.order('apache-after-ip', hacluster.OPTIONAL,
                            'ClusterIP', 'apache')

        Any *resources values which are provided are treated as resources
        which the ordering constraint applies to. At least two resources
        must be defined as part of the ordering constraint.

        The resources take the form of <resource>[:<action>]. If the
        ordering constraint applies to a specific action for the resource,
        this information should be included in the resource supplied.

        Parameters
        ----------
        name: str
            the id or name of the order constraint
        resources: str or list of strs in varargs format
            the resources the ordering constraint applies to. The ordering
            of the list of resources is used to provide the ordering.
        score: {MANDATORY, OPTIONAL, SERIALIZE}, optional
            the score of the ordering constraint.
        symmetrical: boolean, optional, kwarg
            when True, then the services for the resources will be stopped
            in the reverse order. The default value for this is True.

        Returns
        -------
        None

        See Also
        --------
        http://crmsh.github.io/man/#cmdhelp_configure_order
        """
        specs = ''
        if score:
            specs = '%s:' % score

        specs = specs + (' %s' % ' '.join(resources))
        if 'symmetrical' in kwargs:
            # BUGFIX: was "' symmetrical=' % value" which raises TypeError
            # (no conversion specifier); render the value explicitly.
            specs = specs + (' symmetrical=%s' % kwargs['symmetrical'])

        self['orders'][name] = specs

    def add(self, resource_desc):
        """Adds a resource descriptor object to the CRM configuration.

        Adds a `ResourceDescriptor` object to the CRM configuration which
        understands how to configure the resource itself. The
        `ResourceDescriptor` object needs to know how to interact with this
        CRM class in order to properly configure the pacemaker resources.

        The minimum viable resource descriptor object will implement a
        method which takes a reference parameter to this CRM in order to
        configure itself.

        Parameters
        ----------
        resource_desc: ResourceDescriptor
            an object which provides an abstraction of a monitored resource
            within pacemaker.

        Returns
        -------
        None
        """
        method = getattr(resource_desc, 'configure_resource', None)
        if not callable(method):
            raise ValueError('Invalid resource_desc. The "configure_resource"'
                             ' function has not been defined.')

        method(self)
+
+
class ResourceDescriptor(object):
    """
    Base class for objects that model a logical cluster resource and know
    how to register that resource with pacemaker.
    """

    def configure_resource(self, crm):
        """Configure the logical resource(s) within the CRM.

        Callback invoked by the CRM so that this descriptor can fully
        configure the logical resource it represents; for example, a
        Virtual IP descriptor creates the underlying primitives while
        exposing a simple abstraction to charm code.

        Subclasses override this; the base implementation does nothing.
        """
        pass
+
+
class InitService(ResourceDescriptor):
    """Resource descriptor for a service managed by the init system."""

    def __init__(self, service_name, init_service_name, clone=True):
        """Describe an init/upstart-managed service resource.

        :param service_name: string - Name of service
        :param init_service_name: string - Name service uses in init system
        :param clone: bool - clone service across all units
        :returns: None
        """
        self.service_name = service_name
        self.init_service_name = init_service_name
        self.clone = clone

    def configure_resource(self, crm):
        """Register this init system service resource with the crm.

        :param crm: CRM() instance - Config object for Pacemaker resources
        :returns: None
        """
        # Dashes are normalised to underscores in the resource id.
        safe_service = self.service_name.replace('-', '_')
        safe_init = self.init_service_name.replace('-', '_')
        resource_id = 'res_{}_{}'.format(safe_service, safe_init)
        agent = 'lsb:{}'.format(self.init_service_name)
        meta_attrs = 'migration-threshold="INFINITY" failure-timeout="5s"'
        crm.primitive(resource_id, agent,
                      op='monitor interval="5s"', meta=meta_attrs)
        # Tell the subordinate to disable/stop the init script and hand
        # control of the service to pacemaker.
        crm.init_services(self.init_service_name)
        if self.clone:
            crm.clone('cl_{}'.format(resource_id), resource_id)
+
+
class VirtualIP(ResourceDescriptor):
    def __init__(self, service_name, vip, nic=None, cidr=None):
        """Class for managing VIP resource

        :param service_name: string - Name of service
        :param vip: string - Virtual IP to be managed
        :param nic: string - Network interface to bind vip to
        :param cidr: string - Netmask for vip
        :returns: None
        """
        self.service_name = service_name
        self.vip = vip
        self.nic = nic
        self.cidr = cidr

    def configure_resource(self, crm):
        """Configure new vip resource in crm

        :param crm: CRM() instance - Config object for Pacemaker resources
        :returns: None
        """
        # Key the resource by NIC when one was given, otherwise by a short
        # hash of the VIP string so multiple VIPs stay distinct.
        if self.nic:
            vip_key = 'res_{}_{}_vip'.format(self.service_name, self.nic)
        else:
            vip_key = 'res_{}_{}_vip'.format(
                self.service_name,
                hashlib.sha1(self.vip.encode('UTF-8')).hexdigest()[:7])
        # Pick the resource agent by IP family: IPaddr2 for IPv4,
        # IPv6addr for IPv6.
        ipaddr = ipaddress.ip_address(self.vip)
        if isinstance(ipaddr, ipaddress.IPv4Address):
            res_type = 'ocf:heartbeat:IPaddr2'
            res_params = 'ip="{}"'.format(self.vip)
        else:
            res_type = 'ocf:heartbeat:IPv6addr'
            res_params = 'ipv6addr="{}"'.format(self.vip)
            vip_params = 'ipv6addr'
            # NOTE(review): for IPv6 the key computed above is discarded and
            # rebuilt with self.nic interpolated even when nic is None
            # (producing e.g. 'res_<svc>_None_ipv6addr_vip'). Existing
            # deployments may depend on these exact resource ids, so this
            # is documented rather than changed — confirm before altering.
            vip_key = 'res_{}_{}_{}_vip'.format(self.service_name, self.nic,
                                                vip_params)

        # Optional agent parameters appended when supplied by the caller.
        if self.nic:
            res_params = '{} nic="{}"'.format(res_params, self.nic)
        if self.cidr:
            res_params = '{} cidr_netmask="{}"'.format(res_params, self.cidr)
        # Monitor the VIP
        _op_monitor = 'monitor timeout="20s" interval="10s" depth="0"'
        _meta = 'migration-threshold="INFINITY" failure-timeout="5s"'
        crm.primitive(
            vip_key, res_type, params=res_params, op=_op_monitor, meta=_meta)
+
+
class DNSEntry(ResourceDescriptor):
    """Resource descriptor for a MAAS-backed DNS HA record."""

    def __init__(self, service_name, ip, fqdn, endpoint_type):
        """Describe a DNS entry to be managed by the cluster.

        :param service_name: string - Name of service
        :param ip: string - IP to point DNS entry at
        :param fqdn: string - DNS Entry
        :param endpoint_type: string - The type of the endpoint represented
                                       by the DNS record eg public, admin etc
        :returns: None
        """
        self.service_name = service_name
        self.ip = ip
        self.fqdn = fqdn
        self.endpoint_type = endpoint_type

    def configure_resource(self, crm, res_type='ocf:maas:dns'):
        """Register this DNS record with the crm.

        :param crm: CRM() instance - Config object for Pacemaker resources
        :param res_type: string - Corosync Open Cluster Framework resource
                                  agent to use for DNS HA
        :returns: None
        """
        resource_id = 'res_{}_{}_hostname'.format(
            self.service_name.replace('-', '_'), self.endpoint_type)
        # Both parameters are optional; each present value is appended with
        # a leading space, matching the spec format the charm expects.
        params = ''
        for key, value in (('fqdn', self.fqdn), ('ip_address', self.ip)):
            if value:
                params = '{} {}="{}"'.format(params, key, value)
        crm.primitive(resource_id, res_type, params=params)
+
+
class SystemdService(ResourceDescriptor):
    """Resource descriptor for a service managed by systemd."""

    def __init__(self, service_name, systemd_service_name, clone=True):
        """Describe a systemd-managed service resource.

        :param service_name: string - Name of service
        :param systemd_service_name: string - Name service uses in
                                              systemd system
        :param clone: bool - clone service across all units
        :returns: None
        """
        self.service_name = service_name
        self.systemd_service_name = systemd_service_name
        self.clone = clone

    def configure_resource(self, crm):
        """Register this systemd service resource with the crm.

        :param crm: CRM() instance - Config object for Pacemaker resources
        :returns: None
        """
        # Dashes are normalised to underscores in the resource id.
        safe_service = self.service_name.replace('-', '_')
        safe_unit = self.systemd_service_name.replace('-', '_')
        resource_id = 'res_{}_{}'.format(safe_service, safe_unit)
        agent = 'systemd:{}'.format(self.systemd_service_name)
        meta_attrs = 'migration-threshold="INFINITY" failure-timeout="5s"'
        crm.primitive(resource_id, agent,
                      op='monitor interval="5s"', meta=meta_attrs)
        # Tell the subordinate to disable/stop the unit and hand control
        # of the service to pacemaker.
        crm.systemd_services(self.systemd_service_name)
        if self.clone:
            crm.clone('cl_{}'.format(resource_id), resource_id)
diff --git a/kubernetes-master/hooks/relations/hacluster/copyright b/kubernetes-master/hooks/relations/hacluster/copyright
new file mode 100644
index 0000000..5a49dcb
--- /dev/null
+++ b/kubernetes-master/hooks/relations/hacluster/copyright
@@ -0,0 +1,21 @@
+Format: http://www.debian.org/doc/packaging-manuals/copyright-format/1.0
+
+Files: *
+Copyright: 2015, Canonical Ltd.
+License: Apache-2.0
+
+License: Apache-2.0
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+ .
+ http://www.apache.org/licenses/LICENSE-2.0
+ .
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+ .
+ On Debian-based systems the full text of the Apache version 2.0 license
+ can be found in `/usr/share/common-licenses/Apache-2.0'.
diff --git a/kubernetes-master/hooks/relations/hacluster/interface.yaml b/kubernetes-master/hooks/relations/hacluster/interface.yaml
new file mode 100644
index 0000000..edd0c90
--- /dev/null
+++ b/kubernetes-master/hooks/relations/hacluster/interface.yaml
@@ -0,0 +1,13 @@
+name: hacluster
+summary: |
+ Provides the hacluster interface used for configuring Corosync
+ and Pacemaker services.
+maintainer: OpenStack Charmers
+ignore:
+ - '.gitignore'
+ - '.gitreview'
+ - '.testr.conf'
+ - 'test-requirements'
+ - 'tox.ini'
+ - 'unit_tests'
+ - '.zuul.yaml'
diff --git a/kubernetes-master/hooks/relations/hacluster/requires.py b/kubernetes-master/hooks/relations/hacluster/requires.py
new file mode 100644
index 0000000..9b72d97
--- /dev/null
+++ b/kubernetes-master/hooks/relations/hacluster/requires.py
@@ -0,0 +1,285 @@
+#!/usr/bin/python
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import json
+import hashlib
+
+import relations.hacluster.common
+from charms.reactive import hook
+from charms.reactive import RelationBase
+from charms.reactive import scopes
+from charms.reactive.helpers import data_changed
+from charmhelpers.core import hookenv
+
+
class HAClusterRequires(RelationBase):
    """Requires side of the hacluster interface.

    Collects Pacemaker/Corosync resource definitions from the principal
    charm and publishes them on the relation for the hacluster subordinate
    to apply.
    """

    # The hacluster charm is a subordinate charm and really only works
    # for a single service to the HA Cluster relation, therefore set the
    # expected scope to be GLOBAL.
    scope = scopes.GLOBAL

    @hook('{requires:hacluster}-relation-joined')
    def joined(self):
        # A hacluster subordinate unit has joined the relation.
        self.set_state('{relation_name}.connected')

    @hook('{requires:hacluster}-relation-changed')
    def changed(self):
        # Only advertise availability once the subordinate reports the
        # cluster as formed.
        if self.is_clustered():
            self.set_state('{relation_name}.available')
        else:
            self.remove_state('{relation_name}.available')

    @hook('{requires:hacluster}-relation-{broken,departed}')
    def departed(self):
        # Relation is going away; clear all interface states.
        self.remove_state('{relation_name}.available')
        self.remove_state('{relation_name}.connected')

    def is_clustered(self):
        """Has the hacluster charm set clustered?

        The hacluster charm sets cluster=True when it determines it is
        ready. Check the relation data for clustered and force a boolean
        return.

        :returns: boolean
        """
        clustered_values = self.get_remote_all('clustered')
        if clustered_values:
            # There is only ever one subordinate hacluster unit
            clustered = clustered_values[0]
            # Future versions of hacluster will return a bool
            # Current versions return a string
            if type(clustered) is bool:
                return clustered
            elif (clustered is not None and
                    (clustered.lower() == 'true' or
                     clustered.lower() == 'yes')):
                return True
        return False

    def bind_on(self, iface=None, mcastport=None):
        """Publish the corosync bind interface and multicast port.

        :param iface: string - Network interface corosync should bind to
        :param mcastport: int - Multicast port for cluster traffic
        :returns: None
        """
        relation_data = {}
        if iface:
            relation_data['corosync_bindiface'] = iface
        if mcastport:
            relation_data['corosync_mcastport'] = mcastport

        # Only touch the relation when something actually changed.
        if relation_data and data_changed('hacluster-bind_on', relation_data):
            self.set_local(**relation_data)
            self.set_remote(**relation_data)

    def manage_resources(self, crm):
        """
        Request for the hacluster to manage the resources defined in the
        crm object.

            res = CRM()
            res.primitive('res_neutron_haproxy', 'lsb:haproxy',
                          op='monitor interval="5s"')
            res.init_services('haproxy')
            res.clone('cl_nova_haproxy', 'res_neutron_haproxy')

            hacluster.manage_resources(crm)

        :param crm: CRM() instance - Config object for Pacemaker resources
        :returns: None
        """
        # Each non-empty CRM section is serialised to JSON under a
        # 'json_<section>' key; sort_keys keeps the payload deterministic
        # so data_changed() does not fire spuriously.
        relation_data = {
            'json_{}'.format(k): json.dumps(v, sort_keys=True)
            for k, v in crm.items() if v
        }
        if data_changed('hacluster-manage_resources', relation_data):
            self.set_local(**relation_data)
            self.set_remote(**relation_data)

    def bind_resources(self, iface=None, mcastport=None):
        """Inform the ha subordinate about each service it should manage.
        The child class specifies the services via self.ha_resources

        :param iface: string - Network interface to bind to
        :param mcastport: int - Multicast port corosync should use for
                                cluster management traffic
        """
        if mcastport is None:
            mcastport = 4440
        resources_dict = self.get_local('resources')
        self.bind_on(iface=iface, mcastport=mcastport)
        if resources_dict:
            resources = relations.hacluster.common.CRM(**resources_dict)
            self.manage_resources(resources)

    def delete_resource(self, resource_name):
        """Mark a single resource for deletion on the subordinate.

        :param resource_name: string - id of the resource to delete
        :returns: None
        """
        resource_dict = self.get_local('resources')
        if resource_dict:
            resources = relations.hacluster.common.CRM(**resource_dict)
        else:
            resources = relations.hacluster.common.CRM()
        resources.add_delete_resource(resource_name)
        self.set_local(resources=resources)

    def add_vip(self, name, vip, iface=None, netmask=None):
        """Add a VirtualIP object for each user specified vip to
        self.resources

        :param name: string - Name of service
        :param vip: string - Virtual IP to be managed
        :param iface: string - Network interface to bind vip to
        :param netmask: string - Netmask for vip
        :returns: None
        """
        resource_dict = self.get_local('resources')
        if resource_dict:
            resources = relations.hacluster.common.CRM(**resource_dict)
        else:
            resources = relations.hacluster.common.CRM()
        resources.add(
            relations.hacluster.common.VirtualIP(
                name,
                vip,
                nic=iface,
                cidr=netmask))

        # Vip Group: collect every previously registered '*vip*' resource
        # into one sorted group alongside the new entry.
        group = 'grp_{}_vips'.format(name)
        vip_res_group_members = []
        if resource_dict:
            vip_resources = resource_dict.get('resources')
            if vip_resources:
                for vip_res in vip_resources:
                    if 'vip' in vip_res:
                        vip_res_group_members.append(vip_res)
                resources.group(group,
                                *sorted(vip_res_group_members))

        self.set_local(resources=resources)

    def remove_vip(self, name, vip, iface=None):
        """Remove a virtual IP

        :param name: string - Name of service
        :param vip: string - Virtual IP
        :param iface: string - Network interface vip bound to
        """
        # Mirror the key built by common.VirtualIP.configure_resource().
        if iface:
            nic_name = iface
        else:
            nic_name = hashlib.sha1(vip.encode('UTF-8')).hexdigest()[:7]
        self.delete_resource('res_{}_{}_vip'.format(name, nic_name))

    def add_init_service(self, name, service, clone=True):
        """Add a InitService object for haproxy to self.resources

        :param name: string - Name of service
        :param service: string - Name service uses in init system
        :param clone: bool - clone service across all units
        :returns: None
        """
        resource_dict = self.get_local('resources')
        if resource_dict:
            resources = relations.hacluster.common.CRM(**resource_dict)
        else:
            resources = relations.hacluster.common.CRM()
        resources.add(
            relations.hacluster.common.InitService(name, service, clone))
        self.set_local(resources=resources)

    def remove_init_service(self, name, service):
        """Remove an init service

        :param name: string - Name of service
        :param service: string - Name of service used in init system
        """
        res_key = 'res_{}_{}'.format(
            name.replace('-', '_'),
            service.replace('-', '_'))
        self.delete_resource(res_key)

    def add_systemd_service(self, name, service, clone=True):
        """Add a SystemdService object to self.resources

        :param name: string - Name of service
        :param service: string - Name service uses in systemd
        :param clone: bool - clone service across all units
        :returns: None
        """
        resource_dict = self.get_local('resources')
        if resource_dict:
            resources = relations.hacluster.common.CRM(**resource_dict)
        else:
            resources = relations.hacluster.common.CRM()
        resources.add(
            relations.hacluster.common.SystemdService(name, service, clone))
        self.set_local(resources=resources)

    def remove_systemd_service(self, name, service):
        """Remove a systemd service

        :param name: string - Name of service
        :param service: string - Name of service used in systemd
        """
        res_key = 'res_{}_{}'.format(
            name.replace('-', '_'),
            service.replace('-', '_'))
        self.delete_resource(res_key)

    def add_dnsha(self, name, ip, fqdn, endpoint_type):
        """Add a DNS entry to self.resources

        :param name: string - Name of service
        :param ip: string - IP address dns entry should resolve to
        :param fqdn: string - The DNS entry name
        :param endpoint_type: string - Public, private, internal etc
        :returns: None
        """
        resource_dict = self.get_local('resources')
        if resource_dict:
            resources = relations.hacluster.common.CRM(**resource_dict)
        else:
            resources = relations.hacluster.common.CRM()
        resources.add(
            relations.hacluster.common.DNSEntry(name, ip, fqdn,
                                                endpoint_type))

        # DNS Group: collect every previously registered '*hostname*'
        # resource into one sorted group alongside the new entry.
        group = 'grp_{}_hostnames'.format(name)
        dns_res_group_members = []
        if resource_dict:
            dns_resources = resource_dict.get('resources')
            if dns_resources:
                for dns_res in dns_resources:
                    if 'hostname' in dns_res:
                        dns_res_group_members.append(dns_res)
                resources.group(group,
                                *sorted(dns_res_group_members))

        self.set_local(resources=resources)

    def remove_dnsha(self, name, endpoint_type):
        """Remove a DNS entry

        :param name: string - Name of service
        :param endpoint_type: string - Public, private, internal etc
        :returns: None
        """
        # BUGFIX: previously referenced self.service_name and
        # self.endpoint_type, attributes this class never sets, raising
        # AttributeError. Use the parameters and mirror the key built by
        # common.DNSEntry.configure_resource().
        res_key = 'res_{}_{}_hostname'.format(
            name.replace('-', '_'),
            endpoint_type)
        self.delete_resource(res_key)

    def get_remote_all(self, key, default=None):
        """Return a list of all values presented by remote units for key"""
        values = []
        for conversation in self.conversations():
            for relation_id in conversation.relation_ids:
                for unit in hookenv.related_units(relation_id):
                    value = hookenv.relation_get(key,
                                                 unit,
                                                 relation_id) or default
                    if value:
                        values.append(value)
        # De-duplicate while returning a plain list.
        return list(set(values))
diff --git a/kubernetes-master/hooks/relations/hacluster/test-requirements.txt b/kubernetes-master/hooks/relations/hacluster/test-requirements.txt
new file mode 100644
index 0000000..6da7df2
--- /dev/null
+++ b/kubernetes-master/hooks/relations/hacluster/test-requirements.txt
@@ -0,0 +1,6 @@
+# Lint and unit test requirements
+flake8
+stestr>=2.2.0
+charms.reactive
+coverage>=3.6
+netifaces
diff --git a/kubernetes-master/hooks/relations/http/.gitignore b/kubernetes-master/hooks/relations/http/.gitignore
new file mode 100644
index 0000000..3374ec2
--- /dev/null
+++ b/kubernetes-master/hooks/relations/http/.gitignore
@@ -0,0 +1,5 @@
+# Emacs save files
+*~
+\#*\#
+.\#*
+
diff --git a/kubernetes-master/hooks/relations/http/README.md b/kubernetes-master/hooks/relations/http/README.md
new file mode 100644
index 0000000..3d7822a
--- /dev/null
+++ b/kubernetes-master/hooks/relations/http/README.md
@@ -0,0 +1,68 @@
+# Overview
+
+This interface layer implements the basic form of the `http` interface protocol,
+which is used for things such as reverse-proxies, load-balanced servers, REST
+service discovery, et cetera.
+
+# Usage
+
+## Provides
+
+By providing the `http` interface, your charm is providing an HTTP server that
+can be load-balanced, reverse-proxied, used as a REST endpoint, etc.
+
+Your charm need only provide the port on which it is serving its content, as
+soon as the `{relation_name}.available` state is set:
+
+```python
+@when('website.available')
+def configure_website(website):
+ website.configure(port=hookenv.config('port'))
+```
+
+## Requires
+
+By requiring the `http` interface, your charm is consuming one or more HTTP
+servers, as a REST endpoint, to load-balance a set of servers, etc.
+
+Your charm should respond to the `{relation_name}.available` state, which
+indicates that there is at least one HTTP server connected.
+
+The `services()` method returns a list of available HTTP services and their
+associated hosts and ports.
+
+The return value is a list of dicts of the following form:
+
+```python
+[
+ {
+ 'service_name': name_of_service,
+ 'hosts': [
+ {
+ 'hostname': address_of_host,
+ 'port': port_for_host,
+ },
+ # ...
+ ],
+ },
+ # ...
+]
+```
+
+A trivial example of handling this interface would be:
+
+```python
+from charms.reactive.helpers import data_changed
+
+@when('reverseproxy.available')
+def update_reverse_proxy_config(reverseproxy):
+ services = reverseproxy.services()
+ if not data_changed('reverseproxy.services', services):
+ return
+ for service in services:
+ for host in service['hosts']:
+ hookenv.log('{} has a unit {}:{}'.format(
+                service['service_name'],
+ host['hostname'],
+ host['port']))
+```
diff --git a/kubernetes-master/hooks/relations/http/__init__.py b/kubernetes-master/hooks/relations/http/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/kubernetes-master/hooks/relations/http/interface.yaml b/kubernetes-master/hooks/relations/http/interface.yaml
new file mode 100644
index 0000000..54e7748
--- /dev/null
+++ b/kubernetes-master/hooks/relations/http/interface.yaml
@@ -0,0 +1,4 @@
+name: http
+summary: Basic HTTP interface
+version: 1
+repo: https://git.launchpad.net/~bcsaller/charms/+source/http
diff --git a/kubernetes-master/hooks/relations/http/provides.py b/kubernetes-master/hooks/relations/http/provides.py
new file mode 100644
index 0000000..86fa9b3
--- /dev/null
+++ b/kubernetes-master/hooks/relations/http/provides.py
@@ -0,0 +1,67 @@
+import json
+
+from charmhelpers.core import hookenv
+from charms.reactive import when, when_not
+from charms.reactive import set_flag, clear_flag
+from charms.reactive import Endpoint
+
+
+class HttpProvides(Endpoint):
+
+ @when('endpoint.{endpoint_name}.joined')
+ def joined(self):
+ set_flag(self.expand_name('{endpoint_name}.available'))
+
+ @when_not('endpoint.{endpoint_name}.joined')
+ def broken(self):
+ clear_flag(self.expand_name('{endpoint_name}.available'))
+
+ def get_ingress_address(self, rel_id=None):
+ # If no rel_id is provided, we fallback to the first one
+ if rel_id is None:
+ rel_id = self.relations[0].relation_id
+ return hookenv.ingress_address(rel_id, hookenv.local_unit())
+
+ def configure(self, port, private_address=None, hostname=None):
+ ''' configure the address(es). private_address and hostname can
+ be None, a single string address/hostname, or a list of addresses
+ and hostnames. Note that if a list is passed, it is assumed both
+ private_address and hostname are either lists or None '''
+ for relation in self.relations:
+ ingress_address = self.get_ingress_address(relation.relation_id)
+ if type(private_address) is list or type(hostname) is list:
+ # build 3 lists to zip together that are the same length
+ length = max(len(private_address), len(hostname))
+ p = [port] * length
+ a = private_address + [ingress_address] *\
+ (length - len(private_address))
+ h = hostname + [ingress_address] * (length - len(hostname))
+ zipped_list = zip(p, a, h)
+ # now build an array of dictionaries from that in the desired
+ # format for the interface
+ data_list = [{'hostname': h, 'port': p, 'private-address': a}
+ for p, a, h in zipped_list]
+ # for backwards compatibility, we just send a single entry
+ # and have an array of dictionaries in a field of that
+ # entry for the other entries.
+ data = data_list.pop(0)
+ data['extended_data'] = json.dumps(data_list)
+
+ relation.to_publish_raw.update(data)
+ else:
+ relation.to_publish_raw.update({
+ 'hostname': hostname or ingress_address,
+ 'private-address': private_address or ingress_address,
+ 'port': port,
+ })
+
+ def set_remote(self, **kwargs):
+ # NB: This method provides backwards compatibility for charms that
+ # called RelationBase.set_remote. Most commonly, this was done by
+ # charms that needed to pass reverse proxy stanzas to http proxies.
+ # This type of interaction with base relation classes is discouraged,
+ # and should be handled with logic encapsulated in appropriate
+ # interfaces. Eventually, this method will be deprecated in favor of
+ # that behavior.
+ for relation in self.relations:
+ relation.to_publish_raw.update(kwargs)
diff --git a/kubernetes-master/hooks/relations/http/requires.py b/kubernetes-master/hooks/relations/http/requires.py
new file mode 100644
index 0000000..17ea6b7
--- /dev/null
+++ b/kubernetes-master/hooks/relations/http/requires.py
@@ -0,0 +1,76 @@
+import json
+
+from charms.reactive import when, when_not
+from charms.reactive import set_flag, clear_flag
+from charms.reactive import Endpoint
+
+
+class HttpRequires(Endpoint):
+
+    @when('endpoint.{endpoint_name}.changed')
+    def changed(self):
+        if any(unit.received_raw['port'] for unit in self.all_joined_units):
+            set_flag(self.expand_name('{endpoint_name}.available'))
+
+    @when_not('endpoint.{endpoint_name}.joined')
+    def broken(self):
+        clear_flag(self.expand_name('{endpoint_name}.available'))
+
+    def services(self):
+        """
+        Returns a list of available HTTP services and their associated hosts
+        and ports.
+
+        The return value is a list of dicts of the following form::
+
+            [
+                {
+                    'service_name': name_of_service,
+                    'hosts': [
+                        {
+                            'hostname': address_of_host,
+                            'private-address': private_address_of_host,
+                            'port': port_for_host,
+                        },
+                        # ...
+                    ],
+                },
+                # ...
+            ]
+        """
+        def build_service_host(data):
+            private_address = data['private-address']
+            host = data['hostname'] or private_address
+            if host and data['port']:
+                return (host, private_address, data['port'])
+            else:
+                return None
+
+        services = {}
+        for relation in self.relations:
+            service_name = relation.application_name
+            service = services.setdefault(service_name, {
+                'service_name': service_name,
+                'hosts': [],
+            })
+            host_set = set()  # dedupe (host, private-address, port) tuples
+            for unit in relation.joined_units:
+                data = unit.received_raw
+                host = build_service_host(data)
+                if host:
+                    host_set.add(host)
+
+                # merge hosts packed into the 'extended_data' JSON blob too
+                if 'extended_data' in data:
+                    for ed in json.loads(data['extended_data']):
+                        host = build_service_host(ed)
+                        if host:
+                            host_set.add(host)
+
+            service['hosts'] = [
+                {'hostname': h, 'private-address': pa, 'port': p}
+                for h, pa, p in sorted(host_set)
+            ]
+
+        ret = [s for s in services.values() if s['hosts']]
+        return ret
diff --git a/kubernetes-master/hooks/relations/keystone-credentials/.gitignore b/kubernetes-master/hooks/relations/keystone-credentials/.gitignore
new file mode 100644
index 0000000..172bf57
--- /dev/null
+++ b/kubernetes-master/hooks/relations/keystone-credentials/.gitignore
@@ -0,0 +1 @@
+.tox
diff --git a/kubernetes-master/hooks/relations/keystone-credentials/.gitreview b/kubernetes-master/hooks/relations/keystone-credentials/.gitreview
new file mode 100644
index 0000000..b9fc7e4
--- /dev/null
+++ b/kubernetes-master/hooks/relations/keystone-credentials/.gitreview
@@ -0,0 +1,4 @@
+[gerrit]
+host=review.opendev.org
+port=29418
+project=openstack/charm-interface-keystone-credentials
diff --git a/kubernetes-master/hooks/relations/keystone-credentials/.stestr.conf b/kubernetes-master/hooks/relations/keystone-credentials/.stestr.conf
new file mode 100644
index 0000000..5fcccac
--- /dev/null
+++ b/kubernetes-master/hooks/relations/keystone-credentials/.stestr.conf
@@ -0,0 +1,3 @@
+[DEFAULT]
+test_path=./unit_tests
+top_dir=./
diff --git a/kubernetes-master/hooks/relations/keystone-credentials/.zuul.yaml b/kubernetes-master/hooks/relations/keystone-credentials/.zuul.yaml
new file mode 100644
index 0000000..23d3066
--- /dev/null
+++ b/kubernetes-master/hooks/relations/keystone-credentials/.zuul.yaml
@@ -0,0 +1,3 @@
+- project:
+ templates:
+ - python-charm-interface-jobs
diff --git a/kubernetes-master/hooks/relations/keystone-credentials/__init__.py b/kubernetes-master/hooks/relations/keystone-credentials/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/kubernetes-master/hooks/relations/keystone-credentials/copyright b/kubernetes-master/hooks/relations/keystone-credentials/copyright
new file mode 100644
index 0000000..5a49dcb
--- /dev/null
+++ b/kubernetes-master/hooks/relations/keystone-credentials/copyright
@@ -0,0 +1,21 @@
+Format: http://www.debian.org/doc/packaging-manuals/copyright-format/1.0
+
+Files: *
+Copyright: 2015, Canonical Ltd.
+License: Apache-2.0
+
+License: Apache-2.0
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+ .
+ http://www.apache.org/licenses/LICENSE-2.0
+ .
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+ .
+ On Debian-based systems the full text of the Apache version 2.0 license
+ can be found in `/usr/share/common-licenses/Apache-2.0'.
diff --git a/kubernetes-master/hooks/relations/keystone-credentials/interface.yaml b/kubernetes-master/hooks/relations/keystone-credentials/interface.yaml
new file mode 100644
index 0000000..5d99a86
--- /dev/null
+++ b/kubernetes-master/hooks/relations/keystone-credentials/interface.yaml
@@ -0,0 +1,16 @@
+name: keystone-credentials
+summary: >
+ Interface for integrating with Keystone identity credentials
+ Charms use this relation to obtain keystone credentials
+ without creating a service catalog entry. Set 'username'
+ only on the relation and keystone will set defaults and
+ return authentication details. Possible relation settings:
+ username: Username to be created.
+ project: Project (tenant) name to be created. Defaults to services
+ project.
+ requested_roles: Comma delimited list of roles to be created
+ requested_grants: Comma delimited list of roles to be granted.
+ Defaults to Admin role.
+ domain: Keystone v3 domain the user will be created in. Defaults
+ to the Default domain.
+maintainer: OpenStack Charmers
diff --git a/kubernetes-master/hooks/relations/keystone-credentials/provides.py b/kubernetes-master/hooks/relations/keystone-credentials/provides.py
new file mode 100644
index 0000000..e5a9dec
--- /dev/null
+++ b/kubernetes-master/hooks/relations/keystone-credentials/provides.py
@@ -0,0 +1,35 @@
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from charms.reactive import RelationBase
+from charms.reactive import hook
+from charms.reactive import scopes
+
+
+class KeystoneProvides(RelationBase):
+    scope = scopes.GLOBAL
+
+    @hook('{provides:keystone-credentials}-relation-joined')
+    def joined(self):
+        self.set_flag('{relation_name}.connected')  # NOTE(review): set_flag on RelationBase is version-dependent; sibling requires.py uses set_state — confirm
+
+    @hook('{provides:keystone-credentials}-relation-{broken,departed}')
+    def departed(self):
+        self.clear_flag('{relation_name}.connected')  # see NOTE(review) above re: clear_flag vs remove_state
+
+    def expose_credentials(self, credentials):
+        """Expose Keystone credentials to related units.
+
+        :param credentials: The Keystone credentials to be exposed.
+        :type credentials: dict
+        """
+        self.set_remote(**credentials)
diff --git a/kubernetes-master/hooks/relations/keystone-credentials/requires.py b/kubernetes-master/hooks/relations/keystone-credentials/requires.py
new file mode 100644
index 0000000..93c7a53
--- /dev/null
+++ b/kubernetes-master/hooks/relations/keystone-credentials/requires.py
@@ -0,0 +1,141 @@
+#!/usr/bin/python
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from charmhelpers.core import hookenv
+from charms.reactive import RelationBase
+from charms.reactive import hook
+from charms.reactive import scopes
+
+
+class KeystoneRequires(RelationBase):
+    scope = scopes.GLOBAL
+
+    # These remote data fields will be automatically mapped to accessors
+    # with a basic documentation string provided.
+
+    auto_accessors = ['private-address', 'credentials_host',
+                      'credentials_protocol', 'credentials_port',
+                      'credentials_project', 'credentials_username',
+                      'credentials_password', 'credentials_project_id',
+                      'credentials_project_domain_id',
+                      'credentials_user_domain_id',
+                      'credentials_project_domain_name',
+                      'credentials_user_domain_name',
+                      'api_version', 'auth_host', 'auth_protocol', 'auth_port',
+                      'region', 'ca_cert', 'https_keystone']
+
+    @hook('{requires:keystone-credentials}-relation-joined')
+    def joined(self):
+        self.set_state('{relation_name}.connected')
+        self.update_state()
+
+    def update_state(self):
+        """Update the states of the relations based on the data that the
+        relation has.
+
+        If the :meth:`base_data_complete` is False then all of the states
+        are removed. Otherwise, the individual states are set according to
+        their own data methods.
+        """
+        base_complete = self.base_data_complete()
+        states = {
+            '{relation_name}.available': True,
+            '{relation_name}.available.ssl': self.ssl_data_complete(),
+            '{relation_name}.available.auth': self.auth_data_complete()
+        }
+        for k, v in states.items():
+            if base_complete and v:
+                self.set_state(k)
+            else:
+                self.remove_state(k)
+
+    @hook('{requires:keystone-credentials}-relation-changed')
+    def changed(self):
+        self.update_state()
+        self.set_state('{relation_name}.available.updated')
+        hookenv.atexit(self._clear_updated)  # drop .updated when hook exits
+
+    @hook('{requires:keystone-credentials}-relation-{broken,departed}')
+    def departed(self):
+        self.update_state()
+
+    def base_data_complete(self):
+        data = {
+            'private-address': self.private_address(),
+            'credentials_host': self.credentials_host(),
+            'credentials_protocol': self.credentials_protocol(),
+            'credentials_port': self.credentials_port(),
+            'api_version': self.api_version(),
+            'auth_host': self.auth_host(),
+            'auth_protocol': self.auth_protocol(),
+            'auth_port': self.auth_port(),
+        }
+        if all(data.values()):
+            return True
+        return False
+
+    def auth_data_complete(self):
+        data = {
+            'credentials_project': self.credentials_project(),
+            'credentials_username': self.credentials_username(),
+            'credentials_password': self.credentials_password(),
+            'credentials_project_id': self.credentials_project_id(),
+        }
+        if all(data.values()):
+            return True
+        return False
+
+    def ssl_data_complete(self):
+        data = {
+            'https_keystone': self.https_keystone(),
+            'ca_cert': self.ca_cert(),
+        }
+        for value in data.values():
+            if not value or value == '__null__':
+                return False
+        return True
+
+    def request_credentials(self, username, project=None, region=None,
+                            requested_roles=None, requested_grants=None,
+                            domain=None):
+        """
+        Request credentials from Keystone
+
+        :side effect: set requested parameters on the identity-credentials
+                      relation
+
+        Required parameter
+        :param username: Username to be created.
+
+        Optional parameters
+        :param project: Project (tenant) name to be created. Defaults to
+                        services project.
+        :param requested_roles: Comma delimited list of roles to be created
+        :param requested_grants: Comma delimited list of roles to be granted.
+                                 Defaults to Admin role.
+        :param domain: Keystone v3 domain the user will be created in. Defaults
+                       to the Default domain.
+        """
+        relation_info = {
+            'username': username,
+            'project': project,
+            'requested_roles': requested_roles,
+            'requested_grants': requested_grants,
+            'domain': domain,
+        }
+
+        self.set_local(**relation_info)
+        self.set_remote(**relation_info)
+
+    def _clear_updated(self):
+        self.remove_state('{relation_name}.available.updated')
diff --git a/kubernetes-master/hooks/relations/keystone-credentials/test-requirements.txt b/kubernetes-master/hooks/relations/keystone-credentials/test-requirements.txt
new file mode 100644
index 0000000..9ea2415
--- /dev/null
+++ b/kubernetes-master/hooks/relations/keystone-credentials/test-requirements.txt
@@ -0,0 +1,2 @@
+flake8>=2.2.4
+stestr>=2.2.0
diff --git a/kubernetes-master/hooks/relations/kube-control/.travis.yml b/kubernetes-master/hooks/relations/kube-control/.travis.yml
new file mode 100644
index 0000000..d2be8be
--- /dev/null
+++ b/kubernetes-master/hooks/relations/kube-control/.travis.yml
@@ -0,0 +1,9 @@
+language: python
+python:
+ - "3.5"
+ - "3.6"
+ - "3.7"
+install:
+ - pip install tox-travis
+script:
+ - tox
diff --git a/kubernetes-master/hooks/relations/kube-control/README.md b/kubernetes-master/hooks/relations/kube-control/README.md
new file mode 100644
index 0000000..6f9ecb7
--- /dev/null
+++ b/kubernetes-master/hooks/relations/kube-control/README.md
@@ -0,0 +1,171 @@
+# kube-control interface
+
+This interface provides communication between master and workers in a
+Kubernetes cluster.
+
+
+## Provides (kubernetes-master side)
+
+
+### States
+
+* `kube-control.connected`
+
+ Enabled when a worker has joined the relation.
+
+* `kube-control.gpu.available`
+
+ Enabled when any worker has indicated that it is running in gpu mode.
+
+* `kube-control.departed`
+
+ Enabled when any worker has indicated that it is leaving the cluster.
+
+
+* `kube-control.auth.requested`
+
+ Enabled when an authentication credential is requested. This state is
+ temporary and will be removed once the units authentication request has
+ been fulfilled.
+
+### Methods
+
+* `kube_control.set_dns(port, domain, sdn_ip)`
+
+ Sends DNS info to the connected worker(s).
+
+
+* `kube_control.auth_user()`
+
+ Returns a list of the requested username and group requested for
+ authentication.
+
+* `kube_control.sign_auth_request(scope, user, kubelet_token, proxy_token, client_token)`
+
+ Sends authentication tokens to the unit scope for the requested user
+ and kube-proxy services.
+
+* `kube_control.set_cluster_tag(cluster_tag)`
+
+ Sends a tag used to identify resources that are part of the cluster to the
+ connected worker(s).
+
+* `kube_control.flush_departed()`
+
+ Returns the unit departing the kube_control relationship so you can do any
+ post removal cleanup. Such as removing authentication tokens for the unit.
+ Invoking this method will also remove the `kube-control.departed` state
+
+* `kube_control.set_registry_location(registry_location)`
+ Sends the container image registry location to the connected worker(s).
+
+### Examples
+
+```python
+
+@when('kube-control.connected')
+def send_dns(kube_control):
+ # send port, domain, sdn_ip to the remote side
+ kube_control.set_dns(53, "cluster.local", "10.1.0.10")
+
+@when('kube-control.gpu.available')
+def on_gpu_available(kube_control):
+ # The remote side is gpu-enable, handle it somehow
+ assert kube_control.get_gpu() == True
+
+
+@when('kube-control.departed')
+@when('leadership.is_leader')
+def flush_auth_for_departed(kube_control):
+ ''' Unit has left the cluster and needs to have its authentication
+ tokens removed from the token registry '''
+ departing_unit = kube_control.flush_departed()
+
+```
+
+## Requires (kubernetes-worker side)
+
+
+### States
+
+* `kube-control.connected`
+
+ Enabled when a master has joined the relation.
+
+* `kube-control.dns.available`
+
+ Enabled when DNS info is available from the master.
+
+* `kube-control.auth.available`
+
+ Enabled when authentication credentials are present from the master.
+
+* `kube-control.cluster_tag.available`
+
+ Enabled when cluster tag is present from the master.
+
+* `kube-control.registry_location.available`
+
+ Enabled when registry location is present from the master.
+
+### Methods
+
+* `kube_control.get_dns()`
+
+ Returns a dictionary of DNS info sent by the master. The keys in the
+ dict are: domain, private-address, sdn-ip, port.
+
+* `kube_control.set_gpu(enabled=True)`
+
+ Tell the master that we are gpu-enabled.
+
+* `kube_control.get_auth_credentials(user)`
+
+ Returns a dict with the users authentication credentials.
+
+* `set_auth_request(kubelet, group='system:nodes')`
+
+ Issue an authentication request against the master to receive token based
+ auth credentials in return.
+
+* `kube_control.get_cluster_tag()`
+
+ Returns the cluster tag provided by the master.
+
+* `kube_control.get_registry_location()`
+
+ Returns the container image registry location provided by the master.
+
+### Examples
+
+```python
+
+@when('kube-control.dns.available')
+def on_dns_available(kube_control):
+ # Remote side has sent DNS info
+ dns = kube_control.get_dns()
+    print(dns['domain'])
+    print(dns['private-address'])
+    print(dns['sdn-ip'])
+    print(dns['port'])
+
+@when('kube-control.connected')
+def send_gpu(kube_control):
+ # Tell the master that we're gpu-enabled
+ kube_control.set_gpu(True)
+
+@when('kube-control.auth.available')
+def display_auth_tokens(kube_control):
+ # Remote side has sent auth info
+ auth = kube_control.get_auth_credentials('root')
+ print(auth['kubelet_token'])
+ print(auth['proxy_token'])
+ print(auth['client_token'])
+
+@when('kube-control.connected')
+@when_not('kube-control.auth.available')
+def request_auth_credentials(kube_control):
+ # Request an admin user with sudo level access named 'root'
+ kube_control.set_auth_request('root', group='system:masters')
+
+```
diff --git a/kubernetes-master/hooks/relations/kube-control/__init__.py b/kubernetes-master/hooks/relations/kube-control/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/kubernetes-master/hooks/relations/kube-control/interface.yaml b/kubernetes-master/hooks/relations/kube-control/interface.yaml
new file mode 100644
index 0000000..2f0b187
--- /dev/null
+++ b/kubernetes-master/hooks/relations/kube-control/interface.yaml
@@ -0,0 +1,6 @@
+name: kube-control
+summary: Provides master-worker communication.
+version: 1
+maintainer: "Tim Van Steenburgh "
+ignore:
+- tests
diff --git a/kubernetes-master/hooks/relations/kube-control/provides.py b/kubernetes-master/hooks/relations/kube-control/provides.py
new file mode 100644
index 0000000..9d3a829
--- /dev/null
+++ b/kubernetes-master/hooks/relations/kube-control/provides.py
@@ -0,0 +1,152 @@
+#!/usr/local/sbin/charm-env python3
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from charms.reactive import (
+ Endpoint,
+ toggle_flag,
+ set_flag,
+ data_changed
+)
+
+from charmhelpers.core import (
+ hookenv,
+ unitdata
+)
+
+
+DB = unitdata.kv()
+
+
+class KubeControlProvider(Endpoint):
+    """
+    Implements the kubernetes-master side of the kube-control interface.
+    """
+    def manage_flags(self):
+        toggle_flag(self.expand_name('{endpoint_name}.connected'),
+                    self.is_joined)
+        toggle_flag(self.expand_name('{endpoint_name}.gpu.available'),
+                    self.is_joined and self._get_gpu())
+        requests_data_id = self.expand_name('{endpoint_name}.requests')
+        requests = self.auth_user()
+        if data_changed(requests_data_id, requests):  # only on real changes
+            set_flag(self.expand_name('{endpoint_name}.requests.changed'))
+
+    def set_dns(self, port, domain, sdn_ip, enable_kube_dns):
+        """
+        Send DNS info to the remote units.
+
+        We'll need the port, domain, and sdn_ip of the dns service. If
+        sdn_ip is not required in your deployment, the units private-ip
+        is available implicitly.
+        """
+        for relation in self.relations:
+            relation.to_publish_raw.update({
+                'port': port,
+                'domain': domain,
+                'sdn-ip': sdn_ip,
+                'enable-kube-dns': enable_kube_dns,
+            })
+
+    def auth_user(self):
+        """
+        Return the kubelet_user value on the wire from the requestors.
+        """
+        requests = []
+
+        for unit in self.all_joined_units:
+            requests.append(
+                (unit.unit_name,
+                 {'user': unit.received_raw.get('kubelet_user'),
+                  'group': unit.received_raw.get('auth_group')})
+            )
+
+        requests.sort()  # deterministic order for data_changed comparisons
+        return requests
+
+    def sign_auth_request(self, scope, user, kubelet_token, proxy_token,
+                          client_token):
+        """
+        Send authorization tokens to the requesting unit.
+        """
+        cred = {
+            'scope': scope,
+            'kubelet_token': kubelet_token,
+            'proxy_token': proxy_token,
+            'client_token': client_token
+        }
+
+        if not DB.get('creds'):
+            DB.set('creds', {})
+
+        all_creds = DB.get('creds')
+        all_creds[user] = cred
+        DB.set('creds', all_creds)  # persist across hook invocations
+
+        for relation in self.relations:
+            relation.to_publish.update({
+                'creds': all_creds
+            })
+
+    def clear_creds(self):
+        """
+        Clear creds from the relation. This is used by non-leader units to stop
+        advertising creds so that the leader can assume full control of them.
+        """
+        DB.unset('creds')
+        for relation in self.relations:
+            relation.to_publish_raw['creds'] = ''
+
+    def _get_gpu(self):
+        """
+        Return True if any remote worker is gpu-enabled.
+        """
+        for unit in self.all_joined_units:
+            if unit.received_raw.get('gpu') == 'True':
+                hookenv.log('Unit {} has gpu enabled'.format(unit))
+                return True
+
+        return False
+
+    def set_cluster_tag(self, cluster_tag):
+        """
+        Send the cluster tag to the remote units.
+        """
+        for relation in self.relations:
+            relation.to_publish_raw.update({
+                'cluster-tag': cluster_tag
+            })
+
+    def set_registry_location(self, registry_location):
+        """
+        Send the registry location to the remote units.
+        """
+        for relation in self.relations:
+            relation.to_publish_raw.update({
+                'registry-location': registry_location
+            })
+
+    def set_cohort_keys(self, cohort_keys):
+        """
+        Send the cohort snapshot keys.
+        """
+        for relation in self.relations:
+            relation.to_publish['cohort-keys'] = cohort_keys
+
+    def set_default_cni(self, default_cni):
+        """
+        Send the default CNI. The default_cni value should be a string
+        containing the name of a related CNI application to use as the
+        default CNI. For example: "flannel" or "calico". If no default has
+        been chosen then "" can be sent instead.
+        """
+        for relation in self.relations:
+            relation.to_publish['default-cni'] = default_cni
diff --git a/kubernetes-master/hooks/relations/kube-control/requires.py b/kubernetes-master/hooks/relations/kube-control/requires.py
new file mode 100644
index 0000000..72ce1f6
--- /dev/null
+++ b/kubernetes-master/hooks/relations/kube-control/requires.py
@@ -0,0 +1,149 @@
+#!/usr/local/sbin/charm-env python3
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from charms.reactive import (
+ Endpoint,
+ toggle_flag,
+)
+
+from charmhelpers.core.hookenv import log
+
+
+class KubeControlRequirer(Endpoint):
+    """
+    Implements the kubernetes-worker side of the kube-control interface.
+    """
+    def manage_flags(self):
+        """
+        Set states corresponding to the data we have.
+        """
+        toggle_flag(
+            self.expand_name('{endpoint_name}.connected'),
+            self.is_joined)
+        toggle_flag(
+            self.expand_name('{endpoint_name}.dns.available'),
+            self.is_joined and self.dns_ready())
+        toggle_flag(
+            self.expand_name('{endpoint_name}.auth.available'),
+            self.is_joined and self._has_auth_credentials())
+        toggle_flag(
+            self.expand_name('{endpoint_name}.cluster_tag.available'),
+            self.is_joined and self.get_cluster_tag())
+        toggle_flag(
+            self.expand_name('{endpoint_name}.registry_location.available'),
+            self.is_joined and self.get_registry_location())
+        toggle_flag(
+            self.expand_name('{endpoint_name}.cohort_keys.available'),
+            self.is_joined and self.cohort_keys)
+        toggle_flag(
+            self.expand_name('{endpoint_name}.default_cni.available'),
+            self.is_joined and self.get_default_cni() is not None)
+
+    def get_auth_credentials(self, user):
+        """
+        Return the authentication credentials.
+        """
+        rx = {}
+        for unit in self.all_joined_units:
+            rx.update(unit.received.get('creds', {}))
+        if not rx:
+            return None
+
+        if user in rx:
+            return {
+                'user': user,
+                'kubelet_token': rx[user]['kubelet_token'],
+                'proxy_token': rx[user]['proxy_token'],
+                'client_token': rx[user]['client_token']
+            }
+        else:
+            return None
+
+    def get_dns(self):
+        """
+        Return DNS info provided by the master.
+        """
+        rx = self.all_joined_units.received_raw
+
+        return {
+            'port': rx.get('port'),
+            'domain': rx.get('domain'),
+            'sdn-ip': rx.get('sdn-ip'),
+            'enable-kube-dns': rx.get('enable-kube-dns'),
+        }
+
+    def dns_ready(self):
+        """
+        Return True if we have all DNS info from the master.
+        """
+        keys = ['port', 'domain', 'sdn-ip', 'enable-kube-dns']
+        dns_info = self.get_dns()
+        return (set(dns_info.keys()) == set(keys) and
+                dns_info['enable-kube-dns'] is not None)
+
+    def set_auth_request(self, kubelet, group='system:nodes'):
+        """
+        Tell the master that we are requesting auth, and to use this
+        hostname for the kubelet system account.
+
+        Param group - Determines the level of elevated privileges of the
+        requested user. Can be overridden to request sudo level access on the
+        cluster via changing to system:masters.
+        """
+        for relation in self.relations:
+            relation.to_publish_raw.update({
+                'kubelet_user': kubelet,
+                'auth_group': group
+            })
+
+    def set_gpu(self, enabled=True):
+        """
+        Tell the master that we're gpu-enabled (or not).
+        """
+        log('Setting gpu={} on kube-control relation'.format(enabled))
+        for relation in self.relations:
+            relation.to_publish_raw.update({
+                'gpu': enabled
+            })
+
+    def _has_auth_credentials(self):
+        """
+        Predicate method to signal we have authentication credentials.
+        """
+        if self.all_joined_units.received_raw.get('creds'):
+            return True  # NOTE(review): falls through to None (falsy) otherwise
+
+    def get_cluster_tag(self):
+        """
+        Tag for identifying resources that are part of the cluster.
+        """
+        return self.all_joined_units.received_raw.get('cluster-tag')
+
+    def get_registry_location(self):
+        """
+        URL for container image registry.
+        """
+        return self.all_joined_units.received_raw.get('registry-location')
+
+    @property
+    def cohort_keys(self):
+        """
+        The cohort snapshot keys sent by the masters.
+        """
+        return self.all_joined_units.received['cohort-keys']
+
+    def get_default_cni(self):
+        """
+        Default CNI network to use.
+        """
+        return self.all_joined_units.received['default-cni']
diff --git a/kubernetes-master/hooks/relations/kube-dns/README.md b/kubernetes-master/hooks/relations/kube-dns/README.md
new file mode 100644
index 0000000..15ce8bb
--- /dev/null
+++ b/kubernetes-master/hooks/relations/kube-dns/README.md
@@ -0,0 +1,21 @@
+# Kube-DNS
+
+This interface allows a DNS provider, such as CoreDNS, to provide name
+resolution for a Kubernetes cluster.
+
+(Note: this interface was previously used by the Kubernetes Master charm to
+communicate the DNS provider info to the Kubernetes Worker charm, but that
+usage was folded into the `kube-control` interface.)
+
+
+# Provides
+
+The provider should look for the `{endpoint_name}.connected` flag and call
+the `set_dns_info` method with the `domain`, `sdn_ip`, and `port` info (note:
+these must be provided as keyword arguments).
+
+# Requires
+
+The requirer should look for the `{endpoint_name}.available` flag and call the
+`details` method, which will return a dictionary with the `domain`, `sdn-ip`,
+and `port` keys.
diff --git a/kubernetes-master/hooks/relations/kube-dns/__init__.py b/kubernetes-master/hooks/relations/kube-dns/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/kubernetes-master/hooks/relations/kube-dns/interface.yaml b/kubernetes-master/hooks/relations/kube-dns/interface.yaml
new file mode 100644
index 0000000..2de32b0
--- /dev/null
+++ b/kubernetes-master/hooks/relations/kube-dns/interface.yaml
@@ -0,0 +1,4 @@
+name: kube-dns
+summary: provides the kubernetes dns settings
+version: 1
+maintainer: "Charles Butler "
diff --git a/kubernetes-master/hooks/relations/kube-dns/provides.py b/kubernetes-master/hooks/relations/kube-dns/provides.py
new file mode 100644
index 0000000..a7199c3
--- /dev/null
+++ b/kubernetes-master/hooks/relations/kube-dns/provides.py
@@ -0,0 +1,29 @@
+#!/usr/bin/python
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from charms.reactive import Endpoint, toggle_flag
+
+
+class KubeDNSProvider(Endpoint):
+ def manage_flags(self):
+ toggle_flag(self.expand_name('{endpoint_name}.connected'),
+ self.is_joined)
+
+ def set_dns_info(self, *, domain, sdn_ip, port):
+ '''Set the domain, sdn_ip, and port of the DNS provider.'''
+ for relation in self.relations:
+ relation.to_publish_raw.update({
+ 'domain': domain,
+ 'sdn-ip': sdn_ip,
+ 'port': port,
+ })
diff --git a/kubernetes-master/hooks/relations/kube-dns/requires.py b/kubernetes-master/hooks/relations/kube-dns/requires.py
new file mode 100644
index 0000000..9595c4a
--- /dev/null
+++ b/kubernetes-master/hooks/relations/kube-dns/requires.py
@@ -0,0 +1,36 @@
+#!/usr/bin/python
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from charms.reactive import Endpoint, toggle_flag
+
+
+class KubeDNSRequireer(Endpoint):
+ def manage_flags(self):
+ '''Set flags according to whether we have DNS provider details.'''
+ toggle_flag(self.expand_name('{endpoint_name}.available'),
+ self.has_info())
+
+ def details(self):
+ '''Return the DNS provider details.'''
+ return {
+ 'domain': self._get_value('domain'),
+ 'sdn-ip': self._get_value('sdn-ip'),
+ 'port': self._get_value('port'),
+ }
+
+ def has_info(self):
+ ''' Determine if we have all needed info'''
+ return all(self.details().values())
+
+ def _get_value(self, key):
+ return self.all_joined_units.received_raw.get(key)
diff --git a/kubernetes-master/hooks/relations/kube-masters/README.md b/kubernetes-master/hooks/relations/kube-masters/README.md
new file mode 100644
index 0000000..43cfa90
--- /dev/null
+++ b/kubernetes-master/hooks/relations/kube-masters/README.md
@@ -0,0 +1,45 @@
+# kube-masters interface
+
+This interface provides communication amongst kubernetes-masters in a cluster.
+
+## States
+
+* `kube-masters.connected`
+
+ Enabled when any kubernetes-master unit has joined the relation.
+
+* `kube-masters.cohorts.ready`
+
+ Enabled when all peers have snap cohort data.
+
+### Methods and Properties
+
+* `kube-masters.set_cohort_keys(cohort_keys)`
+
+ Set a dictionary of cohort keys created by the snap layer.
+
+* `kube-masters.cohort_keys`
+
+ Dictionary of all cohort keys sent by peers.
+
+### Examples
+
+```python
+
+@when('kube-masters.connected')
+def agree_on_cohorts():
+ kube_masters = endpoint_from_flag('kube-masters.connected')
+ cohort_keys = create_cohorts_for_my_snaps()
+ kube_masters.set_cohort_keys(cohort_keys)
+
+@when('kube-masters.cohorts.ready',
+ 'kube-control.connected')
+def send_cohorts_to_workers():
+ kube_masters = endpoint_from_flag('kube-masters.cohorts.ready')
+ cohort_keys = kube_masters.cohort_keys
+
+ kube_control = endpoint_from_flag('kube-control.connected')
+ # The following set method is defined in interface-kube-control
+ kube_control.set_cohort_keys(cohort_keys)
+
+```
diff --git a/kubernetes-master/hooks/relations/kube-masters/__init__.py b/kubernetes-master/hooks/relations/kube-masters/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/kubernetes-master/hooks/relations/kube-masters/interface.yaml b/kubernetes-master/hooks/relations/kube-masters/interface.yaml
new file mode 100644
index 0000000..bd346d9
--- /dev/null
+++ b/kubernetes-master/hooks/relations/kube-masters/interface.yaml
@@ -0,0 +1,4 @@
+name: kube-masters
+summary: Provides master peer communication.
+version: 1
+maintainer: "Kevin W. Monroe "
diff --git a/kubernetes-master/hooks/relations/kube-masters/peers.py b/kubernetes-master/hooks/relations/kube-masters/peers.py
new file mode 100644
index 0000000..0e3021e
--- /dev/null
+++ b/kubernetes-master/hooks/relations/kube-masters/peers.py
@@ -0,0 +1,54 @@
+#!/usr/local/sbin/charm-env python3
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from charms.reactive import (
+ Endpoint,
+ toggle_flag,
+)
+
+from charmhelpers.core.hookenv import log
+
+
+class KubeMastersPeer(Endpoint):
+ """
+ Implements peering for kubernetes-master units.
+ """
+ def manage_flags(self):
+ """
+ Set states corresponding to the data we have.
+ """
+ toggle_flag(
+ self.expand_name('{endpoint_name}.connected'),
+ self.is_joined)
+ toggle_flag(
+ self.expand_name('{endpoint_name}.cohorts.ready'),
+ self.is_joined and self._peers_have_cohorts())
+
+ def _peers_have_cohorts(self):
+ """
+ Return True if all peers have cohort keys.
+ """
+ for unit in self.all_joined_units:
+ if not unit.received.get('cohort-keys'):
+ log('Unit {} does not yet have cohort-keys'.format(unit))
+ return False
+
+ log('All units have cohort-keys')
+ return True
+
+ def set_cohort_keys(self, cohort_keys):
+ """
+ Send the cohort snapshot keys.
+ """
+ for relation in self.relations:
+ relation.to_publish['cohort-keys'] = cohort_keys
diff --git a/kubernetes-master/hooks/relations/kubernetes-cni/.gitignore b/kubernetes-master/hooks/relations/kubernetes-cni/.gitignore
new file mode 100644
index 0000000..e43b0f9
--- /dev/null
+++ b/kubernetes-master/hooks/relations/kubernetes-cni/.gitignore
@@ -0,0 +1 @@
+.DS_Store
diff --git a/kubernetes-master/hooks/relations/kubernetes-cni/.travis.yml b/kubernetes-master/hooks/relations/kubernetes-cni/.travis.yml
new file mode 100644
index 0000000..d2be8be
--- /dev/null
+++ b/kubernetes-master/hooks/relations/kubernetes-cni/.travis.yml
@@ -0,0 +1,9 @@
+language: python
+python:
+ - "3.5"
+ - "3.6"
+ - "3.7"
+install:
+ - pip install tox-travis
+script:
+ - tox
diff --git a/kubernetes-master/hooks/relations/kubernetes-cni/README.md b/kubernetes-master/hooks/relations/kubernetes-cni/README.md
new file mode 100644
index 0000000..e69de29
diff --git a/kubernetes-master/hooks/relations/kubernetes-cni/__init__.py b/kubernetes-master/hooks/relations/kubernetes-cni/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/kubernetes-master/hooks/relations/kubernetes-cni/interface.yaml b/kubernetes-master/hooks/relations/kubernetes-cni/interface.yaml
new file mode 100644
index 0000000..7e3c123
--- /dev/null
+++ b/kubernetes-master/hooks/relations/kubernetes-cni/interface.yaml
@@ -0,0 +1,6 @@
+name: kubernetes-cni
+summary: Interface for relating various CNI implementations
+version: 0
+maintainer: "George Kraft "
+ignore:
+- tests
diff --git a/kubernetes-master/hooks/relations/kubernetes-cni/provides.py b/kubernetes-master/hooks/relations/kubernetes-cni/provides.py
new file mode 100644
index 0000000..0b4aada
--- /dev/null
+++ b/kubernetes-master/hooks/relations/kubernetes-cni/provides.py
@@ -0,0 +1,85 @@
+#!/usr/bin/python
+
+from charmhelpers.core import hookenv
+from charms.reactive import Endpoint
+from charms.reactive import toggle_flag, is_flag_set, clear_flag, set_flag
+
+
+class CNIPluginProvider(Endpoint):
+ def manage_flags(self):
+ toggle_flag(self.expand_name('{endpoint_name}.connected'),
+ self.is_joined)
+ toggle_flag(self.expand_name('{endpoint_name}.available'),
+ self.config_available())
+ if is_flag_set(self.expand_name('endpoint.{endpoint_name}.changed')):
+ clear_flag(self.expand_name('{endpoint_name}.configured'))
+ clear_flag(self.expand_name('endpoint.{endpoint_name}.changed'))
+
+ def set_config(self, is_master, kubeconfig_path):
+ ''' Relays a dict of kubernetes configuration information. '''
+ for relation in self.relations:
+ relation.to_publish_raw.update({
+ 'is_master': is_master,
+ 'kubeconfig_path': kubeconfig_path
+ })
+ set_flag(self.expand_name('{endpoint_name}.configured'))
+
+ def config_available(self):
+ ''' Ensures all config from the CNI plugin is available. '''
+ goal_state = hookenv.goal_state()
+ related_apps = [
+ app for app in goal_state.get('relations', {}).get(self.endpoint_name, '')
+ if '/' not in app
+ ]
+ if not related_apps:
+ return False
+ configs = self.get_configs()
+ return all(
+ 'cidr' in config and 'cni-conf-file' in config
+ for config in [
+ configs.get(related_app, {}) for related_app in related_apps
+ ]
+ )
+
+ def get_config(self, default=None):
+ ''' Get CNI config for one related application.
+
+ If default is specified, and there is a related application with a
+ matching name, then that application is chosen. Otherwise, the
+ application is chosen alphabetically.
+
+ Whichever application is chosen, that application's CNI config is
+ returned.
+ '''
+ configs = self.get_configs()
+ if not configs:
+ return {}
+ elif default and default not in configs:
+ msg = 'relation not found for default CNI %s, ignoring' % default
+ hookenv.log(msg, level='WARN')
+ return self.get_config()
+ elif default:
+ return configs.get(default, {})
+ else:
+ return configs.get(sorted(configs)[0], {})
+
+ def get_configs(self):
+ ''' Get CNI configs for all related applications.
+
+ This returns a mapping of application names to CNI configs. Here's an
+ example return value:
+ {
+ 'flannel': {
+ 'cidr': '10.1.0.0/16',
+ 'cni-conf-file': '10-flannel.conflist'
+ },
+ 'calico': {
+ 'cidr': '192.168.0.0/16',
+ 'cni-conf-file': '10-calico.conflist'
+ }
+ }
+ '''
+ return {
+ relation.application_name: relation.joined_units.received_raw
+ for relation in self.relations if relation.application_name
+ }
diff --git a/kubernetes-master/hooks/relations/kubernetes-cni/requires.py b/kubernetes-master/hooks/relations/kubernetes-cni/requires.py
new file mode 100644
index 0000000..039b912
--- /dev/null
+++ b/kubernetes-master/hooks/relations/kubernetes-cni/requires.py
@@ -0,0 +1,45 @@
+#!/usr/bin/python
+
+from charms.reactive import Endpoint
+from charms.reactive import when_any, when_not
+from charms.reactive import set_state, remove_state
+
+
+class CNIPluginClient(Endpoint):
+    # CNI-plugin side of the kubernetes-cni interface: consumes the
+    # kubernetes config published by the master/worker and publishes the
+    # CNI network config (cidr + conf file) back.
+
+    @when_any('endpoint.{endpoint_name}.joined',
+              'endpoint.{endpoint_name}.changed')
+    def changed(self):
+        ''' Indicate the relation is connected, and if the relation data is
+        set it is also available.
+
+        Derives is-master/is-worker and configured states from the
+        'is_master' value sent by the other side.
+        '''
+        set_state(self.expand_name('{endpoint_name}.connected'))
+        config = self.get_config()
+        # NOTE(review): compared against the *strings* 'True'/'False' —
+        # assumes raw relation data carries the boolean as a string;
+        # confirm against the provider's set_config().
+        if config['is_master'] == 'True':
+            set_state(self.expand_name('{endpoint_name}.is-master'))
+            set_state(self.expand_name('{endpoint_name}.configured'))
+        elif config['is_master'] == 'False':
+            set_state(self.expand_name('{endpoint_name}.is-worker'))
+            set_state(self.expand_name('{endpoint_name}.configured'))
+        else:
+            # Neither value seen yet: the other side has not published.
+            remove_state(self.expand_name('{endpoint_name}.configured'))
+        # Ack the automatic 'changed' flag so this handler only re-runs
+        # when the relation data actually changes again.
+        remove_state(self.expand_name('endpoint.{endpoint_name}.changed'))
+
+    @when_not('endpoint.{endpoint_name}.joined')
+    def broken(self):
+        ''' Indicate the relation is no longer available and not connected. '''
+        remove_state(self.expand_name('{endpoint_name}.connected'))
+        remove_state(self.expand_name('{endpoint_name}.is-master'))
+        remove_state(self.expand_name('{endpoint_name}.is-worker'))
+        remove_state(self.expand_name('{endpoint_name}.configured'))
+
+    def get_config(self):
+        ''' Get the kubernetes configuration information.
+
+        Returns the combined raw relation data from all joined units.
+        '''
+        return self.all_joined_units.received_raw
+
+    def set_config(self, cidr, cni_conf_file):
+        ''' Sets the CNI configuration information. '''
+        for relation in self.relations:
+            relation.to_publish_raw.update({
+                'cidr': cidr,
+                'cni-conf-file': cni_conf_file
+            })
diff --git a/kubernetes-master/hooks/relations/nrpe-external-master/README.md b/kubernetes-master/hooks/relations/nrpe-external-master/README.md
new file mode 100644
index 0000000..e33deb8
--- /dev/null
+++ b/kubernetes-master/hooks/relations/nrpe-external-master/README.md
@@ -0,0 +1,66 @@
+# nrpe-external-master interface
+
+Use this interface to register nagios checks in your charm layers.
+
+## Purpose
+
+This interface is designed to interoperate with the
+[nrpe-external-master](https://jujucharms.com/nrpe-external-master) subordinate charm.
+
+## How to use in your layers
+
+The event handler for `nrpe-external-master.available` is called with an object
+through which you can register your own custom nagios checks, when a relation
+is established with `nrpe-external-master:nrpe-external-master`.
+
+This object provides a method,
+
+_add_check_(args, name=_check_name_, description=_description_, context=_context_, unit=_unit_)
+
+which is called to register a nagios plugin check for your service.
+
+All arguments are required.
+
+*args* is a list of nagios plugin command line arguments, starting with the path to the plugin executable.
+
+*name* is the name of the check registered in nagios
+
+*description* is some text that describes what the check is for and what it does
+
+*context* is the nagios context name, something that identifies your application
+
+*unit* is `hookenv.local_unit()`
+
+The nrpe subordinate installs `check_http`, so you can use it like this:
+
+```
+@when('nrpe-external-master.available')
+def setup_nagios(nagios):
+ config = hookenv.config()
+ unit_name = hookenv.local_unit()
+ nagios.add_check(['/usr/lib/nagios/plugins/check_http',
+ '-I', '127.0.0.1', '-p', str(config['port']),
+ '-e', " 200 OK", '-u', '/publickey'],
+ name="check_http",
+ description="Verify my awesome service is responding",
+ context=config["nagios_context"],
+ unit=unit_name,
+ )
+```
+If your `nagios.add_check` defines a custom plugin, you will also need to restart the `nagios-nrpe-server` service.
+
+Consult the nagios documentation for more information on [how to write your own
+plugins](https://assets.nagios.com/downloads/nagioscore/docs/nagioscore/4/en/pluginapi.html)
+or [find one](https://www.nagios.org/projects/nagios-plugins/) that does what you need.
+
+## Example deployment
+
+```
+$ juju deploy your-awesome-charm
+$ juju deploy nrpe-external-master --config site-nagios.yaml
+$ juju add-relation your-awesome-charm nrpe-external-master
+```
+
+where `site-nagios.yaml` has the necessary configuration settings for the
+subordinate to connect to nagios.
+
diff --git a/kubernetes-master/hooks/relations/nrpe-external-master/__init__.py b/kubernetes-master/hooks/relations/nrpe-external-master/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/kubernetes-master/hooks/relations/nrpe-external-master/interface.yaml b/kubernetes-master/hooks/relations/nrpe-external-master/interface.yaml
new file mode 100644
index 0000000..859a423
--- /dev/null
+++ b/kubernetes-master/hooks/relations/nrpe-external-master/interface.yaml
@@ -0,0 +1,3 @@
+name: nrpe-external-master
+summary: Nagios interface
+version: 1
diff --git a/kubernetes-master/hooks/relations/nrpe-external-master/provides.py b/kubernetes-master/hooks/relations/nrpe-external-master/provides.py
new file mode 100644
index 0000000..b6c7f0d
--- /dev/null
+++ b/kubernetes-master/hooks/relations/nrpe-external-master/provides.py
@@ -0,0 +1,91 @@
+import datetime
+import os
+
+from charmhelpers.core import hookenv
+
+from charms.reactive import hook
+from charms.reactive import RelationBase
+from charms.reactive import scopes
+
+
+class NrpeExternalMasterProvides(RelationBase):
+    # Registers nagios checks for the nrpe-external-master subordinate by
+    # writing nrpe command files and nagios service export files locally.
+    scope = scopes.GLOBAL
+
+    @hook('{provides:nrpe-external-master}-relation-{joined,changed}')
+    def changed_nrpe(self):
+        # Relation established or data updated: announce availability.
+        self.set_state('{relation_name}.available')
+
+    @hook('{provides:nrpe-external-master}-relation-{broken,departed}')
+    def broken_nrpe(self):
+        self.remove_state('{relation_name}.available')
+
+    def add_check(self, args, name=None, description=None, context=None,
+                  servicegroups=None, unit=None):
+        # Write one nrpe command definition plus a nagios service export,
+        # remembering both file paths so removed() can clean them up.
+        #
+        # args: nagios plugin command line, starting with the executable.
+        # name: check name used in the nrpe command key and the filenames.
+        # description/context/servicegroups: nagios metadata.
+        # unit: defaults to the local unit name.
+        nagios_files = self.get_local('nagios.check.files', [])
+
+        if not unit:
+            unit = hookenv.local_unit()
+        # '/' is not valid in filenames or nagios host names.
+        unit = unit.replace('/', '-')
+        # Prefer the context/hostname published by the remote nrpe charm.
+        context = self.get_remote('nagios_host_context', context)
+        host_name = self.get_remote('nagios_hostname',
+                                    '%s-%s' % (context, unit))
+
+        check_tmpl = """
+#---------------------------------------------------
+# This file is Juju managed
+#---------------------------------------------------
+command[%(check_name)s]=%(check_args)s
+"""
+        service_tmpl = """
+#---------------------------------------------------
+# This file is Juju managed
+#---------------------------------------------------
+define service {
+    use active-service
+    host_name %(host_name)s
+    service_description %(description)s
+    check_command check_nrpe!%(check_name)s
+    servicegroups %(servicegroups)s
+}
+"""
+        check_filename = "/etc/nagios/nrpe.d/check_%s.cfg" % (name)
+        with open(check_filename, "w") as fh:
+            fh.write(check_tmpl % {
+                'check_args': ' '.join(args),
+                'check_name': name,
+            })
+        nagios_files.append(check_filename)
+
+        service_filename = "/var/lib/nagios/export/service__%s_%s.cfg" % (
+            unit, name)
+        with open(service_filename, "w") as fh:
+            # 'context' and 'unit_name' are supplied even though the
+            # template does not reference them; extra keys are harmless
+            # with %-formatting against a dict.
+            fh.write(service_tmpl % {
+                'servicegroups': servicegroups or context,
+                'context': context,
+                'description': description,
+                'check_name': name,
+                'host_name': host_name,
+                'unit_name': unit,
+            })
+        nagios_files.append(service_filename)
+
+        self.set_local('nagios.check.files', nagios_files)
+
+    def removed(self):
+        # Best-effort deletion of every file written by add_check.
+        files = self.get_local('nagios.check.files', [])
+        for f in files:
+            try:
+                os.unlink(f)
+            except Exception as e:
+                hookenv.log("failed to remove %s: %s" % (f, e))
+        self.set_local('nagios.check.files', [])
+        # NOTE(review): removes a '{relation_name}.removed' state that is
+        # never set in this class — confirm whether '.available' was the
+        # intended target.
+        self.remove_state('{relation_name}.removed')
+
+    def added(self):
+        # A newly added relation is handled the same as an update.
+        self.updated()
+
+    def updated(self):
+        # Publish a fresh timestamp so the remote side observes a change.
+        relation_info = {
+            'timestamp': datetime.datetime.now().isoformat(),
+        }
+        self.set_remote(**relation_info)
diff --git a/kubernetes-master/hooks/relations/nrpe-external-master/requires.py b/kubernetes-master/hooks/relations/nrpe-external-master/requires.py
new file mode 100644
index 0000000..e69de29
diff --git a/kubernetes-master/hooks/relations/openstack-integration/.gitignore b/kubernetes-master/hooks/relations/openstack-integration/.gitignore
new file mode 100644
index 0000000..5f9f2c5
--- /dev/null
+++ b/kubernetes-master/hooks/relations/openstack-integration/.gitignore
@@ -0,0 +1,3 @@
+.tox
+__pycache__
+*.pyc
diff --git a/kubernetes-master/hooks/relations/openstack-integration/LICENSE b/kubernetes-master/hooks/relations/openstack-integration/LICENSE
new file mode 100644
index 0000000..d645695
--- /dev/null
+++ b/kubernetes-master/hooks/relations/openstack-integration/LICENSE
@@ -0,0 +1,202 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/kubernetes-master/hooks/relations/openstack-integration/README.md b/kubernetes-master/hooks/relations/openstack-integration/README.md
new file mode 100644
index 0000000..ae021c2
--- /dev/null
+++ b/kubernetes-master/hooks/relations/openstack-integration/README.md
@@ -0,0 +1,28 @@
+# Overview
+
+This layer encapsulates the `openstack-integration` interface communication
+protocol and provides an API for charms on either side of relations using this
+interface.
+
+## Usage
+
+In your charm's `layer.yaml`, ensure that `interface:openstack-integration` is
+included in the `includes` section:
+
+```yaml
+includes: ['layer:basic', 'interface:openstack-integration']
+```
+
+And in your charm's `metadata.yaml`, ensure that a relation endpoint is defined
+using the `openstack-integration` interface protocol:
+
+```yaml
+requires:
+ openstack:
+ interface: openstack-integration
+```
+
+For documentation on how to use the API for this interface, see:
+
+* [Requires API documentation](docs/requires.md)
+* [Provides API documentation](docs/provides.md) (this will only be used by the openstack-integrator charm)
diff --git a/kubernetes-master/hooks/relations/openstack-integration/__init__.py b/kubernetes-master/hooks/relations/openstack-integration/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/kubernetes-master/hooks/relations/openstack-integration/copyright b/kubernetes-master/hooks/relations/openstack-integration/copyright
new file mode 100644
index 0000000..a91bdf1
--- /dev/null
+++ b/kubernetes-master/hooks/relations/openstack-integration/copyright
@@ -0,0 +1,16 @@
+Format: http://dep.debian.net/deps/dep5/
+
+Files: *
+Copyright: Copyright 2018, Canonical Ltd., All Rights Reserved.
+License: Apache License 2.0
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+ .
+ http://www.apache.org/licenses/LICENSE-2.0
+ .
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/kubernetes-master/hooks/relations/openstack-integration/docs/provides.md b/kubernetes-master/hooks/relations/openstack-integration/docs/provides.md
new file mode 100644
index 0000000..ee17ac6
--- /dev/null
+++ b/kubernetes-master/hooks/relations/openstack-integration/docs/provides.md
@@ -0,0 +1,108 @@
+
+# provides
+
+
+This is the provides side of the interface layer, for use only by the
+OpenStack integration charm itself.
+
+The flags that are set by the provides side of this interface are:
+
+* **`endpoint.{endpoint_name}.requested`** This flag is set when there is
+ a new or updated request by a remote unit for OpenStack integration
+ features. The OpenStack integration charm should then iterate over each
+ request, perform whatever actions are necessary to satisfy those requests,
+ and then mark them as complete.
+
+
+### all_requests
+
+A list of all of the [`IntegrationRequests`](#provides.OpenStackIntegrationProvides.all_requests.IntegrationRequests) that have been made.
+
+
+### new_requests
+
+
+A list of the new or updated [`IntegrationRequests`](#provides.OpenStackIntegrationProvides.new_requests.IntegrationRequests) that have been made.
+
+
+### mark_completed
+
+```python
+OpenStackIntegrationProvides.mark_completed()
+```
+
+Mark all requests as completed and remove the `requests-pending` flag.
+
+
+## IntegrationRequest
+
+```python
+IntegrationRequest(unit)
+```
+
+A request for integration from a single remote unit.
+
+
+### has_credentials
+
+
+Whether or not credentials have been set via `set_credentials`.
+
+
+### is_changed
+
+
+Whether this request has changed since the last time it was
+marked completed (if ever).
+
+
+### set_credentials
+
+```python
+IntegrationRequest.set_credentials(auth_url,
+ region,
+ username,
+ password,
+ user_domain_name,
+ project_domain_name,
+ project_name,
+ endpoint_tls_ca,
+ version=None)
+```
+
+Set the credentials for this request.
+
+
+### set_lbaas_config
+
+```python
+IntegrationRequest.set_lbaas_config(subnet_id,
+ floating_network_id,
+ lb_method,
+ manage_security_groups,
+ has_octavia=None)
+```
+
+Set the load-balancer-as-a-service config for this request.
+
+
+### set_block_storage_config
+
+```python
+IntegrationRequest.set_block_storage_config(bs_version, trust_device_path,
+ ignore_volume_az)
+```
+
+Set the block storage config for this request.
+
diff --git a/kubernetes-master/hooks/relations/openstack-integration/docs/requires.md b/kubernetes-master/hooks/relations/openstack-integration/docs/requires.md
new file mode 100644
index 0000000..510e292
--- /dev/null
+++ b/kubernetes-master/hooks/relations/openstack-integration/docs/requires.md
@@ -0,0 +1,160 @@
+
+# requires
+
+
+This is the requires side of the interface layer, for use in charms that wish
+to request integration with OpenStack native features. The integration will be
+provided by the OpenStack integration charm, which allows the requiring charm
+to not require cloud credentials itself and not have a lot of OpenStack
+specific API code.
+
+The flags that are set by the requires side of this interface are:
+
+* **`endpoint.{endpoint_name}.joined`** This flag is set when the relation
+ has been joined, and the charm should then use the methods documented below
+ to request specific OpenStack features. This flag is automatically removed
+ if the relation is broken. It should not be removed by the charm.
+
+* **`endpoint.{endpoint_name}.ready`** This flag is set once the requested
+ features have been enabled for the OpenStack instance on which the charm is
+ running. This flag is automatically removed if new integration features are
+ requested. It should not be removed by the charm.
+
+* **`endpoint.{endpoint_name}.ready.changed`** This flag is set if the data
+ changes after the ready flag was set. This flag should be removed by the
+ charm once handled.
+
+
+## OpenStackIntegrationRequires
+
+```python
+OpenStackIntegrationRequires(endpoint_name, relation_ids=None)
+```
+
+Interface to request integration access.
+
+Note that due to resource limits and permissions granularity, policies are
+limited to being applied at the charm level. That means that, if any
+permissions are requested (i.e., any of the enable methods are called),
+what is granted will be the sum of those ever requested by any instance of
+the charm on this cloud.
+
+Labels, on the other hand, will be instance specific.
+
+Example usage:
+
+```python
+from charms.reactive import when, endpoint_from_flag
+
+@when('endpoint.openstack.ready')
+def openstack_integration_ready():
+ openstack = endpoint_from_flag('endpoint.openstack.ready')
+ update_config_enable_openstack(openstack)
+```
+
+
+### auth_url
+
+
+The authentication endpoint URL.
+
+
+### bs_version
+
+
+What block storage API version to use, `auto` if autodetection is
+desired, or `None` to use the default.
+
+
+### endpoint_tls_ca
+
+
+Optional base64-encoded CA certificate for the authentication endpoint,
+or None.
+
+
+### floating_network_id
+
+
+Optional floating network ID, or None.
+
+
+### has_octavia
+
+
+Whether the underlying OpenStack supports Octavia instead of
+Neutron-based LBaaS.
+
+Will either be True, False, or None if it could not be determined for
+some reason (typically due to connecting to an older integrator charm).
+
+
+### ignore_volume_az
+
+
+Whether to ignore availability zones when attaching Cinder volumes.
+
+Will be `True`, `False`, or `None`.
+
+
+### is_changed
+
+
+Whether or not the request for this instance has changed.
+
+
+### is_ready
+
+
+Whether or not the request for this instance has been completed.
+
+
+### lb_method
+
+
+Optional load-balancer method, or None.
+
+
+### manage_security_groups
+
+
+Whether or not the Load Balancer should automatically manage security
+group rules.
+
+Will be `True` or `False`.
+
+
+### password
+
+
+The password.
+
+
+### project_domain_name
+
+
+The project domain name.
+
+
+### project_name
+
+
+The project name, also known as the tenant ID.
+
+
+### region
+
+
+The region name.
+
+
+### subnet_id
+
+
+Optional subnet ID to work in, or None.
+
+
+### trust_device_path
+
+
+Whether to trust the block device name provided by Ceph.
+
+Will be `True`, `False`, or `None`.
+
+
+### user_domain_name
+
+
+The user domain name.
+
+
+### username
+
+
+The username.
+
+
+### version
+
+
+Optional version number for the APIs or None.
+
diff --git a/kubernetes-master/hooks/relations/openstack-integration/interface.yaml b/kubernetes-master/hooks/relations/openstack-integration/interface.yaml
new file mode 100644
index 0000000..a94fed4
--- /dev/null
+++ b/kubernetes-master/hooks/relations/openstack-integration/interface.yaml
@@ -0,0 +1,4 @@
+name: openstack-integration
+summary: Interface for connecting to the OpenStack integrator charm.
+version: 1
+maintainer: Cory Johns
diff --git a/kubernetes-master/hooks/relations/openstack-integration/make_docs b/kubernetes-master/hooks/relations/openstack-integration/make_docs
new file mode 100644
index 0000000..a09c66f
--- /dev/null
+++ b/kubernetes-master/hooks/relations/openstack-integration/make_docs
@@ -0,0 +1,20 @@
+#!.tox/py3/bin/python
+
+import sys
+from shutil import rmtree
+from unittest.mock import patch
+
+import pydocmd.__main__
+
+
+with patch('charmhelpers.core.hookenv.metadata') as metadata:
+ metadata.return_value = {
+ 'requires': {'openstack': {'interface': 'openstack'}},
+ 'provides': {'openstack': {'interface': 'openstack'}},
+ }
+ sys.path.insert(0, '.')
+ print(sys.argv)
+ if len(sys.argv) == 1:
+ sys.argv.extend(['build'])
+ pydocmd.__main__.main()
+ rmtree('_build')
diff --git a/kubernetes-master/hooks/relations/openstack-integration/provides.py b/kubernetes-master/hooks/relations/openstack-integration/provides.py
new file mode 100644
index 0000000..7aa8146
--- /dev/null
+++ b/kubernetes-master/hooks/relations/openstack-integration/provides.py
@@ -0,0 +1,152 @@
+"""
+This is the provides side of the interface layer, for use only by the
+OpenStack integration charm itself.
+
+The flags that are set by the provides side of this interface are:
+
+* **`endpoint.{endpoint_name}.requested`** This flag is set when there is
+ a new or updated request by a remote unit for OpenStack integration
+ features. The OpenStack integration charm should then iterate over each
+ request, perform whatever actions are necessary to satisfy those requests,
+ and then mark them as complete.
+"""
+
+from operator import attrgetter
+
+from charms.reactive import Endpoint
+from charms.reactive import when
+from charms.reactive import toggle_flag, clear_flag
+
+
+class OpenStackIntegrationProvides(Endpoint):
+ """
+ Example usage:
+
+ ```python
+ from charms.reactive import when, endpoint_from_flag
+ from charms import layer
+
+ @when('endpoint.openstack.requests-pending')
+ def handle_requests():
+ openstack = endpoint_from_flag('endpoint.openstack.requests-pending')
+ for request in openstack.requests:
+ request.set_credentials(layer.openstack.get_user_credentials())
+ openstack.mark_completed()
+ ```
+ """
+
+ @when('endpoint.{endpoint_name}.changed')
+ def check_requests(self):
+ toggle_flag(self.expand_name('requests-pending'),
+ len(self.all_requests) > 0)
+ clear_flag(self.expand_name('changed'))
+
+ @property
+ def all_requests(self):
+ """
+ A list of all of the #IntegrationRequests that have been made.
+ """
+ if not hasattr(self, '_all_requests'):
+ self._all_requests = [IntegrationRequest(unit)
+ for unit in self.all_joined_units]
+ return self._all_requests
+
+ @property
+ def new_requests(self):
+ """
+ A list of the new or updated #IntegrationRequests that have been made.
+ """
+ is_changed = attrgetter('is_changed')
+ return list(filter(is_changed, self.all_requests))
+
+ def mark_completed(self):
+ """
+ Mark all requests as completed and remove the `requests-pending` flag.
+ """
+ clear_flag(self.expand_name('requests-pending'))
+
+
+class IntegrationRequest:
+ """
+ A request for integration from a single remote unit.
+ """
+ def __init__(self, unit):
+ self._unit = unit
+
+ @property
+ def _to_publish(self):
+ return self._unit.relation.to_publish
+
+ @property
+ def is_changed(self):
+ """
+ Whether this request has changed since the last time it was
+ marked completed (if ever).
+ """
+ return not self.has_credentials
+
+ @property
+ def unit_name(self):
+ return self._unit.unit_name
+
+ def set_credentials(self,
+ auth_url,
+ region,
+ username,
+ password,
+ user_domain_name,
+ project_domain_name,
+ project_name,
+ endpoint_tls_ca,
+ version=None):
+ """
+ Set the credentials for this request.
+ """
+ self._unit.relation.to_publish.update({
+ 'auth_url': auth_url,
+ 'region': region,
+ 'username': username,
+ 'password': password,
+ 'user_domain_name': user_domain_name,
+ 'project_domain_name': project_domain_name,
+ 'project_name': project_name,
+ 'endpoint_tls_ca': endpoint_tls_ca,
+ 'version': version,
+ })
+
+ def set_lbaas_config(self,
+ subnet_id,
+ floating_network_id,
+ lb_method,
+ manage_security_groups,
+ has_octavia=None):
+ """
+ Set the load-balancer-as-a-service config for this request.
+ """
+ self._unit.relation.to_publish.update({
+ 'subnet_id': subnet_id,
+ 'floating_network_id': floating_network_id,
+ 'lb_method': lb_method,
+ 'manage_security_groups': manage_security_groups,
+ 'has_octavia': has_octavia,
+ })
+
+ def set_block_storage_config(self,
+ bs_version,
+ trust_device_path,
+ ignore_volume_az):
+ """
+ Set the block storage config for this request.
+ """
+ self._unit.relation.to_publish.update({
+ 'bs_version': bs_version,
+ 'trust_device_path': trust_device_path,
+ 'ignore_volume_az': ignore_volume_az,
+ })
+
+ @property
+ def has_credentials(self):
+ """
+ Whether or not credentials have been set via `set_credentials`.
+ """
+ return 'credentials' in self._unit.relation.to_publish
diff --git a/kubernetes-master/hooks/relations/openstack-integration/pydocmd.yml b/kubernetes-master/hooks/relations/openstack-integration/pydocmd.yml
new file mode 100644
index 0000000..aa0a286
--- /dev/null
+++ b/kubernetes-master/hooks/relations/openstack-integration/pydocmd.yml
@@ -0,0 +1,16 @@
+site_name: 'OpenStack Integration Interface'
+
+generate:
+ - requires.md:
+ - requires
+ - requires.OpenStackIntegrationRequires+
+ - provides.md:
+ - provides
+ - provides.OpenStackIntegrationProvides+
+ - provides.IntegrationRequest+
+
+pages:
+ - Requires: requires.md
+ - Provides: provides.md
+
+gens_dir: docs
diff --git a/kubernetes-master/hooks/relations/openstack-integration/requires.py b/kubernetes-master/hooks/relations/openstack-integration/requires.py
new file mode 100644
index 0000000..420f767
--- /dev/null
+++ b/kubernetes-master/hooks/relations/openstack-integration/requires.py
@@ -0,0 +1,254 @@
+"""
+This is the requires side of the interface layer, for use in charms that wish
+to request integration with OpenStack native features. The integration will be
+provided by the OpenStack integration charm, which allows the requiring charm
+to not require cloud credentials itself and not have a lot of OpenStack
+specific API code.
+
+The flags that are set by the requires side of this interface are:
+
+* **`endpoint.{endpoint_name}.joined`** This flag is set when the relation
+ has been joined, and the charm should then use the methods documented below
+ to request specific OpenStack features. This flag is automatically removed
+ if the relation is broken. It should not be removed by the charm.
+
+* **`endpoint.{endpoint_name}.ready`** This flag is set once the requested
+ features have been enabled for the OpenStack instance on which the charm is
+ running. This flag is automatically removed if new integration features are
+ requested. It should not be removed by the charm.
+
+* **`endpoint.{endpoint_name}.ready.changed`** This flag is set if the data
+ changes after the ready flag was set. This flag should be removed by the
+ charm once handled.
+"""
+
+
+from charms.reactive import Endpoint
+from charms.reactive import when, when_not
+from charms.reactive import set_flag, clear_flag, toggle_flag, is_flag_set
+from charms.reactive import data_changed
+
+
+class OpenStackIntegrationRequires(Endpoint):
+ """
+ Interface to request integration access.
+
+ Note that due to resource limits and permissions granularity, policies are
+ limited to being applied at the charm level. That means that, if any
+ permissions are requested (i.e., any of the enable methods are called),
+ what is granted will be the sum of those ever requested by any instance of
+ the charm on this cloud.
+
+ Labels, on the other hand, will be instance specific.
+
+ Example usage:
+
+ ```python
+ from charms.reactive import when, endpoint_from_flag
+
+ @when('endpoint.openstack.ready')
+ def openstack_integration_ready():
+ openstack = endpoint_from_flag('endpoint.openstack.ready')
+ update_config_enable_openstack(openstack)
+ ```
+ """
+
+ @property
+ def _received(self):
+ """
+ Helper to streamline access to received data since we expect to only
+ ever be connected to a single OpenStack integration application with a
+ single unit.
+ """
+ return self.relations[0].joined_units.received
+
+ @property
+ def _to_publish(self):
+ """
+ Helper to streamline access to received data since we expect to only
+ ever be connected to a single OpenStack integration application with a
+ single unit.
+ """
+ return self.relations[0].to_publish
+
+ @when('endpoint.{endpoint_name}.changed')
+ def check_ready(self):
+ # My middle name is ready. No, that doesn't sound right.
+ # I eat ready for breakfast.
+ was_ready = is_flag_set(self.expand_name('ready'))
+ toggle_flag(self.expand_name('ready'), self.is_ready)
+ if self.is_ready and was_ready and self.is_changed:
+ set_flag(self.expand_name('ready.changed'))
+ clear_flag(self.expand_name('changed'))
+
+ @when_not('endpoint.{endpoint_name}.joined')
+ def remove_ready(self):
+ clear_flag(self.expand_name('ready'))
+
+ @property
+ def is_ready(self):
+ """
+ Whether or not the request for this instance has been completed.
+ """
+ # Although more information can be passed, such as LBaaS access
+ # the minimum needed to be considered ready is defined here
+ return all(field is not None for field in [
+ self.auth_url,
+ self.username,
+ self.password,
+ self.user_domain_name,
+ self.project_domain_name,
+ self.project_name,
+ ])
+
+ @property
+ def is_changed(self):
+ """
+ Whether or not the request for this instance has changed.
+ """
+ return data_changed(self.expand_name('all-data'), [
+ self.auth_url,
+ self.region,
+ self.username,
+ self.password,
+ self.user_domain_name,
+ self.project_domain_name,
+ self.project_name,
+ self.endpoint_tls_ca,
+ self.subnet_id,
+ self.floating_network_id,
+ self.lb_method,
+ self.manage_security_groups,
+ ])
+
+ @property
+ def auth_url(self):
+ """
+ The authentication endpoint URL.
+ """
+ return self._received['auth_url']
+
+ @property
+ def region(self):
+ """
+ The region name.
+ """
+ return self._received['region']
+
+ @property
+ def username(self):
+ """
+ The username.
+ """
+ return self._received['username']
+
+ @property
+ def password(self):
+ """
+ The password.
+ """
+ return self._received['password']
+
+ @property
+ def user_domain_name(self):
+ """
+ The user domain name.
+ """
+ return self._received['user_domain_name']
+
+ @property
+ def project_domain_name(self):
+ """
+ The project domain name.
+ """
+ return self._received['project_domain_name']
+
+ @property
+ def project_name(self):
+ """
+ The project name, also known as the tenant ID.
+ """
+ return self._received['project_name']
+
+ @property
+ def endpoint_tls_ca(self):
+ """
+ Optional base64-encoded CA certificate for the authentication endpoint,
+ or None.
+ """
+ return self._received['endpoint_tls_ca'] or None
+
+ @property
+ def version(self):
+ """
+ Optional version number for the APIs or None.
+ """
+ return self._received['version'] or None
+
+ @property
+ def subnet_id(self):
+ """
+ Optional subnet ID to work in, or None.
+ """
+ return self._received['subnet_id']
+
+ @property
+ def floating_network_id(self):
+ """
+ Optional floating network ID, or None.
+ """
+ return self._received['floating_network_id']
+
+ @property
+ def lb_method(self):
+ """
+ Optional load-balancer method, or None.
+ """
+ return self._received['lb_method']
+
+ @property
+ def manage_security_groups(self):
+ """
+ Whether or not the Load Balancer should automatically manage security
+ group rules.
+
+ Will be `True` or `False`.
+ """
+ return self._received['manage_security_groups'] or False
+
+ @property
+ def bs_version(self):
+ """
+ What block storage API version to use, `auto` if autodetection is
+ desired, or `None` to use the default.
+ """
+ return self._received['bs_version']
+
+ @property
+ def trust_device_path(self):
+ """
+ Whether to trust the block device name provided by Ceph.
+
+ Will be `True`, `False`, or `None`.
+ """
+ return self._received['trust_device_path']
+
+ @property
+ def ignore_volume_az(self):
+ """
+ Whether to ignore availability zones when attaching Cinder volumes.
+
+ Will be `True`, `False`, or `None`.
+ """
+ return self._received['ignore_volume_az']
+
+ @property
+ def has_octavia(self):
+ """
+ Whether the underlying OpenStack supports Octavia instead of
+ Neutron-based LBaaS.
+
+ Will either be True, False, or None if it could not be determined for
+ some reason (typically due to connecting to an older integrator charm).
+ """
+ return self._received['has_octavia']
diff --git a/kubernetes-master/hooks/relations/prometheus-manual/.gitignore b/kubernetes-master/hooks/relations/prometheus-manual/.gitignore
new file mode 100644
index 0000000..01a6a44
--- /dev/null
+++ b/kubernetes-master/hooks/relations/prometheus-manual/.gitignore
@@ -0,0 +1,3 @@
+.docs
+__pycache__
+*.pyc
diff --git a/kubernetes-master/hooks/relations/prometheus-manual/LICENSE b/kubernetes-master/hooks/relations/prometheus-manual/LICENSE
new file mode 100644
index 0000000..d645695
--- /dev/null
+++ b/kubernetes-master/hooks/relations/prometheus-manual/LICENSE
@@ -0,0 +1,202 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/kubernetes-master/hooks/relations/prometheus-manual/README.md b/kubernetes-master/hooks/relations/prometheus-manual/README.md
new file mode 100644
index 0000000..4ff5c83
--- /dev/null
+++ b/kubernetes-master/hooks/relations/prometheus-manual/README.md
@@ -0,0 +1,113 @@
+# Interface prometheus-manual
+
+This is a [Juju][] interface layer that enables a charm which provides manual
+or raw metric scraper job configuration stanzas for Prometheus 2.
+
+The format for the job configuration data can be found in the [Prometheus
+Configuration Docs][]. The job configuration will be included as an item
+under `scrape_configs` largely unchanged, except for two things:
+
+* To ensure uniqueness, the provided job name will have a UUID appended to it.
+* Because the CA cert must be written to disk separately from the config, any
+ `tls_config` sections will have their `ca_file` field values replaced with
+ the path to the file where the provided `ca_cert` data is written.
+
+# Example Usage
+
+First, you must define the relation endpoint in your charm's `metadata.yaml`:
+
+```yaml
+provides:
+ prometheus:
+ interface: prometheus-manual
+```
+
+Next, you must ensure the interface layer is included in your `layer.yaml`:
+
+```yaml
+includes:
+ - interface:prometheus-manual
+```
+
+Then, in your reactive code, add the following, modifying the job data as
+your charm needs:
+
+```python
+from charms.reactive import endpoint_from_flag
+
+
+@when('endpoint.prometheus.joined',
+ 'tls.ca.available')
+def register_prometheus_jobs():
+ prometheus = endpoint_from_flag('endpoint.prometheus.joined')
+ tls = endpoint_from_flag('tls.ca.available')
+ prometheus.register_job(job_name='kubernetes-apiservers',
+ ca_cert=tls.root_ca_cert,
+ job_data={
+ 'kubernetes_sd_configs': [{'role': 'endpoints'}],
+ 'scheme': 'https',
+ 'tls_config': {'ca_file': '__ca_file__'}, # placeholder for saved filename
+ 'bearer_token': get_token('system:prometheus'),
+ })
+ prometheus.register_job(job_name='kubernetes-nodes',
+ ca_cert=tls.root_ca_cert,
+ job_data={
+ 'kubernetes_sd_configs': [{'role': 'node'}],
+ 'scheme': 'https',
+ 'tls_config': {'ca_file': '__ca_file__'}, # placeholder for saved filename
+ 'bearer_token': get_token('system:prometheus'),
+ })
+```
+
+
+
+# Reference
+
+* [common.md](docs/common.md)
+ * [JobRequest](docs/common.md#jobrequest)
+ * [egress_subnets](docs/common.md#jobrequest-egress_subnets)
+ * [fromkeys](docs/common.md#jobrequest-fromkeys)
+ * [ingress_address](docs/common.md#jobrequest-ingress_address)
+ * [is_created](docs/common.md#jobrequest-is_created)
+ * [is_received](docs/common.md#jobrequest-is_received)
+ * [respond](docs/common.md#jobrequest-respond)
+ * [to_json](docs/common.md#jobrequest-to_json)
+ * [JobResponse](docs/common.md#jobresponse)
+ * [fromkeys](docs/common.md#jobresponse-fromkeys)
+* [provides.md](docs/provides.md)
+ * [PrometheusManualProvides](docs/provides.md#prometheusmanualprovides)
+ * [all_departed_units](docs/provides.md#prometheusmanualprovides-all_departed_units)
+ * [all_joined_units](docs/provides.md#prometheusmanualprovides-all_joined_units)
+ * [all_units](docs/provides.md#prometheusmanualprovides-all_units)
+ * [endpoint_name](docs/provides.md#prometheusmanualprovides-endpoint_name)
+ * [is_joined](docs/provides.md#prometheusmanualprovides-is_joined)
+ * [joined](docs/provides.md#prometheusmanualprovides-joined)
+ * [manage_flags](docs/provides.md#prometheusmanualprovides-manage_flags)
+ * [register_job](docs/provides.md#prometheusmanualprovides-register_job)
+ * [relations](docs/provides.md#prometheusmanualprovides-relations)
+ * [requests](docs/provides.md#prometheusmanualprovides-requests)
+ * [responses](docs/provides.md#prometheusmanualprovides-responses)
+* [requires.md](docs/requires.md)
+ * [PrometheusManualRequires](docs/requires.md#prometheusmanualrequires)
+ * [all_departed_units](docs/requires.md#prometheusmanualrequires-all_departed_units)
+ * [all_joined_units](docs/requires.md#prometheusmanualrequires-all_joined_units)
+ * [all_requests](docs/requires.md#prometheusmanualrequires-all_requests)
+ * [all_units](docs/requires.md#prometheusmanualrequires-all_units)
+ * [endpoint_name](docs/requires.md#prometheusmanualrequires-endpoint_name)
+ * [is_joined](docs/requires.md#prometheusmanualrequires-is_joined)
+ * [jobs](docs/requires.md#prometheusmanualrequires-jobs)
+ * [joined](docs/requires.md#prometheusmanualrequires-joined)
+ * [manage_flags](docs/requires.md#prometheusmanualrequires-manage_flags)
+ * [new_jobs](docs/requires.md#prometheusmanualrequires-new_jobs)
+ * [new_requests](docs/requires.md#prometheusmanualrequires-new_requests)
+ * [relations](docs/requires.md#prometheusmanualrequires-relations)
+
+
+
+# Contact Information
+
+Maintainer: Cory Johns <Cory.Johns@canonical.com>
+
+
+[Juju]: https://jujucharms.com
+[Prometheus Configuration Docs]: https://prometheus.io/docs/prometheus/latest/configuration/configuration/
diff --git a/kubernetes-master/hooks/relations/prometheus-manual/__init__.py b/kubernetes-master/hooks/relations/prometheus-manual/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/kubernetes-master/hooks/relations/prometheus-manual/common.py b/kubernetes-master/hooks/relations/prometheus-manual/common.py
new file mode 100644
index 0000000..530f965
--- /dev/null
+++ b/kubernetes-master/hooks/relations/prometheus-manual/common.py
@@ -0,0 +1,57 @@
+import json
+from copy import deepcopy
+
+from charms.reactive import BaseRequest, BaseResponse, Field
+
+
+class JobResponse(BaseResponse):
+    """Response sent back for a JobRequest.
+
+    Indicates whether the job registration succeeded and, on failure, why.
+    """
+    # True when the job was registered successfully.
+    success = Field('Whether or not the registration succeeded')
+    # Explanation of the failure; only meaningful when success is falsy.
+    reason = Field('If failed, a description of why')
+
+
+class JobRequest(BaseRequest):
+ RESPONSE_CLASS = JobResponse
+
+ job_name = Field('Desired name for the job. To ensure uniqueness, the '
+ 'the request ID will be appended to the final job name.')
+
+ job_data = Field('Config data for the job.')
+
+ ca_cert = Field('Cert data for the CA used to validate connections.')
+
+ def to_json(self, ca_file=None):
+ """
+ Render the job request to JSON string which can be included directly
+ into Prometheus config.
+
+ Keys will be sorted in the rendering to ensure a stable ordering for
+ comparisons to detect changes.
+
+ If `ca_file` is given, it will be used to replace the value of any
+ `ca_file` fields in the job. The charm should ensure that the
+ request's `ca_cert` data is writen to that path prior to calling this
+ method.
+ """
+ job_data = deepcopy(self.job_data) # make a copy we can modify
+ job_data['job_name'] = '{}-{}'.format(self.job_name, self.request_id)
+
+ if ca_file:
+ for key, value in job_data.items():
+ # update the cert path at the job level
+ if key == 'tls_config':
+ value['ca_file'] = str(ca_file)
+
+ # update the cert path at the SD config level
+ if key.endswith('_sd_configs'):
+ for sd_config in value:
+ if 'ca_file' in sd_config.get('tls_config', {}):
+ sd_config['tls_config']['ca_file'] = str(ca_file)
+
+ return json.dumps(job_data, sort_keys=True)
+
+ def respond(self, success, reason=None):
+ """
+ Acknowledge this request, and indicate success or failure with an
+ optional explanation.
+ """
+ super().respond(success=success, reason=reason)
diff --git a/kubernetes-master/hooks/relations/prometheus-manual/copyright b/kubernetes-master/hooks/relations/prometheus-manual/copyright
new file mode 100644
index 0000000..69768db
--- /dev/null
+++ b/kubernetes-master/hooks/relations/prometheus-manual/copyright
@@ -0,0 +1,16 @@
+Format: http://dep.debian.net/deps/dep5/
+
+Files: *
+Copyright: Copyright 2019, Canonical Ltd., All Rights Reserved.
+License: Apache License 2.0
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+ .
+ http://www.apache.org/licenses/LICENSE-2.0
+ .
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/kubernetes-master/hooks/relations/prometheus-manual/docs/common.md b/kubernetes-master/hooks/relations/prometheus-manual/docs/common.md
new file mode 100644
index 0000000..a97d54b
--- /dev/null
+++ b/kubernetes-master/hooks/relations/prometheus-manual/docs/common.md
@@ -0,0 +1,62 @@
+# `class JobRequest(BaseRequest)`
+
+Base class for requests using the request / response pattern.
+
+Subclasses **must** set the ``RESPONSE_CLASS`` attribute to a subclass of
+the :class:`BaseResponse` which defines the fields that the response will
+use. They must also define additional attributes as :class:`Field`s.
+
+For example::
+
+ class TLSResponse(BaseResponse):
+ key = Field('Private key for the cert')
+ cert = Field('Public cert info')
+
+
+ class TLSRequest(BaseRequest):
+ RESPONSE_CLASS = TLSResponse
+
+ common_name = Field('Common Name (CN) for the cert to be created')
+ sans = Field('List of Subject Alternative Names (SANs)')
+
+## `egress_subnets`
+
+Subnets over which network traffic to the requester will flow.
+
+## `None`
+
+Returns a new dict with keys from iterable and values equal to value.
+
+## `ingress_address`
+
+Address to use if a connection to the requester is required.
+
+## `is_created`
+
+Whether this request was created by this side of the relation.
+
+## `is_received`
+
+Whether this request was received by the other side of the relation.
+
+## `def respond(self, success, reason=None)`
+
+Acknowledge this request, and indicate success or failure with an
+optional explanation.
+
+## `def to_json(self)`
+
+Render the job request to JSON string which can be included directly
+into Prometheus config.
+
+Keys will be sorted in the rendering to ensure a stable ordering for
+comparisons to detect changes.
+
+# `class JobResponse(BaseResponse)`
+
+Base class for responses using the request / response pattern.
+
+## `None`
+
+Returns a new dict with keys from iterable and values equal to value.
+
diff --git a/kubernetes-master/hooks/relations/prometheus-manual/docs/provides.md b/kubernetes-master/hooks/relations/prometheus-manual/docs/provides.md
new file mode 100644
index 0000000..439b4e5
--- /dev/null
+++ b/kubernetes-master/hooks/relations/prometheus-manual/docs/provides.md
@@ -0,0 +1,119 @@
+# `class PrometheusManualProvides(RequesterEndpoint)`
+
+Base class for Endpoints that create requests in the request / response
+pattern.
+
+Subclasses **must** set the ``REQUEST_CLASS`` attribute to a subclass
+of :class:`BaseRequest` which defines the fields the request will use.
+
+## `all_departed_units`
+
+Collection of all units that were previously part of any relation on
+this endpoint but which have since departed.
+
+This collection is persistent and mutable. The departed units will
+be kept until they are explicitly removed, to allow for reasonable
+cleanup of units that have left.
+
+Example: You need to run a command each time a unit departs the relation.
+
+.. code-block:: python
+
+ @when('endpoint.{endpoint_name}.departed')
+ def handle_departed_unit(self):
+ for name, unit in self.all_departed_units.items():
+ # run the command to remove `unit` from the cluster
+ # ..
+ self.all_departed_units.clear()
+ clear_flag(self.expand_name('departed'))
+
+Once a unit is departed, it will no longer show up in
+:attr:`all_joined_units`. Note that units are considered departed as
+soon as the departed hook is entered, which differs slightly from how
+the Juju primitives behave (departing units are still returned from
+``related-units`` until after the departed hook is complete).
+
+This collection is a :class:`KeyList`, so can be used as a mapping to
+look up units by their unit name, or iterated or accessed by index.
+
+## `all_joined_units`
+
+A list view of all the units of all relations attached to this
+:class:`~charms.reactive.endpoints.Endpoint`.
+
+This is actually a
+:class:`~charms.reactive.endpoints.CombinedUnitsView`, so the units
+will be in order by relation ID and then unit name, and you can access a
+merged view of all the units' data as a single mapping. You should be
+very careful when using the merged data collections, however, and
+consider carefully what will happen when the endpoint has multiple
+relations and multiple remote units on each. It is probably better to
+iterate over each unit and handle its data individually. See
+:class:`~charms.reactive.endpoints.CombinedUnitsView` for an
+explanation of how the merged data collections work.
+
+Note that, because a given application might be related multiple times
+on a given endpoint, units may show up in this collection more than
+once.
+
+## `all_units`
+
+.. deprecated:: 0.6.1
+ Use :attr:`all_joined_units` instead
+
+## `endpoint_name`
+
+Relation name of this endpoint.
+
+## `is_joined`
+
+Whether this endpoint has remote applications attached to it.
+
+## `joined`
+
+.. deprecated:: 0.6.3
+ Use :attr:`is_joined` instead
+
+## `def manage_flags(self)`
+
+Method that subclasses can override to perform any flag management
+needed during startup.
+
+This will be called automatically after the framework-managed automatic
+flags have been updated.
+
+## `def register_job(self, job_name, job_data, ca_cert=None)`
+
+Register a manual job.
+
+The job data should be the (unserialized) data defining the job.
+
+To ensure uniqueness, a UUID will be added to the job name, and it will
+be injected into the job data.
+
+If a CA cert is given, the value of any ca_file field in the job data
+will be replaced with a filename after the CA cert data is written, so
+a placeholder value should be used.
+
+## `relations`
+
+Collection of :class:`Relation` instances that are established for
+this :class:`Endpoint`.
+
+This is a :class:`KeyList`, so it can be iterated and indexed as a list,
+or you can look up relations by their ID. For example::
+
+ rel0 = endpoint.relations[0]
+ assert rel0 is endpoint.relations[rel0.relation_id]
+ assert all(rel is endpoint.relations[rel.relation_id]
+ for rel in endpoint.relations)
+ print(', '.join(endpoint.relations.keys()))
+
+## `requests`
+
+A list of all requests which have been submitted.
+
+## `responses`
+
+A list of all responses which have been received.
+
diff --git a/kubernetes-master/hooks/relations/prometheus-manual/docs/requires.md b/kubernetes-master/hooks/relations/prometheus-manual/docs/requires.md
new file mode 100644
index 0000000..31a7e8f
--- /dev/null
+++ b/kubernetes-master/hooks/relations/prometheus-manual/docs/requires.md
@@ -0,0 +1,117 @@
+# `class PrometheusManualRequires(ResponderEndpoint)`
+
+Base class for Endpoints that respond to requests in the request / response
+pattern.
+
+Subclasses **must** set the ``REQUEST_CLASS`` attribute to a subclass
+of :class:`BaseRequest` which defines the fields the request will use.
+
+## `all_departed_units`
+
+Collection of all units that were previously part of any relation on
+this endpoint but which have since departed.
+
+This collection is persistent and mutable. The departed units will
+be kept until they are explicitly removed, to allow for reasonable
+cleanup of units that have left.
+
+Example: You need to run a command each time a unit departs the relation.
+
+.. code-block:: python
+
+ @when('endpoint.{endpoint_name}.departed')
+ def handle_departed_unit(self):
+ for name, unit in self.all_departed_units.items():
+ # run the command to remove `unit` from the cluster
+ # ..
+ self.all_departed_units.clear()
+ clear_flag(self.expand_name('departed'))
+
+Once a unit is departed, it will no longer show up in
+:attr:`all_joined_units`. Note that units are considered departed as
+soon as the departed hook is entered, which differs slightly from how
+the Juju primitives behave (departing units are still returned from
+``related-units`` until after the departed hook is complete).
+
+This collection is a :class:`KeyList`, so can be used as a mapping to
+look up units by their unit name, or iterated or accessed by index.
+
+## `all_joined_units`
+
+A list view of all the units of all relations attached to this
+:class:`~charms.reactive.endpoints.Endpoint`.
+
+This is actually a
+:class:`~charms.reactive.endpoints.CombinedUnitsView`, so the units
+will be in order by relation ID and then unit name, and you can access a
+merged view of all the units' data as a single mapping. You should be
+very careful when using the merged data collections, however, and
+consider carefully what will happen when the endpoint has multiple
+relations and multiple remote units on each. It is probably better to
+iterate over each unit and handle its data individually. See
+:class:`~charms.reactive.endpoints.CombinedUnitsView` for an
+explanation of how the merged data collections work.
+
+Note that, because a given application might be related multiple times
+on a given endpoint, units may show up in this collection more than
+once.
+
+## `all_requests`
+
+A list of all requests, including ones which have been responded to.
+
+## `all_units`
+
+.. deprecated:: 0.6.1
+ Use :attr:`all_joined_units` instead
+
+## `endpoint_name`
+
+Relation name of this endpoint.
+
+## `is_joined`
+
+Whether this endpoint has remote applications attached to it.
+
+## `jobs`
+
+Return a list of all jobs to be registered.
+
+## `joined`
+
+.. deprecated:: 0.6.3
+ Use :attr:`is_joined` instead
+
+## `def manage_flags(self)`
+
+Method that subclasses can override to perform any flag management
+needed during startup.
+
+This will be called automatically after the framework-managed automatic
+flags have been updated.
+
+## `new_jobs`
+
+Return a list of new jobs to be registered.
+
+## `new_requests`
+
+A list of requests which have not been responded.
+
+Requests should be handled by the charm and then responded to by
+calling ``request.respond(...)``.
+
+## `relations`
+
+Collection of :class:`Relation` instances that are established for
+this :class:`Endpoint`.
+
+This is a :class:`KeyList`, so it can be iterated and indexed as a list,
+or you can look up relations by their ID. For example::
+
+ rel0 = endpoint.relations[0]
+ assert rel0 is endpoint.relations[rel0.relation_id]
+ assert all(rel is endpoint.relations[rel.relation_id]
+ for rel in endpoint.relations)
+ print(', '.join(endpoint.relations.keys()))
+
diff --git a/kubernetes-master/hooks/relations/prometheus-manual/interface.yaml b/kubernetes-master/hooks/relations/prometheus-manual/interface.yaml
new file mode 100644
index 0000000..5c324c6
--- /dev/null
+++ b/kubernetes-master/hooks/relations/prometheus-manual/interface.yaml
@@ -0,0 +1,6 @@
+name: prometheus-manual
+summary: Interface for registering manual job definitions with Prometheus
+version: 1
+maintainer: "Cory Johns <Cory.Johns@canonical.com>"
+exclude:
+ - .docs
diff --git a/kubernetes-master/hooks/relations/prometheus-manual/provides.py b/kubernetes-master/hooks/relations/prometheus-manual/provides.py
new file mode 100644
index 0000000..884629c
--- /dev/null
+++ b/kubernetes-master/hooks/relations/prometheus-manual/provides.py
@@ -0,0 +1,41 @@
+from charms.reactive import (
+ toggle_flag,
+ RequesterEndpoint,
+)
+
+from .common import JobRequest
+
+
+class PrometheusManualProvides(RequesterEndpoint):
+ REQUEST_CLASS = JobRequest
+
+ def manage_flags(self):
+ super().manage_flags()
+ toggle_flag(self.expand_name('endpoint.{endpoint_name}.available'),
+ self.is_joined and self.requests)
+
+ def register_job(self, job_name, job_data, ca_cert=None, relation=None):
+ """
+ Register a manual job.
+
+ The job data should be the (unserialized) data defining the job.
+
+ To ensure uniqueness, a UUID will be added to the job name, and it will
+ be injected into the job data.
+
+ If a CA cert is given, the value of any ca_file field in the job data
+ will be replaced with a filename after the CA cert data is written, so
+ a placeholder value should be used.
+
+ If a specific relation is not given, the job will be registered with
+ every related Prometheus.
+ """
+ # we might be connected to multiple prometheuses for some strange
+ # reason, so just send the job to all of them
+ relations = [relation] if relation is not None else self.relations
+ for relation in relations:
+ JobRequest.create_or_update(match_fields=['job_name'],
+ relation=relation,
+ job_name=job_name,
+ job_data=job_data,
+ ca_cert=ca_cert)
diff --git a/kubernetes-master/hooks/relations/prometheus-manual/requires.py b/kubernetes-master/hooks/relations/prometheus-manual/requires.py
new file mode 100644
index 0000000..a8d1acb
--- /dev/null
+++ b/kubernetes-master/hooks/relations/prometheus-manual/requires.py
@@ -0,0 +1,31 @@
+from charms.reactive import (
+ toggle_flag,
+ ResponderEndpoint,
+)
+
+from .common import JobRequest
+
+
+class PrometheusManualRequires(ResponderEndpoint):
+ REQUEST_CLASS = JobRequest
+
+ def manage_flags(self):
+ super().manage_flags()
+ toggle_flag(self.expand_name('endpoint.{endpoint_name}.has_jobs'),
+ self.is_joined and self.jobs)
+ toggle_flag(self.expand_name('endpoint.{endpoint_name}.new_jobs'),
+ self.is_joined and self.new_jobs)
+
+ @property
+ def jobs(self):
+ """
+ Return a list of all jobs to be registered.
+ """
+ return self.all_requests
+
+ @property
+ def new_jobs(self):
+ """
+ Return a list of new jobs to be registered.
+ """
+ return self.new_requests
diff --git a/kubernetes-master/hooks/relations/public-address/README.md b/kubernetes-master/hooks/relations/public-address/README.md
new file mode 100644
index 0000000..06be3ae
--- /dev/null
+++ b/kubernetes-master/hooks/relations/public-address/README.md
@@ -0,0 +1,59 @@
+# Overview
+
+This interface layer implements a public address protocol useful for load
+balancers and their subordinates. The load balancers (providers) set their
+own public address and port, which is then available to the subordinates
+(requirers).
+
+# Usage
+
+## Provides
+
+By providing the `public-address` interface, your charm is providing an HTTP
+server that can load-balance for another HTTP based service.
+
+Your charm need only provide the address and port on which it is serving its
+content, as soon as the `{relation_name}.available` state is set:
+
+```python
+from charmhelpers.core import hookenv
+@when('website.available')
+def configure_website(website):
+ website.set_address_port(hookenv.unit_get('public-address'), hookenv.config('port'))
+```
+
+## Requires
+
+By requiring the `public-address` interface, your charm is consuming one or
+more HTTP servers, to load-balance a set of servers, etc.
+
+Your charm should respond to the `{relation_name}.available` state, which
+indicates that there is at least one HTTP server connected.
+
+The `get_addresses_ports()` method returns a list of available addresses and
+ports.
+
+The return value is a list of dicts of the following form:
+
+```python
+[
+ {
+ 'public-address': address_of_host,
+ 'port': port_for_host,
+ },
+ # ...
+]
+```
+
+A trivial example of handling this interface would be:
+
+```python
+from charmhelpers.core import hookenv
+@when('loadbalancer.available')
+def update_reverse_proxy_config(loadbalancer):
+ hosts = loadbalancer.get_addresses_ports()
+ for host in hosts:
+ hookenv.log('The loadbalancer for this unit is {}:{}'.format(
+ host['public-address'],
+ host['port']))
+```
diff --git a/kubernetes-master/hooks/relations/public-address/__init__.py b/kubernetes-master/hooks/relations/public-address/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/kubernetes-master/hooks/relations/public-address/interface.yaml b/kubernetes-master/hooks/relations/public-address/interface.yaml
new file mode 100644
index 0000000..c9849e4
--- /dev/null
+++ b/kubernetes-master/hooks/relations/public-address/interface.yaml
@@ -0,0 +1,4 @@
+name: public-address
+summary: A basic interface to provide the public address for load balancers.
+version: 1
+repo: https://github.com/juju-solutions/interface-public-address.git
diff --git a/kubernetes-master/hooks/relations/public-address/provides.py b/kubernetes-master/hooks/relations/public-address/provides.py
new file mode 100644
index 0000000..09b9915
--- /dev/null
+++ b/kubernetes-master/hooks/relations/public-address/provides.py
@@ -0,0 +1,60 @@
+import json
+
+from charms.reactive import toggle_flag
+from charms.reactive import Endpoint
+
+
+class PublicAdddressProvides(Endpoint):
+
+ def manage_flags(self):
+ toggle_flag(self.expand_name('{endpoint_name}.available'),
+ self.is_joined)
+
+ def set_address_port(self, address, port, relation=None):
+ if relation is None:
+ # no relation specified, so send the same data to everyone
+ relations = self.relations
+ else:
+ # specific relation given, so only send the data to that one
+ relations = [relation]
+ if type(address) is list:
+ # build 2 lists to zip together that are the same length
+ length = len(address)
+ p = [port] * length
+ combined = zip(address, p)
+ clients = [{'public-address': a, 'port': p}
+ for a, p in combined]
+ # for backwards compatibility, we just send a single entry
+ # and have an array of dictionaries in a field of that
+ # entry for the other entries.
+ first = clients.pop(0)
+ first['extended_data'] = json.dumps(clients)
+ for relation in relations:
+ relation.to_publish_raw.update(first)
+ else:
+ for relation in relations:
+ relation.to_publish_raw.update({'public-address': address,
+ 'port': port})
+
+ @property
+ def requests(self):
+ return [Request(rel) for rel in self.relations]
+
+
+class Request:
+ def __init__(self, rel):
+ self.rel = rel
+
+ @property
+ def application_name(self):
+ return self.rel.application_name
+
+ @property
+ def members(self):
+ return [(u.received_raw.get('ingress-address',
+ u.received_raw['private-address']),
+ u.received_raw.get('port', '6443'))
+ for u in self.rel.joined_units]
+
+ def set_address_port(self, address, port):
+ self.rel.endpoint.set_address_port(address, port, self.rel)
diff --git a/kubernetes-master/hooks/relations/public-address/requires.py b/kubernetes-master/hooks/relations/public-address/requires.py
new file mode 100644
index 0000000..467d129
--- /dev/null
+++ b/kubernetes-master/hooks/relations/public-address/requires.py
@@ -0,0 +1,44 @@
+import json
+
+from charms.reactive import toggle_flag, Endpoint
+
+
+class PublicAddressRequires(Endpoint):
+ def manage_flags(self):
+ toggle_flag(self.expand_name('{endpoint_name}.available'),
+ len(self.get_addresses_ports()) > 0)
+
+ def set_backend_port(self, port):
+ """
+ Set the port that the backend service is listening on.
+
+ Defaults to 6443 if not set.
+ """
+ for rel in self.relations:
+ rel.to_publish_raw['port'] = str(port)
+
+ def get_addresses_ports(self):
+ '''Returns a list of available HTTP providers and their associated
+ public addresses and ports.
+
+ The return value is a list of dicts of the following form::
+ [
+ {
+ 'public-address': address_for_frontend,
+ 'port': port_for_frontend,
+ },
+ # ...
+ ]
+ '''
+ hosts = set()
+ for relation in self.relations:
+ for unit in relation.joined_units:
+ data = unit.received_raw
+ hosts.add((data['public-address'], data['port']))
+ if 'extended_data' in data:
+ for ed in json.loads(data['extended_data']):
+ hosts.add((ed['public-address'], ed['port']))
+
+ return [{'public-address': pa, 'port': p}
+ for pa, p in sorted(host for host in hosts
+ if None not in host)]
diff --git a/kubernetes-master/hooks/relations/tls-certificates/.gitignore b/kubernetes-master/hooks/relations/tls-certificates/.gitignore
new file mode 100644
index 0000000..93813bc
--- /dev/null
+++ b/kubernetes-master/hooks/relations/tls-certificates/.gitignore
@@ -0,0 +1,4 @@
+.tox
+__pycache__
+*.pyc
+_build
diff --git a/kubernetes-master/hooks/relations/tls-certificates/README.md b/kubernetes-master/hooks/relations/tls-certificates/README.md
new file mode 100644
index 0000000..733da6d
--- /dev/null
+++ b/kubernetes-master/hooks/relations/tls-certificates/README.md
@@ -0,0 +1,90 @@
+# Interface tls-certificates
+
+This is a [Juju][] interface layer that enables a charm which requires TLS
+certificates to relate to a charm which can provide them, such as [Vault][] or
+[EasyRSA][]
+
+To get started please read the [Introduction to PKI][] which defines some PKI
+terms, concepts and processes used in this document.
+
+# Example Usage
+
+Let's say you have a charm which needs a server certificate for a service it
+provides to other charms and a client certificate for a database it consumes
+from another charm. The charm provides its own service on the `clients`
+relation endpoint, and it consumes the database on the `db` relation endpoint.
+
+First, you must define the relation endpoint in your charm's `metadata.yaml`:
+
+```yaml
+requires:
+ cert-provider:
+ interface: tls-certificates
+```
+
+Next, you must ensure the interface layer is included in your `layer.yaml`:
+
+```yaml
+includes:
+ - interface:tls-certificates
+```
+
+Then, in your reactive code, add the following, changing `update_certs` to
+handle the certificates however your charm needs:
+
+```python
+from charmhelpers.core import hookenv, host
+from charms.reactive import endpoint_from_flag
+
+
+@when('cert-provider.ca.changed')
+def install_root_ca_cert():
+ cert_provider = endpoint_from_flag('cert-provider.ca.available')
+ host.install_ca_cert(cert_provider.root_ca_cert)
+ clear_flag('cert-provider.ca.changed')
+
+
+@when('cert-provider.available')
+def request_certificates():
+ cert_provider = endpoint_from_flag('cert-provider.available')
+
+ # get ingress info
+ ingress_for_clients = hookenv.network_get('clients')['ingress-addresses']
+ ingress_for_db = hookenv.network_get('db')['ingress-addresses']
+
+ # use first ingress address as primary and any additional as SANs
+ server_cn, server_sans = ingress_for_clients[0], ingress_for_clients[1:]
+ client_cn, client_sans = ingress_for_db[0], ingress_for_db[1:]
+
+ # request a single server and single client cert; note that multiple certs
+ # of either type can be requested as long as they have unique common names
+ cert_provider.request_server_cert(server_cn, server_sans)
+ cert_provider.request_client_cert(client_cn, client_sans)
+
+
+@when('cert-provider.certs.changed')
+def update_certs():
+ cert_provider = endpoint_from_flag('cert-provider.available')
+ server_cert = cert_provider.server_certs[0] # only requested one
+ myserver.update_server_cert(server_cert.cert, server_cert.key)
+
+ client_cert = cert_provider.client_certs[0] # only requested one
+ myclient.update_client_cert(client_cert.cert, client_cert.key)
+ clear_flag('cert-provider.certs.changed')
+```
+
+
+# Reference
+
+ * [Requires](docs/requires.md)
+ * [Provides](docs/provides.md)
+
+# Contact Information
+
+Maintainer: Cory Johns <Cory.Johns@canonical.com>
+
+
+[Juju]: https://jujucharms.com
+[Vault]: https://jujucharms.com/u/openstack-charmers/vault
+[EasyRSA]: https://jujucharms.com/u/containers/easyrsa
+[Introduction to PKI]: https://github.com/OpenVPN/easy-rsa/blob/master/doc/Intro-To-PKI.md
diff --git a/kubernetes-master/hooks/relations/tls-certificates/__init__.py b/kubernetes-master/hooks/relations/tls-certificates/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/kubernetes-master/hooks/relations/tls-certificates/docs/common.md b/kubernetes-master/hooks/relations/tls-certificates/docs/common.md
new file mode 100644
index 0000000..25d0e08
--- /dev/null
+++ b/kubernetes-master/hooks/relations/tls-certificates/docs/common.md
@@ -0,0 +1,51 @@
+
+
+Name of the application which the request came from.
+
+:returns: Name of application
+:rtype: str
+
+
+## cert
+
+
+The cert published for this request, if any.
+
+
+## cert_type
+
+
+Type of certificate, 'server' or 'client', being requested.
+
+
+## resolve_unit_name
+
+```python
+CertificateRequest.resolve_unit_name(unit)
+```
+Return name of unit associated with this request.
+
+unit_name should be provided in the relation data to ensure
+compatibility with cross-model relations. If the unit name
+is absent then fall back to unit_name attribute of the
+unit associated with this request.
+
+:param unit: Unit to extract name from
+:type unit: charms.reactive.endpoints.RelatedUnit
+:returns: Name of unit
+:rtype: str
+
+
+## Certificate
+
+```python
+Certificate(self, cert_type, common_name, cert, key)
+```
+
+Represents a created certificate and key.
+
+The ``cert_type``, ``common_name``, ``cert``, and ``key`` values can
+be accessed either as properties or as the contents of the dict.
+
diff --git a/kubernetes-master/hooks/relations/tls-certificates/docs/provides.md b/kubernetes-master/hooks/relations/tls-certificates/docs/provides.md
new file mode 100644
index 0000000..c213546
--- /dev/null
+++ b/kubernetes-master/hooks/relations/tls-certificates/docs/provides.md
@@ -0,0 +1,212 @@
+
provides
+
+
+
TlsProvides
+
+```python
+TlsProvides(self, endpoint_name, relation_ids=None)
+```
+
+The provider's side of the interface protocol.
+
+The following flags may be set:
+
+ * `{endpoint_name}.available`
+ Whenever any clients are joined.
+
+ * `{endpoint_name}.certs.requested`
+ When there are new certificate requests of any kind to be processed.
+ The requests can be accessed via [new_requests][].
+
+ * `{endpoint_name}.server.certs.requested`
+ When there are new server certificate requests to be processed.
+ The requests can be accessed via [new_server_requests][].
+
+ * `{endpoint_name}.client.certs.requested`
+ When there are new client certificate requests to be processed.
+ The requests can be accessed via [new_client_requests][].
+
+[Certificate]: common.md#tls_certificates_common.Certificate
+[CertificateRequest]: common.md#tls_certificates_common.CertificateRequest
+[all_requests]: provides.md#provides.TlsProvides.all_requests
+[new_requests]: provides.md#provides.TlsProvides.new_requests
+[new_server_requests]: provides.md#provides.TlsProvides.new_server_requests
+[new_client_requests]: provides.md#provides.TlsProvides.new_client_requests
+
+
all_published_certs
+
+
+List of all [Certificate][] instances that this provider has published
+for all related applications.
+
+
all_requests
+
+
+List of all requests that have been made.
+
+Each will be an instance of [CertificateRequest][].
+
+Example usage:
+
+```python
+@when('certs.regen',
+ 'tls.certs.available')
+def regen_all_certs():
+ tls = endpoint_from_flag('tls.certs.available')
+ for request in tls.all_requests:
+ cert, key = generate_cert(request.cert_type,
+ request.common_name,
+ request.sans)
+ request.set_cert(cert, key)
+```
+
+
new_application_requests
+
+
+Filtered view of [new_requests][] that only includes application cert
+requests.
+
+Each will be an instance of [ApplicationCertificateRequest][].
+
+Example usage:
+
+```python
+@when('tls.application.certs.requested')
+def gen_application_certs():
+ tls = endpoint_from_flag('tls.application.certs.requested')
+ for request in tls.new_application_requests:
+ cert, key = generate_application_cert(request.common_name,
+ request.sans)
+ request.set_cert(cert, key)
+```
+
+
new_client_requests
+
+
+Filtered view of [new_requests][] that only includes client cert
+requests.
+
+Each will be an instance of [CertificateRequest][].
+
+Example usage:
+
+```python
+@when('tls.client.certs.requested')
+def gen_client_certs():
+ tls = endpoint_from_flag('tls.client.certs.requested')
+ for request in tls.new_client_requests:
+ cert, key = generate_client_cert(request.common_name,
+ request.sans)
+ request.set_cert(cert, key)
+```
+
+
new_requests
+
+
+Filtered view of [all_requests][] that only includes requests that
+haven't been handled.
+
+Each will be an instance of [CertificateRequest][].
+
+This collection can also be further filtered by request type using
+[new_server_requests][] or [new_client_requests][].
+
+Example usage:
+
+```python
+@when('tls.certs.requested')
+def gen_certs():
+ tls = endpoint_from_flag('tls.certs.requested')
+ for request in tls.new_requests:
+ cert, key = generate_cert(request.cert_type,
+ request.common_name,
+ request.sans)
+ request.set_cert(cert, key)
+```
+
+
new_server_requests
+
+
+Filtered view of [new_requests][] that only includes server cert
+requests.
+
+Each will be an instance of [CertificateRequest][].
+
+Example usage:
+
+```python
+@when('tls.server.certs.requested')
+def gen_server_certs():
+ tls = endpoint_from_flag('tls.server.certs.requested')
+ for request in tls.new_server_requests:
+ cert, key = generate_server_cert(request.common_name,
+ request.sans)
+ request.set_cert(cert, key)
+```
+
+
set_ca
+
+```python
+TlsProvides.set_ca(certificate_authority)
+```
+
+Publish the CA to all related applications.
+
+
set_chain
+
+```python
+TlsProvides.set_chain(chain)
+```
+
+Publish the chain of trust to all related applications.
+
+
set_client_cert
+
+```python
+TlsProvides.set_client_cert(cert, key)
+```
+
+Deprecated. This is only for backwards compatibility.
+
+Publish a globally shared client cert and key.
+
+
set_server_cert
+
+```python
+TlsProvides.set_server_cert(scope, cert, key)
+```
+
+Deprecated. Use one of the [new_requests][] collections and
+`request.set_cert()` instead.
+
+Set the server cert and key for the request identified by `scope`.
+
+
+
+```python
+TlsProvides.get_server_requests()
+```
+
+Deprecated. Use the [new_requests][] or [server_requests][]
+collections instead.
+
+One provider can have many requests to generate server certificates.
+Return a map of all server request objects indexed by a unique
+identifier.
+
diff --git a/kubernetes-master/hooks/relations/tls-certificates/docs/requires.md b/kubernetes-master/hooks/relations/tls-certificates/docs/requires.md
new file mode 100644
index 0000000..fdec902
--- /dev/null
+++ b/kubernetes-master/hooks/relations/tls-certificates/docs/requires.md
@@ -0,0 +1,207 @@
+
requires
+
+
+
TlsRequires
+
+```python
+TlsRequires(self, endpoint_name, relation_ids=None)
+```
+
+The client's side of the interface protocol.
+
+The following flags may be set:
+
+ * `{endpoint_name}.available`
+ Whenever the relation is joined.
+
+ * `{endpoint_name}.ca.available`
+ When the root CA information is available via the [root_ca_cert][] and
+ [root_ca_chain][] properties.
+
+ * `{endpoint_name}.ca.changed`
+ When the root CA information has changed, whether because
+ they have just become available or if they were regenerated by the CA.
+ Once processed this flag should be removed by the charm.
+
+ * `{endpoint_name}.certs.available`
+ When the requested server or client certs are available.
+
+ * `{endpoint_name}.certs.changed`
+ When the requested server or client certs have changed, whether because
+ they have just become available or if they were regenerated by the CA.
+ Once processed this flag should be removed by the charm.
+
+ * `{endpoint_name}.server.certs.available`
+ When the server certificates requested by [request_server_cert][] are
+ available via the [server_certs][] collection.
+
+ * `{endpoint_name}.server.certs.changed`
+ When the requested server certificates have changed, whether because
+ they have just become available or if they were regenerated by the CA.
+ Once processed this flag should be removed by the charm.
+
+ * `{endpoint_name}.client.certs.available`
+ When the client certificates requested by [request_client_cert][] are
+ available via the [client_certs][] collection.
+
+ * `{endpoint_name}.client.certs.changed`
+ When the requested client certificates have changed, whether because
+ they have just become available or if they were regenerated by the CA.
+ Once processed this flag should be removed by the charm.
+
+The following flags have been deprecated:
+
+ * `{endpoint_name}.server.cert.available`
+ * `{endpoint_name}.client.cert.available`
+ * `{endpoint_name}.batch.cert.available`
+
+[Certificate]: common.md#tls_certificates_common.Certificate
+[CertificateRequest]: common.md#tls_certificates_common.CertificateRequest
+[root_ca_cert]: requires.md#requires.TlsRequires.root_ca_cert
+[root_ca_chain]: requires.md#requires.TlsRequires.root_ca_chain
+[request_server_cert]: requires.md#requires.TlsRequires.request_server_cert
+[request_client_cert]: requires.md#requires.TlsRequires.request_client_cert
+[server_certs]: requires.md#requires.TlsRequires.server_certs
+[server_certs_map]: requires.md#requires.TlsRequires.server_certs_map
+[client_certs]: requires.md#requires.TlsRequires.client_certs
+
+
application_certs
+
+
+List of [Certificate][] instances for all available application certs.
+
+
client_certs
+
+
+List of [Certificate][] instances for all available client certs.
+
+
client_certs_map
+
+
+Mapping of client [Certificate][] instances by their `common_name`.
+
+
root_ca_cert
+
+
+Root CA certificate.
+
+
root_ca_chain
+
+
+The chain of trust for the root CA.
+
+
server_certs
+
+
+List of [Certificate][] instances for all available server certs.
+
+
server_certs_map
+
+
+Mapping of server [Certificate][] instances by their `common_name`.
+
+
get_ca
+
+```python
+TlsRequires.get_ca()
+```
+
+Return the root CA certificate.
+
+Same as [root_ca_cert][].
+
+
get_chain
+
+```python
+TlsRequires.get_chain()
+```
+
+Return the chain of trust for the root CA.
+
+Same as [root_ca_chain][].
+
+
get_client_cert
+
+```python
+TlsRequires.get_client_cert()
+```
+
+Deprecated. Use [request_client_cert][] and the [client_certs][]
+collection instead.
+
+Return a globally shared client certificate and key.
+
+
get_server_cert
+
+```python
+TlsRequires.get_server_cert()
+```
+
+Deprecated. Use the [server_certs][] collection instead.
+
+Return the cert and key of the first server certificate requested.
+
+
get_batch_requests
+
+```python
+TlsRequires.get_batch_requests()
+```
+
+Deprecated. Use [server_certs_map][] instead.
+
+Mapping of server [Certificate][] instances by their `common_name`.
+
+
request_server_cert
+
+```python
+TlsRequires.request_server_cert(cn, sans=None, cert_name=None)
+```
+
+Request a server certificate and key be generated for the given
+common name (`cn`) and optional list of alternative names (`sans`).
+
+The `cert_name` is deprecated and not needed.
+
+This can be called multiple times to request more than one server
+certificate, although the common names must be unique. If called
+again with the same common name, it will be ignored.
+
+
+
+```python
+TlsRequires.request_server_certs()
+```
+
+Deprecated. Just use [request_server_cert][]; this does nothing.
+
+
request_client_cert
+
+```python
+TlsRequires.request_client_cert(cn, sans)
+```
+
+Request a client certificate and key be generated for the given
+common name (`cn`) and list of alternative names (`sans`).
+
+This can be called multiple times to request more than one client
+certificate, although the common names must be unique. If called
+again with the same common name, it will be ignored.
+
+
request_application_cert
+
+```python
+TlsRequires.request_application_cert(cn, sans)
+```
+
+Request an application certificate and key be generated for the given
+common name (`cn`) and list of alternative names (`sans` ) of this
+unit and all peer units. All units will share a single certificate.
+
diff --git a/kubernetes-master/hooks/relations/tls-certificates/interface.yaml b/kubernetes-master/hooks/relations/tls-certificates/interface.yaml
new file mode 100644
index 0000000..beec53b
--- /dev/null
+++ b/kubernetes-master/hooks/relations/tls-certificates/interface.yaml
@@ -0,0 +1,6 @@
+name: tls-certificates
+summary: |
+ A Transport Layer Security (TLS) charm layer that uses requires and provides
+  to exchange certificates.
+version: 1
+repo: https://github.com/juju-solutions/interface-tls-certificates
diff --git a/kubernetes-master/hooks/relations/tls-certificates/make_docs b/kubernetes-master/hooks/relations/tls-certificates/make_docs
new file mode 100644
index 0000000..2f2274a
--- /dev/null
+++ b/kubernetes-master/hooks/relations/tls-certificates/make_docs
@@ -0,0 +1,23 @@
+#!.tox/py3/bin/python
+
+import sys
+import importlib
+from pathlib import Path
+from shutil import rmtree
+from unittest.mock import patch
+
+import pydocmd.__main__
+
+
+with patch('charmhelpers.core.hookenv.metadata') as metadata:
+ metadata.return_value = {
+ 'requires': {'cert': {'interface': 'tls-certificates'}},
+ 'provides': {'cert': {'interface': 'tls-certificates'}},
+ }
+ sys.path.append('..')
+ sys.modules[''] = importlib.import_module(Path.cwd().name)
+ print(sys.argv)
+ if len(sys.argv) == 1:
+ sys.argv.extend(['build'])
+ pydocmd.__main__.main()
+ rmtree('_build')
diff --git a/kubernetes-master/hooks/relations/tls-certificates/provides.py b/kubernetes-master/hooks/relations/tls-certificates/provides.py
new file mode 100644
index 0000000..0262baa
--- /dev/null
+++ b/kubernetes-master/hooks/relations/tls-certificates/provides.py
@@ -0,0 +1,301 @@
+if not __package__:
+ # fix relative imports when building docs
+ import sys
+ __package__ = sys.modules[''].__name__
+
+from charms.reactive import Endpoint
+from charms.reactive import when, when_not
+from charms.reactive import set_flag, clear_flag, toggle_flag
+
+from .tls_certificates_common import (
+ ApplicationCertificateRequest,
+ CertificateRequest
+)
+
+
+class TlsProvides(Endpoint):
+ """
+ The provider's side of the interface protocol.
+
+ The following flags may be set:
+
+ * `{endpoint_name}.available`
+ Whenever any clients are joined.
+
+ * `{endpoint_name}.certs.requested`
+ When there are new certificate requests of any kind to be processed.
+ The requests can be accessed via [new_requests][].
+
+ * `{endpoint_name}.server.certs.requested`
+ When there are new server certificate requests to be processed.
+ The requests can be accessed via [new_server_requests][].
+
+ * `{endpoint_name}.client.certs.requested`
+ When there are new client certificate requests to be processed.
+ The requests can be accessed via [new_client_requests][].
+
+ [Certificate]: common.md#tls_certificates_common.Certificate
+ [CertificateRequest]: common.md#tls_certificates_common.CertificateRequest
+ [all_requests]: provides.md#provides.TlsProvides.all_requests
+ [new_requests]: provides.md#provides.TlsProvides.new_requests
+ [new_server_requests]: provides.md#provides.TlsProvides.new_server_requests
+ [new_client_requests]: provides.md#provides.TlsProvides.new_client_requests
+ """
+
+ @when('endpoint.{endpoint_name}.joined')
+ def joined(self):
+ set_flag(self.expand_name('{endpoint_name}.available'))
+ toggle_flag(self.expand_name('{endpoint_name}.certs.requested'),
+ self.new_requests)
+ toggle_flag(self.expand_name('{endpoint_name}.server.certs.requested'),
+ self.new_server_requests)
+ toggle_flag(self.expand_name('{endpoint_name}.client.certs.requested'),
+ self.new_client_requests)
+ toggle_flag(
+ self.expand_name('{endpoint_name}.application.certs.requested'),
+ self.new_application_requests)
+ # For backwards compatibility, set the old "cert" flags as well
+ toggle_flag(self.expand_name('{endpoint_name}.server.cert.requested'),
+ self.new_server_requests)
+ toggle_flag(self.expand_name('{endpoint_name}.client.cert.requested'),
+ self.new_client_requests)
+
+ @when_not('endpoint.{endpoint_name}.joined')
+ def broken(self):
+ clear_flag(self.expand_name('{endpoint_name}.available'))
+ clear_flag(self.expand_name('{endpoint_name}.certs.requested'))
+ clear_flag(self.expand_name('{endpoint_name}.server.certs.requested'))
+ clear_flag(self.expand_name('{endpoint_name}.client.certs.requested'))
+ clear_flag(
+ self.expand_name('{endpoint_name}.application.certs.requested'))
+
+ def set_ca(self, certificate_authority):
+ """
+ Publish the CA to all related applications.
+ """
+ for relation in self.relations:
+ # All the clients get the same CA, so send it to them.
+ relation.to_publish_raw['ca'] = certificate_authority
+
+ def set_chain(self, chain):
+ """
+ Publish the chain of trust to all related applications.
+ """
+ for relation in self.relations:
+ # All the clients get the same chain, so send it to them.
+ relation.to_publish_raw['chain'] = chain
+
+ def set_client_cert(self, cert, key):
+ """
+ Deprecated. This is only for backwards compatibility.
+
+ Publish a globally shared client cert and key.
+ """
+ for relation in self.relations:
+ relation.to_publish_raw.update({
+ 'client.cert': cert,
+ 'client.key': key,
+ })
+
+ def set_server_cert(self, scope, cert, key):
+ """
+ Deprecated. Use one of the [new_requests][] collections and
+ `request.set_cert()` instead.
+
+ Set the server cert and key for the request identified by `scope`.
+ """
+ request = self.get_server_requests()[scope]
+ request.set_cert(cert, key)
+
+ def set_server_multicerts(self, scope):
+ """
+ Deprecated. Done automatically.
+ """
+ pass
+
+ def add_server_cert(self, scope, cn, cert, key):
+ '''
+ Deprecated. Use `request.set_cert()` instead.
+ '''
+ self.set_server_cert(scope, cert, key)
+
+ def get_server_requests(self):
+ """
+ Deprecated. Use the [new_requests][] or [server_requests][]
+ collections instead.
+
+ One provider can have many requests to generate server certificates.
+ Return a map of all server request objects indexed by a unique
+ identifier.
+ """
+ return {req._key: req for req in self.new_server_requests}
+
+ @property
+ def all_requests(self):
+ """
+ List of all requests that have been made.
+
+ Each will be an instance of [CertificateRequest][].
+
+ Example usage:
+
+ ```python
+ @when('certs.regen',
+ 'tls.certs.available')
+ def regen_all_certs():
+ tls = endpoint_from_flag('tls.certs.available')
+ for request in tls.all_requests:
+ cert, key = generate_cert(request.cert_type,
+ request.common_name,
+ request.sans)
+ request.set_cert(cert, key)
+ ```
+ """
+ requests = []
+ for unit in self.all_joined_units:
+ # handle older single server cert request
+ if unit.received_raw['common_name']:
+ requests.append(CertificateRequest(
+ unit,
+ 'server',
+ unit.received_raw['certificate_name'],
+ unit.received_raw['common_name'],
+ unit.received['sans'],
+ ))
+
+            # handle multi server cert requests
+ reqs = unit.received['cert_requests'] or {}
+ for common_name, req in reqs.items():
+ requests.append(CertificateRequest(
+ unit,
+ 'server',
+ common_name,
+ common_name,
+ req['sans'],
+ ))
+
+ # handle client cert requests
+ reqs = unit.received['client_cert_requests'] or {}
+ for common_name, req in reqs.items():
+ requests.append(CertificateRequest(
+ unit,
+ 'client',
+ common_name,
+ common_name,
+ req['sans'],
+ ))
+ # handle application cert requests
+ reqs = unit.received['application_cert_requests'] or {}
+ for common_name, req in reqs.items():
+ requests.append(ApplicationCertificateRequest(
+ unit,
+ 'application',
+ common_name,
+ common_name,
+ req['sans']
+ ))
+ return requests
+
+ @property
+ def new_requests(self):
+ """
+ Filtered view of [all_requests][] that only includes requests that
+ haven't been handled.
+
+ Each will be an instance of [CertificateRequest][].
+
+ This collection can also be further filtered by request type using
+ [new_server_requests][] or [new_client_requests][].
+
+ Example usage:
+
+ ```python
+ @when('tls.certs.requested')
+ def gen_certs():
+ tls = endpoint_from_flag('tls.certs.requested')
+ for request in tls.new_requests:
+ cert, key = generate_cert(request.cert_type,
+ request.common_name,
+ request.sans)
+ request.set_cert(cert, key)
+ ```
+ """
+ return [req for req in self.all_requests if not req.is_handled]
+
+ @property
+ def new_server_requests(self):
+ """
+ Filtered view of [new_requests][] that only includes server cert
+ requests.
+
+ Each will be an instance of [CertificateRequest][].
+
+ Example usage:
+
+ ```python
+ @when('tls.server.certs.requested')
+ def gen_server_certs():
+ tls = endpoint_from_flag('tls.server.certs.requested')
+ for request in tls.new_server_requests:
+ cert, key = generate_server_cert(request.common_name,
+ request.sans)
+ request.set_cert(cert, key)
+ ```
+ """
+ return [req for req in self.new_requests if req.cert_type == 'server']
+
+ @property
+ def new_client_requests(self):
+ """
+ Filtered view of [new_requests][] that only includes client cert
+ requests.
+
+ Each will be an instance of [CertificateRequest][].
+
+ Example usage:
+
+ ```python
+ @when('tls.client.certs.requested')
+ def gen_client_certs():
+ tls = endpoint_from_flag('tls.client.certs.requested')
+ for request in tls.new_client_requests:
+ cert, key = generate_client_cert(request.common_name,
+ request.sans)
+ request.set_cert(cert, key)
+ ```
+ """
+ return [req for req in self.new_requests if req.cert_type == 'client']
+
+ @property
+ def new_application_requests(self):
+ """
+ Filtered view of [new_requests][] that only includes application cert
+ requests.
+
+ Each will be an instance of [ApplicationCertificateRequest][].
+
+ Example usage:
+
+ ```python
+ @when('tls.application.certs.requested')
+ def gen_application_certs():
+ tls = endpoint_from_flag('tls.application.certs.requested')
+ for request in tls.new_application_requests:
+ cert, key = generate_application_cert(request.common_name,
+ request.sans)
+ request.set_cert(cert, key)
+ ```
+
+ :returns: List of certificate requests.
+ :rtype: [CertificateRequest, ]
+ """
+ return [req for req in self.new_requests
+ if req.cert_type == 'application']
+
+ @property
+ def all_published_certs(self):
+ """
+ List of all [Certificate][] instances that this provider has published
+ for all related applications.
+ """
+ return [req.cert for req in self.all_requests if req.cert]
diff --git a/kubernetes-master/hooks/relations/tls-certificates/pydocmd.yml b/kubernetes-master/hooks/relations/tls-certificates/pydocmd.yml
new file mode 100644
index 0000000..c568913
--- /dev/null
+++ b/kubernetes-master/hooks/relations/tls-certificates/pydocmd.yml
@@ -0,0 +1,19 @@
+site_name: 'TLS Certificates Interface'
+
+generate:
+ - requires.md:
+ - requires
+ - requires.TlsRequires+
+ - provides.md:
+ - provides
+ - provides.TlsProvides+
+ - common.md:
+ - tls_certificates_common.CertificateRequest+
+ - tls_certificates_common.Certificate+
+
+pages:
+ - Requires: requires.md
+ - Provides: provides.md
+ - Common: common.md
+
+gens_dir: docs
diff --git a/kubernetes-master/hooks/relations/tls-certificates/requires.py b/kubernetes-master/hooks/relations/tls-certificates/requires.py
new file mode 100644
index 0000000..951f953
--- /dev/null
+++ b/kubernetes-master/hooks/relations/tls-certificates/requires.py
@@ -0,0 +1,342 @@
+if not __package__:
+ # fix relative imports when building docs
+ import sys
+ __package__ = sys.modules[''].__name__
+
+import uuid
+
+from charmhelpers.core import hookenv
+
+from charms.reactive import when, when_not
+from charms.reactive import set_flag, clear_flag, toggle_flag
+from charms.reactive import Endpoint
+from charms.reactive import data_changed
+
+from .tls_certificates_common import Certificate
+
+
+class TlsRequires(Endpoint):
+ """
+ The client's side of the interface protocol.
+
+ The following flags may be set:
+
+ * `{endpoint_name}.available`
+ Whenever the relation is joined.
+
+ * `{endpoint_name}.ca.available`
+ When the root CA information is available via the [root_ca_cert][] and
+ [root_ca_chain][] properties.
+
+ * `{endpoint_name}.ca.changed`
+ When the root CA information has changed, whether because
+ they have just become available or if they were regenerated by the CA.
+ Once processed this flag should be removed by the charm.
+
+ * `{endpoint_name}.certs.available`
+ When the requested server or client certs are available.
+
+ * `{endpoint_name}.certs.changed`
+ When the requested server or client certs have changed, whether because
+ they have just become available or if they were regenerated by the CA.
+ Once processed this flag should be removed by the charm.
+
+ * `{endpoint_name}.server.certs.available`
+ When the server certificates requested by [request_server_cert][] are
+ available via the [server_certs][] collection.
+
+ * `{endpoint_name}.server.certs.changed`
+ When the requested server certificates have changed, whether because
+ they have just become available or if they were regenerated by the CA.
+ Once processed this flag should be removed by the charm.
+
+ * `{endpoint_name}.client.certs.available`
+ When the client certificates requested by [request_client_cert][] are
+ available via the [client_certs][] collection.
+
+ * `{endpoint_name}.client.certs.changed`
+ When the requested client certificates have changed, whether because
+ they have just become available or if they were regenerated by the CA.
+ Once processed this flag should be removed by the charm.
+
+ The following flags have been deprecated:
+
+ * `{endpoint_name}.server.cert.available`
+ * `{endpoint_name}.client.cert.available`
+ * `{endpoint_name}.batch.cert.available`
+
+ [Certificate]: common.md#tls_certificates_common.Certificate
+ [CertificateRequest]: common.md#tls_certificates_common.CertificateRequest
+ [root_ca_cert]: requires.md#requires.TlsRequires.root_ca_cert
+ [root_ca_chain]: requires.md#requires.TlsRequires.root_ca_chain
+ [request_server_cert]: requires.md#requires.TlsRequires.request_server_cert
+ [request_client_cert]: requires.md#requires.TlsRequires.request_client_cert
+ [server_certs]: requires.md#requires.TlsRequires.server_certs
+ [server_certs_map]: requires.md#requires.TlsRequires.server_certs_map
+    [client_certs]: requires.md#requires.TlsRequires.client_certs
+ """
+
+ @when('endpoint.{endpoint_name}.joined')
+ def joined(self):
+ self.relations[0].to_publish_raw['unit_name'] = self._unit_name
+ prefix = self.expand_name('{endpoint_name}.')
+ ca_available = self.root_ca_cert
+ ca_changed = ca_available and data_changed(prefix + 'ca',
+ self.root_ca_cert)
+ server_available = self.server_certs
+ server_changed = server_available and data_changed(prefix + 'servers',
+ self.server_certs)
+ client_available = self.client_certs
+ client_changed = client_available and data_changed(prefix + 'clients',
+ self.client_certs)
+ certs_available = server_available or client_available
+ certs_changed = server_changed or client_changed
+
+ set_flag(prefix + 'available')
+ toggle_flag(prefix + 'ca.available', ca_available)
+ toggle_flag(prefix + 'ca.changed', ca_changed)
+ toggle_flag(prefix + 'server.certs.available', server_available)
+ toggle_flag(prefix + 'server.certs.changed', server_changed)
+ toggle_flag(prefix + 'client.certs.available', client_available)
+ toggle_flag(prefix + 'client.certs.changed', client_changed)
+ toggle_flag(prefix + 'certs.available', certs_available)
+ toggle_flag(prefix + 'certs.changed', certs_changed)
+ # deprecated
+ toggle_flag(prefix + 'server.cert.available', self.server_certs)
+ toggle_flag(prefix + 'client.cert.available', self.get_client_cert())
+ toggle_flag(prefix + 'batch.cert.available', self.server_certs)
+
+ @when_not('endpoint.{endpoint_name}.joined')
+ def broken(self):
+ prefix = self.expand_name('{endpoint_name}.')
+ clear_flag(prefix + 'available')
+ clear_flag(prefix + 'ca.available')
+ clear_flag(prefix + 'ca.changed')
+ clear_flag(prefix + 'server.certs.available')
+ clear_flag(prefix + 'server.certs.changed')
+ clear_flag(prefix + 'client.certs.available')
+ clear_flag(prefix + 'client.certs.changed')
+ clear_flag(prefix + 'certs.available')
+ clear_flag(prefix + 'certs.changed')
+ # deprecated
+ clear_flag(prefix + 'server.cert.available')
+ clear_flag(prefix + 'client.cert.available')
+ clear_flag(prefix + 'batch.cert.available')
+
+ @property
+ def _unit_name(self):
+ return hookenv.local_unit().replace('/', '_')
+
+ @property
+ def root_ca_cert(self):
+ """
+ Root CA certificate.
+ """
+ # only the leader of the provider should set the CA, or all units
+ # had better agree
+ return self.all_joined_units.received_raw['ca']
+
+ def get_ca(self):
+ """
+ Return the root CA certificate.
+
+ Same as [root_ca_cert][].
+ """
+ return self.root_ca_cert
+
+ @property
+ def root_ca_chain(self):
+ """
+ The chain of trust for the root CA.
+ """
+ # only the leader of the provider should set the CA, or all units
+ # had better agree
+ return self.all_joined_units.received_raw['chain']
+
+ def get_chain(self):
+ """
+ Return the chain of trust for the root CA.
+
+ Same as [root_ca_chain][].
+ """
+ return self.root_ca_chain
+
+ def get_client_cert(self):
+ """
+ Deprecated. Use [request_client_cert][] and the [client_certs][]
+ collection instead.
+
+ Return a globally shared client certificate and key.
+ """
+ data = self.all_joined_units.received_raw
+ return (data['client.cert'], data['client.key'])
+
+ def get_server_cert(self):
+ """
+ Deprecated. Use the [server_certs][] collection instead.
+
+ Return the cert and key of the first server certificate requested.
+ """
+ if not self.server_certs:
+ return (None, None)
+ cert = self.server_certs[0]
+ return (cert.cert, cert.key)
+
+ @property
+ def server_certs(self):
+ """
+ List of [Certificate][] instances for all available server certs.
+ """
+ certs = []
+ raw_data = self.all_joined_units.received_raw
+ json_data = self.all_joined_units.received
+
+ # for backwards compatibility, the first cert goes in its own fields
+ if self.relations:
+ common_name = self.relations[0].to_publish_raw['common_name']
+ cert = raw_data['{}.server.cert'.format(self._unit_name)]
+ key = raw_data['{}.server.key'.format(self._unit_name)]
+ if cert and key:
+ certs.append(Certificate('server',
+ common_name,
+ cert,
+ key))
+
+ # subsequent requests go in the collection
+ field = '{}.processed_requests'.format(self._unit_name)
+ certs_data = json_data[field] or {}
+ certs.extend(Certificate('server',
+ common_name,
+ cert['cert'],
+ cert['key'])
+ for common_name, cert in certs_data.items())
+ return certs
+
+ @property
+ def application_certs(self):
+ """
+        List containing the application Certificate cert.
+
+ :returns: A list containing one certificate
+ :rtype: [Certificate()]
+ """
+ certs = []
+ json_data = self.all_joined_units.received
+ field = '{}.processed_application_requests'.format(self._unit_name)
+ certs_data = json_data[field] or {}
+ app_cert_data = certs_data.get('app_data')
+ if app_cert_data:
+ certs = [Certificate(
+ 'server',
+ 'app_data',
+ app_cert_data['cert'],
+ app_cert_data['key'])]
+ return certs
+
+ @property
+ def server_certs_map(self):
+ """
+ Mapping of server [Certificate][] instances by their `common_name`.
+ """
+ return {cert.common_name: cert for cert in self.server_certs}
+
+ def get_batch_requests(self):
+ """
+ Deprecated. Use [server_certs_map][] instead.
+
+ Mapping of server [Certificate][] instances by their `common_name`.
+ """
+ return self.server_certs_map
+
+ @property
+ def client_certs(self):
+ """
+ List of [Certificate][] instances for all available client certs.
+ """
+ field = '{}.processed_client_requests'.format(self._unit_name)
+ certs_data = self.all_joined_units.received[field] or {}
+ return [Certificate('client',
+ common_name,
+ cert['cert'],
+ cert['key'])
+ for common_name, cert in certs_data.items()]
+
+ @property
+ def client_certs_map(self):
+ """
+ Mapping of client [Certificate][] instances by their `common_name`.
+ """
+ return {cert.common_name: cert for cert in self.client_certs}
+
+ def request_server_cert(self, cn, sans=None, cert_name=None):
+ """
+ Request a server certificate and key be generated for the given
+ common name (`cn`) and optional list of alternative names (`sans`).
+
+ The `cert_name` is deprecated and not needed.
+
+ This can be called multiple times to request more than one server
+ certificate, although the common names must be unique. If called
+ again with the same common name, it will be ignored.
+ """
+ if not self.relations:
+ return
+ # assume we'll only be connected to one provider
+ to_publish_json = self.relations[0].to_publish
+ to_publish_raw = self.relations[0].to_publish_raw
+ if to_publish_raw['common_name'] in (None, '', cn):
+ # for backwards compatibility, first request goes in its own fields
+ to_publish_raw['common_name'] = cn
+ to_publish_json['sans'] = sans or []
+ cert_name = to_publish_raw.get('certificate_name') or cert_name
+ if cert_name is None:
+ cert_name = str(uuid.uuid4())
+ to_publish_raw['certificate_name'] = cert_name
+ else:
+ # subsequent requests go in the collection
+ requests = to_publish_json.get('cert_requests', {})
+ requests[cn] = {'sans': sans or []}
+ to_publish_json['cert_requests'] = requests
+
+ def add_request_server_cert(self, cn, sans):
+ """
+ Deprecated. Use [request_server_cert][] instead.
+ """
+ self.request_server_cert(cn, sans)
+
+ def request_server_certs(self):
+ """
+ Deprecated. Just use [request_server_cert][]; this does nothing.
+ """
+ pass
+
+ def request_client_cert(self, cn, sans):
+ """
+ Request a client certificate and key be generated for the given
+ common name (`cn`) and list of alternative names (`sans`).
+
+ This can be called multiple times to request more than one client
+ certificate, although the common names must be unique. If called
+ again with the same common name, it will be ignored.
+ """
+ if not self.relations:
+ return
+ # assume we'll only be connected to one provider
+ to_publish_json = self.relations[0].to_publish
+ requests = to_publish_json.get('client_cert_requests', {})
+ requests[cn] = {'sans': sans}
+ to_publish_json['client_cert_requests'] = requests
+
+ def request_application_cert(self, cn, sans):
+ """
+ Request an application certificate and key be generated for the given
+ common name (`cn`) and list of alternative names (`sans` ) of this
+        unit and all peer units. All units will share a single certificate.
+ """
+ if not self.relations:
+ return
+ # assume we'll only be connected to one provider
+ to_publish_json = self.relations[0].to_publish
+ requests = to_publish_json.get('application_cert_requests', {})
+ requests[cn] = {'sans': sans}
+ to_publish_json['application_cert_requests'] = requests
diff --git a/kubernetes-master/hooks/relations/tls-certificates/tls_certificates_common.py b/kubernetes-master/hooks/relations/tls-certificates/tls_certificates_common.py
new file mode 100644
index 0000000..99a2f8c
--- /dev/null
+++ b/kubernetes-master/hooks/relations/tls-certificates/tls_certificates_common.py
@@ -0,0 +1,302 @@
+from charms.reactive import clear_flag, is_data_changed, data_changed
+
+
+class CertificateRequest(dict):
+    """A single server or client certificate request from a related unit.
+
+    Behaves as a dict of the request data (certificate_name, common_name,
+    sans) while exposing helper properties for the provider side of the
+    tls-certificates interface.
+    """
+
+    def __init__(self, unit, cert_type, cert_name, common_name, sans):
+        # Remote unit (charms.reactive RelatedUnit) the request came from.
+        self._unit = unit
+        # Either 'server' or 'client' (see _publish_key).
+        self._cert_type = cert_type
+        super().__init__({
+            'certificate_name': cert_name,
+            'common_name': common_name,
+            'sans': sans,
+        })
+
+    @property
+    def _key(self):
+        # Unique key used with data_changed/is_data_changed to track
+        # whether the sans for this particular request have changed.
+        return '.'.join((self._unit.relation.relation_id,
+                         self.unit_name,
+                         self.common_name))
+
+    def resolve_unit_name(self, unit):
+        """Return name of unit associated with this request.
+
+        unit_name should be provided in the relation data to ensure
+        compatibility with cross-model relations. If the unit name
+        is absent then fall back to unit_name attribute of the
+        unit associated with this request.
+
+        :param unit: Unit to extract name from
+        :type unit: charms.reactive.endpoints.RelatedUnit
+        :returns: Name of unit
+        :rtype: str
+        """
+        # NOTE(review): assumes received_raw yields a falsy value (rather
+        # than raising KeyError) when 'unit_name' was not published --
+        # confirm against charms.reactive data-view semantics.
+        unit_name = unit.received_raw['unit_name']
+        if not unit_name:
+            unit_name = unit.unit_name
+        return unit_name
+
+    @property
+    def unit_name(self):
+        """Name of this unit.
+
+        :returns: Name of unit
+        :rtype: str
+        """
+        # '/' is replaced so the name is usable inside relation-data keys.
+        return self.resolve_unit_name(unit=self._unit).replace('/', '_')
+
+    @property
+    def application_name(self):
+        """Name of the application which the request came from.
+
+        :returns: Name of application
+        :rtype: str
+        """
+        # Juju unit names have the form '<application>/<number>'.
+        return self.resolve_unit_name(unit=self._unit).split('/')[0]
+
+    @property
+    def cert_type(self):
+        """
+        Type of certificate, 'server' or 'client', being requested.
+        """
+        return self._cert_type
+
+    @property
+    def cert_name(self):
+        # Certificate name supplied by the requesting charm.
+        return self['certificate_name']
+
+    @property
+    def common_name(self):
+        # Common name (CN) for the requested certificate.
+        return self['common_name']
+
+    @property
+    def sans(self):
+        # List of subject alternative names; may be None.
+        return self['sans']
+
+    @property
+    def _publish_key(self):
+        # Relation-data key under which the generated cert is published.
+        if self.cert_type == 'server':
+            return '{}.processed_requests'.format(self.unit_name)
+        elif self.cert_type == 'client':
+            return '{}.processed_client_requests'.format(self.unit_name)
+        raise ValueError('Unknown cert_type: {}'.format(self.cert_type))
+
+    @property
+    def _server_cert_key(self):
+        # Legacy raw-data key for the single top-level server cert.
+        return '{}.server.cert'.format(self.unit_name)
+
+    @property
+    def _server_key_key(self):
+        # Legacy raw-data key for the single top-level server key.
+        return '{}.server.key'.format(self.unit_name)
+
+    @property
+    def _is_top_level_server_cert(self):
+        # True when this request matches the old single-server-cert form,
+        # i.e. the CN the remote unit published as its top-level
+        # 'common_name' field.
+        return (self.cert_type == 'server' and
+                self.common_name == self._unit.received_raw['common_name'])
+
+    @property
+    def cert(self):
+        """
+        The cert published for this request, if any.
+        """
+        cert, key = None, None
+        if self._is_top_level_server_cert:
+            # Legacy path: cert/key live directly in raw relation data.
+            tpr = self._unit.relation.to_publish_raw
+            cert = tpr[self._server_cert_key]
+            key = tpr[self._server_key_key]
+        else:
+            # Collection path: cert/key live under the JSON-encoded
+            # processed-requests map, keyed by common name.
+            tp = self._unit.relation.to_publish
+            certs_data = tp.get(self._publish_key, {})
+            cert_data = certs_data.get(self.common_name, {})
+            cert = cert_data.get('cert')
+            key = cert_data.get('key')
+        if cert and key:
+            return Certificate(self.cert_type, self.common_name, cert, key)
+        return None
+
+    @property
+    def is_handled(self):
+        # Handled means a cert was published AND the sans recorded at
+        # publish time (see set_cert) still match the requested sans.
+        has_cert = self.cert is not None
+        same_sans = not is_data_changed(self._key,
+                                        sorted(set(self.sans or [])))
+        return has_cert and same_sans
+
+    def set_cert(self, cert, key):
+        # Publish the generated cert/key back on the relation and clear
+        # the request flags once no new requests remain.
+        rel = self._unit.relation
+        if self._is_top_level_server_cert:
+            # backwards compatibility; if this is the cert that was requested
+            # as a single server cert, set it in the response as the single
+            # server cert
+            rel.to_publish_raw.update({
+                self._server_cert_key: cert,
+                self._server_key_key: key,
+            })
+        else:
+            data = rel.to_publish.get(self._publish_key, {})
+            data[self.common_name] = {
+                'cert': cert,
+                'key': key,
+            }
+            # re-assign so the change is written back to the relation data
+            rel.to_publish[self._publish_key] = data
+        if not rel.endpoint.new_server_requests:
+            clear_flag(rel.endpoint.expand_name('{endpoint_name}.server'
+                                                '.cert.requested'))
+        if not rel.endpoint.new_requests:
+            clear_flag(rel.endpoint.expand_name('{endpoint_name}.'
+                                                'certs.requested'))
+        # Record the sans served so is_handled can detect later changes.
+        data_changed(self._key, sorted(set(self.sans or [])))
+
+
+class ApplicationCertificateRequest(CertificateRequest):
+    """
+    A request for an application consistent certificate.
+
+    This is a request for a certificate that works for all units of an
+    application. All sans and cns are added together to produce one
+    certificate and the same certificate and key are sent to all the
+    units of an application. Only one ApplicationCertificateRequest
+    is needed per application.
+    """
+
+    @property
+    def _key(self):
+        """Key to identify this cert.
+
+        :returns: cert key
+        :rtype: str
+        """
+        # One shared key per relation: the application cert is tracked
+        # per-application, not per-unit/per-CN like the base class.
+        return '{}.{}'.format(self._unit.relation.relation_id, 'app_cert')
+
+    @property
+    def cert(self):
+        """
+        The cert published for this request, if any.
+
+        :returns: Certificate
+        :rtype: Certificate or None
+        """
+        cert, key = None, None
+        tp = self._unit.relation.to_publish
+        certs_data = tp.get(self._publish_key, {})
+        # The shared application cert is stored under the fixed
+        # 'app_data' key (see set_cert).
+        cert_data = certs_data.get('app_data', {})
+        cert = cert_data.get('cert')
+        key = cert_data.get('key')
+        if cert and key:
+            return Certificate(self.cert_type, self.common_name, cert, key)
+        return None
+
+    @property
+    def is_handled(self):
+        """Whether the certificate has been handled.
+
+        :returns: If the cert has been handled
+        :rtype: bool
+        """
+        has_cert = self.cert is not None
+        # same_sans covers the aggregated sans of all units (see sans).
+        same_sans = not is_data_changed(self._key,
+                                        sorted(set(self.sans or [])))
+        return has_cert and same_sans
+
+    @property
+    def sans(self):
+        """Generate a list of all sans from all units of application
+
+        Examine all units of the application and compile a list of
+        all sans. CNs are treated as additional san entries.
+
+        :returns: List of sans
+        :rtype: List[str]
+        """
+        _sans = []
+        for unit in self._unit.relation.units:
+            reqs = unit.received['application_cert_requests'] or {}
+            for cn, req in reqs.items():
+                _sans.append(cn)
+                _sans.extend(req['sans'])
+        # de-duplicate and sort for a stable value (used by data_changed)
+        return sorted(list(set(_sans)))
+
+    @property
+    def _request_key(self):
+        """Key used to request cert
+
+        :returns: Key used to request cert
+        :rtype: str
+        """
+        return 'application_cert_requests'
+
+    def derive_publish_key(self, unit=None):
+        """Derive the application cert publish key for a unit.
+
+        :param unit: Unit to extract name from (defaults to this
+                     request's unit)
+        :type unit: charms.reactive.endpoints.RelatedUnit
+        :returns: publish key
+        :rtype: str
+        """
+        if not unit:
+            unit = self._unit
+        unit_name = self.resolve_unit_name(unit).replace('/', '_')
+        return '{}.processed_application_requests'.format(unit_name)
+
+    @property
+    def _publish_key(self):
+        """Key used to publish cert
+
+        :returns: Key used to publish cert
+        :rtype: str
+        """
+        return self.derive_publish_key(unit=self._unit)
+
+    def set_cert(self, cert, key):
+        """Send the cert and key to all units of the application
+
+        :param cert: TLS Certificate
+        :type cert: str
+        :param key: TLS Private Key
+        :type key: str
+        """
+        rel = self._unit.relation
+        for unit in self._unit.relation.units:
+            # Publish the same cert/key under each unit's publish key.
+            pub_key = self.derive_publish_key(unit=unit)
+            data = rel.to_publish.get(
+                pub_key,
+                {})
+            data['app_data'] = {
+                'cert': cert,
+                'key': key,
+            }
+            # re-assign so the change is written back to the relation data
+            rel.to_publish[pub_key] = data
+        if not rel.endpoint.new_application_requests:
+            clear_flag(rel.endpoint.expand_name(
+                '{endpoint_name}.application.certs.requested'))
+        # Record the aggregated sans so is_handled can detect changes.
+        data_changed(self._key, sorted(set(self.sans or [])))
+
+
+class Certificate(dict):
+    """
+    Represents a created certificate and key.
+
+    The ``cert_type``, ``common_name``, ``cert``, and ``key`` values can
+    be accessed either as properties or as the contents of the dict.
+    """
+    def __init__(self, cert_type, common_name, cert, key):
+        super().__init__({
+            'cert_type': cert_type,
+            'common_name': common_name,
+            'cert': cert,
+            'key': key,
+        })
+
+    @property
+    def cert_type(self):
+        # e.g. 'server' or 'client', as passed to __init__
+        return self['cert_type']
+
+    @property
+    def common_name(self):
+        # Common name (CN) the certificate was issued for.
+        return self['common_name']
+
+    @property
+    def cert(self):
+        # Certificate data, exactly as provided to __init__.
+        return self['cert']
+
+    @property
+    def key(self):
+        # Private key data, exactly as provided to __init__.
+        return self['key']
diff --git a/kubernetes-master/hooks/relations/vault-kv/.gitignore b/kubernetes-master/hooks/relations/vault-kv/.gitignore
new file mode 100644
index 0000000..9dd3eb8
--- /dev/null
+++ b/kubernetes-master/hooks/relations/vault-kv/.gitignore
@@ -0,0 +1,2 @@
+.tox
+.testrepository
diff --git a/kubernetes-master/hooks/relations/vault-kv/README.md b/kubernetes-master/hooks/relations/vault-kv/README.md
new file mode 100644
index 0000000..f09c312
--- /dev/null
+++ b/kubernetes-master/hooks/relations/vault-kv/README.md
@@ -0,0 +1,52 @@
+# Overview
+
+This interface handles the communication with the vault charm using the
+vault-kv interface type.
+
+Vault will enable simple KV based secrets backends with AppRole based
+authentication and policies to allow consuming charms to store and retrieve
+secrets in Vault.
+
+Access to the backend will be limited to the network address binding
+of the relation endpoint name and ownership of a secret\_id which the
+consuming application must retrieve using a one-shot token out-of-band
+from Juju.
+
+# Usage
+
+## Requires
+
+The interface layer will set the following reactive states, as appropriate:
+
+ * `{relation_name}.connected` The relation is established and ready for
+ the local charm to make a request for access to a secrets backend using
+ the `request_secret_backend` method.
+
+ * `{relation_name}.available` When vault has created the backend and an
+ associated AppRole to allow the local charm to store and retrieve secrets
+ in vault - the `vault_url` and `unit_role_id` properties will be set.
+
+ For example:
+
+```python
+from charms.reactive.flags import endpoint_from_flag
+
+ @when('secrets-storage.connected')
+ def ss_connected():
+ secrets = endpoint_from_flag('secrets-storage.connected')
+ secrets.request_secret_backend('charm-vaultlocker', isolated=True)
+
+
+ @when('secrets-storage.available')
+ def ss_ready_for_use():
+ secrets = endpoint_from_flag('secrets-storage.connected')
+ configure_my_local_service(
+ vault_url=secrets.vault_url,
+ role_id=secrets.unit_role_id,
+ secret_id=vault.get_response(secrets.unit_token),
+ backend='charm-vaultlocker',
+ )
+ ```
+
+ Note that the backend name must be prefixed with 'charm-' otherwise the vault
+ charm will skip creation of the secrets backend and associated access.
diff --git a/kubernetes-master/hooks/relations/vault-kv/__init__.py b/kubernetes-master/hooks/relations/vault-kv/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/kubernetes-master/hooks/relations/vault-kv/copyright b/kubernetes-master/hooks/relations/vault-kv/copyright
new file mode 100644
index 0000000..32a8f52
--- /dev/null
+++ b/kubernetes-master/hooks/relations/vault-kv/copyright
@@ -0,0 +1,21 @@
+Format: http://www.debian.org/doc/packaging-manuals/copyright-format/1.0
+
+Files: *
+Copyright: 2018, Canonical Ltd.
+License: Apache-2.0
+
+License: Apache-2.0
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+ .
+ http://www.apache.org/licenses/LICENSE-2.0
+ .
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+ .
+ On Debian-based systems the full text of the Apache version 2.0 license
+ can be found in `/usr/share/common-licenses/Apache-2.0'.
diff --git a/kubernetes-master/hooks/relations/vault-kv/interface.yaml b/kubernetes-master/hooks/relations/vault-kv/interface.yaml
new file mode 100644
index 0000000..b03cb19
--- /dev/null
+++ b/kubernetes-master/hooks/relations/vault-kv/interface.yaml
@@ -0,0 +1,4 @@
+name: vault-kv
+summary: Vault simple Key/Value secret storage interface
+version: 1
+maintainer: "James Page <james.page@ubuntu.com>"
diff --git a/kubernetes-master/hooks/relations/vault-kv/provides.py b/kubernetes-master/hooks/relations/vault-kv/provides.py
new file mode 100644
index 0000000..8039448
--- /dev/null
+++ b/kubernetes-master/hooks/relations/vault-kv/provides.py
@@ -0,0 +1,94 @@
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from charms.reactive import is_flag_set, toggle_flag, clear_flag
+from charms.reactive import Endpoint
+from charmhelpers import core as ch_core
+
+
+class VaultKVProvides(Endpoint):
+    """Provider side of the vault-kv interface (used by the vault charm)."""
+
+    def manage_flags(self):
+        # Fold the per-field 'changed' flags into a single 'new-request'
+        # flag so the charm reacts once per updated request.
+        any_fields_changed = False
+        for field in ('access_address',
+                      'secret_backend',
+                      'hostname',
+                      'isolated'):
+            # NOTE(review): relies on expand_name substituting only
+            # {endpoint_name} and leaving the bare '{}' placeholder for
+            # the chained .format(field) -- confirm against the
+            # charms.reactive Endpoint.expand_name implementation.
+            flag = self.expand_name('endpoint.{endpoint_name}.'
+                                    'changed.{}').format(field)
+            any_fields_changed = any_fields_changed or is_flag_set(flag)
+            clear_flag(flag)
+        toggle_flag(self.expand_name('{endpoint_name}.connected'),
+                    self.is_joined)
+        toggle_flag(self.expand_name('endpoint.{endpoint_name}.new-request'),
+                    any_fields_changed)
+
+    def publish_url(self, vault_url, remote_binding=None):
+        """ Publish URL for Vault to all Relations
+
+        :param vault_url: api url used by remote client to speak to vault.
+        :param remote_binding: Deprecated
+        """
+        if remote_binding:
+            ch_core.hookenv.log(
+                "Use of remote_binding in publish_url is deprecated. "
+                "See LP Bug #1895185", "WARNING")
+        for relation in self.relations:
+            relation.to_publish['vault_url'] = vault_url
+
+    def publish_ca(self, vault_ca):
+        """ Publish SSL CA for Vault to all Relations """
+        # NOTE(review): the requires side base64-decodes this value, so it
+        # is presumably base64-encoded here -- confirm against the caller.
+        for relation in self.relations:
+            relation.to_publish['vault_ca'] = vault_ca
+
+    def get_remote_unit_name(self, unit):
+        """Get the remote unit's name.
+
+        :param unit: Unit to get name for.
+        :type unit: Unit
+        :returns: Unit name
+        :rtype: str
+        """
+        # Prefer the name the unit published itself (needed for
+        # cross-model relations); fall back to the local view of it.
+        return unit.received.get('unit_name') or unit.unit_name
+
+    def set_role_id(self, unit, role_id, token):
+        """ Set the AppRole ID and token for out-of-band Secret ID retrieval
+        for a specific remote unit """
+        # for cmr we will need the other end to provide their unit name
+        # explicitly.
+        unit_name = self.get_remote_unit_name(unit)
+        unit.relation.to_publish['{}_role_id'.format(unit_name)] = role_id
+        unit.relation.to_publish['{}_token'.format(unit_name)] = token
+
+    def requests(self):
+        """ Retrieve full set of setup requests from all remote units """
+        requests = []
+        for relation in self.relations:
+            for unit in relation.units:
+                access_address = unit.received['access_address']
+                ingress_address = unit.received['ingress-address']
+                secret_backend = unit.received['secret_backend']
+                hostname = unit.received['hostname']
+                isolated = unit.received['isolated']
+                unit_name = self.get_remote_unit_name(unit)
+                # Skip units that have not yet published a complete
+                # request; 'isolated' may legitimately be False, so it is
+                # only checked for presence.
+                if not (secret_backend and access_address and
+                        hostname and isolated is not None):
+                    continue
+                requests.append({
+                    'unit': unit,
+                    'unit_name': unit_name,
+                    'access_address': access_address,
+                    'ingress_address': ingress_address,
+                    'secret_backend': secret_backend,
+                    'hostname': hostname,
+                    'isolated': isolated,
+                })
+        return requests
diff --git a/kubernetes-master/hooks/relations/vault-kv/requires.py b/kubernetes-master/hooks/relations/vault-kv/requires.py
new file mode 100644
index 0000000..550d8ef
--- /dev/null
+++ b/kubernetes-master/hooks/relations/vault-kv/requires.py
@@ -0,0 +1,108 @@
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import base64
+import socket
+
+from charmhelpers.core import hookenv
+
+from charms.reactive import toggle_flag
+from charms.reactive import Endpoint
+
+
+class VaultKVRequires(Endpoint):
+    """Requirer side of the vault-kv interface."""
+
+    def manage_flags(self):
+        # 'available' means vault has answered our request: role id,
+        # one-shot token and vault URL are all present.
+        toggle_flag(self.expand_name('{endpoint_name}.connected'),
+                    self.is_joined)
+        toggle_flag(self.expand_name('{endpoint_name}.available'),
+                    all([self.is_joined,
+                         self.unit_role_id,
+                         self.unit_token,
+                         self.vault_url]))
+
+    @property
+    def endpoint_address(self):
+        """ Determine the local endpoint network address """
+        try:
+            return hookenv.network_get_primary_address(
+                self.expand_name('{endpoint_name}')
+            )
+        except NotImplementedError:
+            # network-get not supported: fall back to the unit's
+            # private address.
+            return hookenv.unit_private_ip()
+
+    def request_secret_backend(self, name, isolated=True):
+        """Request creation and access to a secret backend
+
+        Note: the vault charm only creates backends whose name is
+        prefixed with 'charm-'.
+
+        :param name: name of secret backend to create/access
+        :type name: str
+        :param isolated: enforce isolation in backend between units
+        :type isolated: bool"""
+        for relation in self.relations:
+            relation.to_publish['secret_backend'] = name
+            relation.to_publish['access_address'] = self.endpoint_address
+            relation.to_publish['hostname'] = socket.gethostname()
+            relation.to_publish['isolated'] = isolated
+            # published so the provider knows our name across cross-model
+            # relations (see VaultKVProvides.get_remote_unit_name)
+            relation.to_publish['unit_name'] = hookenv.local_unit()
+
+    @property
+    def unit_role_id(self):
+        """Retrieve the AppRole ID for this application unit or None
+
+        :returns role_id: AppRole ID for unit
+        :rtype role_id: str"""
+        # The provider publishes per-unit keys: '<unit>_role_id'.
+        role_key = '{}_role_id'.format(hookenv.local_unit())
+        return self.all_joined_units.received.get(role_key)
+
+    @property
+    def unit_token(self):
+        """Retrieve the one-shot token for secret_id retrieval for
+        this application unit or None
+
+        :returns token: Vault one-shot token for secret_id response
+        :rtype token: str"""
+        token_key = '{}_token'.format(hookenv.local_unit())
+        return self.all_joined_units.received.get(token_key)
+
+    @property
+    def all_unit_tokens(self):
+        """Retrieve the one-shot token(s) for secret_id retrieval for
+        this unit, as published by every related remote unit.
+
+        :returns tokens: Vault one-shot tokens for secret_id responses
+        :rtype tokens: List[str]"""
+        # Same key as unit_token, but collected from every remote unit
+        # individually rather than from the merged view.
+        token_key = '{}_token'.format(hookenv.local_unit())
+        tokens = set()
+        for relation in self.relations:
+            for unit in relation.units:
+                token = unit.received.get(token_key)
+                if token:
+                    tokens.add(token)
+
+        return list(tokens)
+
+    @property
+    def vault_url(self):
+        """Retrieve the URL to access Vault
+
+        :returns vault_url: URL to access vault
+        :rtype vault_url: str"""
+        return self.all_joined_units.received.get('vault_url')
+
+    @property
+    def vault_ca(self):
+        """Retrieve the CA published by Vault
+
+        Returns None (implicitly) when no CA has been published.
+
+        :returns vault_ca: Vault CA Certificate data (base64-decoded)
+        :rtype vault_ca: bytes"""
+        encoded_ca = self.all_joined_units.received.get('vault_ca')
+        if encoded_ca:
+            return base64.b64decode(encoded_ca)
diff --git a/kubernetes-master/hooks/relations/vault-kv/test-requirements.txt b/kubernetes-master/hooks/relations/vault-kv/test-requirements.txt
new file mode 100644
index 0000000..db5ef38
--- /dev/null
+++ b/kubernetes-master/hooks/relations/vault-kv/test-requirements.txt
@@ -0,0 +1,2 @@
+flake8>=2.2.4
+os-testr>=0.4.1
diff --git a/kubernetes-master/hooks/relations/vsphere-integration/.gitignore b/kubernetes-master/hooks/relations/vsphere-integration/.gitignore
new file mode 100644
index 0000000..5f9f2c5
--- /dev/null
+++ b/kubernetes-master/hooks/relations/vsphere-integration/.gitignore
@@ -0,0 +1,3 @@
+.tox
+__pycache__
+*.pyc
diff --git a/kubernetes-master/hooks/relations/vsphere-integration/LICENSE b/kubernetes-master/hooks/relations/vsphere-integration/LICENSE
new file mode 100644
index 0000000..d645695
--- /dev/null
+++ b/kubernetes-master/hooks/relations/vsphere-integration/LICENSE
@@ -0,0 +1,202 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/kubernetes-master/hooks/relations/vsphere-integration/README.md b/kubernetes-master/hooks/relations/vsphere-integration/README.md
new file mode 100644
index 0000000..28ff438
--- /dev/null
+++ b/kubernetes-master/hooks/relations/vsphere-integration/README.md
@@ -0,0 +1,28 @@
+# Overview
+
+This layer encapsulates the `vsphere-integration` interface communication
+protocol and provides an API for charms on either side of relations using this
+interface.
+
+## Usage
+
+In your charm's `layer.yaml`, ensure that `interface:vsphere-integration` is
+included in the `includes` section:
+
+```yaml
+includes: ['layer:basic', 'interface:vsphere-integration']
+```
+
+And in your charm's `metadata.yaml`, ensure that a relation endpoint is defined
+using the `vsphere-integration` interface protocol:
+
+```yaml
+requires:
+ vsphere:
+ interface: vsphere-integration
+```
+
+For documentation on how to use the API for this interface, see:
+
+* [Requires API documentation](docs/requires.md)
+* [Provides API documentation](docs/provides.md) (this will only be used by the vsphere-integrator charm)
diff --git a/kubernetes-master/hooks/relations/vsphere-integration/__init__.py b/kubernetes-master/hooks/relations/vsphere-integration/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/kubernetes-master/hooks/relations/vsphere-integration/copyright b/kubernetes-master/hooks/relations/vsphere-integration/copyright
new file mode 100644
index 0000000..a91bdf1
--- /dev/null
+++ b/kubernetes-master/hooks/relations/vsphere-integration/copyright
@@ -0,0 +1,16 @@
+Format: http://dep.debian.net/deps/dep5/
+
+Files: *
+Copyright: Copyright 2018, Canonical Ltd., All Rights Reserved.
+License: Apache License 2.0
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+ .
+ http://www.apache.org/licenses/LICENSE-2.0
+ .
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/kubernetes-master/hooks/relations/vsphere-integration/docs/provides.md b/kubernetes-master/hooks/relations/vsphere-integration/docs/provides.md
new file mode 100644
index 0000000..796b7e6
--- /dev/null
+++ b/kubernetes-master/hooks/relations/vsphere-integration/docs/provides.md
@@ -0,0 +1,74 @@
+
+# provides
+
+
+This is the provides side of the interface layer, for use only by the
+vSphere integration charm itself.
+
+The flags that are set by the provides side of this interface are:
+
+* **`endpoint.{endpoint_name}.requested`** This flag is set when there is
+ a new or updated request by a remote unit for vSphere integration
+ features. The vSphere integration charm should then iterate over each
+ request, perform whatever actions are necessary to satisfy those requests,
+ and then mark them as complete.
+
+
+## new_requests
+
+A list of the new or updated `IntegrationRequests` that
+have been made.
+
+
+## mark_completed
+
+```python
+VsphereIntegrationProvides.mark_completed(self)
+```
+
+Remove the `requests-pending` flag.
+
+
+## IntegrationRequest
+
+```python
+IntegrationRequest(self, unit)
+```
+
+A request for integration from a single remote unit.
+
+
+### has_credentials
+
+
+Whether or not credentials have been set via `set_credentials`.
+
+
+### is_changed
+
+
+Whether this request has changed since the last time it was
+marked completed (if ever).
+
+
+### set_credentials
+
+```python
+IntegrationRequest.set_credentials(self, vsphere_ip, user, password, datacenter)
+```
+
+Set the credentials for this request.
diff --git a/kubernetes-master/hooks/relations/vsphere-integration/docs/requires.md b/kubernetes-master/hooks/relations/vsphere-integration/docs/requires.md
new file mode 100644
index 0000000..0ce10a9
--- /dev/null
+++ b/kubernetes-master/hooks/relations/vsphere-integration/docs/requires.md
@@ -0,0 +1,56 @@
+
+# requires
+
+
+This is the requires side of the interface layer, for use in charms that wish
+to request integration with vSphere native features. The integration will be
+provided by the vSphere integration charm, which allows the requiring charm
+to not require cloud credentials itself and not have a lot of vSphere
+specific API code.
+
+The flags that are set by the requires side of this interface are:
+
+* **`endpoint.{endpoint_name}.joined`** This flag is set when the relation
+ has been joined, and the charm should then use the methods documented below
+ to request specific vSphere features. This flag is automatically removed
+ if the relation is broken. It should not be removed by the charm.
+
+* **`endpoint.{endpoint_name}.ready`** This flag is set once the requested
+ features have been enabled for the vSphere instance on which the charm is
+ running. This flag is automatically removed if new integration features are
+ requested. It should not be removed by the charm.
+
+
+## VsphereIntegrationRequires
+
+```python
+VsphereIntegrationRequires(self, endpoint_name, relation_ids=None)
+```
+
+Interface to request integration access.
+
+Note that due to resource limits and permissions granularity, policies are
+limited to being applied at the charm level. That means that, if any
+permissions are requested (i.e., any of the enable methods are called),
+what is granted will be the sum of those ever requested by any instance of
+the charm on this cloud.
+
+Labels, on the other hand, will be instance specific.
+
+Example usage:
+
+```python
+from charms.reactive import when, endpoint_from_flag
+
+@when('endpoint.vsphere.ready')
+def vsphere_integration_ready():
+ vsphere = endpoint_from_flag('endpoint.vsphere.joined')
+ update_config_enable_vsphere(vsphere.vsphere_ip,
+ vsphere.user,
+ vsphere.password,
+ vsphere.datacenter,
+ vsphere.datastore)
+```
+
+
+### is_ready
+
+
+Whether or not the request for this instance has been completed.
diff --git a/kubernetes-master/hooks/relations/vsphere-integration/interface.yaml b/kubernetes-master/hooks/relations/vsphere-integration/interface.yaml
new file mode 100644
index 0000000..c4c0c07
--- /dev/null
+++ b/kubernetes-master/hooks/relations/vsphere-integration/interface.yaml
@@ -0,0 +1,4 @@
+name: vsphere-integration
+summary: Interface for connecting to the VMware vSphere integrator charm.
+version: 1
+maintainer: Kevin Monroe
diff --git a/kubernetes-master/hooks/relations/vsphere-integration/make_docs b/kubernetes-master/hooks/relations/vsphere-integration/make_docs
new file mode 100644
index 0000000..04cf35b
--- /dev/null
+++ b/kubernetes-master/hooks/relations/vsphere-integration/make_docs
@@ -0,0 +1,20 @@
+#!.tox/py3/bin/python
+# Regenerate the markdown API docs (docs/requires.md, docs/provides.md)
+# via pydoc-markdown, driven by the settings in pydocmd.yml.
+
+import sys
+from shutil import rmtree
+from unittest.mock import patch
+
+import pydocmd.__main__
+
+
+# Stub out Juju's metadata lookup so the interface modules can be imported
+# outside of a charm context while pydocmd introspects them.
+# NOTE(review): the mocked interface name is 'vsphere' while interface.yaml
+# declares 'vsphere-integration' — presumably only presence matters for
+# import; confirm if doc generation ever validates the name.
+with patch('charmhelpers.core.hookenv.metadata') as metadata:
+    metadata.return_value = {
+        'requires': {'vsphere': {'interface': 'vsphere'}},
+        'provides': {'vsphere': {'interface': 'vsphere'}},
+    }
+    sys.path.insert(0, '.')
+    print(sys.argv)
+    # Default to the 'build' subcommand when invoked with no arguments.
+    if len(sys.argv) == 1:
+        sys.argv.extend(['build'])
+    pydocmd.__main__.main()
+    # The rendered site output is not needed; only the generated docs/ files are kept.
+    rmtree('_build')
diff --git a/kubernetes-master/hooks/relations/vsphere-integration/provides.py b/kubernetes-master/hooks/relations/vsphere-integration/provides.py
new file mode 100644
index 0000000..c3db1d8
--- /dev/null
+++ b/kubernetes-master/hooks/relations/vsphere-integration/provides.py
@@ -0,0 +1,132 @@
+"""
+This is the provides side of the interface layer, for use only by the
+vSphere integration charm itself.
+
+The flags that are set by the provides side of this interface are:
+
+* **`endpoint.{endpoint_name}.requested`** This flag is set when there is
+ a new or updated request by a remote unit for vSphere integration
+ features. The vSphere integration charm should then iterate over each
+ request, perform whatever actions are necessary to satisfy those requests,
+ and then mark them as complete.
+"""
+
+from operator import attrgetter
+
+from charms.reactive import Endpoint
+from charms.reactive import when
+from charms.reactive import toggle_flag, clear_flag
+
+
+class VsphereIntegrationProvides(Endpoint):
+    """
+    Provides side of the vsphere-integration interface, used by the
+    vSphere integrator charm to answer requests from related charms.
+
+    Example usage:
+
+    ```python
+    from charms.reactive import when, endpoint_from_flag
+    from charms import layer
+
+    @when('endpoint.vsphere.requests-pending')
+    def handle_requests():
+        vsphere = endpoint_from_flag('endpoint.vsphere.requests-pending')
+        for request in vsphere.new_requests:
+            request.set_credentials(layer.vsphere.get_vsphere_credentials())
+            request.set_config(layer.vsphere.get_vsphere_config())
+        vsphere.mark_completed()
+    ```
+    """
+
+    @when('endpoint.{endpoint_name}.changed')
+    def check_requests(self):
+        # Reactive handler: raise/lower the requests-pending flag to reflect
+        # whether any joined unit still has an unsatisfied request, then
+        # acknowledge the underlying relation-data 'changed' event.
+        toggle_flag(self.expand_name('requests-pending'),
+                    len(self.new_requests) > 0)
+        clear_flag(self.expand_name('changed'))
+
+    @property
+    def all_requests(self):
+        """
+        A list of all the #IntegrationRequests that have been made.
+        """
+        # One request object per joined remote unit.
+        return [IntegrationRequest(unit) for unit in self.all_joined_units]
+
+    @property
+    def new_requests(self):
+        """
+        A list of the new or updated #IntegrationRequests that have been made.
+        """
+        # Filter down to requests whose is_changed property is truthy.
+        is_changed = attrgetter('is_changed')
+        return list(filter(is_changed, self.all_requests))
+
+    def mark_completed(self):
+        """
+        Remove the `requests-pending` flag.
+        """
+        clear_flag(self.expand_name('requests-pending'))
+
+
+class IntegrationRequest:
+    """
+    A request for integration from a single remote unit.
+
+    The integrator charm satisfies a request by publishing credentials
+    (`set_credentials`) and non-credential config (`set_config`) back onto
+    the relation; a request counts as changed until both have been set.
+    """
+    def __init__(self, unit):
+        # The remote (requesting) unit this request wraps.
+        self._unit = unit
+
+    @property
+    def _to_publish(self):
+        # Outgoing relation data bag shared with the requesting application.
+        return self._unit.relation.to_publish
+
+    @property
+    def has_credentials(self):
+        """
+        Whether or not `set_credentials` has been called.
+        """
+        # True once every credential key has been published.
+        return {'vsphere_ip', 'user',
+                'password', 'datacenter'}.issubset(self._to_publish)
+
+    @property
+    def has_config(self):
+        """
+        Whether or not `set_config` has been called.
+        """
+        # True once every non-credential config key has been published.
+        return {'datastore', 'folder',
+                'respool_path'}.issubset(self._to_publish)
+
+    @property
+    def is_changed(self):
+        """
+        Whether this request has changed since the last time it was
+        marked completed (if ever).
+        """
+        # Outstanding until both credentials and config have been published.
+        return not (self.has_credentials and self.has_config)
+
+    @property
+    def unit_name(self):
+        """Name of the remote unit that made this request."""
+        return self._unit.unit_name
+
+    def set_credentials(self,
+                        vsphere_ip,
+                        user,
+                        password,
+                        datacenter):
+        """
+        Set the vsphere credentials for this request.
+        """
+        self._to_publish.update({
+            'vsphere_ip': vsphere_ip,
+            'user': user,
+            'password': password,
+            'datacenter': datacenter,
+        })
+
+    def set_config(self,
+                   datastore,
+                   folder,
+                   respool_path):
+        """
+        Set the non-credential vsphere config for this request.
+        """
+        self._to_publish.update({
+            'datastore': datastore,
+            'folder': folder,
+            'respool_path': respool_path,
+        })
diff --git a/kubernetes-master/hooks/relations/vsphere-integration/pydocmd.yml b/kubernetes-master/hooks/relations/vsphere-integration/pydocmd.yml
new file mode 100644
index 0000000..e1d5d4a
--- /dev/null
+++ b/kubernetes-master/hooks/relations/vsphere-integration/pydocmd.yml
@@ -0,0 +1,16 @@
+site_name: 'VMware vSphere Integration Interface'
+
+generate:
+ - requires.md:
+ - requires
+ - requires.VsphereIntegrationRequires+
+ - provides.md:
+ - provides
+ - provides.VsphereIntegrationProvides+
+ - provides.IntegrationRequest+
+
+pages:
+ - Requires: requires.md
+ - Provides: provides.md
+
+gens_dir: docs
diff --git a/kubernetes-master/hooks/relations/vsphere-integration/requires.py b/kubernetes-master/hooks/relations/vsphere-integration/requires.py
new file mode 100644
index 0000000..d8b9cdb
--- /dev/null
+++ b/kubernetes-master/hooks/relations/vsphere-integration/requires.py
@@ -0,0 +1,141 @@
+"""
+This is the requires side of the interface layer, for use in charms that wish
+to request integration with vSphere native features. The integration will be
+provided by the vSphere integration charm, which allows the requiring charm
+to not require cloud credentials itself and not have a lot of vSphere
+specific API code.
+
+The flags that are set by the requires side of this interface are:
+
+* **`endpoint.{endpoint_name}.joined`** This flag is set when the relation
+ has been joined, and the charm should then use the methods documented below
+ to request specific vSphere features. This flag is automatically removed
+ if the relation is broken. It should not be removed by the charm.
+
+* **`endpoint.{endpoint_name}.ready`** This flag is set once the requested
+ features have been enabled for the vSphere instance on which the charm is
+ running. This flag is automatically removed if new integration features are
+ requested. It should not be removed by the charm.
+
+* **`endpoint.{endpoint_name}.ready.changed`** This flag is set if the data
+ changes after the ready flag was set. This flag should be removed by the
+ charm once handled.
+"""
+
+
+from charms.reactive import Endpoint
+from charms.reactive import when, when_not
+from charms.reactive import clear_flag, is_flag_set, set_flag, toggle_flag
+from charms.reactive import data_changed
+
+
+class VsphereIntegrationRequires(Endpoint):
+    """
+    Interface to request integration access.
+
+    Note that due to resource limits and permissions granularity, policies are
+    limited to being applied at the charm level. That means that, if any
+    permissions are requested (i.e., any of the enable methods are called),
+    what is granted will be the sum of those ever requested by any instance of
+    the charm on this cloud.
+
+    Labels, on the other hand, will be instance specific.
+
+    Example usage:
+
+    ```python
+    from charms.reactive import when, endpoint_from_flag
+
+    @when('endpoint.vsphere.ready')
+    def vsphere_integration_ready():
+        vsphere = endpoint_from_flag('endpoint.vsphere.joined')
+        update_config_enable_vsphere(vsphere.vsphere_ip,
+                                     vsphere.user,
+                                     vsphere.password,
+                                     vsphere.datacenter,
+                                     vsphere.datastore,
+                                     vsphere.folder,
+                                     vsphere.respool_path)
+    ```
+    """
+
+    @property
+    def _received(self):
+        """
+        Helper to streamline access to received data.
+        """
+        # Merged view of data published by all joined remote units;
+        # missing keys read as None.
+        return self.all_joined_units.received
+
+    @when('endpoint.{endpoint_name}.changed')
+    def check_ready(self):
+        """
+        Manage flags to signal when the endpoint is ready as well as noting
+        if changes have been made since it became ready.
+        """
+        was_ready = is_flag_set(self.expand_name('ready'))
+        toggle_flag(self.expand_name('ready'), self.is_ready)
+        # Only signal ready.changed when data changed while already ready;
+        # the first transition to ready is covered by the 'ready' flag itself.
+        # NOTE(review): is_changed is only evaluated on this branch, and
+        # data_changed presumably records the current value when called —
+        # confirm that skipping it on other branches is intentional.
+        if self.is_ready and was_ready and self.is_changed:
+            set_flag(self.expand_name('ready.changed'))
+        clear_flag(self.expand_name('changed'))
+
+    @when_not('endpoint.{endpoint_name}.joined')
+    def remove_ready(self):
+        # Relation broken/departed: the integration is no longer available.
+        clear_flag(self.expand_name('ready'))
+
+    @property
+    def is_ready(self):
+        """
+        Whether or not the request for this instance has been completed.
+        """
+        # Ready only when every expected field has been published.
+        return all(field is not None for field in [
+            self.vsphere_ip,
+            self.user,
+            self.password,
+            self.datacenter,
+            self.datastore,
+            self.folder,
+            self.respool_path,
+        ])
+
+    @property
+    def is_changed(self):
+        """
+        Whether or not the request for this instance has changed.
+
+        Note: delegates to `data_changed`, which also stores the current
+        values, so reading this property consumes the pending change.
+        """
+        return data_changed(self.expand_name('all-data'), [
+            self.vsphere_ip,
+            self.user,
+            self.password,
+            self.datacenter,
+            self.datastore,
+            self.folder,
+            self.respool_path,
+        ])
+
+    # Raw values published by the integrator charm (None until provided).
+
+    @property
+    def vsphere_ip(self):
+        return self._received['vsphere_ip']
+
+    @property
+    def user(self):
+        return self._received['user']
+
+    @property
+    def password(self):
+        return self._received['password']
+
+    @property
+    def datacenter(self):
+        return self._received['datacenter']
+
+    @property
+    def datastore(self):
+        return self._received['datastore']
+
+    @property
+    def folder(self):
+        return self._received['folder']
+
+    @property
+    def respool_path(self):
+        return self._received['respool_path']
diff --git a/kubernetes-master/hooks/start b/kubernetes-master/hooks/start
new file mode 100755
index 0000000..9858c6b
--- /dev/null
+++ b/kubernetes-master/hooks/start
@@ -0,0 +1,22 @@
+#!/usr/bin/env python3
+# Juju 'start' hook: generated charms.reactive dispatch stub (identical
+# across all hooks); real handlers live under reactive/ and hooks/relations/.
+
+# Load modules from $JUJU_CHARM_DIR/lib
+import sys
+sys.path.append('lib')
+
+from charms.layer import basic # noqa
+basic.bootstrap_charm_deps()
+
+from charmhelpers.core import hookenv # noqa
+hookenv.atstart(basic.init_config_states)
+hookenv.atexit(basic.clear_config_states)
+
+
+# This will load and run the appropriate @hook and other decorated
+# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive,
+# and $JUJU_CHARM_DIR/hooks/relations.
+#
+# See https://jujucharms.com/docs/stable/authors-charm-building
+# for more information on this pattern.
+from charms.reactive import main # noqa
+main()
diff --git a/kubernetes-master/hooks/stop b/kubernetes-master/hooks/stop
new file mode 100755
index 0000000..9858c6b
--- /dev/null
+++ b/kubernetes-master/hooks/stop
@@ -0,0 +1,22 @@
+#!/usr/bin/env python3
+# Juju 'stop' hook: generated charms.reactive dispatch stub (identical
+# across all hooks); real handlers live under reactive/ and hooks/relations/.
+
+# Load modules from $JUJU_CHARM_DIR/lib
+import sys
+sys.path.append('lib')
+
+from charms.layer import basic # noqa
+basic.bootstrap_charm_deps()
+
+from charmhelpers.core import hookenv # noqa
+hookenv.atstart(basic.init_config_states)
+hookenv.atexit(basic.clear_config_states)
+
+
+# This will load and run the appropriate @hook and other decorated
+# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive,
+# and $JUJU_CHARM_DIR/hooks/relations.
+#
+# See https://jujucharms.com/docs/stable/authors-charm-building
+# for more information on this pattern.
+from charms.reactive import main # noqa
+main()
diff --git a/kubernetes-master/hooks/update-status b/kubernetes-master/hooks/update-status
new file mode 100755
index 0000000..9858c6b
--- /dev/null
+++ b/kubernetes-master/hooks/update-status
@@ -0,0 +1,22 @@
+#!/usr/bin/env python3
+# Juju 'update-status' hook: generated charms.reactive dispatch stub
+# (identical across all hooks); real handlers live under reactive/.
+
+# Load modules from $JUJU_CHARM_DIR/lib
+import sys
+sys.path.append('lib')
+
+from charms.layer import basic # noqa
+basic.bootstrap_charm_deps()
+
+from charmhelpers.core import hookenv # noqa
+hookenv.atstart(basic.init_config_states)
+hookenv.atexit(basic.clear_config_states)
+
+
+# This will load and run the appropriate @hook and other decorated
+# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive,
+# and $JUJU_CHARM_DIR/hooks/relations.
+#
+# See https://jujucharms.com/docs/stable/authors-charm-building
+# for more information on this pattern.
+from charms.reactive import main # noqa
+main()
diff --git a/kubernetes-master/hooks/upgrade-charm b/kubernetes-master/hooks/upgrade-charm
new file mode 100755
index 0000000..9858c6b
--- /dev/null
+++ b/kubernetes-master/hooks/upgrade-charm
@@ -0,0 +1,22 @@
+#!/usr/bin/env python3
+# Juju 'upgrade-charm' hook: generated charms.reactive dispatch stub
+# (identical across all hooks); real handlers live under reactive/.
+
+# Load modules from $JUJU_CHARM_DIR/lib
+import sys
+sys.path.append('lib')
+
+from charms.layer import basic # noqa
+basic.bootstrap_charm_deps()
+
+from charmhelpers.core import hookenv # noqa
+hookenv.atstart(basic.init_config_states)
+hookenv.atexit(basic.clear_config_states)
+
+
+# This will load and run the appropriate @hook and other decorated
+# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive,
+# and $JUJU_CHARM_DIR/hooks/relations.
+#
+# See https://jujucharms.com/docs/stable/authors-charm-building
+# for more information on this pattern.
+from charms.reactive import main # noqa
+main()
diff --git a/kubernetes-master/hooks/vault-kv-relation-broken b/kubernetes-master/hooks/vault-kv-relation-broken
new file mode 100755
index 0000000..9858c6b
--- /dev/null
+++ b/kubernetes-master/hooks/vault-kv-relation-broken
@@ -0,0 +1,22 @@
+#!/usr/bin/env python3
+# Juju 'vault-kv-relation-broken' hook: generated charms.reactive dispatch
+# stub (identical across all hooks); real handlers live under reactive/.
+
+# Load modules from $JUJU_CHARM_DIR/lib
+import sys
+sys.path.append('lib')
+
+from charms.layer import basic # noqa
+basic.bootstrap_charm_deps()
+
+from charmhelpers.core import hookenv # noqa
+hookenv.atstart(basic.init_config_states)
+hookenv.atexit(basic.clear_config_states)
+
+
+# This will load and run the appropriate @hook and other decorated
+# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive,
+# and $JUJU_CHARM_DIR/hooks/relations.
+#
+# See https://jujucharms.com/docs/stable/authors-charm-building
+# for more information on this pattern.
+from charms.reactive import main # noqa
+main()
diff --git a/kubernetes-master/hooks/vault-kv-relation-changed b/kubernetes-master/hooks/vault-kv-relation-changed
new file mode 100755
index 0000000..9858c6b
--- /dev/null
+++ b/kubernetes-master/hooks/vault-kv-relation-changed
@@ -0,0 +1,22 @@
+#!/usr/bin/env python3
+# Juju 'vault-kv-relation-changed' hook: generated charms.reactive dispatch
+# stub (identical across all hooks); real handlers live under reactive/.
+
+# Load modules from $JUJU_CHARM_DIR/lib
+import sys
+sys.path.append('lib')
+
+from charms.layer import basic # noqa
+basic.bootstrap_charm_deps()
+
+from charmhelpers.core import hookenv # noqa
+hookenv.atstart(basic.init_config_states)
+hookenv.atexit(basic.clear_config_states)
+
+
+# This will load and run the appropriate @hook and other decorated
+# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive,
+# and $JUJU_CHARM_DIR/hooks/relations.
+#
+# See https://jujucharms.com/docs/stable/authors-charm-building
+# for more information on this pattern.
+from charms.reactive import main # noqa
+main()
diff --git a/kubernetes-master/hooks/vault-kv-relation-created b/kubernetes-master/hooks/vault-kv-relation-created
new file mode 100755
index 0000000..9858c6b
--- /dev/null
+++ b/kubernetes-master/hooks/vault-kv-relation-created
@@ -0,0 +1,22 @@
+#!/usr/bin/env python3
+# Juju 'vault-kv-relation-created' hook: generated charms.reactive dispatch
+# stub (identical across all hooks); real handlers live under reactive/.
+
+# Load modules from $JUJU_CHARM_DIR/lib
+import sys
+sys.path.append('lib')
+
+from charms.layer import basic # noqa
+basic.bootstrap_charm_deps()
+
+from charmhelpers.core import hookenv # noqa
+hookenv.atstart(basic.init_config_states)
+hookenv.atexit(basic.clear_config_states)
+
+
+# This will load and run the appropriate @hook and other decorated
+# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive,
+# and $JUJU_CHARM_DIR/hooks/relations.
+#
+# See https://jujucharms.com/docs/stable/authors-charm-building
+# for more information on this pattern.
+from charms.reactive import main # noqa
+main()
diff --git a/kubernetes-master/hooks/vault-kv-relation-departed b/kubernetes-master/hooks/vault-kv-relation-departed
new file mode 100755
index 0000000..9858c6b
--- /dev/null
+++ b/kubernetes-master/hooks/vault-kv-relation-departed
@@ -0,0 +1,22 @@
+#!/usr/bin/env python3
+# Juju 'vault-kv-relation-departed' hook: generated charms.reactive dispatch
+# stub (identical across all hooks); real handlers live under reactive/.
+
+# Load modules from $JUJU_CHARM_DIR/lib
+import sys
+sys.path.append('lib')
+
+from charms.layer import basic # noqa
+basic.bootstrap_charm_deps()
+
+from charmhelpers.core import hookenv # noqa
+hookenv.atstart(basic.init_config_states)
+hookenv.atexit(basic.clear_config_states)
+
+
+# This will load and run the appropriate @hook and other decorated
+# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive,
+# and $JUJU_CHARM_DIR/hooks/relations.
+#
+# See https://jujucharms.com/docs/stable/authors-charm-building
+# for more information on this pattern.
+from charms.reactive import main # noqa
+main()
diff --git a/kubernetes-master/hooks/vault-kv-relation-joined b/kubernetes-master/hooks/vault-kv-relation-joined
new file mode 100755
index 0000000..9858c6b
--- /dev/null
+++ b/kubernetes-master/hooks/vault-kv-relation-joined
@@ -0,0 +1,22 @@
+#!/usr/bin/env python3
+# Juju 'vault-kv-relation-joined' hook: generated charms.reactive dispatch
+# stub (identical across all hooks); real handlers live under reactive/.
+
+# Load modules from $JUJU_CHARM_DIR/lib
+import sys
+sys.path.append('lib')
+
+from charms.layer import basic # noqa
+basic.bootstrap_charm_deps()
+
+from charmhelpers.core import hookenv # noqa
+hookenv.atstart(basic.init_config_states)
+hookenv.atexit(basic.clear_config_states)
+
+
+# This will load and run the appropriate @hook and other decorated
+# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive,
+# and $JUJU_CHARM_DIR/hooks/relations.
+#
+# See https://jujucharms.com/docs/stable/authors-charm-building
+# for more information on this pattern.
+from charms.reactive import main # noqa
+main()
diff --git a/kubernetes-master/hooks/vsphere-relation-broken b/kubernetes-master/hooks/vsphere-relation-broken
new file mode 100755
index 0000000..9858c6b
--- /dev/null
+++ b/kubernetes-master/hooks/vsphere-relation-broken
@@ -0,0 +1,22 @@
+#!/usr/bin/env python3
+# Juju 'vsphere-relation-broken' hook: generated charms.reactive dispatch
+# stub (identical across all hooks); real handlers live under reactive/.
+
+# Load modules from $JUJU_CHARM_DIR/lib
+import sys
+sys.path.append('lib')
+
+from charms.layer import basic # noqa
+basic.bootstrap_charm_deps()
+
+from charmhelpers.core import hookenv # noqa
+hookenv.atstart(basic.init_config_states)
+hookenv.atexit(basic.clear_config_states)
+
+
+# This will load and run the appropriate @hook and other decorated
+# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive,
+# and $JUJU_CHARM_DIR/hooks/relations.
+#
+# See https://jujucharms.com/docs/stable/authors-charm-building
+# for more information on this pattern.
+from charms.reactive import main # noqa
+main()
diff --git a/kubernetes-master/hooks/vsphere-relation-changed b/kubernetes-master/hooks/vsphere-relation-changed
new file mode 100755
index 0000000..9858c6b
--- /dev/null
+++ b/kubernetes-master/hooks/vsphere-relation-changed
@@ -0,0 +1,22 @@
+#!/usr/bin/env python3
+# Juju 'vsphere-relation-changed' hook: generated charms.reactive dispatch
+# stub (identical across all hooks); real handlers live under reactive/.
+
+# Load modules from $JUJU_CHARM_DIR/lib
+import sys
+sys.path.append('lib')
+
+from charms.layer import basic # noqa
+basic.bootstrap_charm_deps()
+
+from charmhelpers.core import hookenv # noqa
+hookenv.atstart(basic.init_config_states)
+hookenv.atexit(basic.clear_config_states)
+
+
+# This will load and run the appropriate @hook and other decorated
+# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive,
+# and $JUJU_CHARM_DIR/hooks/relations.
+#
+# See https://jujucharms.com/docs/stable/authors-charm-building
+# for more information on this pattern.
+from charms.reactive import main # noqa
+main()
diff --git a/kubernetes-master/hooks/vsphere-relation-created b/kubernetes-master/hooks/vsphere-relation-created
new file mode 100755
index 0000000..9858c6b
--- /dev/null
+++ b/kubernetes-master/hooks/vsphere-relation-created
@@ -0,0 +1,22 @@
+#!/usr/bin/env python3
+# Juju 'vsphere-relation-created' hook: generated charms.reactive dispatch
+# stub (identical across all hooks); real handlers live under reactive/.
+
+# Load modules from $JUJU_CHARM_DIR/lib
+import sys
+sys.path.append('lib')
+
+from charms.layer import basic # noqa
+basic.bootstrap_charm_deps()
+
+from charmhelpers.core import hookenv # noqa
+hookenv.atstart(basic.init_config_states)
+hookenv.atexit(basic.clear_config_states)
+
+
+# This will load and run the appropriate @hook and other decorated
+# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive,
+# and $JUJU_CHARM_DIR/hooks/relations.
+#
+# See https://jujucharms.com/docs/stable/authors-charm-building
+# for more information on this pattern.
+from charms.reactive import main # noqa
+main()
diff --git a/kubernetes-master/hooks/vsphere-relation-departed b/kubernetes-master/hooks/vsphere-relation-departed
new file mode 100755
index 0000000..9858c6b
--- /dev/null
+++ b/kubernetes-master/hooks/vsphere-relation-departed
@@ -0,0 +1,22 @@
+#!/usr/bin/env python3
+# Juju 'vsphere-relation-departed' hook: generated charms.reactive dispatch
+# stub (identical across all hooks); real handlers live under reactive/.
+
+# Load modules from $JUJU_CHARM_DIR/lib
+import sys
+sys.path.append('lib')
+
+from charms.layer import basic # noqa
+basic.bootstrap_charm_deps()
+
+from charmhelpers.core import hookenv # noqa
+hookenv.atstart(basic.init_config_states)
+hookenv.atexit(basic.clear_config_states)
+
+
+# This will load and run the appropriate @hook and other decorated
+# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive,
+# and $JUJU_CHARM_DIR/hooks/relations.
+#
+# See https://jujucharms.com/docs/stable/authors-charm-building
+# for more information on this pattern.
+from charms.reactive import main # noqa
+main()
diff --git a/kubernetes-master/hooks/vsphere-relation-joined b/kubernetes-master/hooks/vsphere-relation-joined
new file mode 100755
index 0000000..9858c6b
--- /dev/null
+++ b/kubernetes-master/hooks/vsphere-relation-joined
@@ -0,0 +1,22 @@
+#!/usr/bin/env python3
+# Juju 'vsphere-relation-joined' hook: generated charms.reactive dispatch
+# stub (identical across all hooks); real handlers live under reactive/.
+
+# Load modules from $JUJU_CHARM_DIR/lib
+import sys
+sys.path.append('lib')
+
+from charms.layer import basic # noqa
+basic.bootstrap_charm_deps()
+
+from charmhelpers.core import hookenv # noqa
+hookenv.atstart(basic.init_config_states)
+hookenv.atexit(basic.clear_config_states)
+
+
+# This will load and run the appropriate @hook and other decorated
+# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive,
+# and $JUJU_CHARM_DIR/hooks/relations.
+#
+# See https://jujucharms.com/docs/stable/authors-charm-building
+# for more information on this pattern.
+from charms.reactive import main # noqa
+main()
diff --git a/kubernetes-master/icon.svg b/kubernetes-master/icon.svg
new file mode 100644
index 0000000..0ab453f
--- /dev/null
+++ b/kubernetes-master/icon.svg
@@ -0,0 +1,362 @@
+
+
+
+
diff --git a/kubernetes-master/layer.yaml b/kubernetes-master/layer.yaml
new file mode 100644
index 0000000..1d6db2a
--- /dev/null
+++ b/kubernetes-master/layer.yaml
@@ -0,0 +1,97 @@
+"includes":
+- "layer:options"
+- "layer:basic"
+- "layer:debug"
+- "interface:tls-certificates"
+- "interface:nrpe-external-master"
+- "layer:cis-benchmark"
+- "layer:coordinator"
+- "layer:kubernetes-common"
+- "interface:container-runtime"
+- "interface:vault-kv"
+- "layer:status"
+- "layer:apt"
+- "layer:vault-kv"
+- "interface:hacluster"
+- "layer:snap"
+- "layer:tls-client"
+- "layer:leadership"
+- "layer:metrics"
+- "layer:nagios"
+- "layer:cdk-service-kicker"
+- "layer:kubernetes-master-worker-base"
+- "layer:vaultlocker"
+- "layer:hacluster"
+- "interface:ceph-admin"
+- "interface:ceph-client"
+- "interface:etcd"
+- "interface:http"
+- "interface:kubernetes-cni"
+- "interface:kube-dns"
+- "interface:kube-control"
+- "interface:kube-masters"
+- "interface:public-address"
+- "interface:aws-integration"
+- "interface:gcp-integration"
+- "interface:openstack-integration"
+- "interface:vsphere-integration"
+- "interface:azure-integration"
+- "interface:keystone-credentials"
+- "interface:prometheus-manual"
+- "interface:grafana-dashboard"
+- "interface:aws-iam"
+"exclude": [".travis.yml", "tests", "tox.ini", "test-requirements.txt", "unit_tests",
+ ".tox", "__pycache__", "Makefile", "conftest.py"]
+"options":
+ "coordinator":
+ # Absolute path to the charmhelpers.coordinator.BaseCoordinator to use.
+ "class": "charms.coordinator.SimpleCoordinator"
+ # Layer log level (debug, info, warning, error, critical)
+ "log_level": "info"
+ "basic":
+ "packages":
+ - "socat"
+ "python_packages": []
+ "use_venv": !!bool "true"
+ "include_system_packages": !!bool "false"
+ "tls-client":
+ "ca_certificate_path": "/root/cdk/ca.crt"
+ "server_certificate_path": ""
+ "server_key_path": ""
+ "client_certificate_path": ""
+ "client_key_path": ""
+ "cdk-service-kicker":
+ "services":
+ - "snap.kube-apiserver.daemon"
+ - "snap.kube-controller-manager.daemon"
+ - "snap.kube-scheduler.daemon"
+ - "snap.kube-proxy.daemon"
+ "hacluster":
+ "binding_address": "kube-api-endpoint"
+ "snap": {}
+ "debug": {}
+ "leadership": {}
+ "nagios": {}
+ "cis-benchmark": {}
+ "kubernetes-common": {}
+ "kubernetes-master-worker-base": {}
+ "vault-kv": {}
+ "status":
+ "patch-hookenv": !!bool "true"
+ "apt":
+ "packages": []
+ "version_package": ""
+ "full_version": !!bool "false"
+ "keys": []
+ "vaultlocker": {}
+ "kubernetes-master": {}
+"repo": "https://github.com/kubernetes/kubernetes.git"
+"proof":
+ "storage":
+ - "name": "vaultlocker-encrypt"
+ "type": "Boolean"
+ "missing": !!bool "false"
+ - "name": "vaultlocker-mountbase"
+ "type": "String"
+ "missing": ""
+"is": "kubernetes-master"
diff --git a/kubernetes-master/lib/charms/apt.py b/kubernetes-master/lib/charms/apt.py
new file mode 100644
index 0000000..14508c4
--- /dev/null
+++ b/kubernetes-master/lib/charms/apt.py
@@ -0,0 +1,209 @@
+# Copyright 2015-2020 Canonical Ltd.
+#
+# This file is part of the Apt layer for Juju.
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 3, as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranties of
+# MERCHANTABILITY, SATISFACTORY QUALITY, or FITNESS FOR A PARTICULAR
+# PURPOSE. See the GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see .
+
+'''
+charms.reactive helpers for dealing with deb packages.
+
+Add apt package sources using add_source(). Queue deb packages for
+installation with install(). Configure and work with your software
+once the apt.installed.{packagename} flag is set.
+'''
+import itertools
+import re
+import subprocess
+
+from charmhelpers import fetch
+from charmhelpers.core import hookenv, unitdata
+from charms import layer, reactive
+from charms.layer import status
+from charms.reactive import flags
+
+
# Public API of this module; everything else is an implementation detail.
__all__ = ['add_source', 'update', 'queue_install', 'install_queued', 'installed', 'purge', 'ensure_package_status']
+
+
def add_source(source, key=None):
    """Register an additional apt source and flag the cache as stale.

    ``source`` may be a line suitable for sources.list(5), a
    ``ppa:<owner>/<archive>`` spec, or a distribution component to
    enable. ``key`` is the archive signing key as an ASCII armoured GPG
    key; bare GPG key ids also work, but are retrieved insecurely.
    PPAs and the main Ubuntu archives need no key.

    Always sets the ``apt.needs_update`` flag so the cache is refreshed
    before the next install.
    """
    # NOTE: sources are (re-)added on every call. If this ever shows up
    # in timing, previously added sources could be remembered instead.
    fetch.add_source(source, key)
    reactive.set_flag('apt.needs_update')
+
+
def queue_install(packages, options=None):
    """Queue one or more deb packages for a later ``install_queued``.

    Sets the ``apt.queued_installs`` flag when anything was queued.
    The ``apt.installed.{name}`` flag is set once a package is
    installed. Already-installed packages are skipped, and packages
    already queued keep their original install options.
    """
    if isinstance(packages, str):
        packages = [packages]
    store = unitdata.kv()
    already_queued = store.getrange('apt.install_queue.', strip=True)
    pending = {}
    for name in packages:
        if name in already_queued:
            continue  # keep the previously recorded options
        if reactive.is_flag_set('apt.installed.' + name):
            continue  # never reinstall
        pending[name] = options
    if pending:
        unitdata.kv().update(pending, prefix='apt.install_queue.')
        reactive.set_flag('apt.queued_installs')
+
+
def installed():
    """Return the set of package names whose install has completed."""
    prefix = 'apt.installed.'
    result = set()
    for flag in flags.get_flags():
        if flag.startswith(prefix):
            # Flag format is 'apt.installed.<package>'; keep the name only.
            result.add(flag.split('.', 2)[2])
    return result
+
+
def purge(packages):
    """Purge deb packages from the system.

    Also drops any pending queue entries and clears the
    ``apt.installed.{name}`` flag for each purged package.
    """
    fetch.apt_purge(packages, fatal=True)
    unitdata.kv().unsetrange(packages, prefix='apt.install_queue.')
    for name in packages:
        reactive.clear_flag('apt.installed.{}'.format(name))
+
+
def update():
    """Refresh the apt cache and clear the ``apt.needs_update`` flag."""
    status.maintenance('Updating apt cache')
    # fatal=True: a failed cache update must surface, not be ignored.
    fetch.apt_update(fatal=True)
    reactive.clear_flag('apt.needs_update')
+
+
def install_queued():
    '''Installs queued deb packages.

    Removes the apt.queued_installs flag and sets the apt.installed flag.

    On failure, sets the unit's workload status to 'blocked' and returns
    False. Package installs remain queued.

    On success, sets the apt.installed.{packagename} flag for each
    installed package and returns True.
    '''
    store = unitdata.kv()
    # Sort by options first so packages sharing identical install options
    # are grouped into a single apt invocation by groupby() below.
    queue = sorted((options, package) for package, options in store.getrange('apt.install_queue.', strip=True).items())

    installed = set()
    for options, batch in itertools.groupby(queue, lambda x: x[0]):
        packages = [b[1] for b in batch]
        try:
            status.maintenance('Installing {}'.format(','.join(packages)))
            fetch.apt_install(packages, options, fatal=True)
            # Only dequeue and record after a successful install, so a
            # failed batch stays queued for a later retry.
            store.unsetrange(packages, prefix='apt.install_queue.')
            installed.update(packages)
        except subprocess.CalledProcessError:
            status.blocked('Unable to install packages {}'.format(','.join(packages)))
            return False  # Without setting reactive flag.

    # Flags are set only once every batch succeeded.
    for package in installed:
        reactive.set_flag('apt.installed.{}'.format(package))
    reactive.clear_flag('apt.queued_installs')

    # Refresh the reported application version (per layer.yaml options).
    reset_application_version()

    return True
+
+
def get_package_version(package, full_version=False):
    """Look up the version of an installed deb package.

    With ``full_version=True`` the complete Debian version string is
    returned; otherwise only the leading 'upstream' portion.
    """
    # dpkg-query is used instead of fetch.get_upstream_version because
    # the latter needs python-apt, which is unavailable when the basic
    # layer's use_site_packages option is off.
    output = subprocess.check_output(
        ['dpkg-query', '--show', r'--showformat=${Version}\n', package],
        universal_newlines=True)
    version = output.strip()
    if full_version:
        return version
    # Trim Debian-style packaging metadata from the end, if present.
    match = re.search(r'^([\d.a-z]+)', version, re.I)
    return match.group(1) if match else version
+
+
def reset_application_version():
    """Publish the Juju application version, per settings in layer.yaml.

    Called after package installs to initialize the version, and again
    every hook in case the package changed behind our back (e.g. a
    Landscape-driven upgrade).
    """
    opts = layer.options().get('apt', {})
    pkg = opts.get('version_package')
    if not pkg or pkg not in installed():
        return
    ver = get_package_version(pkg, opts.get('full_version', False))
    hookenv.application_version_set(ver)
+
+
def ensure_package_status():
    '''Hold or unhold packages per the package_status configuration option.

    All packages installed using this module and handlers are affected.

    A mechanism may be added in the future to override this for a
    subset of installed packages.
    '''
    packages = installed()
    if not packages:
        return
    config = hookenv.config()
    package_status = config.get('package_status') or ''
    # data_changed avoids redundant apt-mark calls: only act when the
    # desired status or the set of installed packages has changed.
    changed = reactive.data_changed('apt.package_status', (package_status, sorted(packages)))
    if changed:
        if package_status == 'hold':
            hookenv.log('Holding packages {}'.format(','.join(packages)))
            fetch.apt_hold(packages)
        else:
            # Any value other than 'hold' (including empty) unholds.
            hookenv.log('Unholding packages {}'.format(','.join(packages)))
            fetch.apt_unhold(packages)
    # Kept clear unconditionally, for compatibility with older handlers.
    reactive.clear_flag('apt.needs_hold')
+
+
def status_set(state, message):
    """DEPRECATED, set the unit's workload status.

    Pass ``state=None`` to keep the current state and only replace
    the message.
    """
    if state is None:
        state = hookenv.status_get()[0]
    if state not in ('active', 'waiting', 'blocked'):
        # Unknown/transitional state reported; 'maintenance' is the
        # safest substitute.
        state = 'maintenance'
    status.status_set(state, message)
diff --git a/kubernetes-master/lib/charms/coordinator.py b/kubernetes-master/lib/charms/coordinator.py
new file mode 100644
index 0000000..b954b92
--- /dev/null
+++ b/kubernetes-master/lib/charms/coordinator.py
@@ -0,0 +1,144 @@
+# Copyright 2015-2016 Canonical Ltd.
+#
+# This file is part of the Coordinator Layer for Juju.
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 3, as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranties of
+# MERCHANTABILITY, SATISFACTORY QUALITY, or FITNESS FOR A PARTICULAR
+# PURPOSE. See the GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see .
+
+import importlib
+
+from charmhelpers.coordinator import BaseCoordinator
+from charmhelpers.core import hookenv
+from charms import reactive
+import charms.layer
+
+
+__all__ = ['coordinator', 'acquire']
+
+
def acquire(lock):
    """Request the named lock from the coordinator.

    Sets ``coordinator.granted.{lockname}`` when the lock is held,
    otherwise ``coordinator.requested.{lockname}``. A deferred request
    is granted in a future hook, at which point the granted state is
    set.

    Returns True when the lock was granted immediately.
    """
    global coordinator
    if not coordinator.acquire(lock):
        log('Requested {} lock'.format(lock), hookenv.DEBUG)
        reactive.set_state('coordinator.requested.{}'.format(lock))
        return False
    granted_state = 'coordinator.granted.{}'.format(lock)
    if not reactive.is_state(granted_state):
        log('Granted {} lock'.format(lock), hookenv.DEBUG)
    reactive.set_state(granted_state)
    return True
+
+
# Options for this layer's 'coordinator' section of layer.yaml, read
# once at import time (e.g. which BaseCoordinator class to use).
options = charms.layer.options('coordinator')
+
+
def log(msg, level=hookenv.INFO):
    """Log through hookenv, honouring the layer's configured log_level.

    Messages whose level is below the configured threshold are dropped.
    """
    # hookenv level constants are the strings 'DEBUG'..'CRITICAL';
    # rank them so levels can be compared numerically.
    ranks = {hookenv.DEBUG: 1,
             hookenv.INFO: 2,
             hookenv.WARNING: 3,
             hookenv.ERROR: 4,
             hookenv.CRITICAL: 5}
    threshold = ranks[options.get('log_level', 'DEBUG').upper()]
    if ranks[level] >= threshold:
        hookenv.log('Coordinator: {}'.format(msg), level)
+
+
class SimpleCoordinator(BaseCoordinator):
    '''A simple BaseCoordinator that is suitable for almost all cases.

    Only one unit at a time will be granted locks. All requests by that
    unit will be granted. So only one unit may run tasks guarded by a lock,
    and the lock name is irrelevant.
    '''
    def default_grant(self, lock, unit, granted, queue):
        '''Grant locks to only one unit at a time, regardless of the lock name.

        This lets us keep separate locks like join and restart,
        while ensuring the operations do not occur on different nodes
        at the same time.
        '''
        # self.grants appears to map unit -> its current grants (per
        # charmhelpers BaseCoordinator); drop units holding nothing.
        existing_grants = {k: v for k, v in self.grants.items() if v}

        # Return True if this unit has already been granted any lock.
        if existing_grants.get(unit):
            self.msg('Granting {} to {} (existing grants)'.format(lock, unit),
                     hookenv.INFO)
            return True

        # Return False if another unit has been granted any lock.
        if existing_grants:
            self.msg('Not granting {} to {} (locks held by {})'
                     ''.format(lock, unit, ','.join(existing_grants.keys())),
                     hookenv.INFO)
            return False

        # Otherwise, return True if the unit is first in the queue for
        # this named lock.
        if queue[0] == unit:
            self.msg('Granting {} to {} (first in queue)'
                     ''.format(lock, unit), hookenv.INFO)
            return True
        else:
            self.msg('Not granting {} to {} (not first in queue)'
                     ''.format(lock, unit), hookenv.INFO)
            return False

    def msg(self, msg, level=hookenv.DEBUG):
        '''Emit a message via the layer's filtered log().'''
        log(msg, level)

    def _save_state(self):
        # If the leader acquired a lock, and now released it,
        # there may be outstanding requests in the queue from other
        # units. We need to grant them now, as we have no guarantee
        # of another hook running on the leader for some time (until
        # update-status).
        self.handle()
        super(SimpleCoordinator, self)._save_state()
+
+
def _instantiate():
    """Create the configured BaseCoordinator subclass.

    The class is taken from the layer's 'class' option, defaulting to
    charms.coordinator.SimpleCoordinator. A bare class name is resolved
    against this module.
    """
    full_name = options.get('class', 'charms.coordinator.SimpleCoordinator')
    module_name, _, class_name = full_name.rpartition('.')
    if not module_name:
        module_name = 'charms.coordinator'

    cls = getattr(importlib.import_module(module_name), class_name)

    assert issubclass(cls, BaseCoordinator), \
        '{} is not a BaseCoordinator subclass'.format(full_name)

    try:
        # The Coordinator layer defines its own peer relation, as it
        # can't piggy back on an existing peer relation that may not
        # exist.
        return cls(peer_relation_name='coordinator')
    finally:
        # finally: log even if the constructor raised.
        log('Using {} coordinator'.format(full_name), hookenv.DEBUG)
+
+
# Instantiate the BaseCoordinator singleton at import time, which
# installs its charmhelpers.core.atstart() hooks; acquire() above
# delegates to this instance.
coordinator = _instantiate()
diff --git a/kubernetes-master/lib/charms/layer/__init__.py b/kubernetes-master/lib/charms/layer/__init__.py
new file mode 100644
index 0000000..a8e0c64
--- /dev/null
+++ b/kubernetes-master/lib/charms/layer/__init__.py
@@ -0,0 +1,60 @@
+import sys
+from importlib import import_module
+from pathlib import Path
+
+
def import_layer_libs():
    """
    Import every layer library found under lib/charms/layer.

    Afterwards code can simply do::

        from charms import layer

        layer.foo.do_foo_thing()

    Note: must be called after bootstrap, so the libraries'
    dependencies are installed.
    """
    skip = ('__init__', 'basic', 'execd')
    for entry in Path('lib/charms/layer').glob('*'):
        # Importable units are .py files or package directories.
        looks_importable = entry.suffix == '.py' or entry.is_dir()
        if entry.stem in skip or not looks_importable:
            continue
        import_module('charms.layer.{}'.format(entry.stem))
+
+
# Terrible hack to support the old terrible interface.
# Try to get people to call layer.options.get() instead so
# that we can remove this garbage.
# Cribbed from https://stackoverflow.com/a/48100440/4941864
class OptionsBackwardsCompatibilityHack(sys.modules[__name__].__class__):
    # Module subclass that makes the charms.layer.options *module*
    # itself callable, delegating to options.get() so legacy
    # `layer.options('section')` call sites keep working.
    def __call__(self, section=None, layer_file=None):
        if layer_file is None:
            return self.get(section=section)
        else:
            return self.get(section=section,
                            layer_file=Path(layer_file))
+
+
def patch_options_interface():
    # Swap the charms.layer.options module's class so that calling the
    # module directly (deprecated interface) routes through options.get();
    # see OptionsBackwardsCompatibilityHack above.
    from charms.layer import options
    if sys.version_info.minor >= 5:
        options.__class__ = OptionsBackwardsCompatibilityHack
    else:
        # Py 3.4 doesn't support changing the __class__, so we have to do it
        # another way. The last line is needed because we already have a
        # reference that doesn't get updated with sys.modules.
        name = options.__name__
        hack = OptionsBackwardsCompatibilityHack(name)
        hack.get = options.get
        sys.modules[name] = hack
        sys.modules[__name__].options = hack
+
+
# Apply the backwards-compatibility shim eagerly at import time.
try:
    patch_options_interface()
except ImportError:
    # This may fail if pyyaml hasn't been installed yet (options needs
    # it to parse layer.yaml). But in that case, the bootstrap logic
    # will try it again once it has.
    pass
diff --git a/kubernetes-master/lib/charms/layer/basic.py b/kubernetes-master/lib/charms/layer/basic.py
new file mode 100644
index 0000000..7507203
--- /dev/null
+++ b/kubernetes-master/lib/charms/layer/basic.py
@@ -0,0 +1,446 @@
+import os
+import sys
+import re
+import shutil
+from distutils.version import LooseVersion
+from pkg_resources import Requirement
+from glob import glob
+from subprocess import check_call, check_output, CalledProcessError
+from time import sleep
+
+from charms import layer
+from charms.layer.execd import execd_preinstall
+
+
+def _get_subprocess_env():
+ env = os.environ.copy()
+ env['LANG'] = env.get('LANG', 'C.UTF-8')
+ return env
+
+
def get_series():
    """
    Best-effort detection of the OS series (e.g. 'bionic', 'centos7',
    'ubuntu18.04').

    Sources checked in order: /etc/os-release (ubuntu + some centos),
    /etc/redhat-release (RHEL family), /etc/lsb-release (ubuntu).
    Returns 'unknown' when the OS cannot be determined.
    """
    # Looking for content in /etc/os-release
    # works for ubuntu + some centos
    if os.path.isfile('/etc/os-release'):
        d = {}
        with open('/etc/os-release', 'r') as rel:
            for line in rel:
                line = line.strip()
                # os-release(5) allows blank lines and '#' comments.
                if not line or line.startswith('#'):
                    continue
                # Split only on the first '='; values may contain '='.
                k, _, v = line.partition('=')
                d[k.strip()] = v.strip().replace('"', '')
        # VERSION_ID may be absent (e.g. debian testing/unstable);
        # fall back to the bare ID rather than raising KeyError.
        return '{}{}'.format(d.get('ID', ''), d.get('VERSION_ID', ''))

    # Looking for content in /etc/redhat-release
    # works for redhat enterprise systems
    elif os.path.isfile('/etc/redhat-release'):
        with open('/etc/redhat-release', 'r') as redhatlsb:
            # e.g. "CentOS Linux release 7.7.1908 (Core)"
            line = redhatlsb.readline()
        release = int(line.split("release")[1].split()[0][0])
        return "centos" + str(release)

    # Looking for content in /etc/lsb-release
    # works for ubuntu
    elif os.path.isfile('/etc/lsb-release'):
        d = {}
        with open('/etc/lsb-release', 'r') as lsb:
            for line in lsb:
                k, _, v = line.partition('=')
                d[k.strip()] = v.strip()
        return d.get('DISTRIB_CODENAME', 'unknown')

    # This is what happens if we can't figure out the OS.
    return "unknown"
+
+
def bootstrap_charm_deps():
    """
    Set up the base charm dependencies so that the reactive system can run.

    Installs OS packages, optionally builds a virtualenv, installs the
    bundled wheelhouse, and finally re-execs the interpreter so the new
    libraries are importable. Idempotent via wheelhouse/.bootstrapped.
    """
    # execd must happen first, before any attempt to install packages or
    # access the network, because sites use this hook to do bespoke
    # configuration and install secrets so the rest of this bootstrap
    # and the charm itself can actually succeed. This call does nothing
    # unless the operator has created and populated $JUJU_CHARM_DIR/exec.d.
    execd_preinstall()
    # ensure that $JUJU_CHARM_DIR/bin is on the path, for helper scripts

    series = get_series()

    # OMG?! is build-essentials needed?
    ubuntu_packages = ['python3-pip',
                       'python3-setuptools',
                       'python3-yaml',
                       'python3-dev',
                       'python3-wheel',
                       'build-essential']

    # I'm not going to "yum group info "Development Tools"
    # omitting above madness
    centos_packages = ['python3-pip',
                       'python3-setuptools',
                       'python3-devel',
                       'python3-wheel']

    packages_needed = []
    if 'centos' in series:
        packages_needed = centos_packages
    else:
        packages_needed = ubuntu_packages

    charm_dir = os.environ['JUJU_CHARM_DIR']
    os.environ['PATH'] += ':%s' % os.path.join(charm_dir, 'bin')
    venv = os.path.abspath('../.venv')
    vbin = os.path.join(venv, 'bin')
    vpip = os.path.join(vbin, 'pip')
    vpy = os.path.join(vbin, 'python')
    # hook name tells us whether this run is a charm or series upgrade
    hook_name = os.path.basename(sys.argv[0])
    is_bootstrapped = os.path.exists('wheelhouse/.bootstrapped')
    is_charm_upgrade = hook_name == 'upgrade-charm'
    is_series_upgrade = hook_name == 'post-series-upgrade'
    is_post_upgrade = os.path.exists('wheelhouse/.upgraded')
    is_upgrade = (not is_post_upgrade and
                  (is_charm_upgrade or is_series_upgrade))
    if is_bootstrapped and not is_upgrade:
        # older subordinates might have downgraded charm-env, so we should
        # restore it if necessary
        install_or_update_charm_env()
        activate_venv()
        # the .upgrade file prevents us from getting stuck in a loop
        # when re-execing to activate the venv; at this point, we've
        # activated the venv, so it's safe to clear it
        if is_post_upgrade:
            os.unlink('wheelhouse/.upgraded')
        return
    if os.path.exists(venv):
        try:
            # focal installs or upgrades prior to PR 160 could leave the venv
            # in a broken state which would prevent subsequent charm upgrades
            _load_installed_versions(vpip)
        except CalledProcessError:
            is_broken_venv = True
        else:
            is_broken_venv = False
        if is_upgrade or is_broken_venv:
            # All upgrades should do a full clear of the venv, rather than
            # just updating it, to bring in updates to Python itself
            shutil.rmtree(venv)
    if is_upgrade:
        if os.path.exists('wheelhouse/.bootstrapped'):
            os.unlink('wheelhouse/.bootstrapped')
    # bootstrap wheelhouse
    if os.path.exists('wheelhouse'):
        # releases before eoan need extra easy_install restrictions
        pre_eoan = series in ('ubuntu12.04', 'precise',
                              'ubuntu14.04', 'trusty',
                              'ubuntu16.04', 'xenial',
                              'ubuntu18.04', 'bionic')
        pydistutils_lines = [
            "[easy_install]\n",
            "find_links = file://{}/wheelhouse/\n".format(charm_dir),
            "no_index=True\n",
            "index_url=\n",  # deliberately nothing here; disables it.
        ]
        if pre_eoan:
            pydistutils_lines.append("allow_hosts = ''\n")
        with open('/root/.pydistutils.cfg', 'w') as fp:
            # make sure that easy_install also only uses the wheelhouse
            # (see https://github.com/pypa/pip/issues/410)
            fp.writelines(pydistutils_lines)
        if 'centos' in series:
            yum_install(packages_needed)
        else:
            apt_install(packages_needed)
        from charms.layer import options
        cfg = options.get('basic')
        # include packages defined in layer.yaml
        if 'centos' in series:
            yum_install(cfg.get('packages', []))
        else:
            apt_install(cfg.get('packages', []))
        # if we're using a venv, set it up
        if cfg.get('use_venv'):
            if not os.path.exists(venv):
                series = get_series()
                if series in ('ubuntu12.04', 'precise',
                              'ubuntu14.04', 'trusty'):
                    apt_install(['python-virtualenv'])
                elif 'centos' in series:
                    yum_install(['python-virtualenv'])
                else:
                    apt_install(['virtualenv'])
                cmd = ['virtualenv', '-ppython3', '--never-download', venv]
                if cfg.get('include_system_packages'):
                    cmd.append('--system-site-packages')
                check_call(cmd, env=_get_subprocess_env())
            os.environ['PATH'] = ':'.join([vbin, os.environ['PATH']])
            pip = vpip
        else:
            pip = 'pip3'
            # save a copy of system pip to prevent `pip3 install -U pip`
            # from changing it
            if os.path.exists('/usr/bin/pip'):
                shutil.copy2('/usr/bin/pip', '/usr/bin/pip.save')
        pre_install_pkgs = ['pip', 'setuptools', 'setuptools-scm']
        # we bundle these packages to work around bugs in older versions (such
        # as https://github.com/pypa/pip/issues/56), but if the system already
        # provided a newer version, downgrading it can cause other problems
        _update_if_newer(pip, pre_install_pkgs)
        # install the rest of the wheelhouse deps (extract the pkg names into
        # a set so that we can ignore the pre-install packages and let pip
        # choose the best version in case there are multiple from layer
        # conflicts)
        pkgs = _load_wheelhouse_versions().keys() - set(pre_install_pkgs)
        reinstall_flag = '--force-reinstall'
        if not cfg.get('use_venv', True) and pre_eoan:
            reinstall_flag = '--ignore-installed'
        check_call([pip, 'install', '-U', reinstall_flag, '--no-index',
                    '--no-cache-dir', '-f', 'wheelhouse'] + list(pkgs),
                   env=_get_subprocess_env())
        # re-enable installation from pypi
        os.remove('/root/.pydistutils.cfg')

        # install pyyaml for centos7, since, unlike the ubuntu image, the
        # default image for centos doesn't include pyyaml; see the discussion:
        # https://discourse.jujucharms.com/t/charms-for-centos-lets-begin
        if 'centos' in series:
            check_call([pip, 'install', '-U', 'pyyaml'],
                       env=_get_subprocess_env())

        # install python packages from layer options
        if cfg.get('python_packages'):
            check_call([pip, 'install', '-U'] + cfg.get('python_packages'),
                       env=_get_subprocess_env())
        if not cfg.get('use_venv'):
            # restore system pip to prevent `pip3 install -U pip`
            # from changing it
            if os.path.exists('/usr/bin/pip.save'):
                shutil.copy2('/usr/bin/pip.save', '/usr/bin/pip')
                os.remove('/usr/bin/pip.save')
        # setup wrappers to ensure envs are used for scripts
        install_or_update_charm_env()
        for wrapper in ('charms.reactive', 'charms.reactive.sh',
                        'chlp', 'layer_option'):
            src = os.path.join('/usr/local/sbin', 'charm-env')
            dst = os.path.join('/usr/local/sbin', wrapper)
            if not os.path.exists(dst):
                os.symlink(src, dst)
        if cfg.get('use_venv'):
            shutil.copy2('bin/layer_option', vbin)
        else:
            shutil.copy2('bin/layer_option', '/usr/local/bin/')
        # re-link the charm copy to the wrapper in case charms
        # call bin/layer_option directly (as was the old pattern)
        os.remove('bin/layer_option')
        os.symlink('/usr/local/sbin/layer_option', 'bin/layer_option')
        # flag us as having already bootstrapped so we don't do it again
        open('wheelhouse/.bootstrapped', 'w').close()
        if is_upgrade:
            # flag us as having already upgraded so we don't do it again
            open('wheelhouse/.upgraded', 'w').close()
        # Ensure that the newly bootstrapped libs are available.
        # Note: this only seems to be an issue with namespace packages.
        # Non-namespace-package libs (e.g., charmhelpers) are available
        # without having to reload the interpreter. :/
        reload_interpreter(vpy if cfg.get('use_venv') else sys.argv[0])
+
+
def _load_installed_versions(pip):
    """Return {project_name: LooseVersion} for pinned entries reported
    by ``pip freeze`` using the given pip executable."""
    frozen = check_output([pip, 'freeze']).decode('utf8')
    versions = {}
    for line in frozen.splitlines():
        try:
            req = Requirement.parse(line)
        except ValueError:
            # e.g. editable installs; not a plain requirement spec
            continue
        for op, ver in req.specs:
            if op == '==':
                versions[req.project_name] = LooseVersion(ver)
    return versions
+
+
+def _load_wheelhouse_versions():
+ versions = {}
+ for wheel in glob('wheelhouse/*'):
+ pkg, ver = os.path.basename(wheel).rsplit('-', 1)
+ # nb: LooseVersion ignores the file extension
+ versions[pkg.replace('_', '-')] = LooseVersion(ver)
+ return versions
+
+
def _update_if_newer(pip, pkgs):
    """Install each pkg from the wheelhouse only when it is missing or
    the wheelhouse copy is newer than the installed one."""
    current = _load_installed_versions(pip)
    available = _load_wheelhouse_versions()
    for pkg in pkgs:
        # Short-circuit keeps available[pkg] from being evaluated for
        # packages that aren't installed at all.
        if pkg not in current or available[pkg] > current[pkg]:
            check_call([pip, 'install', '-U', '--no-index', '-f', 'wheelhouse',
                        pkg], env=_get_subprocess_env())
+
+
def install_or_update_charm_env():
    """Install the bundled charm-env wrapper into /usr/local/sbin when
    it is newer than whatever is already there."""
    # On Trusty python3-pkg-resources is not installed by default.
    try:
        from pkg_resources import parse_version
    except ImportError:
        apt_install(['python3-pkg-resources'])
        from pkg_resources import parse_version

    def _version_of(path):
        # Missing or broken binaries count as version 0.0.0.
        try:
            out = check_output([path, '--version']).decode('utf8')
        except (CalledProcessError, FileNotFoundError):
            return parse_version('0.0.0')
        return parse_version(out)

    installed_version = _version_of('/usr/local/sbin/charm-env')
    bundled_version = _version_of('bin/charm-env')
    if installed_version < bundled_version:
        shutil.copy2('bin/charm-env', '/usr/local/sbin/')
+
+
def activate_venv():
    """
    Activate the venv if enabled in ``layer.yaml``.

    This is handled automatically for normal hooks, but actions might
    need to invoke this manually, using something like:

        # Load modules from $JUJU_CHARM_DIR/lib
        import sys
        sys.path.append('lib')

        from charms.layer.basic import activate_venv
        activate_venv()

    This will ensure that modules installed in the charm's
    virtual environment are available to the action.
    """
    from charms.layer import options
    venv = os.path.abspath('../.venv')
    vbin = os.path.join(venv, 'bin')
    vpy = os.path.join(vbin, 'python')
    use_venv = options.get('basic', 'use_venv')
    # Only re-exec when a venv is wanted and we aren't already inside it
    # ('.venv' in sys.executable); this keeps the exec from looping.
    if use_venv and '.venv' not in sys.executable:
        # activate the venv
        os.environ['PATH'] = ':'.join([vbin, os.environ['PATH']])
        # reload_interpreter never returns: it replaces this process.
        reload_interpreter(vpy)
    # Already in the right interpreter: install the options() shim and
    # pull in all layer libraries.
    layer.patch_options_interface()
    layer.import_layer_libs()
+
+
def reload_interpreter(python):
    """
    Re-exec the current process under the given python executable.

    This guarantees freshly installed dependencies are importable;
    newly installed modules in namespace packages sometimes seem to
    not be picked up by Python 3 without a fresh interpreter.
    """
    # execve never returns: the process image is replaced in place.
    os.execve(python, [python] + list(sys.argv), os.environ)
+
+
def apt_install(packages):
    """
    Install deb packages with apt-get, retrying up to three times.

    Forces the noninteractive frontend and keeps existing conffiles —
    options that are often missed but should really be set for
    unattended installs.
    """
    if isinstance(packages, (str, bytes)):
        packages = [packages]

    env = _get_subprocess_env()
    env.setdefault('DEBIAN_FRONTEND', 'noninteractive')

    install_cmd = ['apt-get',
                   '--option=Dpkg::Options::=--force-confold',
                   '--assume-yes',
                   'install']
    for attempt in range(3):
        try:
            check_call(install_cmd + packages, env=env)
        except CalledProcessError:
            if attempt == 2:  # third attempt: give up and propagate
                raise
            try:
                # a stale package index is a common cause of failure
                check_call(['apt-get', 'update'], env=env)
            except CalledProcessError:
                # could also be a transient dpkg lock; retry regardless
                pass
            sleep(5)
        else:
            break
+
+
def yum_install(packages):
    """Install packages with yum, retrying up to three times.

    Mirrors apt_install for consistency: on failure, refresh the
    package metadata, wait, and retry; the third failure propagates.
    A no-op when ``packages`` is empty.
    """
    if not packages:
        return
    env = os.environ.copy()
    cmd = ['yum', '-y', 'install']
    for attempt in range(3):
        try:
            check_call(cmd + packages, env=env)
        except CalledProcessError:
            if attempt == 2:  # third attempt: give up and propagate
                raise
            try:
                # '-y' keeps the retry unattended; without it yum may
                # prompt for confirmation and abort, defeating the
                # retry loop.
                check_call(['yum', '-y', 'update'], env=env)
            except CalledProcessError:
                # transient metadata/lock issue; retry the install anyway
                pass
            sleep(5)
        else:
            break
+
+
def init_config_states():
    """Set the config.* reactive states from the charm config.

    For every option declared in config.yaml:
      - config.changed and config.changed.<opt> when the value changed
      - config.set.<opt> toggled on the value's truthiness
      - config.default.<opt> toggled on equality with the declared default
    """
    import yaml
    from charmhelpers.core import hookenv
    from charms.reactive import set_state
    from charms.reactive import toggle_state
    config = hookenv.config()
    defaults = {}
    definitions = {}
    config_yaml = os.path.join(hookenv.charm_dir(), 'config.yaml')
    if os.path.exists(config_yaml):
        with open(config_yaml) as fp:
            definitions = yaml.safe_load(fp).get('options', {})
        defaults = {opt: meta.get('default')
                    for opt, meta in definitions.items()}
    for opt in definitions.keys():
        if config.changed(opt):
            set_state('config.changed')
            set_state('config.changed.{}'.format(opt))
        toggle_state('config.set.{}'.format(opt), config.get(opt))
        toggle_state('config.default.{}'.format(opt),
                     config.get(opt) == defaults[opt])
+
+
def clear_config_states():
    """Remove every config.* reactive state and flush the kv store.

    Intended to run at hook exit, undoing init_config_states.
    """
    from charmhelpers.core import hookenv, unitdata
    from charms.reactive import remove_state
    config = hookenv.config()
    remove_state('config.changed')
    for opt in config.keys():
        for prefix in ('config.changed', 'config.set', 'config.default'):
            remove_state('{}.{}'.format(prefix, opt))
    unitdata.kv().flush()
diff --git a/kubernetes-master/lib/charms/layer/execd.py b/kubernetes-master/lib/charms/layer/execd.py
new file mode 100644
index 0000000..438d9a1
--- /dev/null
+++ b/kubernetes-master/lib/charms/layer/execd.py
@@ -0,0 +1,114 @@
+# Copyright 2014-2016 Canonical Limited.
+#
+# This file is part of layer-basic, the reactive base layer for Juju.
+#
+# charm-helpers is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License version 3 as
+# published by the Free Software Foundation.
+#
+# charm-helpers is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with charm-helpers. If not, see .
+
+# This module may only import from the Python standard library.
+import os
+import sys
+import subprocess
+import time
+
+'''
+execd/preinstall
+
+Read the layer-basic docs for more info on how to use this feature.
+https://charmsreactive.readthedocs.io/en/latest/layer-basic.html#exec-d-support
+'''
+
+
def default_execd_dir():
    """Return the standard exec.d directory inside the charm directory."""
    charm_dir = os.environ['JUJU_CHARM_DIR']
    return os.path.join(charm_dir, 'exec.d')
+
+
def execd_module_paths(execd_dir=None):
    """Yield the full path of each module directory within execd_dir.

    Falls back to the default exec.d location when no directory is
    given; yields nothing when the directory does not exist.
    """
    base = execd_dir or default_execd_dir()
    if not os.path.exists(base):
        return
    for entry in os.listdir(base):
        candidate = os.path.join(base, entry)
        if os.path.isdir(candidate):
            yield candidate
+
+
def execd_submodule_paths(command, execd_dir=None):
    """Yield the path of every executable `command` file found inside a
    module directory under execd_dir."""
    for module in execd_module_paths(execd_dir):
        candidate = os.path.join(module, command)
        if os.path.isfile(candidate) and os.access(candidate, os.X_OK):
            yield candidate
+
+
def execd_sentinel_path(submodule_path):
    """Return the hidden sentinel file recording that an execd submodule
    has already run.

    For <execd>/<module>/<cmd> this is <execd>/.<module>_<cmd>.done
    """
    module_dir = os.path.dirname(submodule_path)
    sentinel_name = '.{}_{}.done'.format(os.path.basename(module_dir),
                                         os.path.basename(submodule_path))
    return os.path.join(os.path.dirname(module_dir), sentinel_name)
+
+
+def execd_run(command, execd_dir=None, stop_on_error=True, stderr=None):
+ """Run command for each module within execd_dir which defines it."""
+ if stderr is None:
+ stderr = sys.stdout
+ for submodule_path in execd_submodule_paths(command, execd_dir):
+ # Only run each execd once. We cannot simply run them in the
+ # install hook, as potentially storage hooks are run before that.
+ # We cannot rely on them being idempotent.
+ sentinel = execd_sentinel_path(submodule_path)
+ if os.path.exists(sentinel):
+ continue
+
+ try:
+ subprocess.check_call([submodule_path], stderr=stderr,
+ universal_newlines=True)
+ with open(sentinel, 'w') as f:
+ f.write('{} ran successfully {}\n'.format(submodule_path,
+ time.ctime()))
+ f.write('Removing this file will cause it to be run again\n')
+ except subprocess.CalledProcessError as e:
+ # Logs get the details. We can't use juju-log, as the
+ # output may be substantial and exceed command line
+ # length limits.
+ print("ERROR ({}) running {}".format(e.returncode, e.cmd),
+ file=stderr)
+ print("STDOUT<>> `get_version('kubelet')
+ (1, 6, 0)
+
+ """
+ cmd = '{} --version'.format(bin_name).split()
+ version_string = subprocess.check_output(cmd).decode('utf-8')
+ return tuple(int(q) for q in re.findall("[0-9]+", version_string)[:3])
+
+
def retry(times, delay_secs):
    """ Decorator for retrying a method call.

    Args:
        times: How many times should we retry before giving up
        delay_secs: Delay in secs

    Returns: A callable that would return the last call outcome
    """
    from functools import wraps

    def retry_decorator(func):
        """ Decorator to wrap the function provided.

        Args:
            func: Provided function should return either True od False

        Returns: A callable that would return the last call outcome

        """
        # functools.wraps preserves the wrapped function's name and
        # docstring (the original wrapper discarded them).
        @wraps(func)
        def _wrapped(*args, **kwargs):
            result = func(*args, **kwargs)
            attempt = 0
            # Retry while the result is falsy; the loop condition makes
            # the original redundant `if res: break` unnecessary.
            while not result and attempt < times:
                sleep(delay_secs)
                result = func(*args, **kwargs)
                attempt += 1
            return result
        return _wrapped

    return retry_decorator
+
+
def calculate_resource_checksum(resource):
    ''' Calculate a checksum for a resource.

    Returns the md5 hexdigest of the attached resource file, or the
    digest of the empty string when no resource is attached.
    '''
    md5 = hashlib.md5()
    path = hookenv.resource_get(resource)
    if path:
        # Stream in 1 MiB chunks; snap resources can be hundreds of MB,
        # so avoid reading the whole file into memory at once.
        with open(path, 'rb') as f:
            for chunk in iter(lambda: f.read(1 << 20), b''):
                md5.update(chunk)
    return md5.hexdigest()
+
+
def get_resource_checksum_db_key(checksum_prefix, resource):
    ''' Convert a resource name to a resource checksum database key. '''
    return '{}{}'.format(checksum_prefix, resource)
+
+
def migrate_resource_checksums(checksum_prefix, snap_resources):
    ''' Migrate resource checksums from the old schema to the new one. '''
    for resource in snap_resources:
        new_key = get_resource_checksum_db_key(checksum_prefix, resource)
        if db.get(new_key):
            # Already migrated.
            continue
        path = hookenv.resource_get(resource)
        if path:
            # Carry over the checksum recorded under the old
            # charms.reactive.helpers.any_file_changed key.
            db.set(new_key, db.get('reactive.files_changed.' + path))
        else:
            # No resource attached. Previously no checksum was stored;
            # now we treat that as a 0-byte resource, so record the
            # empty-input digest as the baseline.
            db.set(new_key, hashlib.md5().hexdigest())
+
+
def check_resources_for_upgrade_needed(checksum_prefix, snap_resources):
    '''Return True when any resource checksum differs from the one
    recorded in the unit kv store.'''
    hookenv.status_set('maintenance', 'Checking resources')
    return any(
        calculate_resource_checksum(resource) !=
        db.get(get_resource_checksum_db_key(checksum_prefix, resource))
        for resource in snap_resources
    )
+
+
def calculate_and_store_resource_checksums(checksum_prefix, snap_resources):
    '''Record the current checksum of every given resource in kv.'''
    for resource in snap_resources:
        db.set(get_resource_checksum_db_key(checksum_prefix, resource),
               calculate_resource_checksum(resource))
+
+
def get_ingress_address(endpoint_name):
    '''Return the preferred ingress address for the given endpoint.

    Falls back to the unit's private-address on Juju versions without
    network-spaces support.
    '''
    try:
        info = hookenv.network_get(endpoint_name)
    except NotImplementedError:
        info = {}

    if not info or 'ingress-addresses' not in info:
        # if they don't have ingress-addresses they are running a juju that
        # doesn't support spaces, so just return the private address
        return hookenv.unit_get('private-address')

    addresses = info['ingress-addresses']

    # Need to prefer non-fan IP addresses due to various issues, e.g.
    # https://bugs.launchpad.net/charm-gcp-integrator/+bug/1822997
    # Fan typically likes to use IPs in the 240.0.0.0/4 block, so we'll
    # prioritize those last. Not technically correct, but good enough.
    try:
        def _fan_last(addr):
            return int(addr.partition('.')[0]) >= 240
        addresses = sorted(addresses, key=_fan_last)
    except Exception:
        # e.g. IPv6 addresses make int() raise; keep original ordering.
        hookenv.log(traceback.format_exc())

    return addresses[0]
+
+
def get_ingress_address6(endpoint_name):
    '''Return the first IPv6 ingress address for the endpoint, or None.

    None is returned when the Juju version has no network-spaces
    support or when no ingress address is IPv6.
    '''
    try:
        network_info = hookenv.network_get(endpoint_name)
    except NotImplementedError:
        network_info = {}

    if not network_info or 'ingress-addresses' not in network_info:
        return None

    # Scan every ingress address rather than bailing out on the first
    # non-IPv6 entry, so a v6 address listed after a v4 one is found.
    for addr in network_info['ingress-addresses']:
        ip_addr = ipaddress.ip_interface(addr).ip
        if ip_addr.version == 6:
            return str(ip_addr)
    return None
+
+
def service_restart(service_name):
    '''Restart a host service, noting it in the unit's status line.'''
    message = 'Restarting {0} service'.format(service_name)
    hookenv.status_set('maintenance', message)
    host.service_restart(service_name)
+
+
def service_start(service_name):
    '''Start a host service, logging the action.

    Bug fix: this previously called host.service_stop(), contradicting
    both the function name and the log message.
    '''
    hookenv.log('Starting {0} service.'.format(service_name))
    host.service_start(service_name)
+
def service_stop(service_name):
    '''Stop a host service, logging the action.'''
    message = 'Stopping {0} service.'.format(service_name)
    hookenv.log(message)
    host.service_stop(service_name)
+
+
def arch():
    '''Return the dpkg package architecture for this machine as a
    string, e.g. "amd64" or "arm64".'''
    # dpkg prints the architecture followed by a newline; strip it
    # before decoding the bytes to text.
    raw = check_output(['dpkg', '--print-architecture'])
    return raw.rstrip().decode('utf-8')
+
+
def get_service_ip(service, namespace="kube-system", errors_fatal=True):
    '''Return the clusterIP of a Kubernetes service.

    When the kubectl lookup fails, re-raise if errors_fatal else
    return None.
    '''
    try:
        raw = kubectl('get', 'service', '--namespace', namespace, service,
                      '--output', 'json')
    except CalledProcessError:
        if errors_fatal:
            raise
        return None
    return json.loads(raw.decode())['spec']['clusterIP']
+
+
def kubectl(*args):
    ''' Run a kubectl cli command with a config file. Returns stdout and throws
    an error if the command fails. '''
    command = ['kubectl', '--kubeconfig=' + kubeclientconfig_path]
    command.extend(args)
    hookenv.log('Executing {}'.format(command))
    return check_output(command)
+
+
def kubectl_success(*args):
    '''Run kubectl with the given args, reporting success as a bool
    instead of raising.'''
    try:
        kubectl(*args)
    except CalledProcessError:
        return False
    return True
+
+
def kubectl_manifest(operation, manifest):
    ''' Wrap the kubectl creation command when using filepath resources
    :param operation - one of get, create, delete, replace
    :param manifest - filepath to the manifest
    '''
    if operation == 'delete':
        # Deletions are a special case: --now requests immediate removal.
        return kubectl_success(operation, '-f', manifest, '--now')
    if operation == 'create' and kubectl_success('get', '-f', manifest):
        # The definition already exists, so it's probably safe to assume
        # creation succeeded earlier; avoid an error from re-creating it.
        hookenv.log('Skipping definition for {}'.format(manifest))
        return True
    # Execute the requested command for all remaining cases.
    return kubectl_success(operation, '-f', manifest)
+
+
def get_node_name():
    '''Return this unit's Kubernetes node name, lower-cased.

    AWS registers nodes under their FQDN; any other provider (or bare
    metal) uses the plain hostname.
    '''
    extra_args = parse_extra_args('kubelet-extra-args')
    cloud_provider = extra_args.get('cloud-provider', '')
    # A ready cloud endpoint overrides any extra-args setting;
    # first match wins, same order as the original elif chain.
    for flag, provider in (('endpoint.aws.ready', 'aws'),
                           ('endpoint.gcp.ready', 'gce'),
                           ('endpoint.openstack.ready', 'openstack'),
                           ('endpoint.vsphere.ready', 'vsphere'),
                           ('endpoint.azure.ready', 'azure')):
        if is_state(flag):
            cloud_provider = provider
            break
    if cloud_provider == 'aws':
        return getfqdn().lower()
    return gethostname().lower()
+
+
def create_kubeconfig(kubeconfig, server, ca, key=None, certificate=None,
                      user='ubuntu', context='juju-context',
                      cluster='juju-cluster', password=None, token=None,
                      keystone=False, aws_iam_cluster_id=None):
    '''Create a configuration for Kubernetes based on path using the supplied
    arguments for values of the Kubernetes server, CA, key, certificate, user
    context and cluster.

    Exactly one auth mechanism must be provided: client key/cert pair,
    password, or token (token and password are mutually exclusive).
    Optionally also adds keystone and/or aws-iam credential entries.
    Raises ValueError on invalid auth combinations; CalledProcessError
    if any kubectl invocation fails.
    '''
    if not key and not certificate and not password and not token:
        raise ValueError('Missing authentication mechanism.')

    # token and password are mutually exclusive. Error early if both are
    # present. The developer has requested an impossible situation.
    # see: kubectl config set-credentials --help
    if token and password:
        raise ValueError('Token and Password are mutually exclusive.')
    # Create the config file with the address of the master server.
    cmd = 'kubectl config --kubeconfig={0} set-cluster {1} ' \
          '--server={2} --certificate-authority={3} --embed-certs=true'
    check_call(split(cmd.format(kubeconfig, cluster, server, ca)))
    # Delete old users
    cmd = 'kubectl config --kubeconfig={0} unset users'
    check_call(split(cmd.format(kubeconfig)))
    # Create the credentials using the client flags.
    cmd = 'kubectl config --kubeconfig={0} ' \
          'set-credentials {1} '.format(kubeconfig, user)

    if key and certificate:
        cmd = '{0} --client-key={1} --client-certificate={2} '\
              '--embed-certs=true'.format(cmd, key, certificate)
    if password:
        cmd = "{0} --username={1} --password={2}".format(cmd, user, password)
    # This is mutually exclusive from password. They will not work together.
    if token:
        cmd = "{0} --token={1}".format(cmd, token)
    check_call(split(cmd))
    # Create a default context with the cluster.
    cmd = 'kubectl config --kubeconfig={0} set-context {1} ' \
          '--cluster={2} --user={3}'
    check_call(split(cmd.format(kubeconfig, context, cluster, user)))
    # Make the config use this new context.
    cmd = 'kubectl config --kubeconfig={0} use-context {1}'
    check_call(split(cmd.format(kubeconfig, context)))
    if keystone:
        # create keystone user
        cmd = 'kubectl config --kubeconfig={0} ' \
              'set-credentials keystone-user'.format(kubeconfig)
        check_call(split(cmd))
        # create keystone context
        cmd = 'kubectl config --kubeconfig={0} ' \
              'set-context --cluster={1} ' \
              '--user=keystone-user keystone'.format(kubeconfig, cluster)
        check_call(split(cmd))
        # use keystone context
        cmd = 'kubectl config --kubeconfig={0} ' \
              'use-context keystone'.format(kubeconfig)
        check_call(split(cmd))
        # manually add exec command until kubectl can do it for us
        # NOTE(review): the '{}' below is literal YAML text emitted by
        # kubectl for an empty user entry, not a format placeholder.
        with open(kubeconfig, "r") as f:
            content = f.read()
        content = content.replace("""- name: keystone-user
  user: {}""", """- name: keystone-user
  user:
    exec:
      command: "/snap/bin/client-keystone-auth"
      apiVersion: "client.authentication.k8s.io/v1beta1"
""")
        with open(kubeconfig, "w") as f:
            f.write(content)
    if aws_iam_cluster_id:
        # create aws-iam context
        cmd = 'kubectl config --kubeconfig={0} ' \
              'set-context --cluster={1} ' \
              '--user=aws-iam-user aws-iam-authenticator'
        check_call(split(cmd.format(kubeconfig, cluster)))

        # append a user for aws-iam
        # NOTE(review): "<>" looks like a mangled role-ARN placeholder
        # the operator is expected to replace -- confirm the intended
        # literal before changing it.
        cmd = 'kubectl --kubeconfig={0} config set-credentials ' \
              'aws-iam-user --exec-command=aws-iam-authenticator ' \
              '--exec-arg="token" --exec-arg="-i" --exec-arg="{1}" ' \
              '--exec-arg="-r" --exec-arg="<>" ' \
              '--exec-api-version=client.authentication.k8s.io/v1alpha1'
        check_call(split(cmd.format(kubeconfig, aws_iam_cluster_id)))

        # not going to use aws-iam context by default since we don't have
        # the desired arn. This will make the config not usable if copied.

        # cmd = 'kubectl config --kubeconfig={0} ' \
        #       'use-context aws-iam-authenticator'.format(kubeconfig)
        # check_call(split(cmd))
+
+
def parse_extra_args(config_key):
    '''Parse a space-separated "key=value" charm config option into a
    dict; bare keys (no "=") map to the string 'true'.'''
    args = {}
    for element in hookenv.config().get(config_key, '').split():
        name, sep, value = element.partition('=')
        args[name] = value if sep else 'true'
    return args
+
+
def configure_kubernetes_service(key, service, base_args, extra_args_key):
    '''Apply a k8s service's arguments via `snap set <service> args=...`.

    :param key: kv-store prefix for remembering previously set args
    :param service: snap name, e.g. 'kube-proxy'
    :param base_args: dict of default arguments
    :param extra_args_key: charm config option holding operator overrides
    '''
    db = unitdata.kv()

    prev_args_key = key + service
    prev_snap_args = db.get(prev_args_key) or {}

    extra_args = parse_extra_args(extra_args_key)

    # Precedence (lowest to highest): base args, operator extra-args,
    # CIS-injected args.
    args = {}
    args.update(base_args)
    args.update(extra_args)

    # CIS benchmark action may inject kv config to pass failing tests. Merge
    # these after the func args as they should take precedence.
    cis_args_key = 'cis-' + service
    cis_args = db.get(cis_args_key) or {}
    args.update(cis_args)

    # Remove any args with 'None' values (all k8s args are 'k=v') and
    # construct an arg string for use by 'snap set'.
    args = {k: v for k, v in args.items() if v is not None}
    args = ['--%s="%s"' % arg for arg in args.items()]
    args = ' '.join(args)

    snap_opts = {}
    for arg in prev_snap_args:
        # remove previous args by setting to null
        snap_opts[arg] = 'null'
    snap_opts['args'] = args
    snap_opts = ['%s=%s' % opt for opt in snap_opts.items()]

    cmd = ['snap', 'set', service] + snap_opts
    check_call(cmd)

    # Now that we've started doing snap configuration through the "args"
    # option, we should never need to clear previous args again.
    db.set(prev_args_key, {})
+
+
def _snap_common_path(component):
    '''Return the snap's writable "common" directory as a Path.'''
    return Path('/var/snap') / component / 'common'
+
+
def cloud_config_path(component):
    '''Path of the component's cloud-config.conf in its snap common dir.'''
    base = _snap_common_path(component)
    return base / 'cloud-config.conf'
+
+
def _gcp_creds_path(component):
    '''Path of the component's GCP credentials file.'''
    base = _snap_common_path(component)
    return base / 'gcp-creds.json'
+
+
def _daemon_env_path(component):
    '''Path of the component's daemon environment file.'''
    base = _snap_common_path(component)
    return base / 'environment'
+
+
def _cloud_endpoint_ca_path(component):
    '''Path of the component's cloud endpoint CA certificate.'''
    base = _snap_common_path(component)
    return base / 'cloud-endpoint-ca.crt'
+
+
def encryption_config_path():
    '''Full path of the kube-apiserver encryption-at-rest config file.'''
    common = _snap_common_path('kube-apiserver')
    return common / 'encryption' / 'encryption_config.yaml'
+
+
def write_gcp_snap_config(component):
    '''Write GCP credentials, cloud-config and daemon environment for a
    snap component so its services authenticate via the creds file
    instead of the metadata server.'''
    # gcp requires additional credentials setup
    gcp = endpoint_from_flag('endpoint.gcp.ready')
    creds_path = _gcp_creds_path(component)
    with creds_path.open('w') as fp:
        # Restrict the credentials file to root before writing secrets.
        os.fchmod(fp.fileno(), 0o600)
        fp.write(gcp.credentials)

    # create a cloud-config file that sets token-url to nil to make the
    # services use the creds env var instead of the metadata server, as
    # well as making the cluster multizone
    comp_cloud_config_path = cloud_config_path(component)
    comp_cloud_config_path.write_text('[Global]\n'
                                      'token-url = nil\n'
                                      'multizone = true\n')

    daemon_env_path = _daemon_env_path(component)
    if daemon_env_path.exists():
        daemon_env = daemon_env_path.read_text()
        if not daemon_env.endswith('\n'):
            daemon_env += '\n'
    else:
        daemon_env = ''
    # NOTE(review): gcp_creds_env_key is a module-level constant defined
    # elsewhere in this file (presumably GOOGLE_APPLICATION_CREDENTIALS)
    # -- confirm before relying on it.
    if gcp_creds_env_key not in daemon_env:
        # Append the env var only once; rewrite the file idempotently.
        daemon_env += '{}={}\n'.format(gcp_creds_env_key, creds_path)
        daemon_env_path.parent.mkdir(parents=True, exist_ok=True)
        daemon_env_path.write_text(daemon_env)
+
+
def generate_openstack_cloud_config():
    '''Assemble the OpenStack cloud.conf contents from the
    endpoint.openstack.ready relation data.

    Returns an INI-formatted string (with trailing newline) containing
    [Global], [LoadBalancer] and, when any option is set, [BlockStorage].
    '''
    # openstack requires additional credentials setup
    openstack = endpoint_from_flag('endpoint.openstack.ready')

    lines = [
        '[Global]',
        'auth-url = {}'.format(openstack.auth_url),
        'region = {}'.format(openstack.region),
        'username = {}'.format(openstack.username),
        'password = {}'.format(openstack.password),
        'tenant-name = {}'.format(openstack.project_name),
        'domain-name = {}'.format(openstack.user_domain_name),
        'tenant-domain-name = {}'.format(openstack.project_domain_name),
    ]
    if openstack.endpoint_tls_ca:
        # Fixed path where the CA is expected to be mounted downstream.
        lines.append('ca-file = /etc/config/endpoint-ca.cert')

    lines.extend([
        '',
        '[LoadBalancer]',
    ])

    if openstack.has_octavia in (True, None):
        # Newer integrator charm will detect whether underlying OpenStack has
        # Octavia enabled so we can set this intelligently. If we're still
        # related to an older integrator, though, default to assuming Octavia
        # is available.
        lines.append('use-octavia = true')
    else:
        lines.append('use-octavia = false')
        lines.append('lb-provider = haproxy')
    # Optional LB settings; only emitted when relation data provides them.
    if openstack.subnet_id:
        lines.append('subnet-id = {}'.format(openstack.subnet_id))
    if openstack.floating_network_id:
        lines.append('floating-network-id = {}'.format(
            openstack.floating_network_id))
    if openstack.lb_method:
        lines.append('lb-method = {}'.format(
            openstack.lb_method))
    if openstack.manage_security_groups:
        lines.append('manage-security-groups = {}'.format(
            openstack.manage_security_groups))
    # Emit [BlockStorage] only when at least one option was provided.
    if any([openstack.bs_version,
            openstack.trust_device_path,
            openstack.ignore_volume_az]):
        lines.append('')
        lines.append('[BlockStorage]')
        if openstack.bs_version is not None:
            lines.append('bs-version = {}'.format(openstack.bs_version))
        if openstack.trust_device_path is not None:
            lines.append('trust-device-path = {}'.format(
                openstack.trust_device_path))
        if openstack.ignore_volume_az is not None:
            lines.append('ignore-volume-az = {}'.format(
                openstack.ignore_volume_az))
    return '\n'.join(lines) + '\n'
+
+
def write_azure_snap_config(component):
    '''Write the Azure cloud-config JSON for the given snap component
    from endpoint.azure.ready relation data.'''
    azure = endpoint_from_flag('endpoint.azure.ready')
    config = {
        'useInstanceMetadata': True,
        'useManagedIdentityExtension': True,
        'subscriptionId': azure.subscription_id,
        'resourceGroup': azure.resource_group,
        'location': azure.resource_group_location,
        'vnetName': azure.vnet_name,
        'vnetResourceGroup': azure.vnet_resource_group,
        'subnetName': azure.subnet_name,
        'securityGroupName': azure.security_group_name,
        'loadBalancerSku': 'standard',
    }
    cloud_config_path(component).write_text(json.dumps(config))
+
+
def configure_kube_proxy(configure_prefix, api_servers, cluster_cidr,
                         bind_address=None):
    '''Assemble kube-proxy arguments and apply them via snap config.'''
    opts = {
        'cluster-cidr': cluster_cidr,
        'kubeconfig': kubeproxyconfig_path,
        'logtostderr': 'true',
        'v': '0',
        # Spread units across API servers by unit number.
        'master': api_servers[get_unit_number() % len(api_servers)],
        'hostname-override': get_node_name(),
    }
    if bind_address:
        opts['bind-address'] = bind_address
    elif is_ipv6(cluster_cidr):
        opts['bind-address'] = '::'

    if host.is_container():
        # conntrack tuning is not permitted inside containers.
        opts['conntrack-max-per-core'] = '0'

    if is_dual_stack(cluster_cidr):
        opts['feature-gates'] = "IPv6DualStack=true"

    configure_kubernetes_service(configure_prefix, 'kube-proxy',
                                 opts, 'proxy-extra-args')
+
+
def get_unit_number():
    '''Return this unit's numeric index (the N in "unit/N").'''
    _, _, number = hookenv.local_unit().partition('/')
    return int(number)
+
+
def cluster_cidr():
    '''Return the cluster CIDR provided by the CNI'''
    cni = endpoint_from_flag('cni.available')
    if not cni:
        return None
    config = hookenv.config()
    if 'default-cni' in config:
        # Only the master charm declares the default-cni option.
        default_cni = config['default-cni']
    else:
        # Workers learn the default CNI over the kube-control relation.
        kube_control = endpoint_from_flag('kube-control.dns.available')
        if not kube_control:
            return None
        default_cni = kube_control.get_default_cni()
    return cni.get_config(default=default_cni)['cidr']
+
+
def is_dual_stack(cidrs):
    '''Detect IPv4/IPv6 dual stack from CIDRs'''
    versions = set()
    for net in get_networks(cidrs):
        versions.add(net.version)
    return versions == {4, 6}
+
+
def is_ipv4(cidrs):
    '''Detect IPv4 from CIDRs (True when any given CIDR is IPv4).'''
    return get_ipv4_network(cidrs) is not None
+
+
def is_ipv6(cidrs):
    '''Detect IPv6 from CIDRs (True when any given CIDR is IPv6).'''
    return get_ipv6_network(cidrs) is not None
+
+
def is_ipv6_preferred(cidrs):
    '''Detect if IPv6 is preferred from CIDRs.

    True when the first CIDR in the comma-separated list is IPv6.
    NOTE(review): raises IndexError when `cidrs` is empty/None; callers
    appear to pass a non-empty CIDR string -- confirm before hardening.
    '''
    return get_networks(cidrs)[0].version == 6
+
+
def get_networks(cidrs):
    '''Convert a comma-separated list of CIDRs to a list of networks.

    A falsy input (None or empty string) yields an empty list.
    '''
    if not cidrs:
        return []
    networks = []
    for cidr in cidrs.split(','):
        networks.append(ipaddress.ip_interface(cidr).network)
    return networks
+
+
def get_ipv4_network(cidrs):
    '''Get the IPv4 network from the given CIDRs or None'''
    # Keep the LAST IPv4 network when several are given, matching the
    # dict-comprehension semantics of the original implementation.
    match = None
    for net in get_networks(cidrs):
        if net.version == 4:
            match = net
    return match
+
+
def get_ipv6_network(cidrs):
    '''Get the IPv6 network from the given CIDRs or None'''
    # Keep the LAST IPv6 network when several are given, matching the
    # dict-comprehension semantics of the original implementation.
    match = None
    for net in get_networks(cidrs):
        if net.version == 6:
            match = net
    return match
+
+
def enable_ipv6_forwarding():
    '''Enable net.ipv6.conf.all.forwarding in sysctl if it is not already.'''
    cmd = ['sysctl', 'net.ipv6.conf.all.forwarding=1']
    check_call(cmd)
+
+
def get_bind_addrs(ipv4=True, ipv6=True):
    '''Get all global-scoped addresses that we might bind to.

    Interfaces that are down or belong to overlay/virtual bridges
    (lxd, flannel, cni, virbr, docker) are skipped.
    '''
    try:
        output = check_output(["ip", "-br", "addr", "show", "scope", "global"])
    except CalledProcessError:
        # stderr will have any details, and go to the log
        hookenv.log('Unable to determine global addresses', hookenv.ERROR)
        return []

    skip_prefixes = ('lxdbr', 'flannel', 'cni', 'virbr', 'docker')
    wanted_versions = set()
    if ipv4:
        wanted_versions.add(4)
    if ipv6:
        wanted_versions.add(6)

    result = []
    for line in output.decode('utf8').splitlines():
        intf, state, *cidrs = line.split()
        if state != 'UP':
            continue
        if intf.startswith(skip_prefixes):
            continue
        for cidr in cidrs:
            ip_addr = ipaddress.ip_interface(cidr).ip
            if ip_addr.version in wanted_versions:
                result.append(str(ip_addr))
    return result
+
+
class InvalidVMwareHost(Exception):
    '''Raised when the DMI product serial does not look VMware-formatted.'''
    pass
+
+
def _get_vmware_uuid():
    '''Derive this VM's UUID from the DMI product serial in sysfs.

    Returns the canonical 8-4-4-4-12 UUID string, or 'UNKNOWN' when the
    sysfs file cannot be read.  Raises InvalidVMwareHost when the serial
    is readable but not VMware-formatted.
    '''
    serial_id_file = '/sys/class/dmi/id/product_serial'
    # The serial id from VMWare VMs comes in following format:
    # VMware-42 28 13 f5 d4 20 71 61-5d b0 7b 96 44 0c cf 54
    try:
        with open(serial_id_file, 'r') as f:
            serial_string = f.read().strip()
            if "VMware-" not in serial_string:
                hookenv.log("Unable to find VMware ID in "
                            "product_serial: {}".format(serial_string))
                raise InvalidVMwareHost
            # Drop the marker and all spacing/dashes, leaving 32 hex chars.
            serial_string = serial_string.split(
                "VMware-")[1].replace(" ", "").replace("-", "")
            # Re-group into the canonical 8-4-4-4-12 UUID layout.
            uuid = "%s-%s-%s-%s-%s" % (
                serial_string[0:8], serial_string[8:12], serial_string[12:16],
                serial_string[16:20], serial_string[20:32])
    except IOError as err:
        # Unreadable sysfs (e.g. not running under VMware or no perms).
        hookenv.log("Unable to read UUID from sysfs: {}".format(err))
        uuid = 'UNKNOWN'

    return uuid
+
diff --git a/kubernetes-master/lib/charms/layer/kubernetes_master.py b/kubernetes-master/lib/charms/layer/kubernetes_master.py
new file mode 100644
index 0000000..89a783e
--- /dev/null
+++ b/kubernetes-master/lib/charms/layer/kubernetes_master.py
@@ -0,0 +1,508 @@
+import csv
+import json
+import random
+import re
+import socket
+import string
+import tempfile
+from base64 import b64decode, b64encode
+from pathlib import Path
+import ipaddress
+from subprocess import check_output, CalledProcessError, TimeoutExpired
+from yaml import safe_load
+
+from charmhelpers.core import hookenv
+from charmhelpers.core.templating import render
+from charmhelpers.core import unitdata
+from charmhelpers.fetch import apt_install
+from charms.reactive import endpoint_from_flag, is_flag_set
+from charms.layer import kubernetes_common
+
+
# Suffix appended to legacy auth files when they are backed up during
# the migration to webhook auth.
AUTH_BACKUP_EXT = "pre-secrets"
# Legacy file-based auth csv files (file-based auth removed in CK 1.19).
AUTH_BASIC_FILE = "/root/cdk/basic_auth.csv"
# Namespace and secret type used for webhook auth token secrets.
AUTH_SECRET_NS = "kube-system"
AUTH_SECRET_TYPE = "juju.is/token-auth"
AUTH_TOKENS_FILE = "/root/cdk/known_tokens.csv"
# Default kube-apiserver port.
STANDARD_API_PORT = 6443
# Ceph client configuration written by install_ceph_common().
CEPH_CONF_DIR = Path("/etc/ceph")
CEPH_CONF = CEPH_CONF_DIR / "ceph.conf"
CEPH_KEYRING = CEPH_CONF_DIR / "ceph.client.admin.keyring"

# Unit-local key/value store shared by helpers in this module.
db = unitdata.kv()
+
+
def get_external_lb_endpoints():
    """
    Return a list of any external API load-balancer endpoints that have
    been manually configured, as (address, port) tuples.
    """
    forced_lb_ips = hookenv.config("loadbalancer-ips").split()
    if forced_lb_ips:
        # if the user gave us IPs for the load balancer, assume
        # they know what they are talking about and use that
        # instead of our information.
        return [(address, STANDARD_API_PORT) for address in forced_lb_ips]
    if is_flag_set("ha.connected"):
        vips = hookenv.config("ha-cluster-vip").split()
        if vips:
            return [(vip, STANDARD_API_PORT) for vip in vips]
        dns_record = hookenv.config("ha-cluster-dns")
        if dns_record:
            return [(dns_record, STANDARD_API_PORT)]
    return []
+
+
def get_lb_endpoints():
    """
    Return all load-balancer endpoints, whether from manual config or via
    relation, as (address, port) tuples.
    """
    manual = get_external_lb_endpoints()
    if manual:
        # Manually configured endpoints always win.
        return manual
    loadbalancer = endpoint_from_flag("loadbalancer.available")
    if loadbalancer:
        return [
            (entry.get("public-address"), entry.get("port"))
            for entry in loadbalancer.get_addresses_ports()
        ]
    return []
+
+
def get_api_endpoint(relation=None):
    """
    Determine the best (address, port) endpoint for a client to connect to.

    Prefers load-balancer endpoints; otherwise uses the relation's
    ingress address when given, falling back to the unit's public IP.
    """
    endpoints = get_lb_endpoints()
    if endpoints:
        # Spread clients across LB endpoints by our unit number.
        index = kubernetes_common.get_unit_number() % len(endpoints)
        return endpoints[index]
    if relation:
        address = hookenv.ingress_address(
            relation.relation_id, hookenv.local_unit()
        )
        return (address, STANDARD_API_PORT)
    return (hookenv.unit_public_ip(), STANDARD_API_PORT)
+
+
def install_ceph_common():
    """Install ceph-common tools and write /etc/ceph config and keyring
    from the ceph-storage relation.

    :return: None
    """
    ceph_admin = endpoint_from_flag("ceph-storage.available")

    context = {
        "mon_hosts": ceph_admin.mon_hosts(),
        "fsid": ceph_admin.fsid(),
        "auth_supported": ceph_admin.auth(),
        "use_syslog": "true",
        "ceph_public_network": "",
        "ceph_cluster_network": "",
        "loglevel": 1,
        "hostname": socket.gethostname(),
    }
    # Install the ceph common utilities.
    apt_install(["ceph-common"], fatal=True)

    CEPH_CONF_DIR.mkdir(exist_ok=True, parents=True)
    # Render the ceph configuration from the ceph conf template.
    render("ceph.conf", str(CEPH_CONF), context)

    # The key can rotate independently of other ceph config, so write it
    # on every call; a failure is logged rather than fatal.
    try:
        with open(str(CEPH_KEYRING), "w") as key_file:
            key_file.write(
                "[client.admin]\n\tkey = {}\n".format(ceph_admin.key()))
    except IOError as err:
        hookenv.log("IOError writing admin.keyring: {}".format(err))
+
+
def query_cephfs_enabled():
    """Return True when the Ceph cluster reports any MDS daemons
    (i.e. CephFS is available); False on error or timeout."""
    install_ceph_common()
    cmd = ["ceph", "mds", "versions", "-c", str(CEPH_CONF)]
    try:
        raw = check_output(cmd, timeout=60)
    except CalledProcessError:
        hookenv.log("Unable to determine if CephFS is enabled", "ERROR")
        return False
    except TimeoutExpired:
        hookenv.log("Timeout attempting to determine if CephFS is enabled",
                    "ERROR")
        return False
    # Empty JSON (no MDS daemons) is falsy.
    return bool(json.loads(raw.decode()))
+
+
def get_cephfs_fsname():
    """Return the name of the CephFS filesystem backed by the
    'ceph-fs_data' pool, or None when it cannot be determined."""
    install_ceph_common()
    try:
        data = json.loads(check_output(["ceph", "fs", "ls", "-f", "json"], timeout=60))
    except TimeoutExpired:
        hookenv.log("Timeout attempting to determine fsname", "ERROR")
        return None
    except CalledProcessError:
        # Consistent with query_cephfs_enabled(): a failed ceph call is
        # logged and treated as "not available" instead of raising.
        hookenv.log("Unable to determine fsname", "ERROR")
        return None
    for fs in data:
        if "ceph-fs_data" in fs["data_pools"]:
            return fs["name"]
    # Implicit None when no filesystem uses the expected data pool.
    return None
+
+
def deprecate_auth_file(auth_file):
    """
    In 1.19+, file-based authentication was deprecated in favor of webhook
    auth. Back up the original file once, then replace it with a notice.
    """
    csv_file = Path(auth_file)
    csv_file.parent.mkdir(exist_ok=True)

    backup = Path("{}.{}".format(csv_file, AUTH_BACKUP_EXT))
    if csv_file.exists() and not backup.exists():
        csv_file.rename(backup)
    csv_file.write_text(
        "# File-based authentication was removed in Charmed Kubernetes 1.19\n"
    )
+
+
def migrate_auth_file(filename):
    """Create secrets or known tokens depending on what file is being migrated."""
    with open(str(filename), "r") as f:
        rows = list(csv.reader(f))

    for row in rows:
        try:
            if row[0].startswith("#"):
                # Comment row; nothing to migrate.
                continue
            if filename == AUTH_BASIC_FILE:
                create_known_token(*row)
            elif filename == AUTH_TOKENS_FILE:
                create_secret(*row)
            else:
                # log and return if we don't recognize the auth file
                hookenv.log("Unknown auth file: {}".format(filename))
                return False
        except IndexError:
            # Blank or short row; skip it.
            pass
    deprecate_auth_file(filename)
    return True
+
+
def generate_rfc1123(length=10):
    """Generate a random string compliant with RFC 1123.

    https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#dns-subdomain-names

    param: length - the length of the string to generate (capped at 253)
    """
    length = min(length, 253)
    alphabet = string.ascii_lowercase + string.digits
    rng = random.SystemRandom()
    return "".join(rng.choice(alphabet) for _ in range(length))
+
+
def token_generator(length=32):
    """Generate a random token for use in account tokens.

    param: length - the length of the token to generate
    """
    alphabet = string.ascii_letters + string.digits
    rng = random.SystemRandom()
    return "".join(rng.choice(alphabet) for _ in range(length))
+
+
def create_known_token(token, username, user, groups=None):
    """Insert or update an entry in the known_tokens csv auth file.

    An existing row is matched first by the `user` column, then by
    `username`; otherwise a new row is added.  NOTE(review): when a row
    is matched by `username`, it remains keyed under its previous `user`
    in tokens_by_user even though its 'user' field is updated -- confirm
    this aliasing is intended.
    """
    known_tokens = Path(AUTH_TOKENS_FILE)
    known_tokens.parent.mkdir(exist_ok=True)
    csv_fields = ["token", "username", "user", "groups"]

    try:
        with known_tokens.open("r") as f:
            # Index existing rows by the 'user' column; on duplicate
            # users the later row wins.
            tokens_by_user = {r["user"]: r for r in csv.DictReader(f, csv_fields)}
    except FileNotFoundError:
        tokens_by_user = {}
    tokens_by_username = {r["username"]: r for r in tokens_by_user.values()}

    if user in tokens_by_user:
        record = tokens_by_user[user]
    elif username in tokens_by_username:
        # Alias of a row already stored in tokens_by_user.
        record = tokens_by_username[username]
    else:
        record = tokens_by_user[user] = {}
    record.update(
        {
            "token": token,
            "username": username,
            "user": user,
            "groups": groups,
        }
    )

    # Normalize: drop a falsy groups value rather than storing it.
    if not record["groups"]:
        del record["groups"]

    # Rewrite the whole file (no header row) with unix line endings.
    with known_tokens.open("w") as f:
        csv.DictWriter(f, csv_fields, lineterminator="\n").writerows(
            tokens_by_user.values()
        )
+
+
def create_secret(token, username, user, groups=None):
    """Create or update the Kubernetes secret holding this user's
    auth-webhook token.

    Returns True when the rendered manifest applied cleanly, False
    otherwise.
    """
    # NOTE(review): get_secret_names() is defined elsewhere in this
    # module; presumably it maps usernames to existing secret names.
    secrets = get_secret_names()
    if username in secrets:
        # Use existing secret ID if one exists for our username
        secret_id = secrets[username]
    else:
        # secret IDs must be unique and rfc1123 compliant
        sani_name = re.sub("[^0-9a-z.-]+", "-", user.lower())
        secret_id = "auth-{}-{}".format(sani_name, generate_rfc1123(10))

    # The authenticator expects tokens to be in the form user::token
    token_delim = "::"
    if token_delim not in token:
        token = "{}::{}".format(user, token)

    # Kubernetes secret data values must be base64-encoded.
    context = {
        "type": AUTH_SECRET_TYPE,
        "secret_name": secret_id,
        "secret_namespace": AUTH_SECRET_NS,
        "user": b64encode(user.encode("UTF-8")).decode("utf-8"),
        "username": b64encode(username.encode("UTF-8")).decode("utf-8"),
        "password": b64encode(token.encode("UTF-8")).decode("utf-8"),
        "groups": b64encode(groups.encode("UTF-8")).decode("utf-8") if groups else "",
    }
    # Render the manifest to a named temp file and apply it while the
    # file is still open (kubectl reads it by name).
    with tempfile.NamedTemporaryFile() as tmp_manifest:
        render(
            "cdk.master.auth-webhook-secret.yaml", tmp_manifest.name, context=context
        )

        if kubernetes_common.kubectl_manifest("apply", tmp_manifest.name):
            hookenv.log("Created secret for {}".format(username))
            return True
        else:
            hookenv.log("WARN: Unable to create secret for {}".format(username))
            return False
+
+
+def delete_secret(secret_id):
+ """Delete a given secret id."""
+ # If this fails, it's most likely because we're trying to delete a secret
+ # that doesn't exist. Let the caller decide if failure is a problem.
+ return kubernetes_common.kubectl_success(
+ "delete", "secret", "-n", AUTH_SECRET_NS, secret_id
+ )
+
+
+def get_csv_password(csv_fname, user):
+ """Get the password for the given user within the csv file provided."""
+ root_cdk = "/root/cdk"
+ tokens_fname = Path(root_cdk) / csv_fname
+ if not tokens_fname.is_file():
+ return None
+ with tokens_fname.open("r") as stream:
+ for line in stream:
+ record = line.split(",")
+ try:
+ if record[1] == user:
+ return record[0]
+ except IndexError:
+ # probably a blank line or comment; move on
+ continue
+ return None
+
+
+def get_secret_names():
+ """Return a dict of 'username: secret_id' for Charmed Kubernetes users."""
+ try:
+ output = kubernetes_common.kubectl(
+ "get",
+ "secrets",
+ "-n",
+ AUTH_SECRET_NS,
+ "--field-selector",
+ "type={}".format(AUTH_SECRET_TYPE),
+ "-o",
+ "json",
+ ).decode("UTF-8")
+ except (CalledProcessError, FileNotFoundError):
+ # The api server may not be up, or we may be trying to run kubelet before
+ # the snap is installed. Send back an empty dict.
+ hookenv.log("Unable to get existing secrets", level=hookenv.WARNING)
+ return {}
+
+ secrets = json.loads(output)
+ secret_names = {}
+ if "items" in secrets:
+ for secret in secrets["items"]:
+ try:
+ secret_id = secret["metadata"]["name"]
+ username_b64 = secret["data"]["username"].encode("UTF-8")
+ except (KeyError, TypeError):
+ # CK secrets will have populated 'data', but not all secrets do
+ continue
+ secret_names[b64decode(username_b64).decode("UTF-8")] = secret_id
+ return secret_names
+
+
+def get_secret_password(username):
+ """Get the password for the given user from the secret that CK created."""
+ try:
+ output = kubernetes_common.kubectl(
+ "get",
+ "secrets",
+ "-n",
+ AUTH_SECRET_NS,
+ "--field-selector",
+ "type={}".format(AUTH_SECRET_TYPE),
+ "-o",
+ "json",
+ ).decode("UTF-8")
+ except CalledProcessError:
+        # NB: apiserver probably isn't up. This can happen on bootstrap or upgrade
+ # while trying to build kubeconfig files. If we need the 'admin' token during
+ # this time, pull it directly out of the kubeconfig file if possible.
+ token = None
+ if username == "admin":
+ admin_kubeconfig = Path("/root/.kube/config")
+ if admin_kubeconfig.exists():
+ with admin_kubeconfig.open("r") as f:
+ data = safe_load(f)
+ try:
+ token = data["users"][0]["user"]["token"]
+ except (KeyError, ValueError):
+ pass
+ return token
+ except FileNotFoundError:
+ # New deployments may ask for a token before the kubectl snap is installed.
+ # Give them nothing!
+ return None
+
+ secrets = json.loads(output)
+ if "items" in secrets:
+ for secret in secrets["items"]:
+ try:
+ data_b64 = secret["data"]
+ password_b64 = data_b64["password"].encode("UTF-8")
+ username_b64 = data_b64["username"].encode("UTF-8")
+ except (KeyError, TypeError):
+ # CK authn secrets will have populated 'data', but not all secrets do
+ continue
+
+ password = b64decode(password_b64).decode("UTF-8")
+ secret_user = b64decode(username_b64).decode("UTF-8")
+ if username == secret_user:
+ return password
+ return None
+
+
+try:
+ ipaddress.IPv4Network.subnet_of
+except AttributeError:
+ # Returns True if a is subnet of b
+ # This method is copied from cpython as it is available only from
+ # python 3.7
+ # https://github.com/python/cpython/blob/3.7/Lib/ipaddress.py#L1000
+ def _is_subnet_of(a, b):
+ try:
+ # Always false if one is v4 and the other is v6.
+ if a._version != b._version:
+ raise TypeError("{} and {} are not of the same version".format(a, b))
+ return (
+ b.network_address <= a.network_address
+ and b.broadcast_address >= a.broadcast_address
+ )
+ except AttributeError:
+ raise TypeError(
+ "Unable to test subnet containment " "between {} and {}".format(a, b)
+ )
+
+ ipaddress.IPv4Network.subnet_of = _is_subnet_of
+ ipaddress.IPv6Network.subnet_of = _is_subnet_of
+
+
+def is_service_cidr_expansion():
+ service_cidr_from_db = db.get("kubernetes-master.service-cidr")
+ service_cidr_from_config = hookenv.config("service-cidr")
+ if not service_cidr_from_db:
+ return False
+
+ # Do not consider as expansion if both old and new service cidr are same
+ if service_cidr_from_db == service_cidr_from_config:
+ return False
+
+ current_networks = kubernetes_common.get_networks(service_cidr_from_db)
+ new_networks = kubernetes_common.get_networks(service_cidr_from_config)
+ if len(current_networks) != len(new_networks) or not all(
+ cur.subnet_of(new) for cur, new in zip(current_networks, new_networks)
+ ):
+ hookenv.log("WARN: New k8s service cidr not superset of old one")
+ return False
+
+ return True
+
+
+def service_cidr():
+    """Return the charm's service-cidr config."""
+ frozen_cidr = db.get("kubernetes-master.service-cidr")
+ return frozen_cidr or hookenv.config("service-cidr")
+
+
+def freeze_service_cidr():
+ """Freeze the service CIDR. Once the apiserver has started, we can no
+ longer safely change this value."""
+ frozen_service_cidr = db.get("kubernetes-master.service-cidr")
+ if not frozen_service_cidr or is_service_cidr_expansion():
+ db.set("kubernetes-master.service-cidr", hookenv.config("service-cidr"))
+
+
+def get_preferred_service_network(service_cidrs):
+ """Get the network preferred for cluster service, preferring IPv4"""
+ net_ipv4 = kubernetes_common.get_ipv4_network(service_cidrs)
+ net_ipv6 = kubernetes_common.get_ipv6_network(service_cidrs)
+ return net_ipv4 or net_ipv6
+
+
+def get_dns_ip():
+ return kubernetes_common.get_service_ip("kube-dns", namespace="kube-system")
+
+
+def get_kubernetes_service_ips():
+ """Get the IP address(es) for the kubernetes service based on the cidr."""
+ return [
+ next(network.hosts()).exploded
+ for network in kubernetes_common.get_networks(service_cidr())
+ ]
+
+
+def get_snap_revs(snaps):
+ """Get a dict of snap revisions for a given list of snaps."""
+ channel = hookenv.config("channel")
+ rev_info = {}
+ for s in sorted(snaps):
+ try:
+            # valid info should look like:
+ # ...
+ # channels:
+ # latest/stable: 1.18.8 2020-08-27 (1595) 22MB classic
+ # latest/candidate: 1.18.8 2020-08-27 (1595) 22MB classic
+ # ...
+ info = check_output(["snap", "info", s]).decode("utf8", errors="ignore")
+ except CalledProcessError:
+ # If 'snap info' fails for whatever reason, just empty the info
+ info = ""
+ snap_rev = None
+ yaml_data = safe_load(info)
+ if yaml_data and "channels" in yaml_data:
+ try:
+ # valid data should look like:
+ # ['1.18.8', '2020-08-27', '(1604)', '21MB', 'classic']
+ d = yaml_data["channels"][channel].split()
+ snap_rev = d[2].strip("()")
+ except (KeyError, IndexError):
+ hookenv.log(
+ "Could not determine revision for snap: {}".format(s),
+ level=hookenv.WARNING,
+ )
+ rev_info[s] = snap_rev
+ return rev_info
diff --git a/kubernetes-master/lib/charms/layer/nagios.py b/kubernetes-master/lib/charms/layer/nagios.py
new file mode 100644
index 0000000..f6ad998
--- /dev/null
+++ b/kubernetes-master/lib/charms/layer/nagios.py
@@ -0,0 +1,60 @@
+from pathlib import Path
+
+NAGIOS_PLUGINS_DIR = '/usr/lib/nagios/plugins'
+
+
+def install_nagios_plugin_from_text(text, plugin_name):
+ """ Install a nagios plugin.
+
+ Args:
+ text: Plugin source code (str)
+ plugin_name: Name of the plugin in nagios
+
+ Returns: Full path to installed plugin
+ """
+ dest_path = Path(NAGIOS_PLUGINS_DIR) / plugin_name
+ if dest_path.exists():
+ # we could complain here, test the files are the same contents, or
+ # just bail. Idempotency is a big deal in Juju, so I'd like to be
+ # ok with being called with the same file multiple times, but we
+ # certainly want to catch the case where multiple layers are using
+ # the same filename for their nagios checks.
+ dest = dest_path.read_text()
+ if dest == text:
+ # same file
+ return dest_path
+ # different file contents!
+ # maybe someone changed options or something so we need to write
+ # it again
+
+ dest_path.write_text(text)
+ dest_path.chmod(0o755)
+
+ return dest_path
+
+
+def install_nagios_plugin_from_file(source_file_path, plugin_name):
+ """ Install a nagios plugin.
+
+ Args:
+ source_file_path: Path to plugin source file
+ plugin_name: Name of the plugin in nagios
+
+ Returns: Full path to installed plugin
+ """
+
+ return install_nagios_plugin_from_text(Path(source_file_path).read_text(),
+ plugin_name)
+
+
+def remove_nagios_plugin(plugin_name):
+ """ Remove a nagios plugin.
+
+ Args:
+ plugin_name: Name of the plugin in nagios
+
+ Returns: None
+ """
+ dest_path = Path(NAGIOS_PLUGINS_DIR) / plugin_name
+ if dest_path.exists():
+ dest_path.unlink()
diff --git a/kubernetes-master/lib/charms/layer/options.py b/kubernetes-master/lib/charms/layer/options.py
new file mode 100644
index 0000000..d3f273f
--- /dev/null
+++ b/kubernetes-master/lib/charms/layer/options.py
@@ -0,0 +1,26 @@
+import os
+from pathlib import Path
+
+import yaml
+
+
+_CHARM_PATH = Path(os.environ.get('JUJU_CHARM_DIR', '.'))
+_DEFAULT_FILE = _CHARM_PATH / 'layer.yaml'
+_CACHE = {}
+
+
+def get(section=None, option=None, layer_file=_DEFAULT_FILE):
+ if option and not section:
+ raise ValueError('Cannot specify option without section')
+
+ layer_file = (_CHARM_PATH / layer_file).resolve()
+ if layer_file not in _CACHE:
+ with layer_file.open() as fp:
+ _CACHE[layer_file] = yaml.safe_load(fp.read())
+
+ data = _CACHE[layer_file].get('options', {})
+ if section:
+ data = data.get(section, {})
+ if option:
+ data = data.get(option)
+ return data
diff --git a/kubernetes-master/lib/charms/layer/snap.py b/kubernetes-master/lib/charms/layer/snap.py
new file mode 100644
index 0000000..88b8d89
--- /dev/null
+++ b/kubernetes-master/lib/charms/layer/snap.py
@@ -0,0 +1,455 @@
+# Copyright 2016-2019 Canonical Ltd.
+#
+# This file is part of the Snap layer for Juju.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import os
+import subprocess
+
+import tenacity
+import yaml
+
+from charmhelpers.core import hookenv
+from charms import layer
+from charms import reactive
+from charms.reactive.helpers import any_file_changed, data_changed
+from datetime import datetime, timedelta
+
+
+def get_installed_flag(snapname):
+ return "snap.installed.{}".format(snapname)
+
+
+def get_refresh_available_flag(snapname):
+ return "snap.refresh-available.{}".format(snapname)
+
+
+def get_local_flag(snapname):
+ return "snap.local.{}".format(snapname)
+
+
+def get_disabled_flag(snapname):
+ return "snap.disabled.{}".format(snapname)
+
+
+def install(snapname, **kw):
+ """Install a snap.
+
+    Snap will be installed from the corresponding resource if available,
+ otherwise from the Snap Store.
+
+ Sets the snap.installed.{snapname} flag.
+
+ If the snap.installed.{snapname} flag is already set then the refresh()
+ function is called.
+ """
+ installed_flag = get_installed_flag(snapname)
+ local_flag = get_local_flag(snapname)
+ if reactive.is_flag_set(installed_flag):
+ refresh(snapname, **kw)
+ else:
+ if hookenv.has_juju_version("2.0"):
+ res_path = _resource_get(snapname)
+ if res_path is False:
+ _install_store(snapname, **kw)
+ else:
+ _install_local(res_path, **kw)
+ reactive.set_flag(local_flag)
+ else:
+ _install_store(snapname, **kw)
+ reactive.set_flag(installed_flag)
+
+ # Installing any snap will first ensure that 'core' is installed. Set an
+ # appropriate flag for consumers that want to get/set core options.
+ core_installed = get_installed_flag("core")
+ if not reactive.is_flag_set(core_installed):
+ reactive.set_flag(core_installed)
+
+
+def is_installed(snapname):
+ return reactive.is_flag_set(get_installed_flag(snapname))
+
+
+def is_local(snapname):
+ return reactive.is_flag_set(get_local_flag(snapname))
+
+
+def get_installed_snaps():
+ """Return a list of snaps which are installed by this layer."""
+ flag_prefix = "snap.installed."
+ return [flag[len(flag_prefix) :] for flag in reactive.get_flags() if flag.startswith(flag_prefix)]
+
+
+def refresh(snapname, **kw):
+ """Update a snap.
+
+    Snap will be pulled from the corresponding resource if available
+ and reinstalled if it has changed. Otherwise a 'snap refresh' is
+ run updating the snap from the Snap Store, potentially switching
+ channel and changing confinement options.
+ """
+ # Note that once you upload a resource, you can't remove it.
+ # This means we don't need to cope with an operator switching
+ # from a resource provided to a store provided snap, because there
+ # is no way for them to do that. Well, actually the operator could
+ # upload a zero byte resource, but then we would need to uninstall
+ # the snap before reinstalling from the store and that has the
+ # potential for data loss.
+ local_flag = get_local_flag(snapname)
+ if hookenv.has_juju_version("2.0"):
+ res_path = _resource_get(snapname)
+ if res_path is False:
+ _refresh_store(snapname, **kw)
+ reactive.clear_flag(local_flag)
+ else:
+ _install_local(res_path, **kw)
+ reactive.set_flag(local_flag)
+ else:
+ _refresh_store(snapname, **kw)
+ reactive.clear_flag(local_flag)
+
+
+def remove(snapname):
+ hookenv.log("Removing snap {}".format(snapname))
+ subprocess.check_call(["snap", "remove", snapname])
+ reactive.clear_flag(get_installed_flag(snapname))
+
+
+def connect(plug, slot):
+ """Connect or reconnect a snap plug with a slot.
+
+ Each argument must be a two element tuple, corresponding to
+ the two arguments to the 'snap connect' command.
+ """
+ hookenv.log("Connecting {} to {}".format(plug, slot), hookenv.DEBUG)
+ subprocess.check_call(["snap", "connect", plug, slot])
+
+
+def connect_all():
+ """Connect or reconnect all interface connections defined in layer.yaml.
+
+ This method will fail if called before all referenced snaps have been
+ installed.
+ """
+ opts = layer.options("snap")
+ for snapname, snap_opts in opts.items():
+ for plug, slot in snap_opts.get("connect", []):
+ connect(plug, slot)
+
+
+def disable(snapname):
+ """Disables a snap in the system
+
+ Sets the snap.disabled.{snapname} flag
+
+ This method doesn't affect any snap flag if requested snap does not
+ exist
+ """
+ hookenv.log("Disabling {} snap".format(snapname))
+ if not reactive.is_flag_set(get_installed_flag(snapname)):
+ hookenv.log(
+ "Cannot disable {} snap because it is not installed".format(snapname),
+ hookenv.WARNING,
+ )
+ return
+
+ subprocess.check_call(["snap", "disable", snapname])
+ reactive.set_flag(get_disabled_flag(snapname))
+
+
+def enable(snapname):
+ """Enables a snap in the system
+
+ Clears the snap.disabled.{snapname} flag
+
+    This method doesn't affect any snap flag if requested snap does not
+ exist
+ """
+ hookenv.log("Enabling {} snap".format(snapname))
+ if not reactive.is_flag_set(get_installed_flag(snapname)):
+ hookenv.log(
+ "Cannot enable {} snap because it is not installed".format(snapname),
+ hookenv.WARNING,
+ )
+ return
+
+ subprocess.check_call(["snap", "enable", snapname])
+ reactive.clear_flag(get_disabled_flag(snapname))
+
+
+def restart(snapname):
+ """Restarts a snap in the system
+
+ This method doesn't affect any snap flag if requested snap does not
+ exist
+ """
+ hookenv.log("Restarting {} snap".format(snapname))
+ if not reactive.is_flag_set(get_installed_flag(snapname)):
+ hookenv.log(
+ "Cannot restart {} snap because it is not installed".format(snapname),
+ hookenv.WARNING,
+ )
+ return
+
+ subprocess.check_call(["snap", "restart", snapname])
+
+
+def set(snapname, key, value):
+ """Changes configuration options in a snap
+
+ This method will fail if snapname is not an installed snap
+ """
+ hookenv.log("Set config {}={} for snap {}".format(key, value, snapname))
+ if not reactive.is_flag_set(get_installed_flag(snapname)):
+ hookenv.log(
+ "Cannot set {} snap config because it is not installed".format(snapname),
+ hookenv.WARNING,
+ )
+ return
+
+ subprocess.check_call(["snap", "set", snapname, "{}={}".format(key, value)])
+
+
+def set_refresh_timer(timer=""):
+ """Set the system refresh.timer option (snapd 2.31+)
+
+ This method sets how often snapd will refresh installed snaps. Call with
+ an empty timer string to use the system default (currently 4x per day).
+ Use 'max' to schedule refreshes as far into the future as possible
+ (currently 1 month). Also accepts custom timer strings as defined in the
+ refresh.timer section here:
+ https://forum.snapcraft.io/t/system-options/87
+
+ This method does not validate custom strings and will lead to a
+ CalledProcessError if an invalid string is given.
+
+ :param: timer: empty string (default), 'max', or custom string
+ """
+ if timer == "max":
+ # A month from yesterday is the farthest we should delay to safely stay
+ # under the 1 month max. Translate that to a valid refresh.timer value.
+ # Examples:
+ # - Today is Friday the 13th, set the refresh timer to
+ # 'thu2' (Thursday the 12th is the 2nd thursday of the month).
+ # - Today is Tuesday the 1st, set the refresh timer to
+ # 'mon5' (Monday the [28..31] is the 5th monday of the month).
+ yesterday = datetime.now() - timedelta(1)
+ dow = yesterday.strftime("%a").lower()
+ # increment after int division because we want occurrence 1-5, not 0-4.
+ occurrence = yesterday.day // 7 + 1
+ timer = "{}{}".format(dow, occurrence)
+
+ # NB: 'system' became synonymous with 'core' in 2.32.5, but we use 'core'
+ # here to ensure max compatibility.
+ set(snapname="core", key="refresh.timer", value=timer)
+ subprocess.check_call(["systemctl", "restart", "snapd.service"])
+
+
+def get(snapname, key):
+ """Gets configuration options for a snap
+
+ This method returns the stripped output from the snap get command.
+ This method will fail if snapname is not an installed snap.
+ """
+ hookenv.log("Get config {} for snap {}".format(key, snapname))
+ if not reactive.is_flag_set(get_installed_flag(snapname)):
+ hookenv.log(
+ "Cannot get {} snap config because it is not installed".format(snapname),
+ hookenv.WARNING,
+ )
+ return
+
+ return subprocess.check_output(["snap", "get", snapname, key]).strip()
+
+
+def get_installed_version(snapname):
+ """Gets the installed version of a snapname.
+ This function will fail if snapname is not an installed snap.
+ """
+ cmd = ["snap", "info", snapname]
+ hookenv.log("Get installed key for snap {}".format(snapname))
+ if not reactive.is_flag_set(get_installed_flag(snapname)):
+ hookenv.log(
+ "Cannot get {} snap installed version because it is not installed".format(snapname),
+ hookenv.WARNING,
+ )
+ return
+ return subprocess.check_output(cmd).decode("utf-8", errors="replace").partition("installed:")[-1].split()[0]
+
+
+def get_installed_channel(snapname):
+ """Gets the tracking (channel) of a snapname.
+ This function will fail if snapname is not an installed snap.
+ """
+ cmd = ["snap", "info", snapname]
+ hookenv.log("Get channel for snap {}".format(snapname))
+ if not reactive.is_flag_set(get_installed_flag(snapname)):
+ hookenv.log(
+ "Cannot get snap tracking (channel) because it is not installed",
+ hookenv.WARNING,
+ )
+ return
+ return subprocess.check_output(cmd).decode("utf-8", errors="replace").partition("tracking:")[-1].split()[0]
+
+
+def _snap_args(
+ channel="stable",
+ devmode=False,
+ jailmode=False,
+ dangerous=False,
+ force_dangerous=False,
+ connect=None,
+ classic=False,
+ revision=None,
+):
+ yield "--channel={}".format(channel)
+ if devmode is True:
+ yield "--devmode"
+ if jailmode is True:
+ yield "--jailmode"
+ if force_dangerous is True or dangerous is True:
+ yield "--dangerous"
+ if classic is True:
+ yield "--classic"
+ if revision is not None:
+ yield "--revision={}".format(revision)
+
+
+def _install_local(path, **kw):
+ key = "snap.local.{}".format(path)
+ if data_changed(key, kw) or any_file_changed([path]):
+ cmd = ["snap", "install"]
+ cmd.extend(_snap_args(**kw))
+ cmd.append("--dangerous")
+ cmd.append(path)
+ hookenv.log("Installing {} from local resource".format(path))
+ subprocess.check_call(cmd)
+
+
+def _install_store(snapname, **kw):
+ """Install snap from store
+
+ :param snapname: Name of snap to install
+ :type snapname: str
+ :param kw: Keyword arguments to pass on to ``snap install``
+ :type kw: Dict[str, str]
+ :raises: subprocess.CalledProcessError
+ """
+ cmd = ["snap", "install"]
+ cmd.extend(_snap_args(**kw))
+ cmd.append(snapname)
+ hookenv.log("Installing {} from store".format(snapname))
+
+ for attempt in tenacity.Retrying(
+ wait=tenacity.wait_fixed(10), # seconds
+ stop=tenacity.stop_after_attempt(3),
+ reraise=True,
+ ):
+ with attempt:
+ try:
+ out = subprocess.check_output(cmd, stderr=subprocess.STDOUT)
+ hookenv.log(
+ 'Installation successful cmd="{}" output="{}"'.format(cmd, out),
+ level=hookenv.DEBUG,
+ )
+ reactive.clear_flag(get_local_flag(snapname))
+ except subprocess.CalledProcessError as cp:
+ hookenv.log(
+ 'Installation failed cmd="{}" returncode={} output="{}"'.format(cmd, cp.returncode, cp.output),
+ level=hookenv.ERROR,
+ )
+ raise
+
+
+def _refresh_store(snapname, **kw):
+ if not data_changed("snap.opts.{}".format(snapname), kw):
+ return
+
+ # --amend allows us to refresh from a local resource
+ cmd = ["snap", "refresh", "--amend"]
+ cmd.extend(_snap_args(**kw))
+ cmd.append(snapname)
+ hookenv.log("Refreshing {} from store".format(snapname))
+ out = subprocess.check_output(cmd, stderr=subprocess.STDOUT)
+ print(out)
+
+
+def _resource_get(snapname):
+ """Used to fetch the resource path of the given name.
+
+ This wrapper obtains a resource path and adds an additional
+ check to return False if the resource is zero length.
+ """
+ res_path = hookenv.resource_get(snapname)
+ if res_path and os.stat(res_path).st_size != 0:
+ return res_path
+ return False
+
+
+def get_available_refreshes():
+ """Return a list of snaps which have refreshes available."""
+ try:
+ out = subprocess.check_output(["snap", "refresh", "--list"]).decode("utf8")
+ except subprocess.CalledProcessError:
+ # If snap refresh fails for whatever reason, we should just return no
+ # refreshes available - LP:1869630.
+ return []
+
+ if out == "All snaps up to date.":
+ return []
+ else:
+ return [line.split()[0] for line in out.splitlines()[1:]]
+
+
+def is_refresh_available(snapname):
+ """Check whether a new revision is available for the given snap."""
+ return reactive.is_flag_set(get_refresh_available_flag(snapname))
+
+
+def _check_refresh_available(snapname):
+ return snapname in get_available_refreshes()
+
+
+def create_cohort_snapshot(snapname):
+ """Create a new cohort key for the given snap.
+
+ Cohort keys represent a snapshot of the revision of a snap at the time
+ the key was created. These keys can then be used on any machine to lock
+ the revision of the snap until a new cohort is joined (or the key expires,
+ after 90 days). This is used to maintain consistency of the revision of
+ the snap across units or applications, and to manage the refresh of the
+ snap in a controlled manner.
+
+ Returns a cohort key.
+ """
+ out = subprocess.check_output(["snap", "create-cohort", snapname])
+ data = yaml.safe_load(out.decode("utf8"))
+ return data["cohorts"][snapname]["cohort-key"]
+
+
+def join_cohort_snapshot(snapname, cohort_key):
+ """Refresh the snap into the given cohort.
+
+ If the snap was previously in a cohort, this will update the revision
+ to that of the new cohort snapshot. Note that this does not change the
+ channel that the snap is in, only the revision within that channel.
+ """
+ if is_local(snapname):
+ # joining a cohort can override a locally installed snap
+ hookenv.log("Skipping joining cohort for local snap: " "{}".format(snapname))
+ return
+ subprocess.check_output(["snap", "refresh", snapname, "--cohort", cohort_key])
+ # even though we just refreshed to the latest in the cohort, it's
+ # slightly possible that there's a newer rev available beyond the cohort
+ reactive.toggle_flag(get_refresh_available_flag(snapname), _check_refresh_available(snapname))
diff --git a/kubernetes-master/lib/charms/layer/status.py b/kubernetes-master/lib/charms/layer/status.py
new file mode 100644
index 0000000..95b2997
--- /dev/null
+++ b/kubernetes-master/lib/charms/layer/status.py
@@ -0,0 +1,189 @@
+import inspect
+import errno
+import subprocess
+import yaml
+from enum import Enum
+from functools import wraps
+from pathlib import Path
+
+from charmhelpers.core import hookenv
+from charms import layer
+
+
+_orig_call = subprocess.call
+_statuses = {'_initialized': False,
+ '_finalized': False}
+
+
+class WorkloadState(Enum):
+ """
+ Enum of the valid workload states.
+
+ Valid options are:
+
+ * `WorkloadState.MAINTENANCE`
+ * `WorkloadState.BLOCKED`
+ * `WorkloadState.WAITING`
+ * `WorkloadState.ACTIVE`
+ """
+ # note: order here determines precedence of state
+ MAINTENANCE = 'maintenance'
+ BLOCKED = 'blocked'
+ WAITING = 'waiting'
+ ACTIVE = 'active'
+
+
+def maintenance(message):
+ """
+ Set the status to the `MAINTENANCE` state with the given operator message.
+
+ # Parameters
+ `message` (str): Message to convey to the operator.
+ """
+ status_set(WorkloadState.MAINTENANCE, message)
+
+
+def maint(message):
+ """
+ Shorthand alias for
+ [maintenance](status.md#charms.layer.status.maintenance).
+
+ # Parameters
+ `message` (str): Message to convey to the operator.
+ """
+ maintenance(message)
+
+
+def blocked(message):
+ """
+ Set the status to the `BLOCKED` state with the given operator message.
+
+ # Parameters
+ `message` (str): Message to convey to the operator.
+ """
+ status_set(WorkloadState.BLOCKED, message)
+
+
+def waiting(message):
+ """
+ Set the status to the `WAITING` state with the given operator message.
+
+ # Parameters
+ `message` (str): Message to convey to the operator.
+ """
+ status_set(WorkloadState.WAITING, message)
+
+
+def active(message):
+ """
+ Set the status to the `ACTIVE` state with the given operator message.
+
+ # Parameters
+ `message` (str): Message to convey to the operator.
+ """
+ status_set(WorkloadState.ACTIVE, message)
+
+
+def status_set(workload_state, message):
+ """
+ Set the status to the given workload state with a message.
+
+ # Parameters
+ `workload_state` (WorkloadState or str): State of the workload. Should be
+ a [WorkloadState](status.md#charms.layer.status.WorkloadState) enum
+ member, or the string value of one of those members.
+ `message` (str): Message to convey to the operator.
+ """
+ if not isinstance(workload_state, WorkloadState):
+ workload_state = WorkloadState(workload_state)
+ if workload_state is WorkloadState.MAINTENANCE:
+ _status_set_immediate(workload_state, message)
+ return
+ layer = _find_calling_layer()
+ _statuses.setdefault(workload_state, []).append((layer, message))
+ if not _statuses['_initialized'] or _statuses['_finalized']:
+ # We either aren't initialized, so the finalizer may never be run,
+ # or the finalizer has already run, so it won't run again. In either
+ # case, we need to manually invoke it to ensure the status gets set.
+ _finalize()
+
+
+def _find_calling_layer():
+ for frame in inspect.stack():
+ # switch to .filename when trusty (Python 3.4) is EOL
+ fn = Path(frame[1])
+ if fn.parent.stem not in ('reactive', 'layer', 'charms'):
+ continue
+ layer_name = fn.stem
+ if layer_name == 'status':
+ continue # skip our own frames
+ return layer_name
+ return None
+
+
+def _initialize():
+ if not _statuses['_initialized']:
+ if layer.options.get('status', 'patch-hookenv'):
+ _patch_hookenv()
+ hookenv.atexit(_finalize)
+ _statuses['_initialized'] = True
+
+
+def _finalize():
+ if _statuses['_initialized']:
+ # If we haven't been initialized, we can't truly be finalized.
+ # This makes things more efficient if an action sets a status
+ # but subsequently starts the reactive bus.
+ _statuses['_finalized'] = True
+ charm_name = hookenv.charm_name()
+ charm_dir = Path(hookenv.charm_dir())
+ with charm_dir.joinpath('layer.yaml').open() as fp:
+ includes = yaml.safe_load(fp.read()).get('includes', [])
+ layer_order = includes + [charm_name]
+
+ for workload_state in WorkloadState:
+ if workload_state not in _statuses:
+ continue
+ if not _statuses[workload_state]:
+ continue
+
+ def _get_key(record):
+ layer_name, message = record
+ if layer_name in layer_order:
+ return layer_order.index(layer_name)
+ else:
+ return 0
+
+ sorted_statuses = sorted(_statuses[workload_state], key=_get_key)
+ layer_name, message = sorted_statuses[-1]
+ _status_set_immediate(workload_state, message)
+ break
+
+
+def _status_set_immediate(workload_state, message):
+ workload_state = workload_state.value
+ try:
+ hookenv.log('status-set: {}: {}'.format(workload_state, message),
+ hookenv.INFO)
+ ret = _orig_call(['status-set', workload_state, message])
+ if ret == 0:
+ return
+ except OSError as e:
+ # ignore status-set not available on older controllers
+ if e.errno != errno.ENOENT:
+ raise
+
+
+def _patch_hookenv():
+ # we can't patch hookenv.status_set directly because other layers may have
+ # already imported it into their namespace, so we have to patch sp.call
+ subprocess.call = _patched_call
+
+
+@wraps(_orig_call)
+def _patched_call(cmd, *args, **kwargs):
+ if not isinstance(cmd, list) or cmd[0] != 'status-set':
+ return _orig_call(cmd, *args, **kwargs)
+ _, workload_state, message = cmd
+ status_set(workload_state, message)
+ return 0 # make hookenv.status_set not emit spurious failure logs
diff --git a/kubernetes-master/lib/charms/layer/tls_client.py b/kubernetes-master/lib/charms/layer/tls_client.py
new file mode 100644
index 0000000..b2980dc
--- /dev/null
+++ b/kubernetes-master/lib/charms/layer/tls_client.py
@@ -0,0 +1,61 @@
+# Copyright 2016-2017 Canonical Ltd.
+#
+# This file is part of the tls-client layer for Juju.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from charmhelpers.core.hookenv import log
+from charmhelpers.core import unitdata
+
+from charms.reactive import remove_state
+from charms.reactive import endpoint_from_flag
+
+
def reset_certificate_write_flag(cert_type):
    """Clear the ``tls_client.<type>.certificate.written`` reactive state.

    ``cert_type`` must be 'server', 'client', or 'ca'; any other value is
    logged and ignored.  Clearing the state lets the notification fire again
    on the next certificate write.
    """
    if cert_type in ('server', 'client', 'ca'):
        remove_state('tls_client.{0}.certificate.written'.format(cert_type))
    else:
        log('Unknown certificate type!')
+
+
def request_server_cert(common_name, sans=None, crt_path=None, key_path=None):
    """Request a server certificate from the certificates relation.

    When ``crt_path``/``key_path`` are supplied, the destinations are
    remembered in unit data so the files can be installed once the
    certificate is issued.  With neither path given, only the request is
    made.
    """
    tls = endpoint_from_flag('certificates.available')
    tls.request_server_cert(common_name, sans)
    if crt_path or key_path:
        kv = unitdata.kv()
        cert_paths = kv.get('layer.tls-client.cert-paths', {})
        # NOTE(review): if only one path is provided the other is stored as
        # the literal string "None" -- confirm callers always pass both.
        cert_paths.setdefault('server', {})[common_name] = {
            'crt': str(crt_path),
            'key': str(key_path),
        }
        kv.set('layer.tls-client.cert-paths', cert_paths)
+
+
def request_client_cert(common_name, sans=None, crt_path=None, key_path=None):
    """Request a client certificate from the certificates relation.

    When ``crt_path``/``key_path`` are supplied, the destinations are
    remembered in unit data so the files can be installed once the
    certificate is issued.  With neither path given, only the request is
    made.
    """
    tls = endpoint_from_flag('certificates.available')
    tls.request_client_cert(common_name, sans)
    if crt_path or key_path:
        kv = unitdata.kv()
        cert_paths = kv.get('layer.tls-client.cert-paths', {})
        # NOTE(review): if only one path is provided the other is stored as
        # the literal string "None" -- confirm callers always pass both.
        cert_paths.setdefault('client', {})[common_name] = {
            'crt': str(crt_path),
            'key': str(key_path),
        }
        kv.set('layer.tls-client.cert-paths', cert_paths)
diff --git a/kubernetes-master/lib/charms/layer/vault_kv.py b/kubernetes-master/lib/charms/layer/vault_kv.py
new file mode 100644
index 0000000..8ca023c
--- /dev/null
+++ b/kubernetes-master/lib/charms/layer/vault_kv.py
@@ -0,0 +1,260 @@
+import json
+from hashlib import md5
+
+from charmhelpers.core import hookenv
+from charmhelpers.core import unitdata
+from charmhelpers.contrib.openstack.vaultlocker import retrieve_secret_id
+from charms.reactive import data_changed
+from charms.reactive import endpoint_from_flag
+from charms.reactive import set_flag, clear_flag, get_flags
+
+import requests
+import hvac
+
+
def log(msg, *args, **kwargs):
    """Emit a DEBUG log line prefixed with ``vault-kv.log:``.

    ``msg`` is a ``str.format`` template filled from ``args``/``kwargs``.
    """
    formatted = msg.format(*args, **kwargs)
    hookenv.log('vault-kv.log: {}'.format(formatted), level=hookenv.DEBUG)
+
+
class VaultNotReady(Exception):
    """Raised when Vault is accessed before it is ready to serve requests."""
+
+
class _Singleton(type):
    """Metaclass caching the first instance; later calls return it."""

    def __call__(cls, *args, **kwargs):
        cached = getattr(cls, '_singleton_instance', None)
        if not isinstance(cached, cls):
            # no instance yet (or an inherited one of the wrong class):
            # build and cache a fresh instance on this class
            cached = super().__call__(*args, **kwargs)
            cls._singleton_instance = cached
        return cached
+
+
class _VaultBaseKV(dict, metaclass=_Singleton):
    """dict-backed view of one Vault KV path; writes persist straight to Vault.

    Subclasses must assign ``_path`` (in their ``__init__``, before calling
    ``super().__init__()``), which seeds the in-memory dict from Vault.

    Note: singleton via ``_Singleton``.
    """

    _path = None  # set by subclasses

    def __init__(self):
        # Seed the in-memory cache with whatever is currently stored in Vault.
        response = self._client.read(self._path)
        data = response['data'] if response else {}
        super().__init__(data)

    @property
    def _client(self):
        """
        Get an authenticated hvac.Client.

        The authentication token for the client is only valid for 60 seconds,
        after which a new client will need to be authenticated.

        Raises VaultNotReady if Vault is unreachable, sealed, or not yet
        initialized.
        """
        try:
            log('Logging {cls} in to {vault_url}',
                cls=type(self).__name__,
                vault_url=self._config['vault_url'])
            client = hvac.Client(url=self._config['vault_url'])
            client.auth_approle(self._config['role_id'],
                                self._config['secret_id'])
            return client
        except (requests.exceptions.ConnectionError,
                hvac.exceptions.VaultDown,
                hvac.exceptions.VaultNotInitialized,
                hvac.exceptions.BadGateway) as e:
            raise VaultNotReady() from e

    @property
    def _config(self):
        # NOTE(review): assigning to the *class* attribute replaces this
        # property descriptor on _VaultBaseKV itself, so get_vault_config()
        # runs only once per process; later reads hit the plain dict.
        _VaultBaseKV._config = get_vault_config()
        return _VaultBaseKV._config

    def __setitem__(self, key, value):
        # Write to Vault first; only update the local cache on success.
        log('Writing data to vault')
        self._client.write(self._path, **{key: value})
        super().__setitem__(key, value)

    def set(self, key, value):
        # alias in case a KV-like interface is preferred
        self[key] = value
+
+
class VaultUnitKV(_VaultBaseKV):
    """
    A dict-like store backed by Vault, scoped to the current unit.

    Keys must be strings; values may be anything JSON-serializable.  Use it
    as a plain dict, or via ``get``/``set`` for a KV-style interface.  Every
    write is persisted to Vault immediately and also cached in memory.

    Note: This class is a singleton.
    """
    def __init__(self):
        backend = self._config['secret_backend']
        unit_num = hookenv.local_unit().split('/')[1]
        self._path = '{}/kv/unit/{}'.format(backend, unit_num)
        super().__init__()
+
+
class VaultAppKV(_VaultBaseKV):
    """
    A simplified interface for storing data in Vault, with data shared by every
    unit of the application.

    Keys must be strings, but data can be structured as long as it is
    JSON-serializable.

    This class can be used as a dict, or you can use `self.get` and `self.set`
    for a more KV-like interface. When values are set, via either style, they
    are immediately persisted to Vault. Values are also cached in memory.

    Note: This is intended to be used as a secure replacement for leadership
    data. Therefore, only the leader should set data here. This is not
    enforced, but data changed by non-leaders will not trigger hooks on other
    units, so they may not be notified of changes in a timely fashion.

    Note: This class is a singleton.
    """
    def __init__(self):
        # Shared data lives at one app-wide path; change-detection hashes
        # are stored per unit, so each unit tracks changes independently.
        self._path = '{}/kv/app'.format(self._config['secret_backend'])
        self._hash_path = '{}/kv/app-hashes/{}'.format(
            self._config['secret_backend'],
            hookenv.local_unit().split('/')[1])
        super().__init__()
        self._load_hashes()

    def _load_hashes(self):
        # Load this unit's last-seen hashes, then hash the current values so
        # is_changed() can compare old vs new.
        log('Reading hashes from {}', self._hash_path)
        response = self._client.read(self._hash_path)
        self._old_hashes = response['data'] if response else {}
        self._new_hashes = {}
        for key in self.keys():
            self._rehash(key)

    def _rehash(self, key):
        # Hash the canonical (sorted-key) JSON form so dict ordering cannot
        # cause spurious change detection.  md5 here is a change-detection
        # checksum, not a security measure.
        serialized = json.dumps(self[key], sort_keys=True).encode('utf8')
        self._new_hashes[key] = md5(serialized).hexdigest()

    def __setitem__(self, key, value):
        # Persist via the base class, then refresh hash and reactive flags.
        super().__setitem__(key, value)
        self._rehash(key)
        self._manage_flags(key)

    def _manage_flags(self, key):
        # Keep the reactive flags mirroring this key's changed/set state.
        flag_any_changed = 'layer.vault-kv.app-kv.changed'
        flag_key_changed = 'layer.vault-kv.app-kv.changed.{}'.format(key)
        flag_key_set = 'layer.vault-kv.app-kv.set.{}'.format(key)
        if self.is_changed(key):
            # clear then set flag to ensure triggers are run even if the main
            # flag was never cleared
            clear_flag(flag_any_changed)
            set_flag(flag_any_changed)
            clear_flag(flag_key_changed)
            set_flag(flag_key_changed)
        if self.get(key) is not None:
            set_flag(flag_key_set)
        else:
            clear_flag(flag_key_set)

    @classmethod
    def _clear_all_flags(cls):
        # Drop every reactive flag managed by this layer's app-kv namespace.
        for flag in get_flags():
            if flag.startswith('layer.vault-kv.app-kv.'):
                clear_flag(flag)

    def is_changed(self, key):
        """
        Determine if the value for the given key has changed.

        In order to detect changes, hashes of the values are also stored
        in Vault. These hashes are updated automatically at exit via
        `self.update_hashes()`.
        """
        return self._new_hashes.get(key) != self._old_hashes.get(key)

    def any_changed(self):
        """
        Determine if any data has changed.

        In order to detect changes, hashes of the values are also stored
        in Vault. These hashes are updated automatically at exit via
        `self.update_hashes()`.
        """
        all_keys = self._new_hashes.keys() | self._old_hashes.keys()
        return any(self.is_changed(key) for key in all_keys)

    def update_hashes(self):
        """
        Update the hashes in Vault, thus marking all fields as unchanged.

        This is done automatically at exit.
        """
        log('Writing hashes to {}', self._hash_path)
        self._client.write(self._hash_path, **self._new_hashes)
        self._old_hashes.clear()
        self._old_hashes.update(self._new_hashes)
+
+
def get_vault_config():
    """Return the data this application needs to access Vault.

    The result contains ``vault_url``, ``secret_backend``, ``role_id`` and
    ``secret_id``.  This is only needed directly if something else (such as
    VaultLocker) uses the secrets backend provided by this layer.

    Note: the secret ID is cached in UnitData
    (https://charm-helpers.readthedocs.io/en/latest/api/charmhelpers.core.unitdata.html)
    so anything with access to that could access Vault as this application.
    If any of this data changes (such as the secret_id being rotated), this
    layer will set the `layer.vault-kv.config.changed` flag.

    Raises `VaultNotReady` if called before the Vault relation is available.
    """  # noqa
    vault = endpoint_from_flag('vault-kv.available')
    ready = bool(vault and vault.vault_url and vault.unit_role_id and
                 vault.unit_token)
    if not ready:
        raise VaultNotReady()
    return {
        'vault_url': vault.vault_url,
        'secret_backend': _get_secret_backend(),
        'role_id': vault.unit_role_id,
        'secret_id': _get_secret_id(vault),
    }
+
+
def _get_secret_backend():
    """Name of this application's Vault secrets backend (``charm-<app>``)."""
    return 'charm-{}'.format(hookenv.application_name())
+
+
def _get_secret_id(vault):
    """Return the current Vault secret ID for this unit.

    A new secret ID is fetched (consuming the one-shot token) only when the
    relation-provided token changes; otherwise the value cached in unit data
    is returned.
    """
    token = vault.unit_token
    if data_changed('layer.vault-kv.token', token):
        log('Changed unit_token, getting new secret_id')
        # token is one-shot, but if it changes it might mean that we're
        # being told to rotate the secret ID, or we might not have fetched
        # one yet
        vault_url = vault.vault_url
        secret_id = retrieve_secret_id(vault_url, token)
        unitdata.kv().set('layer.vault-kv.secret_id', secret_id)
        # have to flush immediately because if we don't and hit some error
        # elsewhere, it could get us into a state where we have forgotten the
        # secret ID and can't retrieve it again because we've already used the
        # token
        unitdata.kv().flush()
    else:
        secret_id = unitdata.kv().get('layer.vault-kv.secret_id')
    return secret_id
diff --git a/kubernetes-master/lib/charms/layer/vaultlocker.py b/kubernetes-master/lib/charms/layer/vaultlocker.py
new file mode 100644
index 0000000..235224c
--- /dev/null
+++ b/kubernetes-master/lib/charms/layer/vaultlocker.py
@@ -0,0 +1,170 @@
+import json
+from pathlib import Path
+from subprocess import check_call, check_output, CalledProcessError
+from uuid import uuid4
+
+from charms.reactive import set_flag
+from charmhelpers.core import hookenv
+from charmhelpers.core import host
+from charmhelpers.core import unitdata
+from charmhelpers.contrib.openstack.vaultlocker import ( # noqa
+ retrieve_secret_id,
+ write_vaultlocker_conf,
+)
+from charmhelpers.contrib.storage.linux.utils import (
+ is_block_device,
+ is_device_mounted,
+ mkfs_xfs,
+)
+
+
+LOOP_ENVS = Path('/etc/vaultlocker/loop-envs')
+
+
class VaultLockerError(Exception):
    """Error raised while configuring VaultLocker.

    The constructor applies ``str.format`` to its arguments, so callers may
    pass a message template plus values.
    """

    def __init__(self, msg, *args, **kwargs):
        super().__init__(msg.format(*args, **kwargs))
+
+
def encrypt_storage(storage_name, mountbase=None):
    """Encrypt every attached location of a Juju storage entry.

    The entry **must** be declared with ``type: block``.  When ``mountbase``
    is given, each location is formatted as XFS and mounted: for ``multiple``
    storage at ``{mountbase}/{storage_name}/{num}``, otherwise at
    ``{mountbase}/{storage_name}``.  Without ``mountbase`` nothing is
    formatted or mounted; use :func:`decrypted_device` on the location to get
    the device name to work with.

    Sets ``layer.vaultlocker.{storage_id}.ready`` per location, then
    ``layer.vaultlocker.{storage_name}.ready`` once all are processed.
    """
    storage_metadata = hookenv.metadata()['storage'][storage_name]
    if storage_metadata['type'] != 'block':
        raise VaultLockerError('Cannot encrypt non-block storage: {}',
                               storage_name)
    multiple = 'multiple' in storage_metadata
    prefix = storage_name + '/'
    for storage_id in hookenv.storage_list():
        if not storage_id.startswith(prefix):
            continue
        storage_location = hookenv.storage_get('location', storage_id)
        if mountbase:
            # storage_id is "name/num", giving the per-instance subdir
            suffix = storage_id if multiple else storage_name
            mountpoint = Path(mountbase) / suffix
        else:
            mountpoint = None
        encrypt_device(storage_location, mountpoint)
        set_flag('layer.vaultlocker.{}.ready'.format(storage_id))
    set_flag('layer.vaultlocker.{}.ready'.format(storage_name))
+
+
def encrypt_device(device, mountpoint=None, uuid=None):
    """
    Set up encryption for the given block device, and optionally create and
    mount an XFS filesystem on the encrypted device.

    If ``mountpoint`` is not given, the device will not be formatted or
    mounted. When interacting with or mounting the device manually, the
    name returned by :func:`decrypted_device` called on the device name
    should be used in place of the raw device name.

    Raises VaultLockerError if the device is not a block device, is already
    mounted, or any underlying command fails.
    """
    if not is_block_device(device):
        raise VaultLockerError('Cannot encrypt non-block device: {}', device)
    if is_device_mounted(device):
        raise VaultLockerError('Cannot encrypt mounted device: {}', device)
    hookenv.log('Encrypting device: {}'.format(device))
    if uuid is None:
        uuid = str(uuid4())
    try:
        check_call(['vaultlocker', 'encrypt', '--uuid', uuid, device])
        # remember the UUID so decrypted_device() can map device -> dm name
        unitdata.kv().set('layer.vaultlocker.uuids.{}'.format(device), uuid)
        if mountpoint:
            mapped_device = decrypted_device(device)
            hookenv.log('Creating filesystem on {} ({})'.format(mapped_device,
                                                                device))
            # If this fails, it's probably due to the size of the loopback
            # backing file that is defined by the `dd`.
            mkfs_xfs(mapped_device)
            Path(mountpoint).mkdir(mode=0o755, parents=True, exist_ok=True)
            hookenv.log('Mounting filesystem for {} ({}) at {}'
                        ''.format(mapped_device, device, mountpoint))
            host.mount(mapped_device, mountpoint, filesystem='xfs')
            # the fstab entry depends on the vaultlocker decrypt unit so the
            # mapped device exists before the boot-time mount is attempted
            host.fstab_add(mapped_device, mountpoint, 'xfs', ','.join([
                "defaults",
                "nofail",
                "x-systemd.requires=vaultlocker-decrypt@{uuid}.service".format(
                    uuid=uuid,
                ),
                "comment=vaultlocker",
            ]))
    except (CalledProcessError, OSError) as e:
        raise VaultLockerError('Error configuring VaultLocker') from e
+
+
def decrypted_device(device):
    """Return the dm-crypt mapped name for an encrypted device, or None.

    The mapped name (``/dev/mapper/crypt-<uuid>``) is what should be used
    when mounting; None is returned if no UUID was recorded for the device.
    """
    key = 'layer.vaultlocker.uuids.{}'.format(device)
    stored_uuid = unitdata.kv().get(key)
    if not stored_uuid:
        return None
    return '/dev/mapper/crypt-{uuid}'.format(uuid=stored_uuid)
+
+
def create_encrypted_loop_mount(mount_path, block_size='1M', block_count=20,
                                backing_file=None):
    """
    Creates a persistent loop device, encrypts it, formats it as XFS, and
    mounts it at the given `mount_path`.

    A backing file will be created under `/var/lib/vaultlocker/backing_files`,
    in a UUID named file, according to `block_size` and `block_count`
    parameters, which map to `bs` and `count` of the `dd` command. Note that
    the backing file must be a bit over 16M to allow for the XFS file system
    plus some additional metadata needed for the encryption. It is not
    recommended to go below the default of 20M (20 blocks, 1M each).

    The `backing_file` parameter can be used to change the location where the
    backing file is created.

    Raises VaultLockerError if the backing file already exists or any
    underlying command fails.
    """
    uuid = str(uuid4())
    if backing_file is None:
        backing_file = Path('/var/lib/vaultlocker/backing_files') / uuid
        backing_file.parent.mkdir(parents=True, exist_ok=True)
    else:
        backing_file = Path(backing_file)
        if backing_file.exists():
            raise VaultLockerError('Backing file already exists: {}',
                                   backing_file)

    try:
        # ensure loop devices are enabled
        check_call(['modprobe', 'loop'])
        # create the backing file filled with random data; honor the
        # block_size/block_count parameters (bug fix: they were previously
        # ignored in favor of a hard-coded bs=8M count=4, contradicting the
        # documented 20M default)
        check_call(['dd', 'if=/dev/urandom', 'of={}'.format(backing_file),
                    'bs={}'.format(block_size),
                    'count={}'.format(block_count)])
        # claim an unused loop device
        output = check_output(['losetup', '--show', '-f', str(backing_file)])
        device_name = output.decode('utf8').strip()
        # encrypt the new loop device
        encrypt_device(device_name, str(mount_path), uuid)
        # setup the service to ensure loop device is restored after reboot
        (LOOP_ENVS / uuid).write_text(''.join([
            'BACK_FILE={}\n'.format(backing_file),
        ]))
        check_call(['systemctl', 'enable',
                    'vaultlocker-loop@{}.service'.format(uuid)])
    except (CalledProcessError, OSError) as e:
        raise VaultLockerError('Error configuring VaultLocker') from e
diff --git a/kubernetes-master/lib/charms/leadership.py b/kubernetes-master/lib/charms/leadership.py
new file mode 100644
index 0000000..d2a95fa
--- /dev/null
+++ b/kubernetes-master/lib/charms/leadership.py
@@ -0,0 +1,68 @@
+# Copyright 2015-2016 Canonical Ltd.
+#
+# This file is part of the Leadership Layer for Juju.
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 3, as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranties of
+# MERCHANTABILITY, SATISFACTORY QUALITY, or FITNESS FOR A PARTICULAR
+# PURPOSE. See the GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+from charmhelpers.core import hookenv
+from charmhelpers.core import unitdata
+
+from charms import reactive
+from charms.reactive import not_unless
+
+
+__all__ = ['leader_get', 'leader_set']
+
+
@not_unless('leadership.is_leader')
def leader_set(*args, **kw):
    '''Change leadership settings, per charmhelpers.core.hookenv.leader_set.

    Settings may either be passed in as a single dictionary, or using
    keyword arguments. All values must be strings.

    The leadership.set.{key} reactive state will be set while the
    leadership hook environment setting remains set.

    Changed leadership settings will set the leadership.changed.{key}
    and leadership.changed states. These states will remain set until
    the following hook.

    These state changes take effect immediately on the leader, and
    in future hooks run on non-leaders. In this way both leaders and
    non-leaders can share handlers, waiting on these states.
    '''
    if len(args) > 1:
        raise TypeError('leader_set() takes 1 positional argument but '
                        '{} were given'.format(len(args)))
    settings = dict(args[0]) if args else {}
    settings.update(kw)
    # compare against the locally cached copy of the previous settings
    previous = unitdata.kv().getrange('leadership.settings.', strip=True)

    for key, value in settings.items():
        if value != previous.get(key):
            reactive.set_state('leadership.changed.{}'.format(key))
            reactive.set_state('leadership.changed')
        reactive.helpers.toggle_state('leadership.set.{}'.format(key),
                                      value is not None)
    hookenv.leader_set(settings)
    unitdata.kv().update(settings, prefix='leadership.settings.')
+
+
def leader_get(attribute=None):
    '''Return leadership settings, per charmhelpers.core.hookenv.leader_get.'''
    # Thin pass-through; exported alongside leader_set via __all__ so charms
    # can import both from this one module.
    return hookenv.leader_get(attribute)
diff --git a/kubernetes-master/lib/debug_script.py b/kubernetes-master/lib/debug_script.py
new file mode 100644
index 0000000..e156924
--- /dev/null
+++ b/kubernetes-master/lib/debug_script.py
@@ -0,0 +1,8 @@
+import os
+
# Base directory for debug-script files, supplied by the environment.
# (Name shadows the builtin `dir`; kept for compatibility with importers.)
dir = os.environ["DEBUG_SCRIPT_DIR"]


def open_file(path, *args, **kwargs):
    """Open ``path`` resolved relative to the debug script directory."""
    full_path = os.path.join(dir, path)
    return open(full_path, *args, **kwargs)
diff --git a/kubernetes-master/lxd-profile.yaml b/kubernetes-master/lxd-profile.yaml
new file mode 100644
index 0000000..6b4babc
--- /dev/null
+++ b/kubernetes-master/lxd-profile.yaml
@@ -0,0 +1,16 @@
+name: juju-default-k8s-deployment-0
+config:
+ linux.kernel_modules: ip_tables,ip6_tables,netlink_diag,nf_nat,overlay
+ raw.lxc: |
+ lxc.apparmor.profile=unconfined
+ lxc.mount.auto=proc:rw sys:rw
+ lxc.cgroup.devices.allow=a
+ lxc.cap.drop=
+ security.nesting: true
+ security.privileged: true
+description: ""
+devices:
+ aadisable:
+ path: /dev/kmsg
+ source: /dev/kmsg
+ type: unix-char
diff --git a/kubernetes-master/make_docs b/kubernetes-master/make_docs
new file mode 100644
index 0000000..dcd4c1f
--- /dev/null
+++ b/kubernetes-master/make_docs
@@ -0,0 +1,20 @@
+#!.tox/py3/bin/python
+
+import os
+import sys
+from shutil import rmtree
+from unittest.mock import patch
+
+import pydocmd.__main__
+
+
# Build the pydoc-markdown docs with hookenv.metadata mocked out so that
# importing the charm code does not require a real charm environment.
with patch('charmhelpers.core.hookenv.metadata') as metadata:
    sys.path.insert(0, 'lib')
    sys.path.insert(1, 'reactive')
    print(sys.argv)
    # default to the 'build' subcommand when none was given
    if len(sys.argv) == 1:
        sys.argv.extend(['build'])
    pydocmd.__main__.main()
    # clean up artifacts left behind by the build / charm imports
    rmtree('_build')
    if os.path.exists('.unit-state.db'):
        os.remove('.unit-state.db')
diff --git a/kubernetes-master/metadata.yaml b/kubernetes-master/metadata.yaml
new file mode 100644
index 0000000..2446cb1
--- /dev/null
+++ b/kubernetes-master/metadata.yaml
@@ -0,0 +1,116 @@
+"name": "kubernetes-master"
+"summary": "The Kubernetes control plane."
+"maintainers":
+- "Tim Van Steenburgh "
+- "George Kraft "
+- "Rye Terrell "
+- "Konstantinos Tsakalozos "
+- "Charles Butler "
+- "Matthew Bruzek "
+- "Mike Wilson "
+- "Joe Borg "
+"description": |
+ Kubernetes is an open-source platform for deploying, scaling, and operations
+ of application containers across a cluster of hosts. Kubernetes is portable
+ in that it works with public, private, and hybrid clouds. Extensible through
+ a pluggable infrastructure. Self healing in that it will automatically
+ restart and place containers on healthy nodes if a node ever goes away.
+"tags":
+- "misc"
+- "infrastructure"
+- "kubernetes"
+- "master"
+"series":
+- "focal"
+- "bionic"
+- "xenial"
+"requires":
+ "certificates":
+ "interface": "tls-certificates"
+ "vault-kv":
+ "interface": "vault-kv"
+ "ha":
+ "interface": "hacluster"
+ "etcd":
+ "interface": "etcd"
+ "loadbalancer":
+ "interface": "public-address"
+ "ceph-storage":
+ "interface": "ceph-admin"
+ "ceph-client":
+ "interface": "ceph-client"
+ "aws":
+ "interface": "aws-integration"
+ "gcp":
+ "interface": "gcp-integration"
+ "openstack":
+ "interface": "openstack-integration"
+ "vsphere":
+ "interface": "vsphere-integration"
+ "azure":
+ "interface": "azure-integration"
+ "keystone-credentials":
+ "interface": "keystone-credentials"
+ "dns-provider":
+ "interface": "kube-dns"
+"provides":
+ "nrpe-external-master":
+ "interface": "nrpe-external-master"
+ "scope": "container"
+ "container-runtime":
+ "interface": "container-runtime"
+ "scope": "container"
+ "kube-api-endpoint":
+ "interface": "http"
+ "cluster-dns":
+ # kube-dns is deprecated. Its functionality has been rolled into the
+ # kube-control interface. The cluster-dns relation will be removed in
+ # a future release.
+ "interface": "kube-dns"
+ "kube-control":
+ "interface": "kube-control"
+ "cni":
+ "interface": "kubernetes-cni"
+ "scope": "container"
+ "prometheus":
+ "interface": "prometheus-manual"
+ "grafana":
+ "interface": "grafana-dashboard"
+ "aws-iam":
+ "interface": "aws-iam"
+ "scope": "container"
+"peers":
+ "coordinator":
+ "interface": "coordinator"
+ "kube-masters":
+ "interface": "kube-masters"
+"resources":
+ "core":
+ "type": "file"
+ "filename": "core.snap"
+ "description": "core snap"
+ "kubectl":
+ "type": "file"
+ "filename": "kubectl.snap"
+ "description": "kubectl snap"
+ "kube-apiserver":
+ "type": "file"
+ "filename": "kube-apiserver.snap"
+ "description": "kube-apiserver snap"
+ "kube-controller-manager":
+ "type": "file"
+ "filename": "kube-controller-manager.snap"
+ "description": "kube-controller-manager snap"
+ "kube-scheduler":
+ "type": "file"
+ "filename": "kube-scheduler.snap"
+ "description": "kube-scheduler snap"
+ "cdk-addons":
+ "type": "file"
+ "filename": "cdk-addons.snap"
+ "description": "CDK addons snap"
+ "kube-proxy":
+ "type": "file"
+ "filename": "kube-proxy.snap"
+ "description": "kube-proxy snap"
+"subordinate": !!bool "false"
diff --git a/kubernetes-master/metrics.yaml b/kubernetes-master/metrics.yaml
new file mode 100644
index 0000000..0d422ff
--- /dev/null
+++ b/kubernetes-master/metrics.yaml
@@ -0,0 +1,38 @@
+metrics:
+ juju-units: {}
+ pods:
+ type: gauge
+ description: number of pods
+ command: /snap/bin/kubectl --kubeconfig /root/.kube/config get po --all-namespaces | tail -n+2 | wc -l
+ services:
+ type: gauge
+ description: number of services
+ command: /snap/bin/kubectl --kubeconfig /root/.kube/config get svc --all-namespaces | tail -n+2 | wc -l
+ replicasets:
+ type: gauge
+ description: number of replicasets
+ command: /snap/bin/kubectl --kubeconfig /root/.kube/config get rs --all-namespaces | tail -n+2 | wc -l
+ replicationcontrollers:
+ type: gauge
+ description: number of replicationcontrollers
+ command: /snap/bin/kubectl --kubeconfig /root/.kube/config get rc --all-namespaces | tail -n+2 | wc -l
+ nodes:
+ type: gauge
+ description: number of kubernetes nodes
+ command: /snap/bin/kubectl --kubeconfig /root/.kube/config get nodes | tail -n+2 | wc -l
+ nodes-gpu:
+ type: gauge
+ description: number of gpu-enabled kubernetes nodes
+ command: /snap/bin/kubectl --kubeconfig /root/.kube/config get nodes -l gpu=true -o name | wc -l
+ persistentvolume:
+ type: gauge
+ description: number of pv
+ command: /snap/bin/kubectl --kubeconfig /root/.kube/config get pv | tail -n+2 | wc -l
+ persistentvolumeclaims:
+ type: gauge
+ description: number of claims
+ command: /snap/bin/kubectl --kubeconfig /root/.kube/config get pvc --all-namespaces | tail -n+2 | wc -l
+ serviceaccounts:
+ type: gauge
+ description: number of sa
+ command: /snap/bin/kubectl --kubeconfig /root/.kube/config get sa --all-namespaces | tail -n+2 | wc -l
diff --git a/kubernetes-master/pydocmd.yml b/kubernetes-master/pydocmd.yml
new file mode 100644
index 0000000..7b3a610
--- /dev/null
+++ b/kubernetes-master/pydocmd.yml
@@ -0,0 +1,10 @@
+site_name: 'VaultLocker Block Device Encryption Layer'
+
+generate:
+ - vaultlocker.md:
+ - charms.layer.vaultlocker+
+
+pages:
+ - VaultLocker Block Device Encryption Layer: vaultlocker.md
+
+gens_dir: docs
diff --git a/kubernetes-master/pyproject.toml b/kubernetes-master/pyproject.toml
new file mode 100644
index 0000000..db0dcd0
--- /dev/null
+++ b/kubernetes-master/pyproject.toml
@@ -0,0 +1,3 @@
+[tool.black]
+line-length=120
+target-version=['py35']
diff --git a/kubernetes-master/reactive/__init__.py b/kubernetes-master/reactive/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/kubernetes-master/reactive/apt.py b/kubernetes-master/reactive/apt.py
new file mode 100644
index 0000000..8832296
--- /dev/null
+++ b/kubernetes-master/reactive/apt.py
@@ -0,0 +1,158 @@
+# Copyright 2015-2020 Canonical Ltd.
+#
+# This file is part of the Apt layer for Juju.
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 3, as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranties of
+# MERCHANTABILITY, SATISFACTORY QUALITY, or FITNESS FOR A PARTICULAR
+# PURPOSE. See the GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+'''
+charms.reactive helpers for dealing with deb packages.
+
+Add apt package sources using add_source(). Queue deb packages for
+installation with install(). Configure and work with your software
+once the apt.installed.{packagename} flag is set.
+'''
+import os.path
+import subprocess
+import re
+
+from charmhelpers import fetch
+from charmhelpers.core import hookenv
+from charmhelpers.core.hookenv import DEBUG, ERROR, WARNING
+from charms import layer
+from charms.layer import status
+from charms import reactive
+from charms.reactive import when, when_not
+
+import charms.apt
+
+
@when('apt.needs_update')
def update():
    """Refresh the apt package index when the update flag is raised."""
    charms.apt.update()
+
+
@when('apt.queued_installs')
@when_not('apt.needs_update')
def install_queued():
    """Install queued packages once the package index is up to date."""
    charms.apt.install_queued()
+
+
@when_not('apt.queued_installs')
def ensure_package_status():
    """Apply the configured hold/install status when nothing is queued."""
    charms.apt.ensure_package_status()
+
+
def filter_installed_packages(packages):
    """Return the subset of ``packages`` that is not currently installed.

    Entries may also be regular expressions: an entry is dropped from the
    result if its pattern matches any installed package name.  dpkg-query is
    invoked directly rather than fetch.filter_installed_packages because the
    latter depends on python-apt, which is unavailable when the basic
    layer's use_site_packages option is off.
    """
    cmd = ['dpkg-query', '--show', r'--showformat=${Package}\n']
    installed = set(
        subprocess.check_output(cmd, universal_newlines=True).split())

    # start with the names that have no exact installed match
    missing = set(packages) - installed

    # then drop any entry whose regex matches some installed package
    for candidate in list(missing):
        pattern = re.compile(candidate)
        if any(pattern.search(name) for name in installed):
            missing.remove(candidate)

    return missing
+
+
def clear_removed_package_flags():
    """On hook startup, clear install flags for packages no longer present."""
    removed = filter_installed_packages(charms.apt.installed())
    if not removed:
        return
    hookenv.log(
        '{} missing packages ({})'.format(len(removed), ','.join(removed)),
        WARNING)
    for package in removed:
        reactive.clear_flag('apt.installed.{}'.format(package))
+
+
def add_implicit_signing_keys():
    """Add keys specified in layer.yaml

    The charm can ship trusted keys, avoiding the need to specify
    them in config.yaml. They must be added before any custom sources,
    or apt will block under Bionic if a source precedes its key.
    """
    apt_opts = layer.options().get('apt') or {}
    for p in apt_opts.get('keys') or []:
        full_p = os.path.join(hookenv.charm_dir(), p)
        if not os.path.exists(full_p):
            hookenv.log('Key {!r} does not exist'.format(full_p), ERROR)
            continue
        hookenv.log("Adding key {}".format(p), DEBUG)
        subprocess.check_call(
            ['apt-key', 'add', full_p],
            stdin=subprocess.DEVNULL,
            stdout=subprocess.DEVNULL,
            stderr=subprocess.DEVNULL,
        )
+
+
def configure_sources():
    """Add user specified package sources from the service configuration.

    See charmhelpers.fetch.configure_sources for details.
    """
    config = hookenv.config()

    # We don't have enums, so we need to validate this ourselves.
    package_status = config.get('package_status') or ''
    if package_status not in ('hold', 'install'):
        status.blocked('Unknown package_status {}'.format(package_status))
        # Die before further hooks are run. This isn't very nice, but
        # there is no other way to inform the operator that they have
        # invalid configuration.
        # NOTE(review): exits 0 so Juju does not mark the hook failed; the
        # blocked status above is the operator-facing signal.
        raise SystemExit(0)

    sources = config.get('install_sources') or ''
    keys = config.get('install_keys') or ''
    # only reconfigure (and trigger an apt update) when sources/keys changed
    if reactive.helpers.data_changed('apt.configure_sources', (sources, keys)):
        fetch.configure_sources(update=False, sources_var='install_sources', keys_var='install_keys')
        reactive.set_flag('apt.needs_update')

    # Clumsy 'config.get() or' per Bug #1641362
    extra_packages = sorted((config.get('extra_packages') or '').split())
    if extra_packages:
        charms.apt.queue_install(extra_packages)
+
+
def queue_layer_packages():
    """Queue packages declared in build-time layer options for install.

    The basic layer will already have installed its declared packages, but
    queueing them again here gets the apt layer flag set so they will be
    pinned like any other apt-layer-installed package.
    """
    opts = layer.options()
    for section in ('basic', 'apt'):
        section_opts = opts.get(section) or {}
        if 'packages' in section_opts:
            charms.apt.queue_install(section_opts['packages'])
+
+
+hookenv.atstart(hookenv.log, 'Initializing Apt Layer')
+hookenv.atstart(clear_removed_package_flags)
+hookenv.atstart(add_implicit_signing_keys)
+hookenv.atstart(configure_sources)
+hookenv.atstart(queue_layer_packages)
+hookenv.atstart(charms.apt.reset_application_version)
diff --git a/kubernetes-master/reactive/cdk_service_kicker.py b/kubernetes-master/reactive/cdk_service_kicker.py
new file mode 100644
index 0000000..f7fd33a
--- /dev/null
+++ b/kubernetes-master/reactive/cdk_service_kicker.py
@@ -0,0 +1,32 @@
+import os
+import subprocess
+from charms import layer
+from charms.reactive import hook, when_not, remove_state, set_state
+from charmhelpers.core.templating import render
+
+
+@hook('upgrade-charm')
+def upgrade_charm():
+    # Clear the installed flag so install_cdk_service_kicker re-renders
+    # the script/unit files shipped with the new charm revision.
+    remove_state('cdk-service-kicker.installed')
+
+
+@when_not('cdk-service-kicker.installed')
+def install_cdk_service_kicker():
+    ''' Installs the cdk-service-kicker service. Workaround for
+    https://github.com/juju-solutions/bundle-canonical-kubernetes/issues/357
+    '''
+    # Render the kicker script with the space-separated list of services
+    # taken from the layer's build-time options.
+    source = 'cdk-service-kicker'
+    dest = '/usr/bin/cdk-service-kicker'
+    services = layer.options('cdk-service-kicker').get('services')
+    context = {'services': ' '.join(services)}
+    render(source, dest, context)
+    # rwxrwxr-x: script must be executable by systemd.
+    os.chmod('/usr/bin/cdk-service-kicker', 0o775)
+
+    # Install and enable the systemd unit that runs the script at boot.
+    source = 'cdk-service-kicker.service'
+    dest = '/etc/systemd/system/cdk-service-kicker.service'
+    context = {}
+    render(source, dest, context)
+    command = ['systemctl', 'enable', 'cdk-service-kicker']
+    subprocess.check_call(command)
+
+    set_state('cdk-service-kicker.installed')
diff --git a/kubernetes-master/reactive/coordinator.py b/kubernetes-master/reactive/coordinator.py
new file mode 100644
index 0000000..474a95d
--- /dev/null
+++ b/kubernetes-master/reactive/coordinator.py
@@ -0,0 +1,71 @@
+# Copyright 2015-2016 Canonical Ltd.
+#
+# This file is part of the Coordinator Layer for Juju.
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 3, as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranties of
+# MERCHANTABILITY, SATISFACTORY QUALITY, or FITNESS FOR A PARTICULAR
+# PURPOSE. See the GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+from charmhelpers.core import hookenv
+from charms.coordinator import coordinator, log
+import charms.reactive
+
+
+def initialize_coordinator_state():
+    '''
+    The coordinator.granted.{lockname} state will be set and the
+    coordinator.requested.{lockname} state removed for every lock
+    granted to the currently running hook.
+
+    The coordinator.requested.{lockname} state will remain set for locks
+    not yet granted
+    '''
+    log('Initializing coordinator layer')
+
+    # split('.', 2)[2] extracts {lockname} from
+    # 'coordinator.requested.{lockname}' / 'coordinator.granted.{lockname}'.
+    requested = set(coordinator.requests.get(hookenv.local_unit(), {}).keys())
+    previously_requested = set(state.split('.', 2)[2]
+                               for state in charms.reactive.bus.get_states()
+                               if state.startswith('coordinator.requested.'))
+
+    granted = set(coordinator.grants.get(hookenv.local_unit(), {}).keys())
+    previously_granted = set(state.split('.', 2)[2]
+                             for state in charms.reactive.bus.get_states()
+                             if state.startswith('coordinator.granted.'))
+
+    # Set reactive state for requested locks.
+    for lock in requested:
+        log('Requested {} lock'.format(lock), hookenv.DEBUG)
+        charms.reactive.set_state('coordinator.requested.{}'.format(lock))
+
+    # Set reactive state for locks that have been granted.
+    for lock in granted:
+        log('Granted {} lock'.format(lock), hookenv.DEBUG)
+        charms.reactive.set_state('coordinator.granted.{}'.format(lock))
+
+    # Remove reactive state for locks that have been released.
+    for lock in (previously_granted - granted):
+        log('Dropped {} lock'.format(lock), hookenv.DEBUG)
+        charms.reactive.remove_state('coordinator.granted.{}'.format(lock))
+
+    # Remove requested state for locks no longer requested and not granted.
+    for lock in (previously_requested - requested - granted):
+        log('Request for {} lock was dropped'.format(lock), hookenv.DEBUG)
+        charms.reactive.remove_state('coordinator.requested.{}'.format(lock))
+
+
+# Per https://github.com/juju-solutions/charms.reactive/issues/33,
+# this module may be imported multiple times so ensure the
+# initialization hook is only registered once. I have to piggy back
+# onto the namespace of a module imported before reactive discovery
+# to do this.
+if not hasattr(charms.reactive, '_coordinator_registered'):
+    # Sentinel attribute prevents duplicate atstart registration when this
+    # module is imported more than once during reactive discovery.
+    hookenv.atstart(initialize_coordinator_state)
+    charms.reactive._coordinator_registered = True
diff --git a/kubernetes-master/reactive/hacluster.py b/kubernetes-master/reactive/hacluster.py
new file mode 100644
index 0000000..f921f76
--- /dev/null
+++ b/kubernetes-master/reactive/hacluster.py
@@ -0,0 +1,110 @@
+from charms import layer
+
+from charms.reactive import hook
+from charms.reactive import when, when_not, clear_flag, set_flag, is_flag_set
+from charms.reactive import endpoint_from_flag
+
+from charms.layer.kubernetes_common import get_ingress_address
+
+from charmhelpers.core import hookenv
+from charmhelpers.core import unitdata
+
+db = unitdata.kv()
+
+
+@hook('upgrade-charm')
+def do_upgrade():
+    # bump the services from upstart to systemd. :-/
+    # One-shot migration gated by the layer-hacluster.upgraded-systemd flag:
+    # re-register every service tracked in unitdata as a systemd service.
+    hacluster = endpoint_from_flag('ha.connected')
+    if not hacluster:
+        return
+
+    if not is_flag_set('layer-hacluster.upgraded-systemd'):
+        services = db.get('layer-hacluster.services', {'current_services': {},
+                                                       'desired_services': {},
+                                                       'deleted_services': {}})
+        for name, service in services['current_services'].items():
+            hookenv.log("changing service {} to systemd service".format(name))
+            hacluster.remove_init_service(name, service)
+            hacluster.add_systemd_service(name, service)
+
+        # change any pending lsb entries to systemd
+        for name, service in services['desired_services'].items():
+            msg = "changing pending service {} to systemd service"
+            hookenv.log(msg.format(name))
+            hacluster.remove_init_service(name, service)
+            hacluster.add_systemd_service(name, service)
+
+        # Force reconfiguration with the migrated service definitions.
+        clear_flag('layer-hacluster.configured')
+        set_flag('layer-hacluster.upgraded-systemd')
+
+
+@when('ha.connected', 'layer.hacluster.services_configured')
+@when_not('layer-hacluster.configured')
+def configure_hacluster():
+ """Configure HA resources in corosync"""
+ hacluster = endpoint_from_flag('ha.connected')
+ vips = hookenv.config('ha-cluster-vip').split()
+ dns_record = hookenv.config('ha-cluster-dns')
+ if vips and dns_record:
+ set_flag('layer-hacluster.dns_vip.invalid')
+ msg = "Unsupported configuration. " \
+ "ha-cluster-vip and ha-cluster-dns cannot both be set",
+ hookenv.log(msg)
+ return
+ else:
+ clear_flag('layer-hacluster.dns_vip.invalid')
+ if vips:
+ for vip in vips:
+ hacluster.add_vip(hookenv.application_name(), vip)
+ elif dns_record:
+ layer_options = layer.options('hacluster')
+ binding_address = layer_options.get('binding_address')
+ ip = get_ingress_address(binding_address)
+ hacluster.add_dnsha(hookenv.application_name(), ip, dns_record,
+ 'public')
+
+ services = db.get('layer-hacluster.services', {'current_services': {},
+ 'desired_services': {},
+ 'deleted_services': {}})
+ for name, service in services['deleted_services'].items():
+ hacluster.remove_systemd_service(name, service)
+ for name, service in services['desired_services'].items():
+ hacluster.add_systemd_service(name, service)
+ services['current_services'][name] = service
+
+ services['deleted_services'] = {}
+ services['desired_services'] = {}
+
+ hacluster.bind_resources()
+ set_flag('layer-hacluster.configured')
+
+
+@when('config.changed.ha-cluster-vip',
+ 'ha.connected')
+def update_vips():
+ hacluster = endpoint_from_flag('ha.connected')
+ config = hookenv.config()
+ original_vips = set(config.previous('ha-cluster-vip').split())
+ new_vips = set(config['ha-cluster-vip'].split())
+ old_vips = original_vips - new_vips
+
+ for vip in old_vips:
+ hacluster.remove_vip(hookenv.application_name(), vip)
+
+ clear_flag('layer-hacluster.configured')
+
+
+@when('config.changed.ha-cluster-dns',
+ 'ha.connected')
+def update_dns():
+ hacluster = endpoint_from_flag('ha.connected')
+ config = hookenv.config()
+ original_dns = set(config.previous('ha-cluster-dns').split())
+ new_dns = set(config['ha-cluster-dns'].split())
+ old_dns = original_dns - new_dns
+
+ for dns in old_dns:
+ hacluster.remove_dnsha(hookenv.application_name, 'public')
+
+ clear_flag('layer-hacluster.configured')
diff --git a/kubernetes-master/reactive/kubernetes_master.py b/kubernetes-master/reactive/kubernetes_master.py
new file mode 100644
index 0000000..448e191
--- /dev/null
+++ b/kubernetes-master/reactive/kubernetes_master.py
@@ -0,0 +1,3452 @@
+#!/usr/local/sbin/charm-env python3
+
+# Copyright 2015 The Kubernetes Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import base64
+import os
+import re
+import shutil
+import socket
+import json
+import traceback
+import yaml
+
+from shutil import move, copyfile
+from pathlib import Path
+from subprocess import check_call
+from subprocess import check_output
+from subprocess import CalledProcessError
+from time import sleep
+from urllib.request import Request, urlopen
+
+import charms.coordinator
+from charms.layer import snap
+from charms.leadership import leader_get, leader_set
+from charms.reactive import hook
+from charms.reactive import remove_state, clear_flag
+from charms.reactive import set_state, set_flag
+from charms.reactive import is_state, is_flag_set, get_unset_flags, all_flags_set
+from charms.reactive import endpoint_from_flag
+from charms.reactive import when, when_any, when_not, when_none
+from charms.reactive import register_trigger
+from charms.reactive import data_changed, any_file_changed
+
+from charms.layer import tls_client
+from charms.layer import vaultlocker
+from charms.layer import vault_kv
+
+from charmhelpers.core import hookenv
+from charmhelpers.core import host
+from charmhelpers.core import unitdata
+from charmhelpers.core.host import restart_on_change
+from charmhelpers.core.host import service_pause, service_resume, service_stop
+from charmhelpers.core.templating import render
+from charmhelpers.contrib.charmsupport import nrpe
+
+from charms.layer import kubernetes_master
+from charms.layer import kubernetes_common
+
+from charms.layer.hacluster import add_service_to_hacluster
+from charms.layer.hacluster import remove_service_from_hacluster
+from charms.layer.kubernetes_common import kubeclientconfig_path
+from charms.layer.kubernetes_common import migrate_resource_checksums
+from charms.layer.kubernetes_common import check_resources_for_upgrade_needed
+from charms.layer.kubernetes_common import (
+ calculate_and_store_resource_checksums,
+) # noqa
+from charms.layer.kubernetes_common import arch
+from charms.layer.kubernetes_common import service_restart
+from charms.layer.kubernetes_common import get_ingress_address
+from charms.layer.kubernetes_common import get_ingress_address6
+from charms.layer.kubernetes_common import create_kubeconfig
+from charms.layer.kubernetes_common import get_service_ip
+from charms.layer.kubernetes_common import configure_kubernetes_service
+from charms.layer.kubernetes_common import cloud_config_path
+from charms.layer.kubernetes_common import encryption_config_path
+from charms.layer.kubernetes_common import write_gcp_snap_config
+from charms.layer.kubernetes_common import generate_openstack_cloud_config
+from charms.layer.kubernetes_common import write_azure_snap_config
+from charms.layer.kubernetes_common import configure_kube_proxy
+from charms.layer.kubernetes_common import kubeproxyconfig_path
+from charms.layer.kubernetes_common import get_version
+from charms.layer.kubernetes_common import retry
+from charms.layer.kubernetes_common import ca_crt_path
+from charms.layer.kubernetes_common import server_crt_path
+from charms.layer.kubernetes_common import server_key_path
+from charms.layer.kubernetes_common import client_crt_path
+from charms.layer.kubernetes_common import client_key_path
+from charms.layer.kubernetes_common import kubectl, kubectl_manifest, kubectl_success
+from charms.layer.kubernetes_common import _get_vmware_uuid
+
+from charms.layer.nagios import install_nagios_plugin_from_file
+from charms.layer.nagios import remove_nagios_plugin
+
+
+# Override the default nagios shortname regex to allow periods, which we
+# need because our bin names contain them (e.g. 'snap.foo.daemon'). The
+# default regex in charmhelpers doesn't allow periods, but nagios itself does.
+nrpe.Check.shortname_re = r"[\.A-Za-z0-9-_]+$"
+
+snap_resources = [
+ "kubectl",
+ "kube-apiserver",
+ "kube-controller-manager",
+ "kube-scheduler",
+ "cdk-addons",
+ "kube-proxy",
+]
+
+master_services = [
+ "kube-apiserver",
+ "kube-controller-manager",
+ "kube-scheduler",
+ "kube-proxy",
+]
+
+cohort_snaps = snap_resources + ["kubelet"]
+
+
+os.environ["PATH"] += os.pathsep + os.path.join(os.sep, "snap", "bin")
+db = unitdata.kv()
+checksum_prefix = "kubernetes-master.resource-checksums."
+configure_prefix = "kubernetes-master.prev_args."
+keystone_root = "/root/cdk/keystone"
+keystone_policy_path = os.path.join(keystone_root, "keystone-policy.yaml")
+kubecontrollermanagerconfig_path = "/root/cdk/kubecontrollermanagerconfig"
+kubeschedulerconfig_path = "/root/cdk/kubeschedulerconfig"
+cdk_addons_kubectl_config_path = "/root/cdk/cdk_addons_kubectl_config"
+aws_iam_webhook = "/root/cdk/aws-iam-webhook.yaml"
+auth_webhook_root = "/root/cdk/auth-webhook"
+auth_webhook_conf = os.path.join(auth_webhook_root, "auth-webhook-conf.yaml")
+auth_webhook_exe = os.path.join(auth_webhook_root, "auth-webhook.py")
+auth_webhook_svc = "/etc/systemd/system/cdk.master.auth-webhook.service"
+
+register_trigger(
+ when="endpoint.aws.ready", set_flag="kubernetes-master.aws.changed" # when set
+)
+register_trigger(
+ when_not="endpoint.aws.ready", # when cleared
+ set_flag="kubernetes-master.aws.changed",
+)
+register_trigger(
+ when="endpoint.azure.ready", set_flag="kubernetes-master.azure.changed" # when set
+)
+register_trigger(
+ when_not="endpoint.azure.ready", # when cleared
+ set_flag="kubernetes-master.azure.changed",
+)
+register_trigger(
+ when="endpoint.gcp.ready", set_flag="kubernetes-master.gcp.changed" # when set
+)
+register_trigger(
+ when_not="endpoint.gcp.ready", # when cleared
+ set_flag="kubernetes-master.gcp.changed",
+)
+register_trigger(
+ when="kubernetes-master.ceph.configured", set_flag="cdk-addons.reconfigure"
+)
+register_trigger(
+ when_not="kubernetes-master.ceph.configured", set_flag="cdk-addons.reconfigure"
+)
+register_trigger(
+ when="keystone-credentials.available", set_flag="cdk-addons.reconfigure"
+)
+register_trigger(
+ when_not="keystone-credentials.available", set_flag="cdk-addons.reconfigure"
+)
+register_trigger(
+ when="kubernetes-master.aws.changed", set_flag="cdk-addons.reconfigure"
+)
+register_trigger(
+ when="kubernetes-master.azure.changed", set_flag="cdk-addons.reconfigure"
+)
+register_trigger(
+ when="kubernetes-master.gcp.changed", set_flag="cdk-addons.reconfigure"
+)
+register_trigger(
+ when="kubernetes-master.openstack.changed", set_flag="cdk-addons.reconfigure"
+)
+register_trigger(
+ when_not="cni.available", clear_flag="kubernetes-master.components.started"
+)
+register_trigger(
+ when="kube-control.requests.changed", clear_flag="authentication.setup"
+)
+
+
+def set_upgrade_needed(forced=False):
+    """Flag that the snaps need upgrading.
+
+    Also sets kubernetes-master.upgrade-specified (triggering the actual
+    upgrade) when there is no previous channel, when require-manual-upgrade
+    is unset, or when forced=True.
+    """
+    set_state("kubernetes-master.upgrade-needed")
+    config = hookenv.config()
+    previous_channel = config.previous("channel")
+    require_manual = config.get("require-manual-upgrade")
+    hookenv.log("set upgrade needed")
+    if previous_channel is None or not require_manual or forced:
+        hookenv.log("forcing upgrade")
+        set_state("kubernetes-master.upgrade-specified")
+
+
+@when("config.changed.channel")
+def channel_changed():
+    # A channel change means new snap revisions may be available.
+    set_upgrade_needed()
+
+
+def maybe_install_kube_proxy():
+    """Install the kube-proxy snap if it is not already installed,
+    then refresh stored resource checksums."""
+    if not snap.is_installed("kube-proxy"):
+        channel = hookenv.config("channel")
+        hookenv.status_set("maintenance", "Installing kube-proxy snap")
+        snap.install("kube-proxy", channel=channel, classic=True)
+        calculate_and_store_resource_checksums(checksum_prefix, snap_resources)
+
+
+@hook("install")
+def fresh_install():
+    # fresh installs should always send the unique cluster tag to cdk-addons
+    set_state("kubernetes-master.cdk-addons.unique-cluster-tag")
+
+
+@hook("upgrade-charm")
+def check_for_upgrade_needed():
+    """An upgrade charm event was triggered by Juju, react to that here.
+
+    Migrates legacy flags and auth files, checks whether snap resources
+    changed (forcing a snap upgrade if needed), and seeds leader settings
+    for storage backend and DNS provider.
+    """
+    hookenv.status_set("maintenance", "Checking resources")
+    is_leader = is_state("leadership.is_leader")
+
+    # migrate to new flags
+    if is_state("kubernetes-master.restarted-for-cloud"):
+        remove_state("kubernetes-master.restarted-for-cloud")
+        set_state("kubernetes-master.cloud.ready")
+    if is_state("kubernetes-master.cloud-request-sent"):
+        # minor change, just for consistency
+        remove_state("kubernetes-master.cloud-request-sent")
+        set_state("kubernetes-master.cloud.request-sent")
+
+    # ceph-storage.configured flag no longer exists
+    remove_state("ceph-storage.configured")
+
+    # reconfigure ceph. we need this in case we're reverting from ceph-csi back
+    # to old ceph on Kubernetes 1.10 or 1.11
+    remove_state("kubernetes-master.ceph.configured")
+
+    migrate_from_pre_snaps()
+    maybe_install_kube_proxy()
+    update_certificates()
+    switch_auth_mode(forced=True)
+
+    # File-based auth is gone in 1.19; ensure any entries in basic_auth.csv are
+    # added to known_tokens.csv, and any known_tokens entries are created as secrets.
+    if not is_flag_set("kubernetes-master.basic-auth.migrated"):
+        if kubernetes_master.migrate_auth_file(kubernetes_master.AUTH_BASIC_FILE):
+            set_flag("kubernetes-master.basic-auth.migrated")
+        else:
+            # Migration failed; leave the flag unset so it is retried on
+            # the next upgrade-charm.
+            hookenv.log(
+                "Unable to migrate {} to {}".format(
+                    kubernetes_master.AUTH_BASIC_FILE,
+                    kubernetes_master.AUTH_TOKENS_FILE,
+                )
+            )
+    if not is_flag_set("kubernetes-master.token-auth.migrated"):
+        register_auth_webhook()
+        add_rbac_roles()
+        if kubernetes_master.migrate_auth_file(kubernetes_master.AUTH_TOKENS_FILE):
+            set_flag("kubernetes-master.token-auth.migrated")
+        else:
+            hookenv.log(
+                "Unable to migrate {} to Kubernetes secrets".format(
+                    kubernetes_master.AUTH_TOKENS_FILE
+                )
+            )
+    set_state("reconfigure.authentication.setup")
+    remove_state("authentication.setup")
+
+    if not db.get("snap.resources.fingerprint.initialised"):
+        # We are here on an upgrade from non-rolling master
+        # Since this upgrade might also include resource updates eg
+        # juju upgrade-charm kubernetes-master --resource kube-any=my.snap
+        # we take no risk and forcibly upgrade the snaps.
+        # Forcibly means we do not prompt the user to call the upgrade action.
+        set_upgrade_needed(forced=True)
+
+    migrate_resource_checksums(checksum_prefix, snap_resources)
+    if check_resources_for_upgrade_needed(checksum_prefix, snap_resources):
+        set_upgrade_needed()
+
+    # Set the auto storage backend to etcd2.
+    auto_storage_backend = leader_get("auto_storage_backend")
+    if not auto_storage_backend and is_leader:
+        leader_set(auto_storage_backend="etcd2")
+
+    # Preserve the operator's previous enable-kube-dns choice as the new
+    # auto_dns_provider leader setting.
+    if is_leader and not leader_get("auto_dns_provider"):
+        was_kube_dns = hookenv.config().previous("enable-kube-dns")
+        if was_kube_dns is True:
+            leader_set(auto_dns_provider="kube-dns")
+        elif was_kube_dns is False:
+            leader_set(auto_dns_provider="none")
+
+    if is_flag_set("nrpe-external-master.available"):
+        update_nrpe_config()
+
+    remove_state("kubernetes-master.system-monitoring-rbac-role.applied")
+
+
+@hook("pre-series-upgrade")
+def pre_series_upgrade():
+    """Stop the kubernetes master services"""
+    # Pause (stop and disable) each snap daemon for the OS series upgrade.
+    for service in master_services:
+        service_pause("snap.%s.daemon" % service)
+
+
+@hook("post-series-upgrade")
+def post_series_upgrade():
+    """Resume the kubernetes master services after a series upgrade."""
+    for service in master_services:
+        service_resume("snap.%s.daemon" % service)
+    # set ourselves up to restart
+    remove_state("kubernetes-master.components.started")
+
+
+@hook("leader-elected")
+def leader_elected():
+    # New leader must re-run authentication setup.
+    clear_flag("authentication.setup")
+
+
+def add_rbac_roles():
+    """Update the known_tokens file with proper groups.
+
+    Rewrites /root/cdk/known_tokens.csv (keeping a .backup copy):
+    admin entries gain the system:masters group, kube_proxy and
+    kube_controller_manager entries are renamed to their system: users,
+    kubelet/kubelet entries are dropped, everything else is copied as-is.
+
+    DEPRECATED: Once known_tokens are migrated, group data will be stored in K8s
+    secrets. Do not use this function after migrating to authn with secrets.
+    """
+    if is_flag_set("kubernetes-master.token-auth.migrated"):
+        hookenv.log("Known tokens have migrated to secrets. Skipping group changes")
+        return
+    tokens_fname = "/root/cdk/known_tokens.csv"
+    tokens_backup_fname = "/root/cdk/known_tokens.csv.backup"
+    move(tokens_fname, tokens_backup_fname)
+    with open(tokens_fname, "w") as ftokens:
+        with open(tokens_backup_fname, "r") as stream:
+            for line in stream:
+                if line.startswith("#"):
+                    continue
+                record = line.strip().split(",")
+                try:
+                    # valid line looks like: token,username,user,groups
+                    # NOTE(review): the admin rewrite only applies to
+                    # 3-field records (no groups yet) — presumably records
+                    # that already carry groups are copied unchanged below.
+                    if record[2] == "admin" and len(record) == 3:
+                        towrite = '{0},{1},{2},"{3}"\n'.format(
+                            record[0], record[1], record[2], "system:masters"
+                        )
+                        ftokens.write(towrite)
+                        continue
+                    if record[2] == "kube_proxy":
+                        towrite = "{0},{1},{2}\n".format(
+                            record[0], "system:kube-proxy", "kube-proxy"
+                        )
+                        ftokens.write(towrite)
+                        continue
+                    if record[2] == "kube_controller_manager":
+                        towrite = "{0},{1},{2}\n".format(
+                            record[0],
+                            "system:kube-controller-manager",
+                            "kube-controller-manager",
+                        )
+                        ftokens.write(towrite)
+                        continue
+                    # Drop legacy kubelet entries entirely.
+                    if record[2] == "kubelet" and record[1] == "kubelet":
+                        continue
+                except IndexError:
+                    msg = "Skipping invalid line from {}: {}".format(
+                        tokens_backup_fname, line
+                    )
+                    hookenv.log(msg, level=hookenv.DEBUG)
+                    continue
+                else:
+                    # Unmatched records are passed through verbatim.
+                    ftokens.write("{}".format(line))
+
+
+def rename_file_idempotent(source, destination):
+    """Rename source to destination, doing nothing if source is absent."""
+    if os.path.isfile(source):
+        os.rename(source, destination)
+
+
+def migrate_from_pre_snaps():
+    """Clean up state, services, and files left by pre-snap charm versions.
+
+    Removes obsolete reactive states, stops the old systemd-managed
+    services, relocates auth files to /root/cdk, and deletes leftover
+    binaries/unit files. Safe to run repeatedly.
+    """
+    # remove old states
+    remove_state("kubernetes.components.installed")
+    remove_state("kubernetes.dashboard.available")
+    remove_state("kube-dns.available")
+    remove_state("kubernetes-master.app_version.set")
+
+    # disable old services
+    pre_snap_services = ["kube-apiserver", "kube-controller-manager", "kube-scheduler"]
+    for service in pre_snap_services:
+        service_stop(service)
+
+    # rename auth files
+    os.makedirs("/root/cdk", exist_ok=True)
+    rename_file_idempotent(
+        "/etc/kubernetes/serviceaccount.key", "/root/cdk/serviceaccount.key"
+    )
+    rename_file_idempotent("/srv/kubernetes/basic_auth.csv", "/root/cdk/basic_auth.csv")
+    rename_file_idempotent(
+        "/srv/kubernetes/known_tokens.csv", "/root/cdk/known_tokens.csv"
+    )
+
+    # cleanup old files
+    files = [
+        "/lib/systemd/system/kube-apiserver.service",
+        "/lib/systemd/system/kube-controller-manager.service",
+        "/lib/systemd/system/kube-scheduler.service",
+        "/etc/default/kube-defaults",
+        "/etc/default/kube-apiserver.defaults",
+        "/etc/default/kube-controller-manager.defaults",
+        "/etc/default/kube-scheduler.defaults",
+        "/home/ubuntu/kubectl",
+        "/usr/local/bin/kubectl",
+        "/usr/local/bin/kube-apiserver",
+        "/usr/local/bin/kube-controller-manager",
+        "/usr/local/bin/kube-scheduler",
+        "/etc/kubernetes",
+    ]
+    for file in files:
+        if os.path.isdir(file):
+            hookenv.log("Removing directory: " + file)
+            shutil.rmtree(file)
+        elif os.path.isfile(file):
+            hookenv.log("Removing file: " + file)
+            os.remove(file)
+
+
+@when("kubernetes-master.upgrade-specified")
+def do_upgrade():
+    # Reinstall the snaps, then clear the upgrade flags so this only
+    # runs once per requested upgrade.
+    install_snaps()
+    remove_state("kubernetes-master.upgrade-needed")
+    remove_state("kubernetes-master.upgrade-specified")
+
+
+def install_snaps():
+    """Install (or refresh) all master snaps from the configured channel.
+
+    Records resource checksums afterwards so future upgrades can detect
+    resource changes, and clears components.started to force a restart.
+    """
+    channel = hookenv.config("channel")
+    hookenv.status_set("maintenance", "Installing core snap")
+    snap.install("core")
+    hookenv.status_set("maintenance", "Installing kubectl snap")
+    snap.install("kubectl", channel=channel, classic=True)
+    hookenv.status_set("maintenance", "Installing kube-apiserver snap")
+    snap.install("kube-apiserver", channel=channel)
+    hookenv.status_set("maintenance", "Installing kube-controller-manager snap")
+    snap.install("kube-controller-manager", channel=channel)
+    hookenv.status_set("maintenance", "Installing kube-scheduler snap")
+    snap.install("kube-scheduler", channel=channel)
+    hookenv.status_set("maintenance", "Installing cdk-addons snap")
+    snap.install("cdk-addons", channel=channel)
+    hookenv.status_set("maintenance", "Installing kube-proxy snap")
+    snap.install("kube-proxy", channel=channel, classic=True)
+    calculate_and_store_resource_checksums(checksum_prefix, snap_resources)
+    db.set("snap.resources.fingerprint.initialised", True)
+    set_state("kubernetes-master.snaps.installed")
+    remove_state("kubernetes-master.components.started")
+
+
+@when("kubernetes-master.snaps.installed", "leadership.is_leader")
+@when_not("leadership.set.cohort_keys")
+def create_or_update_cohort_keys():
+    """Leader-only: create a snap cohort per snap and publish the keys
+    via leadership data as a JSON mapping of snap name -> cohort key."""
+    cohort_keys = {}
+    for snapname in cohort_snaps:
+        cohort_key = snap.create_cohort_snapshot(snapname)
+        cohort_keys[snapname] = cohort_key
+    leader_set(cohort_keys=json.dumps(cohort_keys))
+    hookenv.log("Snap cohort keys have been created.", level=hookenv.INFO)
+
+    # Prime revision info so we can detect changes later
+    cohort_revs = kubernetes_master.get_snap_revs(cohort_snaps)
+    data_changed("leader-cohort-revs", cohort_revs)
+    hookenv.log(
+        "Tracking cohort revisions: {}".format(cohort_revs), level=hookenv.DEBUG
+    )
+
+
+@when(
+    "kubernetes-master.snaps.installed",
+    "leadership.is_leader",
+    "leadership.set.cohort_keys",
+)
+def check_cohort_updates():
+    """Leader-only: drop the published cohort keys when snap revisions
+    change, which triggers create_or_update_cohort_keys to make new ones."""
+    cohort_revs = kubernetes_master.get_snap_revs(cohort_snaps)
+    if cohort_revs and data_changed("leader-cohort-revs", cohort_revs):
+        leader_set(cohort_keys=None)
+        hookenv.log("Snap cohort revisions have changed.", level=hookenv.INFO)
+
+
+@when("kubernetes-master.snaps.installed", "leadership.set.cohort_keys")
+@when_none("coordinator.granted.cohort", "coordinator.requested.cohort")
+def safely_join_cohort():
+    """Coordinate the rollout of snap refreshes.
+
+    When cohort keys change, grab a lock so that only 1 unit in the
+    application joins the new cohort at a time. This allows us to roll out
+    snap refreshes without risking all units going down at once.
+    """
+    cohort_keys = leader_get("cohort_keys")
+    # NB: initial data-changed is always true
+    if data_changed("leader-cohorts", cohort_keys):
+        # New keys: forget joined/sent progress and queue for the lock.
+        clear_flag("kubernetes-master.cohorts.joined")
+        clear_flag("kubernetes-master.cohorts.sent")
+        charms.coordinator.acquire("cohort")
+
+
+@when(
+    "kubernetes-master.snaps.installed",
+    "leadership.set.cohort_keys",
+    "coordinator.granted.cohort",
+)
+@when_not("kubernetes-master.cohorts.joined")
+def join_or_update_cohorts():
+    """Join or update a cohort snapshot.
+
+    All units of this application (leader and followers) need to refresh their
+    installed snaps to the current cohort snapshot. Runs only while holding
+    the coordinator 'cohort' lock.
+    """
+    cohort_keys = json.loads(leader_get("cohort_keys"))
+    for snapname in cohort_snaps:
+        cohort_key = cohort_keys[snapname]
+        if snap.is_installed(snapname):  # we also manage workers' cohorts
+            hookenv.status_set("maintenance", "Joining snap cohort.")
+            snap.join_cohort_snapshot(snapname, cohort_key)
+    set_flag("kubernetes-master.cohorts.joined")
+    hookenv.log("{} has joined the snap cohort".format(hookenv.local_unit()))
+
+
+@when(
+    "kubernetes-master.snaps.installed",
+    "leadership.set.cohort_keys",
+    "kubernetes-master.cohorts.joined",
+    "kube-control.connected",
+)
+@when_not("kubernetes-master.cohorts.sent")
+def send_cohorts():
+    """Send cohort information to workers.
+
+    If we have peers, wait until all peers are updated before sending.
+    Otherwise, we're a single unit k8s-master and can fire when connected.
+    """
+    cohort_keys = json.loads(leader_get("cohort_keys"))
+    kube_control = endpoint_from_flag("kube-control.connected")
+    kube_masters = endpoint_from_flag("kube-masters.connected")
+
+    # If we have peers, tell them we've joined the cohort. This is needed so
+    # we don't tell workers about cohorts until all masters are in-sync.
+    goal_peers = len(list(hookenv.expected_peer_units()))
+    if goal_peers > 0:
+        if kube_masters:
+            # tell peers about the cohort keys
+            kube_masters.set_cohort_keys(cohort_keys)
+        else:
+            # Peer relation not yet up; retry next hook (sent flag unset).
+            msg = "Waiting for {} peers before setting the cohort.".format(goal_peers)
+            hookenv.log(msg, level=hookenv.DEBUG)
+            return
+
+        if is_flag_set("kube-masters.cohorts.ready"):
+            # tell workers about the cohort keys
+            kube_control.set_cohort_keys(cohort_keys)
+            hookenv.log(
+                "{} (peer) sent cohort keys to workers".format(hookenv.local_unit())
+            )
+        else:
+            msg = "Waiting for k8s-masters to agree on cohorts."
+            hookenv.log(msg, level=hookenv.DEBUG)
+            return
+    else:
+        # tell workers about the cohort keys
+        kube_control.set_cohort_keys(cohort_keys)
+        hookenv.log(
+            "{} (single) sent cohort keys to workers".format(hookenv.local_unit())
+        )
+
+    set_flag("kubernetes-master.cohorts.sent")
+
+
+@when("etcd.available")
+@when("config.changed.enable-metrics")
+def enable_metric_changed():
+    """
+    Trigger an api server update.
+
+    :return: None
+    """
+    clear_flag("kubernetes-master.apiserver.configured")
+
+    # The leader also reconfigures cdk-addons, which consume this setting.
+    if is_state("leadership.is_leader"):
+        configure_cdk_addons()
+
+
+@when("config.changed.client_password", "leadership.is_leader")
+def password_changed():
+    """Handle password change by reconfiguring authentication."""
+    remove_state("authentication.setup")
+
+
+@when("config.changed.storage-backend")
+def storage_backend_changed():
+    # Changing the etcd storage backend requires restarting components.
+    remove_state("kubernetes-master.components.started")
+
+
+@when("cni.connected")
+@when_not("cni.configured")
+def configure_cni(cni):
+    """Set master configuration on the CNI relation. This lets the CNI
+    subordinate know that we're the master so it can respond accordingly."""
+    cni.set_config(is_master=True, kubeconfig_path="")
+
+
+@when("leadership.is_leader")
+@when_not("authentication.setup")
+def setup_leader_authentication():
+    """
+    Setup service accounts and tokens for the cluster.
+
+    As of 1.19 charms, this will also propagate a generic basic_auth.csv, which is
+    merged into known_tokens.csv, which are migrated to secrets during upgrade-charm.
+    """
+    basic_auth = "/root/cdk/basic_auth.csv"
+    known_tokens = "/root/cdk/known_tokens.csv"
+    service_key = "/root/cdk/serviceaccount.key"
+    os.makedirs("/root/cdk", exist_ok=True)
+
+    hookenv.status_set("maintenance", "Rendering authentication templates.")
+
+    keys = [basic_auth, known_tokens, service_key]
+    # Try first to fetch data from an old leadership broadcast.
+    if not get_keys_from_leader(keys) or is_state("reconfigure.authentication.setup"):
+        # No prior leader data (or a reconfigure was requested): deprecate
+        # the csv auth files and mark both migrations complete.
+        kubernetes_master.deprecate_auth_file(basic_auth)
+        set_flag("kubernetes-master.basic-auth.migrated")
+
+        kubernetes_master.deprecate_auth_file(known_tokens)
+        set_flag("kubernetes-master.token-auth.migrated")
+
+        # Generate the default service account token key
+        if not os.path.isfile(service_key):
+            cmd = ["openssl", "genrsa", "-out", service_key, "2048"]
+            check_call(cmd)
+        remove_state("reconfigure.authentication.setup")
+
+    # Write the admin token every time we setup authn to ensure we honor a
+    # configured password.
+    client_pass = hookenv.config("client_password") or get_token("admin")
+    setup_tokens(client_pass, "admin", "admin", "system:masters")
+
+    create_tokens_and_sign_auth_requests()
+
+    # send auth files to followers via leadership data
+    leader_data = {}
+    for f in [basic_auth, known_tokens, service_key]:
+        try:
+            with open(f, "r") as fp:
+                leader_data[f] = fp.read()
+        except FileNotFoundError:
+            pass
+
+    # this is slightly opaque, but we are sending file contents under its file
+    # path as a key.
+    # eg:
+    # {'/root/cdk/serviceaccount.key': 'RSA:2471731...'}
+    leader_set(leader_data)
+
+    remove_state("kubernetes-master.components.started")
+    remove_state("kube-control.requests.changed")
+    set_state("authentication.setup")
+
+
+@when_not("leadership.is_leader")
+def setup_non_leader_authentication():
+    """Follower-side authentication setup.
+
+    Watches the core service tokens for changes (post-1.19 secrets path)
+    and mirrors the leader's legacy auth files, restarting components
+    whenever either changes.
+    """
+    basic_auth = "/root/cdk/basic_auth.csv"
+    known_tokens = "/root/cdk/known_tokens.csv"
+    service_key = "/root/cdk/serviceaccount.key"
+
+    # Starting with 1.19, we don't use csv auth files; handle changing secrets.
+    secrets = {
+        "admin": get_token("admin"),
+        "kube-controller-manager": get_token("system:kube-controller-manager"),
+        "kube-proxy": get_token("system:kube-proxy"),
+        "kube-scheduler": get_token("system:kube-scheduler"),
+    }
+    if data_changed("secrets-data", secrets):
+        set_flag("kubernetes-master.token-auth.migrated")
+        build_kubeconfig()
+        remove_state("kubernetes-master.components.started")
+
+    keys = [basic_auth, known_tokens, service_key]
+    # Pre-secrets, the source of truth for non-leaders is the leader.
+    # Therefore we overwrite_local with whatever the leader has.
+    if not get_keys_from_leader(keys, overwrite_local=True):
+        # the keys were not retrieved. Non-leaders have to retry.
+        return
+
+    if any_file_changed(keys):
+        remove_state("kubernetes-master.components.started")
+
+    # Clear stale creds from the kube-control relation so that the leader can
+    # assume full control of them.
+    kube_control = endpoint_from_flag("kube-control.connected")
+    if kube_control:
+        kube_control.clear_creds()
+
+    remove_state("kube-control.requests.changed")
+    set_state("authentication.setup")
+
+
def get_keys_from_leader(keys, overwrite_local=False):
    """Fetch leader-broadcast auth material and write it to local files.

    Args:
        keys: list of absolute file paths; each path doubles as the
            leadership-data key under which the leader broadcast its content.
        overwrite_local: when True, rewrite a file even if it already exists
            locally.

    Returns:
        True if every requested file exists locally afterwards, False if any
        content is still missing from the leadership broadcast.
    """
    # This races with other codepaths, and seems to require being created
    # first. This block may be extracted later, but for now works as intended.
    os.makedirs("/root/cdk", exist_ok=True)

    for key_file in keys:
        if os.path.exists(key_file) and not overwrite_local:
            # Local copy already present and we were not asked to clobber it.
            continue
        # Fetch data from leadership broadcast.
        contents = leader_get(key_file)
        if contents is None:
            # Log and bail; caller should wait for leader data to be set.
            hookenv.log("Missing content for file {}".format(key_file))
            return False
        # Write out the file and move on to the next item.
        with open(key_file, "w+") as fp:
            fp.write(contents)
            fp.write("\n")

    return True
+
+
@when("kubernetes-master.snaps.installed")
def set_app_version():
    """Surface the installed kube-apiserver version to juju."""
    raw = check_output(["kube-apiserver", "--version"])
    # Keep only what follows the last b" v" and strip trailing whitespace.
    hookenv.application_version_set(raw.split(b" v")[-1].rstrip())
+
+
@hookenv.atstart
def check_vault_pending():
    """Maintain the vault-kv.pending flag at the start of each hook.

    The flag is set while a vault-kv relation exists (in goal-state or
    already connected) but the layer is not yet ready, and cleared
    otherwise.
    """
    try:
        relations = hookenv.goal_state().get("relations", {})
    except NotImplementedError:
        # Older juju controllers have no goal-state support.
        relations = {}
    related = "vault-kv" in relations or is_state("vault-kv.connected")
    if related and not is_state("layer.vault-kv.ready"):
        set_flag("kubernetes-master.vault-kv.pending")
    else:
        clear_flag("kubernetes-master.vault-kv.pending")
+
+
@hookenv.atexit
def set_final_status():
    """ Set the final status of the charm as we leave hook execution """
    # NOTE: checks are ordered roughly most- to least-severe; the first
    # matching condition sets the unit status and returns, so later
    # (healthier) statuses never mask earlier problems.
    try:
        goal_state = hookenv.goal_state()
    except NotImplementedError:
        # Older juju controllers have no goal-state; treat as empty.
        goal_state = {}

    if is_flag_set("upgrade.series.in-progress"):
        hookenv.status_set("blocked", "Series upgrade in progress")
        return

    if not is_flag_set("certificates.available"):
        hookenv.status_set("blocked", "Missing relation to certificate authority.")
        return

    if is_flag_set("kubernetes-master.secure-storage.failed"):
        hookenv.status_set(
            "blocked",
            "Failed to configure encryption; "
            "secrets are unencrypted or inaccessible",
        )
        return
    elif is_flag_set("kubernetes-master.secure-storage.created"):
        if not encryption_config_path().exists():
            hookenv.status_set(
                "blocked", "VaultLocker containing encryption config " "unavailable"
            )
            return

    vsphere_joined = is_state("endpoint.vsphere.joined")
    azure_joined = is_state("endpoint.azure.joined")
    cloud_blocked = is_state("kubernetes-master.cloud.blocked")
    if vsphere_joined and cloud_blocked:
        hookenv.status_set(
            "blocked", "vSphere integration requires K8s 1.12 or greater"
        )
        return
    if azure_joined and cloud_blocked:
        hookenv.status_set("blocked", "Azure integration requires K8s 1.11 or greater")
        return

    if is_state("kubernetes-master.cloud.pending"):
        hookenv.status_set("waiting", "Waiting for cloud integration")
        return

    # Related-but-not-ready is "waiting"; never related is "blocked".
    if not is_state("kube-api-endpoint.available"):
        if "kube-api-endpoint" in goal_state.get("relations", {}):
            status = "waiting"
        else:
            status = "blocked"
        hookenv.status_set(status, "Waiting for kube-api-endpoint relation")
        return

    if not is_state("kube-control.connected"):
        if "kube-control" in goal_state.get("relations", {}):
            status = "waiting"
        else:
            status = "blocked"
        hookenv.status_set(status, "Waiting for workers.")
        return

    ks = endpoint_from_flag("keystone-credentials.available")
    if ks and ks.api_version() == "2":
        msg = "Keystone auth v2 detected. v3 is required."
        hookenv.status_set("blocked", msg)
        return

    upgrade_needed = is_state("kubernetes-master.upgrade-needed")
    upgrade_specified = is_state("kubernetes-master.upgrade-specified")
    if upgrade_needed and not upgrade_specified:
        msg = "Needs manual upgrade, run the upgrade action"
        hookenv.status_set("blocked", msg)
        return

    try:
        get_dns_provider()
    except InvalidDnsProvider as e:
        if e.value == "core-dns":
            msg = "dns-provider=core-dns requires k8s 1.14+"
        else:
            msg = "dns-provider=%s is invalid" % e.value
        hookenv.status_set("blocked", msg)
        return

    if is_state("kubernetes-master.vault-kv.pending"):
        hookenv.status_set(
            "waiting", "Waiting for encryption info from Vault " "to secure secrets"
        )
        return

    if is_state("kubernetes-master.had-service-cidr-expanded"):
        hookenv.status_set(
            "waiting", "Waiting to retry updates for service-cidr expansion"
        )
        return

    auth_setup = is_flag_set("authentication.setup")
    webhook_tokens_setup = is_flag_set("kubernetes-master.auth-webhook-tokens.setup")
    if auth_setup and not webhook_tokens_setup:
        hookenv.status_set("waiting", "Failed to setup auth-webhook tokens; will retry")
        return

    if is_state("kubernetes-master.components.started"):
        # All services should be up and running at this point. Double-check...
        failing_services = master_services_down()
        if len(failing_services) != 0:
            msg = "Stopped services: {}".format(",".join(failing_services))
            hookenv.status_set("blocked", msg)
            return
    else:
        # if we don't have components starting, we're waiting for that and
        # shouldn't fall through to Kubernetes master running.
        if is_state("cni.available"):
            hookenv.status_set("maintenance", "Waiting for master components to start")
        else:
            hookenv.status_set("waiting", "Waiting for CNI plugins to become available")
        return

    # Note that after this point, kubernetes-master.components.started is
    # always True.
    is_leader = is_state("leadership.is_leader")
    authentication_setup = is_state("authentication.setup")
    if not is_leader and not authentication_setup:
        hookenv.status_set("waiting", "Waiting on leader's crypto keys.")
        return

    addons_configured = is_state("cdk-addons.configured")
    if is_leader and not addons_configured:
        hookenv.status_set("waiting", "Waiting to retry addon deployment")
        return

    if is_leader and not is_state(
        "kubernetes-master.system-monitoring-rbac-role.applied"
    ):
        msg = "Waiting to retry applying system:monitoring RBAC role"
        hookenv.status_set("waiting", msg)
        return

    try:
        unready = get_kube_system_pods_not_running()
    except FailedToGetPodStatus:
        hookenv.status_set("waiting", "Waiting for kube-system pods to start")
        return

    if unready:
        plural = "s" if len(unready) > 1 else ""
        msg = "Waiting for {} kube-system pod{} to start"
        msg = msg.format(len(unready), plural)
        hookenv.status_set("waiting", msg)
        return

    service_cidr = kubernetes_master.service_cidr()
    if hookenv.config("service-cidr") != service_cidr:
        msg = "WARN: cannot change service-cidr, still using " + service_cidr
        hookenv.status_set("active", msg)
        return

    gpu_available = is_state("kube-control.gpu.available")
    gpu_enabled = is_state("kubernetes-master.gpu.enabled")
    if gpu_available and not gpu_enabled:
        msg = 'GPUs available. Set allow-privileged="auto" to enable.'
        hookenv.status_set("active", msg)
        return

    if (
        is_state("ceph-storage.available")
        and is_state("ceph-client.connected")
        and is_state("kubernetes-master.privileged")
        and not is_state("kubernetes-master.ceph.configured")
    ):

        ceph_admin = endpoint_from_flag("ceph-storage.available")

        if get_version("kube-apiserver") >= (1, 12) and not ceph_admin.key():
            hookenv.status_set("waiting", "Waiting for Ceph to provide a key.")
            return

    if is_leader and ks and is_flag_set("kubernetes-master.keystone-policy-error"):
        hookenv.status_set("blocked", "Invalid keystone policy file.")
        return

    if (
        is_leader
        and ks
        and not is_flag_set("kubernetes-master.keystone-policy-handled")
    ):
        hookenv.status_set("waiting", "Waiting to apply keystone policy file.")
        return

    # Nothing above matched: everything we can observe looks healthy.
    hookenv.status_set("active", "Kubernetes master running.")
+
+
def master_services_down():
    """Ensure master services are up and running.

    Return: list of failing services"""
    failing_services = []
    for service in master_services:
        daemon = "snap.{}.daemon".format(service)

        # Give each service up to a minute to become active; this is
        # especially needed now that controller-mgr/scheduler/proxy need the
        # apiserver to validate their token against a k8s secret.
        retries = 6
        wait_secs = 10
        for attempt in range(retries):
            hookenv.log(
                "Checking if {} is active ({} / {})".format(daemon, attempt, retries)
            )
            if host.service_running(daemon):
                break
            sleep(wait_secs)
        else:
            # Never saw the daemon running within the retry budget.
            failing_services.append(service)
    return failing_services
+
+
def add_systemd_file_limit():
    """Raise the open-file limit for the kube-apiserver snap service.

    Drops a systemd drop-in that sets LimitNOFILE=65535 for
    snap.kube-apiserver.daemon. The file is written only if absent, so an
    operator's manual edits survive repeated hook runs.
    """
    directory = "/etc/systemd/system/snap.kube-apiserver.daemon.service.d"
    # exist_ok avoids the TOCTOU race the previous isdir()-then-makedirs()
    # dance had: makedirs raised if the directory appeared in between.
    os.makedirs(directory, exist_ok=True)

    file_name = "file-limit.conf"
    path = os.path.join(directory, file_name)
    if not os.path.isfile(path):
        with open(path, "w") as f:
            f.write("[Service]\n")
            f.write("LimitNOFILE=65535")
+
+
def add_systemd_restart_always():
    """Install an always-restart systemd drop-in for every master service.

    Chooses a template based on the detected systemd version; on any
    detection failure it logs and falls back to the latest template.
    """
    template = "templates/service-always-restart.systemd-latest.conf"

    try:
        # Detect the systemd version from the first line of its output.
        first_line = check_output(["systemd", "--version"]).decode("UTF-8").splitlines()[0]
        name, version = first_line.split()[:2]
        assert name == "systemd"
        if int(version) < 230:
            # Old systemd (for xenial support) needs the legacy template.
            template = "templates/service-always-restart.systemd-229.conf"
    except Exception:
        traceback.print_exc()
        hookenv.log(
            "Failed to detect systemd version, using latest template", level="ERROR"
        )

    for service in master_services:
        dest_dir = "/etc/systemd/system/snap.{}.daemon.service.d".format(service)
        os.makedirs(dest_dir, exist_ok=True)
        copyfile(template, "{}/always-restart.conf".format(dest_dir))
+
+
def add_systemd_file_watcher():
    """Setup systemd file-watcher service.

    This service watches these files for changes:

        /root/cdk/known_tokens.csv
        /root/cdk/serviceaccount.key

    If a file is changed, the service uses juju-run to invoke a script in a
    hook context on this unit. If this unit is the leader, the script will
    call leader-set to distribute the contents of these files to the
    non-leaders so they can sync their local copies to match.
    """
    # (template, destination, context, perms) for everything we render.
    artifacts = [
        (
            "cdk.master.leader.file-watcher.sh",
            "/usr/local/sbin/cdk.master.leader.file-watcher.sh",
            {},
            0o777,
        ),
        (
            "cdk.master.leader.file-watcher.service",
            "/etc/systemd/system/cdk.master.leader.file-watcher.service",
            {"unit": hookenv.local_unit()},
            0o644,
        ),
        (
            "cdk.master.leader.file-watcher.path",
            "/etc/systemd/system/cdk.master.leader.file-watcher.path",
            {},
            0o644,
        ),
    ]
    for template, destination, context, mode in artifacts:
        render(template, destination, context, perms=mode)
    service_resume("cdk.master.leader.file-watcher.path")
+
+
def _webhook_server_endpoint(path, label):
    """Return clusters[0].cluster.server from a kubeconfig-style YAML file.

    Args:
        path: filesystem path of the webhook kubeconfig.
        label: human-readable name used in the failure log message.

    Returns the server URL string, or None when the file is missing or the
    expected structure is absent (logged).
    """
    webhook = Path(path)
    if not webhook.exists():
        return None
    data = yaml.safe_load(webhook.read_text())
    try:
        # IndexError covers an empty "clusters" list, which previously
        # escaped the handler entirely.
        return data["clusters"][0]["cluster"]["server"]
    except (KeyError, TypeError, IndexError):
        hookenv.log("Unable to find server in {} webhook: {}".format(label, data))
        return None


@when("etcd.available", "tls_client.certs.saved")
@restart_on_change(
    {
        auth_webhook_conf: ["cdk.master.auth-webhook"],
        auth_webhook_exe: ["cdk.master.auth-webhook"],
        auth_webhook_svc: ["cdk.master.auth-webhook"],
    }
)
def register_auth_webhook():
    """Render auth webhook templates and start the related service."""
    os.makedirs(auth_webhook_root, exist_ok=True)
    config = hookenv.config()

    # For 'api_ver', match the api version of the authentication.k8s.io
    # TokenReview that k8s-apiserver will be sending:
    # https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.18
    context = {
        "api_ver": "v1beta1",
        "charm_dir": hookenv.charm_dir(),
        "host": get_ingress_address("kube-api-endpoint"),
        "pidfile": "auth-webhook.pid",
        "port": 5000,
        "root_dir": auth_webhook_root,
    }

    # Optional upstream authenticators; each stays None when unavailable.
    context["aws_iam_endpoint"] = None
    if endpoint_from_flag("endpoint.aws-iam.ready"):
        context["aws_iam_endpoint"] = _webhook_server_endpoint(
            aws_iam_webhook, "AWS IAM"
        )

    context["keystone_endpoint"] = None
    if endpoint_from_flag("keystone-credentials.available"):
        context["keystone_endpoint"] = _webhook_server_endpoint(
            Path(keystone_root) / "webhook.yaml", "Keystone"
        )

    # Operator-supplied custom endpoint, if any (None when unset/empty).
    context["custom_authn_endpoint"] = config.get("authn-webhook-endpoint") or None

    render("cdk.master.auth-webhook-conf.yaml", auth_webhook_conf, context)
    render("cdk.master.auth-webhook.py", auth_webhook_exe, context)
    render(
        "cdk.master.auth-webhook.logrotate", "/etc/logrotate.d/auth-webhook", context
    )

    # Set the number of gunicorn workers based on our core count. (2*cores)+1
    # is recommended:
    # https://docs.gunicorn.org/en/stable/design.html#how-many-workers
    try:
        cores = int(check_output(["nproc"]).decode("utf-8").strip())
    except (CalledProcessError, OSError, ValueError):
        # nproc failed, is missing (FileNotFoundError is an OSError), or gave
        # unparsable output. Our default architecture is 2-cores for
        # k8s-master units.
        cores = 2
    else:
        # Put an upper bound on cores; more than 12ish workers is overkill
        cores = min(cores, 6)
    context["num_workers"] = cores * 2 + 1
    render("cdk.master.auth-webhook.service", auth_webhook_svc, context)
    if any_file_changed([auth_webhook_svc]):
        # if the service file has changed (or is new),
        # we have to inform systemd about it
        check_call(["systemctl", "daemon-reload"])
    if not is_flag_set("kubernetes-master.auth-webhook-service.started"):
        if service_resume("cdk.master.auth-webhook"):
            set_flag("kubernetes-master.auth-webhook-service.started")
            clear_flag("kubernetes-master.apiserver.configured")
        else:
            hookenv.status_set(
                "maintenance", "Waiting for cdk.master.auth-webhook to start."
            )
            hookenv.log("cdk.master.auth-webhook failed to start; will retry")
+
+
@when(
    "kubernetes-master.apiserver.configured",
    "kubernetes-master.auth-webhook-service.started",
    "authentication.setup",
)
@when_not("kubernetes-master.auth-webhook-tokens.setup")
def setup_auth_webhook_tokens():
    """Reconfigure authentication to setup auth-webhook tokens.

    If authentication has been setup with a non-auth-webhook configuration,
    convert it to use auth-webhook tokens instead. Alternatively, if the
    auth-webhook setup failed, this will also ensure that it is retried.
    """
    # Even if the apiserver is configured, it may not be fully started.
    # Only proceed once secrets can actually be read.
    if not kubectl_success("get", "secrets"):
        hookenv.log("Secrets are not yet available; will retry")
        return
    if create_tokens_and_sign_auth_requests():
        # Force setup_leader_authentication to be re-run.
        remove_state("authentication.setup")
+
+
@when(
    "etcd.available",
    "tls_client.certs.saved",
    "authentication.setup",
    "leadership.set.auto_storage_backend",
    "leadership.set.cluster_tag",
    "cni.available",
)
@when_not(
    "kubernetes-master.components.started",
    "kubernetes-master.cloud.pending",
    "kubernetes-master.cloud.blocked",
    "kubernetes-master.vault-kv.pending",
    "tls_client.certs.changed",
    "tls_client.ca.written",
    "upgrade.series.in-progress",
)
def start_master():
    """Run the Kubernetes master components."""
    hookenv.status_set("maintenance", "Configuring the Kubernetes master services.")

    # Without Vault-backed secure storage, write an encryption config that
    # uses only the no-op "identity" provider so the file always exists.
    if not is_state("kubernetes-master.vault-kv.pending") and not is_state(
        "kubernetes-master.secure-storage.created"
    ):
        encryption_config_path().parent.mkdir(parents=True, exist_ok=True)
        host.write_file(
            path=str(encryption_config_path()),
            perms=0o600,
            content=yaml.safe_dump(
                {
                    "kind": "EncryptionConfig",
                    "apiVersion": "v1",
                    "resources": [
                        {"resources": ["secrets"], "providers": [{"identity": {}}]}
                    ],
                }
            ),
        )

    kubernetes_master.freeze_service_cidr()

    etcd = endpoint_from_flag("etcd.available")
    if not etcd.get_connection_string():
        # etcd is not returning a connection string. This happens when
        # the master unit disconnects from etcd and is ready to terminate.
        # No point in trying to start master services and fail. Just return.
        return

    # TODO: Make sure below relation is handled on change
    # https://github.com/kubernetes/kubernetes/issues/43461
    handle_etcd_relation(etcd)

    # Set up additional systemd services
    add_systemd_restart_always()
    add_systemd_file_limit()
    add_systemd_file_watcher()
    add_systemd_iptables_patch()
    check_call(["systemctl", "daemon-reload"])

    # Add CLI options to all components
    clear_flag("kubernetes-master.apiserver.configured")
    configure_controller_manager()
    configure_scheduler()

    # kube-proxy
    cluster_cidr = kubernetes_common.cluster_cidr()
    if kubernetes_common.is_ipv6(cluster_cidr):
        kubernetes_common.enable_ipv6_forwarding()

    # Point kube-proxy at this unit's own apiserver endpoint.
    local_address = get_ingress_address("kube-api-endpoint")
    local_server = "https://{0}:{1}".format(local_address, 6443)

    configure_kube_proxy(configure_prefix, [local_server], cluster_cidr)
    service_restart("snap.kube-proxy.daemon")

    set_state("kubernetes-master.components.started")
    hookenv.open_port(6443)
+
+
@when("config.changed.proxy-extra-args")
def proxy_args_changed():
    """Trigger a component restart when proxy-extra-args changes."""
    for flag in (
        "kubernetes-master.components.started",
        "config.changed.proxy-extra-args",
    ):
        clear_flag(flag)
+
+
@when("tls_client.certs.changed")
def certs_changed():
    """Trigger a component restart when TLS certificates change."""
    for flag in (
        "kubernetes-master.components.started",
        "tls_client.certs.changed",
    ):
        clear_flag(flag)
+
+
@when("tls_client.ca.written")
def ca_written():
    """React to a freshly written CA certificate.

    Restarts the master components; on the leader, additionally flags the
    addons for a restart when they were deployed with the previous CA.
    """
    clear_flag("kubernetes-master.components.started")
    on_leader = is_state("leadership.is_leader")
    if on_leader and leader_get("kubernetes-master-addons-ca-in-use"):
        leader_set({"kubernetes-master-addons-restart-for-ca": True})
    clear_flag("tls_client.ca.written")
+
+
@when("etcd.available")
def etcd_data_change(etcd):
    """Handle etcd topology and credential changes.

    Etcd scale events block master reconfiguration due to the
    kubernetes-master.components.started state. We need a way to handle
    these events consistently only when the etcd cluster actually changed.

    Args:
        etcd: the etcd relation endpoint object.
    """
    # Key off of the connection string; if it changed, remove the started
    # state to trigger handling of the master components.
    connection_string = etcd.get_connection_string()
    if data_changed("etcd-connect", connection_string):
        remove_state("kubernetes-master.components.started")

    # Likewise for the client cert info. NB: previously this branch used
    # clear_flag while the one above used remove_state for the very same
    # flag; use remove_state consistently.
    if data_changed("etcd-certs", etcd.get_client_credentials()):
        remove_state("kubernetes-master.components.started")

    # We are the leader and auto_storage_backend is not set, meaning this is
    # the first time we connect to etcd: record which storage backend the
    # apiserver should use, based on the etcd major version.
    if is_state("leadership.is_leader") and not leader_get("auto_storage_backend"):
        backend = "etcd3" if etcd.get_version().startswith("3.") else "etcd2"
        leader_set(auto_storage_backend=backend)
+
+
@when("kube-control.connected")
@when("cdk-addons.configured")
def send_cluster_dns_detail(kube_control):
    """ Send cluster DNS info """
    # A relation-provided DNS service takes precedence over the addon.
    dns_provider = endpoint_from_flag("dns-provider.available")
    if dns_provider:
        details = dns_provider.details()
        kube_control.set_dns(
            details["port"], details["domain"], details["sdn-ip"], True
        )
        return

    try:
        provider_name = get_dns_provider()
    except InvalidDnsProvider:
        hookenv.log(traceback.format_exc())
        return
    dns_enabled = provider_name != "none"
    dns_ip = None
    if dns_enabled:
        try:
            dns_ip = kubernetes_master.get_dns_ip()
        except CalledProcessError:
            hookenv.log("DNS addon service not ready yet")
            return
    kube_control.set_dns(53, hookenv.config("dns_domain"), dns_ip, dns_enabled)
+
+
def _ensure_token(username, userid, group=None):
    """Return the token for username, creating it first when absent.

    Args:
        username: the k8s username the token authenticates.
        userid: the user ID recorded alongside the token.
        group: optional group passed through to setup_tokens.

    Returns the token, or a falsy value when it could not be created yet
    (e.g. the apiserver is not up); callers should retry later.
    """
    token = get_token(username)
    if not token:
        if group is not None:
            setup_tokens(None, username, userid, group)
        else:
            setup_tokens(None, username, userid)
        token = get_token(username)
    return token


def create_tokens_and_sign_auth_requests():
    """Create tokens for CK users and services.

    Returns True when all required tokens exist and every pending worker
    auth request was signed; False when anything still needs a retry.
    """
    clear_flag("kubernetes-master.auth-webhook-tokens.setup")
    # NB: This may be called before kube-apiserver is up when bootstrapping new
    # clusters with auth-webhook. In this case, setup_tokens will be a no-op.
    # We will re-enter this function once master services are available to
    # create proper secrets.
    # The repeated get-create-get dance is factored into _ensure_token.
    _ensure_token("system:kube-controller-manager", "kube-controller-manager")
    proxy_token = _ensure_token("system:kube-proxy", "kube-proxy")
    _ensure_token("system:kube-scheduler", "system:kube-scheduler")
    client_token = _ensure_token("admin", "admin", "system:masters")
    _ensure_token("system:monitoring", "system:monitoring")

    if not (proxy_token and client_token):
        # When bootstrapping a new cluster, we may not have all our secrets yet.
        # Do not let the kubelets start without all the needed tokens.
        hookenv.log(
            "Missing required tokens for kubelet startup; will retry", hookenv.WARNING
        )
        return False

    kube_control = endpoint_from_flag("kube-control.connected")
    requests = kube_control.auth_user() if kube_control else []
    any_failed = False
    for request in requests:
        username = request[1]["user"]
        group = request[1]["group"]
        if not username or not group:
            continue
        # Username will be in the form of system:node:<name>.
        # User ID will be a worker, and while not used today, we store
        # this in case it becomes useful to map a secret to a unit in the
        # future.
        kubelet_token = _ensure_token(username, request[0], group)
        if not kubelet_token:
            hookenv.log(
                "Failed to create token for {}; will retry".format(username),
                hookenv.WARNING,
            )
            any_failed = True
            continue
        kube_control.sign_auth_request(
            request[0], username, kubelet_token, proxy_token, client_token
        )
    if any_failed:
        return False
    set_flag("kubernetes-master.auth-webhook-tokens.setup")
    return True
+
+
@when("kube-api-endpoint.available")
def push_service_data():
    """Send configuration to the load balancer, and close access to the
    public interface"""
    kube_api = endpoint_from_flag("kube-api-endpoint.available")

    external_endpoints = kubernetes_master.get_external_lb_endpoints()
    if not external_endpoints:
        # No external addresses configured, so rely on the interface layer
        # to use the ingress address for each relation.
        kube_api.configure(kubernetes_master.STANDARD_API_PORT)
        return

    addresses = [endpoint[0] for endpoint in external_endpoints]
    kube_api.configure(kubernetes_master.STANDARD_API_PORT, addresses, addresses)
+
+
@when("certificates.available", "kube-api-endpoint.available", "cni.available")
def send_data():
    """Send the data that is required to create a server certificate for
    this server."""
    kube_api_endpoint = endpoint_from_flag("kube-api-endpoint.available")

    # Use the public ip of this unit as the Common Name for the certificate.
    common_name = hookenv.unit_public_ip()

    cluster_cidr = kubernetes_common.cluster_cidr()
    domain = hookenv.config("dns_domain")

    # Gather every address/name the server cert must cover; a set collapses
    # duplicates as we go.
    sans = {
        # The CN field is checked as a hostname, so if it's an IP, it
        # won't match unless also included in the SANs as an IP field.
        common_name,
        # Ingress address (probably already covered by the bind addresses,
        # but listed explicitly as well just in case it's not).
        get_ingress_address(kube_api_endpoint.endpoint_name),
        socket.gethostname(),
        socket.getfqdn(),
        "kubernetes",
        "kubernetes.{0}".format(domain),
        "kubernetes.default",
        "kubernetes.default.svc",
        "kubernetes.default.svc.{0}".format(domain),
    }
    # The SDN gateways based on the service CIDRs.
    sans.update(kubernetes_master.get_kubernetes_service_ips())
    sans.update(
        kubernetes_common.get_bind_addrs(
            ipv4=kubernetes_common.is_ipv4(cluster_cidr),
            ipv6=kubernetes_common.is_ipv6(cluster_cidr),
        )
    )
    sans.update(endpoint[0] for endpoint in kubernetes_master.get_lb_endpoints())

    # Maybe they have extra names they want as SANs.
    extra_sans = hookenv.config("extra_sans")
    if extra_sans:
        sans.update(extra_sans.split())

    # Request a server cert with this information.
    tls_client.request_server_cert(
        common_name,
        sorted(sans),
        crt_path=server_crt_path,
        key_path=server_key_path,
    )

    # Request a client cert for kubelet.
    tls_client.request_client_cert(
        "system:kube-apiserver", crt_path=client_crt_path, key_path=client_key_path
    )
+
+
@when(
    "config.changed.extra_sans", "certificates.available", "kube-api-endpoint.available"
)
def update_certificates():
    """Re-request certificates when extra_sans changes.

    NOTE: This handler may be called by another function. send_data requires
    both relationships, so bail out (and log) until they are available.
    """
    missing_relations = get_unset_flags(
        "certificates.available", "kube-api-endpoint.available"
    )
    if missing_relations:
        hookenv.log(
            "Missing relations: '{}'".format(", ".join(missing_relations)),
            hookenv.ERROR,
        )
        return

    # The config.changed.extra_sans flag catches changes. IP changes will
    # take ~5 minutes or so to propagate, but it will update.
    send_data()
    clear_flag("config.changed.extra_sans")
+
+
@when(
    "kubernetes-master.components.started",
    "leadership.is_leader",
    "cdk-addons.reconfigure",
)
def reconfigure_cdk_addons():
    """Re-run addon configuration when cdk-addons.reconfigure is set."""
    configure_cdk_addons()
+
+
@when(
    "kubernetes-master.components.started",
    "leadership.is_leader",
    "leadership.set.cluster_tag",
)
@when_not("upgrade.series.in-progress")
def configure_cdk_addons():
    """ Configure CDK addons """
    # Reset change-tracking flags up front; they are re-set elsewhere when
    # the corresponding inputs change again.
    remove_state("cdk-addons.reconfigure")
    remove_state("cdk-addons.configured")
    remove_state("kubernetes-master.aws.changed")
    remove_state("kubernetes-master.azure.changed")
    remove_state("kubernetes-master.gcp.changed")
    remove_state("kubernetes-master.openstack.changed")
    load_gpu_plugin = hookenv.config("enable-nvidia-plugin").lower()
    gpuEnable = (
        get_version("kube-apiserver") >= (1, 9)
        and load_gpu_plugin == "auto"
        and is_state("kubernetes-master.gpu.enabled")
    )
    # addons-registry is deprecated in 1.15, but it should take precedence
    # when configuring the cdk-addons snap until 1.17 is released.
    registry = hookenv.config("addons-registry")
    if registry and get_version("kube-apiserver") < (1, 17):
        hookenv.log("addons-registry is deprecated; " "use image-registry instead")
    else:
        registry = hookenv.config("image-registry")
    dbEnabled = str(hookenv.config("enable-dashboard-addons")).lower()
    try:
        dnsProvider = get_dns_provider()
    except InvalidDnsProvider:
        hookenv.log(traceback.format_exc())
        return
    metricsEnabled = str(hookenv.config("enable-metrics")).lower()
    # Ceph is only wired up when the relation has full data, the charm has
    # finished its ceph configuration, and the apiserver is new enough.
    default_storage = ""
    ceph = {}
    ceph_ep = endpoint_from_flag("ceph-storage.available")
    cephfs_mounter = hookenv.config("cephfs-mounter")
    if (
        ceph_ep
        and ceph_ep.key()
        and ceph_ep.fsid()
        and ceph_ep.mon_hosts()
        and is_state("kubernetes-master.ceph.configured")
        and get_version("kube-apiserver") >= (1, 12)
    ):
        cephEnabled = "true"
        b64_ceph_key = base64.b64encode(ceph_ep.key().encode("utf-8"))
        ceph["admin_key"] = b64_ceph_key.decode("ascii")
        ceph["fsid"] = ceph_ep.fsid()
        ceph["kubernetes_key"] = b64_ceph_key.decode("ascii")
        ceph["mon_hosts"] = ceph_ep.mon_hosts()
        default_storage = hookenv.config("default-storage")
        if kubernetes_master.query_cephfs_enabled():
            cephFsEnabled = "true"
            ceph["fsname"] = kubernetes_master.get_cephfs_fsname() or ""
        else:
            cephFsEnabled = "false"
    else:
        cephEnabled = "false"
        cephFsEnabled = "false"

    keystone = {}
    ks = endpoint_from_flag("keystone-credentials.available")
    if ks:
        keystoneEnabled = "true"
        keystone["cert"] = "/root/cdk/server.crt"
        keystone["key"] = "/root/cdk/server.key"
        keystone["url"] = "{}://{}:{}/v{}".format(
            ks.credentials_protocol(),
            ks.credentials_host(),
            ks.credentials_port(),
            ks.api_version(),
        )
        keystone["keystone-ca"] = hookenv.config("keystone-ssl-ca")
    else:
        keystoneEnabled = "false"

    enable_aws = str(is_flag_set("endpoint.aws.ready")).lower()
    enable_azure = str(is_flag_set("endpoint.azure.ready")).lower()
    enable_gcp = str(is_flag_set("endpoint.gcp.ready")).lower()
    enable_openstack = str(is_flag_set("endpoint.openstack.ready")).lower()
    openstack = endpoint_from_flag("endpoint.openstack.ready")

    if is_state("kubernetes-master.cdk-addons.unique-cluster-tag"):
        cluster_tag = leader_get("cluster_tag")
    else:
        # allow for older upgraded charms to control when they start sending
        # the unique cluster tag to cdk-addons
        cluster_tag = "kubernetes"

    # Everything below is passed to the cdk-addons snap via `snap set`.
    args = [
        "kubeconfig=" + cdk_addons_kubectl_config_path,
        "arch=" + arch(),
        "dns-domain=" + hookenv.config("dns_domain"),
        "registry=" + registry,
        "enable-dashboard=" + dbEnabled,
        "enable-metrics=" + metricsEnabled,
        "enable-gpu=" + str(gpuEnable).lower(),
        "enable-ceph=" + cephEnabled,
        "enable-cephfs=" + cephFsEnabled,
        "cephfs-mounter=" + cephfs_mounter,
        "ceph-admin-key=" + (ceph.get("admin_key", "")),
        "ceph-fsid=" + (ceph.get("fsid", "")),
        "ceph-fsname=" + (ceph.get("fsname", "")),
        "ceph-kubernetes-key=" + (ceph.get("admin_key", "")),
        'ceph-mon-hosts="' + (ceph.get("mon_hosts", "")) + '"',
        "default-storage=" + default_storage,
        "enable-keystone=" + keystoneEnabled,
        "keystone-cert-file=" + keystone.get("cert", ""),
        "keystone-key-file=" + keystone.get("key", ""),
        "keystone-server-url=" + keystone.get("url", ""),
        "keystone-server-ca=" + keystone.get("keystone-ca", ""),
        "dashboard-auth=token",
        "enable-aws=" + enable_aws,
        "enable-azure=" + enable_azure,
        "enable-gcp=" + enable_gcp,
        "enable-openstack=" + enable_openstack,
        "monitorstorage=" + hookenv.config("monitoring-storage"),
        "cluster-tag=" + cluster_tag,
    ]
    if openstack:
        args.extend(
            [
                "openstack-cloud-conf="
                + base64.b64encode(
                    generate_openstack_cloud_config().encode("utf-8")
                ).decode("utf-8"),
                "openstack-endpoint-ca=" + (openstack.endpoint_tls_ca or ""),
            ]
        )
    if get_version("kube-apiserver") >= (1, 14):
        args.append("dns-provider=" + dnsProvider)
    else:
        enableKubeDNS = dnsProvider == "kube-dns"
        args.append("enable-kube-dns=" + str(enableKubeDNS).lower())
    check_call(["snap", "set", "cdk-addons"] + args)
    if not addons_ready():
        # Leave cdk-addons.configured cleared so this handler retries.
        remove_state("cdk-addons.configured")
        return

    set_state("cdk-addons.configured")
    leader_set({"kubernetes-master-addons-ca-in-use": True})
    if ks:
        leader_set({"keystone-cdk-addons-configured": True})
    else:
        leader_set({"keystone-cdk-addons-configured": None})
+
+
@retry(times=3, delay_secs=20)
def addons_ready():
    """
    Test if the addons got installed.

    Returns: True if the addons got applied, False otherwise.
    """
    try:
        # cdk-addons.apply exits non-zero until the addon manifests can be
        # applied to the cluster; the @retry decorator gives it 3 attempts
        # 20 seconds apart before giving up.
        check_call(["cdk-addons.apply"])
        return True
    except CalledProcessError:
        hookenv.log("Addons are not ready yet.")
        return False
+
+
@when("ceph-storage.available")
def ceph_state_control():
    """Determine if we should remove the state that controls the re-render
    and execution of the ceph-relation-changed event because there
    are changes in the relationship data, and we should re-render any
    configs, keys, and/or service pre-reqs"""

    ceph_admin = endpoint_from_flag("ceph-storage.available")
    # Snapshot of the relation data we care about; used only as a change
    # detector via data_changed below.
    ceph_relation_data = {
        "mon_hosts": ceph_admin.mon_hosts(),
        "fsid": ceph_admin.fsid(),
        "auth_supported": ceph_admin.auth(),
        "hostname": socket.gethostname(),
        "key": ceph_admin.key(),
    }

    # Re-execute the rendering if the data has changed.
    if data_changed("ceph-config", ceph_relation_data):
        remove_state("kubernetes-master.ceph.configured")
+
+
@when("kubernetes-master.ceph.configured")
@when_not("ceph-storage.available")
def ceph_storage_gone():
    """Ceph relation has departed: force an apiserver reconfiguration and
    drop the ceph-configured state so a future relation starts fresh."""
    # ceph has left, so clean up
    clear_flag("kubernetes-master.apiserver.configured")
    remove_state("kubernetes-master.ceph.configured")
+
+
@when("kubernetes-master.ceph.pools.created")
@when_not("ceph-client.connected")
def ceph_client_gone():
    # can't nuke pools, but we can't be certain that they
    # are still made when a new relation comes in
    # NOTE(review): this clears "...ceph.pools.created" (plural) while
    # ceph_storage_pool sets "...ceph.pool.created" (singular) — confirm
    # whether these are intentionally distinct flags.
    remove_state("kubernetes-master.ceph.pools.created")
+
+
@when("etcd.available")
@when("ceph-storage.available")
@when_not("kubernetes-master.privileged")
@when_not("kubernetes-master.ceph.configured")
def ceph_storage_privilege():
    """
    Before we configure Ceph, we
    need to allow the master to
    run privileged containers.

    Clearing the apiserver.configured flag triggers configure_apiserver,
    which re-evaluates is_privileged() (ceph-storage.available implies
    privileged when allow-privileged=auto).

    :return: None
    """
    clear_flag("kubernetes-master.apiserver.configured")
+
+
@when("ceph-client.connected")
@when("kubernetes-master.ceph.configured")
@when_not("kubernetes-master.ceph.pool.created")
def ceph_storage_pool():
    """Once Ceph relation is ready,
    we need to add storage pools.

    :return: None
    """
    hookenv.log("Creating Ceph pools.")
    ceph_client = endpoint_from_flag("ceph-client.connected")

    # One pool per supported filesystem type.
    pools = ["xfs-pool", "ext4-pool"]

    for pool in pools:
        hookenv.status_set("maintenance", "Creating {} pool.".format(pool))
        try:
            ceph_client.create_pool(name=pool, replicas=3)
        except Exception as e:
            # Surface the failure in unit status but keep going; the state
            # below is set regardless so this handler does not loop forever.
            hookenv.status_set("blocked", "Error creating {} pool: {}.".format(pool, e))

    set_state("kubernetes-master.ceph.pool.created")
+
+
@when("ceph-storage.available")
@when("kubernetes-master.privileged")
@when_not("kubernetes-master.ceph.configured")
def ceph_storage():
    """Ceph on kubernetes will require a few things - namely a ceph
    configuration, and the ceph secret key file used for authentication.
    This method will install the client package, and render the requisite
    files in order to consume the ceph-storage relation.

    Sets kubernetes-master.ceph.configured on success; returns without
    setting state so the handler re-executes when prerequisites are missing.
    """
    hookenv.log("Configuring Ceph.")

    ceph_admin = endpoint_from_flag("ceph-storage.available")

    # Fetch the key once instead of calling ceph_admin.key() repeatedly.
    # Without a key we cannot proceed on any version; do not set state and
    # allow this method to re-execute (>=1.12 uses CSI but still needs the
    # key to arrive from Ceph).
    admin_key = ceph_admin.key()
    if not admin_key:
        return

    # Enlist the ceph-admin key as a kubernetes secret
    encoded_key = base64.b64encode(admin_key.encode("utf-8"))

    # CSI isn't available before 1.12, so we need to do it ourselves.
    if get_version("kube-apiserver") < (1, 12):
        try:
            # At first glance this is deceptive. The apply stanza will
            # create if it doesn't exist, otherwise it will update the
            # entry, ensuring our ceph-secret is always reflective of
            # what we have in /etc/ceph assuming we have invoked this
            # anytime that file would change.
            context = {"secret": encoded_key.decode("ascii")}
            render("ceph-secret.yaml", "/tmp/ceph-secret.yaml", context)
            cmd = ["kubectl", "apply", "-f", "/tmp/ceph-secret.yaml"]
            check_call(cmd)
            os.remove("/tmp/ceph-secret.yaml")
            set_state("kubernetes-master.ceph.pool.created")
        except Exception:
            # The enlistment in kubernetes failed; log why (the original
            # bare `except:` hid the error and also swallowed
            # SystemExit/KeyboardInterrupt), then return and
            # prepare for re-exec.
            hookenv.log(traceback.format_exc())
            return

    # When complete, set a state relating to configuration of the storage
    # backend that will allow other modules to hook into this and verify we
    # have performed the necessary pre-req steps to interface with a ceph
    # deployment.
    set_state("kubernetes-master.ceph.configured")
+
+
@when("nrpe-external-master.available")
@when_not("nrpe-external-master.initial-config")
def initial_nrpe_config():
    """First-time NRPE setup: mark initial config done, then reuse the
    regular update path to write the checks."""
    set_state("nrpe-external-master.initial-config")
    update_nrpe_config()
+
+
@when("config.changed.authorization-mode")
def switch_auth_mode(forced=False):
    """React to a changed 'authorization-mode' config value.

    Toggles the create/remove RBAC flags depending on whether the new mode
    mentions RBAC, and schedules a component restart. `forced` bypasses the
    data_changed check (data_changed is still invoked to record the value).
    """
    mode = hookenv.config().get("authorization-mode")

    if not (data_changed("auth-mode", mode) or forced):
        return

    # manage flags to handle rbac related resources
    rbac_requested = bool(mode) and "rbac" in mode.lower()
    if rbac_requested:
        remove_state("kubernetes-master.remove.rbac")
        set_state("kubernetes-master.create.rbac")
    else:
        remove_state("kubernetes-master.create.rbac")
        set_state("kubernetes-master.remove.rbac")

    # set ourselves up to restart since auth mode has changed
    remove_state("kubernetes-master.components.started")
+
+
@when("leadership.is_leader", "kubernetes-master.components.started")
@when_not("kubernetes-master.pod-security-policy.applied")
def create_pod_security_policy_resources():
    """Render and apply the pod security policy manifest (leader only).

    On failure the 'applied' state is not set, so the handler retries on
    the next hook invocation.
    """
    pod_security_policy_path = "/root/cdk/pod-security-policy.yaml"

    render("rbac-pod-security-policy.yaml", pod_security_policy_path, {})

    hookenv.log("Creating pod security policy resources.")
    if kubectl_manifest("apply", pod_security_policy_path):
        set_state("kubernetes-master.pod-security-policy.applied")
    else:
        msg = "Failed to apply {}, will retry.".format(pod_security_policy_path)
        hookenv.log(msg)
+
+
@when(
    "leadership.is_leader",
    "kubernetes-master.components.started",
    "kubernetes-master.create.rbac",
)
def create_rbac_resources():
    """Render and apply the proxy-related RBAC manifest (leader only).

    Clears the create.rbac flag on success; otherwise logs and retries.
    """
    rbac_proxy_path = "/root/cdk/rbac-proxy.yaml"

    # NB: when metrics and logs are retrieved by proxy, the 'user' is the
    # common name of the cert used to authenticate the proxied request.
    # The CN for /root/cdk/client.crt is 'system:kube-apiserver'
    # (see the send_data handler, above).
    proxy_users = ["client", "system:kube-apiserver"]

    context = {"juju_application": hookenv.service_name(), "proxy_users": proxy_users}
    render("rbac-proxy.yaml", rbac_proxy_path, context)

    hookenv.log("Creating proxy-related RBAC resources.")
    if kubectl_manifest("apply", rbac_proxy_path):
        remove_state("kubernetes-master.create.rbac")
    else:
        msg = "Failed to apply {}, will retry.".format(rbac_proxy_path)
        hookenv.log(msg)
+
+
@when("leadership.is_leader", "kubernetes-master.components.started")
@when_not("kubernetes-master.system-monitoring-rbac-role.applied")
def apply_system_monitoring_rbac_role():
    """Render and apply the system:monitoring RBAC role (leader only).

    Any failure is logged and the 'applied' state left unset so the
    handler retries on the next invocation.
    """
    manifest_path = "/root/cdk/system-monitoring-rbac-role.yaml"
    try:
        hookenv.status_set("maintenance", "Applying system:monitoring RBAC role")
        render("system-monitoring-rbac-role.yaml", manifest_path, {})
        kubectl("apply", "-f", manifest_path)
        set_state("kubernetes-master.system-monitoring-rbac-role.applied")
    except Exception:
        hookenv.log(traceback.format_exc())
        hookenv.log("Waiting to retry applying system:monitoring RBAC role")
+
+
@when(
    "leadership.is_leader",
    "kubernetes-master.components.started",
    "kubernetes-master.remove.rbac",
)
def remove_rbac_resources():
    """Delete the proxy-related RBAC resources (leader only).

    Clears the remove.rbac flag once the manifest is gone (or was never
    rendered); otherwise logs and retries.
    """
    rbac_proxy_path = "/root/cdk/rbac-proxy.yaml"
    if os.path.isfile(rbac_proxy_path):
        hookenv.log("Removing proxy-related RBAC resources.")
        if kubectl_manifest("delete", rbac_proxy_path):
            # Only delete the local manifest after the cluster delete worked.
            os.remove(rbac_proxy_path)
            remove_state("kubernetes-master.remove.rbac")
        else:
            msg = "Failed to delete {}, will retry.".format(rbac_proxy_path)
            hookenv.log(msg)
    else:
        # if we dont have the yaml, there's nothing for us to do
        remove_state("kubernetes-master.remove.rbac")
+
+
@when("kubernetes-master.components.started")
@when("nrpe-external-master.available")
@when_any("config.changed.nagios_context", "config.changed.nagios_servicegroups")
def update_nrpe_config():
    """(Re)write the NRPE checks for the master snap services and the
    API-server reachability plugin."""
    # One systemd service check per master snap daemon.
    services = ["snap.{}.daemon".format(s) for s in master_services]

    plugin = install_nagios_plugin_from_file(
        "templates/nagios_plugin.py", "check_k8s_master.py"
    )
    hostname = nrpe.get_nagios_hostname()
    current_unit = nrpe.get_nagios_unit_name()
    nrpe_setup = nrpe.NRPE(hostname=hostname)
    nrpe.add_init_service_checks(nrpe_setup, services, current_unit)
    nrpe_setup.add_check(
        "k8s-api-server",
        "Verify that the Kubernetes API server is accessible",
        str(plugin),
    )
    nrpe_setup.write()
+
+
@when_not("nrpe-external-master.available")
@when("nrpe-external-master.initial-config")
def remove_nrpe_config():
    """Tear down the NRPE checks added by update_nrpe_config after the
    nrpe-external-master relation departs."""
    # List of systemd services for which the checks will be removed
    services = ["snap.{}.daemon".format(s) for s in master_services]

    remove_nagios_plugin("check_k8s_master.py")

    # The current nrpe-external-master interface doesn't handle a lot of logic,
    # use the charm-helpers code for now.
    hostname = nrpe.get_nagios_hostname()
    nrpe_setup = nrpe.NRPE(hostname=hostname)

    for service in services:
        nrpe_setup.remove_check(shortname=service)
    nrpe_setup.remove_check(shortname="k8s-api-server")
    remove_state("nrpe-external-master.initial-config")
+
+
def is_privileged():
    """Return boolean indicating whether or not to set allow-privileged=true.

    An explicit "true"/"false" config value wins; "auto" enables privileged
    mode when GPU support, Ceph storage, or an OpenStack endpoint needs it.
    """
    setting = hookenv.config("allow-privileged").lower()
    if setting != "auto":
        return setting == "true"
    return any(
        is_state(flag)
        for flag in (
            "kubernetes-master.gpu.enabled",
            "ceph-storage.available",
            "endpoint.openstack.joined",
        )
    )
+
+
@when("config.changed.allow-privileged")
@when("kubernetes-master.components.started")
def on_config_allow_privileged_change():
    """React to changed 'allow-privileged' config value."""
    # Dropping components.started schedules a restart; the config.changed
    # flag is cleared manually so this does not re-fire.
    remove_state("kubernetes-master.components.started")
    remove_state("config.changed.allow-privileged")
+
+
@when_any(
    "config.changed.api-extra-args",
    "config.changed.audit-policy",
    "config.changed.audit-webhook-config",
    "config.changed.enable-keystone-authorization",
    "config.changed.service-cidr",
)
@when("kubernetes-master.components.started")
@when("leadership.set.auto_storage_backend")
@when("etcd.available")
def reconfigure_apiserver():
    """Any apiserver-affecting config changed: clear the configured flag so
    configure_apiserver re-renders options and restarts the service."""
    clear_flag("kubernetes-master.apiserver.configured")
+
+
@when("config.changed.controller-manager-extra-args")
@when("kubernetes-master.components.started")
def on_config_controller_manager_extra_args_change():
    """Re-render controller-manager options when its extra-args change."""
    configure_controller_manager()
+
+
@when("config.changed.scheduler-extra-args")
@when("kubernetes-master.components.started")
def on_config_scheduler_extra_args_change():
    """Re-render scheduler options when its extra-args change."""
    configure_scheduler()
+
+
@when("kube-control.gpu.available")
@when("kubernetes-master.components.started")
@when_not("kubernetes-master.gpu.enabled")
def on_gpu_available(kube_control):
    """The remote side (kubernetes-worker) is gpu-enabled.

    We need to run in privileged mode.

    """
    kube_version = get_version("kube-apiserver")
    config = hookenv.config()
    # Pre-1.9 GPU support requires privileged containers; if the operator
    # explicitly disabled them, GPU mode cannot be enabled.
    if config["allow-privileged"].lower() == "false" and kube_version < (1, 9):
        return

    # Restart components with the gpu flag set.
    remove_state("kubernetes-master.components.started")
    set_state("kubernetes-master.gpu.enabled")
+
+
@when("kubernetes-master.gpu.enabled")
@when("kubernetes-master.components.started")
@when_not("kubernetes-master.privileged")
def gpu_with_no_privileged():
    """We were in gpu mode, but the operator has set allow-privileged="false",
    so we can't run in gpu mode anymore.

    Only applies to pre-1.9 clusters, where GPU support needed privileged
    containers.
    """
    if get_version("kube-apiserver") < (1, 9):
        remove_state("kubernetes-master.gpu.enabled")
+
+
@when("kube-control.connected")
@when_not("kube-control.gpu.available")
@when("kubernetes-master.gpu.enabled")
@when("kubernetes-master.components.started")
def gpu_departed(kube_control):
    """We were in gpu mode, but the workers informed us there is
    no gpu support anymore.

    """
    remove_state("kubernetes-master.gpu.enabled")
+
+
@hook("stop")
def shutdown():
    """Stop the kubernetes master services"""
    for svc in master_services:
        service_stop("snap.%s.daemon" % svc)
+
+
@when(
    "certificates.ca.available",
    "certificates.client.cert.available",
    "authentication.setup",
)
def build_kubeconfig():
    """Gather the relevant data for Kubernetes configuration objects and create
    a config object with that information.

    Writes kubeconfig files for: the ubuntu user (public endpoint), root,
    cdk-addons, kube-proxy, kube-controller-manager and kube-scheduler
    (local endpoint). No-op until the CA certificate exists on disk.
    """
    local_address = get_ingress_address("kube-api-endpoint")
    local_server = "https://{0}:{1}".format(local_address, 6443)
    public_address, public_port = kubernetes_master.get_api_endpoint()
    public_server = "https://{0}:{1}".format(public_address, public_port)

    # Do we have everything we need?
    if ca_crt_path.exists():
        client_pass = get_token("admin")
        if not client_pass:
            # If we made it this far without a password, we're bootstrapping a new
            # cluster. Create a new token so we can build an admin kubeconfig. The
            # auth-webhook service will ack this value from the kubeconfig file,
            # allowing us to continue until the master is started and a proper
            # secret can be created.
            client_pass = (
                hookenv.config("client_password") or kubernetes_master.token_generator()
            )
            client_pass = "admin::{}".format(client_pass)

        # drop keystone helper script?
        ks = endpoint_from_flag("keystone-credentials.available")
        if ks:
            script_filename = "kube-keystone.sh"
            keystone_path = os.path.join(os.sep, "home", "ubuntu", script_filename)
            context = {
                "protocol": ks.credentials_protocol(),
                "address": ks.credentials_host(),
                "port": ks.credentials_port(),
                "version": ks.api_version(),
            }
            render(script_filename, keystone_path, context)
        elif is_state("leadership.set.keystone-cdk-addons-configured"):
            # if addons are configured, we're going to do keystone
            # just not yet because we don't have creds
            hookenv.log("Keystone endpoint not found, will retry.")

        # AWS IAM integration needs the cluster id embedded in kubeconfigs.
        cluster_id = None
        aws_iam = endpoint_from_flag("endpoint.aws-iam.available")
        if aws_iam:
            cluster_id = aws_iam.get_cluster_id()

        # Create an absolute path for the kubeconfig file.
        kubeconfig_path = os.path.join(os.sep, "home", "ubuntu", "config")

        # Create the kubeconfig on this system so users can access the cluster.
        hookenv.log("Writing kubeconfig file.")

        if ks:
            create_kubeconfig(
                kubeconfig_path,
                public_server,
                ca_crt_path,
                user="admin",
                token=client_pass,
                keystone=True,
                aws_iam_cluster_id=cluster_id,
            )
        else:
            create_kubeconfig(
                kubeconfig_path,
                public_server,
                ca_crt_path,
                user="admin",
                token=client_pass,
                aws_iam_cluster_id=cluster_id,
            )

        # Make the config file readable by the ubuntu users so juju scp works.
        cmd = ["chown", "ubuntu:ubuntu", kubeconfig_path]
        check_call(cmd)

        # make a kubeconfig for root (same location on k8s-masters and workers)
        create_kubeconfig(
            kubeclientconfig_path,
            local_server,
            ca_crt_path,
            user="admin",
            token=client_pass,
        )

        # make a kubeconfig for cdk-addons
        create_kubeconfig(
            cdk_addons_kubectl_config_path,
            local_server,
            ca_crt_path,
            user="admin",
            token=client_pass,
        )

        # make a kubeconfig for our services; each is skipped until its
        # token exists.
        proxy_token = get_token("system:kube-proxy")
        if proxy_token:
            create_kubeconfig(
                kubeproxyconfig_path,
                local_server,
                ca_crt_path,
                token=proxy_token,
                user="kube-proxy",
            )
        controller_manager_token = get_token("system:kube-controller-manager")
        if controller_manager_token:
            create_kubeconfig(
                kubecontrollermanagerconfig_path,
                local_server,
                ca_crt_path,
                token=controller_manager_token,
                user="kube-controller-manager",
            )
        scheduler_token = get_token("system:kube-scheduler")
        if scheduler_token:
            create_kubeconfig(
                kubeschedulerconfig_path,
                local_server,
                ca_crt_path,
                token=scheduler_token,
                user="kube-scheduler",
            )
+
+
def handle_etcd_relation(reldata):
    """Save the client credentials and set appropriate daemon flags when
    etcd declares itself as available

    :param reldata: the etcd relation endpoint providing
        save_client_credentials().
    """
    # Define where the etcd tls files will be kept.
    etcd_dir = "/root/cdk/etcd"

    # Create paths to the etcd client ca, key, and cert file locations.
    ca = os.path.join(etcd_dir, "client-ca.pem")
    key = os.path.join(etcd_dir, "client-key.pem")
    cert = os.path.join(etcd_dir, "client-cert.pem")

    # Save the client credentials (in relation data) to the paths provided.
    reldata.save_client_credentials(key, cert, ca)
+
+
def remove_if_exists(path):
    """Delete the file at ``path``, silently ignoring a missing file."""
    try:
        os.remove(path)
    except FileNotFoundError:
        # Nothing to delete — treat as success.
        pass
+
+
def write_file_with_autogenerated_header(path, contents):
    """Write ``contents`` to ``path``, prefixed with a marker comment noting
    the file is generated by this charm."""
    header = "# Autogenerated by kubernetes-master charm"
    with open(path, "w") as out:
        out.write("{}\n{}".format(header, contents))
+
+
@when(
    "etcd.available", "cni.available", "kubernetes-master.auth-webhook-service.started"
)
@when_not("kubernetes-master.apiserver.configured")
def configure_apiserver():
    """Build the full kube-apiserver option set, render the service config,
    restart the snap, and set kubernetes-master.apiserver.configured.

    Covers TLS, auth webhook, etcd, admission plugins, keystone webhook
    authorization, cloud providers and audit logging.
    """
    etcd_connection_string = endpoint_from_flag(
        "etcd.available"
    ).get_connection_string()
    if not etcd_connection_string:
        # etcd is not returning a connection string. This happens when
        # the master unit disconnects from etcd and is ready to terminate.
        # No point in trying to start master services and fail. Just return.
        return

    # Update unit db service-cidr
    was_service_cidr_expanded = kubernetes_master.is_service_cidr_expansion()
    kubernetes_master.freeze_service_cidr()

    cluster_cidr = kubernetes_common.cluster_cidr()
    service_cidr = kubernetes_master.service_cidr()

    api_opts = {}

    if is_privileged():
        api_opts["allow-privileged"] = "true"
        set_state("kubernetes-master.privileged")
    else:
        api_opts["allow-privileged"] = "false"
        remove_state("kubernetes-master.privileged")

    # Handle static options for now
    api_opts["service-cluster-ip-range"] = service_cidr
    if kubernetes_common.is_dual_stack(cluster_cidr):
        api_opts["feature-gates"] = "IPv6DualStack=true"
    api_opts["min-request-timeout"] = "300"
    api_opts["v"] = "4"
    api_opts["tls-cert-file"] = str(server_crt_path)
    api_opts["tls-private-key-file"] = str(server_key_path)
    api_opts["kubelet-certificate-authority"] = str(ca_crt_path)
    api_opts["kubelet-client-certificate"] = str(client_crt_path)
    api_opts["kubelet-client-key"] = str(client_key_path)
    api_opts["kubelet-https"] = "true"
    api_opts["logtostderr"] = "true"
    api_opts["storage-backend"] = getStorageBackend()
    # Disable the insecure (non-TLS) listener entirely.
    api_opts["insecure-port"] = "0"
    api_opts["profiling"] = "false"

    # Authentication is delegated to the charm's auth webhook service.
    api_opts["anonymous-auth"] = "false"
    api_opts["authentication-token-webhook-cache-ttl"] = "1m0s"
    api_opts["authentication-token-webhook-config-file"] = auth_webhook_conf
    api_opts["service-account-issuer"] = "https://kubernetes.default.svc"
    api_opts["service-account-signing-key-file"] = "/root/cdk/serviceaccount.key"
    api_opts["service-account-key-file"] = "/root/cdk/serviceaccount.key"
    api_opts[
        "kubelet-preferred-address-types"
    ] = "InternalIP,Hostname,InternalDNS,ExternalDNS,ExternalIP"
    api_opts["encryption-provider-config"] = str(encryption_config_path())
    if kubernetes_common.is_ipv6(cluster_cidr):
        api_opts["bind-address"] = "::"
    if kubernetes_common.is_ipv6_preferred(cluster_cidr):
        api_opts["advertise-address"] = get_ingress_address6("kube-control")
    else:
        api_opts["advertise-address"] = get_ingress_address("kube-control")

    # etcd client TLS material as written by handle_etcd_relation.
    etcd_dir = "/root/cdk/etcd"
    etcd_ca = os.path.join(etcd_dir, "client-ca.pem")
    etcd_key = os.path.join(etcd_dir, "client-key.pem")
    etcd_cert = os.path.join(etcd_dir, "client-cert.pem")

    api_opts["etcd-cafile"] = etcd_ca
    api_opts["etcd-keyfile"] = etcd_key
    api_opts["etcd-certfile"] = etcd_cert
    api_opts["etcd-servers"] = etcd_connection_string

    # In Kubernetes 1.10 and later, some admission plugins are enabled by
    # default. The current list of default plugins can be found at
    # https://bit.ly/2meP9XT, listed under the '--enable-admission-plugins'
    # option.
    #
    # The list below need only include the plugins we want to enable
    # in addition to the defaults.

    admission_plugins = [
        "PersistentVolumeLabel",
        "PodSecurityPolicy",
        "NodeRestriction",
    ]

    auth_mode = hookenv.config("authorization-mode")

    # Keystone webhook authorization: only wired up once the
    # k8s-keystone-auth service has a cluster IP we can point the
    # webhook config at.
    ks = endpoint_from_flag("keystone-credentials.available")
    if ks:
        ks_ip = None
        ks_ip = get_service_ip("k8s-keystone-auth-service", errors_fatal=False)
    if ks and ks_ip:
        os.makedirs(keystone_root, exist_ok=True)

        keystone_webhook = keystone_root + "/webhook.yaml"
        context = {}
        context["keystone_service_cluster_ip"] = ks_ip
        render("keystone-api-server-webhook.yaml", keystone_webhook, context)

        if hookenv.config("enable-keystone-authorization"):
            # if user wants authorization, enable it
            if "Webhook" not in auth_mode:
                auth_mode += ",Webhook"
            api_opts["authorization-webhook-config-file"] = keystone_webhook  # noqa
        set_state("keystone.apiserver.configured")
    else:
        if ks and not ks_ip:
            hookenv.log(
                "Unable to find k8s-keystone-auth-service " "service. Will retry"
            )
            # Note that we can get into a nasty state here
            # if the user has specified webhook and they're relying on
            # keystone auth to handle that, the api server will fail to
            # start because we push it Webhook and no webhook config.
            # We can't generate the config because we can't talk to the
            # apiserver to get the ip of the service to put into the
            # webhook template. A chicken and egg problem. To fix this,
            # remove Webhook if keystone is related and trying to come
            # up until we can find the service IP.
            if "Webhook" in auth_mode:
                auth_mode = ",".join(
                    [i for i in auth_mode.split(",") if i != "Webhook"]
                )
        elif is_state("leadership.set.keystone-cdk-addons-configured"):
            hookenv.log("Unable to find keystone endpoint. Will retry")
        remove_state("keystone.apiserver.configured")

    api_opts["authorization-mode"] = auth_mode
    api_opts["enable-admission-plugins"] = ",".join(admission_plugins)

    kube_version = get_version("kube-apiserver")

    # Aggregation layer / metrics support.
    if kube_version > (1, 6) and hookenv.config("enable-metrics"):
        api_opts["requestheader-client-ca-file"] = str(ca_crt_path)
        api_opts["requestheader-allowed-names"] = "system:kube-apiserver,client"
        api_opts["requestheader-extra-headers-prefix"] = "X-Remote-Extra-"
        api_opts["requestheader-group-headers"] = "X-Remote-Group"
        api_opts["requestheader-username-headers"] = "X-Remote-User"
        api_opts["proxy-client-cert-file"] = str(client_crt_path)
        api_opts["proxy-client-key-file"] = str(client_key_path)
        api_opts["enable-aggregator-routing"] = "true"
        api_opts["client-ca-file"] = str(ca_crt_path)

    # Cloud provider integration (one of aws/gce/vsphere/azure, if related).
    api_cloud_config_path = cloud_config_path("kube-apiserver")
    if is_state("endpoint.aws.ready"):
        api_opts["cloud-provider"] = "aws"
    elif is_state("endpoint.gcp.ready"):
        api_opts["cloud-provider"] = "gce"
        api_opts["cloud-config"] = str(api_cloud_config_path)
    elif is_state("endpoint.vsphere.ready") and get_version("kube-apiserver") >= (
        1,
        12,
    ):
        api_opts["cloud-provider"] = "vsphere"
        api_opts["cloud-config"] = str(api_cloud_config_path)
    elif is_state("endpoint.azure.ready"):
        api_opts["cloud-provider"] = "azure"
        api_opts["cloud-config"] = str(api_cloud_config_path)

    # Audit logging with rotation.
    audit_root = "/root/cdk/audit"
    os.makedirs(audit_root, exist_ok=True)

    audit_log_path = audit_root + "/audit.log"
    api_opts["audit-log-path"] = audit_log_path
    api_opts["audit-log-maxage"] = "30"
    api_opts["audit-log-maxsize"] = "100"
    api_opts["audit-log-maxbackup"] = "10"

    # Optional operator-supplied audit policy / webhook config; stale files
    # are removed when the config option is cleared.
    audit_policy_path = audit_root + "/audit-policy.yaml"
    audit_policy = hookenv.config("audit-policy")
    if audit_policy:
        write_file_with_autogenerated_header(audit_policy_path, audit_policy)
        api_opts["audit-policy-file"] = audit_policy_path
    else:
        remove_if_exists(audit_policy_path)

    audit_webhook_config_path = audit_root + "/audit-webhook-config.yaml"
    audit_webhook_config = hookenv.config("audit-webhook-config")
    if audit_webhook_config:
        write_file_with_autogenerated_header(
            audit_webhook_config_path, audit_webhook_config
        )
        api_opts["audit-webhook-config-file"] = audit_webhook_config_path
    else:
        remove_if_exists(audit_webhook_config_path)

    configure_kubernetes_service(
        configure_prefix, "kube-apiserver", api_opts, "api-extra-args"
    )
    service_restart("snap.kube-apiserver.daemon")

    if was_service_cidr_expanded and is_state("leadership.is_leader"):
        set_flag("kubernetes-master.had-service-cidr-expanded")

    set_flag("kubernetes-master.apiserver.configured")
+
+
@when(
    "kubernetes-master.had-service-cidr-expanded",
    "kubernetes-master.apiserver.configured",
    "leadership.is_leader",
)
def update_for_service_cidr_expansion():
    """After a service-cidr expansion, recreate the kubernetes service so it
    lands in the new range, then roll the cdk-addons workloads."""
    # We just restarted the API server, so there's a decent chance it's
    # not up yet. Keep trying to get the svcs list until we can; get_svcs
    # has a built-in retry and delay, so this should try for around 30s.
    def _wait_for_svc_ip():
        # Return the kubernetes service's clusterIP, or None on timeout.
        for attempt in range(10):
            svcs = get_svcs()
            if svcs:
                svc_ip = {
                    svc["metadata"]["name"]: svc["spec"]["clusterIP"]
                    for svc in svcs["items"]
                }.get("kubernetes")
                if svc_ip:
                    return svc_ip
        else:
            # for-else: loop exhausted without finding the service.
            return None

    hookenv.log("service-cidr expansion: Waiting for API service")
    # First network is the default, which is used for the API service's address.
    # This logic will likely need to change once dual-stack services are
    # supported: https://bit.ly/2YlbxOx
    expected_service_ip = kubernetes_master.get_kubernetes_service_ips()[0]
    actual_service_ip = _wait_for_svc_ip()
    if not actual_service_ip:
        hookenv.log("service-cidr expansion: Timed out waiting for API service")
        return
    try:
        if actual_service_ip != expected_service_ip:
            # Deleting the service makes the apiserver recreate it with an
            # IP from the (new) service CIDR.
            hookenv.log("service-cidr expansion: Deleting service kubernetes")
            kubectl("delete", "service", "kubernetes")
            actual_service_ip = _wait_for_svc_ip()
            if not actual_service_ip:
                # we might need another restart to get the service recreated
                hookenv.log(
                    "service-cidr expansion: Timed out waiting for "
                    "the service to return; restarting API server"
                )
                clear_flag("kubernetes-master.apiserver.configured")
                return
            if actual_service_ip != expected_service_ip:
                raise ValueError(
                    "Unexpected service IP: {} != {}".format(
                        actual_service_ip, expected_service_ip
                    )
                )

        # Restart the cdk-addons
        # Get deployments/daemonsets/statefulsets
        hookenv.log("service-cidr expansion: Restart the cdk-addons")
        output = kubectl(
            "get",
            "daemonset,deployment,statefulset",
            "-o",
            "json",
            "--all-namespaces",
            "-l",
            "cdk-restart-on-ca-change=true",
        ).decode("UTF-8")
        deployments = json.loads(output)["items"]

        # Now restart the addons
        for deployment in deployments:
            kind = deployment["kind"]
            namespace = deployment["metadata"]["namespace"]
            name = deployment["metadata"]["name"]
            hookenv.log("Restarting addon: {0} {1} {2}".format(kind, namespace, name))
            kubectl("rollout", "restart", kind + "/" + name, "-n", namespace)
    except CalledProcessError:
        # the kubectl calls already log the command and don't capture stderr,
        # so logging the exception is a bit superfluous
        hookenv.log("service-cidr expansion: failed to restart components")
    else:
        # Everything worked; stop re-running this handler.
        clear_flag("kubernetes-master.had-service-cidr-expanded")
+
+
def configure_controller_manager():
    """Render kube-controller-manager options and restart its snap service."""
    controller_opts = {}
    cluster_cidr = kubernetes_common.cluster_cidr()
    service_cidr = kubernetes_master.service_cidr()

    # Default to 3 minute resync. TODO: Make this configurable?
    controller_opts["min-resync-period"] = "3m"
    controller_opts["v"] = "2"
    controller_opts["root-ca-file"] = str(ca_crt_path)
    controller_opts["logtostderr"] = "true"
    controller_opts["kubeconfig"] = kubecontrollermanagerconfig_path
    controller_opts["authorization-kubeconfig"] = kubecontrollermanagerconfig_path
    controller_opts["authentication-kubeconfig"] = kubecontrollermanagerconfig_path
    controller_opts["use-service-account-credentials"] = "true"
    controller_opts["service-account-private-key-file"] = "/root/cdk/serviceaccount.key"
    controller_opts["tls-cert-file"] = str(server_crt_path)
    controller_opts["tls-private-key-file"] = str(server_key_path)
    # cluster_tag is the leader-generated unique cluster name.
    controller_opts["cluster-name"] = leader_get("cluster_tag")
    controller_opts["terminated-pod-gc-threshold"] = "12500"
    controller_opts["profiling"] = "false"
    controller_opts["feature-gates"] = "RotateKubeletServerCertificate=true"
    controller_opts["service-cluster-ip-range"] = service_cidr
    controller_opts["cluster-cidr"] = cluster_cidr
    if kubernetes_common.is_dual_stack(cluster_cidr):
        # NOTE(review): this overwrites the RotateKubeletServerCertificate
        # feature-gates entry set above — confirm that is intentional.
        controller_opts["feature-gates"] = "IPv6DualStack=true"
        net_ipv6 = kubernetes_common.get_ipv6_network(cluster_cidr)
        if net_ipv6:
            controller_opts["node-cidr-mask-size-ipv6"] = net_ipv6.prefixlen

    # Cloud provider integration mirrors configure_apiserver.
    cm_cloud_config_path = cloud_config_path("kube-controller-manager")
    if is_state("endpoint.aws.ready"):
        controller_opts["cloud-provider"] = "aws"
    elif is_state("endpoint.gcp.ready"):
        controller_opts["cloud-provider"] = "gce"
        controller_opts["cloud-config"] = str(cm_cloud_config_path)
    elif is_state("endpoint.vsphere.ready") and get_version("kube-apiserver") >= (
        1,
        12,
    ):
        controller_opts["cloud-provider"] = "vsphere"
        controller_opts["cloud-config"] = str(cm_cloud_config_path)
    elif is_state("endpoint.azure.ready"):
        controller_opts["cloud-provider"] = "azure"
        controller_opts["cloud-config"] = str(cm_cloud_config_path)

    configure_kubernetes_service(
        configure_prefix,
        "kube-controller-manager",
        controller_opts,
        "controller-manager-extra-args",
    )
    service_restart("snap.kube-controller-manager.daemon")
+
+
def configure_scheduler():
    """Render kube-scheduler options plus its KubeSchedulerConfiguration
    file, then restart the snap service."""
    kube_scheduler_config_path = "/root/cdk/kube-scheduler-config.yaml"

    scheduler_opts = {}

    scheduler_opts["v"] = "2"
    scheduler_opts["logtostderr"] = "true"
    scheduler_opts["profiling"] = "false"
    scheduler_opts["config"] = kube_scheduler_config_path

    # Pick the KubeSchedulerConfiguration API version supported by the
    # installed scheduler.
    scheduler_ver = get_version("kube-scheduler")
    if scheduler_ver >= (1, 19):
        api_ver = "v1beta1"
    elif scheduler_ver >= (1, 18):
        api_ver = "v1alpha2"
    else:
        api_ver = "v1alpha1"

    # 0o600: the config references the scheduler kubeconfig; keep it private.
    host.write_file(
        path=kube_scheduler_config_path,
        perms=0o600,
        content=yaml.safe_dump(
            {
                "apiVersion": "kubescheduler.config.k8s.io/{}".format(api_ver),
                "kind": "KubeSchedulerConfiguration",
                "clientConnection": {"kubeconfig": kubeschedulerconfig_path},
            }
        ),
    )

    configure_kubernetes_service(
        configure_prefix, "kube-scheduler", scheduler_opts, "scheduler-extra-args"
    )

    service_restart("snap.kube-scheduler.daemon")
+
+
def setup_tokens(token, username, user, groups=None):
    """Create a token for kubernetes authentication.

    Create a new secret if known_tokens have been migrated. Otherwise,
    add an entry to the 'known_tokens.csv' file.

    :param token: the token value; a random one is generated when falsy.
    :param username: the account username.
    :param user: the user identifier stored alongside the token.
    :param groups: optional group list for the account.
    """
    if not token:
        token = kubernetes_master.token_generator()
    if is_flag_set("kubernetes-master.token-auth.migrated"):
        # We need the apiserver before we can create secrets.
        if is_flag_set("kubernetes-master.apiserver.configured"):
            kubernetes_master.create_secret(token, username, user, groups)
        else:
            hookenv.log("Delaying secret creation until the apiserver is configured.")
    else:
        kubernetes_master.create_known_token(token, username, user, groups)
+
+
def get_token(username):
    """Fetch a token for the given username.

    Reads from the user's k8s secret once known_tokens have been migrated;
    otherwise falls back to the legacy 'known_tokens.csv' file.
    """
    migrated = is_flag_set("kubernetes-master.token-auth.migrated")
    if migrated:
        return kubernetes_master.get_secret_password(username)
    return kubernetes_master.get_csv_password("known_tokens.csv", username)
+
+
def set_token(password, save_salt):
    """Store a token so it can be recalled later by token_generator.

    param: password - the password to be stored
    param: save_salt - the key to store the value of the token.

    Returns the stored value read back from the unit kv database.
    """
    db.set(save_salt, password)
    return db.get(save_salt)
+
+
@retry(times=3, delay_secs=1)
def get_pods(namespace="default"):
    """Return the parsed `kubectl get po -o json` result for *namespace*.

    Returns None when kubectl fails (e.g. apiserver unreachable). The
    decorator retries the whole call up to 3 times with a 1s delay.
    """
    try:
        output = kubectl(
            "get", "po", "-n", namespace, "-o", "json", "--request-timeout", "10s"
        ).decode("UTF-8")
        result = json.loads(output)
    except CalledProcessError:
        hookenv.log("failed to get {} pod status".format(namespace))
        return None
    return result
+
+
@retry(times=3, delay_secs=1)
def get_svcs(namespace="default"):
    """Return the parsed `kubectl get svc -o json` result for *namespace*.

    Returns None when kubectl fails (e.g. apiserver unreachable). The
    decorator retries the whole call up to 3 times with a 1s delay.
    """
    try:
        output = kubectl(
            "get", "svc", "-n", namespace, "-o", "json", "--request-timeout", "10s"
        ).decode("UTF-8")
        result = json.loads(output)
    except CalledProcessError:
        hookenv.log("failed to get {} service status".format(namespace))
        return None
    return result
+
+
class FailedToGetPodStatus(Exception):
    """Raised when pod status cannot be determined (e.g. apiserver down)."""
+
+
def get_kube_system_pods_not_running():
    """Check pod status in the kube-system namespace.

    Raises FailedToGetPodStatus if unable to determine pod status; this
    can occur when the api server is not currently running.

    Returns a list of pods that are not currently running, or an empty
    list if all are running.
    """
    result = get_pods("kube-system")
    if result is None:
        raise FailedToGetPodStatus

    hookenv.log(
        "Checking system pods status: {}".format(
            ", ".join(
                "=".join([pod["metadata"]["name"], pod["status"]["phase"]])
                for pod in result["items"]
            )
        )
    )

    # Pods that are Running or Evicted (which should re-spawn) are
    # considered running.
    not_running = [
        pod
        for pod in result["items"]
        if pod["status"]["phase"] != "Running"
        and pod["status"].get("reason", "") != "Evicted"
    ]

    # On GCP, Pending pods may be stuck behind the NetworkUnavailable node
    # condition; poke the apiserver to clear it (see poke_network_unavailable).
    # Fix: the original had a redundant early `return not_running` inside this
    # branch that returned the same value as the fall-through return below.
    pending = [pod for pod in result["items"] if pod["status"]["phase"] == "Pending"]
    if is_state("endpoint.gcp.ready") and pending:
        poke_network_unavailable()

    return not_running
+
+
def poke_network_unavailable():
    """
    Work around https://github.com/kubernetes/kubernetes/issues/44254 by
    manually poking the status into the API server to tell the nodes they have
    a network route.

    This is needed because kubelet sets the NetworkUnavailable flag and expects
    the network plugin to clear it, which only kubenet does. There is some
    discussion about refactoring the affected code but nothing has happened
    in a while.
    """
    local_address = get_ingress_address("kube-api-endpoint")
    local_server = "https://{0}:{1}".format(local_address, 6443)

    # Authenticate to the local apiserver with the admin bearer token.
    client_token = get_token("admin")
    http_header = ("Authorization", "Bearer {}".format(client_token))

    try:
        output = kubectl("get", "nodes", "-o", "json").decode("utf-8")
        nodes = json.loads(output)["items"]
    except CalledProcessError:
        hookenv.log("failed to get kube-system nodes")
        return
    except (KeyError, json.JSONDecodeError) as e:
        hookenv.log(
            "failed to parse kube-system node status " "({}): {}".format(e, output),
            hookenv.ERROR,
        )
        return

    for node in nodes:
        node_name = node["metadata"]["name"]
        url = "{}/api/v1/nodes/{}/status".format(local_server, node_name)
        req = Request(url)
        req.add_header(*http_header)
        # NOTE(review): urlopen raises HTTPError on non-2xx responses, so the
        # explicit status-code checks below are defensive rather than the
        # usual failure path — confirm intended.
        with urlopen(req) as response:
            code = response.getcode()
            body = response.read().decode("utf8")
        if code != 200:
            hookenv.log(
                "failed to get node status from {} [{}]: {}".format(url, code, body),
                hookenv.ERROR,
            )
            return
        try:
            node_info = json.loads(body)
            conditions = node_info["status"]["conditions"]
            # NOTE(review): list.index raises ValueError (not caught below) if
            # no NetworkUnavailable condition exists — confirm that condition
            # is always present on these nodes.
            i = [c["type"] for c in conditions].index("NetworkUnavailable")
            if conditions[i]["status"] == "True":
                hookenv.log("Clearing NetworkUnavailable from {}".format(node_name))
                conditions[i] = {
                    "type": "NetworkUnavailable",
                    "status": "False",
                    "reason": "RouteCreated",
                    "message": "Manually set through k8s api",
                }
                # PUT the full node status back with the cleared condition.
                req = Request(
                    url,
                    method="PUT",
                    data=json.dumps(node_info).encode("utf8"),
                    headers={"Content-Type": "application/json"},
                )
                req.add_header(*http_header)
                with urlopen(req) as response:
                    code = response.getcode()
                    body = response.read().decode("utf8")
                if code not in (200, 201, 202):
                    hookenv.log(
                        "failed to update node status [{}]: {}".format(code, body),
                        hookenv.ERROR,
                    )
                    return
        except (json.JSONDecodeError, KeyError):
            hookenv.log("failed to parse node status: {}".format(body), hookenv.ERROR)
            return
+
+
def apiserverVersion():
    """Return the local kube-apiserver version as a tuple of up to 3 ints.

    Parses the first three numeric groups out of `kube-apiserver --version`.
    """
    version_string = check_output(["kube-apiserver", "--version"]).decode("utf-8")
    numbers = re.findall("[0-9]+", version_string)[:3]
    return tuple(int(part) for part in numbers)
+
+
+def touch(fname):
+ try:
+ os.utime(fname, None)
+ except OSError:
+ open(fname, "a").close()
+
+
def getStorageBackend():
    """Resolve the storage-backend charm config, honoring the 'auto' choice.

    When configured as 'auto', the concrete backend chosen by the leader
    (leader data key 'auto_storage_backend') is returned instead.
    """
    backend = hookenv.config("storage-backend")
    if backend != "auto":
        return backend
    return leader_get("auto_storage_backend")
+
+
@when("leadership.is_leader")
@when_not("leadership.set.cluster_tag")
def create_cluster_tag():
    """Generate a unique cluster tag (leader only, once) into leader data."""
    cluster_tag = "kubernetes-{}".format(kubernetes_master.token_generator().lower())
    leader_set(cluster_tag=cluster_tag)
+
+
@when("leadership.set.cluster_tag", "kube-control.connected")
def send_cluster_tag():
    """Publish the leader-chosen cluster tag over the kube-control relation."""
    cluster_tag = leader_get("cluster_tag")
    kube_control = endpoint_from_flag("kube-control.connected")
    kube_control.set_cluster_tag(cluster_tag)
+
+
@when_not("kube-control.connected")
def clear_cluster_tag_sent():
    """Reset the sent marker when the kube-control relation goes away."""
    remove_state("kubernetes-master.cluster-tag-sent")
+
+
@when_any(
    "endpoint.aws.joined",
    "endpoint.gcp.joined",
    "endpoint.openstack.joined",
    "endpoint.vsphere.joined",
    "endpoint.azure.joined",
)
@when_not("kubernetes-master.cloud.ready")
def set_cloud_pending():
    """Mark cloud integration as pending (or blocked on old k8s versions).

    vsphere needs k8s >= 1.12 and azure needs k8s >= 1.11; anything older
    with those clouds joined is flagged blocked instead of pending.
    """
    k8s_version = get_version("kube-apiserver")
    k8s_1_11 = k8s_version >= (1, 11)
    k8s_1_12 = k8s_version >= (1, 12)
    vsphere_joined = is_state("endpoint.vsphere.joined")
    azure_joined = is_state("endpoint.azure.joined")
    if (vsphere_joined and not k8s_1_12) or (azure_joined and not k8s_1_11):
        set_state("kubernetes-master.cloud.blocked")
    else:
        remove_state("kubernetes-master.cloud.blocked")
    set_state("kubernetes-master.cloud.pending")
+
+
@when_any("endpoint.aws.joined", "endpoint.gcp.joined", "endpoint.azure.joined")
@when("leadership.set.cluster_tag")
@when_not("kubernetes-master.cloud.request-sent")
def request_integration():
    """Ask the joined cloud integrator for the permissions/tags k8s needs.

    Tags instances (and on AWS, security groups and subnets) with the
    cluster tag and requests the storage/load-balancer/security management
    capabilities appropriate to each cloud, then marks the request sent.
    """
    hookenv.status_set("maintenance", "requesting cloud integration")
    cluster_tag = leader_get("cluster_tag")
    if is_state("endpoint.aws.joined"):
        cloud = endpoint_from_flag("endpoint.aws.joined")
        # kubernetes.io/cluster/<tag>=owned is the tag the in-tree AWS cloud
        # provider uses to discover cluster resources.
        cloud.tag_instance(
            {
                "kubernetes.io/cluster/{}".format(cluster_tag): "owned",
                "k8s.io/role/master": "true",
            }
        )
        cloud.tag_instance_security_group(
            {
                "kubernetes.io/cluster/{}".format(cluster_tag): "owned",
            }
        )
        cloud.tag_instance_subnet(
            {
                "kubernetes.io/cluster/{}".format(cluster_tag): "owned",
            }
        )
        cloud.enable_object_storage_management(["kubernetes-*"])
        cloud.enable_load_balancer_management()
    elif is_state("endpoint.gcp.joined"):
        cloud = endpoint_from_flag("endpoint.gcp.joined")
        cloud.label_instance(
            {
                "k8s-io-cluster-name": cluster_tag,
                "k8s-io-role-master": "master",
            }
        )
        cloud.enable_object_storage_management()
        cloud.enable_security_management()
    elif is_state("endpoint.azure.joined"):
        cloud = endpoint_from_flag("endpoint.azure.joined")
        cloud.tag_instance(
            {
                "k8s-io-cluster-name": cluster_tag,
                "k8s-io-role-master": "master",
            }
        )
        cloud.enable_object_storage_management()
        cloud.enable_security_management()
        cloud.enable_loadbalancer_management()
        cloud.enable_instance_inspection()
        cloud.enable_network_management()
        cloud.enable_dns_management()
        cloud.enable_block_storage_management()
    set_state("kubernetes-master.cloud.request-sent")
+
+
@when_none(
    "endpoint.aws.joined",
    "endpoint.gcp.joined",
    "endpoint.openstack.joined",
    "endpoint.vsphere.joined",
    "endpoint.azure.joined",
)
@when_any(
    "kubernetes-master.cloud.pending",
    "kubernetes-master.cloud.request-sent",
    "kubernetes-master.cloud.blocked",
    "kubernetes-master.cloud.ready",
)
def clear_cloud_flags():
    """Drop all cloud state when no cloud integrator remains related.

    Also forces the apiserver and controller-manager to be reconfigured
    without cloud settings.
    """
    remove_state("kubernetes-master.cloud.pending")
    remove_state("kubernetes-master.cloud.request-sent")
    remove_state("kubernetes-master.cloud.blocked")
    remove_state("kubernetes-master.cloud.ready")
    clear_flag("kubernetes-master.apiserver.configured")
    _kick_controller_manager()
+
+
@when_any(
    "endpoint.aws.ready",
    "endpoint.gcp.ready",
    "endpoint.openstack.ready",
    "endpoint.vsphere.ready",
    "endpoint.azure.ready",
)
@when_not("kubernetes-master.cloud.blocked", "kubernetes-master.cloud.ready")
def cloud_ready():
    """Write per-cloud snap config once the integrator reports ready.

    AWS/openstack need no extra snap config here; gcp/vsphere/azure get
    cloud config written for both apiserver and controller-manager.
    """
    if is_state("endpoint.gcp.ready"):
        write_gcp_snap_config("kube-apiserver")
        write_gcp_snap_config("kube-controller-manager")
    elif is_state("endpoint.vsphere.ready"):
        _write_vsphere_snap_config("kube-apiserver")
        _write_vsphere_snap_config("kube-controller-manager")
    elif is_state("endpoint.azure.ready"):
        write_azure_snap_config("kube-apiserver")
        write_azure_snap_config("kube-controller-manager")
    remove_state("kubernetes-master.cloud.pending")
    set_state("kubernetes-master.cloud.ready")
    remove_state("kubernetes-master.components.started")  # force restart
+
+
@when("kubernetes-master.cloud.ready")
@when_any(
    "endpoint.openstack.ready.changed",
    "endpoint.vsphere.ready.changed",
    "endpoint.azure.ready.changed",
)
def update_cloud_config():
    """Signal that cloud config has changed.

    Some clouds (openstack, vsphere) support runtime config that needs to be
    reflected in the k8s cloud config files when changed. Manage flags to
    ensure this happens.
    """
    if is_state("endpoint.openstack.ready.changed"):
        # openstack keeps cloud.ready set; a dedicated flag triggers
        # the openstack-specific reconfiguration elsewhere.
        remove_state("endpoint.openstack.ready.changed")
        set_state("kubernetes-master.openstack.changed")
    if is_state("endpoint.vsphere.ready.changed"):
        # Dropping cloud.ready re-runs cloud_ready(), rewriting the config.
        remove_state("kubernetes-master.cloud.ready")
        remove_state("endpoint.vsphere.ready.changed")
    if is_state("endpoint.azure.ready.changed"):
        remove_state("kubernetes-master.cloud.ready")
        remove_state("endpoint.azure.ready.changed")
+
+
+def _cdk_addons_template_path():
+ return Path("/snap/cdk-addons/current/templates")
+
+
def _write_vsphere_snap_config(component):
    """Write the vsphere cloud-config file for *component*.

    param: component - snap name (e.g. 'kube-apiserver') whose cloud
    config path receives the INI-style vsphere provider configuration.
    """
    # vsphere requires additional cloud config
    vsphere = endpoint_from_flag("endpoint.vsphere.ready")

    # NB: vsphere provider will ask kube-apiserver and -controller-manager to
    # find a uuid from sysfs unless a global config value is set. Our strict
    # snaps cannot read sysfs, so let's do it in the charm. An invalid uuid is
    # not fatal for storage, but it will muddy the logs; try to get it right.
    uuid = _get_vmware_uuid()

    comp_cloud_config_path = cloud_config_path(component)
    comp_cloud_config_path.write_text(
        "\n".join(
            [
                "[Global]",
                "insecure-flag = true",
                'datacenters = "{}"'.format(vsphere.datacenter),
                'vm-uuid = "VMware-{}"'.format(uuid),
                '[VirtualCenter "{}"]'.format(vsphere.vsphere_ip),
                'user = "{}"'.format(vsphere.user),
                'password = "{}"'.format(vsphere.password),
                "[Workspace]",
                'server = "{}"'.format(vsphere.vsphere_ip),
                'datacenter = "{}"'.format(vsphere.datacenter),
                'default-datastore = "{}"'.format(vsphere.datastore),
                'folder = "{}"'.format(vsphere.folder),
                'resourcepool-path = "{}"'.format(vsphere.respool_path),
                "[Disk]",
                'scsicontrollertype = "pvscsi"',
            ]
        )
    )
+
+
@when("config.changed.keystone-policy")
@when("kubernetes-master.keystone-policy-handled")
def regen_keystone_policy():
    """Re-apply the keystone policy configmap after a config change."""
    clear_flag("kubernetes-master.keystone-policy-handled")
+
+
@when(
    "keystone-credentials.available",
    "leadership.is_leader",
    "kubernetes-master.apiserver.configured",
)
@when_not("kubernetes-master.keystone-policy-handled")
def generate_keystone_configmap():
    """Write and apply the keystone policy manifest (leader only).

    Sets the handled flag on success; sets the error flag when the policy
    is empty or the kubectl apply fails.
    """
    keystone_policy = hookenv.config("keystone-policy")
    if keystone_policy:
        os.makedirs(keystone_root, exist_ok=True)
        write_file_with_autogenerated_header(keystone_policy_path, keystone_policy)
        if kubectl_manifest("apply", keystone_policy_path):
            set_flag("kubernetes-master.keystone-policy-handled")
            clear_flag("kubernetes-master.keystone-policy-error")
        else:
            set_flag("kubernetes-master.keystone-policy-error")
    else:
        # a missing policy configmap will crashloop the pods, but...
        # what do we do in this situation. We could just do nothing,
        # but that isn't cool for the user so we surface an error
        # and wait for them to fix it.
        set_flag("kubernetes-master.keystone-policy-error")

    # note that information is surfaced to the user in the code above where we
    # write status. It will notify the user we are waiting on the policy file
    # to apply if the keystone-credentials.available flag is set, but
    # kubernetes-master.keystone-policy-handled is not set.
+
+
@when("leadership.is_leader", "kubernetes-master.keystone-policy-handled")
@when_not("keystone-credentials.available")
def remove_keystone():
    """Tear down keystone policy when the keystone relation goes away.

    Deletes the policy manifest from the cluster (when it still exists on
    disk) and forces an apiserver reconfiguration without keystone auth.
    """
    clear_flag("kubernetes-master.apiserver.configured")
    if not os.path.exists(keystone_policy_path):
        clear_flag("kubernetes-master.keystone-policy-handled")
    elif kubectl_manifest("delete", keystone_policy_path):
        os.remove(keystone_policy_path)
        clear_flag("kubernetes-master.keystone-policy-handled")
+
+
@when("keystone-credentials.connected")
def setup_keystone_user():
    """Request a 'k8s' user from keystone as soon as the relation connects."""
    # This seems silly, but until we request a user from keystone
    # we don't get information about the keystone server...
    ks = endpoint_from_flag("keystone-credentials.connected")
    ks.request_credentials("k8s")
+
+
def _kick_controller_manager():
    """Reconfigure (and thereby restart) the controller-manager, but only
    once the charm has already started the master components."""
    if is_flag_set("kubernetes-master.components.started"):
        configure_controller_manager()
+
+
@when(
    "keystone.credentials.configured", "leadership.set.keystone-cdk-addons-configured"
)
@when_not("keystone.apiserver.configured")
def keystone_kick_apiserver():
    """Force an apiserver reconfiguration after keystone credentials change."""
    clear_flag("kubernetes-master.apiserver.configured")
+
+
@when(
    "keystone-credentials.available",
    "certificates.ca.available",
    "certificates.client.cert.available",
    "authentication.setup",
    "etcd.available",
    "leadership.set.keystone-cdk-addons-configured",
)
def keystone_config():
    """Rebuild keystone-dependent config when the keystone endpoint changes.

    When host/protocol/port/api-version data changes, the kubeconfig and
    keystone policy configmap are regenerated and the apiserver is marked
    for reconfiguration.
    """
    # first, we have to have the service set up before we can render this stuff
    ks = endpoint_from_flag("keystone-credentials.available")
    data = {
        "host": ks.credentials_host(),
        "proto": ks.credentials_protocol(),
        "port": ks.credentials_port(),
        "version": ks.api_version(),
    }
    if data_changed("keystone", data):
        remove_state("keystone.credentials.configured")
        clear_flag("kubernetes-master.apiserver.configured")
        build_kubeconfig()
        generate_keystone_configmap()
        set_state("keystone.credentials.configured")
+
+
@when("layer.vault-kv.app-kv.set.encryption_key", "layer.vaultlocker.ready")
@when_not("kubernetes-master.secure-storage.created")
def create_secure_storage():
    """Create an encrypted mount for the apiserver encryption config.

    On success, writes the encryption config into the encrypted mount and
    marks secure storage created; on VaultLockerError, marks it failed.
    """
    encryption_conf_dir = encryption_config_path().parent
    encryption_conf_dir.mkdir(mode=0o700, parents=True, exist_ok=True)
    try:
        vaultlocker.create_encrypted_loop_mount(encryption_conf_dir)
    except vaultlocker.VaultLockerError:
        # One common cause of this would be deploying on lxd.
        # Should this be more fatal?
        hookenv.log(
            "Unable to create encrypted mount for storing encryption config.\n"
            "{}".format(traceback.format_exc()),
            level=hookenv.ERROR,
        )
        set_flag("kubernetes-master.secure-storage.failed")
        clear_flag("kubernetes-master.secure-storage.created")
    else:
        # TODO: If Vault isn't available, it's probably still better to encrypt
        # anyway and store the key in plaintext and leadership than to just
        # give up on encryption entirely.
        _write_encryption_config()
        # prevent an unnecessary service restart on this
        # unit since we've already handled the change
        clear_flag("layer.vault-kv.app-kv.changed.encryption_key")
        # mark secure storage as ready
        set_flag("kubernetes-master.secure-storage.created")
        clear_flag("kubernetes-master.secure-storage.failed")
        # restart to regen config
        clear_flag("kubernetes-master.apiserver.configured")
+
+
@when_not("layer.vaultlocker.ready")
@when("kubernetes-master.secure-storage.created")
def revert_secure_storage():
    """Undo the secure-storage flags when vaultlocker is no longer ready."""
    clear_flag("kubernetes-master.secure-storage.created")
    clear_flag("kubernetes-master.secure-storage.failed")
    clear_flag("kubernetes-master.apiserver.configured")
+
+
@when("leadership.is_leader", "layer.vault-kv.ready")
@when_not("layer.vault-kv.app-kv.set.encryption_key")
def generate_encryption_key():
    """Generate the shared secrets-encryption key in Vault (leader only)."""
    app_kv = vault_kv.VaultAppKV()
    app_kv["encryption_key"] = kubernetes_master.token_generator(32)
+
+
@when(
    "layer.vault-kv.app-kv.changed.encryption_key",
    "kubernetes-master.secure-storage.created",
)
def restart_apiserver_for_encryption_key():
    """Force an apiserver reconfiguration when the encryption key changes."""
    clear_flag("kubernetes-master.apiserver.configured")
    clear_flag("layer.vault-kv.app-kv.changed.encryption_key")
+
+
def _write_encryption_config():
    """Write the apiserver EncryptionConfig for secrets at rest.

    The key comes from Vault app-kv; secrets are encrypted with aescbc,
    with the 'identity' provider as fallback for reading unencrypted data.
    """
    app_kv = vault_kv.VaultAppKV()
    encryption_config_path().parent.mkdir(parents=True, exist_ok=True)
    secret = app_kv["encryption_key"]
    # The apiserver expects the aescbc key material base64-encoded.
    secret = base64.b64encode(secret.encode("utf8")).decode("utf8")
    host.write_file(
        path=str(encryption_config_path()),
        perms=0o600,  # contains key material
        content=yaml.safe_dump(
            {
                "kind": "EncryptionConfig",
                "apiVersion": "v1",
                "resources": [
                    {
                        "resources": ["secrets"],
                        "providers": [
                            {
                                "aescbc": {
                                    "keys": [
                                        {
                                            "name": "key1",
                                            "secret": secret,
                                        }
                                    ],
                                }
                            },
                            {"identity": {}},
                        ],
                    }
                ],
            }
        ),
    )
+
+
@when_any("config.changed.ha-cluster-vip", "config.changed.ha-cluster-dns")
def haconfig_changed():
    """Re-run hacluster configuration after VIP/DNS config changes."""
    clear_flag("hacluster-configured")
+
+
@when("ha.connected", "kubernetes-master.components.started")
@when_not("hacluster-configured")
def configure_hacluster():
    """Register master daemons with hacluster and refresh certs/workers."""
    for service in master_services:
        daemon = "snap.{}.daemon".format(service)
        add_service_to_hacluster(service, daemon)

    # get a new cert
    if all_flags_set("certificates.available", "kube-api-endpoint.available"):
        send_data()

    # update workers
    if is_state("kube-api-endpoint.available"):
        push_service_data()

    set_flag("hacluster-configured")
+
+
@when_not("ha.connected")
@when("hacluster-configured")
def remove_hacluster():
    """Deregister master daemons from hacluster when the relation departs."""
    for service in master_services:
        daemon = "snap.{}.daemon".format(service)
        remove_service_from_hacluster(service, daemon)

    # get a new cert
    if all_flags_set("certificates.available", "kube-api-endpoint.available"):
        send_data()
    # update workers
    if is_state("kube-api-endpoint.available"):
        push_service_data()

    clear_flag("hacluster-configured")
+
+
class InvalidDnsProvider(Exception):
    """Raised when the configured dns-provider is not a valid choice.

    The offending value is kept on `.value` for existing callers; it is
    also passed to Exception so str(exc) names the bad provider instead
    of being empty (the original never called super().__init__).
    """

    def __init__(self, value):
        super().__init__(value)
        self.value = value
+
+
def get_dns_provider():
    """Resolve the dns-provider charm config to a concrete provider.

    Raises InvalidDnsProvider for values outside the valid set (core-dns
    is only valid on k8s >= 1.14). 'auto' resolves to the leader-recorded
    choice, defaulting to core-dns when available, else kube-dns.
    """
    valid_dns_providers = ["auto", "core-dns", "kube-dns", "none"]
    if get_version("kube-apiserver") < (1, 14):
        valid_dns_providers.remove("core-dns")

    dns_provider = hookenv.config("dns-provider").lower()
    if dns_provider not in valid_dns_providers:
        raise InvalidDnsProvider(dns_provider)

    if dns_provider == "auto":
        dns_provider = leader_get("auto_dns_provider")
        # On new deployments, the first time this is called, auto_dns_provider
        # hasn't been set yet. We need to make a choice now.
        if not dns_provider:
            if "core-dns" in valid_dns_providers:
                dns_provider = "core-dns"
            else:
                dns_provider = "kube-dns"

        # LP: 1833089. Followers end up here when setting final status; ensure only
        # leaders call leader_set.
        if is_state("leadership.is_leader"):
            leader_set(auto_dns_provider=dns_provider)
    return dns_provider
+
+
@when("kube-control.connected")
@when_not("kubernetes-master.sent-registry")
def send_registry_location():
    """Publish the image registry to workers and the container runtime."""
    registry_location = hookenv.config("image-registry")
    kube_control = endpoint_from_flag("kube-control.connected")

    # Send registry to workers
    kube_control.set_registry_location(registry_location)

    # Construct and send the sandbox image (pause container) to our runtime
    runtime = endpoint_from_flag("endpoint.container-runtime.available")
    if runtime:
        uri = "{}/pause-{}:3.1".format(registry_location, arch())
        runtime.set_config(sandbox_image=uri)

    set_flag("kubernetes-master.sent-registry")
+
+
@when("config.changed.image-registry")
def send_new_registry_location():
    """Re-send the registry location after the image-registry config changes."""
    clear_flag("kubernetes-master.sent-registry")
+
+
@when(
    "leadership.is_leader",
    "leadership.set.kubernetes-master-addons-restart-for-ca",
    "kubernetes-master.components.started",
)
def restart_addons_for_ca():
    """Rolling-restart CA-sensitive addons once their secrets carry the new CA.

    Finds workloads labelled cdk-restart-on-ca-change=true, resolves their
    ServiceAccounts and secrets, waits until every secret's ca.crt matches
    the on-disk CA, then issues `kubectl rollout restart` for each workload
    and clears the leadership trigger. Any failure is logged and retried on
    a later hook invocation (the broad except is deliberate best-effort).
    """
    try:
        # Get deployments/daemonsets/statefulsets
        output = kubectl(
            "get",
            "daemonset,deployment,statefulset",
            "-o",
            "json",
            "--all-namespaces",
            "-l",
            "cdk-restart-on-ca-change=true",
        ).decode("UTF-8")
        deployments = json.loads(output)["items"]

        # Get ServiceAccounts
        service_account_names = set(
            (
                deployment["metadata"]["namespace"],
                deployment["spec"]["template"]["spec"].get(
                    "serviceAccountName", "default"
                ),
            )
            for deployment in deployments
        )
        service_accounts = []
        for namespace, name in service_account_names:
            output = kubectl(
                "get", "ServiceAccount", name, "-o", "json", "-n", namespace
            ).decode("UTF-8")
            service_account = json.loads(output)
            service_accounts.append(service_account)

        # Get ServiceAccount secrets
        secret_names = set()
        for service_account in service_accounts:
            namespace = service_account["metadata"]["namespace"]
            for secret in service_account["secrets"]:
                secret_names.add((namespace, secret["name"]))
        secrets = []
        for namespace, name in secret_names:
            output = kubectl(
                "get", "Secret", name, "-o", "json", "-n", namespace
            ).decode("UTF-8")
            secret = json.loads(output)
            secrets.append(secret)

        # Check secrets have updated CA
        with open(ca_crt_path, "rb") as f:
            ca = f.read()
        encoded_ca = base64.b64encode(ca).decode("UTF-8")
        mismatched_secrets = [
            secret for secret in secrets if secret["data"]["ca.crt"] != encoded_ca
        ]
        if mismatched_secrets:
            hookenv.log(
                "ServiceAccount secrets do not have correct ca.crt: "
                + ",".join(secret["metadata"]["name"] for secret in mismatched_secrets)
            )
            hookenv.log("Waiting to retry restarting addons")
            return

        # Now restart the addons
        for deployment in deployments:
            kind = deployment["kind"]
            namespace = deployment["metadata"]["namespace"]
            name = deployment["metadata"]["name"]
            hookenv.log("Restarting addon: %s %s %s" % (kind, namespace, name))
            kubectl("rollout", "restart", kind + "/" + name, "-n", namespace)

        # Clearing the leadership key stops this handler from re-firing.
        leader_set({"kubernetes-master-addons-restart-for-ca": None})
    except Exception:
        hookenv.log(traceback.format_exc())
        hookenv.log("Waiting to retry restarting addons")
+
+
def add_systemd_iptables_patch():
    """Install and start the kube-proxy iptables fix as a systemd service.

    Copies the fix script and its unit file from the charm's templates,
    reloads systemd, then enables and starts the service.
    """
    source = "templates/kube-proxy-iptables-fix.sh"
    dest = "/usr/local/bin/kube-proxy-iptables-fix.sh"
    copyfile(source, dest)
    os.chmod(dest, 0o775)  # executable by root/group, readable by all

    template = "templates/service-iptables-fix.service"
    dest_dir = "/etc/systemd/system"
    os.makedirs(dest_dir, exist_ok=True)
    service_name = "kube-proxy-iptables-fix.service"
    copyfile(template, "{}/{}".format(dest_dir, service_name))

    check_call(["systemctl", "daemon-reload"])

    # enable and run the service
    service_resume(service_name)
+
+
@when(
    "leadership.is_leader",
    "kubernetes-master.components.started",
    "endpoint.prometheus.joined",
    "certificates.ca.available",
)
def register_prometheus_jobs():
    """Register scrape jobs (from templates/prometheus/*.yaml.j2) with
    each related prometheus, rendered against this master's API endpoint
    and the system:monitoring token."""
    prometheus = endpoint_from_flag("endpoint.prometheus.joined")
    tls = endpoint_from_flag("certificates.ca.available")
    monitoring_token = get_token("system:monitoring")

    for relation in prometheus.relations:
        address, port = kubernetes_master.get_api_endpoint(relation)

        templates_dir = Path("templates")
        for job_file in Path("templates/prometheus").glob("*.yaml.j2"):
            # Job name is the template basename up to its first dot.
            prometheus.register_job(
                relation=relation,
                job_name=job_file.name.split(".")[0],
                job_data=yaml.safe_load(
                    render(
                        source=str(job_file.relative_to(templates_dir)),
                        target=None,  # don't write file, just return data
                        context={
                            "k8s_api_address": address,
                            "k8s_api_port": port,
                            "k8s_token": monitoring_token,
                        },
                    )
                ),
                ca_cert=tls.root_ca_cert,
            )
+
+
def detect_telegraf():
    """Best-effort detection of a related telegraf subordinate.

    Telegraf uses the implicit juju-info relation, which makes it difficult
    to tell if it's related. The "best" option is to look for the
    subordinate charm on disk.
    """
    for charm_dir in Path("/var/lib/juju/agents").glob("unit-*/charm"):
        metadata = yaml.safe_load((charm_dir / "metadata.yaml").read_text())
        if "telegraf" in metadata["name"]:
            return True
    return False
+
+
@when(
    "leadership.is_leader",
    "kubernetes-master.components.started",
    "endpoint.grafana.joined",
)
def register_grafana_dashboards():
    """Register bundled dashboards with a related grafana.

    Conditional dashboards are only registered when their data source
    (prometheus relation, telegraf subordinate) is present; everything in
    templates/grafana/autoload is always registered.
    """
    grafana = endpoint_from_flag("endpoint.grafana.joined")

    # load conditional dashboards
    dash_dir = Path("templates/grafana/conditional")
    if is_flag_set("endpoint.prometheus.joined"):
        dashboard = (dash_dir / "prometheus.json").read_text()
        grafana.register_dashboard("prometheus", json.loads(dashboard))
    if detect_telegraf():
        dashboard = (dash_dir / "telegraf.json").read_text()
        grafana.register_dashboard("telegraf", json.loads(dashboard))

    # load automatic dashboards
    dash_dir = Path("templates/grafana/autoload")
    for dash_file in dash_dir.glob("*.json"):
        dashboard = dash_file.read_text()
        grafana.register_dashboard(dash_file.stem, json.loads(dashboard))
+
+
@when("endpoint.aws-iam.ready")
@when_not("kubernetes-master.aws-iam.configured")
def enable_aws_iam_webhook():
    """Enable the aws-iam auth webhook once etcd is available."""
    # if etcd isn't available yet, we'll set this up later
    # when we start the api server.
    if is_flag_set("etcd.available"):
        # call the other things we need to update
        clear_flag("kubernetes-master.apiserver.configured")
        build_kubeconfig()
        set_flag("kubernetes-master.aws-iam.configured")
+
+
@when("kubernetes-master.components.started", "endpoint.aws-iam.available")
def api_server_started():
    """Tell the aws-iam endpoint that the api server is running."""
    endpoint = endpoint_from_flag("endpoint.aws-iam.available")
    if endpoint:
        endpoint.set_api_server_status(True)
+
+
@when_not("kubernetes-master.components.started")
@when("endpoint.aws-iam.available")
def api_server_stopped():
    """Tell the aws-iam endpoint that the api server is not running."""
    endpoint = endpoint_from_flag("endpoint.aws-iam.available")
    if endpoint:
        endpoint.set_api_server_status(False)
+
+
@when("kube-control.connected")
def send_default_cni():
    """Send the value of the default-cni config to the kube-control relation.

    This allows kubernetes-worker to use the same config value as well.
    """
    default_cni = hookenv.config("default-cni")
    kube_control = endpoint_from_flag("kube-control.connected")
    kube_control.set_default_cni(default_cni)
+
+
@when("config.changed.default-cni")
def default_cni_changed():
    """Force a full component restart when the default CNI choice changes."""
    remove_state("kubernetes-master.components.started")
diff --git a/kubernetes-master/reactive/kubernetes_master_worker_base.py b/kubernetes-master/reactive/kubernetes_master_worker_base.py
new file mode 100644
index 0000000..0bb1de4
--- /dev/null
+++ b/kubernetes-master/reactive/kubernetes_master_worker_base.py
@@ -0,0 +1,88 @@
+from charms.layer import snap
+from charms.leadership import (
+ leader_get,
+ leader_set
+)
+from charms.reactive import (
+ when,
+ when_not,
+ when_any,
+ data_changed
+)
+
+from charmhelpers.core import hookenv
+from charmhelpers.core.host import is_container
+from charmhelpers.core.sysctl import create as create_sysctl
+
+
@when_any('kubernetes-master.snaps.installed',
          'kubernetes-worker.snaps.installed')
@when('snap.refresh.set')
@when('leadership.is_leader')
def process_snapd_timer():
    """
    Set the snapd refresh timer on the leader so all cluster members
    (present and future) will refresh near the same time.

    :return: None
    """
    # Get the current snapd refresh timer; we know layer-snap has set this
    # when the 'snap.refresh.set' flag is present.
    timer = snap.get(
        snapname='core', key='refresh.timer').decode('utf-8').strip()
    if not timer:
        # The core snap timer is empty. This likely means a subordinate timer
        # reset ours. Try to set it back to a previously leader-set value,
        # falling back to config if needed. Luckily, this should only happen
        # during subordinate install, so this should remain stable afterward.
        timer = leader_get('snapd_refresh') or hookenv.config('snapd_refresh')
        snap.set_refresh_timer(timer)

    # Ensure we have the timer known by snapd (it may differ from config).
    timer = snap.get(
        snapname='core', key='refresh.timer').decode('utf-8').strip()

    # The first time through, data_changed will be true. Subsequent calls
    # should only update leader data if something changed.
    if data_changed('snapd_refresh', timer):
        hookenv.log('setting leader snapd_refresh timer to: {}'.format(timer))
        leader_set({'snapd_refresh': timer})
+
+
@when_any('kubernetes-master.snaps.installed',
          'kubernetes-worker.snaps.installed')
@when('snap.refresh.set')
@when('leadership.changed.snapd_refresh')
@when_not('leadership.is_leader')
def set_snapd_timer():
    """
    Set the snapd refresh.timer on non-leader cluster members to match
    the leader's value (from the 'snapd_refresh' leadership key).

    :return: None
    """
    # NB: This method should only be run when 'snap.refresh.set' is present.
    # Layer-snap will always set a core refresh.timer, which may not be the
    # same as our leader. Gating with 'snap.refresh.set' ensures layer-snap
    # has finished and we are free to set our config to the leader's timer.
    timer = leader_get('snapd_refresh') or ''  # None will error
    hookenv.log('setting snapd_refresh timer to: {}'.format(timer))
    snap.set_refresh_timer(timer)
+
+
@when('config.changed.sysctl')
def write_sysctl():
    """
    Apply the 'sysctl' charm config to /etc/sysctl.d, skipping containers
    (where the relevant /proc/sys keys are typically not writable).

    :return: None
    """
    sysctl_settings = hookenv.config('sysctl')
    if sysctl_settings and not is_container():
        create_sysctl(
            sysctl_settings,
            '/etc/sysctl.d/50-kubernetes-charm.conf',
            # Some keys in the config may not exist in /proc/sys/net/.
            # For example, the conntrack module may not be loaded when
            # using lxd drivers insteam of kvm. In these cases, we
            # simply ignore the missing keys, rather than making time
            # consuming calls out to the filesystem to check for their
            # existence.
            ignore=True)
diff --git a/kubernetes-master/reactive/leadership.py b/kubernetes-master/reactive/leadership.py
new file mode 100644
index 0000000..29c6f3a
--- /dev/null
+++ b/kubernetes-master/reactive/leadership.py
@@ -0,0 +1,68 @@
+# Copyright 2015-2016 Canonical Ltd.
+#
+# This file is part of the Leadership Layer for Juju.
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 3, as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranties of
+# MERCHANTABILITY, SATISFACTORY QUALITY, or FITNESS FOR A PARTICULAR
+# PURPOSE. See the GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see .
+
+from charmhelpers.core import hookenv
+from charmhelpers.core import unitdata
+
+from charms import reactive
+from charms.leadership import leader_get, leader_set
+
+
+__all__ = ['leader_get', 'leader_set'] # Backwards compatibility
+
+
def initialize_leadership_state():
    '''Initialize leadership.* states from the hook environment.

    Invoked by hookenv.atstart() so states are available in
    @hook decorated handlers.

    Maintains: leadership.is_leader, leadership.set.<key>,
    leadership.changed.<key>, and leadership.changed; mirrors leader
    settings into unitdata under the 'leadership.settings.' prefix so
    changes can be detected between hook invocations.
    '''
    is_leader = hookenv.is_leader()
    if is_leader:
        hookenv.log('Initializing Leadership Layer (is leader)')
    else:
        hookenv.log('Initializing Leadership Layer (is follower)')

    reactive.helpers.toggle_state('leadership.is_leader', is_leader)

    previous = unitdata.kv().getrange('leadership.settings.', strip=True)
    current = hookenv.leader_get()

    # Handle deletions.
    for key in set(previous.keys()) - set(current.keys()):
        current[key] = None

    any_changed = False
    for key, value in current.items():
        reactive.helpers.toggle_state('leadership.changed.{}'.format(key),
                                      value != previous.get(key))
        if value != previous.get(key):
            any_changed = True
        reactive.helpers.toggle_state('leadership.set.{}'.format(key),
                                      value is not None)
    reactive.helpers.toggle_state('leadership.changed', any_changed)

    unitdata.kv().update(current, prefix='leadership.settings.')
+
+
# Per https://github.com/juju-solutions/charms.reactive/issues/33,
# this module may be imported multiple times so ensure the
# initialization hook is only registered once. I have to piggy back
# onto the namespace of a module imported before reactive discovery
# to do this.
if not hasattr(reactive, '_leadership_registered'):
    hookenv.atstart(initialize_leadership_state)
    reactive._leadership_registered = True
diff --git a/kubernetes-master/reactive/snap.py b/kubernetes-master/reactive/snap.py
new file mode 100644
index 0000000..1fda7b7
--- /dev/null
+++ b/kubernetes-master/reactive/snap.py
@@ -0,0 +1,349 @@
+# Copyright 2016-2019 Canonical Ltd.
+#
+# This file is part of the Snap layer for Juju.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""
+charms.reactive helpers for dealing with Snap packages.
+"""
+from collections import OrderedDict
+from distutils.version import LooseVersion
+import os.path
+from os import uname
+import shutil
+import subprocess
+from textwrap import dedent
+import time
+from urllib.request import urlretrieve
+
+from charmhelpers.core import hookenv, host
+from charmhelpers.core.hookenv import ERROR
+from charmhelpers.core.host import write_file
+from charms import layer
+from charms import reactive
+from charms.layer import snap
+from charms.reactive import register_trigger, when, when_not, toggle_flag
+from charms.reactive.helpers import data_changed
+
+
class UnsatisfiedMinimumVersionError(Exception):
    """Raised when snapd could not be upgraded to a required minimum version."""

    def __init__(self, desired, actual):
        super().__init__()
        self.desired = desired  # minimum version that was requested
        self.actual = actual    # version actually installed

    def __str__(self):
        return "Could not install snapd >= {}, got {}".format(
            self.desired, self.actual)
+
+
class InvalidBundleError(Exception):
    '''Raised when snapd rejects the proxy assertion bundle or store id.'''
    pass
+
+
def sorted_snap_opts():
    '''Return the snap layer options as an OrderedDict with "core" first.

    The core snap must be handled before any other snap, so sort it to
    the front (False sorts before True).
    '''
    opts = layer.options("snap")
    ordered = sorted(opts.items(), key=lambda kv: kv[0] != "core")
    return OrderedDict(ordered)
+
+
def install():
    '''Install every snap declared in layer options ("core" first).

    No-op until the running kernel supports snaps. Snaps whose
    supported-architectures option excludes this machine are skipped
    with an ERROR log rather than failing the hook.
    '''
    # Do nothing if we don't have kernel support yet
    if not kernel_supported():
        return

    opts = sorted_snap_opts()
    # supported-architectures is EXPERIMENTAL and undocumented.
    # It probably should live in the base layer, blocking the charm
    # during bootstrap if the arch is unsupported.
    machine_arch = uname().machine
    for snapname, snap_opts in opts.items():
        supported = snap_opts.pop("supported-architectures", None)
        if supported and machine_arch not in supported:
            # Deliberately not fatal: the charm must cope with requested
            # snaps never getting installed, likely by doing its own
            # check on supported-architectures.
            hookenv.log(
                "Snap {} not supported on {!r} architecture".format(
                    snapname, machine_arch),
                ERROR,
            )
            continue
        if not reactive.is_flag_set("snap.installed.{}".format(snapname)):
            snap.install(snapname, **snap_opts)
    if data_changed("snap.install.opts", opts):
        snap.connect_all()
+
+
def check_refresh_available():
    '''Toggle the per-snap refresh-available flag for every installed snap.'''
    # Do nothing if we don't have kernel support yet
    if not kernel_supported():
        return

    refreshable = snap.get_available_refreshes()
    for name in snap.get_installed_snaps():
        toggle_flag(snap.get_refresh_available_flag(name),
                    name in refreshable)
+
+
def refresh():
    '''Refresh every snap declared in layer options ("core" first).

    No-op until the running kernel supports snaps. Snaps whose
    supported-architectures option excludes this machine are skipped.
    '''
    # Do nothing if we don't have kernel support yet
    if not kernel_supported():
        return

    opts = sorted_snap_opts()
    # supported-architectures is EXPERIMENTAL and undocumented.
    # It probably should live in the base layer, blocking the charm
    # during bootstrap if the arch is unsupported.
    # Use uname().machine for consistency with install(); uname()[4] is
    # the same field accessed positionally.
    arch = uname().machine
    check_refresh_available()
    for snapname, snap_opts in opts.items():
        supported_archs = snap_opts.pop("supported-architectures", None)
        if supported_archs and arch not in supported_archs:
            continue
        snap.refresh(snapname, **snap_opts)
    snap.connect_all()
+
+
@reactive.hook("upgrade-charm")
def upgrade_charm():
    '''Refresh all managed snaps when the charm itself is upgraded.'''
    refresh()
+
+
def get_series():
    '''Return the Ubuntu release codename (e.g. "xenial") via lsb_release.'''
    output = subprocess.check_output(["lsb_release", "-sc"],
                                     universal_newlines=True)
    return output.strip()
+
+
def snapd_supported():
    '''Return False only for trusty lxc containers, where snaps do not work.
    All other environments are assumed to support snapd.'''
    return not (get_series() == "trusty" and host.is_container())
+
+
def kernel_supported():
    '''Return True when the running kernel is new enough (>= 4.4) for snaps.

    Logs a message explaining that a reboot is required otherwise.
    '''
    release = uname().release
    supported = LooseVersion(release) >= LooseVersion("4.4")
    if not supported:
        hookenv.log(
            "Snaps do not work on kernel {}, a reboot "
            "into a supported kernel (>4.4) is required"
            "".format(release)
        )
    return supported
+
+
def ensure_snapd():
    '''Make sure snapd is installed, applying container workarounds.

    Raises Exception when snaps cannot work in this environment at all
    (trusty lxc containers).
    '''
    if not snapd_supported():
        hookenv.log("Snaps do not work in this environment", hookenv.ERROR)
        raise Exception("Snaps do not work in this environment")

    # Deliberately not using the apt layer: that would tie this layer
    # too closely to apt packaging, and this may be a snap-only system.
    if not shutil.which("snap"):
        os.environ["DEBIAN_FRONTEND"] = "noninteractive"
        cmd = ["apt-get", "install", "-y", "snapd"]
        if get_series() == "trusty":
            # LP:1699986: Force install of systemd on Trusty.
            cmd.append("systemd")
        subprocess.check_call(cmd, universal_newlines=True)

    # Work around lp:1628289: squashfuse is required for snaps inside
    # lxd containers until snapd itself depends on it. Remove once snaps
    # work in lxd xenial containers without the workaround.
    if host.is_container() and not shutil.which("squashfuse"):
        os.environ["DEBIAN_FRONTEND"] = "noninteractive"
        cmd = ["apt-get", "install", "-y", "squashfuse", "fuse"]
        subprocess.check_call(cmd, universal_newlines=True)
+
+
def proxy_settings():
    '''Build the proxy environment for snapd.

    Starts from http(s)_proxy in the hook environment; the snap_proxy
    charm config, when set, overrides both.
    '''
    env = {}
    for key in ("http_proxy", "https_proxy"):
        if key in os.environ:
            env[key] = os.environ[key]

    override = hookenv.config().get("snap_proxy")
    if override:
        env["http_proxy"] = override
        env["https_proxy"] = override
    return env
+
+
def update_snap_proxy():
    '''Write or remove a systemd override so snapd uses the charm proxy.

    Restarts snapd when the effective proxy settings change. Runs from
    hookenv.atstart() so proxies are in place before any install/refresh.
    '''
    # Do nothing if we don't have kernel support yet
    if not kernel_supported():
        return

    # This is a hack based on
    # https://bugs.launchpad.net/layer-snap/+bug/1533899/comments/1
    # Do it properly when Bug #1533899 is addressed.
    # Note we can't do this in a standard reactive handler as we need
    # to ensure proxies are configured before attempting installs or
    # updates.
    proxy = proxy_settings()

    override_dir = "/etc/systemd/system/snapd.service.d"
    path = os.path.join(override_dir, "snap_layer_proxy.conf")
    if not proxy and not os.path.exists(path):
        return  # No proxy asked for and proxy never configured.

    # It seems we cannot rely on this directory existing, so manually
    # create it.
    if not os.path.exists(override_dir):
        host.mkdir(override_dir, perms=0o755)

    if not data_changed("snap.proxy", proxy):
        return  # Short circuit avoids unnecessary restarts.

    if proxy:
        create_snap_proxy_conf(path, proxy)
    else:
        remove_snap_proxy_conf(path)
    # Reload unit files, then restart snapd so the new environment applies.
    subprocess.check_call(["systemctl", "daemon-reload"], universal_newlines=True)
    time.sleep(2)
    subprocess.check_call(["systemctl", "restart", "snapd.service"], universal_newlines=True)
+
+
def create_snap_proxy_conf(path, proxy):
    '''Write a systemd drop-in exporting the proxy vars to snapd.'''
    host.mkdir(os.path.dirname(path))
    lines = ["# Managed by Juju", "[Service]"]
    for proxy_key, proxy_value in proxy.items():
        lines.append("Environment={}={}".format(proxy_key, proxy_value))
    content = "\n".join(lines) + "\n"
    host.write_file(path, content.encode())
+
+
def remove_snap_proxy_conf(path):
    '''Delete the proxy drop-in file; a missing path is a no-op.'''
    if not os.path.exists(path):
        return
    os.remove(path)
+
+
def ensure_path():
    '''Append /snap/bin to $PATH if missing (Bug #1662856); idempotent.'''
    entries = os.environ["PATH"].split(":")
    if "/snap/bin" not in entries:
        os.environ["PATH"] = os.environ["PATH"] + ":/snap/bin"
+
+
def _get_snapd_version():
    '''Return the installed snapd version, parsed from `snap version`.'''
    stdout = subprocess.check_output(
        ["snap", "version"], stdin=subprocess.DEVNULL, universal_newlines=True
    )
    info = {}
    for line in stdout.splitlines():
        name, value = line.split(None, 1)
        info[name] = value
    return LooseVersion(info["snapd"])
+
+
# apt pin that keeps the -proposed pocket at low priority (400) so only
# packages explicitly requested from it (snapd) are installed.
PREFERENCES = """\
Package: *
Pin: release a={}-proposed
Pin-Priority: 400
"""
+
+
def ensure_snapd_min_version(min_version):
    '''Upgrade snapd from the -proposed pocket if older than min_version.

    Raises UnsatisfiedMinimumVersionError when even -proposed cannot
    satisfy the requirement.
    '''
    snapd_version = _get_snapd_version()
    if snapd_version < LooseVersion(min_version):
        # Imported lazily: charmhelpers.fetch pulls in apt machinery that
        # is only needed on this (rare) upgrade path.
        from charmhelpers.fetch import add_source, apt_update, apt_install

        # Temporary until LP:1735344 lands
        add_source("distro-proposed", fail_invalid=True)
        distro = get_series()
        # disable proposed by default, needs to explicit
        write_file(
            "/etc/apt/preferences.d/proposed",
            PREFERENCES.format(distro),
        )
        apt_update()
        # explicitly install snapd from proposed
        apt_install("snapd/{}-proposed".format(distro))
        snapd_version = _get_snapd_version()
        if snapd_version < LooseVersion(min_version):
            hookenv.log("Failed to install snapd >= {}".format(min_version), ERROR)
            raise UnsatisfiedMinimumVersionError(min_version, snapd_version)
+
+
def download_assertion_bundle(proxy_url):
    """Fetch the store assertion bundle from a snap proxy.

    Returns (local_path, store_id), where store_id comes from the
    X-Assertion-Store-Id response header.
    """
    url = "{}/v2/auth/store/assertions".format(proxy_url)
    local_bundle, headers = urlretrieve(url)
    return local_bundle, headers["X-Assertion-Store-Id"]
+
+
def configure_snap_store_proxy():
    '''Point snapd at a snap store proxy when snap_proxy_url is set.

    Acks the proxy's assertion bundle, then sets core proxy.store to the
    advertised store id (or clears it when the config is unset).
    Requires snapd >= 2.30. Raises InvalidBundleError if snapd rejects
    the bundle or the store id.
    '''
    # Do nothing if we don't have kernel support yet
    if not kernel_supported():
        return

    if not reactive.is_flag_set("config.changed.snap_proxy_url"):
        return
    config = hookenv.config()
    if "snap_proxy_url" not in config:
        # The deprecated snap_proxy_url config items have been removed
        # from config.yaml. If the charm author hasn't added them back
        # explicitly, there is nothing to do. Juju is maintaining these
        # settings as model configuration.
        return
    snap_store_proxy_url = config.get("snap_proxy_url")
    if not snap_store_proxy_url and not config.previous("snap_proxy_url"):
        # Proxy url is not set, and was not set previous hook. Do nothing,
        # to avoid overwriting the Juju maintained setting.
        return
    ensure_snapd_min_version("2.30")
    if snap_store_proxy_url:
        bundle, store_id = download_assertion_bundle(snap_store_proxy_url)
        try:
            subprocess.check_output(
                ["snap", "ack", bundle],
                stdin=subprocess.DEVNULL,
                universal_newlines=True,
            )
        except subprocess.CalledProcessError as e:
            raise InvalidBundleError("snapd could not ack the proxy assertion: " + e.output)
    else:
        # Empty store id reverts snapd to the default store.
        store_id = ""

    try:
        subprocess.check_output(
            ["snap", "set", "core", "proxy.store={}".format(store_id)],
            stdin=subprocess.DEVNULL,
            universal_newlines=True,
        )
    except subprocess.CalledProcessError as e:
        raise InvalidBundleError("Proxy ID from header did not match store assertion: " + e.output)
+
+
+register_trigger(when="config.changed.snapd_refresh", clear_flag="snap.refresh.set")
+
+
@when_not("snap.refresh.set")
@when("snap.installed.core")
def change_snapd_refresh():
    """Apply the snapd_refresh charm config to the system refresh.timer."""
    ensure_snapd_min_version("2.31")
    timer = hookenv.config()["snapd_refresh"]
    previously_set = reactive.is_flag_set("snap.refresh.was-set")
    # Only touch the system setting when there is a timer to apply, or
    # one was applied before (so clearing the config resets it).
    if timer or previously_set:
        snap.set_refresh_timer(timer)
    reactive.toggle_flag("snap.refresh.was-set", timer)
    reactive.set_flag("snap.refresh.set")
+
+
# Bootstrap. We don't use standard reactive handlers to ensure that
# everything is bootstrapped before any charm handlers are run.
# Order matters: snapd and $PATH first, proxies before any install.
hookenv.atstart(hookenv.log, "Initializing Snap Layer")
hookenv.atstart(ensure_snapd)
hookenv.atstart(ensure_path)
hookenv.atstart(update_snap_proxy)
hookenv.atstart(configure_snap_store_proxy)
hookenv.atstart(install)
diff --git a/kubernetes-master/reactive/status.py b/kubernetes-master/reactive/status.py
new file mode 100644
index 0000000..2f33f3f
--- /dev/null
+++ b/kubernetes-master/reactive/status.py
@@ -0,0 +1,4 @@
from charms import layer


# Initialize the status layer at import time so its helpers are usable
# by other handlers during reactive discovery.
layer.status._initialize()
diff --git a/kubernetes-master/reactive/tls_client.py b/kubernetes-master/reactive/tls_client.py
new file mode 100644
index 0000000..afa2228
--- /dev/null
+++ b/kubernetes-master/reactive/tls_client.py
@@ -0,0 +1,208 @@
+import os
+
+from pathlib import Path
+from subprocess import check_call
+
+from charms import layer
+from charms.reactive import hook
+from charms.reactive import set_state, remove_state
+from charms.reactive import when
+from charms.reactive import set_flag, clear_flag
+from charms.reactive import endpoint_from_flag
+from charms.reactive.helpers import data_changed
+
+from charmhelpers.core import hookenv, unitdata
+from charmhelpers.core.hookenv import log
+
+
@when('certificates.ca.available')
def store_ca(tls):
    '''Save the CA certificate from the relation to the configured path
    and install it into the system trust store when it changes.'''
    certificate_authority = tls.get_ca()
    if not certificate_authority:
        return
    layer_options = layer.options('tls-client')
    ca_path = layer_options.get('ca_certificate_path')
    changed = data_changed('certificate_authority', certificate_authority)
    if ca_path and (changed or not os.path.exists(ca_path)):
        log('Writing CA certificate to {0}'.format(ca_path))
        # Some consumers blow up without a trailing newline on the PEM.
        # See https://bugs.launchpad.net/charm-kubernetes-master/+bug/1828034
        if not certificate_authority.endswith('\n'):
            certificate_authority += '\n'
        _write_file(ca_path, certificate_authority)
        set_state('tls_client.ca.written')
        set_state('tls_client.ca.saved')
    if changed:
        # Update /etc/ssl/certs and generate ca-certificates.crt
        install_ca(certificate_authority)
+
+
@when('certificates.server.cert.available')
def store_server(tls):
    '''Save the server certificate (with chain appended, if any) and
    server key from the relation to their configured paths.'''
    server_cert, server_key = tls.get_server_cert()
    chain = tls.get_chain()
    if chain:
        server_cert = server_cert + '\n' + chain
    if not (server_cert and server_key):
        return
    layer_options = layer.options('tls-client')
    cert_path = layer_options.get('server_certificate_path')
    key_path = layer_options.get('server_key_path')
    cert_changed = data_changed('server_certificate', server_cert)
    key_changed = data_changed('server_key', server_key)
    # (Re)write when the data changed or the file went missing on disk.
    if cert_path and (cert_changed or not os.path.exists(cert_path)):
        log('Writing server certificate to {0}'.format(cert_path))
        _write_file(cert_path, server_cert)
        set_state('tls_client.server.certificate.written')
        set_state('tls_client.server.certificate.saved')
    if key_path and (key_changed or not os.path.exists(key_path)):
        log('Writing server key to {0}'.format(key_path))
        _write_file(key_path, server_key)
        set_state('tls_client.server.key.saved')
+
+
@when('certificates.client.cert.available')
def store_client(tls):
    '''Save the client certificate (with chain appended, if any) and
    client key from the relation to their configured paths.'''
    client_cert, client_key = tls.get_client_cert()
    chain = tls.get_chain()
    if chain:
        client_cert = client_cert + '\n' + chain
    if not (client_cert and client_key):
        return
    layer_options = layer.options('tls-client')
    cert_path = layer_options.get('client_certificate_path')
    key_path = layer_options.get('client_key_path')
    cert_changed = data_changed('client_certificate', client_cert)
    key_changed = data_changed('client_key', client_key)
    # (Re)write when the data changed or the file went missing on disk.
    if cert_path and (cert_changed or not os.path.exists(cert_path)):
        log('Writing client certificate to {0}'.format(cert_path))
        _write_file(cert_path, client_cert)
        set_state('tls_client.client.certificate.written')
        set_state('tls_client.client.certificate.saved')
    if key_path and (key_changed or not os.path.exists(key_path)):
        log('Writing client key to {0}'.format(key_path))
        _write_file(key_path, client_key)
        set_state('tls_client.client.key.saved')
+
+
@when('certificates.certs.changed')
def update_certs():
    '''Write every requested server/client cert+key pair to disk.

    Paths were recorded earlier in unitdata under
    'layer.tls-client.cert-paths'. Per-cert reactive flags are pulsed
    (cleared then set) so downstream handlers re-trigger even if a flag
    was already raised.
    '''
    tls = endpoint_from_flag('certificates.certs.changed')
    certs_paths = unitdata.kv().get('layer.tls-client.cert-paths', {})
    all_ready = True    # stays True only if every requested cert arrived
    any_changed = False
    maps = {
        'server': tls.server_certs_map,
        'client': tls.client_certs_map,
    }

    if maps.get('client') == {}:
        log(
            'No client certs found using maps. Checking for global \
client certificates.',
            'WARNING'
        )
        # Check for global certs,
        # Backwards compatibility https://bugs.launchpad.net/charm-kubernetes-master/+bug/1825819
        cert_pair = tls.get_client_cert()
        if cert_pair is not None:
            # Reuse the single global pair for every requested client name.
            for client_name in certs_paths.get('client', {}).keys():
                maps.get('client').update({
                    client_name: cert_pair
                })

    chain = tls.get_chain()
    for cert_type in ('server', 'client'):
        for common_name, paths in certs_paths.get(cert_type, {}).items():
            cert_pair = maps[cert_type].get(common_name)
            if not cert_pair:
                all_ready = False
                continue
            if not data_changed('layer.tls-client.'
                                '{}.{}'.format(cert_type, common_name), cert_pair):
                continue

            cert = None
            key = None
            # cert_pair is either a Certificate-like object (with .cert /
            # .key attributes) or a plain (cert, key) tuple — legacy path.
            if type(cert_pair) is not tuple:
                if paths['crt']:
                    cert = cert_pair.cert
                if paths['key']:
                    key = cert_pair.key
            else:
                cert, key = cert_pair

            if cert:
                if chain:
                    # Append the chain so consumers get the full bundle.
                    cert = cert + '\n' + chain
                _ensure_directory(paths['crt'])
                Path(paths['crt']).write_text(cert)

            if key:
                _ensure_directory(paths['key'])
                Path(paths['key']).write_text(key)

            any_changed = True
            # clear flags first to ensure they are re-triggered if left set
            clear_flag('tls_client.{}.certs.changed'.format(cert_type))
            clear_flag('tls_client.{}.cert.{}.changed'.format(cert_type,
                                                              common_name))
            set_flag('tls_client.{}.certs.changed'.format(cert_type))
            set_flag('tls_client.{}.cert.{}.changed'.format(cert_type,
                                                            common_name))
    if all_ready:
        set_flag('tls_client.certs.saved')
    if any_changed:
        # Pulse the aggregate flag as well.
        clear_flag('tls_client.certs.changed')
        set_flag('tls_client.certs.changed')
    clear_flag('certificates.certs.changed')
+
+
def install_ca(certificate_authority):
    '''Install a certificate authority into the system trust store by
    calling update-ca-certificates. No-op for empty input.'''
    if not certificate_authority:
        return
    name = hookenv.service_name()
    # Debian-family location for locally trusted CAs.
    ca_path = '/usr/local/share/ca-certificates/{0}.crt'.format(name)
    log('Writing CA certificate to {0}'.format(ca_path))
    _write_file(ca_path, certificate_authority)
    # Rebuild the trusted CA bundle (a time expensive operation).
    check_call(['update-ca-certificates'])
    log('Generated ca-certificates.crt for {0}'.format(name))
    set_state('tls_client.ca_installed')
+
+
@hook('upgrade-charm')
def remove_states():
    '''Clear all saved-certificate states on upgrade so certs and keys
    get rewritten by the store_* handlers.'''
    for state in ('tls_client.ca.saved',
                  'tls_client.server.certificate.saved',
                  'tls_client.server.key.saved',
                  'tls_client.client.certificate.saved',
                  'tls_client.client.key.saved'):
        remove_state(state)
+
+
+def _ensure_directory(path):
+ '''Ensure the parent directory exists creating directories if necessary.'''
+ directory = os.path.dirname(path)
+ if not os.path.isdir(directory):
+ os.makedirs(directory)
+ os.chmod(directory, 0o770)
+
+
def _write_file(path, content):
    '''Write content to path, creating the parent directory if needed
    and restricting the file to 0o440 permissions.'''
    _ensure_directory(path)
    with open(path, 'w') as fp:
        fp.write(content)
    os.chmod(path, 0o440)
diff --git a/kubernetes-master/reactive/vault_kv.py b/kubernetes-master/reactive/vault_kv.py
new file mode 100644
index 0000000..4396bf6
--- /dev/null
+++ b/kubernetes-master/reactive/vault_kv.py
@@ -0,0 +1,67 @@
+from charmhelpers.core import hookenv, host
+from charms.reactive import when_all, when_not, set_flag, clear_flag
+from charms.reactive import endpoint_from_flag, register_trigger
+from charms.reactive import data_changed
+
+from charms.layer import vault_kv
+
+
# Losing the vault-kv relation invalidates both readiness and the
# pending backend request, so clear those flags automatically.
register_trigger(when_not='vault-kv.connected',
                 clear_flag='layer.vault-kv.ready')
register_trigger(when_not='vault-kv.connected',
                 clear_flag='layer.vault-kv.requested')
+
+
@when_all('vault-kv.connected')
@when_not('layer.vault-kv.requested')
def request_vault_access():
    '''Ask Vault for this charm's secret backend (exactly once).'''
    vault = endpoint_from_flag('vault-kv.connected')
    # backend can't be isolated or VaultAppKV won't work; see issue #2
    vault.request_secret_backend(vault_kv._get_secret_backend(),
                                 isolated=False)
    set_flag('layer.vault-kv.requested')
+
+
@when_all('vault-kv.available')
def set_ready():
    '''Toggle layer.vault-kv.ready based on whether the Vault connection
    config can actually be retrieved.'''
    try:
        vault_kv.get_vault_config()
    except vault_kv.VaultNotReady:
        clear_flag('layer.vault-kv.ready')
        return
    set_flag('layer.vault-kv.ready')
+
+
@when_all('layer.vault-kv.ready')
def check_config_changed():
    '''Raise layer.vault-kv.config.changed when the Vault connection
    info differs from what was last seen.'''
    try:
        config = vault_kv.get_vault_config()
    except vault_kv.VaultNotReady:
        return
    if data_changed('layer.vault-kv.config', config):
        set_flag('layer.vault-kv.config.changed')
+
+
def manage_app_kv_flags():
    '''Sync reactive flags for every key in the app-level Vault KV store.

    If Vault becomes unavailable (before or during iteration), all
    related flags are cleared so handlers don't act on stale data.
    '''
    try:
        app_kv = vault_kv.VaultAppKV()
        for key in app_kv.keys():
            app_kv._manage_flags(key)
    except vault_kv.VaultNotReady:
        vault_kv.VaultAppKV._clear_all_flags()
+
+
def update_app_kv_hashes():
    '''Persist app KV hashes at hook exit; silently no-op if Vault is down.

    When the leader sees changes, it also rotates a nonce in leader data
    so follower units get a hook and re-read the store.
    '''
    try:
        store = vault_kv.VaultAppKV()
        if hookenv.is_leader() and store.any_changed():
            # force hooks to run on non-leader units
            hookenv.leader_set({'vault-kv-nonce': host.pwgen(8)})
        store.update_hashes()
    except vault_kv.VaultNotReady:
        pass
+
+
+hookenv.atstart(manage_app_kv_flags)
+hookenv.atexit(update_app_kv_hashes)
diff --git a/kubernetes-master/reactive/vaultlocker.py b/kubernetes-master/reactive/vaultlocker.py
new file mode 100644
index 0000000..d591c1e
--- /dev/null
+++ b/kubernetes-master/reactive/vaultlocker.py
@@ -0,0 +1,49 @@
+import shutil
+
+from charms.reactive import when_all, when_not, set_flag, clear_flag
+from charmhelpers.core import hookenv, host
+
+from charms import apt
+from charms import layer
+
+
@when_not('apt.installed.vaultlocker')
def install_vaultlocker():
    '''Queue installation of vaultlocker.

    On bionic and higher, vaultlocker is available in the default system
    sources. For xenial, we need to add the queens cloud archive first.
    '''
    series = host.lsb_release()['DISTRIB_CODENAME'].lower()
    if series == 'xenial':
        apt.add_source('cloud:queens')
        apt.update()
    apt.queue_install(['vaultlocker'])
+
+
@when_all('apt.installed.vaultlocker',
          'layer.vault-kv.ready',
          'layer.vault-kv.config.changed')
def configure_vaultlocker():
    '''(Re)write vaultlocker config whenever the Vault connection changes.'''
    # write VaultLocker config file
    layer.vaultlocker.write_vaultlocker_conf(layer.vault_kv.get_vault_config())
    # create location for loop device service envs
    layer.vaultlocker.LOOP_ENVS.mkdir(parents=True, exist_ok=True)
    # create loop device service template
    shutil.copyfile('templates/vaultlocker-loop@.service',
                    '/etc/systemd/system/vaultlocker-loop@.service')
    # mark as complete
    set_flag('layer.vaultlocker.configured')
    clear_flag('layer.vault-kv.config.changed')
+
+
@when_all('layer.vaultlocker.configured')
@when_not('layer.vaultlocker.ready')
def auto_encrypt():
    '''Encrypt every storage item flagged vaultlocker-encrypt in metadata.'''
    storage = hookenv.metadata().get('storage', {})
    for storage_name, storage_metadata in storage.items():
        if storage_metadata.get('vaultlocker-encrypt', False):
            layer.vaultlocker.encrypt_storage(
                storage_name,
                storage_metadata.get('vaultlocker-mountbase'))
    set_flag('layer.vaultlocker.ready')
diff --git a/kubernetes-master/requirements.txt b/kubernetes-master/requirements.txt
new file mode 100644
index 0000000..55543d9
--- /dev/null
+++ b/kubernetes-master/requirements.txt
@@ -0,0 +1,3 @@
+mock
+flake8
+pytest
diff --git a/kubernetes-master/revision b/kubernetes-master/revision
new file mode 100644
index 0000000..c227083
--- /dev/null
+++ b/kubernetes-master/revision
@@ -0,0 +1 @@
+0
\ No newline at end of file
diff --git a/kubernetes-master/setup.py b/kubernetes-master/setup.py
new file mode 100755
index 0000000..b30bff5
--- /dev/null
+++ b/kubernetes-master/setup.py
@@ -0,0 +1,28 @@
#!/usr/bin/env python

import os
from setuptools import setup

here = os.path.abspath(os.path.dirname(__file__))

# Long description comes from the layer's README.
with open(os.path.join(here, "README.md")) as f:
    README = f.read()

setup(
    name="layer_snap",
    version="1.0.0",
    description="layer_snap",
    long_description=README,
    license="Apache License 2.0",
    classifiers=[
        "Development Status :: 5 - Production/Stable",
        "Intended Audience :: Developers",
        "Programming Language :: Python :: 3",
    ],
    url="https://git.launchpad.net/layer-snap",
    package_dir={"": "lib"},
    # setuptools expects dotted package names, not filesystem paths;
    # "charms/layer" is not a valid entry per the setuptools keyword docs.
    packages=["charms.layer"],
    include_package_data=True,
    zip_safe=False,
    install_requires=["charmhelpers", "charms.reactive"],
)
diff --git a/kubernetes-master/templates/cdk-service-kicker b/kubernetes-master/templates/cdk-service-kicker
new file mode 100644
index 0000000..26d3740
--- /dev/null
+++ b/kubernetes-master/templates/cdk-service-kicker
@@ -0,0 +1,34 @@
#!/bin/sh
set -eu

# This service runs on boot to work around issues relating to LXD and snapd.

# Workaround for https://github.com/conjure-up/conjure-up/issues/1448
if [ -f '/proc/1/environ' ] && grep -q '^container=lxc' /proc/1/environ; then
    echo "lxc detected, applying snapd apparmor profiles"
    # Best-effort in a subshell: individual profiles may fail to load
    # without aborting the script (set -e is disabled inside).
    (set +e
    apparmor_parser /var/lib/snapd/apparmor/profiles/*
    echo "apparmor_parser: exit status $?"
    )
else
    echo "lxc not detected, skipping snapd apparmor profiles"
fi

# Workaround for https://github.com/juju-solutions/bundle-canonical-kubernetes/issues/357
# Space-separated list of systemd units, templated in by the charm.
services="{{services}}"

# Keep kicking inactive services for 10 minutes after boot.
deadline="$(expr "$(date +%s)" + 600)"

while [ "$(date +%s)" -lt "$deadline" ]; do
    for service in $services; do
        echo "$service: checking"
        if ! systemctl is-active "$service"; then
            echo "$service: not active, restarting"
            systemctl restart "$service" || true
        fi
    done

    sleep 10
done

echo "deadline has passed, exiting gracefully"
diff --git a/kubernetes-master/templates/cdk-service-kicker.service b/kubernetes-master/templates/cdk-service-kicker.service
new file mode 100644
index 0000000..5c2105e
--- /dev/null
+++ b/kubernetes-master/templates/cdk-service-kicker.service
@@ -0,0 +1,10 @@
+[Unit]
+Description=cdk-service-kicker
+
+[Service]
+ExecStart=/usr/bin/cdk-service-kicker
+Restart=on-failure
+Type=simple
+
+[Install]
+WantedBy=multi-user.target
diff --git a/kubernetes-master/templates/cdk.master.auth-webhook-conf.yaml b/kubernetes-master/templates/cdk.master.auth-webhook-conf.yaml
new file mode 100644
index 0000000..e2d3fa0
--- /dev/null
+++ b/kubernetes-master/templates/cdk.master.auth-webhook-conf.yaml
@@ -0,0 +1,15 @@
+apiVersion: v1
+kind: Config
+clusters:
+ - name: authn
+ cluster:
+ certificate-authority: /root/cdk/ca.crt
+ server: https://{{ host }}:{{ port }}/{{ api_ver }}
+users:
+ - name: kube-apiserver
+contexts:
+- context:
+ cluster: authn
+ user: kube-apiserver
+ name: authn
+current-context: authn
diff --git a/kubernetes-master/templates/cdk.master.auth-webhook-secret.yaml b/kubernetes-master/templates/cdk.master.auth-webhook-secret.yaml
new file mode 100644
index 0000000..a12c402
--- /dev/null
+++ b/kubernetes-master/templates/cdk.master.auth-webhook-secret.yaml
@@ -0,0 +1,13 @@
+# Manifest for CK secrets that auth-webhook expects
+---
+apiVersion: v1
+kind: Secret
+metadata:
+ name: {{ secret_name }}
+ namespace: {{ secret_namespace }}
+type: {{ type }}
+data:
+ uid: {{ user }}
+ username: {{ username }}
+ password: {{ password }}
+ groups: '{{ groups }}'
diff --git a/kubernetes-master/templates/cdk.master.auth-webhook.logrotate b/kubernetes-master/templates/cdk.master.auth-webhook.logrotate
new file mode 100644
index 0000000..aefa2b7
--- /dev/null
+++ b/kubernetes-master/templates/cdk.master.auth-webhook.logrotate
@@ -0,0 +1,11 @@
+{{ root_dir }}/*.log {
+ daily
+ rotate 10
+ missingok
+ notifempty
+ compress
+ sharedscripts
+ postrotate
+ kill -USR1 $(cat {{ root_dir }}/{{ pidfile }})
+ endscript
+}
diff --git a/kubernetes-master/templates/cdk.master.auth-webhook.py b/kubernetes-master/templates/cdk.master.auth-webhook.py
new file mode 100644
index 0000000..4cb0cd6
--- /dev/null
+++ b/kubernetes-master/templates/cdk.master.auth-webhook.py
@@ -0,0 +1,286 @@
+#!/usr/bin/env python3
+
+import csv
+import json
+import logging
+import requests
+from base64 import b64decode
+from copy import deepcopy
+from flask import Flask, request, jsonify
+from pathlib import Path
+from subprocess import check_call, check_output, CalledProcessError, TimeoutExpired
+from yaml import safe_load
+app = Flask(__name__)
+
+
+def kubectl(*args):
+ '''Run a kubectl cli command with a config file.
+
+ Returns stdout and throws an error if the command fails.
+ '''
+ # Try to use our service account kubeconfig; fall back to root if needed
+ kubectl_cmd = Path('/snap/bin/kubectl')
+ if not kubectl_cmd.is_file():
+ # Fall back to anywhere on the path if the snap isn't available
+ kubectl_cmd = 'kubectl'
+ kubeconfig = '/root/.kube/config'
+ command = [str(kubectl_cmd), '--kubeconfig={}'.format(kubeconfig)] + list(args)
+ return check_output(command, timeout=10)
+
+
+def log_secret(text, obj, hide=True):
+ '''Log information about a TokenReview object.
+
+ The message will always be logged at the 'debug' level and will be in the
+ form "text: obj". By default, secrets will be hidden. Set 'hide=False' to
+ have the secret printed in the output unobfuscated.
+ '''
+ log_obj = obj
+ if obj and hide:
+ log_obj = deepcopy(obj)
+ try:
+ log_obj['spec']['token'] = '********'
+ except (KeyError, TypeError):
+ # No secret here, carry on
+ pass
+ app.logger.debug('{}: {}'.format(text, log_obj))
+
+
+def check_token(token_review):
+ '''Populate user info if token is found in auth-related files.'''
+ app.logger.info('Checking token')
+ token_to_check = token_review['spec']['token']
+
+ # If we have an admin token, short-circuit all other checks. This prevents us
+ # from leaking our admin token to other authn services.
+ admin_kubeconfig = Path('/root/.kube/config')
+ if admin_kubeconfig.exists():
+ with admin_kubeconfig.open('r') as f:
+ data = safe_load(f)
+ try:
+ admin_token = data['users'][0]['user']['token']
+ except (KeyError, ValueError):
+ # No admin kubeconfig; this is weird since we should always have an
+ # admin kubeconfig, but we shouldn't fail here in case there's
+ # something in known_tokens that should be validated.
+ pass
+ else:
+ if token_to_check == admin_token:
+ # We have a valid admin
+ token_review['status'] = {
+ 'authenticated': True,
+ 'user': {
+ 'username': 'admin',
+ 'uid': 'admin',
+ 'groups': ['system:masters']
+ }
+ }
+ return True
+
+ # No admin? We're probably in an upgrade. Check an existing known_tokens.csv.
+ csv_fields = ['token', 'username', 'user', 'groups']
+ known_tokens = Path('/root/cdk/known_tokens.csv')
+ try:
+ with known_tokens.open('r') as f:
+ data_by_token = {r['token']: r for r in csv.DictReader(f, csv_fields)}
+ except FileNotFoundError:
+ data_by_token = {}
+
+ if token_to_check in data_by_token:
+ record = data_by_token[token_to_check]
+ # groups are optional; default to an empty string if we don't have any
+ groups = record.get('groups', '').split(',')
+ token_review['status'] = {
+ 'authenticated': True,
+ 'user': {
+ 'username': record['username'],
+ 'uid': record['user'],
+ 'groups': groups,
+ }
+ }
+ return True
+ return False
+
+
+def check_secrets(token_review):
+ '''Populate user info if token is found in k8s secrets.'''
+ # Only check secrets if kube-apiserver is up
+ try:
+ output = check_call(['systemctl', 'is-active', 'snap.kube-apiserver.daemon'])
+ except CalledProcessError:
+ app.logger.info('Skipping secret check: kube-apiserver is not ready')
+ return False
+ else:
+ app.logger.info('Checking secret')
+
+ token_to_check = token_review['spec']['token']
+ try:
+ output = kubectl(
+ 'get', 'secrets', '-n', 'kube-system', '-o', 'json').decode('UTF-8')
+ except (CalledProcessError, TimeoutExpired) as e:
+ app.logger.info('Unable to load secrets: {}.'.format(e))
+ return False
+
+ secrets = json.loads(output)
+ if 'items' in secrets:
+ for secret in secrets['items']:
+ try:
+ data_b64 = secret['data']
+ password_b64 = data_b64['password'].encode('UTF-8')
+ username_b64 = data_b64['username'].encode('UTF-8')
+ except (KeyError, TypeError):
+ # CK secrets will have populated 'data', but not all secrets do
+ continue
+
+ password = b64decode(password_b64).decode('UTF-8')
+ if token_to_check == password:
+ groups_b64 = data_b64['groups'].encode('UTF-8') \
+ if 'groups' in data_b64 else b''
+
+ # NB: CK creates k8s secrets with the 'password' field set as
+ # uid::token. Split the decoded password so we can send a 'uid' back.
+ # If there is no delimiter, set uid == username.
+ # TODO: make the delimiter less magical so it doesn't get out of
+ # sync with the function that creates secrets in k8s-master.py.
+ username = uid = b64decode(username_b64).decode('UTF-8')
+ pw_delim = '::'
+ if pw_delim in password:
+ uid = password.rsplit(pw_delim, 1)[0]
+ groups = b64decode(groups_b64).decode('UTF-8').split(',')
+ token_review['status'] = {
+ 'authenticated': True,
+ 'user': {
+ 'username': username,
+ 'uid': uid,
+ 'groups': groups,
+ }
+ }
+ return True
+ return False
+
+
+def check_aws_iam(token_review):
+ '''Check the request with an AWS IAM authn server.'''
+ app.logger.info('Checking AWS IAM')
+
+ # URL comes from /root/cdk/aws-iam-webhook.yaml
+ url = '{{ aws_iam_endpoint }}'
+ app.logger.debug('Forwarding to: {}'.format(url))
+
+ return forward_request(token_review, url)
+
+
+def check_keystone(token_review):
+ '''Check the request with a Keystone authn server.'''
+ app.logger.info('Checking Keystone')
+
+ # URL comes from /root/cdk/keystone/webhook.yaml
+ url = '{{ keystone_endpoint }}'
+ app.logger.debug('Forwarding to: {}'.format(url))
+
+ return forward_request(token_review, url)
+
+
+def check_custom(token_review):
+ '''Check the request with a user-specified authn server.'''
+ app.logger.info('Checking Custom Endpoint')
+
+ # User will set the URL in k8s-master config
+ url = '{{ custom_authn_endpoint }}'
+ app.logger.debug('Forwarding to: {}'.format(url))
+
+ return forward_request(token_review, url)
+
+
+def forward_request(json_req, url):
+ '''Forward a JSON TokenReview request to a url.
+
+ Returns True if the request is authenticated; False if the response is
+ either invalid or authn has been denied.
+ '''
+ timeout = 10
+ try:
+ try:
+ r = requests.post(url, json=json_req, timeout=timeout)
+ except requests.exceptions.SSLError:
+ app.logger.debug('SSLError with server; skipping cert validation')
+ r = requests.post(url, json=json_req, verify=False, timeout=timeout)
+ except Exception as e:
+ app.logger.debug('Failed to contact server: {}'.format(e))
+ return False
+
+ # Check if the response is valid
+ try:
+ resp = json.loads(r.text)
+ 'authenticated' in resp['status']
+ except (KeyError, TypeError, ValueError):
+ log_secret(text='Invalid response from server', obj=r.text)
+ return False
+
+ # NB: When a forwarded request is authenticated, set the 'status' field to
+ # whatever the external server sends us. This ensures any status fields that
+ # the server wants to send makes it back to the kube apiserver.
+ if resp['status']['authenticated']:
+ json_req['status'] = resp['status']
+ return True
+ return False
+
+
+@app.route('/{{ api_ver }}', methods=['POST'])
+def webhook():
+ '''Listen on /$api_version for POST requests.
+
+ For a POSTed TokenReview object, check every known authentication mechanism
+ for a user with a matching token.
+
+ The /$api_version is expected to be the api version of the authentication.k8s.io
+ TokenReview that the k8s-apiserver will be sending.
+
+ Returns:
+ TokenReview object with 'authenticated: True' and user attributes if a
+ token is found; otherwise, a TokenReview object with 'authenticated: False'
+ '''
+ # Log to gunicorn
+ glogger = logging.getLogger('gunicorn.error')
+ app.logger.handlers = glogger.handlers
+ app.logger.setLevel(glogger.level)
+
+ req = request.json
+ try:
+ valid = True if (req['kind'] == 'TokenReview' and
+ req['spec']['token']) else False
+ except (KeyError, TypeError):
+ valid = False
+
+ if valid:
+ log_secret(text='REQ', obj=req)
+ else:
+ log_secret(text='Invalid request', obj=req)
+ return '' # flask needs to return something that isn't None
+
+ # Make the request unauthenticated by default
+ req['status'] = {'authenticated': False}
+
+ if (
+ check_token(req)
+ or check_secrets(req)
+ {%- if aws_iam_endpoint %}
+ or check_aws_iam(req)
+ {%- endif %}
+ {%- if keystone_endpoint %}
+ or check_keystone(req)
+ {%- endif %}
+ {%- if custom_authn_endpoint %}
+ or check_custom(req)
+ {%- endif %}
+ ):
+ # Successful checks will set auth and user data in the 'req' dict
+ log_secret(text='ACK', obj=req)
+ else:
+ log_secret(text='NAK', obj=req)
+
+ return jsonify(req)
+
+
+if __name__ == '__main__':
+ app.run()
diff --git a/kubernetes-master/templates/cdk.master.auth-webhook.service b/kubernetes-master/templates/cdk.master.auth-webhook.service
new file mode 100644
index 0000000..a7bf0ed
--- /dev/null
+++ b/kubernetes-master/templates/cdk.master.auth-webhook.service
@@ -0,0 +1,22 @@
+[Unit]
+Description=CDK master auth webhook
+After=network.target
+
+[Service]
+User=root
+WorkingDirectory={{ root_dir }}
+ExecStart={{ charm_dir }}/../.venv/bin/gunicorn \
+ --bind {{ host }}:{{ port }} \
+ --capture-output \
+ --certfile /root/cdk/server.crt \
+ --disable-redirect-access-to-syslog \
+ --error-logfile auth-webhook.log \
+ --keyfile /root/cdk/server.key \
+ --log-level debug \
+ --pid {{ pidfile }} \
+ --workers {{ num_workers }} \
+ auth-webhook:app
+Restart=always
+
+[Install]
+WantedBy=multi-user.target
diff --git a/kubernetes-master/templates/cdk.master.leader.file-watcher.path b/kubernetes-master/templates/cdk.master.leader.file-watcher.path
new file mode 100644
index 0000000..3855649
--- /dev/null
+++ b/kubernetes-master/templates/cdk.master.leader.file-watcher.path
@@ -0,0 +1,7 @@
+[Path]
+PathChanged=/root/cdk/basic_auth.csv
+PathChanged=/root/cdk/known_tokens.csv
+PathChanged=/root/cdk/serviceaccount.key
+
+[Install]
+WantedBy=multi-user.target
diff --git a/kubernetes-master/templates/cdk.master.leader.file-watcher.service b/kubernetes-master/templates/cdk.master.leader.file-watcher.service
new file mode 100644
index 0000000..c192c2c
--- /dev/null
+++ b/kubernetes-master/templates/cdk.master.leader.file-watcher.service
@@ -0,0 +1,10 @@
+[Unit]
+Description=CDK master leader file-watcher
+After=network.target
+
+[Service]
+Type=oneshot
+ExecStart=/usr/bin/juju-run {{ unit }} /usr/local/sbin/cdk.master.leader.file-watcher.sh
+
+[Install]
+WantedBy=multi-user.target
diff --git a/kubernetes-master/templates/cdk.master.leader.file-watcher.sh b/kubernetes-master/templates/cdk.master.leader.file-watcher.sh
new file mode 100644
index 0000000..01a2e86
--- /dev/null
+++ b/kubernetes-master/templates/cdk.master.leader.file-watcher.sh
@@ -0,0 +1,10 @@
+#!/bin/bash
+
+# This script is invoked by cdk.master.leader.file-watcher.service
+
+if [ "$(is-leader)" = "True" ]; then
+ leader-set \
+ "/root/cdk/basic_auth.csv=$(cat /root/cdk/basic_auth.csv)" \
+ "/root/cdk/known_tokens.csv=$(cat /root/cdk/known_tokens.csv)" \
+ "/root/cdk/serviceaccount.key=$(cat /root/cdk/serviceaccount.key)"
+fi
diff --git a/kubernetes-master/templates/ceph-secret.yaml b/kubernetes-master/templates/ceph-secret.yaml
new file mode 100644
index 0000000..fc6b1fb
--- /dev/null
+++ b/kubernetes-master/templates/ceph-secret.yaml
@@ -0,0 +1,7 @@
+apiVersion: v1
+kind: Secret
+metadata:
+ name: ceph-secret
+type: kubernetes.io/rbd
+data:
+ key: {{ secret }}
diff --git a/kubernetes-master/templates/ceph.conf b/kubernetes-master/templates/ceph.conf
new file mode 100644
index 0000000..d27c522
--- /dev/null
+++ b/kubernetes-master/templates/ceph.conf
@@ -0,0 +1,18 @@
+[global]
+auth cluster required = {{ auth_supported }}
+auth service required = {{ auth_supported }}
+auth client required = {{ auth_supported }}
+keyring = /etc/ceph/$cluster.$name.keyring
+mon host = {{ mon_hosts }}
+fsid = {{ fsid }}
+
+log to syslog = {{ use_syslog }}
+err to syslog = {{ use_syslog }}
+clog to syslog = {{ use_syslog }}
+mon cluster log to syslog = {{ use_syslog }}
+debug mon = {{ loglevel }}/5
+debug osd = {{ loglevel }}/5
+
+[client]
+log file = /var/log/ceph.log
+
diff --git a/kubernetes-master/templates/create-namespace.yaml.j2 b/kubernetes-master/templates/create-namespace.yaml.j2
new file mode 100644
index 0000000..a121ecc
--- /dev/null
+++ b/kubernetes-master/templates/create-namespace.yaml.j2
@@ -0,0 +1,6 @@
+apiVersion: v1
+kind: Namespace
+metadata:
+ name: {{ name }}
+ labels:
+ name: {{ name }}
diff --git a/kubernetes-master/templates/grafana/autoload/kubernetes.json b/kubernetes-master/templates/grafana/autoload/kubernetes.json
new file mode 100644
index 0000000..8f26875
--- /dev/null
+++ b/kubernetes-master/templates/grafana/autoload/kubernetes.json
@@ -0,0 +1,4032 @@
+{
+ "dashboard":{
+ "annotations":{
+ "list":[
+ {
+ "builtIn":1,
+ "datasource":"-- Grafana --",
+ "enable":true,
+ "hide":true,
+ "iconColor":"rgba(0, 211, 255, 1)",
+ "name":"Annotations & Alerts",
+ "type":"dashboard"
+ }
+ ]
+ },
+ "description":"Monitors Kubernetes cluster using Prometheus. Shows overall cluster CPU / Memory / Filesystem usage as well as individual pod, containers, systemd services statistics.",
+ "editable":true,
+ "gnetId":315,
+ "graphTooltip":0,
+ "id":null,
+ "iteration":1572969306389,
+ "links":[
+
+ ],
+ "panels":[
+ {
+ "collapsed":false,
+ "gridPos":{
+ "h":1,
+ "w":24,
+ "x":0,
+ "y":0
+ },
+ "id":49,
+ "panels":[
+
+ ],
+ "repeat":null,
+ "title":"Total usage",
+ "type":"row"
+ },
+ {
+ "cacheTimeout":null,
+ "colorBackground":false,
+ "colorValue":true,
+ "colors":[
+ "rgba(50, 172, 45, 0.97)",
+ "rgba(237, 129, 40, 0.89)",
+ "rgba(245, 54, 54, 0.9)"
+ ],
+ "datasource":"prometheus - Juju generated source",
+ "editable":true,
+ "error":false,
+ "format":"percent",
+ "gauge":{
+ "maxValue":100,
+ "minValue":0,
+ "show":true,
+ "thresholdLabels":false,
+ "thresholdMarkers":true
+ },
+ "gridPos":{
+ "h":5,
+ "w":8,
+ "x":0,
+ "y":1
+ },
+ "height":"180px",
+ "id":4,
+ "interval":null,
+ "links":[
+
+ ],
+ "mappingType":1,
+ "mappingTypes":[
+ {
+ "name":"value to text",
+ "value":1
+ },
+ {
+ "name":"range to text",
+ "value":2
+ }
+ ],
+ "maxDataPoints":100,
+ "nullPointMode":"connected",
+ "nullText":null,
+ "postfix":"",
+ "postfixFontSize":"50%",
+ "prefix":"",
+ "prefixFontSize":"50%",
+ "rangeMaps":[
+ {
+ "from":"null",
+ "text":"N/A",
+ "to":"null"
+ }
+ ],
+ "sparkline":{
+ "fillColor":"rgba(31, 118, 189, 0.18)",
+ "full":false,
+ "lineColor":"rgb(31, 120, 193)",
+ "show":false
+ },
+ "tableColumn":"",
+ "targets":[
+ {
+ "expr":"sum (container_memory_working_set_bytes{id=\"/\"}) / sum (machine_memory_bytes{}) * 100",
+ "format":"time_series",
+ "interval":"10s",
+ "intervalFactor":1,
+ "legendFormat":"",
+ "refId":"A",
+ "step":300
+ }
+ ],
+ "thresholds":"65, 90",
+ "title":"Cluster memory usage",
+ "transparent":false,
+ "type":"singlestat",
+ "valueFontSize":"80%",
+ "valueMaps":[
+ {
+ "op":"=",
+ "text":"N/A",
+ "value":"null"
+ }
+ ],
+ "valueName":"current"
+ },
+ {
+ "cacheTimeout":null,
+ "colorBackground":false,
+ "colorValue":true,
+ "colors":[
+ "rgba(50, 172, 45, 0.97)",
+ "rgba(237, 129, 40, 0.89)",
+ "rgba(245, 54, 54, 0.9)"
+ ],
+ "datasource":"prometheus - Juju generated source",
+ "decimals":2,
+ "editable":true,
+ "error":false,
+ "format":"percent",
+ "gauge":{
+ "maxValue":100,
+ "minValue":0,
+ "show":true,
+ "thresholdLabels":false,
+ "thresholdMarkers":true
+ },
+ "gridPos":{
+ "h":5,
+ "w":8,
+ "x":8,
+ "y":1
+ },
+ "height":"180px",
+ "id":6,
+ "interval":null,
+ "links":[
+
+ ],
+ "mappingType":1,
+ "mappingTypes":[
+ {
+ "name":"value to text",
+ "value":1
+ },
+ {
+ "name":"range to text",
+ "value":2
+ }
+ ],
+ "maxDataPoints":100,
+ "nullPointMode":"connected",
+ "nullText":null,
+ "postfix":"",
+ "postfixFontSize":"50%",
+ "prefix":"",
+ "prefixFontSize":"50%",
+ "rangeMaps":[
+ {
+ "from":"null",
+ "text":"N/A",
+ "to":"null"
+ }
+ ],
+ "sparkline":{
+ "fillColor":"rgba(31, 118, 189, 0.18)",
+ "full":false,
+ "lineColor":"rgb(31, 120, 193)",
+ "show":false
+ },
+ "tableColumn":"",
+ "targets":[
+ {
+ "expr":"sum (rate (container_cpu_usage_seconds_total{id=\"/\"}[1m])) / sum (machine_cpu_cores{}) * 100",
+ "format":"time_series",
+ "interval":"10s",
+ "intervalFactor":1,
+ "legendFormat":"",
+ "refId":"A",
+ "step":300
+ }
+ ],
+ "thresholds":"65, 90",
+ "title":"Cluster CPU usage (1m avg)",
+ "type":"singlestat",
+ "valueFontSize":"80%",
+ "valueMaps":[
+ {
+ "op":"=",
+ "text":"N/A",
+ "value":"null"
+ }
+ ],
+ "valueName":"current"
+ },
+ {
+ "cacheTimeout":null,
+ "colorBackground":false,
+ "colorValue":true,
+ "colors":[
+ "rgba(50, 172, 45, 0.97)",
+ "rgba(237, 129, 40, 0.89)",
+ "rgba(245, 54, 54, 0.9)"
+ ],
+ "datasource":"prometheus - Juju generated source",
+ "decimals":2,
+ "editable":true,
+ "error":false,
+ "format":"percent",
+ "gauge":{
+ "maxValue":100,
+ "minValue":0,
+ "show":true,
+ "thresholdLabels":false,
+ "thresholdMarkers":true
+ },
+ "gridPos":{
+ "h":5,
+ "w":8,
+ "x":16,
+ "y":1
+ },
+ "height":"180px",
+ "id":7,
+ "interval":null,
+ "links":[
+
+ ],
+ "mappingType":1,
+ "mappingTypes":[
+ {
+ "name":"value to text",
+ "value":1
+ },
+ {
+ "name":"range to text",
+ "value":2
+ }
+ ],
+ "maxDataPoints":100,
+ "nullPointMode":"connected",
+ "nullText":null,
+ "postfix":"",
+ "postfixFontSize":"50%",
+ "prefix":"",
+ "prefixFontSize":"50%",
+ "rangeMaps":[
+ {
+ "from":"null",
+ "text":"N/A",
+ "to":"null"
+ }
+ ],
+ "sparkline":{
+ "fillColor":"rgba(31, 118, 189, 0.18)",
+ "full":false,
+ "lineColor":"rgb(31, 120, 193)",
+ "show":false
+ },
+ "tableColumn":"",
+ "targets":[
+ {
+ "expr":"sum (container_fs_usage_bytes{}) / sum (container_fs_limit_bytes{id=\"/\"}) * 100",
+ "format":"time_series",
+ "interval":"10s",
+ "intervalFactor":1,
+ "legendFormat":"",
+ "metric":"",
+ "refId":"A",
+ "step":300
+ }
+ ],
+ "thresholds":"65, 90",
+ "title":"Cluster filesystem usage",
+ "type":"singlestat",
+ "valueFontSize":"80%",
+ "valueMaps":[
+ {
+ "op":"=",
+ "text":"N/A",
+ "value":"null"
+ }
+ ],
+ "valueName":"current"
+ },
+ {
+ "cacheTimeout":null,
+ "colorBackground":false,
+ "colorValue":false,
+ "colors":[
+ "rgba(50, 172, 45, 0.97)",
+ "rgba(237, 129, 40, 0.89)",
+ "rgba(245, 54, 54, 0.9)"
+ ],
+ "datasource":"prometheus - Juju generated source",
+ "decimals":2,
+ "editable":true,
+ "error":false,
+ "format":"bytes",
+ "gauge":{
+ "maxValue":100,
+ "minValue":0,
+ "show":false,
+ "thresholdLabels":false,
+ "thresholdMarkers":true
+ },
+ "gridPos":{
+ "h":3,
+ "w":4,
+ "x":0,
+ "y":6
+ },
+ "height":"1px",
+ "id":9,
+ "interval":null,
+ "links":[
+
+ ],
+ "mappingType":1,
+ "mappingTypes":[
+ {
+ "name":"value to text",
+ "value":1
+ },
+ {
+ "name":"range to text",
+ "value":2
+ }
+ ],
+ "maxDataPoints":100,
+ "nullPointMode":"connected",
+ "nullText":null,
+ "postfix":"",
+ "postfixFontSize":"20%",
+ "prefix":"",
+ "prefixFontSize":"20%",
+ "rangeMaps":[
+ {
+ "from":"null",
+ "text":"N/A",
+ "to":"null"
+ }
+ ],
+ "sparkline":{
+ "fillColor":"rgba(31, 118, 189, 0.18)",
+ "full":false,
+ "lineColor":"rgb(31, 120, 193)",
+ "show":false
+ },
+ "tableColumn":"",
+ "targets":[
+ {
+ "expr":"sum (container_memory_working_set_bytes{id=\"/\"})",
+ "format":"time_series",
+ "interval":"10s",
+ "intervalFactor":1,
+ "legendFormat":"",
+ "refId":"A",
+ "step":300
+ }
+ ],
+ "thresholds":"",
+ "title":"Used",
+ "type":"singlestat",
+ "valueFontSize":"50%",
+ "valueMaps":[
+ {
+ "op":"=",
+ "text":"N/A",
+ "value":"null"
+ }
+ ],
+ "valueName":"current"
+ },
+ {
+ "cacheTimeout":null,
+ "colorBackground":false,
+ "colorValue":false,
+ "colors":[
+ "rgba(50, 172, 45, 0.97)",
+ "rgba(237, 129, 40, 0.89)",
+ "rgba(245, 54, 54, 0.9)"
+ ],
+ "datasource":"prometheus - Juju generated source",
+ "decimals":2,
+ "editable":true,
+ "error":false,
+ "format":"bytes",
+ "gauge":{
+ "maxValue":100,
+ "minValue":0,
+ "show":false,
+ "thresholdLabels":false,
+ "thresholdMarkers":true
+ },
+ "gridPos":{
+ "h":3,
+ "w":4,
+ "x":4,
+ "y":6
+ },
+ "height":"1px",
+ "id":10,
+ "interval":null,
+ "links":[
+
+ ],
+ "mappingType":1,
+ "mappingTypes":[
+ {
+ "name":"value to text",
+ "value":1
+ },
+ {
+ "name":"range to text",
+ "value":2
+ }
+ ],
+ "maxDataPoints":100,
+ "nullPointMode":"connected",
+ "nullText":null,
+ "postfix":"",
+ "postfixFontSize":"50%",
+ "prefix":"",
+ "prefixFontSize":"50%",
+ "rangeMaps":[
+ {
+ "from":"null",
+ "text":"N/A",
+ "to":"null"
+ }
+ ],
+ "sparkline":{
+ "fillColor":"rgba(31, 118, 189, 0.18)",
+ "full":false,
+ "lineColor":"rgb(31, 120, 193)",
+ "show":false
+ },
+ "tableColumn":"",
+ "targets":[
+ {
+ "expr":"sum (machine_memory_bytes{})",
+ "format":"time_series",
+ "interval":"10s",
+ "intervalFactor":1,
+ "refId":"A",
+ "step":300
+ }
+ ],
+ "thresholds":"",
+ "title":"Total",
+ "type":"singlestat",
+ "valueFontSize":"50%",
+ "valueMaps":[
+ {
+ "op":"=",
+ "text":"N/A",
+ "value":"null"
+ }
+ ],
+ "valueName":"current"
+ },
+ {
+ "cacheTimeout":null,
+ "colorBackground":false,
+ "colorValue":false,
+ "colors":[
+ "rgba(50, 172, 45, 0.97)",
+ "rgba(237, 129, 40, 0.89)",
+ "rgba(245, 54, 54, 0.9)"
+ ],
+ "datasource":"prometheus - Juju generated source",
+ "decimals":2,
+ "editable":true,
+ "error":false,
+ "format":"none",
+ "gauge":{
+ "maxValue":100,
+ "minValue":0,
+ "show":false,
+ "thresholdLabels":false,
+ "thresholdMarkers":true
+ },
+ "gridPos":{
+ "h":3,
+ "w":4,
+ "x":8,
+ "y":6
+ },
+ "height":"1px",
+ "id":11,
+ "interval":null,
+ "links":[
+
+ ],
+ "mappingType":1,
+ "mappingTypes":[
+ {
+ "name":"value to text",
+ "value":1
+ },
+ {
+ "name":"range to text",
+ "value":2
+ }
+ ],
+ "maxDataPoints":100,
+ "nullPointMode":"connected",
+ "nullText":null,
+ "postfix":" cores",
+ "postfixFontSize":"30%",
+ "prefix":"",
+ "prefixFontSize":"50%",
+ "rangeMaps":[
+ {
+ "from":"null",
+ "text":"N/A",
+ "to":"null"
+ }
+ ],
+ "sparkline":{
+ "fillColor":"rgba(31, 118, 189, 0.18)",
+ "full":false,
+ "lineColor":"rgb(31, 120, 193)",
+ "show":false
+ },
+ "tableColumn":"",
+ "targets":[
+ {
+ "expr":"sum (rate (container_cpu_usage_seconds_total{id=\"/\"}[1m]))",
+ "format":"time_series",
+ "interval":"10s",
+ "intervalFactor":1,
+ "refId":"A",
+ "step":300
+ }
+ ],
+ "thresholds":"",
+ "title":"Used",
+ "type":"singlestat",
+ "valueFontSize":"50%",
+ "valueMaps":[
+ {
+ "op":"=",
+ "text":"N/A",
+ "value":"null"
+ }
+ ],
+ "valueName":"current"
+ },
+ {
+ "cacheTimeout":null,
+ "colorBackground":false,
+ "colorValue":false,
+ "colors":[
+ "rgba(50, 172, 45, 0.97)",
+ "rgba(237, 129, 40, 0.89)",
+ "rgba(245, 54, 54, 0.9)"
+ ],
+ "datasource":"prometheus - Juju generated source",
+ "decimals":2,
+ "editable":true,
+ "error":false,
+ "format":"none",
+ "gauge":{
+ "maxValue":100,
+ "minValue":0,
+ "show":false,
+ "thresholdLabels":false,
+ "thresholdMarkers":true
+ },
+ "gridPos":{
+ "h":3,
+ "w":4,
+ "x":12,
+ "y":6
+ },
+ "height":"1px",
+ "id":12,
+ "interval":null,
+ "links":[
+
+ ],
+ "mappingType":1,
+ "mappingTypes":[
+ {
+ "name":"value to text",
+ "value":1
+ },
+ {
+ "name":"range to text",
+ "value":2
+ }
+ ],
+ "maxDataPoints":100,
+ "nullPointMode":"connected",
+ "nullText":null,
+ "postfix":" cores",
+ "postfixFontSize":"30%",
+ "prefix":"",
+ "prefixFontSize":"50%",
+ "rangeMaps":[
+ {
+ "from":"null",
+ "text":"N/A",
+ "to":"null"
+ }
+ ],
+ "sparkline":{
+ "fillColor":"rgba(31, 118, 189, 0.18)",
+ "full":false,
+ "lineColor":"rgb(31, 120, 193)",
+ "show":false
+ },
+ "tableColumn":"",
+ "targets":[
+ {
+ "expr":"sum (machine_cpu_cores{})",
+ "format":"time_series",
+ "interval":"10s",
+ "intervalFactor":1,
+ "refId":"A",
+ "step":300
+ }
+ ],
+ "thresholds":"",
+ "title":"Total",
+ "type":"singlestat",
+ "valueFontSize":"50%",
+ "valueMaps":[
+ {
+ "op":"=",
+ "text":"N/A",
+ "value":"null"
+ }
+ ],
+ "valueName":"current"
+ },
+ {
+ "cacheTimeout":null,
+ "colorBackground":false,
+ "colorValue":false,
+ "colors":[
+ "rgba(50, 172, 45, 0.97)",
+ "rgba(237, 129, 40, 0.89)",
+ "rgba(245, 54, 54, 0.9)"
+ ],
+ "datasource":"prometheus - Juju generated source",
+ "decimals":2,
+ "editable":true,
+ "error":false,
+ "format":"bytes",
+ "gauge":{
+ "maxValue":100,
+ "minValue":0,
+ "show":false,
+ "thresholdLabels":false,
+ "thresholdMarkers":true
+ },
+ "gridPos":{
+ "h":3,
+ "w":4,
+ "x":16,
+ "y":6
+ },
+ "height":"1px",
+ "id":13,
+ "interval":null,
+ "links":[
+
+ ],
+ "mappingType":1,
+ "mappingTypes":[
+ {
+ "name":"value to text",
+ "value":1
+ },
+ {
+ "name":"range to text",
+ "value":2
+ }
+ ],
+ "maxDataPoints":100,
+ "nullPointMode":"connected",
+ "nullText":null,
+ "postfix":"",
+ "postfixFontSize":"50%",
+ "prefix":"",
+ "prefixFontSize":"50%",
+ "rangeMaps":[
+ {
+ "from":"null",
+ "text":"N/A",
+ "to":"null"
+ }
+ ],
+ "sparkline":{
+ "fillColor":"rgba(31, 118, 189, 0.18)",
+ "full":false,
+ "lineColor":"rgb(31, 120, 193)",
+ "show":false
+ },
+ "tableColumn":"",
+ "targets":[
+ {
+ "expr":"sum (container_fs_usage_bytes{id=\"/\"})",
+ "format":"time_series",
+ "interval":"10s",
+ "intervalFactor":1,
+ "legendFormat":"",
+ "refId":"A",
+ "step":300
+ }
+ ],
+ "thresholds":"",
+ "title":"Used",
+ "type":"singlestat",
+ "valueFontSize":"50%",
+ "valueMaps":[
+ {
+ "op":"=",
+ "text":"N/A",
+ "value":"null"
+ }
+ ],
+ "valueName":"current"
+ },
+ {
+ "cacheTimeout":null,
+ "colorBackground":false,
+ "colorValue":false,
+ "colors":[
+ "rgba(50, 172, 45, 0.97)",
+ "rgba(237, 129, 40, 0.89)",
+ "rgba(245, 54, 54, 0.9)"
+ ],
+ "datasource":"prometheus - Juju generated source",
+ "decimals":2,
+ "editable":true,
+ "error":false,
+ "format":"bytes",
+ "gauge":{
+ "maxValue":100,
+ "minValue":0,
+ "show":false,
+ "thresholdLabels":false,
+ "thresholdMarkers":true
+ },
+ "gridPos":{
+ "h":3,
+ "w":4,
+ "x":20,
+ "y":6
+ },
+ "height":"1px",
+ "id":14,
+ "interval":null,
+ "links":[
+
+ ],
+ "mappingType":1,
+ "mappingTypes":[
+ {
+ "name":"value to text",
+ "value":1
+ },
+ {
+ "name":"range to text",
+ "value":2
+ }
+ ],
+ "maxDataPoints":100,
+ "nullPointMode":"connected",
+ "nullText":null,
+ "postfix":"",
+ "postfixFontSize":"50%",
+ "prefix":"",
+ "prefixFontSize":"50%",
+ "rangeMaps":[
+ {
+ "from":"null",
+ "text":"N/A",
+ "to":"null"
+ }
+ ],
+ "sparkline":{
+ "fillColor":"rgba(31, 118, 189, 0.18)",
+ "full":false,
+ "lineColor":"rgb(31, 120, 193)",
+ "show":false
+ },
+ "tableColumn":"",
+ "targets":[
+ {
+ "expr":"sum (container_fs_limit_bytes{id=\"/\"})",
+ "format":"time_series",
+ "interval":"10s",
+ "intervalFactor":1,
+ "legendFormat":"",
+ "refId":"A",
+ "step":300
+ }
+ ],
+ "thresholds":"",
+ "title":"Total",
+ "type":"singlestat",
+ "valueFontSize":"50%",
+ "valueMaps":[
+ {
+ "op":"=",
+ "text":"N/A",
+ "value":"null"
+ }
+ ],
+ "valueName":"current"
+ },
+ {
+ "collapsed":true,
+ "gridPos":{
+ "h":1,
+ "w":24,
+ "x":0,
+ "y":9
+ },
+ "id":50,
+ "panels":[
+ {
+ "aliasColors":{
+
+ },
+ "bars":false,
+ "dashLength":10,
+ "dashes":false,
+ "datasource":"prometheus - Juju generated source",
+ "description":"",
+ "fill":1,
+ "gridPos":{
+ "h":7,
+ "w":24,
+ "x":0,
+ "y":10
+ },
+ "id":31,
+ "legend":{
+ "avg":false,
+ "current":false,
+ "max":false,
+ "min":false,
+ "show":true,
+ "total":false,
+ "values":false
+ },
+ "lines":true,
+ "linewidth":1,
+ "links":[
+
+ ],
+ "nullPointMode":"null",
+ "percentage":false,
+ "pointradius":5,
+ "points":false,
+ "renderer":"flot",
+ "seriesOverrides":[
+
+ ],
+ "spaceLength":10,
+ "stack":false,
+ "steppedLine":false,
+ "targets":[
+ {
+ "expr":"cpu_usage_idle{cpu=\"cpu-total\",host=~\".*kubernetes-master.*\"}",
+ "format":"time_series",
+ "intervalFactor":2,
+ "legendFormat":"{{host}}",
+ "refId":"A"
+ }
+ ],
+ "thresholds":[
+
+ ],
+ "timeFrom":null,
+ "timeShift":null,
+ "title":"kubernetes-master CPU idle",
+ "tooltip":{
+ "shared":true,
+ "sort":0,
+ "value_type":"individual"
+ },
+ "type":"graph",
+ "xaxis":{
+ "buckets":null,
+ "mode":"time",
+ "name":null,
+ "show":true,
+ "values":[
+
+ ]
+ },
+ "yaxes":[
+ {
+ "format":"percent",
+ "label":null,
+ "logBase":1,
+ "max":null,
+ "min":null,
+ "show":true
+ },
+ {
+ "format":"short",
+ "label":null,
+ "logBase":1,
+ "max":null,
+ "min":null,
+ "show":true
+ }
+ ],
+ "yaxis":{
+ "align":false,
+ "alignLevel":null
+ }
+ }
+ ],
+ "repeat":null,
+ "title":"kubernetes-master CPU idle",
+ "type":"row"
+ },
+ {
+ "collapsed":true,
+ "gridPos":{
+ "h":1,
+ "w":24,
+ "x":0,
+ "y":9
+ },
+ "id":51,
+ "panels":[
+ {
+ "aliasColors":{
+
+ },
+ "bars":false,
+ "dashLength":10,
+ "dashes":false,
+ "datasource":"prometheus - Juju generated source",
+ "description":"",
+ "fill":1,
+ "gridPos":{
+ "h":7,
+ "w":24,
+ "x":0,
+ "y":10
+ },
+ "id":48,
+ "legend":{
+ "avg":false,
+ "current":false,
+ "max":false,
+ "min":false,
+ "show":true,
+ "total":false,
+ "values":false
+ },
+ "lines":true,
+ "linewidth":1,
+ "links":[
+
+ ],
+ "nullPointMode":"null",
+ "percentage":false,
+ "pointradius":5,
+ "points":false,
+ "renderer":"flot",
+ "seriesOverrides":[
+
+ ],
+ "spaceLength":10,
+ "stack":false,
+ "steppedLine":false,
+ "targets":[
+ {
+ "expr":"cpu_usage_idle{cpu=\"cpu-total\",host=~\".*kubernetes-worker.*\"}",
+ "format":"time_series",
+ "intervalFactor":2,
+ "legendFormat":"{{host}}",
+ "refId":"A"
+ }
+ ],
+ "thresholds":[
+
+ ],
+ "timeFrom":null,
+ "timeShift":null,
+ "title":"kubernetes-worker CPU idle",
+ "tooltip":{
+ "shared":true,
+ "sort":0,
+ "value_type":"individual"
+ },
+ "type":"graph",
+ "xaxis":{
+ "buckets":null,
+ "mode":"time",
+ "name":null,
+ "show":true,
+ "values":[
+
+ ]
+ },
+ "yaxes":[
+ {
+ "format":"percent",
+ "label":null,
+ "logBase":1,
+ "max":null,
+ "min":null,
+ "show":true
+ },
+ {
+ "format":"short",
+ "label":null,
+ "logBase":1,
+ "max":null,
+ "min":null,
+ "show":true
+ }
+ ],
+ "yaxis":{
+ "align":false,
+ "alignLevel":null
+ }
+ }
+ ],
+ "repeat":null,
+ "title":"kubernetes-worker CPU idle",
+ "type":"row"
+ },
+ {
+ "collapsed":true,
+ "gridPos":{
+ "h":1,
+ "w":24,
+ "x":0,
+ "y":10
+ },
+ "id":52,
+ "panels":[
+ {
+ "aliasColors":{
+
+ },
+ "bars":false,
+ "dashLength":10,
+ "dashes":false,
+ "datasource":"prometheus - Juju generated source",
+ "fill":1,
+ "gridPos":{
+ "h":7,
+ "w":12,
+ "x":0,
+ "y":11
+ },
+ "id":41,
+ "legend":{
+ "avg":false,
+ "current":false,
+ "max":false,
+ "min":false,
+ "show":false,
+ "total":false,
+ "values":false
+ },
+ "lines":true,
+ "linewidth":1,
+ "links":[
+
+ ],
+ "nullPointMode":"null",
+ "percentage":false,
+ "pointradius":5,
+ "points":false,
+ "renderer":"flot",
+ "seriesOverrides":[
+
+ ],
+ "spaceLength":10,
+ "stack":true,
+ "steppedLine":false,
+ "targets":[
+ {
+ "expr":"sum(rate(apiserver_request_latencies_sum{}[1m])) / sum(rate(apiserver_request_latencies_count{}[1m]))",
+ "format":"time_series",
+ "intervalFactor":2,
+ "legendFormat":"{{username}}",
+ "refId":"A",
+ "step":60
+ }
+ ],
+ "thresholds":[
+
+ ],
+ "timeFrom":null,
+ "timeShift":null,
+ "title":"API request latency",
+ "tooltip":{
+ "shared":true,
+ "sort":0,
+ "value_type":"individual"
+ },
+ "type":"graph",
+ "xaxis":{
+ "buckets":null,
+ "mode":"time",
+ "name":null,
+ "show":true,
+ "values":[
+
+ ]
+ },
+ "yaxes":[
+ {
+ "format":"ms",
+ "label":null,
+ "logBase":1,
+ "max":null,
+ "min":null,
+ "show":true
+ },
+ {
+ "format":"short",
+ "label":null,
+ "logBase":1,
+ "max":null,
+ "min":null,
+ "show":true
+ }
+ ],
+ "yaxis":{
+ "align":false,
+ "alignLevel":null
+ }
+ },
+ {
+ "aliasColors":{
+
+ },
+ "bars":false,
+ "dashLength":10,
+ "dashes":false,
+ "datasource":"prometheus - Juju generated source",
+ "fill":1,
+ "gridPos":{
+ "h":7,
+ "w":12,
+ "x":12,
+ "y":11
+ },
+ "id":37,
+ "legend":{
+ "avg":false,
+ "current":false,
+ "max":false,
+ "min":false,
+ "show":true,
+ "total":false,
+ "values":false
+ },
+ "lines":true,
+ "linewidth":1,
+ "links":[
+
+ ],
+ "nullPointMode":"null",
+ "percentage":false,
+ "pointradius":5,
+ "points":false,
+ "renderer":"flot",
+ "seriesOverrides":[
+
+ ],
+ "spaceLength":10,
+ "stack":false,
+ "steppedLine":false,
+ "targets":[
+ {
+ "expr":"sum(rate(apiserver_request_count{}[1m])) by (code)",
+ "format":"time_series",
+ "intervalFactor":2,
+ "legendFormat":"{{code}}",
+ "refId":"A",
+ "step":60
+ }
+ ],
+ "thresholds":[
+
+ ],
+ "timeFrom":null,
+ "timeShift":null,
+ "title":"API server hits by code",
+ "tooltip":{
+ "shared":true,
+ "sort":0,
+ "value_type":"individual"
+ },
+ "type":"graph",
+ "xaxis":{
+ "buckets":null,
+ "mode":"time",
+ "name":null,
+ "show":true,
+ "values":[
+
+ ]
+ },
+ "yaxes":[
+ {
+ "format":"short",
+ "label":null,
+ "logBase":1,
+ "max":null,
+ "min":null,
+ "show":true
+ },
+ {
+ "format":"short",
+ "label":null,
+ "logBase":1,
+ "max":null,
+ "min":null,
+ "show":true
+ }
+ ],
+ "yaxis":{
+ "align":false,
+ "alignLevel":null
+ }
+ },
+ {
+ "aliasColors":{
+
+ },
+ "bars":false,
+ "dashLength":10,
+ "dashes":false,
+ "datasource":"prometheus - Juju generated source",
+ "fill":1,
+ "gridPos":{
+ "h":7,
+ "w":12,
+ "x":0,
+ "y":18
+ },
+ "id":38,
+ "legend":{
+ "avg":false,
+ "current":false,
+ "max":false,
+ "min":false,
+ "show":true,
+ "total":false,
+ "values":false
+ },
+ "lines":true,
+ "linewidth":1,
+ "links":[
+
+ ],
+ "nullPointMode":"null",
+ "percentage":false,
+ "pointradius":5,
+ "points":false,
+ "renderer":"flot",
+ "seriesOverrides":[
+
+ ],
+ "spaceLength":10,
+ "stack":true,
+ "steppedLine":false,
+ "targets":[
+ {
+ "expr":"rate(authenticated_user_requests{}[1m])",
+ "format":"time_series",
+ "intervalFactor":2,
+ "legendFormat":"{{username}}",
+ "refId":"A",
+ "step":60
+ }
+ ],
+ "thresholds":[
+
+ ],
+ "timeFrom":null,
+ "timeShift":null,
+ "title":"Authenticated user requests",
+ "tooltip":{
+ "shared":true,
+ "sort":0,
+ "value_type":"individual"
+ },
+ "type":"graph",
+ "xaxis":{
+ "buckets":null,
+ "mode":"time",
+ "name":null,
+ "show":true,
+ "values":[
+
+ ]
+ },
+ "yaxes":[
+ {
+ "format":"short",
+ "label":null,
+ "logBase":1,
+ "max":null,
+ "min":null,
+ "show":true
+ },
+ {
+ "format":"short",
+ "label":null,
+ "logBase":1,
+ "max":null,
+ "min":null,
+ "show":true
+ }
+ ],
+ "yaxis":{
+ "align":false,
+ "alignLevel":null
+ }
+ },
+ {
+ "aliasColors":{
+
+ },
+ "bars":false,
+ "dashLength":10,
+ "dashes":false,
+ "datasource":"prometheus - Juju generated source",
+ "fill":0,
+ "gridPos":{
+ "h":7,
+ "w":12,
+ "x":12,
+ "y":18
+ },
+ "id":39,
+ "legend":{
+ "avg":false,
+ "current":false,
+ "max":false,
+ "min":false,
+ "show":true,
+ "total":false,
+ "values":false
+ },
+ "lines":true,
+ "linewidth":1,
+ "links":[
+
+ ],
+ "nullPointMode":"null",
+ "percentage":false,
+ "pointradius":5,
+ "points":false,
+ "renderer":"flot",
+ "seriesOverrides":[
+
+ ],
+ "spaceLength":10,
+ "stack":false,
+ "steppedLine":false,
+ "targets":[
+ {
+ "expr":"histogram_quantile(0.95, sum(rate(apiserver_request_latencies_bucket{}[5m])) by (le,resource) )",
+ "format":"time_series",
+ "hide":false,
+ "intervalFactor":2,
+ "legendFormat":"{{resource}}",
+ "refId":"A",
+ "step":60
+ }
+ ],
+ "thresholds":[
+
+ ],
+ "timeFrom":null,
+ "timeShift":null,
+ "title":"API request latency by resource 95th percentile",
+ "tooltip":{
+ "shared":true,
+ "sort":0,
+ "value_type":"individual"
+ },
+ "type":"graph",
+ "xaxis":{
+ "buckets":null,
+ "mode":"time",
+ "name":null,
+ "show":true,
+ "values":[
+
+ ]
+ },
+ "yaxes":[
+ {
+ "format":"ms",
+ "label":null,
+ "logBase":1,
+ "max":null,
+ "min":null,
+ "show":true
+ },
+ {
+ "format":"short",
+ "label":null,
+ "logBase":1,
+ "max":null,
+ "min":null,
+ "show":true
+ }
+ ],
+ "yaxis":{
+ "align":false,
+ "alignLevel":null
+ }
+ },
+ {
+ "aliasColors":{
+
+ },
+ "bars":false,
+ "dashLength":10,
+ "dashes":false,
+ "datasource":"prometheus - Juju generated source",
+ "fill":1,
+ "gridPos":{
+ "h":7,
+ "w":12,
+ "x":0,
+ "y":25
+ },
+ "id":40,
+ "legend":{
+ "avg":false,
+ "current":false,
+ "max":false,
+ "min":false,
+ "show":true,
+ "total":false,
+ "values":false
+ },
+ "lines":true,
+ "linewidth":1,
+ "links":[
+
+ ],
+ "nullPointMode":"null",
+ "percentage":false,
+ "pointradius":5,
+ "points":false,
+ "renderer":"flot",
+ "seriesOverrides":[
+
+ ],
+ "spaceLength":10,
+ "stack":false,
+ "steppedLine":false,
+ "targets":[
+ {
+ "expr":"histogram_quantile(0.95, sum(rate(apiserver_request_latencies_bucket{}[5m])) by (le,verb) )",
+ "format":"time_series",
+ "hide":false,
+ "intervalFactor":2,
+ "legendFormat":"{{verb}}",
+ "refId":"A",
+ "step":30
+ }
+ ],
+ "thresholds":[
+
+ ],
+ "timeFrom":null,
+ "timeShift":null,
+ "title":"API request latency by resource 95th percentile",
+ "tooltip":{
+ "shared":true,
+ "sort":0,
+ "value_type":"individual"
+ },
+ "type":"graph",
+ "xaxis":{
+ "buckets":null,
+ "mode":"time",
+ "name":null,
+ "show":true,
+ "values":[
+
+ ]
+ },
+ "yaxes":[
+ {
+ "format":"ms",
+ "label":null,
+ "logBase":1,
+ "max":null,
+ "min":null,
+ "show":true
+ },
+ {
+ "format":"short",
+ "label":null,
+ "logBase":1,
+ "max":null,
+ "min":null,
+ "show":true
+ }
+ ],
+ "yaxis":{
+ "align":false,
+ "alignLevel":null
+ }
+ }
+ ],
+ "repeat":null,
+ "title":"API server",
+ "type":"row"
+ },
+ {
+ "collapsed":true,
+ "gridPos":{
+ "h":1,
+ "w":24,
+ "x":0,
+ "y":11
+ },
+ "id":53,
+ "panels":[
+ {
+ "aliasColors":{
+
+ },
+ "bars":false,
+ "dashLength":10,
+ "dashes":false,
+ "datasource":"prometheus - Juju generated source",
+ "fill":1,
+ "gridPos":{
+ "h":7,
+ "w":12,
+ "x":0,
+ "y":32
+ },
+ "id":42,
+ "legend":{
+ "avg":false,
+ "current":false,
+ "max":false,
+ "min":false,
+ "show":true,
+ "total":false,
+ "values":false
+ },
+ "lines":true,
+ "linewidth":1,
+ "links":[
+
+ ],
+ "nullPointMode":"null",
+ "percentage":false,
+ "pointradius":5,
+ "points":false,
+ "renderer":"flot",
+ "seriesOverrides":[
+
+ ],
+ "spaceLength":10,
+ "stack":false,
+ "steppedLine":false,
+ "targets":[
+ {
+ "expr":"rate(admission_quota_controller_adds{}[10m])",
+ "format":"time_series",
+ "intervalFactor":2,
+ "legendFormat":"",
+ "refId":"A",
+ "step":60
+ }
+ ],
+ "thresholds":[
+
+ ],
+ "timeFrom":null,
+ "timeShift":null,
+ "title":"Admission requests (10min avg)",
+ "tooltip":{
+ "shared":true,
+ "sort":0,
+ "value_type":"individual"
+ },
+ "type":"graph",
+ "xaxis":{
+ "buckets":null,
+ "mode":"time",
+ "name":null,
+ "show":true,
+ "values":[
+
+ ]
+ },
+ "yaxes":[
+ {
+ "format":"none",
+ "label":null,
+ "logBase":1,
+ "max":null,
+ "min":null,
+ "show":true
+ },
+ {
+ "format":"short",
+ "label":null,
+ "logBase":1,
+ "max":null,
+ "min":null,
+ "show":true
+ }
+ ]
+ },
+ {
+ "aliasColors":{
+
+ },
+ "bars":false,
+ "dashLength":10,
+ "dashes":false,
+ "datasource":"prometheus - Juju generated source",
+ "fill":1,
+ "gridPos":{
+ "h":7,
+ "w":12,
+ "x":12,
+ "y":32
+ },
+ "id":44,
+ "legend":{
+ "avg":false,
+ "current":false,
+ "max":false,
+ "min":false,
+ "show":true,
+ "total":false,
+ "values":false
+ },
+ "lines":true,
+ "linewidth":1,
+ "links":[
+
+ ],
+ "nullPointMode":"null",
+ "percentage":false,
+ "pointradius":5,
+ "points":false,
+ "renderer":"flot",
+ "seriesOverrides":[
+
+ ],
+ "spaceLength":10,
+ "stack":false,
+ "steppedLine":false,
+ "targets":[
+ {
+ "expr":"rate(admission_quota_controller_queue_latency_sum{}[10m]) / rate(admission_quota_controller_queue_latency_count{}[10m])",
+ "format":"time_series",
+ "intervalFactor":2,
+ "legendFormat":"",
+ "refId":"A",
+ "step":60
+ }
+ ],
+ "thresholds":[
+
+ ],
+ "timeFrom":null,
+ "timeShift":null,
+ "title":"Admission controller queue latency (10min avg)",
+ "tooltip":{
+ "shared":true,
+ "sort":0,
+ "value_type":"individual"
+ },
+ "type":"graph",
+ "xaxis":{
+ "buckets":null,
+ "mode":"time",
+ "name":null,
+ "show":true,
+ "values":[
+
+ ]
+ },
+ "yaxes":[
+ {
+ "format":"ms",
+ "label":null,
+ "logBase":1,
+ "max":null,
+ "min":null,
+ "show":true
+ },
+ {
+ "format":"short",
+ "label":null,
+ "logBase":1,
+ "max":null,
+ "min":null,
+ "show":true
+ }
+ ]
+ },
+ {
+ "aliasColors":{
+
+ },
+ "bars":false,
+ "dashLength":10,
+ "dashes":false,
+ "datasource":"prometheus - Juju generated source",
+ "fill":1,
+ "gridPos":{
+ "h":7,
+ "w":12,
+ "x":0,
+ "y":39
+ },
+ "id":43,
+ "legend":{
+ "avg":false,
+ "current":false,
+ "max":false,
+ "min":false,
+ "show":true,
+ "total":false,
+ "values":false
+ },
+ "lines":true,
+ "linewidth":1,
+ "links":[
+
+ ],
+ "nullPointMode":"null",
+ "percentage":false,
+ "pointradius":5,
+ "points":false,
+ "renderer":"flot",
+ "seriesOverrides":[
+
+ ],
+ "spaceLength":10,
+ "stack":false,
+ "steppedLine":false,
+ "targets":[
+ {
+ "expr":"rate(admission_quota_controller_work_duration_sum{}[10m]) / rate(admission_quota_controller_work_duration_count{}[10m])",
+ "format":"time_series",
+ "intervalFactor":2,
+ "legendFormat":"",
+ "refId":"A",
+ "step":60
+ }
+ ],
+ "thresholds":[
+
+ ],
+ "timeFrom":null,
+ "timeShift":null,
+ "title":"Admission controller work (10min avg)",
+ "tooltip":{
+ "shared":true,
+ "sort":0,
+ "value_type":"individual"
+ },
+ "type":"graph",
+ "xaxis":{
+ "buckets":null,
+ "mode":"time",
+ "name":null,
+ "show":true,
+ "values":[
+
+ ]
+ },
+ "yaxes":[
+ {
+ "format":"ms",
+ "label":null,
+ "logBase":1,
+ "max":null,
+ "min":null,
+ "show":true
+ },
+ {
+ "format":"short",
+ "label":null,
+ "logBase":1,
+ "max":null,
+ "min":null,
+ "show":true
+ }
+ ]
+ }
+ ],
+ "repeat":null,
+ "title":"Admission controller",
+ "type":"row"
+ },
+ {
+ "collapsed":true,
+ "gridPos":{
+ "h":1,
+ "w":24,
+ "x":0,
+ "y":12
+ },
+ "id":54,
+ "panels":[
+ {
+ "aliasColors":{
+
+ },
+ "bars":false,
+ "dashLength":10,
+ "dashes":false,
+ "datasource":"prometheus - Juju generated source",
+ "fill":1,
+ "gridPos":{
+ "h":7,
+ "w":12,
+ "x":0,
+ "y":13
+ },
+ "id":36,
+ "legend":{
+ "alignAsTable":true,
+ "avg":true,
+ "current":true,
+ "max":true,
+ "min":true,
+ "show":true,
+ "total":false,
+ "values":true
+ },
+ "lines":true,
+ "linewidth":1,
+ "links":[
+
+ ],
+ "nullPointMode":"null",
+ "percentage":false,
+ "pointradius":5,
+ "points":false,
+ "renderer":"flot",
+ "seriesOverrides":[
+
+ ],
+ "spaceLength":10,
+ "stack":false,
+ "steppedLine":false,
+ "targets":[
+ {
+ "expr":"etcd_request_cache_get_latencies_summary{}",
+ "format":"time_series",
+ "intervalFactor":2,
+ "legendFormat":"Quantile {{quantile}}",
+ "refId":"A",
+ "step":60
+ }
+ ],
+ "thresholds":[
+
+ ],
+ "timeFrom":null,
+ "timeShift":null,
+ "title":"Cache request latencies (get)",
+ "tooltip":{
+ "shared":true,
+ "sort":0,
+ "value_type":"individual"
+ },
+ "type":"graph",
+ "xaxis":{
+ "buckets":null,
+ "mode":"time",
+ "name":null,
+ "show":true,
+ "values":[
+
+ ]
+ },
+ "yaxes":[
+ {
+ "format":"ms",
+ "label":null,
+ "logBase":1,
+ "max":null,
+ "min":"0",
+ "show":true
+ },
+ {
+ "format":"short",
+ "label":null,
+ "logBase":1,
+ "max":null,
+ "min":null,
+ "show":false
+ }
+ ],
+ "yaxis":{
+ "align":false,
+ "alignLevel":null
+ }
+ },
+ {
+ "aliasColors":{
+
+ },
+ "bars":false,
+ "dashLength":10,
+ "dashes":false,
+ "datasource":"prometheus - Juju generated source",
+ "fill":1,
+ "gridPos":{
+ "h":7,
+ "w":12,
+ "x":12,
+ "y":13
+ },
+ "id":35,
+ "legend":{
+ "alignAsTable":true,
+ "avg":true,
+ "current":true,
+ "max":true,
+ "min":true,
+ "show":true,
+ "total":false,
+ "values":true
+ },
+ "lines":true,
+ "linewidth":1,
+ "links":[
+
+ ],
+ "nullPointMode":"null",
+ "percentage":false,
+ "pointradius":5,
+ "points":false,
+ "renderer":"flot",
+ "seriesOverrides":[
+
+ ],
+ "spaceLength":10,
+ "stack":false,
+ "steppedLine":false,
+ "targets":[
+ {
+ "expr":"etcd_request_cache_add_latencies_summary{}",
+ "format":"time_series",
+ "intervalFactor":2,
+ "legendFormat":"Quantile {{quantile}}",
+ "refId":"A",
+ "step":60
+ }
+ ],
+ "thresholds":[
+
+ ],
+ "timeFrom":null,
+ "timeShift":null,
+ "title":"Cache request latencies (add)",
+ "tooltip":{
+ "shared":true,
+ "sort":0,
+ "value_type":"individual"
+ },
+ "type":"graph",
+ "xaxis":{
+ "buckets":null,
+ "mode":"time",
+ "name":null,
+ "show":true,
+ "values":[
+
+ ]
+ },
+ "yaxes":[
+ {
+ "format":"ms",
+ "label":null,
+ "logBase":1,
+ "max":null,
+ "min":"0",
+ "show":true
+ },
+ {
+ "format":"short",
+ "label":null,
+ "logBase":1,
+ "max":null,
+ "min":null,
+ "show":false
+ }
+ ],
+ "yaxis":{
+ "align":false,
+ "alignLevel":null
+ }
+ },
+ {
+ "aliasColors":{
+
+ },
+ "bars":false,
+ "dashLength":10,
+ "dashes":false,
+ "datasource":"prometheus - Juju generated source",
+ "fill":1,
+ "gridPos":{
+ "h":7,
+ "w":12,
+ "x":0,
+ "y":20
+ },
+ "id":33,
+ "legend":{
+ "alignAsTable":true,
+ "avg":true,
+ "current":true,
+ "max":true,
+ "min":true,
+ "show":true,
+ "total":false,
+ "values":true
+ },
+ "lines":true,
+ "linewidth":1,
+ "links":[
+
+ ],
+ "nullPointMode":"null",
+ "percentage":false,
+ "pointradius":5,
+ "points":false,
+ "renderer":"flot",
+ "seriesOverrides":[
+
+ ],
+ "spaceLength":10,
+ "stack":false,
+ "steppedLine":false,
+ "targets":[
+ {
+ "expr":"etcd_helper_cache_hit_count{} / (etcd_helper_cache_miss_count{} + etcd_helper_cache_hit_count{}) * 100",
+ "format":"time_series",
+ "intervalFactor":2,
+ "legendFormat":"Hit ratio",
+ "refId":"A",
+ "step":60
+ }
+ ],
+ "thresholds":[
+
+ ],
+ "timeFrom":null,
+ "timeShift":null,
+ "title":"Cache hit ratio",
+ "tooltip":{
+ "shared":true,
+ "sort":0,
+ "value_type":"individual"
+ },
+ "type":"graph",
+ "xaxis":{
+ "buckets":null,
+ "mode":"time",
+ "name":null,
+ "show":true,
+ "values":[
+
+ ]
+ },
+ "yaxes":[
+ {
+ "format":"percent",
+ "label":null,
+ "logBase":1,
+ "max":"100",
+ "min":"0",
+ "show":true
+ },
+ {
+ "format":"short",
+ "label":null,
+ "logBase":1,
+ "max":null,
+ "min":null,
+ "show":false
+ }
+ ],
+ "yaxis":{
+ "align":false,
+ "alignLevel":null
+ }
+ },
+ {
+ "aliasColors":{
+
+ },
+ "bars":false,
+ "dashLength":10,
+ "dashes":false,
+ "datasource":"prometheus - Juju generated source",
+ "fill":1,
+ "gridPos":{
+ "h":7,
+ "w":12,
+ "x":12,
+ "y":20
+ },
+ "id":34,
+ "legend":{
+ "alignAsTable":true,
+ "avg":true,
+ "current":true,
+ "max":true,
+ "min":true,
+ "show":true,
+ "total":false,
+ "values":true
+ },
+ "lines":true,
+ "linewidth":1,
+ "links":[
+
+ ],
+ "nullPointMode":"null",
+ "percentage":false,
+ "pointradius":5,
+ "points":false,
+ "renderer":"flot",
+ "seriesOverrides":[
+
+ ],
+ "spaceLength":10,
+ "stack":false,
+ "steppedLine":false,
+ "targets":[
+ {
+ "expr":"sum(rate(etcd_request_latencies_summary_sum{}[1m])) by (operation) / sum(rate(etcd_request_latencies_summary_count{}[1m])) by (operation)",
+ "format":"time_series",
+ "intervalFactor":2,
+ "legendFormat":"{{operation}}",
+ "refId":"A",
+ "step":60
+ }
+ ],
+ "thresholds":[
+
+ ],
+ "timeFrom":null,
+ "timeShift":null,
+ "title":"Average cache request latencies",
+ "tooltip":{
+ "shared":true,
+ "sort":0,
+ "value_type":"individual"
+ },
+ "type":"graph",
+ "xaxis":{
+ "buckets":null,
+ "mode":"time",
+ "name":null,
+ "show":true,
+ "values":[
+
+ ]
+ },
+ "yaxes":[
+ {
+ "format":"ms",
+ "label":null,
+ "logBase":1,
+ "max":null,
+ "min":"0",
+ "show":true
+ },
+ {
+ "format":"short",
+ "label":null,
+ "logBase":1,
+ "max":null,
+ "min":null,
+ "show":false
+ }
+ ],
+ "yaxis":{
+ "align":false,
+ "alignLevel":null
+ }
+ }
+ ],
+ "repeat":null,
+ "title":"Etcd",
+ "type":"row"
+ },
+ {
+ "collapsed":true,
+ "gridPos":{
+ "h":1,
+ "w":24,
+ "x":0,
+ "y":13
+ },
+ "id":55,
+ "panels":[
+ {
+ "aliasColors":{
+
+ },
+ "bars":false,
+ "dashLength":10,
+ "dashes":false,
+ "datasource":"prometheus - Juju generated source",
+ "decimals":3,
+ "editable":true,
+ "error":false,
+ "fill":0,
+ "grid":{
+
+ },
+ "gridPos":{
+ "h":7,
+ "w":24,
+ "x":0,
+ "y":48
+ },
+ "height":"",
+ "id":17,
+ "legend":{
+ "alignAsTable":true,
+ "avg":true,
+ "current":true,
+ "max":false,
+ "min":false,
+ "rightSide":true,
+ "show":true,
+ "sort":"current",
+ "sortDesc":true,
+ "total":false,
+ "values":true
+ },
+ "lines":true,
+ "linewidth":2,
+ "links":[
+
+ ],
+ "nullPointMode":"connected",
+ "percentage":false,
+ "pointradius":5,
+ "points":false,
+ "renderer":"flot",
+ "seriesOverrides":[
+
+ ],
+ "spaceLength":10,
+ "stack":false,
+ "steppedLine":true,
+ "targets":[
+ {
+ "expr":"sum(rate (container_cpu_usage_seconds_total{image!=\"\",container=~\"$container\",namespace=~\"$namespace\"}[1m])) by (pod)",
+ "format":"time_series",
+ "interval":"10s",
+ "intervalFactor":1,
+ "legendFormat":"{{pod}}",
+ "metric":"container_cpu",
+ "refId":"A",
+ "step":15
+ }
+ ],
+ "thresholds":[
+
+ ],
+ "timeFrom":null,
+ "timeShift":null,
+ "title":"Pods CPU usage (1m avg)",
+ "tooltip":{
+ "msResolution":true,
+ "shared":true,
+ "sort":2,
+ "value_type":"cumulative"
+ },
+ "transparent":false,
+ "type":"graph",
+ "xaxis":{
+ "buckets":null,
+ "mode":"time",
+ "name":null,
+ "show":true,
+ "values":[
+
+ ]
+ },
+ "yaxes":[
+ {
+ "format":"none",
+ "label":"cores",
+ "logBase":1,
+ "max":null,
+ "min":null,
+ "show":true
+ },
+ {
+ "format":"short",
+ "label":null,
+ "logBase":1,
+ "max":null,
+ "min":null,
+ "show":false
+ }
+ ]
+ }
+ ],
+ "repeat":null,
+ "title":"Pods CPU usage",
+ "type":"row"
+ },
+ {
+ "collapsed":true,
+ "gridPos":{
+ "h":1,
+ "w":24,
+ "x":0,
+ "y":14
+ },
+ "id":56,
+ "panels":[
+ {
+ "aliasColors":{
+
+ },
+ "bars":false,
+ "dashLength":10,
+ "dashes":false,
+ "datasource":"prometheus - Juju generated source",
+ "decimals":3,
+ "description":"",
+ "editable":true,
+ "error":false,
+ "fill":0,
+ "grid":{
+
+ },
+ "gridPos":{
+ "h":7,
+ "w":24,
+ "x":0,
+ "y":49
+ },
+ "height":"",
+ "id":24,
+ "legend":{
+ "alignAsTable":true,
+ "avg":true,
+ "current":true,
+ "hideEmpty":false,
+ "hideZero":false,
+ "max":false,
+ "min":false,
+ "rightSide":true,
+ "show":true,
+ "sideWidth":null,
+ "sort":"current",
+ "sortDesc":true,
+ "total":false,
+ "values":true
+ },
+ "lines":true,
+ "linewidth":2,
+ "links":[
+
+ ],
+ "nullPointMode":"connected",
+ "percentage":false,
+ "pointradius":5,
+ "points":false,
+ "renderer":"flot",
+ "seriesOverrides":[
+
+ ],
+ "spaceLength":10,
+ "stack":false,
+ "steppedLine":true,
+ "targets":[
+ {
+ "expr":"sum(rate (container_cpu_usage_seconds_total{image!=\"\",container!=\"POD\",container=~\"$container\",namespace=~\"$namespace\"}[1m])) without (cpu)",
+ "format":"time_series",
+ "hide":false,
+ "interval":"10s",
+ "intervalFactor":1,
+ "legendFormat":"{{container}} (pod: {{pod}}",
+ "metric":"container_cpu",
+ "refId":"A",
+ "step":15
+ }
+ ],
+ "thresholds":[
+
+ ],
+ "timeFrom":null,
+ "timeShift":null,
+ "title":"Container CPU usage (1m avg)",
+ "tooltip":{
+ "msResolution":true,
+ "shared":true,
+ "sort":2,
+ "value_type":"cumulative"
+ },
+ "type":"graph",
+ "xaxis":{
+ "buckets":null,
+ "mode":"time",
+ "name":null,
+ "show":true,
+ "values":[
+
+ ]
+ },
+ "yaxes":[
+ {
+ "format":"none",
+ "label":"cores",
+ "logBase":1,
+ "max":null,
+ "min":null,
+ "show":true
+ },
+ {
+ "format":"short",
+ "label":null,
+ "logBase":1,
+ "max":null,
+ "min":null,
+ "show":false
+ }
+ ]
+ }
+ ],
+ "repeat":null,
+ "title":"Containers CPU usage",
+ "type":"row"
+ },
+ {
+ "collapsed":true,
+ "gridPos":{
+ "h":1,
+ "w":24,
+ "x":0,
+ "y":15
+ },
+ "id":57,
+ "panels":[
+ {
+ "aliasColors":{
+
+ },
+ "bars":false,
+ "dashLength":10,
+ "dashes":false,
+ "datasource":"prometheus - Juju generated source",
+ "decimals":3,
+ "editable":true,
+ "error":false,
+ "fill":0,
+ "grid":{
+
+ },
+ "gridPos":{
+ "h":7,
+ "w":24,
+ "x":0,
+ "y":50
+ },
+ "height":"",
+ "id":23,
+ "legend":{
+ "alignAsTable":true,
+ "avg":true,
+ "current":true,
+ "max":false,
+ "min":false,
+ "rightSide":true,
+ "show":true,
+ "sort":"current",
+ "sortDesc":true,
+ "total":false,
+ "values":true
+ },
+ "lines":true,
+ "linewidth":2,
+ "links":[
+
+ ],
+ "nullPointMode":"connected",
+ "percentage":false,
+ "pointradius":5,
+ "points":false,
+ "renderer":"flot",
+ "seriesOverrides":[
+
+ ],
+ "spaceLength":10,
+ "stack":false,
+ "steppedLine":true,
+ "targets":[
+ {
+ "expr":"sum (rate (container_cpu_usage_seconds_total{id=~\".*systemd.*service$\"}[1m])) by (id)",
+ "format":"time_series",
+ "hide":false,
+ "interval":"10s",
+ "intervalFactor":1,
+ "legendFormat":"{{id}}",
+ "metric":"container_cpu",
+ "refId":"A",
+ "step":15
+ }
+ ],
+ "thresholds":[
+
+ ],
+ "timeFrom":null,
+ "timeShift":null,
+ "title":"System services CPU usage (1m avg)",
+ "tooltip":{
+ "msResolution":true,
+ "shared":true,
+ "sort":2,
+ "value_type":"cumulative"
+ },
+ "type":"graph",
+ "xaxis":{
+ "buckets":null,
+ "mode":"time",
+ "name":null,
+ "show":true,
+ "values":[
+
+ ]
+ },
+ "yaxes":[
+ {
+ "format":"none",
+ "label":"cores",
+ "logBase":1,
+ "max":null,
+ "min":null,
+ "show":true
+ },
+ {
+ "format":"short",
+ "label":null,
+ "logBase":1,
+ "max":null,
+ "min":null,
+ "show":false
+ }
+ ]
+ }
+ ],
+ "repeat":null,
+ "title":"System services CPU usage",
+ "type":"row"
+ },
+ {
+ "collapsed":true,
+ "gridPos":{
+ "h":1,
+ "w":24,
+ "x":0,
+ "y":16
+ },
+ "id":58,
+ "panels":[
+ {
+ "aliasColors":{
+
+ },
+ "bars":false,
+ "dashLength":10,
+ "dashes":false,
+ "datasource":"prometheus - Juju generated source",
+ "decimals":3,
+ "editable":true,
+ "error":false,
+ "fill":0,
+ "grid":{
+
+ },
+ "gridPos":{
+ "h":13,
+ "w":24,
+ "x":0,
+ "y":51
+ },
+ "id":20,
+ "legend":{
+ "alignAsTable":true,
+ "avg":true,
+ "current":true,
+ "max":false,
+ "min":false,
+ "rightSide":false,
+ "show":true,
+ "sort":"current",
+ "sortDesc":true,
+ "total":false,
+ "values":true
+ },
+ "lines":true,
+ "linewidth":2,
+ "links":[
+
+ ],
+ "nullPointMode":"connected",
+ "percentage":false,
+ "pointradius":5,
+ "points":false,
+ "renderer":"flot",
+ "seriesOverrides":[
+
+ ],
+ "spaceLength":10,
+ "stack":false,
+ "steppedLine":true,
+ "targets":[
+ {
+ "expr":"sum (rate (container_cpu_usage_seconds_total{id!=\"/\"}[1m])) by (id)",
+ "format":"time_series",
+ "hide":false,
+ "interval":"10s",
+ "intervalFactor":1,
+ "legendFormat":"{{id}}",
+ "metric":"container_cpu",
+ "refId":"A",
+ "step":15
+ }
+ ],
+ "thresholds":[
+
+ ],
+ "timeFrom":null,
+ "timeShift":null,
+ "title":"All processes CPU usage (1m avg)",
+ "tooltip":{
+ "msResolution":true,
+ "shared":true,
+ "sort":2,
+ "value_type":"cumulative"
+ },
+ "type":"graph",
+ "xaxis":{
+ "buckets":null,
+ "mode":"time",
+ "name":null,
+ "show":true,
+ "values":[
+
+ ]
+ },
+ "yaxes":[
+ {
+ "format":"none",
+ "label":"cores",
+ "logBase":1,
+ "max":null,
+ "min":null,
+ "show":true
+ },
+ {
+ "format":"short",
+ "label":null,
+ "logBase":1,
+ "max":null,
+ "min":null,
+ "show":false
+ }
+ ]
+ }
+ ],
+ "repeat":null,
+ "title":"All processes CPU usage",
+ "type":"row"
+ },
+ {
+ "collapsed":true,
+ "gridPos":{
+ "h":1,
+ "w":24,
+ "x":0,
+ "y":17
+ },
+ "id":59,
+ "panels":[
+ {
+ "aliasColors":{
+
+ },
+ "bars":false,
+ "dashLength":10,
+ "dashes":false,
+ "datasource":"prometheus - Juju generated source",
+ "decimals":2,
+ "editable":true,
+ "error":false,
+ "fill":0,
+ "grid":{
+
+ },
+ "gridPos":{
+ "h":7,
+ "w":24,
+ "x":0,
+ "y":52
+ },
+ "id":25,
+ "legend":{
+ "alignAsTable":true,
+ "avg":true,
+ "current":true,
+ "max":false,
+ "min":false,
+ "rightSide":true,
+ "show":true,
+ "sideWidth":200,
+ "sort":"current",
+ "sortDesc":true,
+ "total":false,
+ "values":true
+ },
+ "lines":true,
+ "linewidth":2,
+ "links":[
+
+ ],
+ "nullPointMode":"connected",
+ "percentage":false,
+ "pointradius":5,
+ "points":false,
+ "renderer":"flot",
+ "seriesOverrides":[
+
+ ],
+ "spaceLength":10,
+ "stack":false,
+ "steppedLine":true,
+ "targets":[
+ {
+ "expr":"sum (container_memory_working_set_bytes{image!=\"\",container=~\"$container\",namespace=~\"$namespace\"}) by (pod)",
+ "format":"time_series",
+ "interval":"10s",
+ "intervalFactor":1,
+ "legendFormat":"{{pod}}",
+ "metric":"container_memory_usage:sort_desc",
+ "refId":"A",
+ "step":10
+ }
+ ],
+ "thresholds":[
+
+ ],
+ "timeFrom":null,
+ "timeShift":null,
+ "title":"Pods memory usage",
+ "tooltip":{
+ "msResolution":false,
+ "shared":true,
+ "sort":2,
+ "value_type":"cumulative"
+ },
+ "type":"graph",
+ "xaxis":{
+ "buckets":null,
+ "mode":"time",
+ "name":null,
+ "show":true,
+ "values":[
+
+ ]
+ },
+ "yaxes":[
+ {
+ "format":"bytes",
+ "label":null,
+ "logBase":1,
+ "max":null,
+ "min":null,
+ "show":true
+ },
+ {
+ "format":"short",
+ "label":null,
+ "logBase":1,
+ "max":null,
+ "min":null,
+ "show":false
+ }
+ ]
+ }
+ ],
+ "repeat":null,
+ "title":"Pods memory usage",
+ "type":"row"
+ },
+ {
+ "collapsed":true,
+ "gridPos":{
+ "h":1,
+ "w":24,
+ "x":0,
+ "y":18
+ },
+ "id":60,
+ "panels":[
+ {
+ "aliasColors":{
+
+ },
+ "bars":false,
+ "dashLength":10,
+ "dashes":false,
+ "datasource":"prometheus - Juju generated source",
+ "decimals":2,
+ "editable":true,
+ "error":false,
+ "fill":0,
+ "grid":{
+
+ },
+ "gridPos":{
+ "h":7,
+ "w":24,
+ "x":0,
+ "y":53
+ },
+ "id":27,
+ "legend":{
+ "alignAsTable":true,
+ "avg":true,
+ "current":true,
+ "max":false,
+ "min":false,
+ "rightSide":true,
+ "show":true,
+ "sideWidth":200,
+ "sort":"current",
+ "sortDesc":true,
+ "total":false,
+ "values":true
+ },
+ "lines":true,
+ "linewidth":2,
+ "links":[
+
+ ],
+ "nullPointMode":"connected",
+ "percentage":false,
+ "pointradius":5,
+ "points":false,
+ "renderer":"flot",
+ "seriesOverrides":[
+
+ ],
+ "spaceLength":10,
+ "stack":false,
+ "steppedLine":true,
+ "targets":[
+ {
+ "expr":"sum (container_memory_working_set_bytes{image!=\"\",container!=\"POD\",container=~\"$container\",namespace=~\"$namespace\"}) by (container, pod)",
+ "format":"time_series",
+ "interval":"10s",
+ "intervalFactor":1,
+ "legendFormat":"{{container}} (pod: {{pod}})",
+ "metric":"container_memory_usage:sort_desc",
+ "refId":"A",
+ "step":10
+ }
+ ],
+ "thresholds":[
+
+ ],
+ "timeFrom":null,
+ "timeShift":null,
+ "title":"Containers memory usage",
+ "tooltip":{
+ "msResolution":false,
+ "shared":true,
+ "sort":2,
+ "value_type":"cumulative"
+ },
+ "type":"graph",
+ "xaxis":{
+ "buckets":null,
+ "mode":"time",
+ "name":null,
+ "show":true,
+ "values":[
+
+ ]
+ },
+ "yaxes":[
+ {
+ "format":"bytes",
+ "label":null,
+ "logBase":1,
+ "max":null,
+ "min":null,
+ "show":true
+ },
+ {
+ "format":"short",
+ "label":null,
+ "logBase":1,
+ "max":null,
+ "min":null,
+ "show":false
+ }
+ ]
+ }
+ ],
+ "repeat":null,
+ "title":"Containers memory usage",
+ "type":"row"
+ },
+ {
+ "collapsed":true,
+ "gridPos":{
+ "h":1,
+ "w":24,
+ "x":0,
+ "y":19
+ },
+ "id":61,
+ "panels":[
+ {
+ "aliasColors":{
+
+ },
+ "bars":false,
+ "dashLength":10,
+ "dashes":false,
+ "datasource":"prometheus - Juju generated source",
+ "decimals":2,
+ "editable":true,
+ "error":false,
+ "fill":0,
+ "grid":{
+
+ },
+ "gridPos":{
+ "h":7,
+ "w":24,
+ "x":0,
+ "y":54
+ },
+ "id":26,
+ "legend":{
+ "alignAsTable":true,
+ "avg":true,
+ "current":true,
+ "max":false,
+ "min":false,
+ "rightSide":true,
+ "show":true,
+ "sideWidth":200,
+ "sort":"current",
+ "sortDesc":true,
+ "total":false,
+ "values":true
+ },
+ "lines":true,
+ "linewidth":2,
+ "links":[
+
+ ],
+ "nullPointMode":"connected",
+ "percentage":false,
+ "pointradius":5,
+ "points":false,
+ "renderer":"flot",
+ "seriesOverrides":[
+
+ ],
+ "spaceLength":10,
+ "stack":false,
+ "steppedLine":true,
+ "targets":[
+ {
+ "expr":"sum (container_memory_working_set_bytes{id=~\".*systemd.*service$\"}) by (id)",
+ "format":"time_series",
+ "interval":"10s",
+ "intervalFactor":1,
+ "legendFormat":"{{id}}",
+ "metric":"container_memory_usage:sort_desc",
+ "refId":"A",
+ "step":10
+ }
+ ],
+ "thresholds":[
+
+ ],
+ "timeFrom":null,
+ "timeShift":null,
+ "title":"System services memory usage",
+ "tooltip":{
+ "msResolution":false,
+ "shared":true,
+ "sort":2,
+ "value_type":"cumulative"
+ },
+ "type":"graph",
+ "xaxis":{
+ "buckets":null,
+ "mode":"time",
+ "name":null,
+ "show":true,
+ "values":[
+
+ ]
+ },
+ "yaxes":[
+ {
+ "format":"bytes",
+ "label":null,
+ "logBase":1,
+ "max":null,
+ "min":null,
+ "show":true
+ },
+ {
+ "format":"short",
+ "label":null,
+ "logBase":1,
+ "max":null,
+ "min":null,
+ "show":false
+ }
+ ]
+ }
+ ],
+ "repeat":null,
+ "title":"System services memory usage",
+ "type":"row"
+ },
+ {
+ "collapsed":true,
+ "gridPos":{
+ "h":1,
+ "w":24,
+ "x":0,
+ "y":20
+ },
+ "id":62,
+ "panels":[
+ {
+ "aliasColors":{
+
+ },
+ "bars":false,
+ "dashLength":10,
+ "dashes":false,
+ "datasource":"prometheus - Juju generated source",
+ "decimals":2,
+ "editable":true,
+ "error":false,
+ "fill":0,
+ "grid":{
+
+ },
+ "gridPos":{
+ "h":13,
+ "w":24,
+ "x":0,
+ "y":55
+ },
+ "id":28,
+ "legend":{
+ "alignAsTable":true,
+ "avg":true,
+ "current":true,
+ "max":false,
+ "min":false,
+ "rightSide":false,
+ "show":true,
+ "sideWidth":200,
+ "sort":"current",
+ "sortDesc":true,
+ "total":false,
+ "values":true
+ },
+ "lines":true,
+ "linewidth":2,
+ "links":[
+
+ ],
+ "nullPointMode":"connected",
+ "percentage":false,
+ "pointradius":5,
+ "points":false,
+ "renderer":"flot",
+ "seriesOverrides":[
+
+ ],
+ "spaceLength":10,
+ "stack":false,
+ "steppedLine":true,
+ "targets":[
+ {
+ "expr":"sum (container_memory_working_set_bytes{id!=\"/\"}) by (id)",
+ "format":"time_series",
+ "interval":"10s",
+ "intervalFactor":1,
+ "legendFormat":"{{id}}",
+ "metric":"container_memory_usage:sort_desc",
+ "refId":"A",
+ "step":10
+ }
+ ],
+ "thresholds":[
+
+ ],
+ "timeFrom":null,
+ "timeShift":null,
+ "title":"All processes memory usage",
+ "tooltip":{
+ "msResolution":false,
+ "shared":true,
+ "sort":2,
+ "value_type":"cumulative"
+ },
+ "type":"graph",
+ "xaxis":{
+ "buckets":null,
+ "mode":"time",
+ "name":null,
+ "show":true,
+ "values":[
+
+ ]
+ },
+ "yaxes":[
+ {
+ "format":"bytes",
+ "label":null,
+ "logBase":1,
+ "max":null,
+ "min":null,
+ "show":true
+ },
+ {
+ "format":"short",
+ "label":null,
+ "logBase":1,
+ "max":null,
+ "min":null,
+ "show":false
+ }
+ ]
+ }
+ ],
+ "repeat":null,
+ "title":"All processes memory usage",
+ "type":"row"
+ },
+ {
+ "collapsed":true,
+ "gridPos":{
+ "h":1,
+ "w":24,
+ "x":0,
+ "y":21
+ },
+ "id":63,
+ "panels":[
+ {
+ "aliasColors":{
+
+ },
+ "bars":false,
+ "dashLength":10,
+ "dashes":false,
+ "datasource":"prometheus - Juju generated source",
+ "decimals":2,
+ "editable":true,
+ "error":false,
+ "fill":1,
+ "grid":{
+
+ },
+ "gridPos":{
+ "h":10,
+ "w":24,
+ "x":0,
+ "y":22
+ },
+ "height":"400px",
+ "id":32,
+ "legend":{
+ "alignAsTable":true,
+ "avg":false,
+ "current":true,
+ "max":true,
+ "min":true,
+ "rightSide":false,
+ "show":true,
+ "sideWidth":200,
+ "sort":"current",
+ "sortDesc":true,
+ "total":false,
+ "values":true
+ },
+ "lines":true,
+ "linewidth":2,
+ "links":[
+
+ ],
+ "nullPointMode":"connected",
+ "percentage":false,
+ "pointradius":5,
+ "points":false,
+ "renderer":"flot",
+ "seriesOverrides":[
+
+ ],
+ "spaceLength":10,
+ "stack":false,
+ "steppedLine":false,
+ "targets":[
+ {
+ "expr":"sum (rate (container_network_receive_bytes_total{}[1m]))",
+ "format":"time_series",
+ "interval":"10s",
+ "intervalFactor":1,
+ "legendFormat":"Received",
+ "metric":"network",
+ "refId":"A",
+ "step":15
+ },
+ {
+ "expr":"- sum (rate (container_network_transmit_bytes_total{}[1m]))",
+ "format":"time_series",
+ "interval":"10s",
+ "intervalFactor":1,
+ "legendFormat":"Sent",
+ "metric":"network",
+ "refId":"B",
+ "step":15
+ }
+ ],
+ "thresholds":[
+
+ ],
+ "timeFrom":null,
+ "timeShift":null,
+ "title":"Network I/O pressure",
+ "tooltip":{
+ "msResolution":false,
+ "shared":true,
+ "sort":0,
+ "value_type":"cumulative"
+ },
+ "transparent":false,
+ "type":"graph",
+ "xaxis":{
+ "buckets":null,
+ "mode":"time",
+ "name":null,
+ "show":true,
+ "values":[
+
+ ]
+ },
+ "yaxes":[
+ {
+ "format":"Bps",
+ "label":null,
+ "logBase":1,
+ "max":null,
+ "min":null,
+ "show":true
+ },
+ {
+ "format":"Bps",
+ "label":null,
+ "logBase":1,
+ "max":null,
+ "min":null,
+ "show":false
+ }
+ ],
+ "yaxis":{
+ "align":false,
+ "alignLevel":null
+ }
+ }
+ ],
+ "repeat":null,
+ "title":"Network I/O",
+ "type":"row"
+ },
+ {
+ "collapsed":true,
+ "gridPos":{
+ "h":1,
+ "w":24,
+ "x":0,
+ "y":22
+ },
+ "id":64,
+ "panels":[
+ {
+ "aliasColors":{
+
+ },
+ "bars":false,
+ "dashLength":10,
+ "dashes":false,
+ "datasource":"prometheus - Juju generated source",
+ "decimals":2,
+ "editable":true,
+ "error":false,
+ "fill":1,
+ "grid":{
+
+ },
+ "gridPos":{
+ "h":7,
+ "w":24,
+ "x":0,
+ "y":23
+ },
+ "id":16,
+ "legend":{
+ "alignAsTable":true,
+ "avg":true,
+ "current":true,
+ "max":false,
+ "min":false,
+ "rightSide":true,
+ "show":true,
+ "sideWidth":200,
+ "sort":"current",
+ "sortDesc":true,
+ "total":false,
+ "values":true
+ },
+ "lines":true,
+ "linewidth":2,
+ "links":[
+
+ ],
+ "nullPointMode":"connected",
+ "percentage":false,
+ "pointradius":5,
+ "points":false,
+ "renderer":"flot",
+ "seriesOverrides":[
+
+ ],
+ "spaceLength":10,
+ "stack":false,
+ "steppedLine":false,
+ "targets":[
+ {
+ "expr":"sum (rate (container_network_receive_bytes_total{image!=\"\",pod=~\"$container.*\",namespace=~\"$namespace\"}[1m])) by (pod)",
+ "format":"time_series",
+ "interval":"10s",
+ "intervalFactor":1,
+ "legendFormat":"-> {{pod}}",
+ "metric":"network",
+ "refId":"A",
+ "step":15
+ },
+ {
+ "expr":"- sum (rate (container_network_transmit_bytes_total{image!=\"\",pod=~\"$container.*\",namespace=~\"$namespace\"}[1m])) by (pod)",
+ "format":"time_series",
+ "hide":false,
+ "interval":"10s",
+ "intervalFactor":1,
+ "legendFormat":"<- {{pod}}",
+ "metric":"network",
+ "refId":"B",
+ "step":15
+ }
+ ],
+ "thresholds":[
+
+ ],
+ "timeFrom":null,
+ "timeShift":null,
+ "title":"Pods network I/O (1m avg)",
+ "tooltip":{
+ "msResolution":false,
+ "shared":true,
+ "sort":2,
+ "value_type":"cumulative"
+ },
+ "type":"graph",
+ "xaxis":{
+ "buckets":null,
+ "mode":"time",
+ "name":null,
+ "show":true,
+ "values":[
+
+ ]
+ },
+ "yaxes":[
+ {
+ "format":"Bps",
+ "label":null,
+ "logBase":1,
+ "max":null,
+ "min":null,
+ "show":true
+ },
+ {
+ "format":"short",
+ "label":null,
+ "logBase":1,
+ "max":null,
+ "min":null,
+ "show":false
+ }
+ ],
+ "yaxis":{
+ "align":false,
+ "alignLevel":null
+ }
+ }
+ ],
+ "repeat":null,
+ "title":"Pods network I/O",
+ "type":"row"
+ },
+ {
+ "collapsed":true,
+ "gridPos":{
+ "h":1,
+ "w":24,
+ "x":0,
+ "y":23
+ },
+ "id":65,
+ "panels":[
+ {
+ "aliasColors":{
+
+ },
+ "bars":false,
+ "dashLength":10,
+ "dashes":false,
+ "datasource":"prometheus - Juju generated source",
+ "decimals":2,
+ "editable":true,
+ "error":false,
+ "fill":1,
+ "grid":{
+
+ },
+ "gridPos":{
+ "h":13,
+ "w":24,
+ "x":0,
+ "y":24
+ },
+ "id":29,
+ "legend":{
+ "alignAsTable":true,
+ "avg":true,
+ "current":true,
+ "max":false,
+ "min":false,
+ "rightSide":false,
+ "show":true,
+ "sideWidth":200,
+ "sort":"current",
+ "sortDesc":true,
+ "total":false,
+ "values":true
+ },
+ "lines":true,
+ "linewidth":2,
+ "links":[
+
+ ],
+ "nullPointMode":"connected",
+ "percentage":false,
+ "pointradius":5,
+ "points":false,
+ "renderer":"flot",
+ "seriesOverrides":[
+
+ ],
+ "spaceLength":10,
+ "stack":false,
+ "steppedLine":false,
+ "targets":[
+ {
+ "expr":"sum (rate (container_network_receive_bytes_total{id!=\"/\"}[1m])) by (id)",
+ "format":"time_series",
+ "hide":false,
+ "interval":"10s",
+ "intervalFactor":1,
+ "legendFormat":"-> {{id}}",
+ "metric":"network",
+ "refId":"A",
+ "step":15
+ },
+ {
+ "expr":"- sum (rate (container_network_transmit_bytes_total{id!=\"/\"}[1m])) by (id)",
+ "format":"time_series",
+ "hide":false,
+ "interval":"10s",
+ "intervalFactor":1,
+ "legendFormat":"<- {{id}}",
+ "metric":"network",
+ "refId":"B",
+ "step":15
+ }
+ ],
+ "thresholds":[
+
+ ],
+ "timeFrom":null,
+ "timeShift":null,
+ "title":"All processes network I/O (1m avg)",
+ "tooltip":{
+ "msResolution":false,
+ "shared":true,
+ "sort":2,
+ "value_type":"cumulative"
+ },
+ "type":"graph",
+ "xaxis":{
+ "buckets":null,
+ "mode":"time",
+ "name":null,
+ "show":true,
+ "values":[
+
+ ]
+ },
+ "yaxes":[
+ {
+ "format":"Bps",
+ "label":null,
+ "logBase":1,
+ "max":null,
+ "min":null,
+ "show":true
+ },
+ {
+ "format":"short",
+ "label":null,
+ "logBase":1,
+ "max":null,
+ "min":null,
+ "show":false
+ }
+ ],
+ "yaxis":{
+ "align":false,
+ "alignLevel":null
+ }
+ }
+ ],
+ "repeat":null,
+ "title":"All processes network I/O",
+ "type":"row"
+ },
+ {
+ "collapsed":true,
+ "gridPos":{
+ "h":1,
+ "w":24,
+ "x":0,
+ "y":24
+ },
+ "id":66,
+ "panels":[
+ {
+ "aliasColors":{
+
+ },
+ "bars":false,
+ "dashLength":10,
+ "dashes":false,
+ "datasource":"prometheus - Juju generated source",
+ "decimals":3,
+ "description":"",
+ "editable":true,
+ "error":false,
+ "fill":0,
+ "grid":{
+
+ },
+ "gridPos":{
+ "h":7,
+ "w":8,
+ "x":0,
+ "y":59
+ },
+ "height":"",
+ "id":45,
+ "legend":{
+ "alignAsTable":true,
+ "avg":false,
+ "current":true,
+ "hideEmpty":false,
+ "hideZero":false,
+ "max":false,
+ "min":false,
+ "rightSide":true,
+ "show":true,
+ "sideWidth":null,
+ "sort":"current",
+ "sortDesc":true,
+ "total":false,
+ "values":true
+ },
+ "lines":true,
+ "linewidth":2,
+ "links":[
+
+ ],
+ "nullPointMode":"connected",
+ "percentage":false,
+ "pointradius":5,
+ "points":false,
+ "renderer":"flot",
+ "seriesOverrides":[
+
+ ],
+ "spaceLength":10,
+ "stack":false,
+ "steppedLine":true,
+ "targets":[
+ {
+ "expr":"sum by (container) (container_fs_usage_bytes{id!=\"/\"})",
+ "format":"time_series",
+ "hide":false,
+ "interval":"10s",
+ "intervalFactor":1,
+ "legendFormat":"{{container}}",
+ "metric":"container_cpu",
+ "refId":"A",
+ "step":15
+ }
+ ],
+ "thresholds":[
+
+ ],
+ "timeFrom":null,
+ "timeShift":null,
+ "title":"Total Bytes Used By Containers",
+ "tooltip":{
+ "msResolution":true,
+ "shared":true,
+ "sort":2,
+ "value_type":"cumulative"
+ },
+ "type":"graph",
+ "xaxis":{
+ "buckets":null,
+ "mode":"time",
+ "name":null,
+ "show":true,
+ "values":[
+
+ ]
+ },
+ "yaxes":[
+ {
+ "decimals":null,
+ "format":"bytes",
+ "label":"Bytes",
+ "logBase":1,
+ "max":null,
+ "min":null,
+ "show":true
+ },
+ {
+ "format":"short",
+ "label":null,
+ "logBase":1,
+ "max":null,
+ "min":null,
+ "show":false
+ }
+ ]
+ },
+ {
+ "aliasColors":{
+
+ },
+ "bars":false,
+ "dashLength":10,
+ "dashes":false,
+ "datasource":"prometheus - Juju generated source",
+ "decimals":3,
+ "description":"",
+ "editable":true,
+ "error":false,
+ "fill":0,
+ "grid":{
+
+ },
+ "gridPos":{
+ "h":7,
+ "w":8,
+ "x":8,
+ "y":59
+ },
+ "height":"",
+ "id":46,
+ "legend":{
+ "alignAsTable":true,
+ "avg":false,
+ "current":true,
+ "hideEmpty":false,
+ "hideZero":false,
+ "max":false,
+ "min":false,
+ "rightSide":true,
+ "show":true,
+ "sideWidth":null,
+ "sort":"current",
+ "sortDesc":true,
+ "total":false,
+ "values":true
+ },
+ "lines":true,
+ "linewidth":2,
+ "links":[
+
+ ],
+ "nullPointMode":"connected",
+ "percentage":false,
+ "pointradius":5,
+ "points":false,
+ "renderer":"flot",
+ "seriesOverrides":[
+
+ ],
+ "spaceLength":10,
+ "stack":false,
+ "steppedLine":true,
+ "targets":[
+ {
+ "expr":"sum by (container) (container_fs_usage_bytes{id!=\"/\"}) / on (environment) group_left(id) sum (container_fs_usage_bytes{id=\"/\"}) * 100",
+ "format":"time_series",
+ "hide":false,
+ "interval":"10s",
+ "intervalFactor":1,
+ "legendFormat":"{{container}}",
+ "metric":"container_cpu",
+ "refId":"A",
+ "step":15
+ }
+ ],
+ "thresholds":[
+
+ ],
+ "timeFrom":null,
+ "timeShift":null,
+ "title":"Current Usage Breakdown By Container",
+ "tooltip":{
+ "msResolution":true,
+ "shared":true,
+ "sort":2,
+ "value_type":"cumulative"
+ },
+ "type":"graph",
+ "xaxis":{
+ "buckets":null,
+ "mode":"time",
+ "name":null,
+ "show":true,
+ "values":[
+
+ ]
+ },
+ "yaxes":[
+ {
+ "decimals":null,
+ "format":"percent",
+ "label":"Percent",
+ "logBase":1,
+ "max":null,
+ "min":null,
+ "show":true
+ },
+ {
+ "format":"short",
+ "label":null,
+ "logBase":1,
+ "max":null,
+ "min":null,
+ "show":false
+ }
+ ]
+ },
+ {
+ "cacheTimeout":null,
+ "colorBackground":false,
+ "colorValue":false,
+ "colors":[
+ "#299c46",
+ "rgba(237, 129, 40, 0.89)",
+ "#d44a3a"
+ ],
+ "datasource":"prometheus - Juju generated source",
+ "decimals":2,
+ "format":"bytes",
+ "gauge":{
+ "maxValue":100,
+ "minValue":0,
+ "show":false,
+ "thresholdLabels":false,
+ "thresholdMarkers":true
+ },
+ "gridPos":{
+ "h":7,
+ "w":8,
+ "x":16,
+ "y":59
+ },
+ "id":47,
+ "interval":null,
+ "links":[
+
+ ],
+ "mappingType":1,
+ "mappingTypes":[
+ {
+ "name":"value to text",
+ "value":1
+ },
+ {
+ "name":"range to text",
+ "value":2
+ }
+ ],
+ "maxDataPoints":100,
+ "nullPointMode":"connected",
+ "nullText":null,
+ "postfix":"",
+ "postfixFontSize":"50%",
+ "prefix":"",
+ "prefixFontSize":"50%",
+ "rangeMaps":[
+ {
+ "from":"null",
+ "text":"N/A",
+ "to":"null"
+ }
+ ],
+ "sparkline":{
+ "fillColor":"rgba(31, 118, 189, 0.18)",
+ "full":false,
+ "lineColor":"rgb(31, 120, 193)",
+ "show":false
+ },
+ "tableColumn":"",
+ "targets":[
+ {
+ "expr":"sum(container_fs_usage_bytes{id=\"/\"}) - sum(container_fs_usage_bytes{id!=\"/\"})",
+ "format":"time_series",
+ "instant":false,
+ "intervalFactor":2,
+ "refId":"A"
+ }
+ ],
+ "thresholds":"",
+ "title":"Total Bytes Not Used By Containers",
+ "transparent":false,
+ "type":"singlestat",
+ "valueFontSize":"80%",
+ "valueMaps":[
+ {
+ "op":"=",
+ "text":"N/A",
+ "value":"null"
+ }
+ ],
+ "valueName":"avg"
+ }
+ ],
+ "repeat":null,
+ "title":"Container Disk Utilisation",
+ "type":"row"
+ }
+ ],
+ "schemaVersion":16,
+ "style":"dark",
+ "tags":[
+ "Juju",
+ "Kubernetes"
+ ],
+ "templating":{
+ "list":[
+ {
+ "allValue":".*",
+ "current":{
+
+ },
+ "datasource":"prometheus - Juju generated source",
+ "hide":0,
+ "includeAll":true,
+ "label":null,
+ "multi":false,
+ "name":"namespace",
+ "options":[
+
+ ],
+ "query":"label_values(container_memory_usage_bytes{namespace=~\".+\",container!=\"POD\"},namespace)",
+ "refresh":1,
+ "regex":"",
+ "sort":1,
+ "tagValuesQuery":"",
+ "tags":[
+
+ ],
+ "tagsQuery":"",
+ "type":"query",
+ "useTags":false
+ },
+ {
+ "allValue":".*",
+ "current":{
+
+ },
+ "datasource":"prometheus - Juju generated source",
+ "hide":0,
+ "includeAll":true,
+ "label":null,
+ "multi":false,
+ "name":"container",
+ "options":[
+
+ ],
+ "query":"label_values(container_memory_usage_bytes{namespace=~\"$namespace\",container!=\"POD\"},container)",
+ "refresh":1,
+ "regex":"",
+ "sort":1,
+ "tagValuesQuery":"",
+ "tags":[
+
+ ],
+ "tagsQuery":"",
+ "type":"query",
+ "useTags":false
+ }
+ ]
+ },
+ "time":{
+ "from":"now-6h",
+ "to":"now"
+ },
+ "timepicker":{
+ "refresh_intervals":[
+ "5s",
+ "10s",
+ "30s",
+ "1m",
+ "5m",
+ "15m",
+ "30m",
+ "1h",
+ "2h",
+ "1d"
+ ],
+ "time_options":[
+ "5m",
+ "15m",
+ "1h",
+ "6h",
+ "12h",
+ "24h",
+ "2d",
+ "7d",
+ "30d"
+ ]
+ },
+ "timezone":"utc",
+ "title":"Charmed Kubernetes Dashboard",
+ "version":1
+ }
+}
\ No newline at end of file
diff --git a/kubernetes-master/templates/grafana/conditional/prometheus.json b/kubernetes-master/templates/grafana/conditional/prometheus.json
new file mode 100644
index 0000000..35c428a
--- /dev/null
+++ b/kubernetes-master/templates/grafana/conditional/prometheus.json
@@ -0,0 +1,2186 @@
+{
+ "dashboard": {
+ "annotations": {
+ "list": [
+ {
+ "builtIn": 1,
+ "datasource": "-- Grafana --",
+ "enable": true,
+ "hide": true,
+ "iconColor": "rgba(0, 211, 255, 1)",
+ "name": "Annotations & Alerts",
+ "type": "dashboard"
+ }
+ ]
+ },
+ "description": "Derived from https://grafana.com/dashboards/315",
+ "editable": true,
+ "gnetId": null,
+ "graphTooltip": 0,
+ "id": null,
+ "iteration": 1554419177157,
+ "links": [
+
+ ],
+ "panels": [
+ {
+ "aliasColors": {
+
+ },
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "prometheus - Juju generated source",
+ "decimals": 2,
+ "editable": true,
+ "error": false,
+ "fill": 1,
+ "grid": {
+
+ },
+ "gridPos": {
+ "h": 5,
+ "w": 24,
+ "x": 0,
+ "y": 0
+ },
+ "height": "200px",
+ "id": 32,
+ "legend": {
+ "alignAsTable": false,
+ "avg": true,
+ "current": true,
+ "max": false,
+ "min": false,
+ "rightSide": false,
+ "show": false,
+ "sideWidth": 200,
+ "sort": "current",
+ "sortDesc": true,
+ "total": false,
+ "values": true
+ },
+ "lines": true,
+ "linewidth": 2,
+ "links": [
+
+ ],
+ "nullPointMode": "connected",
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [
+
+ ],
+ "spaceLength": 10,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "sum (rate (container_network_receive_bytes_total{kubernetes_io_hostname=~\"^$Node$\",namespace=~\"^$Namespace$\"}[1m]))",
+ "format": "time_series",
+ "interval": "10s",
+ "intervalFactor": 1,
+ "legendFormat": "Received",
+ "metric": "network",
+ "refId": "A",
+ "step": 10
+ },
+ {
+ "expr": "- sum (rate (container_network_transmit_bytes_total{kubernetes_io_hostname=~\"^$Node$\",namespace=~\"^$Namespace$\"}[1m]))",
+ "format": "time_series",
+ "interval": "10s",
+ "intervalFactor": 1,
+ "legendFormat": "Sent",
+ "metric": "network",
+ "refId": "B",
+ "step": 10
+ }
+ ],
+ "thresholds": [
+
+ ],
+ "timeFrom": null,
+ "timeRegions": [
+
+ ],
+ "timeShift": null,
+ "title": "Network I/O pressure",
+ "tooltip": {
+ "msResolution": false,
+ "shared": true,
+ "sort": 0,
+ "value_type": "cumulative"
+ },
+ "transparent": false,
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": [
+
+ ]
+ },
+ "yaxes": [
+ {
+ "format": "Bps",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ },
+ {
+ "format": "Bps",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": false
+ }
+ ],
+ "yaxis": {
+ "align": false,
+ "alignLevel": null
+ }
+ },
+ {
+ "cacheTimeout": null,
+ "colorBackground": false,
+ "colorValue": true,
+ "colors": [
+ "rgba(50, 172, 45, 0.97)",
+ "rgba(237, 129, 40, 0.89)",
+ "rgba(245, 54, 54, 0.9)"
+ ],
+ "datasource": "prometheus - Juju generated source",
+ "editable": true,
+ "error": false,
+ "format": "percent",
+ "gauge": {
+ "maxValue": 100,
+ "minValue": 0,
+ "show": true,
+ "thresholdLabels": false,
+ "thresholdMarkers": true
+ },
+ "gridPos": {
+ "h": 5,
+ "w": 8,
+ "x": 0,
+ "y": 5
+ },
+ "height": "180px",
+ "id": 4,
+ "interval": null,
+ "links": [
+
+ ],
+ "mappingType": 1,
+ "mappingTypes": [
+ {
+ "name": "value to text",
+ "value": 1
+ },
+ {
+ "name": "range to text",
+ "value": 2
+ }
+ ],
+ "maxDataPoints": 100,
+ "nullPointMode": "connected",
+ "nullText": null,
+ "postfix": "",
+ "postfixFontSize": "50%",
+ "prefix": "",
+ "prefixFontSize": "50%",
+ "rangeMaps": [
+ {
+ "from": "null",
+ "text": "N/A",
+ "to": "null"
+ }
+ ],
+ "sparkline": {
+ "fillColor": "rgba(31, 118, 189, 0.18)",
+ "full": false,
+ "lineColor": "rgb(31, 120, 193)",
+ "show": false
+ },
+ "tableColumn": "",
+ "targets": [
+ {
+ "expr": "sum (container_memory_working_set_bytes{kubernetes_io_hostname=~\"^$Node$\",namespace=~\"^$Namespace$\"}) / sum (machine_memory_bytes{kubernetes_io_hostname=~\"^$Node$\"}) * 100",
+ "format": "time_series",
+ "interval": "10s",
+ "intervalFactor": 1,
+ "refId": "A",
+ "step": 10
+ }
+ ],
+ "thresholds": "",
+ "title": "Cluster memory usage",
+ "transparent": false,
+ "type": "singlestat",
+ "valueFontSize": "80%",
+ "valueMaps": [
+ {
+ "op": "=",
+ "text": "N/A",
+ "value": "null"
+ }
+ ],
+ "valueName": "current"
+ },
+ {
+ "cacheTimeout": null,
+ "colorBackground": false,
+ "colorValue": true,
+ "colors": [
+ "rgba(50, 172, 45, 0.97)",
+ "rgba(237, 129, 40, 0.89)",
+ "rgba(245, 54, 54, 0.9)"
+ ],
+ "datasource": "prometheus - Juju generated source",
+ "decimals": 2,
+ "editable": true,
+ "error": false,
+ "format": "percent",
+ "gauge": {
+ "maxValue": 100,
+ "minValue": 0,
+ "show": true,
+ "thresholdLabels": false,
+ "thresholdMarkers": true
+ },
+ "gridPos": {
+ "h": 5,
+ "w": 8,
+ "x": 8,
+ "y": 5
+ },
+ "height": "180px",
+ "id": 6,
+ "interval": null,
+ "links": [
+
+ ],
+ "mappingType": 1,
+ "mappingTypes": [
+ {
+ "name": "value to text",
+ "value": 1
+ },
+ {
+ "name": "range to text",
+ "value": 2
+ }
+ ],
+ "maxDataPoints": 100,
+ "nullPointMode": "connected",
+ "nullText": null,
+ "postfix": "",
+ "postfixFontSize": "50%",
+ "prefix": "",
+ "prefixFontSize": "50%",
+ "rangeMaps": [
+ {
+ "from": "null",
+ "text": "N/A",
+ "to": "null"
+ }
+ ],
+ "sparkline": {
+ "fillColor": "rgba(31, 118, 189, 0.18)",
+ "full": false,
+ "lineColor": "rgb(31, 120, 193)",
+ "show": false
+ },
+ "tableColumn": "",
+ "targets": [
+ {
+ "expr": "sum (rate (container_cpu_usage_seconds_total{kubernetes_io_hostname=~\"^$Node$\",namespace=~\"^$Namespace$\"}[1m])) / sum (machine_cpu_cores{kubernetes_io_hostname=~\"^$Node$\"}) * 100",
+ "format": "time_series",
+ "interval": "10s",
+ "intervalFactor": 1,
+ "refId": "A",
+ "step": 10
+ }
+ ],
+ "thresholds": "",
+ "title": "Cluster CPU usage (1m avg)",
+ "type": "singlestat",
+ "valueFontSize": "80%",
+ "valueMaps": [
+ {
+ "op": "=",
+ "text": "N/A",
+ "value": "null"
+ }
+ ],
+ "valueName": "current"
+ },
+ {
+ "cacheTimeout": null,
+ "colorBackground": false,
+ "colorValue": true,
+ "colors": [
+ "rgba(50, 172, 45, 0.97)",
+ "rgba(237, 129, 40, 0.89)",
+ "rgba(245, 54, 54, 0.9)"
+ ],
+ "datasource": "prometheus - Juju generated source",
+ "decimals": 2,
+ "editable": true,
+ "error": false,
+ "format": "percent",
+ "gauge": {
+ "maxValue": 100,
+ "minValue": 0,
+ "show": true,
+ "thresholdLabels": false,
+ "thresholdMarkers": true
+ },
+ "gridPos": {
+ "h": 5,
+ "w": 8,
+ "x": 16,
+ "y": 5
+ },
+ "height": "180px",
+ "id": 7,
+ "interval": null,
+ "links": [
+
+ ],
+ "mappingType": 1,
+ "mappingTypes": [
+ {
+ "name": "value to text",
+ "value": 1
+ },
+ {
+ "name": "range to text",
+ "value": 2
+ }
+ ],
+ "maxDataPoints": 100,
+ "nullPointMode": "connected",
+ "nullText": null,
+ "postfix": "",
+ "postfixFontSize": "50%",
+ "prefix": "",
+ "prefixFontSize": "50%",
+ "rangeMaps": [
+ {
+ "from": "null",
+ "text": "N/A",
+ "to": "null"
+ }
+ ],
+ "sparkline": {
+ "fillColor": "rgba(31, 118, 189, 0.18)",
+ "full": false,
+ "lineColor": "rgb(31, 120, 193)",
+ "show": false
+ },
+ "tableColumn": "",
+ "targets": [
+ {
+ "expr": "sum (container_fs_usage_bytes{device=~\"^/dev/.*$\",kubernetes_io_hostname=~\"^$Node$\",namespace=~\"^$Namespace$\"}) / sum (container_fs_limit_bytes{device=~\"^/dev/.*$\",id=\"/\",kubernetes_io_hostname=~\"^$Node$\"}) * 100",
+ "format": "time_series",
+ "interval": "10s",
+ "intervalFactor": 1,
+ "legendFormat": "",
+ "metric": "",
+ "refId": "A",
+ "step": 10
+ }
+ ],
+ "thresholds": "",
+ "title": "Cluster filesystem usage",
+ "type": "singlestat",
+ "valueFontSize": "80%",
+ "valueMaps": [
+ {
+ "op": "=",
+ "text": "N/A",
+ "value": "null"
+ }
+ ],
+ "valueName": "current"
+ },
+ {
+ "cacheTimeout": null,
+ "colorBackground": false,
+ "colorValue": false,
+ "colors": [
+ "rgba(50, 172, 45, 0.97)",
+ "rgba(237, 129, 40, 0.89)",
+ "rgba(245, 54, 54, 0.9)"
+ ],
+ "datasource": "prometheus - Juju generated source",
+ "decimals": 2,
+ "editable": true,
+ "error": false,
+ "format": "bytes",
+ "gauge": {
+ "maxValue": 100,
+ "minValue": 0,
+ "show": false,
+ "thresholdLabels": false,
+ "thresholdMarkers": true
+ },
+ "gridPos": {
+ "h": 3,
+ "w": 4,
+ "x": 0,
+ "y": 10
+ },
+ "height": "1px",
+ "id": 9,
+ "interval": null,
+ "links": [
+
+ ],
+ "mappingType": 1,
+ "mappingTypes": [
+ {
+ "name": "value to text",
+ "value": 1
+ },
+ {
+ "name": "range to text",
+ "value": 2
+ }
+ ],
+ "maxDataPoints": 100,
+ "nullPointMode": "connected",
+ "nullText": null,
+ "postfix": "",
+ "postfixFontSize": "20%",
+ "prefix": "",
+ "prefixFontSize": "20%",
+ "rangeMaps": [
+ {
+ "from": "null",
+ "text": "N/A",
+ "to": "null"
+ }
+ ],
+ "sparkline": {
+ "fillColor": "rgba(31, 118, 189, 0.18)",
+ "full": false,
+ "lineColor": "rgb(31, 120, 193)",
+ "show": false
+ },
+ "tableColumn": "",
+ "targets": [
+ {
+ "expr": "sum (container_memory_working_set_bytes{kubernetes_io_hostname=~\"^$Node$\",namespace=~\"^$Namespace$\"})",
+ "format": "time_series",
+ "interval": "10s",
+ "intervalFactor": 1,
+ "refId": "A",
+ "step": 10
+ }
+ ],
+ "thresholds": "",
+ "title": "Used",
+ "type": "singlestat",
+ "valueFontSize": "50%",
+ "valueMaps": [
+ {
+ "op": "=",
+ "text": "N/A",
+ "value": "null"
+ }
+ ],
+ "valueName": "current"
+ },
+ {
+ "cacheTimeout": null,
+ "colorBackground": false,
+ "colorValue": false,
+ "colors": [
+ "rgba(50, 172, 45, 0.97)",
+ "rgba(237, 129, 40, 0.89)",
+ "rgba(245, 54, 54, 0.9)"
+ ],
+ "datasource": "prometheus - Juju generated source",
+ "decimals": 2,
+ "editable": true,
+ "error": false,
+ "format": "bytes",
+ "gauge": {
+ "maxValue": 100,
+ "minValue": 0,
+ "show": false,
+ "thresholdLabels": false,
+ "thresholdMarkers": true
+ },
+ "gridPos": {
+ "h": 3,
+ "w": 4,
+ "x": 4,
+ "y": 10
+ },
+ "height": "1px",
+ "id": 10,
+ "interval": null,
+ "links": [
+
+ ],
+ "mappingType": 1,
+ "mappingTypes": [
+ {
+ "name": "value to text",
+ "value": 1
+ },
+ {
+ "name": "range to text",
+ "value": 2
+ }
+ ],
+ "maxDataPoints": 100,
+ "nullPointMode": "connected",
+ "nullText": null,
+ "postfix": "",
+ "postfixFontSize": "50%",
+ "prefix": "",
+ "prefixFontSize": "50%",
+ "rangeMaps": [
+ {
+ "from": "null",
+ "text": "N/A",
+ "to": "null"
+ }
+ ],
+ "sparkline": {
+ "fillColor": "rgba(31, 118, 189, 0.18)",
+ "full": false,
+ "lineColor": "rgb(31, 120, 193)",
+ "show": false
+ },
+ "tableColumn": "",
+ "targets": [
+ {
+ "expr": "sum (machine_memory_bytes{kubernetes_io_hostname=~\"^$Node$\"})",
+ "format": "time_series",
+ "interval": "10s",
+ "intervalFactor": 1,
+ "refId": "A",
+ "step": 10
+ }
+ ],
+ "thresholds": "",
+ "title": "Total",
+ "type": "singlestat",
+ "valueFontSize": "50%",
+ "valueMaps": [
+ {
+ "op": "=",
+ "text": "N/A",
+ "value": "null"
+ }
+ ],
+ "valueName": "current"
+ },
+ {
+ "cacheTimeout": null,
+ "colorBackground": false,
+ "colorValue": false,
+ "colors": [
+ "rgba(50, 172, 45, 0.97)",
+ "rgba(237, 129, 40, 0.89)",
+ "rgba(245, 54, 54, 0.9)"
+ ],
+ "datasource": "prometheus - Juju generated source",
+ "decimals": 2,
+ "editable": true,
+ "error": false,
+ "format": "none",
+ "gauge": {
+ "maxValue": 100,
+ "minValue": 0,
+ "show": false,
+ "thresholdLabels": false,
+ "thresholdMarkers": true
+ },
+ "gridPos": {
+ "h": 3,
+ "w": 4,
+ "x": 8,
+ "y": 10
+ },
+ "height": "1px",
+ "id": 11,
+ "interval": null,
+ "links": [
+
+ ],
+ "mappingType": 1,
+ "mappingTypes": [
+ {
+ "name": "value to text",
+ "value": 1
+ },
+ {
+ "name": "range to text",
+ "value": 2
+ }
+ ],
+ "maxDataPoints": 100,
+ "nullPointMode": "connected",
+ "nullText": null,
+ "postfix": " cores",
+ "postfixFontSize": "30%",
+ "prefix": "",
+ "prefixFontSize": "50%",
+ "rangeMaps": [
+ {
+ "from": "null",
+ "text": "N/A",
+ "to": "null"
+ }
+ ],
+ "sparkline": {
+ "fillColor": "rgba(31, 118, 189, 0.18)",
+ "full": false,
+ "lineColor": "rgb(31, 120, 193)",
+ "show": false
+ },
+ "tableColumn": "",
+ "targets": [
+ {
+ "expr": "sum (rate (container_cpu_usage_seconds_total{kubernetes_io_hostname=~\"^$Node$\",namespace=~\"^$Namespace$\"}[1m]))",
+ "format": "time_series",
+ "interval": "10s",
+ "intervalFactor": 1,
+ "refId": "A",
+ "step": 10
+ }
+ ],
+ "thresholds": "",
+ "title": "Used",
+ "type": "singlestat",
+ "valueFontSize": "50%",
+ "valueMaps": [
+ {
+ "op": "=",
+ "text": "N/A",
+ "value": "null"
+ }
+ ],
+ "valueName": "current"
+ },
+ {
+ "cacheTimeout": null,
+ "colorBackground": false,
+ "colorValue": false,
+ "colors": [
+ "rgba(50, 172, 45, 0.97)",
+ "rgba(237, 129, 40, 0.89)",
+ "rgba(245, 54, 54, 0.9)"
+ ],
+ "datasource": "prometheus - Juju generated source",
+ "decimals": 2,
+ "editable": true,
+ "error": false,
+ "format": "none",
+ "gauge": {
+ "maxValue": 100,
+ "minValue": 0,
+ "show": false,
+ "thresholdLabels": false,
+ "thresholdMarkers": true
+ },
+ "gridPos": {
+ "h": 3,
+ "w": 4,
+ "x": 12,
+ "y": 10
+ },
+ "height": "1px",
+ "id": 12,
+ "interval": null,
+ "links": [
+
+ ],
+ "mappingType": 1,
+ "mappingTypes": [
+ {
+ "name": "value to text",
+ "value": 1
+ },
+ {
+ "name": "range to text",
+ "value": 2
+ }
+ ],
+ "maxDataPoints": 100,
+ "nullPointMode": "connected",
+ "nullText": null,
+ "postfix": " cores",
+ "postfixFontSize": "30%",
+ "prefix": "",
+ "prefixFontSize": "50%",
+ "rangeMaps": [
+ {
+ "from": "null",
+ "text": "N/A",
+ "to": "null"
+ }
+ ],
+ "sparkline": {
+ "fillColor": "rgba(31, 118, 189, 0.18)",
+ "full": false,
+ "lineColor": "rgb(31, 120, 193)",
+ "show": false
+ },
+ "tableColumn": "",
+ "targets": [
+ {
+ "expr": "sum (machine_cpu_cores{kubernetes_io_hostname=~\"^$Node$\"})",
+ "format": "time_series",
+ "interval": "10s",
+ "intervalFactor": 1,
+ "refId": "A",
+ "step": 10
+ }
+ ],
+ "thresholds": "",
+ "title": "Total",
+ "type": "singlestat",
+ "valueFontSize": "50%",
+ "valueMaps": [
+ {
+ "op": "=",
+ "text": "N/A",
+ "value": "null"
+ }
+ ],
+ "valueName": "current"
+ },
+ {
+ "cacheTimeout": null,
+ "colorBackground": false,
+ "colorValue": false,
+ "colors": [
+ "rgba(50, 172, 45, 0.97)",
+ "rgba(237, 129, 40, 0.89)",
+ "rgba(245, 54, 54, 0.9)"
+ ],
+ "datasource": "prometheus - Juju generated source",
+ "decimals": 2,
+ "editable": true,
+ "error": false,
+ "format": "bytes",
+ "gauge": {
+ "maxValue": 100,
+ "minValue": 0,
+ "show": false,
+ "thresholdLabels": false,
+ "thresholdMarkers": true
+ },
+ "gridPos": {
+ "h": 3,
+ "w": 4,
+ "x": 16,
+ "y": 10
+ },
+ "height": "1px",
+ "id": 13,
+ "interval": null,
+ "links": [
+
+ ],
+ "mappingType": 1,
+ "mappingTypes": [
+ {
+ "name": "value to text",
+ "value": 1
+ },
+ {
+ "name": "range to text",
+ "value": 2
+ }
+ ],
+ "maxDataPoints": 100,
+ "nullPointMode": "connected",
+ "nullText": null,
+ "postfix": "",
+ "postfixFontSize": "50%",
+ "prefix": "",
+ "prefixFontSize": "50%",
+ "rangeMaps": [
+ {
+ "from": "null",
+ "text": "N/A",
+ "to": "null"
+ }
+ ],
+ "sparkline": {
+ "fillColor": "rgba(31, 118, 189, 0.18)",
+ "full": false,
+ "lineColor": "rgb(31, 120, 193)",
+ "show": false
+ },
+ "tableColumn": "",
+ "targets": [
+ {
+ "expr": "sum (container_fs_usage_bytes{device=~\"^/dev/.*$\",kubernetes_io_hostname=~\"^$Node$\",namespace=~\"^$Namespace$\"})",
+ "format": "time_series",
+ "interval": "10s",
+ "intervalFactor": 1,
+ "refId": "A",
+ "step": 10
+ }
+ ],
+ "thresholds": "",
+ "title": "Used",
+ "type": "singlestat",
+ "valueFontSize": "50%",
+ "valueMaps": [
+ {
+ "op": "=",
+ "text": "N/A",
+ "value": "null"
+ }
+ ],
+ "valueName": "current"
+ },
+ {
+ "cacheTimeout": null,
+ "colorBackground": false,
+ "colorValue": false,
+ "colors": [
+ "rgba(50, 172, 45, 0.97)",
+ "rgba(237, 129, 40, 0.89)",
+ "rgba(245, 54, 54, 0.9)"
+ ],
+ "datasource": "prometheus - Juju generated source",
+ "decimals": 2,
+ "editable": true,
+ "error": false,
+ "format": "bytes",
+ "gauge": {
+ "maxValue": 100,
+ "minValue": 0,
+ "show": false,
+ "thresholdLabels": false,
+ "thresholdMarkers": true
+ },
+ "gridPos": {
+ "h": 3,
+ "w": 4,
+ "x": 20,
+ "y": 10
+ },
+ "height": "1px",
+ "id": 14,
+ "interval": null,
+ "links": [
+
+ ],
+ "mappingType": 1,
+ "mappingTypes": [
+ {
+ "name": "value to text",
+ "value": 1
+ },
+ {
+ "name": "range to text",
+ "value": 2
+ }
+ ],
+ "maxDataPoints": 100,
+ "nullPointMode": "connected",
+ "nullText": null,
+ "postfix": "",
+ "postfixFontSize": "50%",
+ "prefix": "",
+ "prefixFontSize": "50%",
+ "rangeMaps": [
+ {
+ "from": "null",
+ "text": "N/A",
+ "to": "null"
+ }
+ ],
+ "sparkline": {
+ "fillColor": "rgba(31, 118, 189, 0.18)",
+ "full": false,
+ "lineColor": "rgb(31, 120, 193)",
+ "show": false
+ },
+ "tableColumn": "",
+ "targets": [
+ {
+ "expr": "sum (container_fs_limit_bytes{device=~\"^/dev/.*$\",id=\"/\",kubernetes_io_hostname=~\"^$Node$\"})",
+ "format": "time_series",
+ "interval": "10s",
+ "intervalFactor": 1,
+ "refId": "A",
+ "step": 10
+ }
+ ],
+ "thresholds": "",
+ "title": "Total",
+ "type": "singlestat",
+ "valueFontSize": "50%",
+ "valueMaps": [
+ {
+ "op": "=",
+ "text": "N/A",
+ "value": "null"
+ }
+ ],
+ "valueName": "current"
+ },
+ {
+ "aliasColors": {
+
+ },
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "prometheus - Juju generated source",
+ "decimals": 3,
+ "editable": true,
+ "error": false,
+ "fill": 0,
+ "grid": {
+
+ },
+ "gridPos": {
+ "h": 7,
+ "w": 24,
+ "x": 0,
+ "y": 13
+ },
+ "height": "",
+ "id": 17,
+ "legend": {
+ "alignAsTable": true,
+ "avg": true,
+ "current": true,
+ "max": false,
+ "min": false,
+ "rightSide": true,
+ "show": true,
+ "sort": "current",
+ "sortDesc": true,
+ "total": false,
+ "values": true
+ },
+ "lines": true,
+ "linewidth": 2,
+ "links": [
+
+ ],
+ "nullPointMode": "connected",
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [
+
+ ],
+ "spaceLength": 10,
+ "stack": false,
+ "steppedLine": true,
+ "targets": [
+ {
+ "expr": "sum (rate (container_cpu_usage_seconds_total{image!=\"\",name=~\"^k8s_.*\",kubernetes_io_hostname=~\"^$Node$\",namespace=~\"^$Namespace$\"}[1m])) by (pod_name)",
+ "format": "time_series",
+ "interval": "10s",
+ "intervalFactor": 1,
+ "legendFormat": "{{ pod_name }}",
+ "metric": "container_cpu",
+ "refId": "A",
+ "step": 10
+ },
+ {
+ "expr": "",
+ "format": "time_series",
+ "intervalFactor": 1,
+ "refId": "B"
+ }
+ ],
+ "thresholds": [
+
+ ],
+ "timeFrom": null,
+ "timeRegions": [
+
+ ],
+ "timeShift": null,
+ "title": "Pods CPU usage (1m avg)",
+ "tooltip": {
+ "msResolution": true,
+ "shared": true,
+ "sort": 2,
+ "value_type": "cumulative"
+ },
+ "transparent": false,
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": [
+
+ ]
+ },
+ "yaxes": [
+ {
+ "format": "none",
+ "label": "cores",
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": false
+ }
+ ],
+ "yaxis": {
+ "align": false,
+ "alignLevel": null
+ }
+ },
+ {
+ "aliasColors": {
+
+ },
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "prometheus - Juju generated source",
+ "decimals": 3,
+ "editable": true,
+ "error": false,
+ "fill": 0,
+ "grid": {
+
+ },
+ "gridPos": {
+ "h": 7,
+ "w": 24,
+ "x": 0,
+ "y": 20
+ },
+ "height": "",
+ "id": 24,
+ "legend": {
+ "alignAsTable": true,
+ "avg": true,
+ "current": true,
+ "hideEmpty": false,
+ "hideZero": false,
+ "max": false,
+ "min": false,
+ "rightSide": true,
+ "show": true,
+ "sideWidth": null,
+ "sort": "current",
+ "sortDesc": true,
+ "total": false,
+ "values": true
+ },
+ "lines": true,
+ "linewidth": 2,
+ "links": [
+
+ ],
+ "nullPointMode": "connected",
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [
+
+ ],
+ "spaceLength": 10,
+ "stack": false,
+ "steppedLine": true,
+ "targets": [
+ {
+ "expr": "sum (rate (container_cpu_usage_seconds_total{image!=\"\",name=~\"^k8s_.*\",container_name!=\"POD\",kubernetes_io_hostname=~\"^$Node$\",namespace=~\"^$Namespace$\"}[1m])) by (container_name, pod_name)",
+ "format": "time_series",
+ "hide": false,
+ "interval": "10s",
+ "intervalFactor": 1,
+ "legendFormat": "pod: {{ pod_name }} | {{ container_name }}",
+ "metric": "container_cpu",
+ "refId": "A",
+ "step": 10
+ },
+ {
+ "expr": "sum (rate (container_cpu_usage_seconds_total{image!=\"\",name!~\"^k8s_.*\",kubernetes_io_hostname=~\"^$Node$\",namespace=~\"^$Namespace$\"}[1m])) by (kubernetes_io_hostname, name, image)",
+ "format": "time_series",
+ "hide": false,
+ "interval": "10s",
+ "intervalFactor": 1,
+ "legendFormat": "docker: {{ kubernetes_io_hostname }} | {{ image }} ({{ name }})",
+ "metric": "container_cpu",
+ "refId": "B",
+ "step": 10
+ },
+ {
+ "expr": "sum (rate (container_cpu_usage_seconds_total{rkt_container_name!=\"\",kubernetes_io_hostname=~\"^$Node$\",namespace=~\"^$Namespace$\"}[1m])) by (kubernetes_io_hostname, rkt_container_name)",
+ "format": "time_series",
+ "interval": "10s",
+ "intervalFactor": 1,
+ "legendFormat": "rkt: {{ kubernetes_io_hostname }} | {{ rkt_container_name }}",
+ "metric": "container_cpu",
+ "refId": "C",
+ "step": 10
+ }
+ ],
+ "thresholds": [
+
+ ],
+ "timeFrom": null,
+ "timeRegions": [
+
+ ],
+ "timeShift": null,
+ "title": "Containers CPU usage (1m avg)",
+ "tooltip": {
+ "msResolution": true,
+ "shared": true,
+ "sort": 2,
+ "value_type": "cumulative"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": [
+
+ ]
+ },
+ "yaxes": [
+ {
+ "format": "none",
+ "label": "cores",
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": false
+ }
+ ],
+ "yaxis": {
+ "align": false,
+ "alignLevel": null
+ }
+ },
+ {
+ "aliasColors": {
+
+ },
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "prometheus - Juju generated source",
+ "decimals": 3,
+ "editable": true,
+ "error": false,
+ "fill": 0,
+ "grid": {
+
+ },
+ "gridPos": {
+ "h": 13,
+ "w": 24,
+ "x": 0,
+ "y": 27
+ },
+ "id": 20,
+ "legend": {
+ "alignAsTable": true,
+ "avg": true,
+ "current": true,
+ "max": false,
+ "min": false,
+ "rightSide": false,
+ "show": true,
+ "sort": "current",
+ "sortDesc": true,
+ "total": false,
+ "values": true
+ },
+ "lines": true,
+ "linewidth": 2,
+ "links": [
+
+ ],
+ "nullPointMode": "connected",
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [
+
+ ],
+ "spaceLength": 10,
+ "stack": false,
+ "steppedLine": true,
+ "targets": [
+ {
+ "expr": "sum (rate (container_cpu_usage_seconds_total{id!=\"/\",kubernetes_io_hostname=~\"^$Node$\",namespace=~\"^$Namespace$\"}[1m])) by (id)",
+ "format": "time_series",
+ "hide": false,
+ "interval": "10s",
+ "intervalFactor": 1,
+ "legendFormat": "{{ id }}",
+ "metric": "container_cpu",
+ "refId": "A",
+ "step": 10
+ }
+ ],
+ "thresholds": [
+
+ ],
+ "timeFrom": null,
+ "timeRegions": [
+
+ ],
+ "timeShift": null,
+ "title": "All processes CPU usage (1m avg)",
+ "tooltip": {
+ "msResolution": true,
+ "shared": true,
+ "sort": 2,
+ "value_type": "cumulative"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": [
+
+ ]
+ },
+ "yaxes": [
+ {
+ "format": "none",
+ "label": "cores",
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": false
+ }
+ ],
+ "yaxis": {
+ "align": false,
+ "alignLevel": null
+ }
+ },
+ {
+ "aliasColors": {
+
+ },
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "prometheus - Juju generated source",
+ "decimals": 2,
+ "editable": true,
+ "error": false,
+ "fill": 0,
+ "grid": {
+
+ },
+ "gridPos": {
+ "h": 7,
+ "w": 24,
+ "x": 0,
+ "y": 40
+ },
+ "id": 25,
+ "legend": {
+ "alignAsTable": true,
+ "avg": true,
+ "current": true,
+ "max": false,
+ "min": false,
+ "rightSide": true,
+ "show": true,
+ "sideWidth": 200,
+ "sort": "current",
+ "sortDesc": true,
+ "total": false,
+ "values": true
+ },
+ "lines": true,
+ "linewidth": 2,
+ "links": [
+
+ ],
+ "nullPointMode": "connected",
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [
+
+ ],
+ "spaceLength": 10,
+ "stack": false,
+ "steppedLine": true,
+ "targets": [
+ {
+ "expr": "sum (container_memory_working_set_bytes{image!=\"\",name=~\"^k8s_.*\",kubernetes_io_hostname=~\"^$Node$\",namespace=~\"^$Namespace$\"}) by (pod_name)",
+ "format": "time_series",
+ "interval": "10s",
+ "intervalFactor": 1,
+ "legendFormat": "{{ pod_name }}",
+ "metric": "container_memory_usage:sort_desc",
+ "refId": "A",
+ "step": 10
+ }
+ ],
+ "thresholds": [
+
+ ],
+ "timeFrom": null,
+ "timeRegions": [
+
+ ],
+ "timeShift": null,
+ "title": "Pods memory usage",
+ "tooltip": {
+ "msResolution": false,
+ "shared": true,
+ "sort": 2,
+ "value_type": "cumulative"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": [
+
+ ]
+ },
+ "yaxes": [
+ {
+ "format": "bytes",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": false
+ }
+ ],
+ "yaxis": {
+ "align": false,
+ "alignLevel": null
+ }
+ },
+ {
+ "aliasColors": {
+
+ },
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "prometheus - Juju generated source",
+ "decimals": 2,
+ "editable": true,
+ "error": false,
+ "fill": 0,
+ "grid": {
+
+ },
+ "gridPos": {
+ "h": 7,
+ "w": 24,
+ "x": 0,
+ "y": 47
+ },
+ "id": 27,
+ "legend": {
+ "alignAsTable": true,
+ "avg": true,
+ "current": true,
+ "max": false,
+ "min": false,
+ "rightSide": true,
+ "show": true,
+ "sideWidth": 200,
+ "sort": "current",
+ "sortDesc": true,
+ "total": false,
+ "values": true
+ },
+ "lines": true,
+ "linewidth": 2,
+ "links": [
+
+ ],
+ "nullPointMode": "connected",
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [
+
+ ],
+ "spaceLength": 10,
+ "stack": false,
+ "steppedLine": true,
+ "targets": [
+ {
+ "expr": "sum (container_memory_working_set_bytes{image!=\"\",name=~\"^k8s_.*\",container_name!=\"POD\",kubernetes_io_hostname=~\"^$Node$\",namespace=~\"^$Namespace$\"}) by (container_name, pod_name)",
+ "format": "time_series",
+ "interval": "10s",
+ "intervalFactor": 1,
+ "legendFormat": "pod: {{ pod_name }} | {{ container_name }}",
+ "metric": "container_memory_usage:sort_desc",
+ "refId": "A",
+ "step": 10
+ },
+ {
+ "expr": "sum (container_memory_working_set_bytes{image!=\"\",name!~\"^k8s_.*\",kubernetes_io_hostname=~\"^$Node$\",namespace=~\"^$Namespace$\"}) by (kubernetes_io_hostname, name, image)",
+ "format": "time_series",
+ "interval": "10s",
+ "intervalFactor": 1,
+ "legendFormat": "docker: {{ kubernetes_io_hostname }} | {{ image }} ({{ name }})",
+ "metric": "container_memory_usage:sort_desc",
+ "refId": "B",
+ "step": 10
+ },
+ {
+ "expr": "sum (container_memory_working_set_bytes{rkt_container_name!=\"\",kubernetes_io_hostname=~\"^$Node$\",namespace=~\"^$Namespace$\"}) by (kubernetes_io_hostname, rkt_container_name)",
+ "format": "time_series",
+ "interval": "10s",
+ "intervalFactor": 1,
+ "legendFormat": "rkt: {{ kubernetes_io_hostname }} | {{ rkt_container_name }}",
+ "metric": "container_memory_usage:sort_desc",
+ "refId": "C",
+ "step": 10
+ }
+ ],
+ "thresholds": [
+
+ ],
+ "timeFrom": null,
+ "timeRegions": [
+
+ ],
+ "timeShift": null,
+ "title": "Containers memory usage",
+ "tooltip": {
+ "msResolution": false,
+ "shared": true,
+ "sort": 2,
+ "value_type": "cumulative"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": [
+
+ ]
+ },
+ "yaxes": [
+ {
+ "format": "bytes",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": false
+ }
+ ],
+ "yaxis": {
+ "align": false,
+ "alignLevel": null
+ }
+ },
+ {
+ "aliasColors": {
+
+ },
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "prometheus - Juju generated source",
+ "decimals": 2,
+ "editable": true,
+ "error": false,
+ "fill": 0,
+ "grid": {
+
+ },
+ "gridPos": {
+ "h": 13,
+ "w": 24,
+ "x": 0,
+ "y": 54
+ },
+ "id": 28,
+ "legend": {
+ "alignAsTable": true,
+ "avg": true,
+ "current": true,
+ "max": false,
+ "min": false,
+ "rightSide": false,
+ "show": true,
+ "sideWidth": 200,
+ "sort": "current",
+ "sortDesc": true,
+ "total": false,
+ "values": true
+ },
+ "lines": true,
+ "linewidth": 2,
+ "links": [
+
+ ],
+ "nullPointMode": "connected",
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [
+
+ ],
+ "spaceLength": 10,
+ "stack": false,
+ "steppedLine": true,
+ "targets": [
+ {
+ "expr": "sum (container_memory_working_set_bytes{id!=\"/\",kubernetes_io_hostname=~\"^$Node$\",namespace=~\"^$Namespace$\"}) by (id)",
+ "format": "time_series",
+ "interval": "10s",
+ "intervalFactor": 1,
+ "legendFormat": "{{ id }}",
+ "metric": "container_memory_usage:sort_desc",
+ "refId": "A",
+ "step": 10
+ }
+ ],
+ "thresholds": [
+
+ ],
+ "timeFrom": null,
+ "timeRegions": [
+
+ ],
+ "timeShift": null,
+ "title": "All processes memory usage",
+ "tooltip": {
+ "msResolution": false,
+ "shared": true,
+ "sort": 2,
+ "value_type": "cumulative"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": [
+
+ ]
+ },
+ "yaxes": [
+ {
+ "format": "bytes",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": false
+ }
+ ],
+ "yaxis": {
+ "align": false,
+ "alignLevel": null
+ }
+ },
+ {
+ "aliasColors": {
+
+ },
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "prometheus - Juju generated source",
+ "decimals": 2,
+ "editable": true,
+ "error": false,
+ "fill": 1,
+ "grid": {
+
+ },
+ "gridPos": {
+ "h": 7,
+ "w": 24,
+ "x": 0,
+ "y": 67
+ },
+ "id": 16,
+ "legend": {
+ "alignAsTable": true,
+ "avg": true,
+ "current": true,
+ "max": false,
+ "min": false,
+ "rightSide": true,
+ "show": true,
+ "sideWidth": 200,
+ "sort": "current",
+ "sortDesc": true,
+ "total": false,
+ "values": true
+ },
+ "lines": true,
+ "linewidth": 2,
+ "links": [
+
+ ],
+ "nullPointMode": "connected",
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [
+
+ ],
+ "spaceLength": 10,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "sum (rate (container_network_receive_bytes_total{image!=\"\",name=~\"^k8s_.*\",kubernetes_io_hostname=~\"^$Node$\",namespace=~\"^$Namespace$\"}[1m])) by (pod_name)",
+ "format": "time_series",
+ "interval": "10s",
+ "intervalFactor": 1,
+ "legendFormat": "-> {{ pod_name }}",
+ "metric": "network",
+ "refId": "A",
+ "step": 10
+ },
+ {
+ "expr": "- sum (rate (container_network_transmit_bytes_total{image!=\"\",name=~\"^k8s_.*\",kubernetes_io_hostname=~\"^$Node$\",namespace=~\"^$Namespace$\"}[1m])) by (pod_name)",
+ "format": "time_series",
+ "interval": "10s",
+ "intervalFactor": 1,
+ "legendFormat": "<- {{ pod_name }}",
+ "metric": "network",
+ "refId": "B",
+ "step": 10
+ }
+ ],
+ "thresholds": [
+
+ ],
+ "timeFrom": null,
+ "timeRegions": [
+
+ ],
+ "timeShift": null,
+ "title": "Pods network I/O (1m avg)",
+ "tooltip": {
+ "msResolution": false,
+ "shared": true,
+ "sort": 2,
+ "value_type": "cumulative"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": [
+
+ ]
+ },
+ "yaxes": [
+ {
+ "format": "Bps",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": false
+ }
+ ],
+ "yaxis": {
+ "align": false,
+ "alignLevel": null
+ }
+ },
+ {
+ "aliasColors": {
+
+ },
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "prometheus - Juju generated source",
+ "decimals": 2,
+ "editable": true,
+ "error": false,
+ "fill": 1,
+ "grid": {
+
+ },
+ "gridPos": {
+ "h": 7,
+ "w": 24,
+ "x": 0,
+ "y": 74
+ },
+ "id": 30,
+ "legend": {
+ "alignAsTable": true,
+ "avg": true,
+ "current": true,
+ "max": false,
+ "min": false,
+ "rightSide": true,
+ "show": true,
+ "sideWidth": 200,
+ "sort": "current",
+ "sortDesc": true,
+ "total": false,
+ "values": true
+ },
+ "lines": true,
+ "linewidth": 2,
+ "links": [
+
+ ],
+ "nullPointMode": "connected",
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [
+
+ ],
+ "spaceLength": 10,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "sum (rate (container_network_receive_bytes_total{image!=\"\",name=~\"^k8s_.*\",kubernetes_io_hostname=~\"^$Node$\",namespace=~\"^$Namespace$\"}[1m])) by (container_name, pod_name)",
+ "format": "time_series",
+ "hide": false,
+ "interval": "10s",
+ "intervalFactor": 1,
+ "legendFormat": "-> pod: {{ pod_name }} | {{ container_name }}",
+ "metric": "network",
+ "refId": "B",
+ "step": 10
+ },
+ {
+ "expr": "- sum (rate (container_network_transmit_bytes_total{image!=\"\",name=~\"^k8s_.*\",kubernetes_io_hostname=~\"^$Node$\",namespace=~\"^$Namespace$\"}[1m])) by (container_name, pod_name)",
+ "format": "time_series",
+ "hide": false,
+ "interval": "10s",
+ "intervalFactor": 1,
+ "legendFormat": "<- pod: {{ pod_name }} | {{ container_name }}",
+ "metric": "network",
+ "refId": "D",
+ "step": 10
+ },
+ {
+ "expr": "sum (rate (container_network_receive_bytes_total{image!=\"\",name!~\"^k8s_.*\",kubernetes_io_hostname=~\"^$Node$\",namespace=~\"^$Namespace$\"}[1m])) by (kubernetes_io_hostname, name, image)",
+ "format": "time_series",
+ "hide": false,
+ "interval": "10s",
+ "intervalFactor": 1,
+ "legendFormat": "-> docker: {{ kubernetes_io_hostname }} | {{ image }} ({{ name }})",
+ "metric": "network",
+ "refId": "A",
+ "step": 10
+ },
+ {
+ "expr": "- sum (rate (container_network_transmit_bytes_total{image!=\"\",name!~\"^k8s_.*\",kubernetes_io_hostname=~\"^$Node$\",namespace=~\"^$Namespace$\"}[1m])) by (kubernetes_io_hostname, name, image)",
+ "format": "time_series",
+ "hide": false,
+ "interval": "10s",
+ "intervalFactor": 1,
+ "legendFormat": "<- docker: {{ kubernetes_io_hostname }} | {{ image }} ({{ name }})",
+ "metric": "network",
+ "refId": "C",
+ "step": 10
+ },
+ {
+ "expr": "sum (rate (container_network_receive_bytes_total{rkt_container_name!=\"\",kubernetes_io_hostname=~\"^$Node$\",namespace=~\"^$Namespace$\"}[1m])) by (kubernetes_io_hostname, rkt_container_name)",
+ "format": "time_series",
+ "hide": false,
+ "interval": "10s",
+ "intervalFactor": 1,
+ "legendFormat": "-> rkt: {{ kubernetes_io_hostname }} | {{ rkt_container_name }}",
+ "metric": "network",
+ "refId": "E",
+ "step": 10
+ },
+ {
+ "expr": "- sum (rate (container_network_transmit_bytes_total{rkt_container_name!=\"\",kubernetes_io_hostname=~\"^$Node$\",namespace=~\"^$Namespace$\"}[1m])) by (kubernetes_io_hostname, rkt_container_name)",
+ "format": "time_series",
+ "hide": false,
+ "interval": "10s",
+ "intervalFactor": 1,
+ "legendFormat": "<- rkt: {{ kubernetes_io_hostname }} | {{ rkt_container_name }}",
+ "metric": "network",
+ "refId": "F",
+ "step": 10
+ }
+ ],
+ "thresholds": [
+
+ ],
+ "timeFrom": null,
+ "timeRegions": [
+
+ ],
+ "timeShift": null,
+ "title": "Containers network I/O (1m avg)",
+ "tooltip": {
+ "msResolution": false,
+ "shared": true,
+ "sort": 2,
+ "value_type": "cumulative"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": [
+
+ ]
+ },
+ "yaxes": [
+ {
+ "format": "Bps",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": false
+ }
+ ],
+ "yaxis": {
+ "align": false,
+ "alignLevel": null
+ }
+ },
+ {
+ "aliasColors": {
+
+ },
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "prometheus - Juju generated source",
+ "decimals": 2,
+ "editable": true,
+ "error": false,
+ "fill": 1,
+ "grid": {
+
+ },
+ "gridPos": {
+ "h": 13,
+ "w": 24,
+ "x": 0,
+ "y": 81
+ },
+ "id": 29,
+ "legend": {
+ "alignAsTable": true,
+ "avg": true,
+ "current": true,
+ "max": false,
+ "min": false,
+ "rightSide": false,
+ "show": true,
+ "sideWidth": 200,
+ "sort": "current",
+ "sortDesc": true,
+ "total": false,
+ "values": true
+ },
+ "lines": true,
+ "linewidth": 2,
+ "links": [
+
+ ],
+ "nullPointMode": "connected",
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [
+
+ ],
+ "spaceLength": 10,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "sum (rate (container_network_receive_bytes_total{id!=\"/\",kubernetes_io_hostname=~\"^$Node$\",namespace=~\"^$Namespace$\"}[1m])) by (id)",
+ "format": "time_series",
+ "interval": "10s",
+ "intervalFactor": 1,
+ "legendFormat": "-> {{ id }}",
+ "metric": "network",
+ "refId": "A",
+ "step": 10
+ },
+ {
+ "expr": "- sum (rate (container_network_transmit_bytes_total{id!=\"/\",kubernetes_io_hostname=~\"^$Node$\",namespace=~\"^$Namespace$\"}[1m])) by (id)",
+ "format": "time_series",
+ "interval": "10s",
+ "intervalFactor": 1,
+ "legendFormat": "<- {{ id }}",
+ "metric": "network",
+ "refId": "B",
+ "step": 10
+ }
+ ],
+ "thresholds": [
+
+ ],
+ "timeFrom": null,
+ "timeRegions": [
+
+ ],
+ "timeShift": null,
+ "title": "All processes network I/O (1m avg)",
+ "tooltip": {
+ "msResolution": false,
+ "shared": true,
+ "sort": 2,
+ "value_type": "cumulative"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": [
+
+ ]
+ },
+ "yaxes": [
+ {
+ "format": "Bps",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": false
+ }
+ ],
+ "yaxis": {
+ "align": false,
+ "alignLevel": null
+ }
+ }
+ ],
+ "refresh": false,
+ "schemaVersion": 16,
+ "style": "dark",
+ "tags": [
+ "Juju"
+ ],
+ "templating": {
+ "list": [
+ {
+ "allValue": ".*",
+ "current": {
+ "selected": true,
+ "text": "All",
+ "value": "$__all"
+ },
+ "datasource": "prometheus - Juju generated source",
+ "definition": "label_values(kubernetes_io_hostname)",
+ "hide": 0,
+ "includeAll": true,
+ "label": null,
+ "multi": false,
+ "name": "Node",
+ "options": [
+
+ ],
+ "query": "label_values(kubernetes_io_hostname)",
+ "refresh": 1,
+ "regex": "",
+ "skipUrlSync": false,
+ "sort": 0,
+ "tagValuesQuery": "",
+ "tags": [
+
+ ],
+ "tagsQuery": "",
+ "type": "query",
+ "useTags": false
+ },
+ {
+ "allValue": ".*",
+ "current": {
+ "selected": false,
+ "tags": [
+
+ ],
+ "text": "All",
+ "value": "$__all"
+ },
+ "datasource": "prometheus - Juju generated source",
+ "definition": "label_values(namespace)",
+ "hide": 0,
+ "includeAll": true,
+ "label": null,
+ "multi": false,
+ "name": "Namespace",
+ "options": [
+
+ ],
+ "query": "label_values(namespace)",
+ "refresh": 1,
+ "regex": "",
+ "skipUrlSync": false,
+ "sort": 0,
+ "tagValuesQuery": "",
+ "tags": [
+
+ ],
+ "tagsQuery": "",
+ "type": "query",
+ "useTags": false
+ }
+ ]
+ },
+ "time": {
+ "from": "now-1h",
+ "to": "now"
+ },
+ "timepicker": {
+ "refresh_intervals": [
+ "5s",
+ "10s",
+ "30s",
+ "1m",
+ "5m",
+ "15m",
+ "30m",
+ "1h",
+ "2h",
+ "1d"
+ ],
+ "time_options": [
+ "5m",
+ "15m",
+ "1h",
+ "6h",
+ "12h",
+ "24h",
+ "2d",
+ "7d",
+ "30d"
+ ]
+ },
+ "timezone": "browser",
+ "title": "Kubernetes Metrics (via Prometheus)",
+ "version": 35
+ },
+ "overwrite": false
+}
diff --git a/kubernetes-master/templates/grafana/conditional/telegraf.json b/kubernetes-master/templates/grafana/conditional/telegraf.json
new file mode 100644
index 0000000..1e9c0f1
--- /dev/null
+++ b/kubernetes-master/templates/grafana/conditional/telegraf.json
@@ -0,0 +1,2094 @@
+{
+ "dashboard": {
+ "annotations": {
+ "list": [
+
+ ]
+ },
+ "description": "Derived from https://grafana.com/dashboards/941",
+ "editable": true,
+ "gnetId": null,
+ "graphTooltip": 0,
+ "hideControls": false,
+ "id": null,
+ "links": [
+
+ ],
+ "rows": [
+ {
+ "collapse": false,
+ "height": "250px",
+ "panels": [
+ {
+ "aliasColors": {
+
+ },
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "prometheus - Juju generated source",
+ "editable": true,
+ "error": false,
+ "fill": 1,
+ "grid": {
+
+ },
+ "id": 2,
+ "legend": {
+ "avg": false,
+ "current": false,
+ "max": false,
+ "min": false,
+ "show": true,
+ "total": false,
+ "values": false
+ },
+ "lines": true,
+ "linewidth": 2,
+ "links": [
+
+ ],
+ "nullPointMode": "connected",
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [
+
+ ],
+ "spaceLength": 10,
+ "span": 4,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "system_load5{host=~\"$node\"} ",
+ "format": "time_series",
+ "intervalFactor": 2,
+ "legendFormat": "{{host}}",
+ "refId": "A",
+ "step": 2
+ }
+ ],
+ "thresholds": [
+
+ ],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "Node load average 5m",
+ "tooltip": {
+ "msResolution": false,
+ "shared": true,
+ "sort": 0,
+ "value_type": "cumulative"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": [
+
+ ]
+ },
+ "yaxes": [
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ]
+ },
+ {
+ "aliasColors": {
+
+ },
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "prometheus - Juju generated source",
+ "editable": true,
+ "error": false,
+ "fill": 1,
+ "grid": {
+
+ },
+ "id": 3,
+ "legend": {
+ "avg": false,
+ "current": false,
+ "max": false,
+ "min": false,
+ "show": true,
+ "total": false,
+ "values": false
+ },
+ "lines": true,
+ "linewidth": 2,
+ "links": [
+
+ ],
+ "nullPointMode": "connected",
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [
+
+ ],
+ "spaceLength": 10,
+ "span": 4,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "system_load15{host=~\"$node\"} ",
+ "intervalFactor": 2,
+ "legendFormat": "{{host}}",
+ "refId": "A",
+ "step": 2
+ }
+ ],
+ "thresholds": [
+
+ ],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "Node load average 15m",
+ "tooltip": {
+ "msResolution": false,
+ "shared": true,
+ "sort": 0,
+ "value_type": "cumulative"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": [
+
+ ]
+ },
+ "yaxes": [
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ]
+ },
+ {
+ "aliasColors": {
+
+ },
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "prometheus - Juju generated source",
+ "editable": true,
+ "error": false,
+ "fill": 1,
+ "grid": {
+
+ },
+ "id": 1,
+ "legend": {
+ "avg": false,
+ "current": false,
+ "max": false,
+ "min": false,
+ "show": true,
+ "total": false,
+ "values": false
+ },
+ "lines": true,
+ "linewidth": 2,
+ "links": [
+
+ ],
+ "nullPointMode": "connected",
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [
+
+ ],
+ "spaceLength": 10,
+ "span": 4,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "system_load1{host=~\"$node\"} ",
+ "intervalFactor": 2,
+ "legendFormat": "{{host}}",
+ "refId": "A",
+ "step": 2
+ }
+ ],
+ "thresholds": [
+
+ ],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "Node load average 1m",
+ "tooltip": {
+ "msResolution": false,
+ "shared": true,
+ "sort": 0,
+ "value_type": "cumulative"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": [
+
+ ]
+ },
+ "yaxes": [
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ]
+ }
+ ],
+ "repeat": null,
+ "repeatIteration": null,
+ "repeatRowId": null,
+ "showTitle": true,
+ "title": "Load average",
+ "titleSize": "h6"
+ },
+ {
+ "collapse": true,
+ "height": "250px",
+ "panels": [
+ {
+ "aliasColors": {
+
+ },
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "prometheus - Juju generated source",
+ "editable": true,
+ "error": false,
+ "fill": 1,
+ "grid": {
+
+ },
+ "id": 4,
+ "legend": {
+ "avg": false,
+ "current": false,
+ "max": false,
+ "min": false,
+ "show": true,
+ "total": false,
+ "values": false
+ },
+ "lines": true,
+ "linewidth": 2,
+ "links": [
+
+ ],
+ "nullPointMode": "connected",
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [
+
+ ],
+ "spaceLength": 10,
+ "span": 4,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "processes_running{host=~\"$node\"} ",
+ "intervalFactor": 2,
+ "legendFormat": "{{host}}",
+ "refId": "A",
+ "step": 60
+ }
+ ],
+ "thresholds": [
+
+ ],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "Process running",
+ "tooltip": {
+ "msResolution": false,
+ "shared": true,
+ "sort": 0,
+ "value_type": "cumulative"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": [
+
+ ]
+ },
+ "yaxes": [
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ]
+ },
+ {
+ "aliasColors": {
+
+ },
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "prometheus - Juju generated source",
+ "editable": true,
+ "error": false,
+ "fill": 1,
+ "grid": {
+
+ },
+ "id": 5,
+ "legend": {
+ "avg": false,
+ "current": false,
+ "max": false,
+ "min": false,
+ "show": true,
+ "total": false,
+ "values": false
+ },
+ "lines": true,
+ "linewidth": 2,
+ "links": [
+
+ ],
+ "nullPointMode": "connected",
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [
+
+ ],
+ "spaceLength": 10,
+ "span": 4,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "processes_stopped{host=~\"$node\"} ",
+ "intervalFactor": 2,
+ "legendFormat": "{{host}}",
+ "refId": "A",
+ "step": 60
+ }
+ ],
+ "thresholds": [
+
+ ],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "Process stopped",
+ "tooltip": {
+ "msResolution": false,
+ "shared": true,
+ "sort": 0,
+ "value_type": "cumulative"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": [
+
+ ]
+ },
+ "yaxes": [
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": 0,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ]
+ },
+ {
+ "aliasColors": {
+
+ },
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "prometheus - Juju generated source",
+ "editable": true,
+ "error": false,
+ "fill": 1,
+ "grid": {
+
+ },
+ "id": 6,
+ "legend": {
+ "avg": false,
+ "current": false,
+ "max": false,
+ "min": false,
+ "show": true,
+ "total": false,
+ "values": false
+ },
+ "lines": true,
+ "linewidth": 2,
+ "links": [
+
+ ],
+ "nullPointMode": "connected",
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [
+
+ ],
+ "spaceLength": 10,
+ "span": 4,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "processes_paging{host=~\"$node\"} ",
+ "intervalFactor": 2,
+ "legendFormat": "{{host}}",
+ "refId": "A",
+ "step": 60
+ }
+ ],
+ "thresholds": [
+
+ ],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "Process waiting",
+ "tooltip": {
+ "msResolution": false,
+ "shared": true,
+ "sort": 0,
+ "value_type": "cumulative"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": [
+
+ ]
+ },
+ "yaxes": [
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": 0,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ]
+ }
+ ],
+ "repeat": null,
+ "repeatIteration": null,
+ "repeatRowId": null,
+ "showTitle": true,
+ "title": "Processes statistics",
+ "titleSize": "h6"
+ },
+ {
+ "collapse": false,
+ "height": "250px",
+ "panels": [
+ {
+ "aliasColors": {
+
+ },
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "prometheus - Juju generated source",
+ "editable": true,
+ "error": false,
+ "fill": 1,
+ "grid": {
+
+ },
+ "id": 7,
+ "legend": {
+ "avg": false,
+ "current": false,
+ "max": false,
+ "min": false,
+ "show": true,
+ "total": false,
+ "values": false
+ },
+ "lines": true,
+ "linewidth": 2,
+ "links": [
+
+ ],
+ "nullPointMode": "connected",
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [
+
+ ],
+ "spaceLength": 10,
+ "span": 4,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "cpu_usage_steal{cpu=\"cpu-total\", host=~\"$node\"}",
+ "intervalFactor": 2,
+ "legendFormat": "{{host}}",
+ "refId": "A",
+ "step": 2
+ }
+ ],
+ "thresholds": [
+
+ ],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "CPU steal",
+ "tooltip": {
+ "msResolution": false,
+ "shared": true,
+ "sort": 0,
+ "value_type": "cumulative"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": [
+
+ ]
+ },
+ "yaxes": [
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": 0,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ]
+ },
+ {
+ "aliasColors": {
+
+ },
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "prometheus - Juju generated source",
+ "editable": true,
+ "error": false,
+ "fill": 1,
+ "grid": {
+
+ },
+ "id": 8,
+ "legend": {
+ "avg": false,
+ "current": false,
+ "max": false,
+ "min": false,
+ "show": true,
+ "total": false,
+ "values": false
+ },
+ "lines": true,
+ "linewidth": 2,
+ "links": [
+
+ ],
+ "nullPointMode": "connected",
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [
+
+ ],
+ "spaceLength": 10,
+ "span": 4,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "cpu_usage_iowait{cpu=\"cpu-total\", host=~\"$node\"}",
+ "intervalFactor": 2,
+ "legendFormat": "{{host}}",
+ "refId": "A",
+ "step": 2
+ }
+ ],
+ "thresholds": [
+
+ ],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "CPU wait",
+ "tooltip": {
+ "msResolution": false,
+ "shared": true,
+ "sort": 0,
+ "value_type": "cumulative"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": [
+
+ ]
+ },
+ "yaxes": [
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": 0,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ]
+ },
+ {
+ "aliasColors": {
+
+ },
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "prometheus - Juju generated source",
+ "editable": true,
+ "error": false,
+ "fill": 1,
+ "grid": {
+
+ },
+ "id": 9,
+ "legend": {
+ "avg": false,
+ "current": false,
+ "max": false,
+ "min": false,
+ "show": true,
+ "total": false,
+ "values": false
+ },
+ "lines": true,
+ "linewidth": 2,
+ "links": [
+
+ ],
+ "nullPointMode": "connected",
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [
+
+ ],
+ "spaceLength": 10,
+ "span": 4,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "cpu_usage_user{cpu=\"cpu-total\", host=~\"$node\"}",
+ "intervalFactor": 2,
+ "legendFormat": "{{host}}",
+ "refId": "A",
+ "step": 2
+ }
+ ],
+ "thresholds": [
+
+ ],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "CPU user",
+ "tooltip": {
+ "msResolution": false,
+ "shared": true,
+ "sort": 0,
+ "value_type": "cumulative"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": [
+
+ ]
+ },
+ "yaxes": [
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": 0,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ]
+ },
+ {
+ "aliasColors": {
+
+ },
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "prometheus - Juju generated source",
+ "editable": true,
+ "error": false,
+ "fill": 1,
+ "grid": {
+
+ },
+ "id": 10,
+ "legend": {
+ "avg": false,
+ "current": false,
+ "max": false,
+ "min": false,
+ "show": true,
+ "total": false,
+ "values": false
+ },
+ "lines": true,
+ "linewidth": 2,
+ "links": [
+
+ ],
+ "nullPointMode": "connected",
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [
+
+ ],
+ "spaceLength": 10,
+ "span": 4,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "cpu_usage_system{cpu=\"cpu-total\", host=~\"$node\"}",
+ "intervalFactor": 2,
+ "legendFormat": "{{host}}",
+ "refId": "A",
+ "step": 2
+ }
+ ],
+ "thresholds": [
+
+ ],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "CPU system",
+ "tooltip": {
+ "msResolution": false,
+ "shared": true,
+ "sort": 0,
+ "value_type": "cumulative"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": [
+
+ ]
+ },
+ "yaxes": [
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": 0,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ]
+ },
+ {
+ "aliasColors": {
+
+ },
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "prometheus - Juju generated source",
+ "editable": true,
+ "error": false,
+ "fill": 1,
+ "grid": {
+
+ },
+ "id": 11,
+ "legend": {
+ "avg": false,
+ "current": false,
+ "max": false,
+ "min": false,
+ "show": true,
+ "total": false,
+ "values": false
+ },
+ "lines": true,
+ "linewidth": 2,
+ "links": [
+
+ ],
+ "nullPointMode": "connected",
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [
+
+ ],
+ "spaceLength": 10,
+ "span": 4,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "cpu_usage_softirq{cpu=\"cpu-total\", host=~\"$node\"}",
+ "intervalFactor": 2,
+ "legendFormat": "{{host}}",
+ "refId": "A",
+ "step": 2
+ }
+ ],
+ "thresholds": [
+
+ ],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "CPU soft interrupts",
+ "tooltip": {
+ "msResolution": false,
+ "shared": true,
+ "sort": 0,
+ "value_type": "cumulative"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": [
+
+ ]
+ },
+ "yaxes": [
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": 0,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ]
+ },
+ {
+ "aliasColors": {
+
+ },
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "prometheus - Juju generated source",
+ "editable": true,
+ "error": false,
+ "fill": 1,
+ "grid": {
+
+ },
+ "id": 12,
+ "legend": {
+ "avg": false,
+ "current": false,
+ "max": false,
+ "min": false,
+ "show": true,
+ "total": false,
+ "values": false
+ },
+ "lines": true,
+ "linewidth": 2,
+ "links": [
+
+ ],
+ "nullPointMode": "connected",
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [
+
+ ],
+ "spaceLength": 10,
+ "span": 4,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "cpu_usage_irq{cpu=\"cpu-total\", host=~\"$node\"}",
+ "intervalFactor": 2,
+ "legendFormat": "{{host}}",
+ "refId": "A",
+ "step": 2
+ }
+ ],
+ "thresholds": [
+
+ ],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "CPU interrupts",
+ "tooltip": {
+ "msResolution": false,
+ "shared": true,
+ "sort": 0,
+ "value_type": "cumulative"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": [
+
+ ]
+ },
+ "yaxes": [
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": 0,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ]
+ },
+ {
+ "aliasColors": {
+
+ },
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "prometheus - Juju generated source",
+ "editable": true,
+ "error": false,
+ "fill": 1,
+ "grid": {
+
+ },
+ "id": 13,
+ "legend": {
+ "avg": false,
+ "current": false,
+ "max": false,
+ "min": false,
+ "show": true,
+ "total": false,
+ "values": false
+ },
+ "lines": true,
+ "linewidth": 2,
+ "links": [
+
+ ],
+ "nullPointMode": "connected",
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [
+
+ ],
+ "spaceLength": 10,
+ "span": 6,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "cpu_usage_nice{cpu=\"cpu-total\", host=~\"$node\"}",
+ "intervalFactor": 2,
+ "legendFormat": "{{host}}",
+ "refId": "A",
+ "step": 2
+ }
+ ],
+ "thresholds": [
+
+ ],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "CPU nice",
+ "tooltip": {
+ "msResolution": false,
+ "shared": true,
+ "sort": 0,
+ "value_type": "cumulative"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": [
+
+ ]
+ },
+ "yaxes": [
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": 0,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ]
+ },
+ {
+ "aliasColors": {
+
+ },
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "prometheus - Juju generated source",
+ "editable": true,
+ "error": false,
+ "fill": 1,
+ "grid": {
+
+ },
+ "id": 14,
+ "legend": {
+ "avg": false,
+ "current": false,
+ "max": false,
+ "min": false,
+ "show": true,
+ "total": false,
+ "values": false
+ },
+ "lines": true,
+ "linewidth": 2,
+ "links": [
+
+ ],
+ "nullPointMode": "connected",
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [
+
+ ],
+ "spaceLength": 10,
+ "span": 6,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "cpu_usage_idle{cpu=\"cpu-total\", host=~\"$node\"}",
+ "intervalFactor": 2,
+ "legendFormat": "{{host}}",
+ "refId": "A",
+ "step": 2
+ }
+ ],
+ "thresholds": [
+
+ ],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "CPU Idle",
+ "tooltip": {
+ "msResolution": false,
+ "shared": true,
+ "sort": 0,
+ "value_type": "cumulative"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": [
+
+ ]
+ },
+ "yaxes": [
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": 0,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ]
+ }
+ ],
+ "repeat": null,
+ "repeatIteration": null,
+ "repeatRowId": null,
+ "showTitle": true,
+ "title": "CPU usage",
+ "titleSize": "h6"
+ },
+ {
+ "collapse": false,
+ "height": "250px",
+ "panels": [
+ {
+ "aliasColors": {
+
+ },
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "prometheus - Juju generated source",
+ "editable": true,
+ "error": false,
+ "fill": 1,
+ "grid": {
+
+ },
+ "id": 15,
+ "legend": {
+ "avg": false,
+ "current": false,
+ "max": false,
+ "min": false,
+ "show": true,
+ "total": false,
+ "values": false
+ },
+ "lines": true,
+ "linewidth": 2,
+ "links": [
+
+ ],
+ "nullPointMode": "null",
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [
+
+ ],
+ "spaceLength": 10,
+ "span": 6,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "mem_cached{host=~\"$node\"}",
+ "intervalFactor": 2,
+ "legendFormat": "{{host}}",
+ "refId": "A",
+ "step": 2
+ }
+ ],
+ "thresholds": [
+
+ ],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "Mem cached",
+ "tooltip": {
+ "msResolution": false,
+ "shared": true,
+ "sort": 0,
+ "value_type": "cumulative"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": [
+
+ ]
+ },
+ "yaxes": [
+ {
+ "format": "bytes",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": 0,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ]
+ },
+ {
+ "aliasColors": {
+
+ },
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "prometheus - Juju generated source",
+ "editable": true,
+ "error": false,
+ "fill": 1,
+ "grid": {
+
+ },
+ "id": 16,
+ "legend": {
+ "avg": false,
+ "current": false,
+ "max": false,
+ "min": false,
+ "show": true,
+ "total": false,
+ "values": false
+ },
+ "lines": true,
+ "linewidth": 2,
+ "links": [
+
+ ],
+ "nullPointMode": "null",
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [
+
+ ],
+ "spaceLength": 10,
+ "span": 6,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "mem_buffered{host=~\"$node\"}",
+ "intervalFactor": 2,
+ "legendFormat": "{{host}}",
+ "refId": "A",
+ "step": 2
+ }
+ ],
+ "thresholds": [
+
+ ],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "Mem buffered",
+ "tooltip": {
+ "msResolution": false,
+ "shared": true,
+ "sort": 0,
+ "value_type": "cumulative"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": [
+
+ ]
+ },
+ "yaxes": [
+ {
+ "format": "bytes",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": 0,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ]
+ },
+ {
+ "aliasColors": {
+
+ },
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "prometheus - Juju generated source",
+ "editable": true,
+ "error": false,
+ "fill": 1,
+ "grid": {
+
+ },
+ "id": 17,
+ "legend": {
+ "avg": false,
+ "current": false,
+ "max": false,
+ "min": false,
+ "show": true,
+ "total": false,
+ "values": false
+ },
+ "lines": true,
+ "linewidth": 2,
+ "links": [
+
+ ],
+ "nullPointMode": "null",
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [
+
+ ],
+ "spaceLength": 10,
+ "span": 6,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "mem_free{host=~\"$node\"}",
+ "intervalFactor": 2,
+ "legendFormat": "{{host}}",
+ "refId": "A",
+ "step": 2
+ }
+ ],
+ "thresholds": [
+
+ ],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "Mem free",
+ "tooltip": {
+ "msResolution": false,
+ "shared": true,
+ "sort": 0,
+ "value_type": "cumulative"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": [
+
+ ]
+ },
+ "yaxes": [
+ {
+ "format": "bytes",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": 0,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ]
+ },
+ {
+ "aliasColors": {
+
+ },
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "prometheus - Juju generated source",
+ "editable": true,
+ "error": false,
+ "fill": 1,
+ "grid": {
+
+ },
+ "id": 18,
+ "legend": {
+ "avg": false,
+ "current": false,
+ "max": false,
+ "min": false,
+ "show": true,
+ "total": false,
+ "values": false
+ },
+ "lines": true,
+ "linewidth": 2,
+ "links": [
+
+ ],
+ "nullPointMode": "null",
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [
+
+ ],
+ "spaceLength": 10,
+ "span": 6,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "mem_used{host=~\"$node\"}",
+ "intervalFactor": 2,
+ "legendFormat": "{{host}}",
+ "refId": "A",
+ "step": 2
+ }
+ ],
+ "thresholds": [
+
+ ],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "Mem used",
+ "tooltip": {
+ "msResolution": false,
+ "shared": true,
+ "sort": 0,
+ "value_type": "cumulative"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": [
+
+ ]
+ },
+ "yaxes": [
+ {
+ "format": "bytes",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": 0,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ]
+ }
+ ],
+ "repeat": null,
+ "repeatIteration": null,
+ "repeatRowId": null,
+ "showTitle": true,
+ "title": "Memory usage",
+ "titleSize": "h6"
+ },
+ {
+ "collapse": false,
+ "height": "250px",
+ "panels": [
+ {
+ "aliasColors": {
+
+ },
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "prometheus - Juju generated source",
+ "editable": true,
+ "error": false,
+ "fill": 1,
+ "grid": {
+
+ },
+ "id": 19,
+ "legend": {
+ "avg": false,
+ "current": false,
+ "max": false,
+ "min": false,
+ "show": true,
+ "total": false,
+ "values": false
+ },
+ "lines": true,
+ "linewidth": 2,
+ "links": [
+
+ ],
+ "nullPointMode": "null",
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [
+
+ ],
+ "spaceLength": 10,
+ "span": 6,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "rate(diskio_reads{name=~\"$disk\", host=~\"$node\"}[5m])",
+ "intervalFactor": 2,
+ "legendFormat": "Read {{host}} {{name}}",
+ "refId": "A",
+ "step": 2
+ },
+ {
+ "expr": "rate(diskio_writes{name=~\"$disk\", host=~\"$node\"}[5m])",
+ "intervalFactor": 2,
+ "legendFormat": "Write {{host}} {{name}}",
+ "refId": "B",
+ "step": 2
+ }
+ ],
+ "thresholds": [
+
+ ],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "Disk read/s and write/s",
+ "tooltip": {
+ "msResolution": false,
+ "shared": true,
+ "sort": 0,
+ "value_type": "cumulative"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": [
+
+ ]
+ },
+ "yaxes": [
+ {
+          "format": "iops",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": 0,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ]
+ },
+ {
+ "aliasColors": {
+
+ },
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "prometheus - Juju generated source",
+ "editable": true,
+ "error": false,
+ "fill": 1,
+ "grid": {
+
+ },
+ "id": 20,
+ "legend": {
+ "avg": false,
+ "current": false,
+ "max": false,
+ "min": false,
+ "show": true,
+ "total": false,
+ "values": false
+ },
+ "lines": true,
+ "linewidth": 2,
+ "links": [
+
+ ],
+ "nullPointMode": "null",
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [
+
+ ],
+ "spaceLength": 10,
+ "span": 6,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "rate(diskio_read_bytes{name=~\"$disk\", host=~\"$node\"}[5m])",
+ "intervalFactor": 2,
+ "legendFormat": "Read {{host}} {{name}}",
+ "refId": "A",
+ "step": 2
+ },
+ {
+ "expr": "rate(diskio_write_bytes{name=~\"$disk\", host=~\"$node\"}[5m])",
+ "intervalFactor": 2,
+ "legendFormat": "Write {{host}} {{name}}",
+ "refId": "B",
+ "step": 2
+ }
+ ],
+ "thresholds": [
+
+ ],
+ "timeFrom": null,
+ "timeShift": null,
+        "title": "Disk bytes read/s and written/s",
+ "tooltip": {
+ "msResolution": false,
+ "shared": true,
+ "sort": 0,
+ "value_type": "cumulative"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": [
+
+ ]
+ },
+ "yaxes": [
+ {
+ "format": "Bps",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": 0,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ]
+ }
+ ],
+ "repeat": null,
+ "repeatIteration": null,
+ "repeatRowId": null,
+ "showTitle": true,
+ "title": "Disk statistics",
+ "titleSize": "h6"
+ },
+ {
+ "collapse": false,
+ "height": "250px",
+ "panels": [
+ {
+ "aliasColors": {
+
+ },
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "prometheus - Juju generated source",
+ "editable": true,
+ "error": false,
+ "fill": 1,
+ "grid": {
+
+ },
+ "id": 22,
+ "legend": {
+ "avg": false,
+ "current": false,
+ "max": false,
+ "min": false,
+ "show": true,
+ "total": false,
+ "values": false
+ },
+ "lines": true,
+ "linewidth": 2,
+ "links": [
+
+ ],
+ "nullPointMode": "null",
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [
+
+ ],
+ "spaceLength": 10,
+ "span": 12,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "rate(net_bytes_sent{interface=~\"$interface\", host=~\"$node\"}[5m])*8",
+ "intervalFactor": 2,
+ "legendFormat": "Out {{host}} {{interface}}",
+ "refId": "A",
+ "step": 2
+ },
+ {
+ "expr": "rate(net_bytes_recv{interface=~\"$interface\", host=~\"$node\"}[5m])*8",
+ "intervalFactor": 2,
+ "legendFormat": "In {{host}} {{interface}}",
+ "refId": "B",
+ "step": 2
+ }
+ ],
+ "thresholds": [
+
+ ],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "Network load",
+ "tooltip": {
+ "msResolution": false,
+ "shared": true,
+ "sort": 0,
+ "value_type": "cumulative"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": [
+
+ ]
+ },
+ "yaxes": [
+ {
+ "format": "bps",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": 0,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ]
+ }
+ ],
+ "repeat": null,
+ "repeatIteration": null,
+ "repeatRowId": null,
+ "showTitle": true,
+ "title": "Network",
+ "titleSize": "h6"
+ }
+ ],
+ "schemaVersion": 14,
+ "style": "dark",
+ "tags": [
+ "Juju"
+ ],
+ "templating": {
+ "list": [
+ {
+ "allValue": ".*",
+ "current": {
+ "text": "All",
+ "value": "$__all"
+ },
+ "datasource": "prometheus - Juju generated source",
+ "hide": 0,
+ "includeAll": true,
+ "label": null,
+ "multi": false,
+ "name": "node",
+ "options": [
+
+ ],
+ "query": "label_values(host)",
+ "refresh": 1,
+ "regex": "",
+ "sort": 0,
+ "tagValuesQuery": "",
+ "tags": [
+
+ ],
+ "tagsQuery": "",
+ "type": "query",
+ "useTags": false
+ }
+ ]
+ },
+ "time": {
+ "from": "now-5m",
+ "to": "now"
+ },
+ "timepicker": {
+ "refresh_intervals": [
+ "5s",
+ "10s",
+ "30s",
+ "1m",
+ "5m",
+ "15m",
+ "30m",
+ "1h",
+ "2h",
+ "1d"
+ ],
+ "time_options": [
+ "5m",
+ "15m",
+ "1h",
+ "6h",
+ "12h",
+ "24h",
+ "2d",
+ "7d",
+ "30d"
+ ]
+ },
+ "timezone": "browser",
+ "title": "Node Metrics (via Telegraf)",
+ "version": 4
+ },
+ "overwrite": false
+}
diff --git a/kubernetes-master/templates/keystone-api-server-webhook.yaml b/kubernetes-master/templates/keystone-api-server-webhook.yaml
new file mode 100644
index 0000000..684c3ee
--- /dev/null
+++ b/kubernetes-master/templates/keystone-api-server-webhook.yaml
@@ -0,0 +1,16 @@
+apiVersion: v1
+kind: Config
+preferences: {}
+clusters:
+ - cluster:
+ server: https://{{ keystone_service_cluster_ip }}:8443/webhook
+ insecure-skip-tls-verify: true
+ name: webhook
+users:
+ - name: webhook
+contexts:
+ - context:
+ cluster: webhook
+ user: webhook
+ name: webhook
+current-context: webhook
diff --git a/kubernetes-master/templates/kube-keystone.sh b/kubernetes-master/templates/kube-keystone.sh
new file mode 100644
index 0000000..b9bd5bd
--- /dev/null
+++ b/kubernetes-master/templates/kube-keystone.sh
@@ -0,0 +1,51 @@
+# Copyright 2018 The Kubernetes Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Replace with your public address and port for keystone
+export OS_AUTH_URL="{{ protocol }}://{{ address }}:{{ port }}/v{{ version }}"
+#export OS_PROJECT_NAME=k8s
+#export OS_DOMAIN_NAME=k8s
+#export OS_USERNAME=myuser
+#export OS_PASSWORD=secure_pw
+get_keystone_token() {
+    data='{
+        "auth": {
+            "identity": {
+                "methods": ["password"],
+                "password": {
+                    "user": {
+                        "name": "'"${OS_USERNAME}"'",
+                        "domain": { "name": "'"${OS_DOMAIN_NAME}"'" },
+                        "password": "'"${OS_PASSWORD}"'"
+                    }
+                }
+            },
+            "scope": {
+                "project": {
+                    "domain": {
+                        "name": "'"${OS_DOMAIN_NAME}"'"
+                    },
+                    "name": "'"${OS_PROJECT_NAME}"'"
+                }
+            }
+        }
+    }'
+    token=$(curl -s -i -H "Content-Type: application/json" -d "${data}" "${OS_AUTH_URL}/auth/tokens" | grep -i 'X-Subject-Token')  # -i: HTTP/2 lowercases header field names
+    if [ -z "$token" ]; then
+        echo "Invalid authentication information"
+    else
+        echo $(echo ${token} | awk -F ': ' '{print $2}' | sed -e 's/[[:space:]]*$//')
+    fi
+}
+echo "Function get_keystone_token created. Type get_keystone_token in order to generate a login token for the Kubernetes dashboard."
diff --git a/kubernetes-master/templates/kube-proxy-iptables-fix.sh b/kubernetes-master/templates/kube-proxy-iptables-fix.sh
new file mode 100644
index 0000000..a6d219e
--- /dev/null
+++ b/kubernetes-master/templates/kube-proxy-iptables-fix.sh
@@ -0,0 +1,9 @@
+#!/bin/sh
+
+# add the chain, note that adding twice is ok as it will just error.
+/sbin/iptables -t nat -N KUBE-MARK-DROP
+
+# need to check the creation of the rule to ensure we only create it once.
+if ! /sbin/iptables -t nat -C KUBE-MARK-DROP -j MARK --set-xmark 0x8000/0x8000 &> /dev/null; then
+ /sbin/iptables -t nat -A KUBE-MARK-DROP -j MARK --set-xmark 0x8000/0x8000
+fi
diff --git a/kubernetes-master/templates/nagios_plugin.py b/kubernetes-master/templates/nagios_plugin.py
new file mode 100644
index 0000000..1b2a329
--- /dev/null
+++ b/kubernetes-master/templates/nagios_plugin.py
@@ -0,0 +1,49 @@
+#!/usr/bin/env python3
+
+# Copyright (C) 2019 Canonical Ltd.
+
+import nagios_plugin3
+import socket
+from subprocess import check_output
+
+snap_resources = ['kubectl', 'kube-apiserver', 'kube-controller-manager',
+ 'kube-scheduler', 'cdk-addons', 'kube-proxy']
+
+
+def check_snaps_installed():
+    """Raise nagios_plugin3.CriticalError if any required k8s snap is missing (via `snap list`)."""
+    for snap_name in snap_resources:
+        cmd = ['snap', 'list', snap_name]
+        try:
+            check_output(cmd).decode('UTF-8')
+        except Exception:
+            msg = '{} snap is not installed'.format(snap_name)
+            raise nagios_plugin3.CriticalError(msg)
+
+
+def test_connection(host, port):
+ try:
+ s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
+ s.settimeout(1)
+ s.connect((host, int(port)))
+ s.shutdown(socket.SHUT_RDWR)
+ finally:
+ s.close()
+
+
+def verify_remote_connection_to_apiserver():
+    try:
+        test_connection(socket.gethostbyname(socket.gethostname()), 6443)  # 6443: kube-apiserver secure port on this host
+    except Exception:
+        raise nagios_plugin3.CriticalError("Unable to reach "
+                                           "API server on remote port")
+
+
+def main():  # NOTE(review): try_check presumably maps CriticalError to Nagios output/exit status — confirm in nagios_plugin3
+    nagios_plugin3.try_check(check_snaps_installed)
+    nagios_plugin3.try_check(verify_remote_connection_to_apiserver)
+    print("OK - API server is up and accessible")
+
+
+if __name__ == "__main__":
+ main()
diff --git a/kubernetes-master/templates/prometheus/k8s-api-endpoints.yaml.j2 b/kubernetes-master/templates/prometheus/k8s-api-endpoints.yaml.j2
new file mode 100644
index 0000000..83625e6
--- /dev/null
+++ b/kubernetes-master/templates/prometheus/k8s-api-endpoints.yaml.j2
@@ -0,0 +1,16 @@
+job_name: 'k8s-api-endpoints'
+kubernetes_sd_configs:
+- api_server: https://{{k8s_api_address}}:{{k8s_api_port}}
+ role: endpoints
+ tls_config:
+ ca_file: __placeholder__
+ bearer_token: {{k8s_token}}
+scrape_interval: 30s
+scheme: https
+tls_config:
+ ca_file: __placeholder__
+bearer_token: {{k8s_token}}
+relabel_configs:
+- source_labels: [__meta_kubernetes_namespace, __meta_kubernetes_service_name, __meta_kubernetes_endpoint_port_name]
+ action: keep
+ regex: default;kubernetes;https
diff --git a/kubernetes-master/templates/prometheus/kube-state-metrics.yaml.j2 b/kubernetes-master/templates/prometheus/kube-state-metrics.yaml.j2
new file mode 100644
index 0000000..a0692fe
--- /dev/null
+++ b/kubernetes-master/templates/prometheus/kube-state-metrics.yaml.j2
@@ -0,0 +1,10 @@
+job_name: 'kube-state-metrics'
+scrape_interval: 30s
+scheme: https
+tls_config:
+ ca_file: __placeholder__
+bearer_token: {{k8s_token}}
+metrics_path: /api/v1/namespaces/kube-system/services/kube-state-metrics:8080/proxy/metrics
+static_configs:
+ - targets:
+ - {{k8s_api_address}}:{{k8s_api_port}}
diff --git a/kubernetes-master/templates/prometheus/kube-state-telemetry.yaml.j2 b/kubernetes-master/templates/prometheus/kube-state-telemetry.yaml.j2
new file mode 100644
index 0000000..799ef39
--- /dev/null
+++ b/kubernetes-master/templates/prometheus/kube-state-telemetry.yaml.j2
@@ -0,0 +1,10 @@
+job_name: 'kube-state-telemetry'
+scrape_interval: 30s
+scheme: https
+tls_config:
+ ca_file: __placeholder__
+bearer_token: {{k8s_token}}
+metrics_path: /api/v1/namespaces/kube-system/services/kube-state-metrics:8081/proxy/metrics
+static_configs:
+ - targets:
+ - {{k8s_api_address}}:{{k8s_api_port}}
diff --git a/kubernetes-master/templates/prometheus/kubernetes-cadvisor.yaml.j2 b/kubernetes-master/templates/prometheus/kubernetes-cadvisor.yaml.j2
new file mode 100644
index 0000000..0ff8ed5
--- /dev/null
+++ b/kubernetes-master/templates/prometheus/kubernetes-cadvisor.yaml.j2
@@ -0,0 +1,21 @@
+job_name: 'kubernetes-cadvisor'
+kubernetes_sd_configs:
+- api_server: https://{{k8s_api_address}}:{{k8s_api_port}}
+ role: node
+ tls_config:
+ ca_file: __placeholder__
+ bearer_token: {{k8s_token}}
+scrape_interval: 30s
+scheme: https
+tls_config:
+ ca_file: __placeholder__
+bearer_token: {{k8s_token}}
+relabel_configs:
+- action: labelmap
+ regex: __meta_kubernetes_node_label_(.+)
+- target_label: __address__
+ replacement: {{k8s_api_address}}:{{k8s_api_port}}
+- source_labels: [__meta_kubernetes_node_name]
+ regex: (.+)
+ target_label: __metrics_path__
+ replacement: /api/v1/nodes/$1/proxy/metrics/cadvisor
diff --git a/kubernetes-master/templates/prometheus/kubernetes-nodes.yaml.j2 b/kubernetes-master/templates/prometheus/kubernetes-nodes.yaml.j2
new file mode 100644
index 0000000..32ea077
--- /dev/null
+++ b/kubernetes-master/templates/prometheus/kubernetes-nodes.yaml.j2
@@ -0,0 +1,21 @@
+job_name: 'kubernetes-nodes'
+kubernetes_sd_configs:
+- api_server: https://{{k8s_api_address}}:{{k8s_api_port}}
+ role: node
+ tls_config:
+ ca_file: __placeholder__
+ bearer_token: {{k8s_token}}
+scrape_interval: 30s
+scheme: https
+tls_config:
+ ca_file: __placeholder__
+bearer_token: {{k8s_token}}
+relabel_configs:
+- action: labelmap
+ regex: __meta_kubernetes_node_label_(.+)
+- target_label: __address__
+ replacement: {{k8s_api_address}}:{{k8s_api_port}}
+- source_labels: [__meta_kubernetes_node_name]
+ regex: (.+)
+ target_label: __metrics_path__
+ replacement: /api/v1/nodes/$1/proxy/metrics
diff --git a/kubernetes-master/templates/rbac-pod-security-policy.yaml b/kubernetes-master/templates/rbac-pod-security-policy.yaml
new file mode 100644
index 0000000..d95eb4d
--- /dev/null
+++ b/kubernetes-master/templates/rbac-pod-security-policy.yaml
@@ -0,0 +1,55 @@
+apiVersion: policy/v1beta1
+kind: PodSecurityPolicy
+metadata:
+ name: privileged
+ annotations:
+ seccomp.security.alpha.kubernetes.io/allowedProfileNames: '*'
+spec:
+ privileged: true
+ allowPrivilegeEscalation: true
+ allowedCapabilities:
+ - '*'
+ volumes:
+ - '*'
+ hostNetwork: true
+ hostPorts:
+ - min: 0
+ max: 65535
+ hostIPC: true
+ hostPID: true
+ runAsUser:
+ rule: 'RunAsAny'
+ seLinux:
+ rule: 'RunAsAny'
+ supplementalGroups:
+ rule: 'RunAsAny'
+ fsGroup:
+ rule: 'RunAsAny'
+
+---
+
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+ name: privileged
+rules:
+- apiGroups: ['policy']
+ resources: ['podsecuritypolicies']
+ verbs: ['use']
+ resourceNames:
+ - privileged
+
+---
+
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+ name: privileged
+roleRef:
+ kind: ClusterRole
+ name: privileged
+ apiGroup: rbac.authorization.k8s.io
+subjects:
+- kind: Group
+ name: system:serviceaccounts
+ apiGroup: rbac.authorization.k8s.io
\ No newline at end of file
diff --git a/kubernetes-master/templates/rbac-proxy.yaml b/kubernetes-master/templates/rbac-proxy.yaml
new file mode 100644
index 0000000..7b570ca
--- /dev/null
+++ b/kubernetes-master/templates/rbac-proxy.yaml
@@ -0,0 +1,24 @@
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+ name: proxy-clusterrole-cdk-{{ juju_application }}
+rules:
+- apiGroups: [""]
+ resources:
+ - nodes/metrics
+ - nodes/proxy
+ verbs: ["get", "list", "watch", "create", "delete"]
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+ name: proxy-role-binding-cdk-{{ juju_application }}
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: proxy-clusterrole-cdk-{{ juju_application }}
+subjects: {% for proxy_user in proxy_users %}
+- apiGroup: rbac.authorization.k8s.io
+ kind: User
+ name: {{ proxy_user }}
+{% endfor %}
diff --git a/kubernetes-master/templates/rbd-persistent-volume.yaml b/kubernetes-master/templates/rbd-persistent-volume.yaml
new file mode 100644
index 0000000..84248e5
--- /dev/null
+++ b/kubernetes-master/templates/rbd-persistent-volume.yaml
@@ -0,0 +1,25 @@
+# JUJU Internal Template used to enlist RBD volumes from the
+# `create-rbd-pv` action. This is a temporary file on disk to enlist resources.
+apiVersion: v1
+kind: PersistentVolume
+metadata:
+ name: {{ RBD_NAME }}
+spec:
+ capacity:
+ storage: {{ RBD_SIZE }}M
+ accessModes:
+ - {{ PV_MODE }}
+ storageClassName: "rbd"
+ rbd:
+ monitors:
+ {% for host in monitors %}
+ - {{ host }}
+ {% endfor %}
+ pool: rbd
+ image: {{ RBD_NAME }}
+ user: admin
+ secretRef:
+ name: ceph-secret
+ fsType: {{ RBD_FS }}
+ readOnly: false
+ # persistentVolumeReclaimPolicy: Recycle
diff --git a/kubernetes-master/templates/service-always-restart.systemd-229.conf b/kubernetes-master/templates/service-always-restart.systemd-229.conf
new file mode 100644
index 0000000..d5cf4b1
--- /dev/null
+++ b/kubernetes-master/templates/service-always-restart.systemd-229.conf
@@ -0,0 +1,5 @@
+[Unit]
+StartLimitInterval=0
+
+[Service]
+RestartSec=10
diff --git a/kubernetes-master/templates/service-always-restart.systemd-latest.conf b/kubernetes-master/templates/service-always-restart.systemd-latest.conf
new file mode 100644
index 0000000..3dd37ab
--- /dev/null
+++ b/kubernetes-master/templates/service-always-restart.systemd-latest.conf
@@ -0,0 +1,5 @@
+[Unit]
+StartLimitIntervalSec=0
+
+[Service]
+RestartSec=10
diff --git a/kubernetes-master/templates/service-iptables-fix.service b/kubernetes-master/templates/service-iptables-fix.service
new file mode 100644
index 0000000..913aed3
--- /dev/null
+++ b/kubernetes-master/templates/service-iptables-fix.service
@@ -0,0 +1,11 @@
+[Unit]
+Description=Apply iptables rule for KUBE-MARK-DROP
+After=network.target
+
+[Service]
+Type=oneshot
+ExecStart=/usr/local/bin/kube-proxy-iptables-fix.sh
+RemainAfterExit=true
+
+[Install]
+WantedBy=multi-user.target
diff --git a/kubernetes-master/templates/system-monitoring-rbac-role.yaml b/kubernetes-master/templates/system-monitoring-rbac-role.yaml
new file mode 100644
index 0000000..ebbd95e
--- /dev/null
+++ b/kubernetes-master/templates/system-monitoring-rbac-role.yaml
@@ -0,0 +1,29 @@
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+ name: system:monitoring
+rules:
+- apiGroups: [""]
+ resources:
+ - "endpoints"
+ - "nodes"
+ - "nodes/proxy"
+ - "pods"
+ - "services"
+ - "services/proxy"
+ verbs: ["get", "list", "watch"]
+- nonResourceURLs: ["/metrics"]
+ verbs: ["get"]
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+ name: system:monitoring
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: system:monitoring
+subjects:
+- apiGroup: rbac.authorization.k8s.io
+ kind: User
+ name: system:monitoring
diff --git a/kubernetes-master/templates/vaultlocker-loop@.service b/kubernetes-master/templates/vaultlocker-loop@.service
new file mode 100644
index 0000000..8dd0230
--- /dev/null
+++ b/kubernetes-master/templates/vaultlocker-loop@.service
@@ -0,0 +1,12 @@
+[Install]
+RequiredBy=vaultlocker-decrypt@%i.service
+
+[Unit]
+Description=Ensure loop device for VaultLocker %i
+Before=vaultlocker-decrypt@%i.service
+
+[Service]
+Type=oneshot
+RemainAfterExit=true
+EnvironmentFile=/etc/vaultlocker/loop-envs/%i
+ExecStart=/sbin/losetup -f ${BACK_FILE}
diff --git a/kubernetes-master/templates/vaultlocker.conf.j2 b/kubernetes-master/templates/vaultlocker.conf.j2
new file mode 100644
index 0000000..911a54c
--- /dev/null
+++ b/kubernetes-master/templates/vaultlocker.conf.j2
@@ -0,0 +1,5 @@
+[vault]
+url = {{ vault_url }}
+approle = {{ role_id }}
+backend = {{ secret_backend }}
+secret_id = {{ secret_id }}
diff --git a/kubernetes-master/tox.ini b/kubernetes-master/tox.ini
new file mode 100644
index 0000000..805089a
--- /dev/null
+++ b/kubernetes-master/tox.ini
@@ -0,0 +1,18 @@
+[tox]
+envlist = py3
+skipsdist = true
+
+[testenv]
+basepython=python3
+envdir={toxworkdir}/py3
+deps=
+ ipdb
+ pytest
+ charms.reactive
+ pydoc-markdown
+ # needed to prevent apt installs during import
+ netifaces
+ psutil
+
+[testenv:docs]
+commands=python make_docs
diff --git a/kubernetes-master/version b/kubernetes-master/version
new file mode 100644
index 0000000..1dea0b1
--- /dev/null
+++ b/kubernetes-master/version
@@ -0,0 +1 @@
+e247aeff
\ No newline at end of file
diff --git a/kubernetes-master/wheelhouse.txt b/kubernetes-master/wheelhouse.txt
new file mode 100644
index 0000000..dacf089
--- /dev/null
+++ b/kubernetes-master/wheelhouse.txt
@@ -0,0 +1,29 @@
+# layer:basic
+# pip is pinned to <19.0 to avoid https://github.com/pypa/pip/issues/6164
+# even with installing setuptools before upgrading pip ends up with pip seeing
+# the older setuptools at the system level if include_system_packages is true
+pip>=18.1,<19.0
+# pin Jinja2 and PyYAML to the last versions supporting python 3.4 for trusty
+Jinja2<=2.10.1
+PyYAML<=5.2
+setuptools<42
+setuptools-scm<=1.17.0
+charmhelpers>=0.4.0,<1.0.0
+charms.reactive>=0.1.0,<2.0.0
+wheel<0.34
+# pin netaddr to avoid pulling importlib-resources
+netaddr<=0.7.19
+
+# layer:snap
+tenacity
+
+# layer:vault-kv
+hvac
+# needed to prevent apt installs during import
+netifaces
+psutil
+
+# kubernetes-master
+flask>=1.0.0,<2.0.0
+gunicorn>=20.0.0,<21.0.0
+
diff --git a/kubernetes-master/wheelhouse/Flask-1.1.2.tar.gz b/kubernetes-master/wheelhouse/Flask-1.1.2.tar.gz
new file mode 100644
index 0000000..e264330
Binary files /dev/null and b/kubernetes-master/wheelhouse/Flask-1.1.2.tar.gz differ
diff --git a/kubernetes-master/wheelhouse/Jinja2-2.10.1.tar.gz b/kubernetes-master/wheelhouse/Jinja2-2.10.1.tar.gz
new file mode 100644
index 0000000..ffd1054
Binary files /dev/null and b/kubernetes-master/wheelhouse/Jinja2-2.10.1.tar.gz differ
diff --git a/kubernetes-master/wheelhouse/MarkupSafe-1.1.1.tar.gz b/kubernetes-master/wheelhouse/MarkupSafe-1.1.1.tar.gz
new file mode 100644
index 0000000..a6dad8e
Binary files /dev/null and b/kubernetes-master/wheelhouse/MarkupSafe-1.1.1.tar.gz differ
diff --git a/kubernetes-master/wheelhouse/PyYAML-5.2.tar.gz b/kubernetes-master/wheelhouse/PyYAML-5.2.tar.gz
new file mode 100644
index 0000000..666d12a
Binary files /dev/null and b/kubernetes-master/wheelhouse/PyYAML-5.2.tar.gz differ
diff --git a/kubernetes-master/wheelhouse/Tempita-0.5.2.tar.gz b/kubernetes-master/wheelhouse/Tempita-0.5.2.tar.gz
new file mode 100644
index 0000000..755befc
Binary files /dev/null and b/kubernetes-master/wheelhouse/Tempita-0.5.2.tar.gz differ
diff --git a/kubernetes-master/wheelhouse/Werkzeug-1.0.1.tar.gz b/kubernetes-master/wheelhouse/Werkzeug-1.0.1.tar.gz
new file mode 100644
index 0000000..e92c86e
Binary files /dev/null and b/kubernetes-master/wheelhouse/Werkzeug-1.0.1.tar.gz differ
diff --git a/kubernetes-master/wheelhouse/certifi-2020.12.5.tar.gz b/kubernetes-master/wheelhouse/certifi-2020.12.5.tar.gz
new file mode 100644
index 0000000..3023d0a
Binary files /dev/null and b/kubernetes-master/wheelhouse/certifi-2020.12.5.tar.gz differ
diff --git a/kubernetes-master/wheelhouse/chardet-4.0.0.tar.gz b/kubernetes-master/wheelhouse/chardet-4.0.0.tar.gz
new file mode 100644
index 0000000..6bfc4e3
Binary files /dev/null and b/kubernetes-master/wheelhouse/chardet-4.0.0.tar.gz differ
diff --git a/kubernetes-master/wheelhouse/charmhelpers-0.20.21.tar.gz b/kubernetes-master/wheelhouse/charmhelpers-0.20.21.tar.gz
new file mode 100644
index 0000000..ca65d07
Binary files /dev/null and b/kubernetes-master/wheelhouse/charmhelpers-0.20.21.tar.gz differ
diff --git a/kubernetes-master/wheelhouse/charms.reactive-1.4.1.tar.gz b/kubernetes-master/wheelhouse/charms.reactive-1.4.1.tar.gz
new file mode 100644
index 0000000..03bc1fe
Binary files /dev/null and b/kubernetes-master/wheelhouse/charms.reactive-1.4.1.tar.gz differ
diff --git a/kubernetes-master/wheelhouse/click-7.1.2.tar.gz b/kubernetes-master/wheelhouse/click-7.1.2.tar.gz
new file mode 100644
index 0000000..698411c
Binary files /dev/null and b/kubernetes-master/wheelhouse/click-7.1.2.tar.gz differ
diff --git a/kubernetes-master/wheelhouse/gunicorn-20.1.0.tar.gz b/kubernetes-master/wheelhouse/gunicorn-20.1.0.tar.gz
new file mode 100644
index 0000000..b5da493
Binary files /dev/null and b/kubernetes-master/wheelhouse/gunicorn-20.1.0.tar.gz differ
diff --git a/kubernetes-master/wheelhouse/hvac-0.10.10.tar.gz b/kubernetes-master/wheelhouse/hvac-0.10.10.tar.gz
new file mode 100644
index 0000000..59d8fc7
Binary files /dev/null and b/kubernetes-master/wheelhouse/hvac-0.10.10.tar.gz differ
diff --git a/kubernetes-master/wheelhouse/idna-2.10.tar.gz b/kubernetes-master/wheelhouse/idna-2.10.tar.gz
new file mode 100644
index 0000000..e9a9e03
Binary files /dev/null and b/kubernetes-master/wheelhouse/idna-2.10.tar.gz differ
diff --git a/kubernetes-master/wheelhouse/itsdangerous-1.1.0.tar.gz b/kubernetes-master/wheelhouse/itsdangerous-1.1.0.tar.gz
new file mode 100644
index 0000000..13644ac
Binary files /dev/null and b/kubernetes-master/wheelhouse/itsdangerous-1.1.0.tar.gz differ
diff --git a/kubernetes-master/wheelhouse/netaddr-0.7.19.tar.gz b/kubernetes-master/wheelhouse/netaddr-0.7.19.tar.gz
new file mode 100644
index 0000000..cc31d9d
Binary files /dev/null and b/kubernetes-master/wheelhouse/netaddr-0.7.19.tar.gz differ
diff --git a/kubernetes-master/wheelhouse/netifaces-0.10.9.tar.gz b/kubernetes-master/wheelhouse/netifaces-0.10.9.tar.gz
new file mode 100644
index 0000000..97ea2e7
Binary files /dev/null and b/kubernetes-master/wheelhouse/netifaces-0.10.9.tar.gz differ
diff --git a/kubernetes-master/wheelhouse/pbr-5.6.0.tar.gz b/kubernetes-master/wheelhouse/pbr-5.6.0.tar.gz
new file mode 100644
index 0000000..0d5c965
Binary files /dev/null and b/kubernetes-master/wheelhouse/pbr-5.6.0.tar.gz differ
diff --git a/kubernetes-master/wheelhouse/pip-18.1.tar.gz b/kubernetes-master/wheelhouse/pip-18.1.tar.gz
new file mode 100644
index 0000000..a18192d
Binary files /dev/null and b/kubernetes-master/wheelhouse/pip-18.1.tar.gz differ
diff --git a/kubernetes-master/wheelhouse/psutil-5.8.0.tar.gz b/kubernetes-master/wheelhouse/psutil-5.8.0.tar.gz
new file mode 100644
index 0000000..7907528
Binary files /dev/null and b/kubernetes-master/wheelhouse/psutil-5.8.0.tar.gz differ
diff --git a/kubernetes-master/wheelhouse/pyaml-20.4.0.tar.gz b/kubernetes-master/wheelhouse/pyaml-20.4.0.tar.gz
new file mode 100644
index 0000000..0d5fd76
Binary files /dev/null and b/kubernetes-master/wheelhouse/pyaml-20.4.0.tar.gz differ
diff --git a/kubernetes-master/wheelhouse/requests-2.25.1.tar.gz b/kubernetes-master/wheelhouse/requests-2.25.1.tar.gz
new file mode 100644
index 0000000..9dcfcf2
Binary files /dev/null and b/kubernetes-master/wheelhouse/requests-2.25.1.tar.gz differ
diff --git a/kubernetes-master/wheelhouse/setuptools-41.6.0.zip b/kubernetes-master/wheelhouse/setuptools-41.6.0.zip
new file mode 100644
index 0000000..3345759
Binary files /dev/null and b/kubernetes-master/wheelhouse/setuptools-41.6.0.zip differ
diff --git a/kubernetes-master/wheelhouse/setuptools_scm-1.17.0.tar.gz b/kubernetes-master/wheelhouse/setuptools_scm-1.17.0.tar.gz
new file mode 100644
index 0000000..43b16c7
Binary files /dev/null and b/kubernetes-master/wheelhouse/setuptools_scm-1.17.0.tar.gz differ
diff --git a/kubernetes-master/wheelhouse/six-1.15.0.tar.gz b/kubernetes-master/wheelhouse/six-1.15.0.tar.gz
new file mode 100644
index 0000000..63329e4
Binary files /dev/null and b/kubernetes-master/wheelhouse/six-1.15.0.tar.gz differ
diff --git a/kubernetes-master/wheelhouse/tenacity-7.0.0.tar.gz b/kubernetes-master/wheelhouse/tenacity-7.0.0.tar.gz
new file mode 100644
index 0000000..2050c4d
Binary files /dev/null and b/kubernetes-master/wheelhouse/tenacity-7.0.0.tar.gz differ
diff --git a/kubernetes-master/wheelhouse/urllib3-1.26.4.tar.gz b/kubernetes-master/wheelhouse/urllib3-1.26.4.tar.gz
new file mode 100644
index 0000000..4d693e7
Binary files /dev/null and b/kubernetes-master/wheelhouse/urllib3-1.26.4.tar.gz differ
diff --git a/kubernetes-master/wheelhouse/wheel-0.33.6.tar.gz b/kubernetes-master/wheelhouse/wheel-0.33.6.tar.gz
new file mode 100644
index 0000000..c922c4e
Binary files /dev/null and b/kubernetes-master/wheelhouse/wheel-0.33.6.tar.gz differ
diff --git a/kubernetes-worker/.build.manifest b/kubernetes-worker/.build.manifest
new file mode 100644
index 0000000..4bf868d
--- /dev/null
+++ b/kubernetes-worker/.build.manifest
@@ -0,0 +1,1666 @@
+{
+ "layers": [
+ {
+ "branch": "refs/heads/master\nrefs/heads/stable",
+ "rev": "fcdcea4e5de3e1556c24e6704607862d0ba00a56",
+ "url": "layer:options"
+ },
+ {
+ "branch": "refs/heads/master\nrefs/heads/stable",
+ "rev": "623e69c7b432456fd4364f6e1835424fd6b5425e",
+ "url": "layer:basic"
+ },
+ {
+ "branch": "refs/heads/master\nrefs/heads/stable",
+ "rev": "527dd64fc4b9a6b0f8d80a3c2c0b865155050275",
+ "url": "layer:debug"
+ },
+ {
+ "branch": "refs/heads/master\nrefs/heads/stable",
+ "rev": "85d7cc4f7180d19df20e264358e920004cec192b",
+ "url": "layer:snap"
+ },
+ {
+ "branch": "refs/heads/master\nrefs/heads/stable",
+ "rev": "cc5bd3f49b2fa5e6c3ab2336763c313ec8bf083f",
+ "url": "layer:leadership"
+ },
+ {
+ "branch": "refs/heads/master\nrefs/heads/stable",
+ "rev": "f491ebe32b503c9712d2f8cd602dcce18f4aab46",
+ "url": "layer:metrics"
+ },
+ {
+ "branch": "refs/heads/master\nrefs/heads/stable",
+ "rev": "47dfcd4920ef6317850a4837ef0057ab0092a18e",
+ "url": "layer:nagios"
+ },
+ {
+ "branch": "refs/heads/master\nrefs/heads/stable",
+ "rev": "fb46dec78d390571753d21876bbba689bbbca9e4",
+ "url": "layer:tls-client"
+ },
+ {
+ "branch": "refs/heads/master\nrefs/heads/stable",
+ "rev": "b60102068c6f0ddbeaf8a308549a3e88cfa35688",
+ "url": "layer:cdk-service-kicker"
+ },
+ {
+ "branch": "refs/heads/master\nrefs/heads/stable",
+ "rev": "023c67941e18663a4df49f53edba809f43ba5069",
+ "url": "layer:cis-benchmark"
+ },
+ {
+ "branch": "refs/heads/master\nrefs/heads/stable",
+ "rev": "fa27fc93e0b08000963e83a6bfe49812d890dfcf",
+ "url": "layer:coordinator"
+ },
+ {
+ "branch": "refs/heads/master\nrefs/heads/stable",
+ "rev": "763297a075b3654f261af20c84b940d87f55354e",
+ "url": "layer:kubernetes-common"
+ },
+ {
+ "branch": "refs/heads/master\nrefs/heads/stable",
+ "rev": "809f06c6f6521be59e21859eaebeccd13f4d8c28",
+ "url": "layer:kubernetes-master-worker-base"
+ },
+ {
+ "branch": "refs/heads/stable",
+ "rev": "39ba9cb410333cb3b5693e83407a865fef96e45f",
+ "url": "kubernetes-worker"
+ },
+ {
+ "branch": "refs/heads/master\nrefs/heads/stable",
+ "rev": "2e0e1fdea6d83b55078200aacb537d60013ec5bc",
+ "url": "interface:nrpe-external-master"
+ },
+ {
+ "branch": "refs/heads/master\nrefs/heads/stable",
+ "rev": "d9850016d930a6d507b9fd45e2598d327922b140",
+ "url": "interface:tls-certificates"
+ },
+ {
+ "branch": "refs/heads/master\nrefs/heads/stable",
+ "rev": "6f927f10b97f45c566481cf57a29d433f17373e1",
+ "url": "interface:container-runtime"
+ },
+ {
+ "branch": "refs/heads/master\nrefs/heads/stable",
+ "rev": "632131b1f122daf6fb601fd4c9f1e4dbb1a92e09",
+ "url": "interface:http"
+ },
+ {
+ "branch": "refs/heads/master\nrefs/heads/stable",
+ "rev": "b941b3b542d78ad15aa40937b26c7bf727e1b39b",
+ "url": "interface:kubernetes-cni"
+ },
+ {
+ "branch": "refs/heads/master\nrefs/heads/stable",
+ "rev": "9bc32742b7720a755ada9526424e5d80092e1536",
+ "url": "interface:kube-dns"
+ },
+ {
+ "branch": "refs/heads/master\nrefs/heads/stable",
+ "rev": "2236a52be495a45b8f492bae37bbba50e468ef42",
+ "url": "interface:kube-control"
+ },
+ {
+ "branch": "refs/heads/master\nrefs/heads/stable",
+ "rev": "d8d8c7ef17c99ad53383f3cabf4cf5c8191d16f7",
+ "url": "interface:aws-integration"
+ },
+ {
+ "branch": "refs/heads/master\nrefs/heads/stable",
+ "rev": "d8f093cb2930edf5f93678253dca2da70b73b4fb",
+ "url": "interface:gcp-integration"
+ },
+ {
+ "branch": "refs/heads/master\nrefs/heads/stable",
+ "rev": "0d7a994f04b9e92ed847829ce8349b1a9c672e47",
+ "url": "interface:openstack-integration"
+ },
+ {
+ "branch": "refs/heads/master\nrefs/heads/stable",
+ "rev": "d5caea55ced6785f391215ee457c3a964eaf3f4b",
+ "url": "interface:vsphere-integration"
+ },
+ {
+ "branch": "refs/heads/master\nrefs/heads/stable",
+ "rev": "45b79107f7bd5f14b3b956d1f45f659a567b0999",
+ "url": "interface:azure-integration"
+ },
+ {
+ "branch": "refs/heads/master\nrefs/heads/stable",
+ "rev": "d5a2526fec9c3e8581f18b56e84a86871583e080",
+ "url": "interface:mount"
+ }
+ ],
+ "signatures": {
+ ".build.manifest": [
+ "build",
+ "dynamic",
+ "unchecked"
+ ],
+ ".github/workflows/build.yml": [
+ "kubernetes-worker",
+ "static",
+ "beab3a0e5eefddd3b3f3c11892725e819343b689b13a1872eaac1eb2d9ea083b"
+ ],
+ ".gitignore": [
+ "kubernetes-worker",
+ "static",
+ "e028ad966843fa4e09963c008d1200117caf1a42163c70795d9c55406f801d8c"
+ ],
+ ".travis.yml": [
+ "kubernetes-worker",
+ "static",
+ "c2bd1b88f26c88b883696cca155c28671359a256ed48b90a9ea724b376f2a829"
+ ],
+ ".travis/profile-update.yaml": [
+ "layer:basic",
+ "static",
+ "731e20aa59bf61c024d317ad630e478301a9386ccc0afe56e6c1c09db07ac83b"
+ ],
+ "CONTRIBUTING.md": [
+ "kubernetes-worker",
+ "static",
+ "bbe14e93d7db43d022103e3088036dc6fc1ca0554538a8205bcc07ef730d1ded"
+ ],
+ "HACKING.md": [
+ "kubernetes-worker",
+ "static",
+ "fc87d881098225b72f3ca6a1b8e01cce143b130e3dc8ac1ec484a0de19bc4d6c"
+ ],
+ "LICENSE": [
+ "kubernetes-worker",
+ "static",
+ "f02fd85a4171482f6bb1d6f87fe0704d3a2da93eca04afe39a0310a00c409902"
+ ],
+ "Makefile": [
+ "kubernetes-worker",
+ "static",
+ "b000b0f022a76c3d3ee955c3d467af17ed9a38235677dfd9193c1b0a843d4050"
+ ],
+ "README.md": [
+ "kubernetes-worker",
+ "static",
+ "f7bfc0dab834e67d9be6dda2e942a8c9c36d03bf9fef8b94d00ec9c51d91742d"
+ ],
+ "actions.yaml": [
+ "kubernetes-worker",
+ "dynamic",
+ "fc18958b027e694ba41fcb598ba7374e38be9b72526e2dae73473df5f630d489"
+ ],
+ "actions/cis-benchmark": [
+ "layer:cis-benchmark",
+ "static",
+ "fd3c1b8ba478b7f933605897ace8ae9f3ee102d9992f46f1e36d95eb1b094b84"
+ ],
+ "actions/debug": [
+ "layer:debug",
+ "static",
+ "db0a42dae4c5045b2c06385bf22209dfe0e2ded55822ef847d84b01d9ff2b046"
+ ],
+ "actions/microbot": [
+ "kubernetes-worker",
+ "static",
+ "cfc909b8a86c5517dec02bf40201c946efad02d2b7ec20a811f19aa5c4e3e1a0"
+ ],
+ "actions/pause": [
+ "kubernetes-worker",
+ "static",
+ "ba4a19dc800ff6381367ad2bd84b0ad0c06180a77834c762750a4bdedb9ff366"
+ ],
+ "actions/registry": [
+ "kubernetes-worker",
+ "static",
+ "178024c8442ad2d6ffd6f09b4f0278792a74933ac7caa611f897de9e5b04473b"
+ ],
+ "actions/resume": [
+ "kubernetes-worker",
+ "static",
+ "460d50796be763674cdadb5d88ccdc0c883eb21fb3cf86805c46da18922022a7"
+ ],
+ "actions/upgrade": [
+ "kubernetes-worker",
+ "static",
+ "641458372b97c9a0ef15c00aa8934d631f3c3b159d53f6e30cf8022bd3ff705e"
+ ],
+ "bin/charm-env": [
+ "layer:basic",
+ "static",
+ "fb6a20fac4102a6a4b6ffe903fcf666998f9a95a3647e6f9af7a1eeb44e58fd5"
+ ],
+ "bin/layer_option": [
+ "layer:options",
+ "static",
+ "e959bf29da4c5edff28b2602c24113c4df9e25cdc9f2aa3b5d46c8577b2a40cc"
+ ],
+ "build-cni-resources.sh": [
+ "kubernetes-worker",
+ "static",
+ "687a1ada1b9cc1aeefc83eb61939662a28fe4d6c1ce50c2a3828f4c46a90546d"
+ ],
+ "config.yaml": [
+ "kubernetes-worker",
+ "dynamic",
+ "b7e5bf623ac413025b9e868845c59e4b8b437506df3f12e41a43ecea751761e7"
+ ],
+ "copyright": [
+ "kubernetes-worker",
+ "static",
+ "badd4492d214890abd07b615f9e1a7a5ff3339b6c44655a826c746a9263ff00d"
+ ],
+ "copyright.layer-basic": [
+ "layer:basic",
+ "static",
+ "f6740d66fd60b60f2533d9fcb53907078d1e20920a0219afce7182e2a1c97629"
+ ],
+ "copyright.layer-coordinator": [
+ "layer:coordinator",
+ "static",
+ "7d212a095a6143559fb51f26bc40c2ba24b977190f65c7e5c835104f54d5dfc5"
+ ],
+ "copyright.layer-leadership": [
+ "layer:leadership",
+ "static",
+ "8ce407829378fc0f72ce44c7f624e4951c7ccb3db1cfb949bee026b701728cc9"
+ ],
+ "copyright.layer-metrics": [
+ "layer:metrics",
+ "static",
+ "08509dcbade4c20761ba4382ef23c831744dbab1d4a8dd94a1c2b4d4e913334c"
+ ],
+ "copyright.layer-nagios": [
+ "layer:nagios",
+ "static",
+ "47b2363574909e748bcc471d9004780ac084b301c154905654b5b6f088474749"
+ ],
+ "copyright.layer-options": [
+ "layer:options",
+ "static",
+ "f6740d66fd60b60f2533d9fcb53907078d1e20920a0219afce7182e2a1c97629"
+ ],
+ "copyright.layer-snap": [
+ "layer:snap",
+ "static",
+ "c71d239df91726fc519c6eb72d318ec65820627232b2f796219e87dcf35d0ab4"
+ ],
+ "debug-scripts/charm-unitdata": [
+ "layer:debug",
+ "static",
+ "c952b9d31f3942e4e722cb3e70f5119707b69b8e76cc44e2e906bc6d9aef49b7"
+ ],
+ "debug-scripts/filesystem": [
+ "layer:debug",
+ "static",
+ "d29cc8687f4422d024001c91b1ac756ee6bf8a2a125bc98db1199ba775eb8fd7"
+ ],
+ "debug-scripts/inotify": [
+ "kubernetes-worker",
+ "static",
+ "8991354951b11e32a9edf4736e7ca0d5948d6c30a9a83673193aadf829032223"
+ ],
+ "debug-scripts/juju-logs": [
+ "layer:debug",
+ "static",
+ "d260b35753a917368cb8c64c1312546a0a40ef49cba84c75bc6369549807c55e"
+ ],
+ "debug-scripts/juju-network-get": [
+ "layer:debug",
+ "static",
+ "6d849a1f8e6569bd0d5ea38299f7937cb8b36a5f505e3532f6c756eabeb8b6c5"
+ ],
+ "debug-scripts/kubectl": [
+ "kubernetes-worker",
+ "static",
+ "dadc2eae5818d818ac0b10029056d0db975406c17211864e08d1fa9780bb82c2"
+ ],
+ "debug-scripts/kubernetes-worker-services": [
+ "kubernetes-worker",
+ "static",
+ "fca2c57d754d9968c80308031fd9de7cfd2ddda37de5b2ff49ba1ccf333c5a58"
+ ],
+ "debug-scripts/network": [
+ "layer:debug",
+ "static",
+ "714afae5dcb45554ff1f05285501e3b7fcc656c8de51217e263b93dab25a9d2e"
+ ],
+ "debug-scripts/packages": [
+ "layer:debug",
+ "static",
+ "e8177102dc2ca853cb9272c1257cf2cfd5253d2a074e602d07c8bc4ea8e27c75"
+ ],
+ "debug-scripts/sysctl": [
+ "layer:debug",
+ "static",
+ "990035b320e09cc2228e1f2f880e795d51118b2959339eacddff9cbb74349c6a"
+ ],
+ "debug-scripts/systemd": [
+ "layer:debug",
+ "static",
+ "23ddf533198bf5b1ce723acde31ada806aab8539292b514c721d8ec08af74106"
+ ],
+ "debug-scripts/tls-certs": [
+ "layer:tls-client",
+ "static",
+ "ebf7f23ef6e39fb8e664bac2e9429e32aaeb673b4a51751724b835c007e85d3b"
+ ],
+ "exec.d/docker-compose/charm-pre-install": [
+ "layer:kubernetes-master-worker-base",
+ "static",
+ "32482c2a88209cbe512990db5fb4deabdcff88282bf7c7dd71a265383139fc77"
+ ],
+ "exec.d/vmware-patch/charm-pre-install": [
+ "kubernetes-worker",
+ "static",
+ "9f98f70669ddd949ff83c7b408b678ae170bf41e4faa2828b4d66bd47acca93e"
+ ],
+ "hooks/aws-relation-broken": [
+ "layer:basic",
+ "dynamic",
+ "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7"
+ ],
+ "hooks/aws-relation-changed": [
+ "layer:basic",
+ "dynamic",
+ "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7"
+ ],
+ "hooks/aws-relation-created": [
+ "layer:basic",
+ "dynamic",
+ "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7"
+ ],
+ "hooks/aws-relation-departed": [
+ "layer:basic",
+ "dynamic",
+ "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7"
+ ],
+ "hooks/aws-relation-joined": [
+ "layer:basic",
+ "dynamic",
+ "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7"
+ ],
+ "hooks/azure-relation-broken": [
+ "layer:basic",
+ "dynamic",
+ "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7"
+ ],
+ "hooks/azure-relation-changed": [
+ "layer:basic",
+ "dynamic",
+ "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7"
+ ],
+ "hooks/azure-relation-created": [
+ "layer:basic",
+ "dynamic",
+ "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7"
+ ],
+ "hooks/azure-relation-departed": [
+ "layer:basic",
+ "dynamic",
+ "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7"
+ ],
+ "hooks/azure-relation-joined": [
+ "layer:basic",
+ "dynamic",
+ "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7"
+ ],
+ "hooks/certificates-relation-broken": [
+ "layer:basic",
+ "dynamic",
+ "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7"
+ ],
+ "hooks/certificates-relation-changed": [
+ "layer:basic",
+ "dynamic",
+ "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7"
+ ],
+ "hooks/certificates-relation-created": [
+ "layer:basic",
+ "dynamic",
+ "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7"
+ ],
+ "hooks/certificates-relation-departed": [
+ "layer:basic",
+ "dynamic",
+ "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7"
+ ],
+ "hooks/certificates-relation-joined": [
+ "layer:basic",
+ "dynamic",
+ "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7"
+ ],
+ "hooks/cni-relation-broken": [
+ "layer:basic",
+ "dynamic",
+ "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7"
+ ],
+ "hooks/cni-relation-changed": [
+ "layer:basic",
+ "dynamic",
+ "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7"
+ ],
+ "hooks/cni-relation-created": [
+ "layer:basic",
+ "dynamic",
+ "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7"
+ ],
+ "hooks/cni-relation-departed": [
+ "layer:basic",
+ "dynamic",
+ "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7"
+ ],
+ "hooks/cni-relation-joined": [
+ "layer:basic",
+ "dynamic",
+ "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7"
+ ],
+ "hooks/collect-metrics": [
+ "layer:metrics",
+ "static",
+ "139fe18ce4cf2bed2155d3d0fce1c3b4cf1bc2598242cda42b3d772ec9bf8558"
+ ],
+ "hooks/config-changed": [
+ "layer:basic",
+ "dynamic",
+ "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7"
+ ],
+ "hooks/container-runtime-relation-broken": [
+ "layer:basic",
+ "dynamic",
+ "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7"
+ ],
+ "hooks/container-runtime-relation-changed": [
+ "layer:basic",
+ "dynamic",
+ "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7"
+ ],
+ "hooks/container-runtime-relation-created": [
+ "layer:basic",
+ "dynamic",
+ "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7"
+ ],
+ "hooks/container-runtime-relation-departed": [
+ "layer:basic",
+ "dynamic",
+ "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7"
+ ],
+ "hooks/container-runtime-relation-joined": [
+ "layer:basic",
+ "dynamic",
+ "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7"
+ ],
+ "hooks/coordinator-relation-broken": [
+ "layer:basic",
+ "dynamic",
+ "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7"
+ ],
+ "hooks/coordinator-relation-changed": [
+ "layer:coordinator",
+ "static",
+ "e5138d13492aa9a90379e8fce4a85c612481e7bc27a49958edbbfcaaf06f03a6"
+ ],
+ "hooks/coordinator-relation-created": [
+ "layer:basic",
+ "dynamic",
+ "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7"
+ ],
+ "hooks/coordinator-relation-departed": [
+ "layer:coordinator",
+ "static",
+ "e5138d13492aa9a90379e8fce4a85c612481e7bc27a49958edbbfcaaf06f03a6"
+ ],
+ "hooks/coordinator-relation-joined": [
+ "layer:basic",
+ "dynamic",
+ "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7"
+ ],
+ "hooks/gcp-relation-broken": [
+ "layer:basic",
+ "dynamic",
+ "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7"
+ ],
+ "hooks/gcp-relation-changed": [
+ "layer:basic",
+ "dynamic",
+ "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7"
+ ],
+ "hooks/gcp-relation-created": [
+ "layer:basic",
+ "dynamic",
+ "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7"
+ ],
+ "hooks/gcp-relation-departed": [
+ "layer:basic",
+ "dynamic",
+ "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7"
+ ],
+ "hooks/gcp-relation-joined": [
+ "layer:basic",
+ "dynamic",
+ "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7"
+ ],
+ "hooks/hook.template": [
+ "layer:basic",
+ "static",
+ "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7"
+ ],
+ "hooks/ingress-proxy-relation-broken": [
+ "layer:basic",
+ "dynamic",
+ "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7"
+ ],
+ "hooks/ingress-proxy-relation-changed": [
+ "layer:basic",
+ "dynamic",
+ "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7"
+ ],
+ "hooks/ingress-proxy-relation-created": [
+ "layer:basic",
+ "dynamic",
+ "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7"
+ ],
+ "hooks/ingress-proxy-relation-departed": [
+ "layer:basic",
+ "dynamic",
+ "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7"
+ ],
+ "hooks/ingress-proxy-relation-joined": [
+ "layer:basic",
+ "dynamic",
+ "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7"
+ ],
+ "hooks/install": [
+ "layer:basic",
+ "dynamic",
+ "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7"
+ ],
+ "hooks/kube-api-endpoint-relation-broken": [
+ "layer:basic",
+ "dynamic",
+ "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7"
+ ],
+ "hooks/kube-api-endpoint-relation-changed": [
+ "layer:basic",
+ "dynamic",
+ "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7"
+ ],
+ "hooks/kube-api-endpoint-relation-created": [
+ "layer:basic",
+ "dynamic",
+ "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7"
+ ],
+ "hooks/kube-api-endpoint-relation-departed": [
+ "layer:basic",
+ "dynamic",
+ "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7"
+ ],
+ "hooks/kube-api-endpoint-relation-joined": [
+ "layer:basic",
+ "dynamic",
+ "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7"
+ ],
+ "hooks/kube-control-relation-broken": [
+ "layer:basic",
+ "dynamic",
+ "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7"
+ ],
+ "hooks/kube-control-relation-changed": [
+ "layer:basic",
+ "dynamic",
+ "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7"
+ ],
+ "hooks/kube-control-relation-created": [
+ "layer:basic",
+ "dynamic",
+ "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7"
+ ],
+ "hooks/kube-control-relation-departed": [
+ "layer:basic",
+ "dynamic",
+ "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7"
+ ],
+ "hooks/kube-control-relation-joined": [
+ "layer:basic",
+ "dynamic",
+ "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7"
+ ],
+ "hooks/kube-dns-relation-broken": [
+ "layer:basic",
+ "dynamic",
+ "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7"
+ ],
+ "hooks/kube-dns-relation-changed": [
+ "layer:basic",
+ "dynamic",
+ "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7"
+ ],
+ "hooks/kube-dns-relation-created": [
+ "layer:basic",
+ "dynamic",
+ "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7"
+ ],
+ "hooks/kube-dns-relation-departed": [
+ "layer:basic",
+ "dynamic",
+ "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7"
+ ],
+ "hooks/kube-dns-relation-joined": [
+ "layer:basic",
+ "dynamic",
+ "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7"
+ ],
+ "hooks/leader-elected": [
+ "layer:coordinator",
+ "static",
+ "e5138d13492aa9a90379e8fce4a85c612481e7bc27a49958edbbfcaaf06f03a6"
+ ],
+ "hooks/leader-settings-changed": [
+ "layer:coordinator",
+ "static",
+ "e5138d13492aa9a90379e8fce4a85c612481e7bc27a49958edbbfcaaf06f03a6"
+ ],
+ "hooks/nfs-relation-broken": [
+ "layer:basic",
+ "dynamic",
+ "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7"
+ ],
+ "hooks/nfs-relation-changed": [
+ "layer:basic",
+ "dynamic",
+ "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7"
+ ],
+ "hooks/nfs-relation-created": [
+ "layer:basic",
+ "dynamic",
+ "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7"
+ ],
+ "hooks/nfs-relation-departed": [
+ "layer:basic",
+ "dynamic",
+ "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7"
+ ],
+ "hooks/nfs-relation-joined": [
+ "layer:basic",
+ "dynamic",
+ "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7"
+ ],
+ "hooks/nrpe-external-master-relation-broken": [
+ "layer:basic",
+ "dynamic",
+ "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7"
+ ],
+ "hooks/nrpe-external-master-relation-changed": [
+ "layer:basic",
+ "dynamic",
+ "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7"
+ ],
+ "hooks/nrpe-external-master-relation-created": [
+ "layer:basic",
+ "dynamic",
+ "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7"
+ ],
+ "hooks/nrpe-external-master-relation-departed": [
+ "layer:basic",
+ "dynamic",
+ "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7"
+ ],
+ "hooks/nrpe-external-master-relation-joined": [
+ "layer:basic",
+ "dynamic",
+ "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7"
+ ],
+ "hooks/openstack-relation-broken": [
+ "layer:basic",
+ "dynamic",
+ "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7"
+ ],
+ "hooks/openstack-relation-changed": [
+ "layer:basic",
+ "dynamic",
+ "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7"
+ ],
+ "hooks/openstack-relation-created": [
+ "layer:basic",
+ "dynamic",
+ "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7"
+ ],
+ "hooks/openstack-relation-departed": [
+ "layer:basic",
+ "dynamic",
+ "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7"
+ ],
+ "hooks/openstack-relation-joined": [
+ "layer:basic",
+ "dynamic",
+ "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7"
+ ],
+ "hooks/post-series-upgrade": [
+ "layer:basic",
+ "dynamic",
+ "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7"
+ ],
+ "hooks/pre-series-upgrade": [
+ "layer:basic",
+ "dynamic",
+ "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7"
+ ],
+ "hooks/relations/aws-integration/.gitignore": [
+ "interface:aws-integration",
+ "static",
+ "315971ad9cc5d6ada2391f0940e1800149b211a18be3c7a8f396735d7978702b"
+ ],
+ "hooks/relations/aws-integration/LICENSE": [
+ "interface:aws-integration",
+ "static",
+ "cfc7749b96f63bd31c3c42b5c471bf756814053e847c10f3eb003417bc523d30"
+ ],
+ "hooks/relations/aws-integration/README.md": [
+ "interface:aws-integration",
+ "static",
+ "1585d72b136158ce0741fc2ce0d7710c1ec55662f846afe2e768a4708c51057e"
+ ],
+ "hooks/relations/aws-integration/__init__.py": [
+ "interface:aws-integration",
+ "static",
+ "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"
+ ],
+ "hooks/relations/aws-integration/copyright": [
+ "interface:aws-integration",
+ "static",
+ "7c0e36e618a8544faaaa3f8e0533c2f1f4a18bcacbdd8b99b537742e6b587d58"
+ ],
+ "hooks/relations/aws-integration/docs/provides.md": [
+ "interface:aws-integration",
+ "static",
+ "a7669f49156173c27ede87105f6e65a07e1e5e41f3c154a24e1a82f307f65073"
+ ],
+ "hooks/relations/aws-integration/docs/requires.md": [
+ "interface:aws-integration",
+ "static",
+ "09553e5f07f216e5234125fdf38a21af00ab11349cdb788b21703ae72b0aeed1"
+ ],
+ "hooks/relations/aws-integration/interface.yaml": [
+ "interface:aws-integration",
+ "static",
+ "4449f48e5aaa99c0bb3e8e1c9833d11d3b20fc5f81ae1f15b6442af5ec873167"
+ ],
+ "hooks/relations/aws-integration/make_docs": [
+ "interface:aws-integration",
+ "static",
+ "b471fefc7eaa5c377d47b2b63481d6c8f4c5e9d224428efe93c5abbd13a0817d"
+ ],
+ "hooks/relations/aws-integration/provides.py": [
+ "interface:aws-integration",
+ "static",
+ "ee8f91b281d9112999f3d0e1d2ac17964fca3af5102fe5b072f3f3659b932ab7"
+ ],
+ "hooks/relations/aws-integration/pydocmd.yml": [
+ "interface:aws-integration",
+ "static",
+ "8c242cde2b2517c74de8ad6b1b90d2f6d97b2eb86c54edaf2eb8a8f7d32913e8"
+ ],
+ "hooks/relations/aws-integration/requires.py": [
+ "interface:aws-integration",
+ "static",
+ "3006d6a2607bc15507bec3e6144093c6938a51a22eee1f550d714ff702728c39"
+ ],
+ "hooks/relations/azure-integration/.gitignore": [
+ "interface:azure-integration",
+ "static",
+ "9653f2820c79d92ac3518eedd0e1f43ffec128d5df9216c25d906fcba8ee46b8"
+ ],
+ "hooks/relations/azure-integration/LICENSE": [
+ "interface:azure-integration",
+ "static",
+ "cfc7749b96f63bd31c3c42b5c471bf756814053e847c10f3eb003417bc523d30"
+ ],
+ "hooks/relations/azure-integration/README.md": [
+ "interface:azure-integration",
+ "static",
+ "c7799dba9471709e086dcd2ea272ad7a6e33f5058d875ce2bf5b3a6939d4a1e7"
+ ],
+ "hooks/relations/azure-integration/__init__.py": [
+ "interface:azure-integration",
+ "static",
+ "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"
+ ],
+ "hooks/relations/azure-integration/copyright": [
+ "interface:azure-integration",
+ "static",
+ "7c0e36e618a8544faaaa3f8e0533c2f1f4a18bcacbdd8b99b537742e6b587d58"
+ ],
+ "hooks/relations/azure-integration/docs/provides.md": [
+ "interface:azure-integration",
+ "static",
+ "60ae63187cac32c00d9f462f1723c9487960c728beae871f1a409c92196cc1f5"
+ ],
+ "hooks/relations/azure-integration/docs/requires.md": [
+ "interface:azure-integration",
+ "static",
+ "b01e313c8ce3d02093e851bd84d5e8b7ae77b300c4b06b5048bddc78c1ad3eb3"
+ ],
+ "hooks/relations/azure-integration/interface.yaml": [
+ "interface:azure-integration",
+ "static",
+ "cea5bfd87c278bd3f2e8dc00e654930f06d2bd91ef731a063edea14b04d9128a"
+ ],
+ "hooks/relations/azure-integration/make_docs": [
+ "interface:azure-integration",
+ "static",
+ "e76f4a64c2fdc4a9f97a57d6515b4a25f9404d7043f2792db5206bc44213927c"
+ ],
+ "hooks/relations/azure-integration/provides.py": [
+ "interface:azure-integration",
+ "static",
+ "a3a1de7f79c5f2cc37f2dff450d8e9b2ce36c63c0328bb6bedd2ade7519a7442"
+ ],
+ "hooks/relations/azure-integration/pydocmd.yml": [
+ "interface:azure-integration",
+ "static",
+ "4c17085efb4ec328891b49257413eed4d9a552eeea8e589509e48081effe51ed"
+ ],
+ "hooks/relations/azure-integration/requires.py": [
+ "interface:azure-integration",
+ "static",
+ "112bfa057cdcf91a812dea080330e9323f4d7e4b1bcacfd69b3ad95dd2274cbb"
+ ],
+ "hooks/relations/container-runtime/.gitignore": [
+ "interface:container-runtime",
+ "static",
+ "a2ebfecdb6c1b58267fbe97e6e2ac02c2b963df7673fc1047270f0f0cff16732"
+ ],
+ "hooks/relations/container-runtime/LICENSE": [
+ "interface:container-runtime",
+ "static",
+ "c71d239df91726fc519c6eb72d318ec65820627232b2f796219e87dcf35d0ab4"
+ ],
+ "hooks/relations/container-runtime/README.md": [
+ "interface:container-runtime",
+ "static",
+ "44273265818229d2c858c3af0e0eee3a7df05aaa9ab20d28c3872190d4b48611"
+ ],
+ "hooks/relations/container-runtime/__init__.py": [
+ "interface:container-runtime",
+ "static",
+ "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"
+ ],
+ "hooks/relations/container-runtime/interface.yaml": [
+ "interface:container-runtime",
+ "static",
+ "e5343dcb11a6817a6050df4ea1c463eeaa0dd4777098566d4e27b056775426c6"
+ ],
+ "hooks/relations/container-runtime/provides.py": [
+ "interface:container-runtime",
+ "static",
+ "4e818da222f507604179a828629787a1250083c847277f6b5b8e028cfbbb6d06"
+ ],
+ "hooks/relations/container-runtime/requires.py": [
+ "interface:container-runtime",
+ "static",
+ "95285168b02f1f70be15c03098833a85e60fa1658ed72a46acd42e8e85ded761"
+ ],
+ "hooks/relations/coordinator/peers.py": [
+ "layer:coordinator",
+ "static",
+ "d615c442396422a30a0c5f7639750d15bb59247ae5d9362c4f5dc8dd2cc7fff2"
+ ],
+ "hooks/relations/gcp-integration/.gitignore": [
+ "interface:gcp-integration",
+ "static",
+ "9653f2820c79d92ac3518eedd0e1f43ffec128d5df9216c25d906fcba8ee46b8"
+ ],
+ "hooks/relations/gcp-integration/LICENSE": [
+ "interface:gcp-integration",
+ "static",
+ "cfc7749b96f63bd31c3c42b5c471bf756814053e847c10f3eb003417bc523d30"
+ ],
+ "hooks/relations/gcp-integration/README.md": [
+ "interface:gcp-integration",
+ "static",
+ "dab3f4a03f02dec0095883054780e3e3f1bf63262b06a9fd499364a3db8b1e97"
+ ],
+ "hooks/relations/gcp-integration/__init__.py": [
+ "interface:gcp-integration",
+ "static",
+ "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"
+ ],
+ "hooks/relations/gcp-integration/copyright": [
+ "interface:gcp-integration",
+ "static",
+ "7c0e36e618a8544faaaa3f8e0533c2f1f4a18bcacbdd8b99b537742e6b587d58"
+ ],
+ "hooks/relations/gcp-integration/docs/provides.md": [
+ "interface:gcp-integration",
+ "static",
+ "a67cda4094b4d601c8de63cf099ba2e83fecf3a8382e88f44e58b98be8872fa6"
+ ],
+ "hooks/relations/gcp-integration/docs/requires.md": [
+ "interface:gcp-integration",
+ "static",
+ "d7e6d7dc90b74d35bf2bd10b00b3ba289ab856dc79ec51046508a85b9dda35a3"
+ ],
+ "hooks/relations/gcp-integration/interface.yaml": [
+ "interface:gcp-integration",
+ "static",
+ "368e8ade9267b905dcb2e6843e7ed61bd6d246f0b0c18942e729f546d5db2260"
+ ],
+ "hooks/relations/gcp-integration/make_docs": [
+ "interface:gcp-integration",
+ "static",
+ "5bf011da5045c31da97a67b8633d30ea90adc6c0d4d823f839fce6e07e5fe222"
+ ],
+ "hooks/relations/gcp-integration/provides.py": [
+ "interface:gcp-integration",
+ "static",
+ "839f15cf978cf94343772889846ad3e2b8375372ef25ed08036207e5608b1f48"
+ ],
+ "hooks/relations/gcp-integration/pydocmd.yml": [
+ "interface:gcp-integration",
+ "static",
+ "2d5a524cbde5ccf732b67382a85deb7c26dfb92315c30d26c2b2d5632a2a8f38"
+ ],
+ "hooks/relations/gcp-integration/requires.py": [
+ "interface:gcp-integration",
+ "static",
+ "79c75c6c76b37bc5ac486ac2e14f853223c4c603850d2f231f187ab255cbdbf0"
+ ],
+ "hooks/relations/http/.gitignore": [
+ "interface:http",
+ "static",
+ "83b4ca18cc39800b1d260b5633cd0252e21501b21e7c33e718db44f1a68a09b8"
+ ],
+ "hooks/relations/http/README.md": [
+ "interface:http",
+ "static",
+ "9c95320ad040745374fc03e972077f52c27e07eb0386ec93ae19bd50dca24c0d"
+ ],
+ "hooks/relations/http/__init__.py": [
+ "interface:http",
+ "static",
+ "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"
+ ],
+ "hooks/relations/http/interface.yaml": [
+ "interface:http",
+ "static",
+ "d0b64038b85b7791ee4f3a42d73ffc8c208f206f73f899cbf33a519d12f9ad13"
+ ],
+ "hooks/relations/http/provides.py": [
+ "interface:http",
+ "static",
+ "8c72cd8a5a6ea24f53b6dba11f4353c75265bfa7d3ecc2dd096c8963eab8c877"
+ ],
+ "hooks/relations/http/requires.py": [
+ "interface:http",
+ "static",
+ "76cc886368eaf9c2403a6dc46b40531c3f4eaf67b08829f890c57cb645430abd"
+ ],
+ "hooks/relations/kube-control/.travis.yml": [
+ "interface:kube-control",
+ "static",
+ "c2bd1b88f26c88b883696cca155c28671359a256ed48b90a9ea724b376f2a829"
+ ],
+ "hooks/relations/kube-control/README.md": [
+ "interface:kube-control",
+ "static",
+ "66ee58f59efceefa21f7f2d7f88c1d75c07a16bbec8d09a83a7fda6373eab421"
+ ],
+ "hooks/relations/kube-control/__init__.py": [
+ "interface:kube-control",
+ "static",
+ "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"
+ ],
+ "hooks/relations/kube-control/interface.yaml": [
+ "interface:kube-control",
+ "static",
+ "07e3d781283ecbb59c780cc8e4aeb9f030f22d2db6c28d731b74a36ab126960d"
+ ],
+ "hooks/relations/kube-control/provides.py": [
+ "interface:kube-control",
+ "static",
+ "5dffb8504d0993ad756b0631fd82ef465dc9127641b448bea76596fc6f3e55c4"
+ ],
+ "hooks/relations/kube-control/requires.py": [
+ "interface:kube-control",
+ "static",
+ "496ed9b2d4f6fef2e1e26b53b8f8c97e67b9a96b4fcfcb40ef671d2469b983e3"
+ ],
+ "hooks/relations/kube-dns/README.md": [
+ "interface:kube-dns",
+ "static",
+ "f02265c0931c5582cbad911050ee1578c370e4ecaffdbf56d11505f97ce44fee"
+ ],
+ "hooks/relations/kube-dns/__init__.py": [
+ "interface:kube-dns",
+ "static",
+ "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"
+ ],
+ "hooks/relations/kube-dns/interface.yaml": [
+ "interface:kube-dns",
+ "static",
+ "e4ca8faafe4cce43eed862d35346780df4cba4eb243baaf5aecd891514deb26d"
+ ],
+ "hooks/relations/kube-dns/provides.py": [
+ "interface:kube-dns",
+ "static",
+ "f0ea4f0610779a70860d5257f0760f62ea2ec682c5f005ba5afff92c9824aa36"
+ ],
+ "hooks/relations/kube-dns/requires.py": [
+ "interface:kube-dns",
+ "static",
+ "38b819b7ee98c3c38142d2cc8122dedd9d8c0f34767c5cc11392a564f38db370"
+ ],
+ "hooks/relations/kubernetes-cni/.gitignore": [
+ "interface:kubernetes-cni",
+ "static",
+ "cf237c7aff44efbe6e502e645c3e06da03a69d7bdeb43392108ef3348143417e"
+ ],
+ "hooks/relations/kubernetes-cni/.travis.yml": [
+ "interface:kubernetes-cni",
+ "static",
+ "c2bd1b88f26c88b883696cca155c28671359a256ed48b90a9ea724b376f2a829"
+ ],
+ "hooks/relations/kubernetes-cni/README.md": [
+ "interface:kubernetes-cni",
+ "static",
+ "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"
+ ],
+ "hooks/relations/kubernetes-cni/__init__.py": [
+ "interface:kubernetes-cni",
+ "static",
+ "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"
+ ],
+ "hooks/relations/kubernetes-cni/interface.yaml": [
+ "interface:kubernetes-cni",
+ "static",
+ "03affdaf7e879adfdf8c434aa31d40faa6d2872faa7dfd93a5d3a1ebae02487d"
+ ],
+ "hooks/relations/kubernetes-cni/provides.py": [
+ "interface:kubernetes-cni",
+ "static",
+ "4c3fc3f06a42a2f67fc03c4bc1b4c617021dc1ebb7111527ce6d9cd523b0c40e"
+ ],
+ "hooks/relations/kubernetes-cni/requires.py": [
+ "interface:kubernetes-cni",
+ "static",
+ "c5fdd7a0eae100833ae6c79474f931803466cd5b206cf8f456cd6f2716d1d2fa"
+ ],
+ "hooks/relations/mount/.gitignore": [
+ "interface:mount",
+ "static",
+ "f107e9960f299957deb6087dbc043b5ca51a7e78f5895f9444bb5bf91a6b579d"
+ ],
+ "hooks/relations/mount/LICENSE": [
+ "interface:mount",
+ "static",
+ "c71d239df91726fc519c6eb72d318ec65820627232b2f796219e87dcf35d0ab4"
+ ],
+ "hooks/relations/mount/README.md": [
+ "interface:mount",
+ "static",
+ "b8dc1667fe75f4339b4a6cfcb2272eb2c066268c1de08d24dd95880c0ba32e2d"
+ ],
+ "hooks/relations/mount/__init__.py": [
+ "interface:mount",
+ "static",
+ "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"
+ ],
+ "hooks/relations/mount/copyright": [
+ "interface:mount",
+ "static",
+ "7c0e36e618a8544faaaa3f8e0533c2f1f4a18bcacbdd8b99b537742e6b587d58"
+ ],
+ "hooks/relations/mount/interface.yaml": [
+ "interface:mount",
+ "static",
+ "038465e3afcdc6344a43fe5e224cb3468866e311d9c0c83920b4454c4ac8b602"
+ ],
+ "hooks/relations/mount/provides.py": [
+ "interface:mount",
+ "static",
+ "39a3c6f245f2df8e3df82f7995207eaec06e0beec4bc6c412d30c777a7794e88"
+ ],
+ "hooks/relations/mount/requires.py": [
+ "interface:mount",
+ "static",
+ "c2e9ad42d6009818211bb28d11e365f90b073829d5cc847998060b6009e37ff3"
+ ],
+ "hooks/relations/nrpe-external-master/README.md": [
+ "interface:nrpe-external-master",
+ "static",
+ "d8ed3bc7334f6581b12b6091923f58e6f5ef62075a095a4e78fb8f434a948636"
+ ],
+ "hooks/relations/nrpe-external-master/__init__.py": [
+ "interface:nrpe-external-master",
+ "static",
+ "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"
+ ],
+ "hooks/relations/nrpe-external-master/interface.yaml": [
+ "interface:nrpe-external-master",
+ "static",
+ "894f24ba56148044dae5b7febf874b427d199239bcbe1f2f55c3db06bb77b5f0"
+ ],
+ "hooks/relations/nrpe-external-master/provides.py": [
+ "interface:nrpe-external-master",
+ "static",
+ "e6ba708d05b227b139a86be59c83ed95a2bad030bc81e5819167ba5e1e67ecd4"
+ ],
+ "hooks/relations/nrpe-external-master/requires.py": [
+ "interface:nrpe-external-master",
+ "static",
+ "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"
+ ],
+ "hooks/relations/openstack-integration/.gitignore": [
+ "interface:openstack-integration",
+ "static",
+ "9653f2820c79d92ac3518eedd0e1f43ffec128d5df9216c25d906fcba8ee46b8"
+ ],
+ "hooks/relations/openstack-integration/LICENSE": [
+ "interface:openstack-integration",
+ "static",
+ "cfc7749b96f63bd31c3c42b5c471bf756814053e847c10f3eb003417bc523d30"
+ ],
+ "hooks/relations/openstack-integration/README.md": [
+ "interface:openstack-integration",
+ "static",
+ "ca58e21bd973f6e65f7a8a06b4aeabd50bf137ab6fab9c8defa8789b02df3aa5"
+ ],
+ "hooks/relations/openstack-integration/__init__.py": [
+ "interface:openstack-integration",
+ "static",
+ "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"
+ ],
+ "hooks/relations/openstack-integration/copyright": [
+ "interface:openstack-integration",
+ "static",
+ "7c0e36e618a8544faaaa3f8e0533c2f1f4a18bcacbdd8b99b537742e6b587d58"
+ ],
+ "hooks/relations/openstack-integration/docs/provides.md": [
+ "interface:openstack-integration",
+ "static",
+ "ec4b81da3dfeac892f94053d753b56e504f5fd9c6ec4e743efa40efade3aa651"
+ ],
+ "hooks/relations/openstack-integration/docs/requires.md": [
+ "interface:openstack-integration",
+ "static",
+ "95424fe767a26e3208800b4099f8768212b0a72b989ee145f181b67d678e3bbe"
+ ],
+ "hooks/relations/openstack-integration/interface.yaml": [
+ "interface:openstack-integration",
+ "static",
+ "11b07a41bd2e24765231c4b7c7218da15f2173398d8d73698ecb210e599d02f6"
+ ],
+ "hooks/relations/openstack-integration/make_docs": [
+ "interface:openstack-integration",
+ "static",
+ "a564aac288cc0bf4ff14418a341f11b065988c2b64adf93ec451e09dd92dcea5"
+ ],
+ "hooks/relations/openstack-integration/provides.py": [
+ "interface:openstack-integration",
+ "static",
+ "ad09fc79fa5eb7a142477d5bf7f48b53f6ede389708de0bb297c6d009aba502b"
+ ],
+ "hooks/relations/openstack-integration/pydocmd.yml": [
+ "interface:openstack-integration",
+ "static",
+ "3568f8a3c1446dfd736f31050e2b470bf125cc41717d156a4b866c7ea53861be"
+ ],
+ "hooks/relations/openstack-integration/requires.py": [
+ "interface:openstack-integration",
+ "static",
+ "a15f5a7ffa2391f75da6bde0007700ee75f058e62430924312ff39efc6ecea6b"
+ ],
+ "hooks/relations/tls-certificates/.gitignore": [
+ "interface:tls-certificates",
+ "static",
+ "b485e74def213c534676224e655e9276b62d401ebc643508ddc545dd335cb6dc"
+ ],
+ "hooks/relations/tls-certificates/README.md": [
+ "interface:tls-certificates",
+ "static",
+ "6851227de8fcca7edfd504159dbe3e3af31080af64df46f3d3b345da7630827a"
+ ],
+ "hooks/relations/tls-certificates/__init__.py": [
+ "interface:tls-certificates",
+ "static",
+ "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"
+ ],
+ "hooks/relations/tls-certificates/docs/common.md": [
+ "interface:tls-certificates",
+ "static",
+ "5e91d6637fc0ccc50af2776de9e59a0f8098244b627816b2e18fabb266e980ff"
+ ],
+ "hooks/relations/tls-certificates/docs/provides.md": [
+ "interface:tls-certificates",
+ "static",
+ "5c12dfca99b5c15ba10b4e7f7cff4cb4c9b621b198deba5f2397d3c837d035fe"
+ ],
+ "hooks/relations/tls-certificates/docs/requires.md": [
+ "interface:tls-certificates",
+ "static",
+ "148dd1de163d75253f0a9d3c35e108dcaacbc9bdf97e47186743e6c82a67b62e"
+ ],
+ "hooks/relations/tls-certificates/interface.yaml": [
+ "interface:tls-certificates",
+ "static",
+ "e412e54b1d327bad15a882f7f0bf996212090db576b863cc9cff7a68afc0e4fa"
+ ],
+ "hooks/relations/tls-certificates/make_docs": [
+ "interface:tls-certificates",
+ "static",
+ "3671543bddc9d277171263310e404df3f11660429582cb27b39b7e7ec8757a37"
+ ],
+ "hooks/relations/tls-certificates/provides.py": [
+ "interface:tls-certificates",
+ "static",
+ "be2a4b9a411c770989c529fd887070ad91649481a13f5239cfd8751f234b637c"
+ ],
+ "hooks/relations/tls-certificates/pydocmd.yml": [
+ "interface:tls-certificates",
+ "static",
+ "48a233f60a89f87d56e9bc715e05766f5d39bbea2bc8741ed31f67b30c8cfcb8"
+ ],
+ "hooks/relations/tls-certificates/requires.py": [
+ "interface:tls-certificates",
+ "static",
+ "442d773112079bc674d3e6be75b00323fcad7efd2f03613a1972b575dd438dba"
+ ],
+ "hooks/relations/tls-certificates/tls_certificates_common.py": [
+ "interface:tls-certificates",
+ "static",
+ "068bd32ba69bfa514e1da386919d18b348ee678b40c372f275c9110f2cc4677c"
+ ],
+ "hooks/relations/vsphere-integration/.gitignore": [
+ "interface:vsphere-integration",
+ "static",
+ "9653f2820c79d92ac3518eedd0e1f43ffec128d5df9216c25d906fcba8ee46b8"
+ ],
+ "hooks/relations/vsphere-integration/LICENSE": [
+ "interface:vsphere-integration",
+ "static",
+ "cfc7749b96f63bd31c3c42b5c471bf756814053e847c10f3eb003417bc523d30"
+ ],
+ "hooks/relations/vsphere-integration/README.md": [
+ "interface:vsphere-integration",
+ "static",
+ "8de815f0f938cb8f58c536899ed87e55aac507a782093bd50d50bd3c1d6add1c"
+ ],
+ "hooks/relations/vsphere-integration/__init__.py": [
+ "interface:vsphere-integration",
+ "static",
+ "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"
+ ],
+ "hooks/relations/vsphere-integration/copyright": [
+ "interface:vsphere-integration",
+ "static",
+ "7c0e36e618a8544faaaa3f8e0533c2f1f4a18bcacbdd8b99b537742e6b587d58"
+ ],
+ "hooks/relations/vsphere-integration/docs/provides.md": [
+ "interface:vsphere-integration",
+ "static",
+ "daa3c44a2df6d774adc60bde1160f1e307129be9d696f018eab4a7e713ee775a"
+ ],
+ "hooks/relations/vsphere-integration/docs/requires.md": [
+ "interface:vsphere-integration",
+ "static",
+ "4e79bb1b151f1de63b423d39a6e1831efbb6f767fe5b84963162f62c6bbb9123"
+ ],
+ "hooks/relations/vsphere-integration/interface.yaml": [
+ "interface:vsphere-integration",
+ "static",
+ "20295b882dfb9a1750d8e988eaa3383cd3109fae510785ba4e415d7fa9b118af"
+ ],
+ "hooks/relations/vsphere-integration/make_docs": [
+ "interface:vsphere-integration",
+ "static",
+ "cd9d91049ee3c6e6148f4bd9204a34463dde905ce665cff25be014ffc1b81b89"
+ ],
+ "hooks/relations/vsphere-integration/provides.py": [
+ "interface:vsphere-integration",
+ "static",
+ "8ccb09c4a3009b59caea227ef40395fb063d3e8ce983338060fb59bbe74138c0"
+ ],
+ "hooks/relations/vsphere-integration/pydocmd.yml": [
+ "interface:vsphere-integration",
+ "static",
+ "9f8eb566569977f10955da67def28886737e80914ae000e4acfae1313d08f105"
+ ],
+ "hooks/relations/vsphere-integration/requires.py": [
+ "interface:vsphere-integration",
+ "static",
+ "d56702f60037f06259752d3bd7882f7ee46f60a4ce7b6d1071520d69ec9351f9"
+ ],
+ "hooks/start": [
+ "layer:basic",
+ "dynamic",
+ "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7"
+ ],
+ "hooks/stop": [
+ "layer:basic",
+ "dynamic",
+ "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7"
+ ],
+ "hooks/update-status": [
+ "layer:basic",
+ "dynamic",
+ "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7"
+ ],
+ "hooks/upgrade-charm": [
+ "layer:basic",
+ "dynamic",
+ "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7"
+ ],
+ "hooks/vsphere-relation-broken": [
+ "layer:basic",
+ "dynamic",
+ "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7"
+ ],
+ "hooks/vsphere-relation-changed": [
+ "layer:basic",
+ "dynamic",
+ "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7"
+ ],
+ "hooks/vsphere-relation-created": [
+ "layer:basic",
+ "dynamic",
+ "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7"
+ ],
+ "hooks/vsphere-relation-departed": [
+ "layer:basic",
+ "dynamic",
+ "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7"
+ ],
+ "hooks/vsphere-relation-joined": [
+ "layer:basic",
+ "dynamic",
+ "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7"
+ ],
+ "icon.svg": [
+ "kubernetes-worker",
+ "static",
+ "0afa1f0d13a72b9df63559bc950bc0f5881984a591ea994990a45066db1e665b"
+ ],
+ "layer.yaml": [
+ "kubernetes-worker",
+ "dynamic",
+ "cd6bd7d1a594dbc399906e13d9e0af3b05234bbf92ef7e83c786b465fd652e43"
+ ],
+ "lib/charms/coordinator.py": [
+ "layer:coordinator",
+ "static",
+ "6dbacc87605be8efcbf19ec05341e4eb210327724495c79998a46947e034dbea"
+ ],
+ "lib/charms/layer/__init__.py": [
+ "layer:basic",
+ "static",
+ "dfe0d26c6bf409767de6e2546bc648f150e1b396243619bad3aa0553ab7e0e6f"
+ ],
+ "lib/charms/layer/basic.py": [
+ "layer:basic",
+ "static",
+ "3126b5754ad39402ee27e64527044ddd231ed1cd137fcedaffb51e63a635f108"
+ ],
+ "lib/charms/layer/execd.py": [
+ "layer:basic",
+ "static",
+ "fda8bd491032db1db8ddaf4e99e7cc878c6fb5432efe1f91cadb5b34765d076d"
+ ],
+ "lib/charms/layer/kubernetes_common.py": [
+ "layer:kubernetes-common",
+ "static",
+ "826650823a9af745e8a57defba66d1f2fe1c735f0fe64d282cf528ca65272101"
+ ],
+ "lib/charms/layer/nagios.py": [
+ "layer:nagios",
+ "static",
+ "0246710bdbea844356007a64409907d93e6e94a289d83266e8b7c5d921fb3a6c"
+ ],
+ "lib/charms/layer/options.py": [
+ "layer:options",
+ "static",
+ "8ae7a07d22542fc964f2d2bee8219d1c78a68dace70a1b38d36d4aea47b1c3b2"
+ ],
+ "lib/charms/layer/snap.py": [
+ "layer:snap",
+ "static",
+ "1a3a2a09bb5f2ea1b557354d09f6968cecb6b4204ded019e704203fb3391f7be"
+ ],
+ "lib/charms/layer/tls_client.py": [
+ "layer:tls-client",
+ "static",
+ "34531c3980777b661b913d77c432fc371ed10425473c2eb365b1dd5540c2ec6e"
+ ],
+ "lib/charms/leadership.py": [
+ "layer:leadership",
+ "static",
+ "20ffcbbc08147506759726ad51567420659ffb8a2e0121079240b8706658e332"
+ ],
+ "lib/debug_script.py": [
+ "layer:debug",
+ "static",
+ "a4d56f2d3e712b1b5cadb657c7195c6268d0aac6d228991049fd769e0ddaf453"
+ ],
+ "lxd-profile.yaml": [
+ "kubernetes-worker",
+ "static",
+ "e62700f1993721652d83756f89e1f8b33c5d0dec6fb27554f61aaf96ccd4e379"
+ ],
+ "metadata.yaml": [
+ "kubernetes-worker",
+ "dynamic",
+ "087372af1fc0a5a22415a0517ee159e1ccae2c4ee67005ba8ac67db6eb0979af"
+ ],
+ "metrics.yaml": [
+ "kubernetes-worker",
+ "static",
+ "94a5eb0b0966f8ba434d91ff1e9b99b1b4c3b3044657b236d4e742d3e0d57c47"
+ ],
+ "reactive/__init__.py": [
+ "layer:coordinator",
+ "static",
+ "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"
+ ],
+ "reactive/cdk_service_kicker.py": [
+ "layer:cdk-service-kicker",
+ "static",
+ "cc2648443016a18324ecb26acb71d69c71610ba23df235f280383552136f7efc"
+ ],
+ "reactive/coordinator.py": [
+ "layer:coordinator",
+ "static",
+ "18cda7ddf00ae0e47578d489fc3ebb376b4428cd0559797a87ddbead54360d02"
+ ],
+ "reactive/kubernetes_master_worker_base.py": [
+ "layer:kubernetes-master-worker-base",
+ "static",
+ "4a2d098f2e54f96b7ecef19b9485534b2da911a4a62104bd0efa40e4cb8bb519"
+ ],
+ "reactive/kubernetes_worker.py": [
+ "kubernetes-worker",
+ "static",
+ "75969472e7f0647befc465b3c2cd7010f239d9d51d1f61b491162353d6c326b3"
+ ],
+ "reactive/leadership.py": [
+ "layer:leadership",
+ "static",
+ "e2b233cf861adc3b2d9e9c062134ce2f104953f03283cdddd88f49efee652e8f"
+ ],
+ "reactive/snap.py": [
+ "layer:snap",
+ "static",
+ "e4625ff4190ed33625f50d94343eda100871052ef133028f5f0ff1edfa5a23c3"
+ ],
+ "reactive/tls_client.py": [
+ "layer:tls-client",
+ "static",
+ "08e850e401d2004523dca6b5e6bc47c33d558bf575dd55969491e11cd3ed98c8"
+ ],
+ "registry-configmap.yaml": [
+ "kubernetes-worker",
+ "static",
+ "1558fde27b806faefe57d0f6bf2a2a28fd5909501be6776fcb4b3e1242471fea"
+ ],
+ "requirements.txt": [
+ "layer:basic",
+ "static",
+ "a00f75d80849e5b4fc5ad2e7536f947c25b1a4044b341caa8ee87a92d3a4c804"
+ ],
+ "script/bootstrap": [
+ "kubernetes-worker",
+ "static",
+ "1985d9a07e8d764351530f6eb1b81bef6a4c035dc75422c03f4672ceaf1a4c18"
+ ],
+ "script/build": [
+ "kubernetes-worker",
+ "static",
+ "e78cab1bead2e3c8f7970558f4d08a81f6cc59e5c2903e997644f7e51e7a3633"
+ ],
+ "script/upload": [
+ "kubernetes-worker",
+ "static",
+ "aa13345e5f6873df26fb1705d1a1d51584fb32805329ac2d7a11f8ad7cbf4569"
+ ],
+ "setup.py": [
+ "layer:snap",
+ "static",
+ "b219c8c6cb138a2f70a8ef9136d1cc3fe6210bd1e28c99fccb5e7ae90d547164"
+ ],
+ "templates/cdk-service-kicker": [
+ "layer:cdk-service-kicker",
+ "static",
+ "b17adff995310e14d1b510337efa0af0531b55e2c487210168829e0dc1a6f99b"
+ ],
+ "templates/cdk-service-kicker.service": [
+ "layer:cdk-service-kicker",
+ "static",
+ "c2d3977fa89d453f0f13a8a823621c44bb642ec7392d8b7462b631864f665029"
+ ],
+ "templates/default-http-backend.yaml": [
+ "kubernetes-worker",
+ "static",
+ "e31bb19574e6f23bb89cc30475d5e8b41fa96a8f67aa0b2d01316902584fa4e4"
+ ],
+ "templates/ingress-daemon-set.yaml": [
+ "kubernetes-worker",
+ "static",
+ "e3fd0181d69058134f6afbd18374be0822e518f8c169d052d041f2a342974c09"
+ ],
+ "templates/microbot-example.yaml": [
+ "kubernetes-worker",
+ "static",
+ "fb8feb88979eb5d0cfcbf9a5169387667a2224c72b0aae7f01310caa8c094ebe"
+ ],
+ "templates/nagios_plugin.py": [
+ "kubernetes-worker",
+ "static",
+ "636ca61f46749a762e165e9b13fc3da8823133af5c98b90dd33e7365171b84a4"
+ ],
+ "templates/nfs-provisioner.yaml": [
+ "kubernetes-worker",
+ "static",
+ "e3ee7c995c9a3624daffdc9a09467e9e274b38a4bb6c3851d928bf7bf1151fac"
+ ],
+ "templates/registry.yaml": [
+ "kubernetes-worker",
+ "static",
+ "75ef1f3d765a94e8b0c19a5e63ebf5df6788f91dbbecc4bf587695fecd63da87"
+ ],
+ "tox.ini": [
+ "layer:snap",
+ "static",
+ "4db933f2c03cda5e330db64806f0a06f6f6bc608e0db88c9c74b7171fc054ad1"
+ ],
+ "version": [
+ "kubernetes-worker",
+ "dynamic",
+ "941f7f7d1878f1ab5754c308ddd1eba3c788e87c6350872cbc1be0863de33549"
+ ],
+ "wheelhouse.txt": [
+ "kubernetes-worker",
+ "dynamic",
+ "6994129978e2e9b71815776a834a53cda7889390700c33c5478d5546d083dc3b"
+ ],
+ "wheelhouse/Jinja2-2.10.1.tar.gz": [
+ "layer:basic",
+ "dynamic",
+ "065c4f02ebe7f7cf559e49ee5a95fb800a9e4528727aec6f24402a5374c65013"
+ ],
+ "wheelhouse/MarkupSafe-1.1.1.tar.gz": [
+ "__pip__",
+ "dynamic",
+ "29872e92839765e546828bb7754a68c418d927cd064fd4708fab9fe9c8bb116b"
+ ],
+ "wheelhouse/PyYAML-5.2.tar.gz": [
+ "layer:basic",
+ "dynamic",
+ "c0ee8eca2c582d29c3c2ec6e2c4f703d1b7f1fb10bc72317355a746057e7346c"
+ ],
+ "wheelhouse/Tempita-0.5.2.tar.gz": [
+ "__pip__",
+ "dynamic",
+ "cacecf0baa674d356641f1d406b8bff1d756d739c46b869a54de515d08e6fc9c"
+ ],
+ "wheelhouse/charmhelpers-0.20.21.tar.gz": [
+ "layer:basic",
+ "dynamic",
+ "37dd06f9548724d38352d1eaf91216df9167066745774118481d40974599715c"
+ ],
+ "wheelhouse/charms.reactive-1.4.1.tar.gz": [
+ "layer:basic",
+ "dynamic",
+ "bba21b4fd40b26c240c9ef2aa10c6fdf73592031c68591da4e7ccc46ca9cb616"
+ ],
+ "wheelhouse/charms.templating.jinja2-1.0.2.tar.gz": [
+ "kubernetes-worker",
+ "dynamic",
+ "8193c6a1d40bdb66fe272c359b4e4780501c658acfaf2b1118c4230927815fe2"
+ ],
+ "wheelhouse/netaddr-0.7.19.tar.gz": [
+ "layer:basic",
+ "dynamic",
+ "38aeec7cdd035081d3a4c306394b19d677623bf76fa0913f6695127c7753aefd"
+ ],
+ "wheelhouse/pbr-5.6.0.tar.gz": [
+ "__pip__",
+ "dynamic",
+ "42df03e7797b796625b1029c0400279c7c34fd7df24a7d7818a1abb5b38710dd"
+ ],
+ "wheelhouse/pip-18.1.tar.gz": [
+ "layer:basic",
+ "dynamic",
+ "c0a292bd977ef590379a3f05d7b7f65135487b67470f6281289a94e015650ea1"
+ ],
+ "wheelhouse/pyaml-20.4.0.tar.gz": [
+ "__pip__",
+ "dynamic",
+ "29a5c2a68660a799103d6949167bd6c7953d031449d08802386372de1db6ad71"
+ ],
+ "wheelhouse/setuptools-41.6.0.zip": [
+ "layer:basic",
+ "dynamic",
+ "6afa61b391dcd16cb8890ec9f66cc4015a8a31a6e1c2b4e0c464514be1a3d722"
+ ],
+ "wheelhouse/setuptools_scm-1.17.0.tar.gz": [
+ "layer:basic",
+ "dynamic",
+ "70a4cf5584e966ae92f54a764e6437af992ba42ac4bca7eb37cc5d02b98ec40a"
+ ],
+ "wheelhouse/six-1.15.0.tar.gz": [
+ "__pip__",
+ "dynamic",
+ "30639c035cdb23534cd4aa2dd52c3bf48f06e5f4a941509c8bafd8ce11080259"
+ ],
+ "wheelhouse/tenacity-7.0.0.tar.gz": [
+ "layer:snap",
+ "dynamic",
+ "5bd16ef5d3b985647fe28dfa6f695d343aa26479a04e8792b9d3c8f49e361ae1"
+ ],
+ "wheelhouse/wheel-0.33.6.tar.gz": [
+ "layer:basic",
+ "dynamic",
+ "10c9da68765315ed98850f8e048347c3eb06dd81822dc2ab1d4fde9dc9702646"
+ ]
+ }
+}
\ No newline at end of file
diff --git a/kubernetes-worker/.github/workflows/build.yml b/kubernetes-worker/.github/workflows/build.yml
new file mode 100644
index 0000000..f3a6191
--- /dev/null
+++ b/kubernetes-worker/.github/workflows/build.yml
@@ -0,0 +1,16 @@
+name: Builds kubernetes-worker charm
+on: [push, pull_request]
+
+jobs:
+ build:
+ name: Build charm
+ runs-on: ubuntu-latest
+ steps:
+ - uses: actions/checkout@v2
+ - name: Setup Python 3.8
+ uses: actions/setup-python@v2
+ with:
+ python-version: '3.8'
+ - name: Run build
+ run: |
+ make charm
diff --git a/kubernetes-worker/.gitignore b/kubernetes-worker/.gitignore
new file mode 100644
index 0000000..15deb39
--- /dev/null
+++ b/kubernetes-worker/.gitignore
@@ -0,0 +1,5 @@
+.tox/
+__pycache__/
+*.pyc
+placeholders/
+*.tgz
diff --git a/kubernetes-worker/.travis.yml b/kubernetes-worker/.travis.yml
new file mode 100644
index 0000000..d2be8be
--- /dev/null
+++ b/kubernetes-worker/.travis.yml
@@ -0,0 +1,9 @@
+language: python
+python:
+ - "3.5"
+ - "3.6"
+ - "3.7"
+install:
+ - pip install tox-travis
+script:
+ - tox
diff --git a/kubernetes-worker/.travis/profile-update.yaml b/kubernetes-worker/.travis/profile-update.yaml
new file mode 100644
index 0000000..57f96eb
--- /dev/null
+++ b/kubernetes-worker/.travis/profile-update.yaml
@@ -0,0 +1,12 @@
+config: {}
+description: Default LXD profile - updated
+devices:
+ eth0:
+ name: eth0
+ parent: lxdbr0
+ nictype: bridged
+ type: nic
+ root:
+ path: /
+ pool: default
+ type: disk
diff --git a/kubernetes-worker/CONTRIBUTING.md b/kubernetes-worker/CONTRIBUTING.md
new file mode 100644
index 0000000..3de561f
--- /dev/null
+++ b/kubernetes-worker/CONTRIBUTING.md
@@ -0,0 +1,37 @@
+# Contributor Guide
+
+This Juju charm is open source ([Apache License 2.0](./LICENSE)) and we actively seek any community contributions
+for code, suggestions and documentation.
+This page details a few notes, workflows and suggestions for how to make contributions most effective and help us
+all build a better charm - please give them a read before working on any contributions.
+
+## Licensing
+
+This charm has been created under the [Apache License 2.0](./LICENSE), which will cover any contributions you may
+make to this project. Please familiarise yourself with the terms of the license.
+
+Additionally, this charm uses the Harmony CLA agreement. It’s the easiest way for you to give us permission to
+use your contributions.
+In effect, you’re giving us a license, but you still own the copyright — so you retain the right to modify your
+code and use it in other projects. Please [sign the CLA here](https://ubuntu.com/legal/contributors/agreement) before
+making any contributions.
+
+## Code of conduct
+
+We have adopted the Ubuntu Code of Conduct. You can read this in full [here](https://ubuntu.com/community/code-of-conduct).
+
+## Contributing code
+
+To contribute code to this project, please use the following workflow:
+
+1. [Submit a bug](https://bugs.launchpad.net/charm-kubernetes-worker/+filebug) to explain the need for and track the change.
+2. Create a branch on your fork of the repo with your changes, including a unit test covering the new or modified code.
+3. Submit a PR. The PR description should include a link to the bug on Launchpad.
+4. Update the Launchpad bug to include a link to the PR and the `review-needed` tag.
+5. Once reviewed and merged, the change will become available on the edge channel and assigned to an appropriate milestone
+ for further release according to priority.
+
+## Documentation
+
+Documentation for this charm is currently maintained as part of the Charmed Kubernetes docs.
+See [this page](https://github.com/charmed-kubernetes/kubernetes-docs/blob/master/pages/k8s/charm-kubernetes-worker.md)
diff --git a/kubernetes-worker/HACKING.md b/kubernetes-worker/HACKING.md
new file mode 100644
index 0000000..28e380c
--- /dev/null
+++ b/kubernetes-worker/HACKING.md
@@ -0,0 +1,25 @@
+# Kubernetes Worker
+
+### Building from the layer
+
+You can clone the kubernetes-worker layer with git and build locally if you
+have the charm package/snap installed.
+
+```shell
+# Install the snap
+sudo snap install charm --channel=edge
+
+# Set the build environment
+export JUJU_REPOSITORY=$HOME
+
+# Clone the layer and build it to our JUJU_REPOSITORY
+git clone https://github.com/juju-solutions/kubernetes
+cd kubernetes/cluster/juju/layers/kubernetes-worker
+charm build -r
+```
+
+### Contributing
+
+TBD
+
+
diff --git a/kubernetes-worker/LICENSE b/kubernetes-worker/LICENSE
new file mode 100644
index 0000000..0543093
--- /dev/null
+++ b/kubernetes-worker/LICENSE
@@ -0,0 +1,202 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+END OF TERMS AND CONDITIONS
+
+APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+Copyright Canonical, Ltd.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
diff --git a/kubernetes-worker/Makefile b/kubernetes-worker/Makefile
new file mode 100644
index 0000000..3f0429f
--- /dev/null
+++ b/kubernetes-worker/Makefile
@@ -0,0 +1,18 @@
+CHANNEL ?= unpublished
+CHARM := kubernetes-worker
+
+setup-env:
+ bash script/bootstrap
+
+charm: setup-env
+ bash script/build
+
+upload:
+ifndef NAMESPACE
+ $(error NAMESPACE is not set)
+endif
+
+ env CHARM=$(CHARM) NAMESPACE=$(NAMESPACE) CHANNEL=$(CHANNEL) bash script/upload
+
+.PHONY: charm upload setup-env
+all: charm
diff --git a/kubernetes-worker/README.md b/kubernetes-worker/README.md
new file mode 100644
index 0000000..0bf63b3
--- /dev/null
+++ b/kubernetes-worker/README.md
@@ -0,0 +1,22 @@
+# Kubernetes Worker
+
+## Usage
+
+This charm deploys a container runtime, and additionally stands up the Kubernetes
+worker applications: kubelet, and kube-proxy.
+
+In order for this charm to be useful, it should be deployed with its companion
+charm [kubernetes-master](https://jujucharms.com/u/containers/kubernetes-master)
+and linked with an SDN-Plugin and a container runtime such as
+[containerd](https://jaas.ai/u/containers/containerd).
+
+This charm is a component of Charmed Kubernetes. For full information,
+please visit the [official Charmed Kubernetes docs](https://www.ubuntu.com/kubernetes/docs/charm-kubernetes-worker).
+
+## Developers
+
+### Building the charm
+
+```
+make charm
+```
diff --git a/kubernetes-worker/actions.yaml b/kubernetes-worker/actions.yaml
new file mode 100644
index 0000000..5f774ac
--- /dev/null
+++ b/kubernetes-worker/actions.yaml
@@ -0,0 +1,108 @@
+"debug":
+ "description": "Collect debug data"
+"cis-benchmark":
+ "description": |
+ Run the CIS Kubernetes Benchmark against snap-based components.
+ "params":
+ "apply":
+ "type": "string"
+ "default": "none"
+ "description": |
+ Apply remediations to address benchmark failures. The default, 'none',
+ will not attempt to fix any reported failures. Set to 'conservative'
+ to resolve simple failures. Set to 'dangerous' to attempt to resolve
+ all failures.
+
+ Note: Applying any remediation may result in an unusable cluster.
+ "config":
+ "type": "string"
+ "default": "https://github.com/charmed-kubernetes/kube-bench-config/archive/cis-1.5.zip#sha1=811f21dbf6c841bafdbfbd8a21f912ad67582f46"
+ "description": |
+ Archive containing configuration files to use when running kube-bench.
+ The default value is known to be compatible with snap components. When
+ using a custom URL, append '#<hash_type>=<checksum>' to verify the
+ archive integrity when downloaded.
+ "release":
+ "type": "string"
+ "default": "https://github.com/aquasecurity/kube-bench/releases/download/v0.3.1/kube-bench_0.3.1_linux_amd64.tar.gz#sha256=6616f1373987259285e2f676a225d4a3885cd62b7e7a116102ff2fb445724281"
+ "description": |
+ Archive containing the 'kube-bench' binary to run. The default value
+ points to a stable upstream release. When using a custom URL, append
+ '#<hash_type>=<checksum>' to verify the archive integrity when
+ downloaded.
+
+ This may also be set to the special keyword 'upstream'. In this case,
+ the action will compile and use a local kube-bench binary built from
+ the master branch of the upstream repository:
+ https://github.com/aquasecurity/kube-bench
+
+"pause":
+ "description": |
+ Mark the node as unschedulable to prevent new pods from arriving, and
+ evict existing pods.
+ "params":
+ "delete-local-data":
+ "type": "boolean"
+ "description": |
+ Continue even if there are pods using emptyDir (local data that will
+ be deleted when the node is drained).
+ "default": !!bool "false"
+ "force":
+ "type": "boolean"
+ "description": |
+ Continue even if there are pods not managed by a
+ ReplicationController, ReplicaSet, Job, DaemonSet or StatefulSet.
+ "default": !!bool "false"
+"resume":
+ "description": |
+ Mark node as schedulable.
+"microbot":
+ "description": "Launch microbot containers"
+ "params":
+ "delete":
+ "type": "boolean"
+ "default": !!bool "false"
+ "description": "Remove a microbots deployment, service, and ingress if True."
+ "registry":
+ "type": "string"
+ "default": "rocks.canonical.com:443/cdk"
+ "description": "Registry to use for the microbot image."
+ "replicas":
+ "type": "integer"
+ "default": !!int "3"
+ "description": "Number of microbots to launch in Kubernetes."
+"upgrade":
+ "description": "Upgrade the kubernetes snaps"
+"registry":
+ "description": |
+ Create a private Docker registry.
+ DEPRECATED: See https://ubuntu.com/kubernetes/docs/docker-registry
+ "params":
+ "htpasswd":
+ "type": "string"
+ "description": "base64 encoded htpasswd file used for authentication."
+ "htpasswd-plain":
+ "type": "string"
+ "description": "base64 encoded plaintext version of the htpasswd file, needed\
+ \ by docker daemons to authenticate to the registry."
+ "tlscert":
+ "type": "string"
+ "description": "base64 encoded TLS certificate for the registry. Common Name\
+ \ must match the domain name of the registry."
+ "tlskey":
+ "type": "string"
+ "description": "base64 encoded TLS key for the registry."
+ "domain":
+ "type": "string"
+ "description": "The domain name for the registry. Must match the Common Name\
+ \ of the certificate."
+ "ingress":
+ "type": "boolean"
+ "default": !!bool "false"
+ "description": "Create an Ingress resource for the registry (or delete resource\
+ \ object if \"delete\" is True)"
+ "delete":
+ "type": "boolean"
+ "default": !!bool "false"
+ "description": "Remove a registry replication controller, service, and ingress\
+ \ if True."
diff --git a/kubernetes-worker/actions/cis-benchmark b/kubernetes-worker/actions/cis-benchmark
new file mode 100755
index 0000000..3f91dea
--- /dev/null
+++ b/kubernetes-worker/actions/cis-benchmark
@@ -0,0 +1,371 @@
+#!/usr/local/sbin/charm-env python3
+import os
+import json
+import shlex
+import shutil
+import subprocess
+import sys
+import tempfile
+from pathlib import Path
+
+import charms.layer
+import charms.reactive
+from charmhelpers.core import hookenv, unitdata
+from charmhelpers.fetch.archiveurl import ArchiveUrlFetchHandler
+from charms.layer import snap
+from charms.reactive import clear_flag, is_flag_set, set_flag
+
+
+BENCH_HOME = '/home/ubuntu/kube-bench'
+BENCH_BIN = '{}/kube-bench'.format(BENCH_HOME)
+BENCH_CFG = '{}/cfg-ck'.format(BENCH_HOME)
+GO_PKG = 'github.com/aquasecurity/kube-bench'
+RESULTS_DIR = '/home/ubuntu/kube-bench-results'
+
+# Remediation dicts associate a failing test with a tuple to fix it.
+# Conservative fixes will probably leave the cluster in a good state.
+# Dangerous fixes will likely break the cluster.
+# Tuple examples:
+# {'1.2.3': ('manual -- we don't know how to auto fix this', None, None)}
+# {'1.2.3': ('cli', 'command to run', None)}
+# {'1.2.3': ('kv', 'snap', {cfg_key: value})}
+CONSERVATIVE = {
+ '0.0.0': ('cli', 'echo "this is fine"', None),
+
+ # etcd (no known failures with a default install)
+
+ # k8s-master
+ '1.2.21': ('kv', 'kube-apiserver', {'profiling': 'false'}),
+ '1.2.23': ('kv', 'kube-apiserver', {'audit-log-maxage': '30'}),
+ '1.2.24': ('kv', 'kube-apiserver', {'audit-log-maxbackup': '10'}),
+ '1.3.1': ('kv', 'kube-controller-manager', {'terminated-pod-gc-threshold': '500'}),
+ '1.3.2': ('kv', 'kube-controller-manager', {'profiling': 'false'}),
+ '1.4.1': ('kv', 'kube-scheduler', {'profiling': 'false'}),
+
+ # k8s-worker
+ '4.2.2': ('kv', 'kubelet', {'authorization-mode': 'Webhook'}),
+ '4.2.4': ('kv', 'kubelet', {'read-only-port': '0'}),
+ '4.2.6': ('kv', 'kubelet', {'protect-kernel-defaults': 'true'}),
+}
+ADMISSION_PLUGINS = {'enable-admission-plugins': ('PersistentVolumeLabel',
+ 'PodSecurityPolicy,'
+ 'ServiceAccount,'
+ 'NodeRestriction')}
+DANGEROUS = {
+ '0.0.0': ('cli', 'echo "this is fine"', None),
+
+ # etcd (no known failures with a default install)
+
+ # k8s-master
+ '1.2.2': ('kv', 'kube-apiserver', {'basic-auth-file': None}),
+ '1.2.3': ('kv', 'kube-apiserver', {'token-auth-file': None}),
+ '1.2.7': ('kv', 'kube-apiserver', {'authorization-mode': 'RBAC,Node'}),
+ '1.2.8': ('kv', 'kube-apiserver', {'authorization-mode': 'RBAC,Node'}),
+ '1.2.9': ('kv', 'kube-apiserver', {'authorization-mode': 'RBAC,Node'}),
+ '1.2.14': ('kv', 'kube-apiserver', ADMISSION_PLUGINS),
+ '1.2.16': ('kv', 'kube-apiserver', ADMISSION_PLUGINS),
+ '1.2.17': ('kv', 'kube-apiserver', ADMISSION_PLUGINS),
+ '1.2.18': ('kv', 'kube-apiserver', {'insecure-bind-address': None}),
+ '1.2.19': ('kv', 'kube-apiserver', {'insecure-port': '0'}),
+ '1.2.33': ('manual', None, None),
+ '1.3.6': ('kv', 'kube-controller-manager',
+ {'feature-gates': 'RotateKubeletServerCertificate=true'}),
+
+ # k8s-worker
+ '4.2.12': ('kv', 'kubelet',
+ {'feature-gates': 'RotateKubeletServerCertificate=true'}),
+}
+
+
+def _fail(msg):
+ '''Fail the action with a given message.'''
+ hookenv.action_fail(msg)
+ sys.exit()
+
+
+def _move_matching_parent(dirpath, filename, dest):
+ '''Move a parent directory that contains a specific file.
+
+ Helper function that walks a directory looking for a given file. If found,
+ the file's parent directory is moved to the given destination.
+
+ :param: dirpath: String path to search
+ :param: filename: String file to find
+ :param: dest: String destination of the found parent directory
+ '''
+ for root, _, files in os.walk(dirpath):
+ for name in files:
+ if name == filename:
+ hookenv.log('Moving {} to {}'.format(root, dest))
+ shutil.move(root, dest)
+ return
+ else:
+ _fail('Could not find {} in {}'.format(filename, dirpath))
+
+
+def _restart_charm():
+ '''Set charm-specific flags and call reactive.main().'''
+ app = hookenv.charm_name() or 'unknown'
+ if 'master' in app:
+ hookenv.log('Restarting master')
+ clear_flag('kubernetes-master.components.started')
+ elif 'worker' in app:
+ hookenv.log('Restarting worker')
+ set_flag('kubernetes-worker.restart-needed')
+ elif 'etcd' in app:
+ hookenv.log('No-op: etcd does not need to be restarted')
+ return
+ else:
+ _fail('Unable to determine the charm to restart: {}'.format(app))
+
+ # Invoke reactive so the charm will react to the flags we just managed
+ charms.layer.import_layer_libs()
+ charms.reactive.main()
+
+
+def install(release, config):
+ '''Install kube-bench and related configuration.
+
+ Release and configuration are set via action params. If installing an
+ upstream release, this method will also install 'go' if needed.
+
+ :param: release: Archive URI or 'upstream'
+ :param: config: Archive URI of configuration files
+ '''
+ if Path(BENCH_HOME).exists():
+ shutil.rmtree(BENCH_HOME)
+ fetcher = ArchiveUrlFetchHandler()
+
+ if release == 'upstream':
+ Path(BENCH_HOME).mkdir(parents=True, exist_ok=True)
+
+ # Setup the 'go' environment
+ env = os.environ.copy()
+ go_bin = shutil.which('go', path='{}:/snap/bin'.format(env['PATH']))
+ if not go_bin:
+ snap.install('go', channel='stable', classic=True)
+ go_bin = '/snap/bin/go'
+ go_cache = os.getenv('GOCACHE', '/var/snap/go/common/cache')
+ go_path = os.getenv('GOPATH', '/var/snap/go/common')
+ env['GOCACHE'] = go_cache
+ env['GOPATH'] = go_path
+ Path(go_path).mkdir(parents=True, exist_ok=True)
+
+ # From https://github.com/aquasecurity/kube-bench#installing-from-sources
+ go_cmd = ('{bin} get {pkg} '
+ 'github.com/golang/dep/cmd/dep'.format(bin=go_bin, pkg=GO_PKG))
+ try:
+ subprocess.check_call(shlex.split(go_cmd), cwd=go_path, env=env)
+ except subprocess.CalledProcessError:
+ _fail('Failed to run: {}'.format(go_cmd))
+
+ go_cmd = ('{bin} build -o {out} {base}/src/{pkg}'.format(
+ bin=go_bin, out=BENCH_BIN, base=go_path, pkg=GO_PKG))
+ try:
+ subprocess.check_call(shlex.split(go_cmd), cwd=go_path, env=env)
+ except subprocess.CalledProcessError:
+ _fail('Failed to run: {}'.format(go_cmd))
+ else:
+ # Fetch the release URI and put it in the right place.
+ archive_path = fetcher.install(source=release)
+ # NB: We may not know the structure of the archive, but we know the
+ # directory containing 'kube-bench' belongs in our BENCH_HOME.
+ _move_matching_parent(
+ dirpath=archive_path, filename='kube-bench', dest=BENCH_HOME)
+
+ # Fetch the config URI and put it in the right place.
+ archive_dir = fetcher.install(source=config)
+ # NB: We may not know the structure of the archive, but we know the
+ # directory containing 'config.yaml' belongs in our BENCH_CFG.
+ _move_matching_parent(
+ dirpath=archive_dir, filename='config.yaml', dest=BENCH_CFG)
+
+
+def apply(remediations=None):
+ '''Apply remediations to address benchmark failures.
+
+ :param: remediations: either 'conservative' or 'dangerous'
+ '''
+ applied_fixes = 0
+ danger = True if remediations == 'dangerous' else False
+ db = unitdata.kv()
+
+ json_log = report(log_format='json')
+ hookenv.log('Loading JSON from: {}'.format(json_log))
+ try:
+ with open(json_log, 'r') as f:
+ full_json = json.load(f)
+ except Exception:
+ _fail('Failed to load: {}'.format(json_log))
+
+ for test in full_json.get('tests', {}):
+ for result in test.get('results', {}):
+ test_num = result.get('test_number')
+ test_remediation = result.get('remediation')
+ test_status = result.get('status', '')
+
+ if test_status.lower() == 'fail':
+ test_remedy = CONSERVATIVE.get(test_num)
+ if not test_remedy and danger:
+ # no conservative remedy, check dangerous if user wants
+ test_remedy = DANGEROUS.get(test_num)
+ if isinstance(test_remedy, tuple):
+ if test_remedy[0] == 'manual':
+ # we don't know how to autofix; log remediation text
+ hookenv.log('Test {}: unable to auto-apply remedy.\n'
+ 'Manual steps:\n{}'.format(test_num,
+ test_remediation))
+ elif test_remedy[0] == 'cli':
+ cmd = shlex.split(test_remedy[1])
+ try:
+ out = subprocess.check_output(cmd)
+ except subprocess.CalledProcessError:
+ _fail('Test {}: failed to run: {}'.format(test_num, cmd))
+ else:
+ hookenv.log('Test {}: applied remedy: {}\n'
+ 'Output: {}'.format(test_num, cmd, out))
+ applied_fixes += 1
+ elif test_remedy[0] == 'kv':
+ cfg_key = 'cis-' + test_remedy[1]
+ cfg = db.get(cfg_key) or {}
+ cfg.update(test_remedy[2])
+ db.set(cfg_key, cfg)
+
+ hookenv.log('Test {}: updated configuration: {}\n'.format(
+ test_num, cfg))
+ applied_fixes += 1
+ else:
+ hookenv.log('Test {}: remediation is missing'.format(test_num))
+
+ # CLI and KV changes will require a charm restart; do it.
+ if applied_fixes > 0:
+ _restart_charm()
+
+ msg = ('Applied {} remediations. Re-run with "apply=none" to generate a '
+ 'new report.').format(applied_fixes)
+ hookenv.action_set({'summary': msg})
+
+
+def reset():
+ '''Reset any remediations we applied to unitdata.kv().
+
+ This action does not track individual remediations to reset. Therefore,
+ this function unconditionally unsets all 'cis-' prefixed arguments that
+ this action may have set and restarts the relevant charm.
+ '''
+ db = unitdata.kv()
+
+ db.unset('cis-kube-apiserver')
+ db.unset('cis-kube-scheduler')
+ db.unset('cis-kube-controller-manager')
+ db.unset('cis-kubelet')
+ _restart_charm()
+
+ hookenv.action_set({'summary': ('Reset is complete. Re-run with '
+ '"apply=none" to generate a new report.')})
+
+
+def report(log_format='text'):
+ '''Run kube-bench and report results.
+
+ By default, save the full plain-text results to our RESULTS_DIR and set
+ action output with a summary. This function can also save full results in
+ a machine-friendly json format.
+
+ :param: log_format: String determines if output is text or json
+ :returns: Path to results log
+ '''
+ Path(RESULTS_DIR).mkdir(parents=True, exist_ok=True)
+
+ # Node type is different depending on the charm
+ app = hookenv.charm_name() or 'unknown'
+ version = 'cis-1.5'
+ if 'master' in app:
+ target = 'master'
+ elif 'worker' in app:
+ target = 'node'
+ elif 'etcd' in app:
+ target = 'etcd'
+ else:
+ _fail('Unable to determine the target to benchmark: {}'.format(app))
+
+ # Commands and log names are different depending on the format
+ if log_format == 'json':
+ log_prefix = 'results-json-'
+ verbose_cmd = ('{bin} -D {cfg} --benchmark {ver} --json run '
+ '--targets {target}').format(
+ bin=BENCH_BIN, cfg=BENCH_CFG, ver=version, target=target)
+ else:
+ log_prefix = 'results-text-'
+ verbose_cmd = ('{bin} -D {cfg} --benchmark {ver} run '
+ '--targets {target}').format(
+ bin=BENCH_BIN, cfg=BENCH_CFG, ver=version, target=target)
+
+ summary_cmd = ('{bin} -D {cfg} --benchmark {ver} '
+ '--noremediations --noresults run --targets {target}').format(
+ bin=BENCH_BIN, cfg=BENCH_CFG, ver=version, target=target)
+
+ # Store full results for future consumption
+ with tempfile.NamedTemporaryFile(mode='w+b', prefix=log_prefix,
+ dir=RESULTS_DIR, delete=False) as res_file:
+ try:
+ subprocess.call(shlex.split(verbose_cmd), stdout=res_file)
+ except subprocess.CalledProcessError:
+ _fail('Failed to run: {}'.format(verbose_cmd))
+ else:
+ # remember the filename for later (and make it readable, why not?)
+ Path(res_file.name).chmod(0o644)
+ log = res_file.name
+
+ # When making a summary, we also have a verbose report. Set action output
+ # so operators can see everything related to this run.
+ try:
+ out = subprocess.check_output(shlex.split(summary_cmd),
+ universal_newlines=True)
+ except subprocess.CalledProcessError:
+ _fail('Failed to run: {}'.format(summary_cmd))
+ else:
+ fetch_cmd = 'juju scp {unit}:{file} .'.format(unit=hookenv.local_unit(),
+ file=log)
+ hookenv.action_set({'cmd': summary_cmd,
+ 'report': fetch_cmd,
+ 'summary': out})
+
+ return log or None
+
+
+if __name__ == '__main__':
+ if not (is_flag_set('snap.installed.etcd') or
+ is_flag_set('kubernetes-master.snaps.installed') or
+ is_flag_set('kubernetes-worker.snaps.installed')):
+ msg = 'Snaps are not yet installed on this unit.'
+ _fail(msg)
+
+ # Validate action params
+ release = hookenv.action_get('release') or 'upstream'
+ config = hookenv.action_get('config')
+ if not config:
+ msg = 'Missing "config" parameter'
+ _fail(msg)
+ remediations = hookenv.action_get('apply')
+ if remediations not in ['none', 'conservative', 'dangerous', 'reset']:
+ msg = 'Invalid "apply" parameter: {}'.format(remediations)
+ _fail(msg)
+
+ # TODO: may want an option to overwrite an existing install
+ if Path(BENCH_BIN).exists() and Path(BENCH_CFG).exists():
+ hookenv.log('{} exists; skipping install'.format(BENCH_HOME))
+ else:
+ hookenv.log('Installing benchmark from: {}'.format(release))
+ install(release, config)
+
+ # Reset, remediate, or report
+ if remediations == 'reset':
+ hookenv.log('Attempting to remove all remediations')
+ reset()
+ elif remediations != 'none':
+ hookenv.log('Applying "{}" remediations'.format(remediations))
+ apply(remediations)
+ else:
+ hookenv.log('Report only; no remediations were requested')
+ report(log_format='text')
diff --git a/kubernetes-worker/actions/debug b/kubernetes-worker/actions/debug
new file mode 100755
index 0000000..8ba160e
--- /dev/null
+++ b/kubernetes-worker/actions/debug
@@ -0,0 +1,102 @@
+#!/usr/local/sbin/charm-env python3
+
+import os
+import subprocess
+import tarfile
+import tempfile
+import traceback
+from contextlib import contextmanager
+from datetime import datetime
+from charmhelpers.core.hookenv import action_set, local_unit
+
+archive_dir = None
+log_file = None
+
+
+@contextmanager
+def archive_context():
+ """ Open a context with a new temporary directory.
+
+ When the context closes, the directory is archived, and the archive
+ location is added to Juju action output. """
+ global archive_dir
+ global log_file
+ with tempfile.TemporaryDirectory() as temp_dir:
+ name = "debug-" + datetime.now().strftime("%Y%m%d%H%M%S")
+ archive_dir = os.path.join(temp_dir, name)
+ os.makedirs(archive_dir)
+ with open("%s/debug.log" % archive_dir, "w") as log_file:
+ yield
+ os.chdir(temp_dir)
+ tar_path = "/home/ubuntu/%s.tar.gz" % name
+ with tarfile.open(tar_path, "w:gz") as f:
+ f.add(name)
+ action_set({
+ "path": tar_path,
+ "command": "juju scp %s:%s ." % (local_unit(), tar_path),
+ "message": " ".join([
+ "Archive has been created on unit %s." % local_unit(),
+ "Use the juju scp command to copy it to your local machine."
+ ])
+ })
+
+
+def log(msg):
+ """ Log a message that will be included in the debug archive.
+
+ Must be run within archive_context """
+ timestamp = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
+ for line in str(msg).splitlines():
+ log_file.write(timestamp + " | " + line.rstrip() + "\n")
+
+
+def run_script(script):
+ """ Run a single script. Must be run within archive_context """
+ log("Running script: " + script)
+ script_dir = os.path.join(archive_dir, script)
+ os.makedirs(script_dir)
+ env = os.environ.copy()
+ env["PYTHONPATH"] = "lib" # allow same imports as reactive code
+ env["DEBUG_SCRIPT_DIR"] = script_dir
+ with open(script_dir + "/stdout", "w") as stdout:
+ with open(script_dir + "/stderr", "w") as stderr:
+ process = subprocess.Popen(
+ "debug-scripts/" + script,
+ stdout=stdout, stderr=stderr, env=env
+ )
+ try:
+ exit_code = process.wait(timeout=300)
+ except subprocess.TimeoutExpired:
+ log("ERROR: still running, terminating")
+ process.terminate()
+ try:
+ exit_code = process.wait(timeout=10)
+ except subprocess.TimeoutExpired:
+ log("ERROR: still running, killing")
+ process.kill()
+ exit_code = process.wait(timeout=10)
+ if exit_code != 0:
+ log("ERROR: %s failed with exit code %d" % (script, exit_code))
+
+
+def run_all_scripts():
+ """ Run all scripts. For the sake of robustness, log and ignore any
+ exceptions that occur.
+
+ Must be run within archive_context """
+ scripts = os.listdir("debug-scripts")
+ for script in scripts:
+ try:
+ run_script(script)
+ except:
+ log(traceback.format_exc())
+
+
+def main():
+ """ Open an archive context and run all scripts. """
+ with archive_context():
+ run_all_scripts()
+
+
+if __name__ == "__main__":
+ main()
diff --git a/kubernetes-worker/actions/microbot b/kubernetes-worker/actions/microbot
new file mode 100755
index 0000000..5a3d6db
--- /dev/null
+++ b/kubernetes-worker/actions/microbot
@@ -0,0 +1,81 @@
+#!/usr/local/sbin/charm-env python3
+
+# Copyright 2015 The Kubernetes Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import os
+import sys
+
+from charmhelpers.core.hookenv import action_fail, action_get, action_set
+from charmhelpers.core.hookenv import unit_public_ip
+from charms.reactive import endpoint_from_flag
+from charms.templating.jinja2 import render
+from subprocess import call, check_output
+
+os.environ['PATH'] += os.pathsep + os.path.join(os.sep, 'snap', 'bin')
+
+context = {}
+context['delete'] = action_get('delete')
+context['public_address'] = unit_public_ip()
+context['registry'] = action_get('registry')
+context['replicas'] = action_get('replicas')
+
+arch = check_output(['dpkg', '--print-architecture']).rstrip()
+context['arch'] = arch.decode('utf-8')
+
+if not context['replicas']:
+ context['replicas'] = 3
+
+# Declare a kubectl template when invoking kubectl
+kubectl = ['kubectl', '--kubeconfig=/root/.kube/config']
+
+# Remove deployment if requested
+if context['delete']:
+ service_del = kubectl + ['delete', 'svc', 'microbot']
+ service_response = call(service_del)
+ deploy_del = kubectl + ['delete', 'deployment', 'microbot']
+ deploy_response = call(deploy_del)
+ ingress_del = kubectl + ['delete', 'ing', 'microbot-ingress']
+ ingress_response = call(ingress_del)
+
+ if ingress_response != 0:
+ action_set({'microbot-ing':
+ 'Failed removal of microbot ingress resource.'})
+ if deploy_response != 0:
+ action_set({'microbot-deployment':
+ 'Failed removal of microbot deployment resource.'})
+ if service_response != 0:
+ action_set({'microbot-service':
+ 'Failed removal of microbot service resource.'})
+ sys.exit(0)
+
+kube_control = endpoint_from_flag('kube-control.registry_location.available')
+if kube_control:
+ registry_location = kube_control.get_registry_location()
+ context['registry'] = registry_location
+
+# Creation request
+render('microbot-example.yaml', '/root/cdk/addons/microbot.yaml',
+ context)
+
+create_command = kubectl + ['apply', '-f',
+ '/root/cdk/addons/microbot.yaml']
+
+create_response = call(create_command)
+
+if create_response == 0:
+ action_set({'address':
+ 'microbot.{}.nip.io'.format(context['public_address'])})
+else:
+ action_fail('Failed to apply microbot manifest.')
diff --git a/kubernetes-worker/actions/pause b/kubernetes-worker/actions/pause
new file mode 100755
index 0000000..f3bcabb
--- /dev/null
+++ b/kubernetes-worker/actions/pause
@@ -0,0 +1,34 @@
+#!/usr/local/sbin/charm-env python3
+
+# Juju action: pause this worker by draining its Kubernetes node so its
+# workloads are evicted and rescheduled elsewhere.
+
+import os
+import subprocess
+
+from charms.layer.kubernetes_common import (
+    get_node_name,
+    kubectl,
+)
+
+from charmhelpers.core.hookenv import (
+    action_fail,
+    action_get,
+    status_set,
+)
+
+# Make sure the kubectl snap can be found
+os.environ['PATH'] += os.pathsep + os.path.join(os.sep, 'snap', 'bin')
+
+# Always ignore DaemonSet-managed pods; they cannot be evicted and would
+# otherwise make the drain refuse to run.
+drain_args = ['--ignore-daemonsets']
+
+# Optional action parameters mapped straight onto kubectl drain flags.
+if action_get('delete-local-data'):
+    drain_args.append('--delete-local-data=true')
+
+if action_get('force'):
+    drain_args.append('--force')
+
+try:
+    kubectl('drain', get_node_name(), *drain_args)
+except subprocess.CalledProcessError as e:
+    # Surface the failure to the action caller, then re-raise so the
+    # unit log keeps the full traceback.
+    action_fail('{}. See unit logs for details.'.format(str(e)))
+    raise
+
+status_set('waiting', 'Kubernetes unit paused')
diff --git a/kubernetes-worker/actions/registry b/kubernetes-worker/actions/registry
new file mode 100755
index 0000000..eb95d05
--- /dev/null
+++ b/kubernetes-worker/actions/registry
@@ -0,0 +1,152 @@
+#!/usr/local/sbin/charm-env python3
+#
+# Juju action: create (or, with delete=true, remove) an in-cluster Docker
+# registry protected by TLS and htpasswd auth, optionally exposed via
+# ingress.
+#
+# For usage examples, see README.md
+#
+# TODO
+#
+# - make the action idempotent (i.e. if you run it multiple times, the first
+# run will create/delete the registry, and the rest will be a no-op and won't
+# error out)
+#
+# - take only a plain authentication file, and create the encrypted version in
+# the action
+#
+# - validate the parameters (make sure tlscert is a certificate, that tlskey is a
+# proper key, etc)
+#
+# - when https://bugs.launchpad.net/juju/+bug/1661015 is fixed, handle the
+# base64 encoding the parameters in the action itself
+
+import os
+import sys
+
+from base64 import b64encode
+
+from charmhelpers.core.hookenv import action_get
+from charmhelpers.core.hookenv import action_set
+from charms.templating.jinja2 import render
+from charms.reactive import endpoint_from_flag
+from subprocess import call, check_output
+
+# Make sure the kubectl snap can be found.
+os.environ['PATH'] += os.pathsep + os.path.join(os.sep, 'snap', 'bin')
+
+deletion = action_get('delete')
+
+# Template context for registry.yaml / registry-configmap.yaml.
+context = {}
+
+# The registry image must match the host architecture.
+arch = check_output(['dpkg', '--print-architecture']).rstrip()
+context['arch'] = arch.decode('utf-8')
+
+# This action was deprecated in 1.17.
+action_set({
+    'notice':
+        ('DEPRECATED: See https://ubuntu.com/kubernetes/docs/docker-registry '
+         'for supported container registry options.')
+})
+
+# These config options must be defined in the case of a creation
+param_error = False
+for param in ('tlscert', 'tlskey', 'domain', 'htpasswd', 'htpasswd-plain'):
+    value = action_get(param)
+    if not value and not deletion:
+        key = "registry-create-parameter-{}".format(param)
+        error = "failure, parameter {} is required".format(param)
+        action_set({key: error})
+        param_error = True
+
+    context[param] = value
+
+# Create the dockercfg template variable
+# NOTE(review): on the deletion path (or when params are missing), domain
+# and htpasswd-plain may be None here; the resulting string is harmless
+# because we exit before rendering in those cases.
+dockercfg = '{"%s": {"auth": "%s", "email": "root@localhost"}}' % \
+    (context['domain'], context['htpasswd-plain'])
+context['dockercfg'] = b64encode(dockercfg.encode()).decode('ASCII')
+
+# Exit 0 even on bad parameters; the action_set messages above carry the
+# per-parameter failure details.
+if param_error:
+    sys.exit(0)
+
+# This one is either true or false, no need to check if it has a "good" value.
+context['ingress'] = action_get('ingress')
+
+# Declare a kubectl template when invoking kubectl
+kubectl = ['kubectl', '--kubeconfig=/root/.kube/config']
+
+# Remove deployment if requested
+if deletion:
+    resources = ['svc/kube-registry', 'rc/kube-registry-v0', 'secrets/registry-tls-data',
+                 'secrets/registry-auth-data', 'secrets/registry-access']
+
+    if action_get('ingress'):
+        resources.append('ing/registry-ing')
+
+    delete_command = kubectl + ['delete', '--ignore-not-found=true'] + resources
+    delete_response = call(delete_command)
+    if delete_response == 0:
+        action_set({'registry-delete': 'success'})
+    else:
+        action_set({'registry-delete': 'failure'})
+
+    sys.exit(0)
+
+# Default the registry location to the one published over the
+# kube-control relation, when available.
+kube_control = endpoint_from_flag('kube-control.registry_location.available')
+if kube_control:
+    registry_location = kube_control.get_registry_location()
+    context['registry'] = registry_location
+
+# Creation request
+render('registry.yaml', '/root/cdk/addons/registry.yaml',
+       context)
+
+create_command = kubectl + ['create', '-f',
+                            '/root/cdk/addons/registry.yaml']
+
+create_response = call(create_command)
+
+if create_response == 0:
+    action_set({'registry-create': 'success'})
+
+    # Create a ConfigMap if it doesn't exist yet, else patch it.
+    # A ConfigMap is needed to change the default value for nginx' client_max_body_size.
+    # The default is 1MB, and this is the maximum size of images that can be
+    # pushed on the registry. 1MB images aren't useful, so we bump this value to 1024MB.
+    cm_name = 'nginx-load-balancer-conf'
+    check_cm_command = kubectl + ['get', 'cm', cm_name]
+    check_cm_response = call(check_cm_command)
+
+    if check_cm_response == 0:
+        # There is an existing ConfigMap, patch it
+        patch = '{"data":{"body-size":"1024m"}}'
+        patch_cm_command = kubectl + ['patch', 'cm', cm_name, '-p', patch]
+        patch_cm_response = call(patch_cm_command)
+
+        if patch_cm_response == 0:
+            action_set({'configmap-patch': 'success'})
+        else:
+            action_set({'configmap-patch': 'failure'})
+
+    else:
+        # No existing ConfigMap, create it
+        render('registry-configmap.yaml', '/root/cdk/addons/registry-configmap.yaml',
+               context)
+        create_cm_command = kubectl + ['create', '-f', '/root/cdk/addons/registry-configmap.yaml']
+        create_cm_response = call(create_cm_command)
+
+        if create_cm_response == 0:
+            action_set({'configmap-create': 'success'})
+        else:
+            action_set({'configmap-create': 'failure'})
+
+    # Patch the "default" serviceaccount with an imagePullSecret.
+    # This will allow the docker daemons to authenticate to our private
+    # registry automatically
+    patch = '{"imagePullSecrets":[{"name":"registry-access"}]}'
+    patch_sa_command = kubectl + ['patch', 'sa', 'default', '-p', patch]
+    patch_sa_response = call(patch_sa_command)
+
+    if patch_sa_response == 0:
+        action_set({'serviceaccount-patch': 'success'})
+    else:
+        action_set({'serviceaccount-patch': 'failure'})
+
+
+else:
+    action_set({'registry-create': 'failure'})
diff --git a/kubernetes-worker/actions/resume b/kubernetes-worker/actions/resume
new file mode 100755
index 0000000..a8b6422
--- /dev/null
+++ b/kubernetes-worker/actions/resume
@@ -0,0 +1,25 @@
+#!/usr/local/sbin/charm-env python3
+
+# Juju action: resume this worker by uncordoning its Kubernetes node so
+# workloads can be scheduled on it again (counterpart of the pause action).
+
+import os
+import subprocess
+
+from charms.layer.kubernetes_common import (
+    get_node_name,
+    kubectl,
+)
+
+from charmhelpers.core.hookenv import (
+    action_fail,
+    status_set,
+)
+
+# make sure the kubectl snap can be found
+os.environ['PATH'] += os.pathsep + os.path.join(os.sep, 'snap', 'bin')
+
+try:
+    kubectl('uncordon', get_node_name())
+except subprocess.CalledProcessError as e:
+    # Surface the failure to the action caller, then re-raise so the
+    # unit log keeps the full traceback.
+    action_fail('{}. See unit logs for details.'.format(str(e)))
+    raise
+
+status_set('active', 'Kubernetes unit resumed')
diff --git a/kubernetes-worker/actions/upgrade b/kubernetes-worker/actions/upgrade
new file mode 100755
index 0000000..a97c19b
--- /dev/null
+++ b/kubernetes-worker/actions/upgrade
@@ -0,0 +1,5 @@
+#!/bin/sh
+set -eux
+
+charms.reactive set_state kubernetes-worker.snaps.upgrade-specified
+exec hooks/config-changed
diff --git a/kubernetes-worker/bin/charm-env b/kubernetes-worker/bin/charm-env
new file mode 100755
index 0000000..d211ce9
--- /dev/null
+++ b/kubernetes-worker/bin/charm-env
@@ -0,0 +1,107 @@
+#!/bin/bash
+
+VERSION="1.0.0"  # reported by the --version / -v flag below
+
+
+find_charm_dirs() {
+    # Resolve and export JUJU_CHARM_DIR / CHARM_DIR, keeping the two in
+    # sync; exits non-zero when the charm directory cannot be determined
+    # unambiguously. $1 (optional) is a charm name used to disambiguate.
+    # Hopefully, $JUJU_CHARM_DIR is set so which venv to use is unambiguous.
+    if [[ -n "$JUJU_CHARM_DIR" || -n "$CHARM_DIR" ]]; then
+        if [[ -z "$JUJU_CHARM_DIR" ]]; then
+            # accept $CHARM_DIR to be more forgiving
+            export JUJU_CHARM_DIR="$CHARM_DIR"
+        fi
+        if [[ -z "$CHARM_DIR" ]]; then
+            # set CHARM_DIR as well to help with backwards compatibility
+            export CHARM_DIR="$JUJU_CHARM_DIR"
+        fi
+        return
+    fi
+    # Try to guess the value for JUJU_CHARM_DIR by looking for a non-subordinate
+    # (because there's got to be at least one principal) charm directory;
+    # if there are several, pick the first by alpha order.
+    agents_dir="/var/lib/juju/agents"
+    if [[ -d "$agents_dir" ]]; then
+        desired_charm="$1"
+        found_charm_dir=""
+        if [[ -n "$desired_charm" ]]; then
+            # Scan every deployed unit's metadata.yaml for a matching
+            # charm name; the match must be unique to succeed.
+            for charm_dir in $(/bin/ls -d "$agents_dir"/unit-*/charm); do
+                charm_name="$(grep -o '^['\''"]\?name['\''"]\?:.*' $charm_dir/metadata.yaml 2> /dev/null | sed -e 's/.*: *//' -e 's/['\''"]//g')"
+                if [[ "$charm_name" == "$desired_charm" ]]; then
+                    if [[ -n "$found_charm_dir" ]]; then
+                        >&2 echo "Ambiguous possibilities for JUJU_CHARM_DIR matching '$desired_charm'; please run within a Juju hook context"
+                        exit 1
+                    fi
+                    found_charm_dir="$charm_dir"
+                fi
+            done
+            if [[ -z "$found_charm_dir" ]]; then
+                >&2 echo "Unable to determine JUJU_CHARM_DIR matching '$desired_charm'; please run within a Juju hook context"
+                exit 1
+            fi
+            export JUJU_CHARM_DIR="$found_charm_dir"
+            export CHARM_DIR="$found_charm_dir"
+            return
+        fi
+        # No hint given: succeed only if exactly one non-subordinate
+        # charm is deployed on this machine.
+        # shellcheck disable=SC2126
+        non_subordinates="$(grep -L 'subordinate"\?:.*true' "$agents_dir"/unit-*/charm/metadata.yaml | wc -l)"
+        if [[ "$non_subordinates" -gt 1 ]]; then
+            >&2 echo 'Ambiguous possibilities for JUJU_CHARM_DIR; please use --charm or run within a Juju hook context'
+            exit 1
+        elif [[ "$non_subordinates" -eq 1 ]]; then
+            for charm_dir in $(/bin/ls -d "$agents_dir"/unit-*/charm); do
+                if grep -q 'subordinate"\?:.*true' "$charm_dir/metadata.yaml"; then
+                    continue
+                fi
+                export JUJU_CHARM_DIR="$charm_dir"
+                export CHARM_DIR="$charm_dir"
+                return
+            done
+        fi
+    fi
+    >&2 echo 'Unable to determine JUJU_CHARM_DIR; please run within a Juju hook context'
+    exit 1
+}
+
+try_activate_venv() {
+    # Source the charm's virtualenv activate script if one exists;
+    # silently a no-op otherwise.
+    if [[ -d "$JUJU_CHARM_DIR/../.venv" ]]; then
+        . "$JUJU_CHARM_DIR/../.venv/bin/activate"
+    fi
+}
+
+find_wrapped() {
+    # Locate the real executable this wrapper shadows by searching PATH
+    # with /usr/local/sbin (where the wrappers live) stripped out.
+    PATH="${PATH/\/usr\/local\/sbin:}" which "$(basename "$0")"
+}
+
+
+# Handle --version before anything else so it works outside a hook context.
+if [[ "$1" == "--version" || "$1" == "-v" ]]; then
+    echo "$VERSION"
+    exit 0
+fi
+
+
+# allow --charm option to hint which JUJU_CHARM_DIR to choose when ambiguous
+# NB: --charm option must come first
+# NB: option must be processed outside find_charm_dirs to modify $@
+charm_name=""
+if [[ "$1" == "--charm" ]]; then
+    charm_name="$2"
+    shift; shift
+fi
+
+find_charm_dirs "$charm_name"
+try_activate_venv
+# Let wrapped programs import the charm's bundled python libs.
+export PYTHONPATH="$JUJU_CHARM_DIR/lib:$PYTHONPATH"
+
+# Dispatch on how this file was invoked: shebang interpreter, symlink
+# wrapper, or sourced library.
+if [[ "$(basename "$0")" == "charm-env" ]]; then
+    # being used as a shebang
+    exec "$@"
+elif [[ "$0" == "$BASH_SOURCE" ]]; then
+    # being invoked as a symlink wrapping something to find in the venv
+    exec "$(find_wrapped)" "$@"
+elif [[ "$(basename "$BASH_SOURCE")" == "charm-env" ]]; then
+    # being sourced directly; do nothing
+    /bin/true
+else
+    # being sourced for wrapped bash helpers
+    . "$(find_wrapped)"
+fi
diff --git a/kubernetes-worker/bin/layer_option b/kubernetes-worker/bin/layer_option
new file mode 100755
index 0000000..3253ef8
--- /dev/null
+++ b/kubernetes-worker/bin/layer_option
@@ -0,0 +1,22 @@
+#!/usr/bin/env python3
+
+import sys
+import argparse
+from charms import layer
+
+
+parser = argparse.ArgumentParser(description='Access layer options.')
+parser.add_argument('section',
+ help='the section, or layer, the option is from')
+parser.add_argument('option',
+ help='the option to access')
+
+args = parser.parse_args()
+value = layer.options.get(args.section, args.option)
+if isinstance(value, bool):
+ sys.exit(0 if value else 1)
+elif isinstance(value, list):
+ for val in value:
+ print(val)
+else:
+ print(value)
diff --git a/kubernetes-worker/build-cni-resources.sh b/kubernetes-worker/build-cni-resources.sh
new file mode 100755
index 0000000..40e602d
--- /dev/null
+++ b/kubernetes-worker/build-cni-resources.sh
@@ -0,0 +1,42 @@
+#!/usr/bin/env bash
+
+# Build CNI plugin tarballs (cni-<arch>.tgz) for every architecture in
+# $ARCH, compiling inside a golang container so no local Go toolchain is
+# needed. The tarballs land in the current directory.
+
+set -eux
+
+CNI_VERSION="${CNI_VERSION:-v0.7.5}"
+ARCH="${ARCH:-amd64 arm64 s390x}"
+
+# Recorded in BUILD_INFO so a tarball can be traced back to this script.
+build_script_commit="$(git show --oneline -q)"
+temp_dir="$(readlink -f build-cni-resources.tmp)"
+rm -rf "$temp_dir"
+mkdir "$temp_dir"
+(cd "$temp_dir"
+  git clone https://github.com/containernetworking/plugins.git cni-plugins \
+    --branch "$CNI_VERSION" \
+    --depth 1
+
+  # Grab the user id and group id of this current user.
+  GROUP_ID=$(id -g)
+  USER_ID=$(id -u)
+
+  for arch in $ARCH; do
+    echo "Building cni $CNI_VERSION for $arch"
+    rm -f cni-plugins/bin/*
+    # The container runs as root, so chown the build output back to the
+    # invoking user before leaving the container.
+    docker run \
+      --rm \
+      -e GOOS=linux \
+      -e GOARCH="$arch" \
+      -v "$temp_dir"/cni-plugins:/cni \
+      golang:1.15 \
+      /bin/bash -c "cd /cni && ./build.sh && chown -R ${USER_ID}:${GROUP_ID} /cni"
+
+    (cd cni-plugins/bin
+      echo "cni-$arch $CNI_VERSION" >> BUILD_INFO
+      echo "Built $(date)" >> BUILD_INFO
+      echo "build script commit: $build_script_commit" >> BUILD_INFO
+      echo "cni-plugins commit: $(git show --oneline -q)" >> BUILD_INFO
+      tar -czf "$temp_dir/cni-$arch.tgz" .
+    )
+  done
+)
+mv "$temp_dir"/cni-*.tgz .
+rm -rf "$temp_dir"
diff --git a/kubernetes-worker/config.yaml b/kubernetes-worker/config.yaml
new file mode 100644
index 0000000..69008f3
--- /dev/null
+++ b/kubernetes-worker/config.yaml
@@ -0,0 +1,205 @@
+# Copyright 2016 Canonical Ltd.
+#
+# This file is part of the Snap layer for Juju.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"options":
+ # snap_proxy and snap_proxy_url have been deprecated for some time.
+ # If your charm still needs them, add these config items manually
+ # to your charm's config.yaml.
+ # snap_proxy:
+ # description: >
+ # DEPRECATED. Use snap-http-proxy and snap-https-proxy model configuration settings.
+ # HTTP/HTTPS web proxy for Snappy to use when accessing the snap store.
+ # type: string
+ # default: ""
+ # snap_proxy_url:
+ # default: ""
+ # type: string
+ # description: >
+ # DEPRECATED. Use snap-store-proxy model configuration setting.
+ # The address of a Snap Store Proxy to use for snaps e.g. http://snap-proxy.example.com
+ "snapd_refresh":
+ "default": "max"
+ "type": "string"
+ "description": |
+ How often snapd handles updates for installed snaps. Setting an empty
+ string will check 4x per day. Set to "max" to delay the refresh as long
+ as possible. You may also set a custom string as described in the
+ 'refresh.timer' section here:
+ https://forum.snapcraft.io/t/system-options/87
+ "nagios_context":
+ "default": "juju"
+ "type": "string"
+ "description": |
+ Used by the nrpe subordinate charms.
+ A string that will be prepended to instance name to set the host name
+ in nagios. So for instance the hostname would be something like:
+ juju-myservice-0
+ If you're running multiple environments with the same services in them
+ this allows you to differentiate between them.
+ "nagios_servicegroups":
+ "default": ""
+ "type": "string"
+ "description": |
+ A comma-separated list of nagios servicegroups.
+ If left empty, the nagios_context will be used as the servicegroup
+ "sysctl":
+ "type": "string"
+ "default": "{ net.ipv4.conf.all.forwarding : 1, net.ipv4.neigh.default.gc_thresh1\
+ \ : 128, net.ipv4.neigh.default.gc_thresh2 : 28672, net.ipv4.neigh.default.gc_thresh3\
+ \ : 32768, net.ipv6.neigh.default.gc_thresh1 : 128, net.ipv6.neigh.default.gc_thresh2\
+ \ : 28672, net.ipv6.neigh.default.gc_thresh3 : 32768, fs.inotify.max_user_instances\
+ \ : 8192, fs.inotify.max_user_watches : 1048576, kernel.panic : 10, kernel.panic_on_oops:\
+ \ 1, vm.overcommit_memory : 1 }"
+ "description": |
+ YAML formatted associative array of sysctl values, e.g.:
+ '{kernel.pid_max : 4194303 }'. Note that kube-proxy handles
+ the conntrack settings. The proper way to alter them is to
+ use the proxy-extra-args config to set them, e.g.:
+ juju config kubernetes-master proxy-extra-args="conntrack-min=1000000 conntrack-max-per-core=250000"
+ juju config kubernetes-worker proxy-extra-args="conntrack-min=1000000 conntrack-max-per-core=250000"
+ The proxy-extra-args conntrack-min and conntrack-max-per-core can be set to 0 to ignore
+ kube-proxy's settings and use the sysctl settings instead. Note the fundamental difference between
+ the setting of conntrack-max-per-core vs nf_conntrack_max.
+ "proxy-extra-args":
+ "type": "string"
+ "default": ""
+ "description": |
+ Space separated list of flags and key=value pairs that will be passed as arguments to
+ kube-proxy. For example a value like this:
+ runtime-config=batch/v2alpha1=true profiling=true
+    will result in kube-proxy being run with the following options:
+ --runtime-config=batch/v2alpha1=true --profiling=true
+ "ingress":
+ "type": "boolean"
+ "default": !!bool "true"
+ "description": |
+ Deploy the default http backend and ingress controller to handle
+ ingress requests.
+
+ Set to false if deploying an alternate ingress controller, and note
+ that you may need to manually open ports 80 and 443 on the nodes:
+ juju run --application kubernetes-worker -- open-port 80 && open-port 443
+ "labels":
+ "type": "string"
+ "default": ""
+ "description": |
+ Labels can be used to organize and to select subsets of nodes in the
+ cluster. Declare node labels in key=value format, separated by spaces.
+ "allow-privileged":
+ "type": "string"
+ "default": "true"
+ "description": |
+ This option is now deprecated and has no effect.
+ "channel":
+ "type": "string"
+ "default": "1.21/stable"
+ "description": |
+ Snap channel to install Kubernetes worker services from
+ "require-manual-upgrade":
+ "type": "boolean"
+ "default": !!bool "true"
+ "description": |
+ When true, worker services will not be upgraded until the user triggers
+ it manually by running the upgrade action.
+ "kubelet-extra-args":
+ "type": "string"
+ "default": ""
+ "description": |
+ Space separated list of flags and key=value pairs that will be passed as arguments to
+ kubelet. For example a value like this:
+ runtime-config=batch/v2alpha1=true profiling=true
+ will result in kubelet being run with the following options:
+ --runtime-config=batch/v2alpha1=true --profiling=true
+ Note: As of Kubernetes 1.10.x, many of Kubelet's args have been deprecated, and can
+ be set with kubelet-extra-config instead.
+ "ingress-default-ssl-certificate":
+ "type": "string"
+ "default": ""
+ "description": |
+ SSL certificate to be used by the default HTTPS server. If one of the
+ flag ingress-default-ssl-certificate or ingress-default-ssl-key is not
+ provided ingress will use a self-signed certificate. This parameter is
+ specific to nginx-ingress-controller.
+ "ingress-default-ssl-key":
+ "type": "string"
+ "default": ""
+ "description": |
+ Private key to be used by the default HTTPS server. If one of the flag
+ ingress-default-ssl-certificate or ingress-default-ssl-key is not
+ provided ingress will use a self-signed certificate. This parameter is
+ specific to nginx-ingress-controller.
+ "ingress-ssl-passthrough":
+ "type": "boolean"
+ "default": !!bool "false"
+ "description": |
+ Enable ssl passthrough on ingress server. This allows passing the ssl
+ connection through to the workloads and not terminating it at the ingress
+ controller.
+ "ingress-ssl-chain-completion":
+ "type": "boolean"
+ "default": !!bool "false"
+ "description": |
+ Enable chain completion for TLS certificates used by the nginx ingress
+ controller. Set this to true if you would like the ingress controller
+ to attempt auto-retrieval of intermediate certificates. The default
+ (false) is recommended for all production kubernetes installations, and
+ any environment which does not have outbound Internet access.
+ "ingress-use-forwarded-headers":
+ "type": "boolean"
+ "default": !!bool "false"
+ "description": |
+ If true, NGINX passes the incoming X-Forwarded-* headers to upstreams. Use this
+ option when NGINX is behind another L7 proxy / load balancer that is setting
+ these headers.
+
+ If false, NGINX ignores incoming X-Forwarded-* headers, filling them with the
+ request information it sees. Use this option if NGINX is exposed directly to
+ the internet, or it's behind a L3/packet-based load balancer that doesn't alter
+ the source IP in the packets.
+
+ Reference: https://github.com/kubernetes/ingress-nginx/blob/a9c706be12a8be418c49ab1f60a02f52f9b14e55/
+ docs/user-guide/nginx-configuration/configmap.md#use-forwarded-headers.
+ "nginx-image":
+ "type": "string"
+ "default": "auto"
+ "description": |
+ Docker image to use for the nginx ingress controller. Using "auto" will select
+ an image based on architecture.
+
+ Example:
+ quay.io/kubernetes-ingress-controller/nginx-ingress-controller-amd64:0.32.0
+ "default-backend-image":
+ "type": "string"
+ "default": "auto"
+ "description": |
+ Docker image to use for the default backend. Auto will select an image
+ based on architecture.
+ "kubelet-extra-config":
+ "default": "{}"
+ "type": "string"
+ "description": |
+ Extra configuration to be passed to kubelet. Any values specified in this
+ config will be merged into a KubeletConfiguration file that is passed to
+ the kubelet service via the --config flag. This can be used to override
+ values provided by the charm.
+
+ Requires Kubernetes 1.10+.
+
+ The value for this config must be a YAML mapping that can be safely
+ merged with a KubeletConfiguration file. For example:
+ {evictionHard: {memory.available: 200Mi}}
+
+ For more information about KubeletConfiguration, see upstream docs:
+ https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/
diff --git a/kubernetes-worker/copyright b/kubernetes-worker/copyright
new file mode 100644
index 0000000..ac5e525
--- /dev/null
+++ b/kubernetes-worker/copyright
@@ -0,0 +1,13 @@
+Copyright 2016 The Kubernetes Authors.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/kubernetes-worker/copyright.layer-basic b/kubernetes-worker/copyright.layer-basic
new file mode 100644
index 0000000..d4fdd18
--- /dev/null
+++ b/kubernetes-worker/copyright.layer-basic
@@ -0,0 +1,16 @@
+Format: http://dep.debian.net/deps/dep5/
+
+Files: *
+Copyright: Copyright 2015-2017, Canonical Ltd., All Rights Reserved.
+License: Apache License 2.0
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+ .
+ http://www.apache.org/licenses/LICENSE-2.0
+ .
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/kubernetes-worker/copyright.layer-coordinator b/kubernetes-worker/copyright.layer-coordinator
new file mode 100644
index 0000000..b8518aa
--- /dev/null
+++ b/kubernetes-worker/copyright.layer-coordinator
@@ -0,0 +1,15 @@
+Copyright 2015-2016 Canonical Ltd.
+
+This file is part of the Coordinator Layer for Juju.
+
+This program is free software: you can redistribute it and/or modify
+it under the terms of the GNU General Public License version 3, as
+published by the Free Software Foundation.
+
+This program is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranties of
+MERCHANTABILITY, SATISFACTORY QUALITY, or FITNESS FOR A PARTICULAR
+PURPOSE. See the GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with this program. If not, see <http://www.gnu.org/licenses/>.
diff --git a/kubernetes-worker/copyright.layer-leadership b/kubernetes-worker/copyright.layer-leadership
new file mode 100644
index 0000000..08b983f
--- /dev/null
+++ b/kubernetes-worker/copyright.layer-leadership
@@ -0,0 +1,15 @@
+Copyright 2015-2016 Canonical Ltd.
+
+This file is part of the Leadership Layer for Juju.
+
+This program is free software: you can redistribute it and/or modify
+it under the terms of the GNU General Public License version 3, as
+published by the Free Software Foundation.
+
+This program is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranties of
+MERCHANTABILITY, SATISFACTORY QUALITY, or FITNESS FOR A PARTICULAR
+PURPOSE. See the GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with this program. If not, see <http://www.gnu.org/licenses/>.
diff --git a/kubernetes-worker/copyright.layer-metrics b/kubernetes-worker/copyright.layer-metrics
new file mode 100644
index 0000000..2df15bd
--- /dev/null
+++ b/kubernetes-worker/copyright.layer-metrics
@@ -0,0 +1,13 @@
+Copyright 2016 Canonical Ltd
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/kubernetes-worker/copyright.layer-nagios b/kubernetes-worker/copyright.layer-nagios
new file mode 100644
index 0000000..c80db95
--- /dev/null
+++ b/kubernetes-worker/copyright.layer-nagios
@@ -0,0 +1,16 @@
+Format: http://dep.debian.net/deps/dep5/
+
+Files: *
+Copyright: Copyright 2016, Canonical Ltd.
+License: GPL-3
+ This program is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License version 3, as
+ published by the Free Software Foundation.
+ .
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranties of
+ MERCHANTABILITY, SATISFACTORY QUALITY, or FITNESS FOR A PARTICULAR
+ PURPOSE. See the GNU General Public License for more details.
+ .
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see <http://www.gnu.org/licenses/>.
diff --git a/kubernetes-worker/copyright.layer-options b/kubernetes-worker/copyright.layer-options
new file mode 100644
index 0000000..d4fdd18
--- /dev/null
+++ b/kubernetes-worker/copyright.layer-options
@@ -0,0 +1,16 @@
+Format: http://dep.debian.net/deps/dep5/
+
+Files: *
+Copyright: Copyright 2015-2017, Canonical Ltd., All Rights Reserved.
+License: Apache License 2.0
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+ .
+ http://www.apache.org/licenses/LICENSE-2.0
+ .
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/kubernetes-worker/copyright.layer-snap b/kubernetes-worker/copyright.layer-snap
new file mode 100644
index 0000000..261eeb9
--- /dev/null
+++ b/kubernetes-worker/copyright.layer-snap
@@ -0,0 +1,201 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/kubernetes-worker/debug-scripts/charm-unitdata b/kubernetes-worker/debug-scripts/charm-unitdata
new file mode 100755
index 0000000..d2aac60
--- /dev/null
+++ b/kubernetes-worker/debug-scripts/charm-unitdata
@@ -0,0 +1,12 @@
+#!/usr/local/sbin/charm-env python3
+
+import debug_script
+import json
+from charmhelpers.core import unitdata
+
+kv = unitdata.kv()
+data = kv.getrange("")
+
+with debug_script.open_file("unitdata.json", "w") as f:
+ json.dump(data, f, indent=2)
+ f.write("\n")
diff --git a/kubernetes-worker/debug-scripts/filesystem b/kubernetes-worker/debug-scripts/filesystem
new file mode 100755
index 0000000..c5ec6d8
--- /dev/null
+++ b/kubernetes-worker/debug-scripts/filesystem
@@ -0,0 +1,17 @@
+#!/bin/sh
+set -ux
+
+# report file system disk space usage
+df -hT > $DEBUG_SCRIPT_DIR/df-hT
+# estimate file space usage
+du -h / 2>&1 > $DEBUG_SCRIPT_DIR/du-h
+# list the mounted filesystems
+mount > $DEBUG_SCRIPT_DIR/mount
+# list the mounted systems with ascii trees
+findmnt -A > $DEBUG_SCRIPT_DIR/findmnt
+# list block devices
+lsblk > $DEBUG_SCRIPT_DIR/lsblk
+# list open files
+lsof 2>&1 > $DEBUG_SCRIPT_DIR/lsof
+# list local system locks
+lslocks > $DEBUG_SCRIPT_DIR/lslocks
diff --git a/kubernetes-worker/debug-scripts/inotify b/kubernetes-worker/debug-scripts/inotify
new file mode 100755
index 0000000..350e20f
--- /dev/null
+++ b/kubernetes-worker/debug-scripts/inotify
@@ -0,0 +1,8 @@
+#!/bin/sh
+set -ux
+
+# We had to bump inotify limits once in the past, which is why this oddly
+# specific script lives here in kubernetes-worker.
+
+sysctl fs.inotify > $DEBUG_SCRIPT_DIR/sysctl-limits
+ls -l /proc/*/fd/* | grep inotify > $DEBUG_SCRIPT_DIR/inotify-instances
diff --git a/kubernetes-worker/debug-scripts/juju-logs b/kubernetes-worker/debug-scripts/juju-logs
new file mode 100755
index 0000000..d27c458
--- /dev/null
+++ b/kubernetes-worker/debug-scripts/juju-logs
@@ -0,0 +1,4 @@
+#!/bin/sh
+set -ux
+
+cp -v /var/log/juju/* $DEBUG_SCRIPT_DIR
diff --git a/kubernetes-worker/debug-scripts/juju-network-get b/kubernetes-worker/debug-scripts/juju-network-get
new file mode 100755
index 0000000..983c8c4
--- /dev/null
+++ b/kubernetes-worker/debug-scripts/juju-network-get
@@ -0,0 +1,21 @@
+#!/usr/local/sbin/charm-env python3
+
+import os
+import subprocess
+import yaml
+import debug_script
+
+with open('metadata.yaml') as f:
+ metadata = yaml.load(f)
+
+relations = []
+for key in ['requires', 'provides', 'peers']:
+ relations += list(metadata.get(key, {}).keys())
+
+os.mkdir(os.path.join(debug_script.dir, 'relations'))
+
+for relation in relations:
+ path = 'relations/' + relation
+ with debug_script.open_file(path, 'w') as f:
+ cmd = ['network-get', relation]
+ subprocess.call(cmd, stdout=f, stderr=subprocess.STDOUT)
diff --git a/kubernetes-worker/debug-scripts/kubectl b/kubernetes-worker/debug-scripts/kubectl
new file mode 100755
index 0000000..1192c3c
--- /dev/null
+++ b/kubernetes-worker/debug-scripts/kubectl
@@ -0,0 +1,15 @@
+#!/bin/sh
+set -ux
+
+export PATH=$PATH:/snap/bin
+
+alias kubectl="kubectl --kubeconfig=/root/cdk/kubeconfig"
+
+kubectl cluster-info > $DEBUG_SCRIPT_DIR/cluster-info
+kubectl cluster-info dump > $DEBUG_SCRIPT_DIR/cluster-info-dump
+for obj in pods svc ingress secrets pv pvc rc; do
+ kubectl describe $obj --all-namespaces > $DEBUG_SCRIPT_DIR/describe-$obj
+done
+for obj in nodes; do
+ kubectl describe $obj > $DEBUG_SCRIPT_DIR/describe-$obj
+done
diff --git a/kubernetes-worker/debug-scripts/kubernetes-worker-services b/kubernetes-worker/debug-scripts/kubernetes-worker-services
new file mode 100755
index 0000000..4f9dfa2
--- /dev/null
+++ b/kubernetes-worker/debug-scripts/kubernetes-worker-services
@@ -0,0 +1,9 @@
+#!/bin/sh
+set -ux
+
+for service in kubelet kube-proxy; do
+ systemctl status snap.$service.daemon > $DEBUG_SCRIPT_DIR/$service-systemctl-status
+ journalctl -u snap.$service.daemon > $DEBUG_SCRIPT_DIR/$service-journal
+done
+
+# FIXME: get the snap config or something
diff --git a/kubernetes-worker/debug-scripts/network b/kubernetes-worker/debug-scripts/network
new file mode 100755
index 0000000..944a355
--- /dev/null
+++ b/kubernetes-worker/debug-scripts/network
@@ -0,0 +1,11 @@
+#!/bin/sh
+set -ux
+
+ifconfig -a > $DEBUG_SCRIPT_DIR/ifconfig
+cp -v /etc/resolv.conf $DEBUG_SCRIPT_DIR/resolv.conf
+cp -v /etc/network/interfaces $DEBUG_SCRIPT_DIR/interfaces
+netstat -planut > $DEBUG_SCRIPT_DIR/netstat
+route -n > $DEBUG_SCRIPT_DIR/route
+iptables-save > $DEBUG_SCRIPT_DIR/iptables-save
+dig google.com > $DEBUG_SCRIPT_DIR/dig-google
+ping -w 2 -i 0.1 google.com > $DEBUG_SCRIPT_DIR/ping-google
diff --git a/kubernetes-worker/debug-scripts/packages b/kubernetes-worker/debug-scripts/packages
new file mode 100755
index 0000000..b60a9cf
--- /dev/null
+++ b/kubernetes-worker/debug-scripts/packages
@@ -0,0 +1,7 @@
+#!/bin/sh
+set -ux
+
+dpkg --list > $DEBUG_SCRIPT_DIR/dpkg-list
+snap list > $DEBUG_SCRIPT_DIR/snap-list
+pip2 list > $DEBUG_SCRIPT_DIR/pip2-list
+pip3 list > $DEBUG_SCRIPT_DIR/pip3-list
diff --git a/kubernetes-worker/debug-scripts/sysctl b/kubernetes-worker/debug-scripts/sysctl
new file mode 100755
index 0000000..a86a6c8
--- /dev/null
+++ b/kubernetes-worker/debug-scripts/sysctl
@@ -0,0 +1,4 @@
+#!/bin/sh
+set -ux
+
+sysctl -a > $DEBUG_SCRIPT_DIR/sysctl
diff --git a/kubernetes-worker/debug-scripts/systemd b/kubernetes-worker/debug-scripts/systemd
new file mode 100755
index 0000000..8bb9b6f
--- /dev/null
+++ b/kubernetes-worker/debug-scripts/systemd
@@ -0,0 +1,9 @@
+#!/bin/sh
+set -ux
+
+systemctl --all > $DEBUG_SCRIPT_DIR/systemctl
+journalctl > $DEBUG_SCRIPT_DIR/journalctl
+systemd-analyze time > $DEBUG_SCRIPT_DIR/systemd-analyze-time
+systemd-analyze blame > $DEBUG_SCRIPT_DIR/systemd-analyze-blame
+systemd-analyze critical-chain > $DEBUG_SCRIPT_DIR/systemd-analyze-critical-chain
+systemd-analyze dump > $DEBUG_SCRIPT_DIR/systemd-analyze-dump
diff --git a/kubernetes-worker/debug-scripts/tls-certs b/kubernetes-worker/debug-scripts/tls-certs
new file mode 100755
index 0000000..2692e51
--- /dev/null
+++ b/kubernetes-worker/debug-scripts/tls-certs
@@ -0,0 +1,21 @@
+#!/usr/local/sbin/charm-env python3
+
+import os
+import shutil
+import traceback
+import debug_script
+from charms import layer
+
+options = layer.options.get('tls-client')
+
+def copy_cert(source_key, name):
+ try:
+ source = options[source_key]
+ dest = os.path.join(debug_script.dir, name)
+ shutil.copy(source, dest)
+ except Exception:
+ traceback.print_exc()
+
+copy_cert('client_certificate_path', 'client.crt')
+copy_cert('server_certificate_path', 'server.crt')
+copy_cert('ca_certificate_path', 'ca.crt')
diff --git a/kubernetes-worker/exec.d/docker-compose/charm-pre-install b/kubernetes-worker/exec.d/docker-compose/charm-pre-install
new file mode 100644
index 0000000..f0202c5
--- /dev/null
+++ b/kubernetes-worker/exec.d/docker-compose/charm-pre-install
@@ -0,0 +1,4 @@
+#!/usr/bin/env bash
+
+# This stubs out charm-pre-install coming from layer-docker as a workaround for
+# offline installs until https://github.com/juju/charm-tools/issues/301 is fixed.
diff --git a/kubernetes-worker/exec.d/vmware-patch/charm-pre-install b/kubernetes-worker/exec.d/vmware-patch/charm-pre-install
new file mode 100755
index 0000000..b5e6d97
--- /dev/null
+++ b/kubernetes-worker/exec.d/vmware-patch/charm-pre-install
@@ -0,0 +1,17 @@
+#!/usr/bin/env bash
+MY_HOSTNAME=$(hostname)
+
+: ${JUJU_UNIT_NAME:=`uuidgen`}
+
+
+if [ "${MY_HOSTNAME}" == "ubuntuguest" ]; then
+ juju-log "Detected broken vsphere integration. Applying hostname override"
+
+ FRIENDLY_HOSTNAME=$(echo $JUJU_UNIT_NAME | tr / -)
+ juju-log "Setting hostname to $FRIENDLY_HOSTNAME"
+ if [ ! -f /etc/hostname.orig ]; then
+ mv /etc/hostname /etc/hostname.orig
+ fi
+ echo "${FRIENDLY_HOSTNAME}" > /etc/hostname
+ hostname $FRIENDLY_HOSTNAME
+fi
diff --git a/kubernetes-worker/hooks/aws-relation-broken b/kubernetes-worker/hooks/aws-relation-broken
new file mode 100755
index 0000000..9858c6b
--- /dev/null
+++ b/kubernetes-worker/hooks/aws-relation-broken
@@ -0,0 +1,22 @@
+#!/usr/bin/env python3
+
+# Load modules from $JUJU_CHARM_DIR/lib
+import sys
+sys.path.append('lib')
+
+from charms.layer import basic # noqa
+basic.bootstrap_charm_deps()
+
+from charmhelpers.core import hookenv # noqa
+hookenv.atstart(basic.init_config_states)
+hookenv.atexit(basic.clear_config_states)
+
+
+# This will load and run the appropriate @hook and other decorated
+# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive,
+# and $JUJU_CHARM_DIR/hooks/relations.
+#
+# See https://jujucharms.com/docs/stable/authors-charm-building
+# for more information on this pattern.
+from charms.reactive import main # noqa
+main()
diff --git a/kubernetes-worker/hooks/aws-relation-changed b/kubernetes-worker/hooks/aws-relation-changed
new file mode 100755
index 0000000..9858c6b
--- /dev/null
+++ b/kubernetes-worker/hooks/aws-relation-changed
@@ -0,0 +1,22 @@
+#!/usr/bin/env python3
+
+# Load modules from $JUJU_CHARM_DIR/lib
+import sys
+sys.path.append('lib')
+
+from charms.layer import basic # noqa
+basic.bootstrap_charm_deps()
+
+from charmhelpers.core import hookenv # noqa
+hookenv.atstart(basic.init_config_states)
+hookenv.atexit(basic.clear_config_states)
+
+
+# This will load and run the appropriate @hook and other decorated
+# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive,
+# and $JUJU_CHARM_DIR/hooks/relations.
+#
+# See https://jujucharms.com/docs/stable/authors-charm-building
+# for more information on this pattern.
+from charms.reactive import main # noqa
+main()
diff --git a/kubernetes-worker/hooks/aws-relation-created b/kubernetes-worker/hooks/aws-relation-created
new file mode 100755
index 0000000..9858c6b
--- /dev/null
+++ b/kubernetes-worker/hooks/aws-relation-created
@@ -0,0 +1,22 @@
+#!/usr/bin/env python3
+
+# Load modules from $JUJU_CHARM_DIR/lib
+import sys
+sys.path.append('lib')
+
+from charms.layer import basic # noqa
+basic.bootstrap_charm_deps()
+
+from charmhelpers.core import hookenv # noqa
+hookenv.atstart(basic.init_config_states)
+hookenv.atexit(basic.clear_config_states)
+
+
+# This will load and run the appropriate @hook and other decorated
+# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive,
+# and $JUJU_CHARM_DIR/hooks/relations.
+#
+# See https://jujucharms.com/docs/stable/authors-charm-building
+# for more information on this pattern.
+from charms.reactive import main # noqa
+main()
diff --git a/kubernetes-worker/hooks/aws-relation-departed b/kubernetes-worker/hooks/aws-relation-departed
new file mode 100755
index 0000000..9858c6b
--- /dev/null
+++ b/kubernetes-worker/hooks/aws-relation-departed
@@ -0,0 +1,22 @@
+#!/usr/bin/env python3
+
+# Load modules from $JUJU_CHARM_DIR/lib
+import sys
+sys.path.append('lib')
+
+from charms.layer import basic # noqa
+basic.bootstrap_charm_deps()
+
+from charmhelpers.core import hookenv # noqa
+hookenv.atstart(basic.init_config_states)
+hookenv.atexit(basic.clear_config_states)
+
+
+# This will load and run the appropriate @hook and other decorated
+# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive,
+# and $JUJU_CHARM_DIR/hooks/relations.
+#
+# See https://jujucharms.com/docs/stable/authors-charm-building
+# for more information on this pattern.
+from charms.reactive import main # noqa
+main()
diff --git a/kubernetes-worker/hooks/aws-relation-joined b/kubernetes-worker/hooks/aws-relation-joined
new file mode 100755
index 0000000..9858c6b
--- /dev/null
+++ b/kubernetes-worker/hooks/aws-relation-joined
@@ -0,0 +1,22 @@
+#!/usr/bin/env python3
+
+# Load modules from $JUJU_CHARM_DIR/lib
+import sys
+sys.path.append('lib')
+
+from charms.layer import basic # noqa
+basic.bootstrap_charm_deps()
+
+from charmhelpers.core import hookenv # noqa
+hookenv.atstart(basic.init_config_states)
+hookenv.atexit(basic.clear_config_states)
+
+
+# This will load and run the appropriate @hook and other decorated
+# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive,
+# and $JUJU_CHARM_DIR/hooks/relations.
+#
+# See https://jujucharms.com/docs/stable/authors-charm-building
+# for more information on this pattern.
+from charms.reactive import main # noqa
+main()
diff --git a/kubernetes-worker/hooks/azure-relation-broken b/kubernetes-worker/hooks/azure-relation-broken
new file mode 100755
index 0000000..9858c6b
--- /dev/null
+++ b/kubernetes-worker/hooks/azure-relation-broken
@@ -0,0 +1,22 @@
+#!/usr/bin/env python3
+
+# Load modules from $JUJU_CHARM_DIR/lib
+import sys
+sys.path.append('lib')
+
+from charms.layer import basic # noqa
+basic.bootstrap_charm_deps()
+
+from charmhelpers.core import hookenv # noqa
+hookenv.atstart(basic.init_config_states)
+hookenv.atexit(basic.clear_config_states)
+
+
+# This will load and run the appropriate @hook and other decorated
+# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive,
+# and $JUJU_CHARM_DIR/hooks/relations.
+#
+# See https://jujucharms.com/docs/stable/authors-charm-building
+# for more information on this pattern.
+from charms.reactive import main # noqa
+main()
diff --git a/kubernetes-worker/hooks/azure-relation-changed b/kubernetes-worker/hooks/azure-relation-changed
new file mode 100755
index 0000000..9858c6b
--- /dev/null
+++ b/kubernetes-worker/hooks/azure-relation-changed
@@ -0,0 +1,22 @@
+#!/usr/bin/env python3
+
+# Load modules from $JUJU_CHARM_DIR/lib
+import sys
+sys.path.append('lib')
+
+from charms.layer import basic # noqa
+basic.bootstrap_charm_deps()
+
+from charmhelpers.core import hookenv # noqa
+hookenv.atstart(basic.init_config_states)
+hookenv.atexit(basic.clear_config_states)
+
+
+# This will load and run the appropriate @hook and other decorated
+# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive,
+# and $JUJU_CHARM_DIR/hooks/relations.
+#
+# See https://jujucharms.com/docs/stable/authors-charm-building
+# for more information on this pattern.
+from charms.reactive import main # noqa
+main()
diff --git a/kubernetes-worker/hooks/azure-relation-created b/kubernetes-worker/hooks/azure-relation-created
new file mode 100755
index 0000000..9858c6b
--- /dev/null
+++ b/kubernetes-worker/hooks/azure-relation-created
@@ -0,0 +1,22 @@
+#!/usr/bin/env python3
+
+# Load modules from $JUJU_CHARM_DIR/lib
+import sys
+sys.path.append('lib')
+
+from charms.layer import basic # noqa
+basic.bootstrap_charm_deps()
+
+from charmhelpers.core import hookenv # noqa
+hookenv.atstart(basic.init_config_states)
+hookenv.atexit(basic.clear_config_states)
+
+
+# This will load and run the appropriate @hook and other decorated
+# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive,
+# and $JUJU_CHARM_DIR/hooks/relations.
+#
+# See https://jujucharms.com/docs/stable/authors-charm-building
+# for more information on this pattern.
+from charms.reactive import main # noqa
+main()
diff --git a/kubernetes-worker/hooks/azure-relation-departed b/kubernetes-worker/hooks/azure-relation-departed
new file mode 100755
index 0000000..9858c6b
--- /dev/null
+++ b/kubernetes-worker/hooks/azure-relation-departed
@@ -0,0 +1,22 @@
+#!/usr/bin/env python3
+
+# Load modules from $JUJU_CHARM_DIR/lib
+import sys
+sys.path.append('lib')
+
+from charms.layer import basic # noqa
+basic.bootstrap_charm_deps()
+
+from charmhelpers.core import hookenv # noqa
+hookenv.atstart(basic.init_config_states)
+hookenv.atexit(basic.clear_config_states)
+
+
+# This will load and run the appropriate @hook and other decorated
+# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive,
+# and $JUJU_CHARM_DIR/hooks/relations.
+#
+# See https://jujucharms.com/docs/stable/authors-charm-building
+# for more information on this pattern.
+from charms.reactive import main # noqa
+main()
diff --git a/kubernetes-worker/hooks/azure-relation-joined b/kubernetes-worker/hooks/azure-relation-joined
new file mode 100755
index 0000000..9858c6b
--- /dev/null
+++ b/kubernetes-worker/hooks/azure-relation-joined
@@ -0,0 +1,22 @@
+#!/usr/bin/env python3
+
+# Load modules from $JUJU_CHARM_DIR/lib
+import sys
+sys.path.append('lib')
+
+from charms.layer import basic # noqa
+basic.bootstrap_charm_deps()
+
+from charmhelpers.core import hookenv # noqa
+hookenv.atstart(basic.init_config_states)
+hookenv.atexit(basic.clear_config_states)
+
+
+# This will load and run the appropriate @hook and other decorated
+# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive,
+# and $JUJU_CHARM_DIR/hooks/relations.
+#
+# See https://jujucharms.com/docs/stable/authors-charm-building
+# for more information on this pattern.
+from charms.reactive import main # noqa
+main()
diff --git a/kubernetes-worker/hooks/certificates-relation-broken b/kubernetes-worker/hooks/certificates-relation-broken
new file mode 100755
index 0000000..9858c6b
--- /dev/null
+++ b/kubernetes-worker/hooks/certificates-relation-broken
@@ -0,0 +1,22 @@
+#!/usr/bin/env python3
+
+# Load modules from $JUJU_CHARM_DIR/lib
+import sys
+sys.path.append('lib')
+
+from charms.layer import basic # noqa
+basic.bootstrap_charm_deps()
+
+from charmhelpers.core import hookenv # noqa
+hookenv.atstart(basic.init_config_states)
+hookenv.atexit(basic.clear_config_states)
+
+
+# This will load and run the appropriate @hook and other decorated
+# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive,
+# and $JUJU_CHARM_DIR/hooks/relations.
+#
+# See https://jujucharms.com/docs/stable/authors-charm-building
+# for more information on this pattern.
+from charms.reactive import main # noqa
+main()
diff --git a/kubernetes-worker/hooks/certificates-relation-changed b/kubernetes-worker/hooks/certificates-relation-changed
new file mode 100755
index 0000000..9858c6b
--- /dev/null
+++ b/kubernetes-worker/hooks/certificates-relation-changed
@@ -0,0 +1,22 @@
+#!/usr/bin/env python3
+
+# Load modules from $JUJU_CHARM_DIR/lib
+import sys
+sys.path.append('lib')
+
+from charms.layer import basic # noqa
+basic.bootstrap_charm_deps()
+
+from charmhelpers.core import hookenv # noqa
+hookenv.atstart(basic.init_config_states)
+hookenv.atexit(basic.clear_config_states)
+
+
+# This will load and run the appropriate @hook and other decorated
+# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive,
+# and $JUJU_CHARM_DIR/hooks/relations.
+#
+# See https://jujucharms.com/docs/stable/authors-charm-building
+# for more information on this pattern.
+from charms.reactive import main # noqa
+main()
diff --git a/kubernetes-worker/hooks/certificates-relation-created b/kubernetes-worker/hooks/certificates-relation-created
new file mode 100755
index 0000000..9858c6b
--- /dev/null
+++ b/kubernetes-worker/hooks/certificates-relation-created
@@ -0,0 +1,22 @@
+#!/usr/bin/env python3
+
+# Load modules from $JUJU_CHARM_DIR/lib
+import sys
+sys.path.append('lib')
+
+from charms.layer import basic # noqa
+basic.bootstrap_charm_deps()
+
+from charmhelpers.core import hookenv # noqa
+hookenv.atstart(basic.init_config_states)
+hookenv.atexit(basic.clear_config_states)
+
+
+# This will load and run the appropriate @hook and other decorated
+# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive,
+# and $JUJU_CHARM_DIR/hooks/relations.
+#
+# See https://jujucharms.com/docs/stable/authors-charm-building
+# for more information on this pattern.
+from charms.reactive import main # noqa
+main()
diff --git a/kubernetes-worker/hooks/certificates-relation-departed b/kubernetes-worker/hooks/certificates-relation-departed
new file mode 100755
index 0000000..9858c6b
--- /dev/null
+++ b/kubernetes-worker/hooks/certificates-relation-departed
@@ -0,0 +1,22 @@
+#!/usr/bin/env python3
+
+# Load modules from $JUJU_CHARM_DIR/lib
+import sys
+sys.path.append('lib')
+
+from charms.layer import basic # noqa
+basic.bootstrap_charm_deps()
+
+from charmhelpers.core import hookenv # noqa
+hookenv.atstart(basic.init_config_states)
+hookenv.atexit(basic.clear_config_states)
+
+
+# This will load and run the appropriate @hook and other decorated
+# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive,
+# and $JUJU_CHARM_DIR/hooks/relations.
+#
+# See https://jujucharms.com/docs/stable/authors-charm-building
+# for more information on this pattern.
+from charms.reactive import main # noqa
+main()
diff --git a/kubernetes-worker/hooks/certificates-relation-joined b/kubernetes-worker/hooks/certificates-relation-joined
new file mode 100755
index 0000000..9858c6b
--- /dev/null
+++ b/kubernetes-worker/hooks/certificates-relation-joined
@@ -0,0 +1,22 @@
+#!/usr/bin/env python3
+
+# Load modules from $JUJU_CHARM_DIR/lib
+import sys
+sys.path.append('lib')
+
+from charms.layer import basic # noqa
+basic.bootstrap_charm_deps()
+
+from charmhelpers.core import hookenv # noqa
+hookenv.atstart(basic.init_config_states)
+hookenv.atexit(basic.clear_config_states)
+
+
+# This will load and run the appropriate @hook and other decorated
+# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive,
+# and $JUJU_CHARM_DIR/hooks/relations.
+#
+# See https://jujucharms.com/docs/stable/authors-charm-building
+# for more information on this pattern.
+from charms.reactive import main # noqa
+main()
diff --git a/kubernetes-worker/hooks/cni-relation-broken b/kubernetes-worker/hooks/cni-relation-broken
new file mode 100755
index 0000000..9858c6b
--- /dev/null
+++ b/kubernetes-worker/hooks/cni-relation-broken
@@ -0,0 +1,22 @@
+#!/usr/bin/env python3
+
+# Load modules from $JUJU_CHARM_DIR/lib
+import sys
+sys.path.append('lib')
+
+from charms.layer import basic # noqa
+basic.bootstrap_charm_deps()
+
+from charmhelpers.core import hookenv # noqa
+hookenv.atstart(basic.init_config_states)
+hookenv.atexit(basic.clear_config_states)
+
+
+# This will load and run the appropriate @hook and other decorated
+# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive,
+# and $JUJU_CHARM_DIR/hooks/relations.
+#
+# See https://jujucharms.com/docs/stable/authors-charm-building
+# for more information on this pattern.
+from charms.reactive import main # noqa
+main()
diff --git a/kubernetes-worker/hooks/cni-relation-changed b/kubernetes-worker/hooks/cni-relation-changed
new file mode 100755
index 0000000..9858c6b
--- /dev/null
+++ b/kubernetes-worker/hooks/cni-relation-changed
@@ -0,0 +1,22 @@
+#!/usr/bin/env python3
+
+# Load modules from $JUJU_CHARM_DIR/lib
+import sys
+sys.path.append('lib')
+
+from charms.layer import basic # noqa
+basic.bootstrap_charm_deps()
+
+from charmhelpers.core import hookenv # noqa
+hookenv.atstart(basic.init_config_states)
+hookenv.atexit(basic.clear_config_states)
+
+
+# This will load and run the appropriate @hook and other decorated
+# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive,
+# and $JUJU_CHARM_DIR/hooks/relations.
+#
+# See https://jujucharms.com/docs/stable/authors-charm-building
+# for more information on this pattern.
+from charms.reactive import main # noqa
+main()
diff --git a/kubernetes-worker/hooks/cni-relation-created b/kubernetes-worker/hooks/cni-relation-created
new file mode 100755
index 0000000..9858c6b
--- /dev/null
+++ b/kubernetes-worker/hooks/cni-relation-created
@@ -0,0 +1,22 @@
+#!/usr/bin/env python3
+
+# Load modules from $JUJU_CHARM_DIR/lib
+import sys
+sys.path.append('lib')
+
+from charms.layer import basic # noqa
+basic.bootstrap_charm_deps()
+
+from charmhelpers.core import hookenv # noqa
+hookenv.atstart(basic.init_config_states)
+hookenv.atexit(basic.clear_config_states)
+
+
+# This will load and run the appropriate @hook and other decorated
+# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive,
+# and $JUJU_CHARM_DIR/hooks/relations.
+#
+# See https://jujucharms.com/docs/stable/authors-charm-building
+# for more information on this pattern.
+from charms.reactive import main # noqa
+main()
diff --git a/kubernetes-worker/hooks/cni-relation-departed b/kubernetes-worker/hooks/cni-relation-departed
new file mode 100755
index 0000000..9858c6b
--- /dev/null
+++ b/kubernetes-worker/hooks/cni-relation-departed
@@ -0,0 +1,22 @@
+#!/usr/bin/env python3
+
+# Load modules from $JUJU_CHARM_DIR/lib
+import sys
+sys.path.append('lib')
+
+from charms.layer import basic # noqa
+basic.bootstrap_charm_deps()
+
+from charmhelpers.core import hookenv # noqa
+hookenv.atstart(basic.init_config_states)
+hookenv.atexit(basic.clear_config_states)
+
+
+# This will load and run the appropriate @hook and other decorated
+# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive,
+# and $JUJU_CHARM_DIR/hooks/relations.
+#
+# See https://jujucharms.com/docs/stable/authors-charm-building
+# for more information on this pattern.
+from charms.reactive import main # noqa
+main()
diff --git a/kubernetes-worker/hooks/cni-relation-joined b/kubernetes-worker/hooks/cni-relation-joined
new file mode 100755
index 0000000..9858c6b
--- /dev/null
+++ b/kubernetes-worker/hooks/cni-relation-joined
@@ -0,0 +1,22 @@
+#!/usr/bin/env python3
+
+# Load modules from $JUJU_CHARM_DIR/lib
+import sys
+sys.path.append('lib')
+
+from charms.layer import basic # noqa
+basic.bootstrap_charm_deps()
+
+from charmhelpers.core import hookenv # noqa
+hookenv.atstart(basic.init_config_states)
+hookenv.atexit(basic.clear_config_states)
+
+
+# This will load and run the appropriate @hook and other decorated
+# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive,
+# and $JUJU_CHARM_DIR/hooks/relations.
+#
+# See https://jujucharms.com/docs/stable/authors-charm-building
+# for more information on this pattern.
+from charms.reactive import main # noqa
+main()
diff --git a/kubernetes-worker/hooks/collect-metrics b/kubernetes-worker/hooks/collect-metrics
new file mode 100755
index 0000000..8a27863
--- /dev/null
+++ b/kubernetes-worker/hooks/collect-metrics
@@ -0,0 +1,46 @@
+#!/usr/bin/env python3
+
+# Load modules from $JUJU_CHARM_DIR/lib
+import sys
+sys.path.append('lib')
+
+import yaml
+import os
+from subprocess import check_output, check_call, CalledProcessError
+
+
+def build_command(doc):
+ values = {}
+ metrics = doc.get("metrics", {})
+ for metric, mdoc in metrics.items():
+ if not mdoc:
+ continue
+ cmd = mdoc.get("command")
+ if cmd:
+ try:
+ value = check_output(cmd, shell=True, universal_newlines=True)
+ except CalledProcessError as e:
+ check_call(['juju-log', '-lERROR',
+ 'Error collecting metric {}:\n{}'.format(
+ metric, e.output)])
+ continue
+ value = value.strip()
+ if value:
+ values[metric] = value
+
+ if not values:
+ return None
+ command = ["add-metric"]
+ for metric, value in values.items():
+ command.append("%s=%s" % (metric, value))
+ return command
+
+
+if __name__ == '__main__':
+ charm_dir = os.path.dirname(os.path.abspath(os.path.join(__file__, "..")))
+ metrics_yaml = os.path.join(charm_dir, "metrics.yaml")
+ with open(metrics_yaml) as f:
+        doc = yaml.safe_load(f)
+ command = build_command(doc)
+ if command:
+ check_call(command)
diff --git a/kubernetes-worker/hooks/config-changed b/kubernetes-worker/hooks/config-changed
new file mode 100755
index 0000000..9858c6b
--- /dev/null
+++ b/kubernetes-worker/hooks/config-changed
@@ -0,0 +1,22 @@
+#!/usr/bin/env python3
+
+# Load modules from $JUJU_CHARM_DIR/lib
+import sys
+sys.path.append('lib')
+
+from charms.layer import basic # noqa
+basic.bootstrap_charm_deps()
+
+from charmhelpers.core import hookenv # noqa
+hookenv.atstart(basic.init_config_states)
+hookenv.atexit(basic.clear_config_states)
+
+
+# This will load and run the appropriate @hook and other decorated
+# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive,
+# and $JUJU_CHARM_DIR/hooks/relations.
+#
+# See https://jujucharms.com/docs/stable/authors-charm-building
+# for more information on this pattern.
+from charms.reactive import main # noqa
+main()
diff --git a/kubernetes-worker/hooks/container-runtime-relation-broken b/kubernetes-worker/hooks/container-runtime-relation-broken
new file mode 100755
index 0000000..9858c6b
--- /dev/null
+++ b/kubernetes-worker/hooks/container-runtime-relation-broken
@@ -0,0 +1,22 @@
+#!/usr/bin/env python3
+
+# Load modules from $JUJU_CHARM_DIR/lib
+import sys
+sys.path.append('lib')
+
+from charms.layer import basic # noqa
+basic.bootstrap_charm_deps()
+
+from charmhelpers.core import hookenv # noqa
+hookenv.atstart(basic.init_config_states)
+hookenv.atexit(basic.clear_config_states)
+
+
+# This will load and run the appropriate @hook and other decorated
+# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive,
+# and $JUJU_CHARM_DIR/hooks/relations.
+#
+# See https://jujucharms.com/docs/stable/authors-charm-building
+# for more information on this pattern.
+from charms.reactive import main # noqa
+main()
diff --git a/kubernetes-worker/hooks/container-runtime-relation-changed b/kubernetes-worker/hooks/container-runtime-relation-changed
new file mode 100755
index 0000000..9858c6b
--- /dev/null
+++ b/kubernetes-worker/hooks/container-runtime-relation-changed
@@ -0,0 +1,22 @@
+#!/usr/bin/env python3
+
+# Load modules from $JUJU_CHARM_DIR/lib
+import sys
+sys.path.append('lib')
+
+from charms.layer import basic # noqa
+basic.bootstrap_charm_deps()
+
+from charmhelpers.core import hookenv # noqa
+hookenv.atstart(basic.init_config_states)
+hookenv.atexit(basic.clear_config_states)
+
+
+# This will load and run the appropriate @hook and other decorated
+# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive,
+# and $JUJU_CHARM_DIR/hooks/relations.
+#
+# See https://jujucharms.com/docs/stable/authors-charm-building
+# for more information on this pattern.
+from charms.reactive import main # noqa
+main()
diff --git a/kubernetes-worker/hooks/container-runtime-relation-created b/kubernetes-worker/hooks/container-runtime-relation-created
new file mode 100755
index 0000000..9858c6b
--- /dev/null
+++ b/kubernetes-worker/hooks/container-runtime-relation-created
@@ -0,0 +1,22 @@
+#!/usr/bin/env python3
+
+# Load modules from $JUJU_CHARM_DIR/lib
+import sys
+sys.path.append('lib')
+
+from charms.layer import basic # noqa
+basic.bootstrap_charm_deps()
+
+from charmhelpers.core import hookenv # noqa
+hookenv.atstart(basic.init_config_states)
+hookenv.atexit(basic.clear_config_states)
+
+
+# This will load and run the appropriate @hook and other decorated
+# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive,
+# and $JUJU_CHARM_DIR/hooks/relations.
+#
+# See https://jujucharms.com/docs/stable/authors-charm-building
+# for more information on this pattern.
+from charms.reactive import main # noqa
+main()
diff --git a/kubernetes-worker/hooks/container-runtime-relation-departed b/kubernetes-worker/hooks/container-runtime-relation-departed
new file mode 100755
index 0000000..9858c6b
--- /dev/null
+++ b/kubernetes-worker/hooks/container-runtime-relation-departed
@@ -0,0 +1,22 @@
+#!/usr/bin/env python3
+
+# Load modules from $JUJU_CHARM_DIR/lib
+import sys
+sys.path.append('lib')
+
+from charms.layer import basic # noqa
+basic.bootstrap_charm_deps()
+
+from charmhelpers.core import hookenv # noqa
+hookenv.atstart(basic.init_config_states)
+hookenv.atexit(basic.clear_config_states)
+
+
+# This will load and run the appropriate @hook and other decorated
+# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive,
+# and $JUJU_CHARM_DIR/hooks/relations.
+#
+# See https://jujucharms.com/docs/stable/authors-charm-building
+# for more information on this pattern.
+from charms.reactive import main # noqa
+main()
diff --git a/kubernetes-worker/hooks/container-runtime-relation-joined b/kubernetes-worker/hooks/container-runtime-relation-joined
new file mode 100755
index 0000000..9858c6b
--- /dev/null
+++ b/kubernetes-worker/hooks/container-runtime-relation-joined
@@ -0,0 +1,22 @@
+#!/usr/bin/env python3
+
+# Load modules from $JUJU_CHARM_DIR/lib
+import sys
+sys.path.append('lib')
+
+from charms.layer import basic # noqa
+basic.bootstrap_charm_deps()
+
+from charmhelpers.core import hookenv # noqa
+hookenv.atstart(basic.init_config_states)
+hookenv.atexit(basic.clear_config_states)
+
+
+# This will load and run the appropriate @hook and other decorated
+# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive,
+# and $JUJU_CHARM_DIR/hooks/relations.
+#
+# See https://jujucharms.com/docs/stable/authors-charm-building
+# for more information on this pattern.
+from charms.reactive import main # noqa
+main()
diff --git a/kubernetes-worker/hooks/coordinator-relation-broken b/kubernetes-worker/hooks/coordinator-relation-broken
new file mode 100755
index 0000000..9858c6b
--- /dev/null
+++ b/kubernetes-worker/hooks/coordinator-relation-broken
@@ -0,0 +1,22 @@
+#!/usr/bin/env python3
+
+# Load modules from $JUJU_CHARM_DIR/lib
+import sys
+sys.path.append('lib')
+
+from charms.layer import basic # noqa
+basic.bootstrap_charm_deps()
+
+from charmhelpers.core import hookenv # noqa
+hookenv.atstart(basic.init_config_states)
+hookenv.atexit(basic.clear_config_states)
+
+
+# This will load and run the appropriate @hook and other decorated
+# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive,
+# and $JUJU_CHARM_DIR/hooks/relations.
+#
+# See https://jujucharms.com/docs/stable/authors-charm-building
+# for more information on this pattern.
+from charms.reactive import main # noqa
+main()
diff --git a/kubernetes-worker/hooks/coordinator-relation-changed b/kubernetes-worker/hooks/coordinator-relation-changed
new file mode 100755
index 0000000..fe39f65
--- /dev/null
+++ b/kubernetes-worker/hooks/coordinator-relation-changed
@@ -0,0 +1,18 @@
+#!/usr/bin/env python3
+
+# Load modules from $CHARM_DIR/lib
+import sys
+sys.path.append('lib')
+
+from charms.layer.basic import bootstrap_charm_deps
+bootstrap_charm_deps()
+
+
+# This will load and run the appropriate @hook and other decorated
+# handlers from $CHARM_DIR/reactive, $CHARM_DIR/hooks/reactive,
+# and $CHARM_DIR/hooks/relations.
+#
+# See https://jujucharms.com/docs/stable/authors-charm-building
+# for more information on this pattern.
+from charms.reactive import main
+main()
diff --git a/kubernetes-worker/hooks/coordinator-relation-created b/kubernetes-worker/hooks/coordinator-relation-created
new file mode 100755
index 0000000..9858c6b
--- /dev/null
+++ b/kubernetes-worker/hooks/coordinator-relation-created
@@ -0,0 +1,22 @@
+#!/usr/bin/env python3
+
+# Load modules from $JUJU_CHARM_DIR/lib
+import sys
+sys.path.append('lib')
+
+from charms.layer import basic # noqa
+basic.bootstrap_charm_deps()
+
+from charmhelpers.core import hookenv # noqa
+hookenv.atstart(basic.init_config_states)
+hookenv.atexit(basic.clear_config_states)
+
+
+# This will load and run the appropriate @hook and other decorated
+# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive,
+# and $JUJU_CHARM_DIR/hooks/relations.
+#
+# See https://jujucharms.com/docs/stable/authors-charm-building
+# for more information on this pattern.
+from charms.reactive import main # noqa
+main()
diff --git a/kubernetes-worker/hooks/coordinator-relation-departed b/kubernetes-worker/hooks/coordinator-relation-departed
new file mode 100755
index 0000000..fe39f65
--- /dev/null
+++ b/kubernetes-worker/hooks/coordinator-relation-departed
@@ -0,0 +1,18 @@
+#!/usr/bin/env python3
+
+# Load modules from $CHARM_DIR/lib
+import sys
+sys.path.append('lib')
+
+from charms.layer.basic import bootstrap_charm_deps
+bootstrap_charm_deps()
+
+
+# This will load and run the appropriate @hook and other decorated
+# handlers from $CHARM_DIR/reactive, $CHARM_DIR/hooks/reactive,
+# and $CHARM_DIR/hooks/relations.
+#
+# See https://jujucharms.com/docs/stable/authors-charm-building
+# for more information on this pattern.
+from charms.reactive import main
+main()
diff --git a/kubernetes-worker/hooks/coordinator-relation-joined b/kubernetes-worker/hooks/coordinator-relation-joined
new file mode 100755
index 0000000..9858c6b
--- /dev/null
+++ b/kubernetes-worker/hooks/coordinator-relation-joined
@@ -0,0 +1,22 @@
+#!/usr/bin/env python3
+
+# Load modules from $JUJU_CHARM_DIR/lib
+import sys
+sys.path.append('lib')
+
+from charms.layer import basic # noqa
+basic.bootstrap_charm_deps()
+
+from charmhelpers.core import hookenv # noqa
+hookenv.atstart(basic.init_config_states)
+hookenv.atexit(basic.clear_config_states)
+
+
+# This will load and run the appropriate @hook and other decorated
+# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive,
+# and $JUJU_CHARM_DIR/hooks/relations.
+#
+# See https://jujucharms.com/docs/stable/authors-charm-building
+# for more information on this pattern.
+from charms.reactive import main # noqa
+main()
diff --git a/kubernetes-worker/hooks/gcp-relation-broken b/kubernetes-worker/hooks/gcp-relation-broken
new file mode 100755
index 0000000..9858c6b
--- /dev/null
+++ b/kubernetes-worker/hooks/gcp-relation-broken
@@ -0,0 +1,22 @@
+#!/usr/bin/env python3
+
+# Load modules from $JUJU_CHARM_DIR/lib
+import sys
+sys.path.append('lib')
+
+from charms.layer import basic # noqa
+basic.bootstrap_charm_deps()
+
+from charmhelpers.core import hookenv # noqa
+hookenv.atstart(basic.init_config_states)
+hookenv.atexit(basic.clear_config_states)
+
+
+# This will load and run the appropriate @hook and other decorated
+# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive,
+# and $JUJU_CHARM_DIR/hooks/relations.
+#
+# See https://jujucharms.com/docs/stable/authors-charm-building
+# for more information on this pattern.
+from charms.reactive import main # noqa
+main()
diff --git a/kubernetes-worker/hooks/gcp-relation-changed b/kubernetes-worker/hooks/gcp-relation-changed
new file mode 100755
index 0000000..9858c6b
--- /dev/null
+++ b/kubernetes-worker/hooks/gcp-relation-changed
@@ -0,0 +1,22 @@
+#!/usr/bin/env python3
+
+# Load modules from $JUJU_CHARM_DIR/lib
+import sys
+sys.path.append('lib')
+
+from charms.layer import basic # noqa
+basic.bootstrap_charm_deps()
+
+from charmhelpers.core import hookenv # noqa
+hookenv.atstart(basic.init_config_states)
+hookenv.atexit(basic.clear_config_states)
+
+
+# This will load and run the appropriate @hook and other decorated
+# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive,
+# and $JUJU_CHARM_DIR/hooks/relations.
+#
+# See https://jujucharms.com/docs/stable/authors-charm-building
+# for more information on this pattern.
+from charms.reactive import main # noqa
+main()
diff --git a/kubernetes-worker/hooks/gcp-relation-created b/kubernetes-worker/hooks/gcp-relation-created
new file mode 100755
index 0000000..9858c6b
--- /dev/null
+++ b/kubernetes-worker/hooks/gcp-relation-created
@@ -0,0 +1,22 @@
+#!/usr/bin/env python3
+
+# Load modules from $JUJU_CHARM_DIR/lib
+import sys
+sys.path.append('lib')
+
+from charms.layer import basic # noqa
+basic.bootstrap_charm_deps()
+
+from charmhelpers.core import hookenv # noqa
+hookenv.atstart(basic.init_config_states)
+hookenv.atexit(basic.clear_config_states)
+
+
+# This will load and run the appropriate @hook and other decorated
+# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive,
+# and $JUJU_CHARM_DIR/hooks/relations.
+#
+# See https://jujucharms.com/docs/stable/authors-charm-building
+# for more information on this pattern.
+from charms.reactive import main # noqa
+main()
diff --git a/kubernetes-worker/hooks/gcp-relation-departed b/kubernetes-worker/hooks/gcp-relation-departed
new file mode 100755
index 0000000..9858c6b
--- /dev/null
+++ b/kubernetes-worker/hooks/gcp-relation-departed
@@ -0,0 +1,22 @@
+#!/usr/bin/env python3
+
+# Load modules from $JUJU_CHARM_DIR/lib
+import sys
+sys.path.append('lib')
+
+from charms.layer import basic # noqa
+basic.bootstrap_charm_deps()
+
+from charmhelpers.core import hookenv # noqa
+hookenv.atstart(basic.init_config_states)
+hookenv.atexit(basic.clear_config_states)
+
+
+# This will load and run the appropriate @hook and other decorated
+# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive,
+# and $JUJU_CHARM_DIR/hooks/relations.
+#
+# See https://jujucharms.com/docs/stable/authors-charm-building
+# for more information on this pattern.
+from charms.reactive import main # noqa
+main()
diff --git a/kubernetes-worker/hooks/gcp-relation-joined b/kubernetes-worker/hooks/gcp-relation-joined
new file mode 100755
index 0000000..9858c6b
--- /dev/null
+++ b/kubernetes-worker/hooks/gcp-relation-joined
@@ -0,0 +1,22 @@
+#!/usr/bin/env python3
+
+# Load modules from $JUJU_CHARM_DIR/lib
+import sys
+sys.path.append('lib')
+
+from charms.layer import basic # noqa
+basic.bootstrap_charm_deps()
+
+from charmhelpers.core import hookenv # noqa
+hookenv.atstart(basic.init_config_states)
+hookenv.atexit(basic.clear_config_states)
+
+
+# This will load and run the appropriate @hook and other decorated
+# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive,
+# and $JUJU_CHARM_DIR/hooks/relations.
+#
+# See https://jujucharms.com/docs/stable/authors-charm-building
+# for more information on this pattern.
+from charms.reactive import main # noqa
+main()
diff --git a/kubernetes-worker/hooks/hook.template b/kubernetes-worker/hooks/hook.template
new file mode 100644
index 0000000..9858c6b
--- /dev/null
+++ b/kubernetes-worker/hooks/hook.template
@@ -0,0 +1,22 @@
+#!/usr/bin/env python3
+
+# Load modules from $JUJU_CHARM_DIR/lib
+import sys
+sys.path.append('lib')
+
+from charms.layer import basic # noqa
+basic.bootstrap_charm_deps()
+
+from charmhelpers.core import hookenv # noqa
+hookenv.atstart(basic.init_config_states)
+hookenv.atexit(basic.clear_config_states)
+
+
+# This will load and run the appropriate @hook and other decorated
+# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive,
+# and $JUJU_CHARM_DIR/hooks/relations.
+#
+# See https://jujucharms.com/docs/stable/authors-charm-building
+# for more information on this pattern.
+from charms.reactive import main # noqa
+main()
diff --git a/kubernetes-worker/hooks/ingress-proxy-relation-broken b/kubernetes-worker/hooks/ingress-proxy-relation-broken
new file mode 100755
index 0000000..9858c6b
--- /dev/null
+++ b/kubernetes-worker/hooks/ingress-proxy-relation-broken
@@ -0,0 +1,22 @@
+#!/usr/bin/env python3
+
+# Load modules from $JUJU_CHARM_DIR/lib
+import sys
+sys.path.append('lib')
+
+from charms.layer import basic # noqa
+basic.bootstrap_charm_deps()
+
+from charmhelpers.core import hookenv # noqa
+hookenv.atstart(basic.init_config_states)
+hookenv.atexit(basic.clear_config_states)
+
+
+# This will load and run the appropriate @hook and other decorated
+# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive,
+# and $JUJU_CHARM_DIR/hooks/relations.
+#
+# See https://jujucharms.com/docs/stable/authors-charm-building
+# for more information on this pattern.
+from charms.reactive import main # noqa
+main()
diff --git a/kubernetes-worker/hooks/ingress-proxy-relation-changed b/kubernetes-worker/hooks/ingress-proxy-relation-changed
new file mode 100755
index 0000000..9858c6b
--- /dev/null
+++ b/kubernetes-worker/hooks/ingress-proxy-relation-changed
@@ -0,0 +1,22 @@
+#!/usr/bin/env python3
+
+# Load modules from $JUJU_CHARM_DIR/lib
+import sys
+sys.path.append('lib')
+
+from charms.layer import basic # noqa
+basic.bootstrap_charm_deps()
+
+from charmhelpers.core import hookenv # noqa
+hookenv.atstart(basic.init_config_states)
+hookenv.atexit(basic.clear_config_states)
+
+
+# This will load and run the appropriate @hook and other decorated
+# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive,
+# and $JUJU_CHARM_DIR/hooks/relations.
+#
+# See https://jujucharms.com/docs/stable/authors-charm-building
+# for more information on this pattern.
+from charms.reactive import main # noqa
+main()
diff --git a/kubernetes-worker/hooks/ingress-proxy-relation-created b/kubernetes-worker/hooks/ingress-proxy-relation-created
new file mode 100755
index 0000000..9858c6b
--- /dev/null
+++ b/kubernetes-worker/hooks/ingress-proxy-relation-created
@@ -0,0 +1,22 @@
+#!/usr/bin/env python3
+
+# Load modules from $JUJU_CHARM_DIR/lib
+import sys
+sys.path.append('lib')
+
+from charms.layer import basic # noqa
+basic.bootstrap_charm_deps()
+
+from charmhelpers.core import hookenv # noqa
+hookenv.atstart(basic.init_config_states)
+hookenv.atexit(basic.clear_config_states)
+
+
+# This will load and run the appropriate @hook and other decorated
+# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive,
+# and $JUJU_CHARM_DIR/hooks/relations.
+#
+# See https://jujucharms.com/docs/stable/authors-charm-building
+# for more information on this pattern.
+from charms.reactive import main # noqa
+main()
diff --git a/kubernetes-worker/hooks/ingress-proxy-relation-departed b/kubernetes-worker/hooks/ingress-proxy-relation-departed
new file mode 100755
index 0000000..9858c6b
--- /dev/null
+++ b/kubernetes-worker/hooks/ingress-proxy-relation-departed
@@ -0,0 +1,22 @@
+#!/usr/bin/env python3
+
+# Load modules from $JUJU_CHARM_DIR/lib
+import sys
+sys.path.append('lib')
+
+from charms.layer import basic # noqa
+basic.bootstrap_charm_deps()
+
+from charmhelpers.core import hookenv # noqa
+hookenv.atstart(basic.init_config_states)
+hookenv.atexit(basic.clear_config_states)
+
+
+# This will load and run the appropriate @hook and other decorated
+# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive,
+# and $JUJU_CHARM_DIR/hooks/relations.
+#
+# See https://jujucharms.com/docs/stable/authors-charm-building
+# for more information on this pattern.
+from charms.reactive import main # noqa
+main()
diff --git a/kubernetes-worker/hooks/ingress-proxy-relation-joined b/kubernetes-worker/hooks/ingress-proxy-relation-joined
new file mode 100755
index 0000000..9858c6b
--- /dev/null
+++ b/kubernetes-worker/hooks/ingress-proxy-relation-joined
@@ -0,0 +1,22 @@
+#!/usr/bin/env python3
+
+# Load modules from $JUJU_CHARM_DIR/lib
+import sys
+sys.path.append('lib')
+
+from charms.layer import basic # noqa
+basic.bootstrap_charm_deps()
+
+from charmhelpers.core import hookenv # noqa
+hookenv.atstart(basic.init_config_states)
+hookenv.atexit(basic.clear_config_states)
+
+
+# This will load and run the appropriate @hook and other decorated
+# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive,
+# and $JUJU_CHARM_DIR/hooks/relations.
+#
+# See https://jujucharms.com/docs/stable/authors-charm-building
+# for more information on this pattern.
+from charms.reactive import main # noqa
+main()
diff --git a/kubernetes-worker/hooks/install b/kubernetes-worker/hooks/install
new file mode 100755
index 0000000..9858c6b
--- /dev/null
+++ b/kubernetes-worker/hooks/install
@@ -0,0 +1,22 @@
+#!/usr/bin/env python3
+
+# Load modules from $JUJU_CHARM_DIR/lib
+import sys
+sys.path.append('lib')
+
+from charms.layer import basic # noqa
+basic.bootstrap_charm_deps()
+
+from charmhelpers.core import hookenv # noqa
+hookenv.atstart(basic.init_config_states)
+hookenv.atexit(basic.clear_config_states)
+
+
+# This will load and run the appropriate @hook and other decorated
+# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive,
+# and $JUJU_CHARM_DIR/hooks/relations.
+#
+# See https://jujucharms.com/docs/stable/authors-charm-building
+# for more information on this pattern.
+from charms.reactive import main # noqa
+main()
diff --git a/kubernetes-worker/hooks/kube-api-endpoint-relation-broken b/kubernetes-worker/hooks/kube-api-endpoint-relation-broken
new file mode 100755
index 0000000..9858c6b
--- /dev/null
+++ b/kubernetes-worker/hooks/kube-api-endpoint-relation-broken
@@ -0,0 +1,22 @@
+#!/usr/bin/env python3
+
+# Load modules from $JUJU_CHARM_DIR/lib
+import sys
+sys.path.append('lib')
+
+from charms.layer import basic # noqa
+basic.bootstrap_charm_deps()
+
+from charmhelpers.core import hookenv # noqa
+hookenv.atstart(basic.init_config_states)
+hookenv.atexit(basic.clear_config_states)
+
+
+# This will load and run the appropriate @hook and other decorated
+# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive,
+# and $JUJU_CHARM_DIR/hooks/relations.
+#
+# See https://jujucharms.com/docs/stable/authors-charm-building
+# for more information on this pattern.
+from charms.reactive import main # noqa
+main()
diff --git a/kubernetes-worker/hooks/kube-api-endpoint-relation-changed b/kubernetes-worker/hooks/kube-api-endpoint-relation-changed
new file mode 100755
index 0000000..9858c6b
--- /dev/null
+++ b/kubernetes-worker/hooks/kube-api-endpoint-relation-changed
@@ -0,0 +1,22 @@
+#!/usr/bin/env python3
+
+# Load modules from $JUJU_CHARM_DIR/lib
+import sys
+sys.path.append('lib')
+
+from charms.layer import basic # noqa
+basic.bootstrap_charm_deps()
+
+from charmhelpers.core import hookenv # noqa
+hookenv.atstart(basic.init_config_states)
+hookenv.atexit(basic.clear_config_states)
+
+
+# This will load and run the appropriate @hook and other decorated
+# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive,
+# and $JUJU_CHARM_DIR/hooks/relations.
+#
+# See https://jujucharms.com/docs/stable/authors-charm-building
+# for more information on this pattern.
+from charms.reactive import main # noqa
+main()
diff --git a/kubernetes-worker/hooks/kube-api-endpoint-relation-created b/kubernetes-worker/hooks/kube-api-endpoint-relation-created
new file mode 100755
index 0000000..9858c6b
--- /dev/null
+++ b/kubernetes-worker/hooks/kube-api-endpoint-relation-created
@@ -0,0 +1,22 @@
+#!/usr/bin/env python3
+
+# Load modules from $JUJU_CHARM_DIR/lib
+import sys
+sys.path.append('lib')
+
+from charms.layer import basic # noqa
+basic.bootstrap_charm_deps()
+
+from charmhelpers.core import hookenv # noqa
+hookenv.atstart(basic.init_config_states)
+hookenv.atexit(basic.clear_config_states)
+
+
+# This will load and run the appropriate @hook and other decorated
+# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive,
+# and $JUJU_CHARM_DIR/hooks/relations.
+#
+# See https://jujucharms.com/docs/stable/authors-charm-building
+# for more information on this pattern.
+from charms.reactive import main # noqa
+main()
diff --git a/kubernetes-worker/hooks/kube-api-endpoint-relation-departed b/kubernetes-worker/hooks/kube-api-endpoint-relation-departed
new file mode 100755
index 0000000..9858c6b
--- /dev/null
+++ b/kubernetes-worker/hooks/kube-api-endpoint-relation-departed
@@ -0,0 +1,22 @@
+#!/usr/bin/env python3
+
+# Load modules from $JUJU_CHARM_DIR/lib
+import sys
+sys.path.append('lib')
+
+from charms.layer import basic # noqa
+basic.bootstrap_charm_deps()
+
+from charmhelpers.core import hookenv # noqa
+hookenv.atstart(basic.init_config_states)
+hookenv.atexit(basic.clear_config_states)
+
+
+# This will load and run the appropriate @hook and other decorated
+# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive,
+# and $JUJU_CHARM_DIR/hooks/relations.
+#
+# See https://jujucharms.com/docs/stable/authors-charm-building
+# for more information on this pattern.
+from charms.reactive import main # noqa
+main()
diff --git a/kubernetes-worker/hooks/kube-api-endpoint-relation-joined b/kubernetes-worker/hooks/kube-api-endpoint-relation-joined
new file mode 100755
index 0000000..9858c6b
--- /dev/null
+++ b/kubernetes-worker/hooks/kube-api-endpoint-relation-joined
@@ -0,0 +1,22 @@
+#!/usr/bin/env python3
+
+# Load modules from $JUJU_CHARM_DIR/lib
+import sys
+sys.path.append('lib')
+
+from charms.layer import basic # noqa
+basic.bootstrap_charm_deps()
+
+from charmhelpers.core import hookenv # noqa
+hookenv.atstart(basic.init_config_states)
+hookenv.atexit(basic.clear_config_states)
+
+
+# This will load and run the appropriate @hook and other decorated
+# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive,
+# and $JUJU_CHARM_DIR/hooks/relations.
+#
+# See https://jujucharms.com/docs/stable/authors-charm-building
+# for more information on this pattern.
+from charms.reactive import main # noqa
+main()
diff --git a/kubernetes-worker/hooks/kube-control-relation-broken b/kubernetes-worker/hooks/kube-control-relation-broken
new file mode 100755
index 0000000..9858c6b
--- /dev/null
+++ b/kubernetes-worker/hooks/kube-control-relation-broken
@@ -0,0 +1,22 @@
+#!/usr/bin/env python3
+
+# Load modules from $JUJU_CHARM_DIR/lib
+import sys
+sys.path.append('lib')
+
+from charms.layer import basic # noqa
+basic.bootstrap_charm_deps()
+
+from charmhelpers.core import hookenv # noqa
+hookenv.atstart(basic.init_config_states)
+hookenv.atexit(basic.clear_config_states)
+
+
+# This will load and run the appropriate @hook and other decorated
+# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive,
+# and $JUJU_CHARM_DIR/hooks/relations.
+#
+# See https://jujucharms.com/docs/stable/authors-charm-building
+# for more information on this pattern.
+from charms.reactive import main # noqa
+main()
diff --git a/kubernetes-worker/hooks/kube-control-relation-changed b/kubernetes-worker/hooks/kube-control-relation-changed
new file mode 100755
index 0000000..9858c6b
--- /dev/null
+++ b/kubernetes-worker/hooks/kube-control-relation-changed
@@ -0,0 +1,22 @@
+#!/usr/bin/env python3
+
+# Load modules from $JUJU_CHARM_DIR/lib
+import sys
+sys.path.append('lib')
+
+from charms.layer import basic # noqa
+basic.bootstrap_charm_deps()
+
+from charmhelpers.core import hookenv # noqa
+hookenv.atstart(basic.init_config_states)
+hookenv.atexit(basic.clear_config_states)
+
+
+# This will load and run the appropriate @hook and other decorated
+# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive,
+# and $JUJU_CHARM_DIR/hooks/relations.
+#
+# See https://jujucharms.com/docs/stable/authors-charm-building
+# for more information on this pattern.
+from charms.reactive import main # noqa
+main()
diff --git a/kubernetes-worker/hooks/kube-control-relation-created b/kubernetes-worker/hooks/kube-control-relation-created
new file mode 100755
index 0000000..9858c6b
--- /dev/null
+++ b/kubernetes-worker/hooks/kube-control-relation-created
@@ -0,0 +1,22 @@
+#!/usr/bin/env python3
+
+# Load modules from $JUJU_CHARM_DIR/lib
+import sys
+sys.path.append('lib')
+
+from charms.layer import basic # noqa
+basic.bootstrap_charm_deps()
+
+from charmhelpers.core import hookenv # noqa
+hookenv.atstart(basic.init_config_states)
+hookenv.atexit(basic.clear_config_states)
+
+
+# This will load and run the appropriate @hook and other decorated
+# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive,
+# and $JUJU_CHARM_DIR/hooks/relations.
+#
+# See https://jujucharms.com/docs/stable/authors-charm-building
+# for more information on this pattern.
+from charms.reactive import main # noqa
+main()
diff --git a/kubernetes-worker/hooks/kube-control-relation-departed b/kubernetes-worker/hooks/kube-control-relation-departed
new file mode 100755
index 0000000..9858c6b
--- /dev/null
+++ b/kubernetes-worker/hooks/kube-control-relation-departed
@@ -0,0 +1,22 @@
+#!/usr/bin/env python3
+
+# Load modules from $JUJU_CHARM_DIR/lib
+import sys
+sys.path.append('lib')
+
+from charms.layer import basic # noqa
+basic.bootstrap_charm_deps()
+
+from charmhelpers.core import hookenv # noqa
+hookenv.atstart(basic.init_config_states)
+hookenv.atexit(basic.clear_config_states)
+
+
+# This will load and run the appropriate @hook and other decorated
+# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive,
+# and $JUJU_CHARM_DIR/hooks/relations.
+#
+# See https://jujucharms.com/docs/stable/authors-charm-building
+# for more information on this pattern.
+from charms.reactive import main # noqa
+main()
diff --git a/kubernetes-worker/hooks/kube-control-relation-joined b/kubernetes-worker/hooks/kube-control-relation-joined
new file mode 100755
index 0000000..9858c6b
--- /dev/null
+++ b/kubernetes-worker/hooks/kube-control-relation-joined
@@ -0,0 +1,22 @@
+#!/usr/bin/env python3
+
+# Load modules from $JUJU_CHARM_DIR/lib
+import sys
+sys.path.append('lib')
+
+from charms.layer import basic # noqa
+basic.bootstrap_charm_deps()
+
+from charmhelpers.core import hookenv # noqa
+hookenv.atstart(basic.init_config_states)
+hookenv.atexit(basic.clear_config_states)
+
+
+# This will load and run the appropriate @hook and other decorated
+# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive,
+# and $JUJU_CHARM_DIR/hooks/relations.
+#
+# See https://jujucharms.com/docs/stable/authors-charm-building
+# for more information on this pattern.
+from charms.reactive import main # noqa
+main()
diff --git a/kubernetes-worker/hooks/kube-dns-relation-broken b/kubernetes-worker/hooks/kube-dns-relation-broken
new file mode 100755
index 0000000..9858c6b
--- /dev/null
+++ b/kubernetes-worker/hooks/kube-dns-relation-broken
@@ -0,0 +1,22 @@
+#!/usr/bin/env python3
+
+# Load modules from $JUJU_CHARM_DIR/lib
+import sys
+sys.path.append('lib')
+
+from charms.layer import basic # noqa
+basic.bootstrap_charm_deps()
+
+from charmhelpers.core import hookenv # noqa
+hookenv.atstart(basic.init_config_states)
+hookenv.atexit(basic.clear_config_states)
+
+
+# This will load and run the appropriate @hook and other decorated
+# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive,
+# and $JUJU_CHARM_DIR/hooks/relations.
+#
+# See https://jujucharms.com/docs/stable/authors-charm-building
+# for more information on this pattern.
+from charms.reactive import main # noqa
+main()
diff --git a/kubernetes-worker/hooks/kube-dns-relation-changed b/kubernetes-worker/hooks/kube-dns-relation-changed
new file mode 100755
index 0000000..9858c6b
--- /dev/null
+++ b/kubernetes-worker/hooks/kube-dns-relation-changed
@@ -0,0 +1,22 @@
+#!/usr/bin/env python3
+
+# Load modules from $JUJU_CHARM_DIR/lib
+import sys
+sys.path.append('lib')
+
+from charms.layer import basic # noqa
+basic.bootstrap_charm_deps()
+
+from charmhelpers.core import hookenv # noqa
+hookenv.atstart(basic.init_config_states)
+hookenv.atexit(basic.clear_config_states)
+
+
+# This will load and run the appropriate @hook and other decorated
+# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive,
+# and $JUJU_CHARM_DIR/hooks/relations.
+#
+# See https://jujucharms.com/docs/stable/authors-charm-building
+# for more information on this pattern.
+from charms.reactive import main # noqa
+main()
diff --git a/kubernetes-worker/hooks/kube-dns-relation-created b/kubernetes-worker/hooks/kube-dns-relation-created
new file mode 100755
index 0000000..9858c6b
--- /dev/null
+++ b/kubernetes-worker/hooks/kube-dns-relation-created
@@ -0,0 +1,22 @@
+#!/usr/bin/env python3
+
+# Load modules from $JUJU_CHARM_DIR/lib
+import sys
+sys.path.append('lib')
+
+from charms.layer import basic # noqa
+basic.bootstrap_charm_deps()
+
+from charmhelpers.core import hookenv # noqa
+hookenv.atstart(basic.init_config_states)
+hookenv.atexit(basic.clear_config_states)
+
+
+# This will load and run the appropriate @hook and other decorated
+# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive,
+# and $JUJU_CHARM_DIR/hooks/relations.
+#
+# See https://jujucharms.com/docs/stable/authors-charm-building
+# for more information on this pattern.
+from charms.reactive import main # noqa
+main()
diff --git a/kubernetes-worker/hooks/kube-dns-relation-departed b/kubernetes-worker/hooks/kube-dns-relation-departed
new file mode 100755
index 0000000..9858c6b
--- /dev/null
+++ b/kubernetes-worker/hooks/kube-dns-relation-departed
@@ -0,0 +1,22 @@
+#!/usr/bin/env python3
+
+# Load modules from $JUJU_CHARM_DIR/lib
+import sys
+sys.path.append('lib')
+
+from charms.layer import basic # noqa
+basic.bootstrap_charm_deps()
+
+from charmhelpers.core import hookenv # noqa
+hookenv.atstart(basic.init_config_states)
+hookenv.atexit(basic.clear_config_states)
+
+
+# This will load and run the appropriate @hook and other decorated
+# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive,
+# and $JUJU_CHARM_DIR/hooks/relations.
+#
+# See https://jujucharms.com/docs/stable/authors-charm-building
+# for more information on this pattern.
+from charms.reactive import main # noqa
+main()
diff --git a/kubernetes-worker/hooks/kube-dns-relation-joined b/kubernetes-worker/hooks/kube-dns-relation-joined
new file mode 100755
index 0000000..9858c6b
--- /dev/null
+++ b/kubernetes-worker/hooks/kube-dns-relation-joined
@@ -0,0 +1,22 @@
+#!/usr/bin/env python3
+
+# Load modules from $JUJU_CHARM_DIR/lib
+import sys
+sys.path.append('lib')
+
+from charms.layer import basic # noqa
+basic.bootstrap_charm_deps()
+
+from charmhelpers.core import hookenv # noqa
+hookenv.atstart(basic.init_config_states)
+hookenv.atexit(basic.clear_config_states)
+
+
+# This will load and run the appropriate @hook and other decorated
+# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive,
+# and $JUJU_CHARM_DIR/hooks/relations.
+#
+# See https://jujucharms.com/docs/stable/authors-charm-building
+# for more information on this pattern.
+from charms.reactive import main # noqa
+main()
diff --git a/kubernetes-worker/hooks/leader-elected b/kubernetes-worker/hooks/leader-elected
new file mode 100755
index 0000000..fe39f65
--- /dev/null
+++ b/kubernetes-worker/hooks/leader-elected
@@ -0,0 +1,18 @@
+#!/usr/bin/env python3
+
+# Load modules from $CHARM_DIR/lib
+import sys
+sys.path.append('lib')
+
+from charms.layer.basic import bootstrap_charm_deps
+bootstrap_charm_deps()
+
+
+# This will load and run the appropriate @hook and other decorated
+# handlers from $CHARM_DIR/reactive, $CHARM_DIR/hooks/reactive,
+# and $CHARM_DIR/hooks/relations.
+#
+# See https://jujucharms.com/docs/stable/authors-charm-building
+# for more information on this pattern.
+from charms.reactive import main
+main()
diff --git a/kubernetes-worker/hooks/leader-settings-changed b/kubernetes-worker/hooks/leader-settings-changed
new file mode 100755
index 0000000..fe39f65
--- /dev/null
+++ b/kubernetes-worker/hooks/leader-settings-changed
@@ -0,0 +1,18 @@
+#!/usr/bin/env python3
+
+# Load modules from $CHARM_DIR/lib
+import sys
+sys.path.append('lib')
+
+from charms.layer.basic import bootstrap_charm_deps
+bootstrap_charm_deps()
+
+
+# This will load and run the appropriate @hook and other decorated
+# handlers from $CHARM_DIR/reactive, $CHARM_DIR/hooks/reactive,
+# and $CHARM_DIR/hooks/relations.
+#
+# See https://jujucharms.com/docs/stable/authors-charm-building
+# for more information on this pattern.
+from charms.reactive import main
+main()
diff --git a/kubernetes-worker/hooks/nfs-relation-broken b/kubernetes-worker/hooks/nfs-relation-broken
new file mode 100755
index 0000000..9858c6b
--- /dev/null
+++ b/kubernetes-worker/hooks/nfs-relation-broken
@@ -0,0 +1,22 @@
+#!/usr/bin/env python3
+
+# Load modules from $JUJU_CHARM_DIR/lib
+import sys
+sys.path.append('lib')
+
+from charms.layer import basic # noqa
+basic.bootstrap_charm_deps()
+
+from charmhelpers.core import hookenv # noqa
+hookenv.atstart(basic.init_config_states)
+hookenv.atexit(basic.clear_config_states)
+
+
+# This will load and run the appropriate @hook and other decorated
+# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive,
+# and $JUJU_CHARM_DIR/hooks/relations.
+#
+# See https://jujucharms.com/docs/stable/authors-charm-building
+# for more information on this pattern.
+from charms.reactive import main # noqa
+main()
diff --git a/kubernetes-worker/hooks/nfs-relation-changed b/kubernetes-worker/hooks/nfs-relation-changed
new file mode 100755
index 0000000..9858c6b
--- /dev/null
+++ b/kubernetes-worker/hooks/nfs-relation-changed
@@ -0,0 +1,22 @@
+#!/usr/bin/env python3
+
+# Load modules from $JUJU_CHARM_DIR/lib
+import sys
+sys.path.append('lib')
+
+from charms.layer import basic # noqa
+basic.bootstrap_charm_deps()
+
+from charmhelpers.core import hookenv # noqa
+hookenv.atstart(basic.init_config_states)
+hookenv.atexit(basic.clear_config_states)
+
+
+# This will load and run the appropriate @hook and other decorated
+# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive,
+# and $JUJU_CHARM_DIR/hooks/relations.
+#
+# See https://jujucharms.com/docs/stable/authors-charm-building
+# for more information on this pattern.
+from charms.reactive import main # noqa
+main()
diff --git a/kubernetes-worker/hooks/nfs-relation-created b/kubernetes-worker/hooks/nfs-relation-created
new file mode 100755
index 0000000..9858c6b
--- /dev/null
+++ b/kubernetes-worker/hooks/nfs-relation-created
@@ -0,0 +1,22 @@
+#!/usr/bin/env python3
+
+# Load modules from $JUJU_CHARM_DIR/lib
+import sys
+sys.path.append('lib')
+
+from charms.layer import basic # noqa
+basic.bootstrap_charm_deps()
+
+from charmhelpers.core import hookenv # noqa
+hookenv.atstart(basic.init_config_states)
+hookenv.atexit(basic.clear_config_states)
+
+
+# This will load and run the appropriate @hook and other decorated
+# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive,
+# and $JUJU_CHARM_DIR/hooks/relations.
+#
+# See https://jujucharms.com/docs/stable/authors-charm-building
+# for more information on this pattern.
+from charms.reactive import main # noqa
+main()
diff --git a/kubernetes-worker/hooks/nfs-relation-departed b/kubernetes-worker/hooks/nfs-relation-departed
new file mode 100755
index 0000000..9858c6b
--- /dev/null
+++ b/kubernetes-worker/hooks/nfs-relation-departed
@@ -0,0 +1,22 @@
+#!/usr/bin/env python3
+
+# Load modules from $JUJU_CHARM_DIR/lib
+import sys
+sys.path.append('lib')
+
+from charms.layer import basic # noqa
+basic.bootstrap_charm_deps()
+
+from charmhelpers.core import hookenv # noqa
+hookenv.atstart(basic.init_config_states)
+hookenv.atexit(basic.clear_config_states)
+
+
+# This will load and run the appropriate @hook and other decorated
+# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive,
+# and $JUJU_CHARM_DIR/hooks/relations.
+#
+# See https://jujucharms.com/docs/stable/authors-charm-building
+# for more information on this pattern.
+from charms.reactive import main # noqa
+main()
diff --git a/kubernetes-worker/hooks/nfs-relation-joined b/kubernetes-worker/hooks/nfs-relation-joined
new file mode 100755
index 0000000..9858c6b
--- /dev/null
+++ b/kubernetes-worker/hooks/nfs-relation-joined
@@ -0,0 +1,22 @@
+#!/usr/bin/env python3
+
+# Load modules from $JUJU_CHARM_DIR/lib
+import sys
+sys.path.append('lib')
+
+from charms.layer import basic # noqa
+basic.bootstrap_charm_deps()
+
+from charmhelpers.core import hookenv # noqa
+hookenv.atstart(basic.init_config_states)
+hookenv.atexit(basic.clear_config_states)
+
+
+# This will load and run the appropriate @hook and other decorated
+# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive,
+# and $JUJU_CHARM_DIR/hooks/relations.
+#
+# See https://jujucharms.com/docs/stable/authors-charm-building
+# for more information on this pattern.
+from charms.reactive import main # noqa
+main()
diff --git a/kubernetes-worker/hooks/nrpe-external-master-relation-broken b/kubernetes-worker/hooks/nrpe-external-master-relation-broken
new file mode 100755
index 0000000..9858c6b
--- /dev/null
+++ b/kubernetes-worker/hooks/nrpe-external-master-relation-broken
@@ -0,0 +1,22 @@
+#!/usr/bin/env python3
+
+# Load modules from $JUJU_CHARM_DIR/lib
+import sys
+sys.path.append('lib')
+
+from charms.layer import basic # noqa
+basic.bootstrap_charm_deps()
+
+from charmhelpers.core import hookenv # noqa
+hookenv.atstart(basic.init_config_states)
+hookenv.atexit(basic.clear_config_states)
+
+
+# This will load and run the appropriate @hook and other decorated
+# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive,
+# and $JUJU_CHARM_DIR/hooks/relations.
+#
+# See https://jujucharms.com/docs/stable/authors-charm-building
+# for more information on this pattern.
+from charms.reactive import main # noqa
+main()
diff --git a/kubernetes-worker/hooks/nrpe-external-master-relation-changed b/kubernetes-worker/hooks/nrpe-external-master-relation-changed
new file mode 100755
index 0000000..9858c6b
--- /dev/null
+++ b/kubernetes-worker/hooks/nrpe-external-master-relation-changed
@@ -0,0 +1,22 @@
+#!/usr/bin/env python3
+
+# Load modules from $JUJU_CHARM_DIR/lib
+import sys
+sys.path.append('lib')
+
+from charms.layer import basic # noqa
+basic.bootstrap_charm_deps()
+
+from charmhelpers.core import hookenv # noqa
+hookenv.atstart(basic.init_config_states)
+hookenv.atexit(basic.clear_config_states)
+
+
+# This will load and run the appropriate @hook and other decorated
+# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive,
+# and $JUJU_CHARM_DIR/hooks/relations.
+#
+# See https://jujucharms.com/docs/stable/authors-charm-building
+# for more information on this pattern.
+from charms.reactive import main # noqa
+main()
diff --git a/kubernetes-worker/hooks/nrpe-external-master-relation-created b/kubernetes-worker/hooks/nrpe-external-master-relation-created
new file mode 100755
index 0000000..9858c6b
--- /dev/null
+++ b/kubernetes-worker/hooks/nrpe-external-master-relation-created
@@ -0,0 +1,22 @@
+#!/usr/bin/env python3
+
+# Load modules from $JUJU_CHARM_DIR/lib
+import sys
+sys.path.append('lib')
+
+from charms.layer import basic # noqa
+basic.bootstrap_charm_deps()
+
+from charmhelpers.core import hookenv # noqa
+hookenv.atstart(basic.init_config_states)
+hookenv.atexit(basic.clear_config_states)
+
+
+# This will load and run the appropriate @hook and other decorated
+# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive,
+# and $JUJU_CHARM_DIR/hooks/relations.
+#
+# See https://jujucharms.com/docs/stable/authors-charm-building
+# for more information on this pattern.
+from charms.reactive import main # noqa
+main()
diff --git a/kubernetes-worker/hooks/nrpe-external-master-relation-departed b/kubernetes-worker/hooks/nrpe-external-master-relation-departed
new file mode 100755
index 0000000..9858c6b
--- /dev/null
+++ b/kubernetes-worker/hooks/nrpe-external-master-relation-departed
@@ -0,0 +1,22 @@
+#!/usr/bin/env python3
+
+# Load modules from $JUJU_CHARM_DIR/lib
+import sys
+sys.path.append('lib')
+
+from charms.layer import basic # noqa
+basic.bootstrap_charm_deps()
+
+from charmhelpers.core import hookenv # noqa
+hookenv.atstart(basic.init_config_states)
+hookenv.atexit(basic.clear_config_states)
+
+
+# This will load and run the appropriate @hook and other decorated
+# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive,
+# and $JUJU_CHARM_DIR/hooks/relations.
+#
+# See https://jujucharms.com/docs/stable/authors-charm-building
+# for more information on this pattern.
+from charms.reactive import main # noqa
+main()
diff --git a/kubernetes-worker/hooks/nrpe-external-master-relation-joined b/kubernetes-worker/hooks/nrpe-external-master-relation-joined
new file mode 100755
index 0000000..9858c6b
--- /dev/null
+++ b/kubernetes-worker/hooks/nrpe-external-master-relation-joined
@@ -0,0 +1,22 @@
+#!/usr/bin/env python3
+
+# Load modules from $JUJU_CHARM_DIR/lib
+import sys
+sys.path.append('lib')
+
+from charms.layer import basic # noqa
+basic.bootstrap_charm_deps()
+
+from charmhelpers.core import hookenv # noqa
+hookenv.atstart(basic.init_config_states)
+hookenv.atexit(basic.clear_config_states)
+
+
+# This will load and run the appropriate @hook and other decorated
+# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive,
+# and $JUJU_CHARM_DIR/hooks/relations.
+#
+# See https://jujucharms.com/docs/stable/authors-charm-building
+# for more information on this pattern.
+from charms.reactive import main # noqa
+main()
diff --git a/kubernetes-worker/hooks/openstack-relation-broken b/kubernetes-worker/hooks/openstack-relation-broken
new file mode 100755
index 0000000..9858c6b
--- /dev/null
+++ b/kubernetes-worker/hooks/openstack-relation-broken
@@ -0,0 +1,22 @@
+#!/usr/bin/env python3
+
+# Load modules from $JUJU_CHARM_DIR/lib
+import sys
+sys.path.append('lib')
+
+from charms.layer import basic # noqa
+basic.bootstrap_charm_deps()
+
+from charmhelpers.core import hookenv # noqa
+hookenv.atstart(basic.init_config_states)
+hookenv.atexit(basic.clear_config_states)
+
+
+# This will load and run the appropriate @hook and other decorated
+# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive,
+# and $JUJU_CHARM_DIR/hooks/relations.
+#
+# See https://jujucharms.com/docs/stable/authors-charm-building
+# for more information on this pattern.
+from charms.reactive import main # noqa
+main()
diff --git a/kubernetes-worker/hooks/openstack-relation-changed b/kubernetes-worker/hooks/openstack-relation-changed
new file mode 100755
index 0000000..9858c6b
--- /dev/null
+++ b/kubernetes-worker/hooks/openstack-relation-changed
@@ -0,0 +1,22 @@
+#!/usr/bin/env python3
+
+# Load modules from $JUJU_CHARM_DIR/lib
+import sys
+sys.path.append('lib')
+
+from charms.layer import basic # noqa
+basic.bootstrap_charm_deps()
+
+from charmhelpers.core import hookenv # noqa
+hookenv.atstart(basic.init_config_states)
+hookenv.atexit(basic.clear_config_states)
+
+
+# This will load and run the appropriate @hook and other decorated
+# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive,
+# and $JUJU_CHARM_DIR/hooks/relations.
+#
+# See https://jujucharms.com/docs/stable/authors-charm-building
+# for more information on this pattern.
+from charms.reactive import main # noqa
+main()
diff --git a/kubernetes-worker/hooks/openstack-relation-created b/kubernetes-worker/hooks/openstack-relation-created
new file mode 100755
index 0000000..9858c6b
--- /dev/null
+++ b/kubernetes-worker/hooks/openstack-relation-created
@@ -0,0 +1,22 @@
+#!/usr/bin/env python3
+
+# Load modules from $JUJU_CHARM_DIR/lib
+import sys
+sys.path.append('lib')
+
+from charms.layer import basic # noqa
+basic.bootstrap_charm_deps()
+
+from charmhelpers.core import hookenv # noqa
+hookenv.atstart(basic.init_config_states)
+hookenv.atexit(basic.clear_config_states)
+
+
+# This will load and run the appropriate @hook and other decorated
+# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive,
+# and $JUJU_CHARM_DIR/hooks/relations.
+#
+# See https://jujucharms.com/docs/stable/authors-charm-building
+# for more information on this pattern.
+from charms.reactive import main # noqa
+main()
diff --git a/kubernetes-worker/hooks/openstack-relation-departed b/kubernetes-worker/hooks/openstack-relation-departed
new file mode 100755
index 0000000..9858c6b
--- /dev/null
+++ b/kubernetes-worker/hooks/openstack-relation-departed
@@ -0,0 +1,22 @@
+#!/usr/bin/env python3
+
+# Load modules from $JUJU_CHARM_DIR/lib
+import sys
+sys.path.append('lib')
+
+from charms.layer import basic # noqa
+basic.bootstrap_charm_deps()
+
+from charmhelpers.core import hookenv # noqa
+hookenv.atstart(basic.init_config_states)
+hookenv.atexit(basic.clear_config_states)
+
+
+# This will load and run the appropriate @hook and other decorated
+# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive,
+# and $JUJU_CHARM_DIR/hooks/relations.
+#
+# See https://jujucharms.com/docs/stable/authors-charm-building
+# for more information on this pattern.
+from charms.reactive import main # noqa
+main()
diff --git a/kubernetes-worker/hooks/openstack-relation-joined b/kubernetes-worker/hooks/openstack-relation-joined
new file mode 100755
index 0000000..9858c6b
--- /dev/null
+++ b/kubernetes-worker/hooks/openstack-relation-joined
@@ -0,0 +1,22 @@
+#!/usr/bin/env python3
+
+# Load modules from $JUJU_CHARM_DIR/lib
+import sys
+sys.path.append('lib')
+
+from charms.layer import basic # noqa
+basic.bootstrap_charm_deps()
+
+from charmhelpers.core import hookenv # noqa
+hookenv.atstart(basic.init_config_states)
+hookenv.atexit(basic.clear_config_states)
+
+
+# This will load and run the appropriate @hook and other decorated
+# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive,
+# and $JUJU_CHARM_DIR/hooks/relations.
+#
+# See https://jujucharms.com/docs/stable/authors-charm-building
+# for more information on this pattern.
+from charms.reactive import main # noqa
+main()
diff --git a/kubernetes-worker/hooks/post-series-upgrade b/kubernetes-worker/hooks/post-series-upgrade
new file mode 100755
index 0000000..9858c6b
--- /dev/null
+++ b/kubernetes-worker/hooks/post-series-upgrade
@@ -0,0 +1,22 @@
+#!/usr/bin/env python3
+
+# Load modules from $JUJU_CHARM_DIR/lib
+import sys
+sys.path.append('lib')
+
+from charms.layer import basic # noqa
+basic.bootstrap_charm_deps()
+
+from charmhelpers.core import hookenv # noqa
+hookenv.atstart(basic.init_config_states)
+hookenv.atexit(basic.clear_config_states)
+
+
+# This will load and run the appropriate @hook and other decorated
+# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive,
+# and $JUJU_CHARM_DIR/hooks/relations.
+#
+# See https://jujucharms.com/docs/stable/authors-charm-building
+# for more information on this pattern.
+from charms.reactive import main # noqa
+main()
diff --git a/kubernetes-worker/hooks/pre-series-upgrade b/kubernetes-worker/hooks/pre-series-upgrade
new file mode 100755
index 0000000..9858c6b
--- /dev/null
+++ b/kubernetes-worker/hooks/pre-series-upgrade
@@ -0,0 +1,22 @@
+#!/usr/bin/env python3
+
+# Load modules from $JUJU_CHARM_DIR/lib
+import sys
+sys.path.append('lib')
+
+from charms.layer import basic # noqa
+basic.bootstrap_charm_deps()
+
+from charmhelpers.core import hookenv # noqa
+hookenv.atstart(basic.init_config_states)
+hookenv.atexit(basic.clear_config_states)
+
+
+# This will load and run the appropriate @hook and other decorated
+# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive,
+# and $JUJU_CHARM_DIR/hooks/relations.
+#
+# See https://jujucharms.com/docs/stable/authors-charm-building
+# for more information on this pattern.
+from charms.reactive import main # noqa
+main()
diff --git a/kubernetes-worker/hooks/relations/aws-integration/.gitignore b/kubernetes-worker/hooks/relations/aws-integration/.gitignore
new file mode 100644
index 0000000..ba1431e
--- /dev/null
+++ b/kubernetes-worker/hooks/relations/aws-integration/.gitignore
@@ -0,0 +1,2 @@
+.tox
+__pycache__
diff --git a/kubernetes-worker/hooks/relations/aws-integration/LICENSE b/kubernetes-worker/hooks/relations/aws-integration/LICENSE
new file mode 100644
index 0000000..d645695
--- /dev/null
+++ b/kubernetes-worker/hooks/relations/aws-integration/LICENSE
@@ -0,0 +1,202 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/kubernetes-worker/hooks/relations/aws-integration/README.md b/kubernetes-worker/hooks/relations/aws-integration/README.md
new file mode 100644
index 0000000..59abfcf
--- /dev/null
+++ b/kubernetes-worker/hooks/relations/aws-integration/README.md
@@ -0,0 +1,28 @@
+# Overview
+
+This layer encapsulates the `aws-integration` interface communication protocol
+and provides an API for charms on either side of relations using this
+interface.
+
+## Usage
+
+In your charm's `layer.yaml`, ensure that `interface:aws-integration` is
+included in the `includes` section:
+
+```yaml
+includes: ['layer:basic', 'interface:aws-integration']
+```
+
+And in your charm's `metadata.yaml`, ensure that a relation endpoint is defined
+using the `aws-integration` interface protocol:
+
+```yaml
+requires:
+ aws:
+ interface: aws-integration
+```
+
+For documentation on how to use the API for this interface, see:
+
+* [Requires API documentation](docs/requires.md)
+* [Provides API documentation](docs/provides.md) (this will only be used by the aws-integrator charm)
diff --git a/kubernetes-worker/hooks/relations/aws-integration/__init__.py b/kubernetes-worker/hooks/relations/aws-integration/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/kubernetes-worker/hooks/relations/aws-integration/copyright b/kubernetes-worker/hooks/relations/aws-integration/copyright
new file mode 100644
index 0000000..a91bdf1
--- /dev/null
+++ b/kubernetes-worker/hooks/relations/aws-integration/copyright
@@ -0,0 +1,16 @@
+Format: http://dep.debian.net/deps/dep5/
+
+Files: *
+Copyright: Copyright 2018, Canonical Ltd., All Rights Reserved.
+License: Apache License 2.0
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+ .
+ http://www.apache.org/licenses/LICENSE-2.0
+ .
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/kubernetes-worker/hooks/relations/aws-integration/docs/provides.md b/kubernetes-worker/hooks/relations/aws-integration/docs/provides.md
new file mode 100644
index 0000000..57ecb25
--- /dev/null
+++ b/kubernetes-worker/hooks/relations/aws-integration/docs/provides.md
@@ -0,0 +1,179 @@
+
provides
+
+
+This is the provides side of the interface layer, for use only by the AWS
+integrator charm itself.
+
+The flags that are set by the provides side of this interface are:
+
+* **`endpoint.{endpoint_name}.requested`** This flag is set when there is
+ a new or updated request by a remote unit for AWS integration features.
+ The AWS integration charm should then iterate over each request, perform
+ whatever actions are necessary to satisfy those requests, and then mark
+ them as complete.
+
+
+
+```python
+IntegrationRequest.mark_completed(self)
+```
+
+Mark this request as having been completed.
+
+
clear
+
+```python
+IntegrationRequest.clear(self)
+```
+
+Clear this request's cached data.
+
diff --git a/kubernetes-worker/hooks/relations/aws-integration/docs/requires.md b/kubernetes-worker/hooks/relations/aws-integration/docs/requires.md
new file mode 100644
index 0000000..41607f4
--- /dev/null
+++ b/kubernetes-worker/hooks/relations/aws-integration/docs/requires.md
@@ -0,0 +1,178 @@
+
requires
+
+
+This is the requires side of the interface layer, for use in charms that
+wish to request integration with AWS native features. The integration will
+be provided by the AWS integration charm, which allows the requiring charm
+to not require cloud credentials itself and not have a lot of AWS specific
+API code.
+
+The flags that are set by the requires side of this interface are:
+
+* **`endpoint.{endpoint_name}.joined`** This flag is set when the relation
+ has been joined, and the charm should then use the methods documented below
+ to request specific AWS features. This flag is automatically removed if
+ the relation is broken. It should not be removed by the charm.
+
+* **`endpoint.{endpoint_name}.ready`** This flag is set once the requested
+ features have been enabled for the AWS instance on which the charm is
+ running. This flag is automatically removed if new integration features
+ are requested. It should not be removed by the charm.
+
+
+
+```python
+AWSIntegrationRequires.tag_instance(self, tags)
+```
+
+Request that the given tags be applied to this instance.
+
+__Parameters__
+
+- __`tags` (dict)__: Mapping of tag names to values (or `None`).
+
+
tag_instance_security_group
+
+```python
+AWSIntegrationRequires.tag_instance_security_group(self, tags)
+```
+
+Request that the given tags be applied to this instance's
+machine-specific security group (firewall) created by Juju.
+
+__Parameters__
+
+- __`tags` (dict)__: Mapping of tag names to values (or `None`).
+
+
tag_instance_subnet
+
+```python
+AWSIntegrationRequires.tag_instance_subnet(self, tags)
+```
+
+Request that the given tags be applied to this instance's subnet.
+
+__Parameters__
+
+- __`tags` (dict)__: Mapping of tag names to values (or `None`).
+
+
+
+```python
+AWSIntegrationRequires.enable_instance_inspection(self)
+```
+
+Request the ability to inspect instances.
+
+
enable_network_management
+
+```python
+AWSIntegrationRequires.enable_network_management(self)
+```
+
+Request the ability to manage networking (firewalls, subnets, etc).
+
+
enable_load_balancer_management
+
+```python
+AWSIntegrationRequires.enable_load_balancer_management(self)
+```
+
+Request the ability to manage load balancers.
+
+
enable_block_storage_management
+
+```python
+AWSIntegrationRequires.enable_block_storage_management(self)
+```
+
+Request the ability to manage block storage.
+
+
enable_dns_management
+
+```python
+AWSIntegrationRequires.enable_dns_management(self)
+```
+
+Request the ability to manage DNS.
+
+
enable_object_storage_access
+
+```python
+AWSIntegrationRequires.enable_object_storage_access(self, patterns=None)
+```
+
+Request the ability to access object storage.
+
+__Parameters__
+
+- __`patterns` (list)__: If given, restrict access to the resources matching
+ the patterns. If patterns do not start with the S3 ARN prefix
+  (`arn:aws:s3:::`), it will be prepended.
+
+
enable_object_storage_management
+
+```python
+AWSIntegrationRequires.enable_object_storage_management(self, patterns=None)
+```
+
+Request the ability to manage object storage.
+
+__Parameters__
+
+- __`patterns` (list)__: If given, restrict management to the resources
+ matching the patterns. If patterns do not start with the S3 ARN
+  prefix (`arn:aws:s3:::`), it will be prepended.
+
diff --git a/kubernetes-worker/hooks/relations/aws-integration/interface.yaml b/kubernetes-worker/hooks/relations/aws-integration/interface.yaml
new file mode 100644
index 0000000..fe3da6d
--- /dev/null
+++ b/kubernetes-worker/hooks/relations/aws-integration/interface.yaml
@@ -0,0 +1,4 @@
+name: aws-integration
+summary: Interface for connecting to the AWS integrator charm.
+version: 1
+maintainer: Cory Johns
diff --git a/kubernetes-worker/hooks/relations/aws-integration/make_docs b/kubernetes-worker/hooks/relations/aws-integration/make_docs
new file mode 100644
index 0000000..72b69c2
--- /dev/null
+++ b/kubernetes-worker/hooks/relations/aws-integration/make_docs
@@ -0,0 +1,20 @@
+#!.tox/py3/bin/python
+
+import sys
+from shutil import rmtree
+from unittest.mock import patch
+
+import pydocmd.__main__
+
+
+with patch('charmhelpers.core.hookenv.metadata') as metadata:
+ metadata.return_value = {
+ 'requires': {'aws': {'interface': 'aws-integration'}},
+ 'provides': {'aws': {'interface': 'aws-integration'}},
+ }
+ sys.path.insert(0, '.')
+ print(sys.argv)
+ if len(sys.argv) == 1:
+ sys.argv.extend(['build'])
+ pydocmd.__main__.main()
+ rmtree('_build')
diff --git a/kubernetes-worker/hooks/relations/aws-integration/provides.py b/kubernetes-worker/hooks/relations/aws-integration/provides.py
new file mode 100644
index 0000000..ae94211
--- /dev/null
+++ b/kubernetes-worker/hooks/relations/aws-integration/provides.py
@@ -0,0 +1,288 @@
+"""
+This is the provides side of the interface layer, for use only by the AWS
+integrator charm itself.
+
+The flags that are set by the provides side of this interface are:
+
+* **`endpoint.{endpoint_name}.requested`** This flag is set when there is
+ a new or updated request by a remote unit for AWS integration features.
+ The AWS integration charm should then iterate over each request, perform
+ whatever actions are necessary to satisfy those requests, and then mark
+ them as complete.
+"""
+
+import json
+from hashlib import sha256
+
+from charmhelpers.core import unitdata
+
+from charms.reactive import Endpoint
+from charms.reactive import when
+from charms.reactive import toggle_flag, clear_flag
+
+
+class AWSIntegrationProvides(Endpoint):
+ """
+ Example usage:
+
+ ```python
+ from charms.reactive import when, endpoint_from_flag
+ from charms import layer
+
+ @when('endpoint.aws.requested')
+ def handle_requests():
+ aws = endpoint_from_flag('endpoint.aws.requested')
+ for request in aws.requests:
+ if request.instance_tags:
+ tag_instance(
+ request.instance_id,
+ request.region,
+ request.instance_tags)
+ if request.requested_load_balancer_management:
+ layer.aws.enable_load_balancer_management(
+ request.application_name,
+ request.instance_id,
+ request.region,
+ )
+ # ...
+ request.mark_completed()
+ ```
+ """
+
+ @when('endpoint.{endpoint_name}.changed')
+ def check_requests(self):
+ requests = self.requests
+ toggle_flag(self.expand_name('requested'), len(requests) > 0)
+ clear_flag(self.expand_name('changed'))
+
+ @when('endpoint.{endpoint_name}.departed')
+ def cleanup(self):
+ for unit in self.all_departed_units:
+ request = IntegrationRequest(unit)
+ request.clear()
+ self.all_departed_units.clear()
+ clear_flag(self.expand_name('departed'))
+
+ @property
+ def requests(self):
+ """
+ A list of the new or updated #IntegrationRequests that
+ have been made.
+ """
+ return [request for request in self.all_requests if request.changed]
+
+ @property
+ def all_requests(self):
+ """
+ A list of all the #IntegrationRequests that have been made,
+ even if unchanged.
+ """
+ return [IntegrationRequest(unit) for unit in self.all_joined_units]
+
+ @property
+ def application_names(self):
+ """
+ Set of names of all applications that are still joined.
+ """
+ return {unit.application_name for unit in self.all_joined_units}
+
+ @property
+ def unit_instances(self):
+ """
+ Mapping of unit names to instance IDs and regions for all joined units.
+ """
+ return {
+ unit.unit_name: {
+ 'instance-id': unit.received['instance-id'],
+ 'region': unit.received['region'],
+ } for unit in self.all_joined_units
+ }
+
+
+class IntegrationRequest:
+ """
+ A request for integration from a single remote unit.
+ """
+ def __init__(self, unit):
+ self._unit = unit
+ self._hash = sha256(json.dumps(dict(unit.received),
+ sort_keys=True).encode('utf8')
+ ).hexdigest()
+
+ @property
+ def hash(self):
+ """
+ SHA hash of the data for this request.
+ """
+ return self._hash
+
+ @property
+ def _hash_key(self):
+ endpoint = self._unit.relation.endpoint
+ return endpoint.expand_name('request.{}'.format(self.instance_id))
+
+ @property
+ def changed(self):
+ """
+ Whether this request has changed since the last time it was
+ marked completed.
+ """
+ if not (self.instance_id and self._requested):
+ return False
+ saved_hash = unitdata.kv().get(self._hash_key)
+ result = saved_hash != self.hash
+ return result
+
+ def mark_completed(self):
+ """
+ Mark this request as having been completed.
+ """
+ completed = self._unit.relation.to_publish.get('completed', {})
+ completed[self.instance_id] = self.hash
+ unitdata.kv().set(self._hash_key, self.hash)
+ self._unit.relation.to_publish['completed'] = completed
+
+ def clear(self):
+ """
+ Clear this request's cached data.
+ """
+ unitdata.kv().unset(self._hash_key)
+
+ @property
+ def unit_name(self):
+ """
+ The name of the unit making the request.
+ """
+ return self._unit.unit_name
+
+ @property
+ def application_name(self):
+ """
+ The name of the application making the request.
+ """
+ return self._unit.application_name
+
+ @property
+ def _requested(self):
+ return self._unit.received['requested']
+
+ @property
+ def instance_id(self):
+ """
+ The instance ID reported for this request.
+ """
+ return self._unit.received['instance-id']
+
+ @property
+ def region(self):
+ """
+ The region reported for this request.
+ """
+ return self._unit.received['region']
+
+ @property
+ def instance_tags(self):
+ """
+ Mapping of tag names to values (or `None`) to apply to this instance.
+ """
+ # uses dict() here to make a copy, just to be safe
+ return dict(self._unit.received.get('instance-tags', {}))
+
+ @property
+ def instance_security_group_tags(self):
+ """
+ Mapping of tag names to values (or `None`) to apply to this instance's
+ machine-specific security group (firewall).
+ """
+ # uses dict() here to make a copy, just to be safe
+ return dict(self._unit.received.get('instance-security-group-tags',
+ {}))
+
+ @property
+ def instance_subnet_tags(self):
+ """
+ Mapping of tag names to values (or `None`) to apply to this instance's
+ subnet.
+ """
+ # uses dict() here to make a copy, just to be safe
+ return dict(self._unit.received.get('instance-subnet-tags', {}))
+
+ @property
+ def requested_instance_inspection(self):
+ """
+ Flag indicating whether the ability to inspect instances was requested.
+ """
+ return bool(self._unit.received['enable-instance-inspection'])
+
+ @property
+ def requested_acm_readonly(self):
+ """
+ Flag indicating whether acm readonly was requested.
+ """
+ return bool(self._unit.received['enable-acm-readonly'])
+
+ @property
+ def requested_acm_fullaccess(self):
+ """
+ Flag indicating whether acm fullaccess was requested.
+ """
+ return bool(self._unit.received['enable-acm-fullaccess'])
+
+ @property
+ def requested_network_management(self):
+ """
+ Flag indicating whether the ability to manage networking (firewalls,
+ subnets, etc) was requested.
+ """
+ return bool(self._unit.received['enable-network-management'])
+
+ @property
+ def requested_load_balancer_management(self):
+ """
+ Flag indicating whether load balancer management was requested.
+ """
+ return bool(self._unit.received['enable-load-balancer-management'])
+
+ @property
+ def requested_block_storage_management(self):
+ """
+ Flag indicating whether block storage management was requested.
+ """
+ return bool(self._unit.received['enable-block-storage-management'])
+
+ @property
+ def requested_dns_management(self):
+ """
+ Flag indicating whether DNS management was requested.
+ """
+ return bool(self._unit.received['enable-dns-management'])
+
+ @property
+ def requested_object_storage_access(self):
+ """
+ Flag indicating whether object storage access was requested.
+ """
+ return bool(self._unit.received['enable-object-storage-access'])
+
+ @property
+ def object_storage_access_patterns(self):
+ """
+ List of patterns to which to restrict object storage access.
+ """
+ return list(
+ self._unit.received['object-storage-access-patterns'] or [])
+
+ @property
+ def requested_object_storage_management(self):
+ """
+ Flag indicating whether object storage management was requested.
+ """
+ return bool(self._unit.received['enable-object-storage-management'])
+
+ @property
+ def object_storage_management_patterns(self):
+ """
+ List of patterns to which to restrict object storage management.
+ """
+ return list(
+ self._unit.received['object-storage-management-patterns'] or [])
diff --git a/kubernetes-worker/hooks/relations/aws-integration/pydocmd.yml b/kubernetes-worker/hooks/relations/aws-integration/pydocmd.yml
new file mode 100644
index 0000000..70a2e75
--- /dev/null
+++ b/kubernetes-worker/hooks/relations/aws-integration/pydocmd.yml
@@ -0,0 +1,16 @@
+site_name: 'AWS Integration Interface'
+
+generate:
+ - requires.md:
+ - requires
+ - requires.AWSIntegrationRequires+
+ - provides.md:
+ - provides
+ - provides.AWSIntegrationProvides+
+ - provides.IntegrationRequest+
+
+pages:
+ - Requires: requires.md
+ - Provides: provides.md
+
+gens_dir: docs
diff --git a/kubernetes-worker/hooks/relations/aws-integration/requires.py b/kubernetes-worker/hooks/relations/aws-integration/requires.py
new file mode 100644
index 0000000..c457e02
--- /dev/null
+++ b/kubernetes-worker/hooks/relations/aws-integration/requires.py
@@ -0,0 +1,262 @@
+"""
+This is the requires side of the interface layer, for use in charms that
+wish to request integration with AWS native features. The integration will
+be provided by the AWS integration charm, which allows the requiring charm
+to not require cloud credentials itself and not have a lot of AWS specific
+API code.
+
+The flags that are set by the requires side of this interface are:
+
+* **`endpoint.{endpoint_name}.joined`** This flag is set when the relation
+ has been joined, and the charm should then use the methods documented below
+ to request specific AWS features. This flag is automatically removed if
+ the relation is broken. It should not be removed by the charm.
+
+* **`endpoint.{endpoint_name}.ready`** This flag is set once the requested
+ features have been enabled for the AWS instance on which the charm is
+ running. This flag is automatically removed if new integration features
+ are requested. It should not be removed by the charm.
+"""
+
+
+import json
+import string
+from hashlib import sha256
+from urllib.parse import urljoin
+from urllib.request import urlopen
+
+from charmhelpers.core import unitdata
+
+from charms.reactive import Endpoint
+from charms.reactive import when, when_not
+from charms.reactive import clear_flag, toggle_flag
+
+
+# block size to read data from AWS metadata service
+# (realistically, just needs to be bigger than ~20 chars)
+READ_BLOCK_SIZE = 2048
+
+
+class AWSIntegrationRequires(Endpoint):
+ """
+ Example usage:
+
+ ```python
+ from charms.reactive import when, endpoint_from_flag
+
+ @when('endpoint.aws.joined')
+ def request_aws_integration():
+ aws = endpoint_from_flag('endpoint.aws.joined')
+ aws.request_instance_tags({
+ 'tag1': 'value1',
+ 'tag2': None,
+ })
+ aws.request_load_balancer_management()
+ # ...
+
+ @when('endpoint.aws.ready')
+ def aws_integration_ready():
+ update_config_enable_aws()
+ ```
+ """
+ # the IP is the AWS metadata service, documented here:
+ # https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-instance-metadata.html
+ _metadata_url = 'http://169.254.169.254/latest/meta-data/'
+ _instance_id_url = urljoin(_metadata_url, 'instance-id')
+ _az_url = urljoin(_metadata_url, 'placement/availability-zone')
+
+ def __init__(self, *args, **kwargs):
+ super().__init__(*args, **kwargs)
+ self._instance_id = None
+ self._region = None
+
+ @property
+ def _received(self):
+ """
+ Helper to streamline access to received data since we expect to only
+ ever be connected to a single AWS integration application with a
+ single unit.
+ """
+ return self.relations[0].joined_units.received
+
+ @property
+ def _to_publish(self):
+ """
+ Helper to streamline access to received data since we expect to only
+ ever be connected to a single AWS integration application with a
+ single unit.
+ """
+ return self.relations[0].to_publish
+
+ @when('endpoint.{endpoint_name}.joined')
+ def send_instance_info(self):
+ self._to_publish['instance-id'] = self.instance_id
+ self._to_publish['region'] = self.region
+
+ @when('endpoint.{endpoint_name}.changed')
+ def check_ready(self):
+ completed = self._received.get('completed', {})
+ actual_hash = completed.get(self.instance_id)
+        # The request is ready once the provider reports back a completed-hash
+        # for this instance that matches the hash of the data we published.
+ toggle_flag(self.expand_name('ready'),
+ self._requested and actual_hash == self._expected_hash)
+ clear_flag(self.expand_name('changed'))
+
+ @when_not('endpoint.{endpoint_name}.joined')
+ def remove_ready(self):
+ clear_flag(self.expand_name('ready'))
+
+ @property
+ def instance_id(self):
+ """
+ This unit's instance-id.
+ """
+ if self._instance_id is None:
+ cache_key = self.expand_name('instance-id')
+ cached = unitdata.kv().get(cache_key)
+ if cached:
+ self._instance_id = cached
+ else:
+ with urlopen(self._instance_id_url) as fd:
+ self._instance_id = fd.read(READ_BLOCK_SIZE).decode('utf8')
+ unitdata.kv().set(cache_key, self._instance_id)
+ return self._instance_id
+
+ @property
+ def region(self):
+ """
+ The region this unit is in.
+ """
+ if self._region is None:
+ cache_key = self.expand_name('region')
+ cached = unitdata.kv().get(cache_key)
+ if cached:
+ self._region = cached
+ else:
+ with urlopen(self._az_url) as fd:
+ az = fd.read(READ_BLOCK_SIZE).decode('utf8')
+ self._region = az.rstrip(string.ascii_lowercase)
+ unitdata.kv().set(cache_key, self._region)
+ return self._region
+
+ @property
+ def _expected_hash(self):
+ return sha256(json.dumps(dict(self._to_publish),
+ sort_keys=True).encode('utf8')).hexdigest()
+
+ @property
+ def _requested(self):
+ # whether or not a request has been issued
+ return self._to_publish['requested']
+
+ def _request(self, keyvals):
+ self._to_publish.update(keyvals)
+ self._to_publish['requested'] = True
+ clear_flag(self.expand_name('ready'))
+
+ def tag_instance(self, tags):
+ """
+ Request that the given tags be applied to this instance.
+
+ # Parameters
+ `tags` (dict): Mapping of tag names to values (or `None`).
+ """
+ self._request({'instance-tags': dict(tags)})
+
+ def tag_instance_security_group(self, tags):
+ """
+ Request that the given tags be applied to this instance's
+ machine-specific security group (firewall) created by Juju.
+
+ # Parameters
+ `tags` (dict): Mapping of tag names to values (or `None`).
+ """
+ self._request({'instance-security-group-tags': dict(tags)})
+
+ def tag_instance_subnet(self, tags):
+ """
+ Request that the given tags be applied to this instance's subnet.
+
+ # Parameters
+ `tags` (dict): Mapping of tag names to values (or `None`).
+ """
+ self._request({'instance-subnet-tags': dict(tags)})
+
+ def enable_acm_readonly(self):
+ """
+ Request readonly for ACM.
+ """
+ self._request({'enable-acm-readonly': True})
+
+ def enable_acm_fullaccess(self):
+ """
+ Request fullaccess for ACM.
+ """
+ self._request({'enable-acm-fullaccess': True})
+
+ def enable_instance_inspection(self):
+ """
+ Request the ability to inspect instances.
+ """
+ self._request({'enable-instance-inspection': True})
+
+ def enable_network_management(self):
+ """
+ Request the ability to manage networking (firewalls, subnets, etc).
+ """
+ self._request({'enable-network-management': True})
+
+ def enable_load_balancer_management(self):
+ """
+ Request the ability to manage load balancers.
+ """
+ self._request({'enable-load-balancer-management': True})
+
+ def enable_block_storage_management(self):
+ """
+ Request the ability to manage block storage.
+ """
+ self._request({'enable-block-storage-management': True})
+
+ def enable_dns_management(self):
+ """
+ Request the ability to manage DNS.
+ """
+ self._request({'enable-dns-management': True})
+
+ def enable_object_storage_access(self, patterns=None):
+ """
+ Request the ability to access object storage.
+
+ # Parameters
+ `patterns` (list): If given, restrict access to the resources matching
+ the patterns. If patterns do not start with the S3 ARN prefix
+ (`arn:aws:s3:::`), it will be prepended.
+ """
+ if patterns:
+ for i, pattern in enumerate(patterns):
+ if not pattern.startswith('arn:aws:s3:::'):
+ patterns[i] = 'arn:aws:s3:::{}'.format(pattern)
+ self._request({
+ 'enable-object-storage-access': True,
+ 'object-storage-access-patterns': patterns,
+ })
+
+ def enable_object_storage_management(self, patterns=None):
+ """
+ Request the ability to manage object storage.
+
+ # Parameters
+ `patterns` (list): If given, restrict management to the resources
+ matching the patterns. If patterns do not start with the S3 ARN
+ prefix (`arn:aws:s3:::`), it will be prepended.
+ """
+ if patterns:
+ for i, pattern in enumerate(patterns):
+ if not pattern.startswith('arn:aws:s3:::'):
+ patterns[i] = 'arn:aws:s3:::{}'.format(pattern)
+ self._request({
+ 'enable-object-storage-management': True,
+ 'object-storage-management-patterns': patterns,
+ })
diff --git a/kubernetes-worker/hooks/relations/azure-integration/.gitignore b/kubernetes-worker/hooks/relations/azure-integration/.gitignore
new file mode 100644
index 0000000..5f9f2c5
--- /dev/null
+++ b/kubernetes-worker/hooks/relations/azure-integration/.gitignore
@@ -0,0 +1,3 @@
+.tox
+__pycache__
+*.pyc
diff --git a/kubernetes-worker/hooks/relations/azure-integration/LICENSE b/kubernetes-worker/hooks/relations/azure-integration/LICENSE
new file mode 100644
index 0000000..d645695
--- /dev/null
+++ b/kubernetes-worker/hooks/relations/azure-integration/LICENSE
@@ -0,0 +1,202 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/kubernetes-worker/hooks/relations/azure-integration/README.md b/kubernetes-worker/hooks/relations/azure-integration/README.md
new file mode 100644
index 0000000..ddcae26
--- /dev/null
+++ b/kubernetes-worker/hooks/relations/azure-integration/README.md
@@ -0,0 +1,28 @@
+# Overview
+
+This layer encapsulates the `azure-integration` interface communication
+protocol and provides an API for charms on either side of relations using this
+interface.
+
+## Usage
+
+In your charm's `layer.yaml`, ensure that `interface:azure-integration` is
+included in the `includes` section:
+
+```yaml
+includes: ['layer:basic', 'interface:azure-integration']
+```
+
+And in your charm's `metadata.yaml`, ensure that a relation endpoint is defined
+using the `azure-integration` interface protocol:
+
+```yaml
+requires:
+ azure:
+ interface: azure-integration
+```
+
+For documentation on how to use the API for this interface, see:
+
+* [Requires API documentation](docs/requires.md)
+* [Provides API documentation](docs/provides.md) (this will only be used by the azure-integrator charm)
diff --git a/kubernetes-worker/hooks/relations/azure-integration/__init__.py b/kubernetes-worker/hooks/relations/azure-integration/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/kubernetes-worker/hooks/relations/azure-integration/copyright b/kubernetes-worker/hooks/relations/azure-integration/copyright
new file mode 100644
index 0000000..a91bdf1
--- /dev/null
+++ b/kubernetes-worker/hooks/relations/azure-integration/copyright
@@ -0,0 +1,16 @@
+Format: http://dep.debian.net/deps/dep5/
+
+Files: *
+Copyright: Copyright 2018, Canonical Ltd., All Rights Reserved.
+License: Apache License 2.0
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+ .
+ http://www.apache.org/licenses/LICENSE-2.0
+ .
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/kubernetes-worker/hooks/relations/azure-integration/docs/provides.md b/kubernetes-worker/hooks/relations/azure-integration/docs/provides.md
new file mode 100644
index 0000000..4348dff
--- /dev/null
+++ b/kubernetes-worker/hooks/relations/azure-integration/docs/provides.md
@@ -0,0 +1,175 @@
+
provides
+
+
+This is the provides side of the interface layer, for use only by the Azure
+integrator charm itself.
+
+The flags that are set by the provides side of this interface are:
+
+* **`endpoint.{endpoint_name}.requested`** This flag is set when there is
+ a new or updated request by a remote unit for Azure integration features.
+ The Azure integration charm should then iterate over each request, perform
+ whatever actions are necessary to satisfy those requests, and then mark
+ them as complete.
+
+
+
+
+A list of the IDs of all established relations.
+
+
requests
+
+
+A list of the new or updated `IntegrationRequests` that
+have been made.
+
+
get_departed_charms
+
+```python
+AzureIntegrationProvides.get_departed_charms(self)
+```
+
+Get a list of all charms that have had all units depart since the
+last time this was called.
+
+
mark_completed
+
+```python
+AzureIntegrationProvides.mark_completed(self)
+```
+
+Mark all requests as completed and remove the `requests-pending` flag.
+
+
IntegrationRequest
+
+```python
+IntegrationRequest(self, unit)
+```
+
+A request for integration from a single remote unit.
+
+
application_name
+
+
+The name of the application making the request.
+
+
charm
+
+
+The charm name reported for this request.
+
+
instance_tags
+
+
+Mapping of tag names to values to apply to this instance.
+
+
is_changed
+
+
+Whether this request has changed since the last time it was
+marked completed (if ever).
+
+
model_uuid
+
+
+The UUID of the model containing the application making this request.
+
+
relation_id
+
+
+The ID of the relation for the unit making the request.
+
+
+
+
+The resource group reported for this request.
+
+
unit_name
+
+
+The name of the unit making the request.
+
+
vm_id
+
+
+The instance ID reported for this request.
+
+
vm_name
+
+
+The instance name reported for this request.
+
+
mark_completed
+
+```python
+IntegrationRequest.mark_completed(self)
+```
+
+Mark this request as having been completed.
+
diff --git a/kubernetes-worker/hooks/relations/azure-integration/docs/requires.md b/kubernetes-worker/hooks/relations/azure-integration/docs/requires.md
new file mode 100644
index 0000000..608b4ee
--- /dev/null
+++ b/kubernetes-worker/hooks/relations/azure-integration/docs/requires.md
@@ -0,0 +1,145 @@
+
requires
+
+
+This is the requires side of the interface layer, for use in charms that
+wish to request integration with Azure native features. The integration will
+be provided by the Azure integrator charm, which allows the requiring charm
+to not require cloud credentials itself and not have a lot of Azure specific
+API code.
+
+The flags that are set by the requires side of this interface are:
+
+* **`endpoint.{endpoint_name}.joined`** This flag is set when the relation
+ has been joined, and the charm should then use the methods documented below
+ to request specific Azure features. This flag is automatically removed if
+ the relation is broken. It should not be removed by the charm.
+
+* **`endpoint.{endpoint_name}.ready`** This flag is set once the requested
+ features have been enabled for the Azure instance on which the charm is
+ running. This flag is automatically removed if new integration features
+ are requested. It should not be removed by the charm.
+
+
AzureIntegrationRequires
+
+```python
+AzureIntegrationRequires(self, *args, **kwargs)
+```
+
+Interface to request integration access.
+
+Note that due to resource limits and permissions granularity, policies are
+limited to being applied at the charm level. That means that, if any
+permissions are requested (i.e., any of the enable methods are called),
+what is granted will be the sum of those ever requested by any instance of
+the charm on this cloud.
+
+Labels, on the other hand, will be instance specific.
+
+Example usage:
+
+```python
+from charms.reactive import when, endpoint_from_flag
+
+@when('endpoint.azure.joined')
+def request_azure_integration():
+ azure = endpoint_from_flag('endpoint.azure.joined')
+ azure.tag_instance({
+ 'tag1': 'value1',
+ 'tag2': None,
+ })
+ azure.request_load_balancer_management()
+ # ...
+
+@when('endpoint.azure.ready')
+def azure_integration_ready():
+ update_config_enable_azure()
+```
+
+
is_ready
+
+
+Whether or not the request for this instance has been completed.
+
+
resource_group
+
+
+The resource group this unit is in.
+
+
vm_id
+
+
+This unit's instance ID.
+
+
vm_name
+
+
+This unit's instance name.
+
+
tag_instance
+
+```python
+AzureIntegrationRequires.tag_instance(self, tags)
+```
+
+Request that the given tags be applied to this instance.
+
+__Parameters__
+
+- __`tags` (dict)__: Mapping of tags names to values.
+
+
enable_instance_inspection
+
+```python
+AzureIntegrationRequires.enable_instance_inspection(self)
+```
+
+Request the ability to inspect instances.
+
+
enable_network_management
+
+```python
+AzureIntegrationRequires.enable_network_management(self)
+```
+
+Request the ability to manage networking.
+
+
enable_security_management
+
+```python
+AzureIntegrationRequires.enable_security_management(self)
+```
+
+Request the ability to manage security (e.g., firewalls).
+
+
enable_block_storage_management
+
+```python
+AzureIntegrationRequires.enable_block_storage_management(self)
+```
+
+Request the ability to manage block storage.
+
+
enable_dns_management
+
+```python
+AzureIntegrationRequires.enable_dns_management(self)
+```
+
+Request the ability to manage DNS.
+
+
enable_object_storage_access
+
+```python
+AzureIntegrationRequires.enable_object_storage_access(self)
+```
+
+Request the ability to access object storage.
+
+
enable_object_storage_management
+
+```python
+AzureIntegrationRequires.enable_object_storage_management(self)
+```
+
+Request the ability to manage object storage.
+
diff --git a/kubernetes-worker/hooks/relations/azure-integration/interface.yaml b/kubernetes-worker/hooks/relations/azure-integration/interface.yaml
new file mode 100644
index 0000000..a77a7cb
--- /dev/null
+++ b/kubernetes-worker/hooks/relations/azure-integration/interface.yaml
@@ -0,0 +1,4 @@
+name: azure-integration
+summary: Interface for connecting to the Azure integrator charm.
+version: 1
+maintainer: Cory Johns
diff --git a/kubernetes-worker/hooks/relations/azure-integration/make_docs b/kubernetes-worker/hooks/relations/azure-integration/make_docs
new file mode 100644
index 0000000..84df5ee
--- /dev/null
+++ b/kubernetes-worker/hooks/relations/azure-integration/make_docs
@@ -0,0 +1,20 @@
+#!.tox/py3/bin/python
+# Generate the interface API docs (docs/requires.md, docs/provides.md)
+# via pydoc-markdown, driven by the settings in pydocmd.yml.
+
+import sys
+from shutil import rmtree
+from unittest.mock import patch
+
+import pydocmd.__main__
+
+
+# NOTE(review): hookenv.metadata is stubbed here, presumably so that
+# requires.py/provides.py can be imported outside of a real charm
+# deployment (no metadata.yaml available) — confirm against charmhelpers.
+with patch('charmhelpers.core.hookenv.metadata') as metadata:
+    metadata.return_value = {
+        'requires': {'azure': {'interface': 'azure-integration'}},
+        'provides': {'azure': {'interface': 'azure-integration'}},
+    }
+    sys.path.insert(0, '.')
+    print(sys.argv)
+    if len(sys.argv) == 1:
+        # Default to the `build` subcommand when none was given.
+        sys.argv.extend(['build'])
+    pydocmd.__main__.main()
+    # Only the generated markdown under docs/ is kept; discard the
+    # intermediate _build output.
+    rmtree('_build')
diff --git a/kubernetes-worker/hooks/relations/azure-integration/provides.py b/kubernetes-worker/hooks/relations/azure-integration/provides.py
new file mode 100644
index 0000000..e0d596e
--- /dev/null
+++ b/kubernetes-worker/hooks/relations/azure-integration/provides.py
@@ -0,0 +1,267 @@
+"""
+This is the provides side of the interface layer, for use only by the Azure
+integrator charm itself.
+
+The flags that are set by the provides side of this interface are:
+
+* **`endpoint.{endpoint_name}.requested`** This flag is set when there is
+ a new or updated request by a remote unit for Azure integration features.
+ The Azure integration charm should then iterate over each request, perform
+ whatever actions are necessary to satisfy those requests, and then mark
+ them as complete.
+"""
+
+from operator import attrgetter
+
+from charms.reactive import Endpoint
+from charms.reactive import when
+from charms.reactive import toggle_flag, clear_flag
+
+
+class AzureIntegrationProvides(Endpoint):
+ """
+ Example usage:
+
+ ```python
+ from charms.reactive import when, endpoint_from_flag
+ from charms import layer
+
+ @when('endpoint.azure.requests-pending')
+ def handle_requests():
+ azure = endpoint_from_flag('endpoint.azure.requests-pending')
+ for request in azure.requests:
+ if request.instance_tags:
+ layer.azure.tag_instance(
+ request.vm_name,
+ request.resource_group,
+ request.instance_tags)
+ if request.requested_load_balancer_management:
+ layer.azure.enable_load_balancer_management(
+ request.charm,
+ request.vm_name,
+ request.resource_group,
+ )
+ # ...
+ azure.mark_completed()
+ ```
+ """
+
+    @when('endpoint.{endpoint_name}.changed')
+    def check_requests(self):
+        # Mirror "any unit has a new/updated request" into the
+        # `requests-pending` flag, then acknowledge `changed` so this
+        # handler only fires again on the next relation data change.
+        toggle_flag(self.expand_name('requests-pending'),
+                    len(self.requests) > 0)
+        clear_flag(self.expand_name('changed'))
+
+    @property
+    def requests(self):
+        """
+        A list of the new or updated #IntegrationRequests that
+        have been made.
+
+        The list is computed once and cached on the instance;
+        `mark_completed` resets the cache.
+        """
+        if not hasattr(self, '_requests'):
+            # Only surface requests that have changed since they were
+            # last marked completed (if ever).
+            all_requests = [IntegrationRequest(unit)
+                            for unit in self.all_joined_units]
+            is_changed = attrgetter('is_changed')
+            self._requests = list(filter(is_changed, all_requests))
+        return self._requests
+
+ @property
+ def relation_ids(self):
+ """
+ A list of the IDs of all established relations.
+ """
+ return [relation.relation_id for relation in self.relations]
+
+    def get_departed_charms(self):
+        """
+        Get a list of all charms that have had all units depart since the
+        last time this was called.
+
+        Note: this clears `all_departed_units`, so each departure is
+        reported at most once.
+        """
+        # A charm only counts as departed if none of its units remain
+        # joined on any relation.
+        joined_charms = {unit.received['charm']
+                         for unit in self.all_joined_units
+                         if unit.received['charm']}
+        departed_charms = [unit.received['charm']
+                           for unit in self.all_departed_units
+                           if unit.received['charm'] not in joined_charms]
+        self.all_departed_units.clear()
+        return departed_charms
+
+ def mark_completed(self):
+ """
+ Mark all requests as completed and remove the `requests-pending` flag.
+ """
+ for request in self.requests:
+ request.mark_completed()
+ clear_flag(self.expand_name('requests-pending'))
+ self._requests = []
+
+
+class IntegrationRequest:
+ """
+ A request for integration from a single remote unit.
+ """
+ def __init__(self, unit):
+ self._unit = unit
+
+ @property
+ def _to_publish(self):
+ return self._unit.relation.to_publish
+
+ @property
+ def _completed(self):
+ return self._to_publish.get('completed', {})
+
+ @property
+ def _requested(self):
+ return self._unit.received['requested']
+
+ @property
+ def is_changed(self):
+ """
+ Whether this request has changed since the last time it was
+ marked completed (if ever).
+ """
+ if not all([self.charm, self.vm_id, self.vm_name,
+ self.resource_group, self._requested]):
+ return False
+ return self._completed.get(self.vm_id) != self._requested
+
+ def mark_completed(self):
+ """
+ Mark this request as having been completed.
+ """
+ completed = self._completed
+ completed[self.vm_id] = self._requested
+ self._to_publish['completed'] = completed # have to explicitly update
+
+    def send_additional_metadata(self, resource_group_location,
+                                 vnet_name, vnet_resource_group,
+                                 subnet_name, security_group_name):
+        """
+        Publish additional Azure environment metadata back to the
+        requesting charm on the relation.
+
+        Each value is published under the corresponding hyphenated
+        relation key (e.g. `resource-group-location`).
+        """
+        self._to_publish.update({
+            'resource-group-location': resource_group_location,
+            'vnet-name': vnet_name,
+            'vnet-resource-group': vnet_resource_group,
+            'subnet-name': subnet_name,
+            'security-group-name': security_group_name,
+        })
+
+ @property
+ def relation_id(self):
+ """
+ The ID of the relation for the unit making the request.
+ """
+ return self._unit.relation.relation_id
+
+ @property
+ def unit_name(self):
+ """
+ The name of the unit making the request.
+ """
+ return self._unit.unit_name
+
+ @property
+ def application_name(self):
+ """
+ The name of the application making the request.
+ """
+ return self._unit.application_name
+
+ @property
+ def charm(self):
+ """
+ The charm name reported for this request.
+ """
+ return self._unit.received['charm']
+
+ @property
+ def vm_id(self):
+ """
+ The instance ID reported for this request.
+ """
+ return self._unit.received['vm-id']
+
+ @property
+ def vm_name(self):
+ """
+ The instance name reported for this request.
+ """
+ return self._unit.received['vm-name']
+
+ @property
+ def resource_group(self):
+ """
+ The resource group reported for this request.
+ """
+ return self._unit.received['res-group']
+
+ @property
+ def model_uuid(self):
+ """
+ The UUID of the model containing the application making this request.
+ """
+ return self._unit.received['model-uuid']
+
+ @property
+ def instance_tags(self):
+ """
+ Mapping of tag names to values to apply to this instance.
+ """
+ # uses dict() here to make a copy, just to be safe
+ return dict(self._unit.received.get('instance-tags', {}))
+
+ @property
+ def requested_instance_inspection(self):
+ """
+ Flag indicating whether the ability to inspect instances was requested.
+ """
+ return bool(self._unit.received['enable-instance-inspection'])
+
+ @property
+ def requested_network_management(self):
+ """
+ Flag indicating whether the ability to manage networking was requested.
+ """
+ return bool(self._unit.received['enable-network-management'])
+
+    @property
+    def requested_loadbalancer_management(self):
+        """
+        Flag indicating whether load balancer management was requested.
+        """
+        return bool(self._unit.received['enable-loadbalancer-management'])
+
+ @property
+ def requested_security_management(self):
+ """
+ Flag indicating whether security management was requested.
+ """
+ return bool(self._unit.received['enable-security-management'])
+
+ @property
+ def requested_block_storage_management(self):
+ """
+ Flag indicating whether block storage management was requested.
+ """
+ return bool(self._unit.received['enable-block-storage-management'])
+
+ @property
+ def requested_dns_management(self):
+ """
+ Flag indicating whether DNS management was requested.
+ """
+ return bool(self._unit.received['enable-dns-management'])
+
+ @property
+ def requested_object_storage_access(self):
+ """
+ Flag indicating whether object storage access was requested.
+ """
+ return bool(self._unit.received['enable-object-storage-access'])
+
+ @property
+ def requested_object_storage_management(self):
+ """
+ Flag indicating whether object storage management was requested.
+ """
+ return bool(self._unit.received['enable-object-storage-management'])
diff --git a/kubernetes-worker/hooks/relations/azure-integration/pydocmd.yml b/kubernetes-worker/hooks/relations/azure-integration/pydocmd.yml
new file mode 100644
index 0000000..6414c29
--- /dev/null
+++ b/kubernetes-worker/hooks/relations/azure-integration/pydocmd.yml
@@ -0,0 +1,16 @@
+site_name: 'Azure Integration Interface'
+
+generate:
+ - requires.md:
+ - requires
+ - requires.AzureIntegrationRequires+
+ - provides.md:
+ - provides
+ - provides.AzureIntegrationProvides+
+ - provides.IntegrationRequest+
+
+pages:
+ - Requires: requires.md
+ - Provides: provides.md
+
+gens_dir: docs
diff --git a/kubernetes-worker/hooks/relations/azure-integration/requires.py b/kubernetes-worker/hooks/relations/azure-integration/requires.py
new file mode 100644
index 0000000..62f2b01
--- /dev/null
+++ b/kubernetes-worker/hooks/relations/azure-integration/requires.py
@@ -0,0 +1,282 @@
+"""
+This is the requires side of the interface layer, for use in charms that
+wish to request integration with Azure native features. The integration will
+be provided by the Azure integrator charm, which allows the requiring charm
+to not require cloud credentials itself and not have a lot of Azure specific
+API code.
+
+The flags that are set by the requires side of this interface are:
+
+* **`endpoint.{endpoint_name}.joined`** This flag is set when the relation
+ has been joined, and the charm should then use the methods documented below
+ to request specific Azure features. This flag is automatically removed if
+ the relation is broken. It should not be removed by the charm.
+
+* **`endpoint.{endpoint_name}.ready`** This flag is set once the requested
+ features have been enabled for the Azure instance on which the charm is
+ running. This flag is automatically removed if new integration features
+ are requested. It should not be removed by the charm.
+"""
+
+
+import json
+import os
+import random
+import string
+from urllib.request import urlopen, Request
+
+from charmhelpers.core import hookenv
+from charmhelpers.core import unitdata
+
+from charms.reactive import Endpoint
+from charms.reactive import when, when_not
+from charms.reactive import clear_flag, toggle_flag
+
+
+# block size to read data from Azure metadata service
+# (realistically, just needs to be bigger than ~20 chars)
+READ_BLOCK_SIZE = 2048
+
+
+class AzureIntegrationRequires(Endpoint):
+ """
+ Interface to request integration access.
+
+ Note that due to resource limits and permissions granularity, policies are
+ limited to being applied at the charm level. That means that, if any
+ permissions are requested (i.e., any of the enable methods are called),
+ what is granted will be the sum of those ever requested by any instance of
+ the charm on this cloud.
+
+ Labels, on the other hand, will be instance specific.
+
+ Example usage:
+
+ ```python
+ from charms.reactive import when, endpoint_from_flag
+
+ @when('endpoint.azure.joined')
+ def request_azure_integration():
+ azure = endpoint_from_flag('endpoint.azure.joined')
+ azure.tag_instance({
+ 'tag1': 'value1',
+ 'tag2': None,
+ })
+ azure.request_load_balancer_management()
+ # ...
+
+ @when('endpoint.azure.ready')
+ def azure_integration_ready():
+ update_config_enable_azure()
+ ```
+ """
+ # https://docs.microsoft.com/en-us/azure/virtual-machines/windows/instance-metadata-service
+ _metadata_url = 'http://169.254.169.254/metadata/instance?api-version=2017-12-01' # noqa
+ _metadata_headers = {'Metadata': 'true'}
+
+ def __init__(self, *args, **kwargs):
+ super().__init__(*args, **kwargs)
+ self._vm_metadata = None
+
+ @property
+ def _received(self):
+ """
+ Helper to streamline access to received data since we expect to only
+ ever be connected to a single Azure integration application with a
+ single unit.
+ """
+ return self.relations[0].joined_units.received
+
+ @property
+ def _to_publish(self):
+ """
+ Helper to streamline access to received data since we expect to only
+ ever be connected to a single Azure integration application with a
+ single unit.
+ """
+ return self.relations[0].to_publish
+
+ @when('endpoint.{endpoint_name}.joined')
+ def send_instance_info(self):
+ self._to_publish['charm'] = hookenv.charm_name()
+ self._to_publish['vm-id'] = self.vm_id
+ self._to_publish['vm-name'] = self.vm_name
+ self._to_publish['res-group'] = self.resource_group
+ self._to_publish['model-uuid'] = os.environ['JUJU_MODEL_UUID']
+
+ @when('endpoint.{endpoint_name}.changed')
+ def check_ready(self):
+ # My middle name is ready. No, that doesn't sound right.
+ # I eat ready for breakfast.
+ toggle_flag(self.expand_name('ready'), self.is_ready)
+ clear_flag(self.expand_name('changed'))
+
+ @when_not('endpoint.{endpoint_name}.joined')
+ def remove_ready(self):
+ clear_flag(self.expand_name('ready'))
+
+ @property
+ def vm_metadata(self):
+ if self._vm_metadata is None:
+ cache_key = self.expand_name('vm-metadata')
+ cached = unitdata.kv().get(cache_key)
+ if cached:
+ self._vm_metadata = cached
+ else:
+ req = Request(self._metadata_url,
+ headers=self._metadata_headers)
+ with urlopen(req) as fd:
+ metadata = fd.read(READ_BLOCK_SIZE).decode('utf8').strip()
+ self._vm_metadata = json.loads(metadata)
+ unitdata.kv().set(cache_key, self._vm_metadata)
+ return self._vm_metadata
+
+ @property
+ def vm_id(self):
+ """
+ This unit's instance ID.
+ """
+ return self.vm_metadata['compute']['vmId']
+
+ @property
+ def vm_name(self):
+ """
+ This unit's instance name.
+ """
+ return self.vm_metadata['compute']['name']
+
+ @property
+ def vm_location(self):
+ """
+ The location (region) the instance is running in.
+ """
+ return self.vm_metadata['compute']['location']
+
+ @property
+ def resource_group(self):
+ """
+ The resource group this unit is in.
+ """
+ return self.vm_metadata['compute']['resourceGroupName']
+
+ @property
+ def resource_group_location(self):
+ """
+ The location (region) the resource group is in.
+ """
+ return self._received['resource-group-location']
+
+ @property
+ def subscription_id(self):
+ """
+ The ID of the Azure Subscription this unit is in.
+ """
+ return self.vm_metadata['compute']['subscriptionId']
+
+ @property
+ def vnet_name(self):
+ """
+ The name of the virtual network the instance is in.
+ """
+ return self._received['vnet-name']
+
+ @property
+ def vnet_resource_group(self):
+ """
+ The name of the virtual network the instance is in.
+ """
+ return self._received['vnet-resource-group']
+
+ @property
+ def subnet_name(self):
+ """
+ The name of the subnet the instance is in.
+ """
+ return self._received['subnet-name']
+
+ @property
+ def security_group_name(self):
+ """
+ The name of the security group attached to the cluster's subnet.
+ """
+ return self._received['security-group-name']
+
+ @property
+ def is_ready(self):
+ """
+ Whether or not the request for this instance has been completed.
+ """
+ requested = self._to_publish['requested']
+ completed = self._received.get('completed', {}).get(self.vm_id)
+ return requested and requested == completed
+
+ @property
+ def credentials(self):
+ return self._received['credentials']
+
+ def _request(self, keyvals):
+ alphabet = string.ascii_letters + string.digits
+ nonce = ''.join(random.choice(alphabet) for _ in range(8))
+ self._to_publish.update(keyvals)
+ self._to_publish['requested'] = nonce
+ clear_flag(self.expand_name('ready'))
+
+ def tag_instance(self, tags):
+ """
+ Request that the given tags be applied to this instance.
+
+ # Parameters
+ `tags` (dict): Mapping of tags names to values.
+ """
+ self._request({'instance-tags': dict(tags)})
+
+ def enable_instance_inspection(self):
+ """
+ Request the ability to inspect instances.
+ """
+ self._request({'enable-instance-inspection': True})
+
+ def enable_network_management(self):
+ """
+ Request the ability to manage networking.
+ """
+ self._request({'enable-network-management': True})
+
+ def enable_loadbalancer_management(self):
+ """
+ Request the ability to manage networking.
+ """
+ self._request({'enable-loadbalancer-management': True})
+
+
+ def enable_security_management(self):
+ """
+ Request the ability to manage security (e.g., firewalls).
+ """
+ self._request({'enable-security-management': True})
+
+ def enable_block_storage_management(self):
+ """
+ Request the ability to manage block storage.
+ """
+ self._request({'enable-block-storage-management': True})
+
+ def enable_dns_management(self):
+ """
+ Request the ability to manage DNS.
+ """
+ self._request({'enable-dns': True})
+
+ def enable_object_storage_access(self):
+ """
+ Request the ability to access object storage.
+ """
+ self._request({'enable-object-storage-access': True})
+
+ def enable_object_storage_management(self):
+ """
+ Request the ability to manage object storage.
+ """
+ self._request({'enable-object-storage-management': True})
+
+
diff --git a/kubernetes-worker/hooks/relations/container-runtime/.gitignore b/kubernetes-worker/hooks/relations/container-runtime/.gitignore
new file mode 100644
index 0000000..894a44c
--- /dev/null
+++ b/kubernetes-worker/hooks/relations/container-runtime/.gitignore
@@ -0,0 +1,104 @@
+# Byte-compiled / optimized / DLL files
+__pycache__/
+*.py[cod]
+*$py.class
+
+# C extensions
+*.so
+
+# Distribution / packaging
+.Python
+build/
+develop-eggs/
+dist/
+downloads/
+eggs/
+.eggs/
+lib/
+lib64/
+parts/
+sdist/
+var/
+wheels/
+*.egg-info/
+.installed.cfg
+*.egg
+MANIFEST
+
+# PyInstaller
+# Usually these files are written by a python script from a template
+# before PyInstaller builds the exe, so as to inject date/other infos into it.
+*.manifest
+*.spec
+
+# Installer logs
+pip-log.txt
+pip-delete-this-directory.txt
+
+# Unit test / coverage reports
+htmlcov/
+.tox/
+.coverage
+.coverage.*
+.cache
+nosetests.xml
+coverage.xml
+*.cover
+.hypothesis/
+.pytest_cache/
+
+# Translations
+*.mo
+*.pot
+
+# Django stuff:
+*.log
+local_settings.py
+db.sqlite3
+
+# Flask stuff:
+instance/
+.webassets-cache
+
+# Scrapy stuff:
+.scrapy
+
+# Sphinx documentation
+docs/_build/
+
+# PyBuilder
+target/
+
+# Jupyter Notebook
+.ipynb_checkpoints
+
+# pyenv
+.python-version
+
+# celery beat schedule file
+celerybeat-schedule
+
+# SageMath parsed files
+*.sage.py
+
+# Environments
+.env
+.venv
+env/
+venv/
+ENV/
+env.bak/
+venv.bak/
+
+# Spyder project settings
+.spyderproject
+.spyproject
+
+# Rope project settings
+.ropeproject
+
+# mkdocs documentation
+/site
+
+# mypy
+.mypy_cache/
diff --git a/kubernetes-worker/hooks/relations/container-runtime/LICENSE b/kubernetes-worker/hooks/relations/container-runtime/LICENSE
new file mode 100644
index 0000000..261eeb9
--- /dev/null
+++ b/kubernetes-worker/hooks/relations/container-runtime/LICENSE
@@ -0,0 +1,201 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/kubernetes-worker/hooks/relations/container-runtime/README.md b/kubernetes-worker/hooks/relations/container-runtime/README.md
new file mode 100644
index 0000000..4620013
--- /dev/null
+++ b/kubernetes-worker/hooks/relations/container-runtime/README.md
@@ -0,0 +1,45 @@
+# interface-container-runtime
+
+## Overview
+
+This interface handles communication between subordinate charms that provide a container runtime and charms requiring a container runtime.
+
+## Usage
+
+### Provides
+
+The providing side of the container interface provides a place for a container runtime to connect to.
+
+Your charm should respond to the `endpoint.{endpoint_name}.available` state,
+which indicates that there is a container runtime connected.
+
+A trivial example of handling this interface would be:
+
+```python
+@when('endpoint.containerd.joined')
+def update_kubelet_config(containerd):
+ endpoint = endpoint_from_flag('endpoint.containerd.joined')
+ config = endpoint.get_config()
+ kubelet.config['container-runtime'] = \
+ config['runtime']
+```
+
+### Requires
+
+The requiring side of the container interface requires a place for a container runtime to connect to.
+
+Your charm should set `{endpoint_name}.available` state,
+which indicates that the container runtime is connected.
+
+A trivial example of handling this interface would be:
+
+```python
+@when('endpoint.containerd.joined')
+def publish_config():
+ endpoint = endpoint_from_flag('endpoint.containerd.joined')
+ endpoint.set_config(
+ socket='unix:///var/run/containerd/containerd.sock',
+ runtime='remote',
+ nvidia_enabled=False
+ )
+```
diff --git a/kubernetes-worker/hooks/relations/container-runtime/__init__.py b/kubernetes-worker/hooks/relations/container-runtime/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/kubernetes-worker/hooks/relations/container-runtime/interface.yaml b/kubernetes-worker/hooks/relations/container-runtime/interface.yaml
new file mode 100644
index 0000000..294be1e
--- /dev/null
+++ b/kubernetes-worker/hooks/relations/container-runtime/interface.yaml
@@ -0,0 +1,4 @@
+name: container-runtime
+summary: Interface for relating to container runtimes
+version: 1
+maintainer: "Joe Borg "
diff --git a/kubernetes-worker/hooks/relations/container-runtime/provides.py b/kubernetes-worker/hooks/relations/container-runtime/provides.py
new file mode 100644
index 0000000..a9768a8
--- /dev/null
+++ b/kubernetes-worker/hooks/relations/container-runtime/provides.py
@@ -0,0 +1,55 @@
+from charms.reactive import (
+ Endpoint,
+ toggle_flag
+)
+
+
+class ContainerRuntimeProvides(Endpoint):
+ def manage_flags(self):
+ toggle_flag(self.expand_name('endpoint.{endpoint_name}.available'),
+ self.is_joined)
+
+ def _get_config(self, key):
+ """
+ Get the published configuration for a given key.
+
+ :param key: String dict key
+ :return: String value for given key
+ """
+ return self.all_joined_units.received.get(key)
+
+ def get_nvidia_enabled(self):
+ """
+ Get the published nvidia config.
+
+ :return: String
+ """
+ return self._get_config(key='nvidia_enabled')
+
+ def get_runtime(self):
+ """
+ Get the published runtime config.
+
+ :return: String
+ """
+ return self._get_config(key='runtime')
+
+ def get_socket(self):
+ """
+ Get the published socket config.
+
+ :return: String
+ """
+ return self._get_config(key='socket')
+
+ def set_config(self, sandbox_image=None):
+ """
+ Set the configuration to be published.
+
+ :param sandbox_image: String to optionally override the sandbox image
+ :return: None
+ """
+ for relation in self.relations:
+ relation.to_publish.update({
+ 'sandbox_image': sandbox_image
+ })
diff --git a/kubernetes-worker/hooks/relations/container-runtime/requires.py b/kubernetes-worker/hooks/relations/container-runtime/requires.py
new file mode 100644
index 0000000..c461b68
--- /dev/null
+++ b/kubernetes-worker/hooks/relations/container-runtime/requires.py
@@ -0,0 +1,61 @@
+from charms.reactive import (
+ Endpoint,
+ clear_flag,
+ data_changed,
+ is_data_changed,
+ toggle_flag
+)
+
+
+class ContainerRuntimeRequires(Endpoint):
+ def manage_flags(self):
+ toggle_flag(self.expand_name('endpoint.{endpoint_name}.available'),
+ self.is_joined)
+ toggle_flag(self.expand_name('endpoint.{endpoint_name}.reconfigure'),
+ self.is_joined and self._config_changed())
+
+ def _config_changed(self):
+ """
+ Determine if our received data has changed.
+
+ :return: Boolean
+ """
+ # NB: this call should match whatever we're tracking in handle_remote_config
+ return is_data_changed('containerd.remote_config',
+ [self.get_sandbox_image()])
+
+ def handle_remote_config(self):
+ """
+ Keep track of received data so we can know if it changes.
+
+ :return: None
+ """
+ clear_flag(self.expand_name('endpoint.{endpoint_name}.reconfigure'))
+ # Presently, we only care about one piece of remote config. Expand
+ # the list as needed.
+ data_changed('containerd.remote_config',
+ [self.get_sandbox_image()])
+
+ def get_sandbox_image(self):
+ """
+ Get the sandbox image URI if a remote has published one.
+
+ :return: String: remotely configured sandbox image
+ """
+ return self.all_joined_units.received.get('sandbox_image')
+
+ def set_config(self, socket, runtime, nvidia_enabled):
+ """
+ Set the configuration to be published.
+
+ :param socket: String uri to runtime socket
+ :param runtime: String runtime executable
+ :param nvidia_enabled: Boolean nvidia runtime enabled
+ :return: None
+ """
+ for relation in self.relations:
+ relation.to_publish.update({
+ 'socket': socket,
+ 'runtime': runtime,
+ 'nvidia_enabled': nvidia_enabled
+ })
diff --git a/kubernetes-worker/hooks/relations/coordinator/peers.py b/kubernetes-worker/hooks/relations/coordinator/peers.py
new file mode 100644
index 0000000..f443bf6
--- /dev/null
+++ b/kubernetes-worker/hooks/relations/coordinator/peers.py
@@ -0,0 +1,21 @@
+# Copyright 2016-2018 Canonical Ltd.
+#
+# This file is part of the Coordinator Layer for Juju charms.
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 3, as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranties of
+# MERCHANTABILITY, SATISFACTORY QUALITY, or FITNESS FOR A PARTICULAR
+# PURPOSE. See the GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see .
+
+from charms import reactive
+
+
+class CoordinatorPeer(reactive.Endpoint):
+ pass
diff --git a/kubernetes-worker/hooks/relations/gcp-integration/.gitignore b/kubernetes-worker/hooks/relations/gcp-integration/.gitignore
new file mode 100644
index 0000000..5f9f2c5
--- /dev/null
+++ b/kubernetes-worker/hooks/relations/gcp-integration/.gitignore
@@ -0,0 +1,3 @@
+.tox
+__pycache__
+*.pyc
diff --git a/kubernetes-worker/hooks/relations/gcp-integration/LICENSE b/kubernetes-worker/hooks/relations/gcp-integration/LICENSE
new file mode 100644
index 0000000..d645695
--- /dev/null
+++ b/kubernetes-worker/hooks/relations/gcp-integration/LICENSE
@@ -0,0 +1,202 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/kubernetes-worker/hooks/relations/gcp-integration/README.md b/kubernetes-worker/hooks/relations/gcp-integration/README.md
new file mode 100644
index 0000000..42861fb
--- /dev/null
+++ b/kubernetes-worker/hooks/relations/gcp-integration/README.md
@@ -0,0 +1,28 @@
+# Overview
+
+This layer encapsulates the `gcp-integration` interface communication protocol
+and provides an API for charms on either side of relations using this
+interface.
+
+## Usage
+
+In your charm's `layer.yaml`, ensure that `interface:gcp-integration` is
+included in the `includes` section:
+
+```yaml
+includes: ['layer:basic', 'interface:gcp-integration']
+```
+
+And in your charm's `metadata.yaml`, ensure that a relation endpoint is defined
+using the `gcp-integration` interface protocol:
+
+```yaml
+requires:
+ gcp:
+ interface: gcp-integration
+```
+
+For documentation on how to use the API for this interface, see:
+
+* [Requires API documentation](docs/requires.md)
+* [Provides API documentation](docs/provides.md) (this will only be used by the gcp-integrator charm)
diff --git a/kubernetes-worker/hooks/relations/gcp-integration/__init__.py b/kubernetes-worker/hooks/relations/gcp-integration/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/kubernetes-worker/hooks/relations/gcp-integration/copyright b/kubernetes-worker/hooks/relations/gcp-integration/copyright
new file mode 100644
index 0000000..a91bdf1
--- /dev/null
+++ b/kubernetes-worker/hooks/relations/gcp-integration/copyright
@@ -0,0 +1,16 @@
+Format: http://dep.debian.net/deps/dep5/
+
+Files: *
+Copyright: Copyright 2018, Canonical Ltd., All Rights Reserved.
+License: Apache License 2.0
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+ .
+ http://www.apache.org/licenses/LICENSE-2.0
+ .
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/kubernetes-worker/hooks/relations/gcp-integration/docs/provides.md b/kubernetes-worker/hooks/relations/gcp-integration/docs/provides.md
new file mode 100644
index 0000000..6f29a39
--- /dev/null
+++ b/kubernetes-worker/hooks/relations/gcp-integration/docs/provides.md
@@ -0,0 +1,183 @@
+
+# provides
+
+
+This is the provides side of the interface layer, for use only by the GCP
+integration charm itself.
+
+The flags that are set by the provides side of this interface are:
+
+* **`endpoint.{endpoint_name}.requested`** This flag is set when there is
+ a new or updated request by a remote unit for GCP integration features.
+ The GCP integration charm should then iterate over each request, perform
+ whatever actions are necessary to satisfy those requests, and then mark
+ them as complete.
+
+
+
+### relation_ids
+
+A list of the IDs of all established relations.
+
+
+### requests
+
+
+A list of the new or updated `IntegrationRequests` that
+have been made.
+
+
+### get_departed_charms
+
+```python
+GCPIntegrationProvides.get_departed_charms(self)
+```
+
+Get a list of all charms that have had all units depart since the
+last time this was called.
+
+
+### mark_completed
+
+```python
+GCPIntegrationProvides.mark_completed(self)
+```
+
+Mark all requests as completed and remove the `requests-pending` flag.
+
+
+## IntegrationRequest
+
+```python
+IntegrationRequest(self, unit)
+```
+
+A request for integration from a single remote unit.
+
+
+### application_name
+
+
+The name of the application making the request.
+
+
+### charm
+
+
+The charm name reported for this request.
+
+
+### has_credentials
+
+
+Whether or not credentials have been set via `set_credentials`.
+
+
+### instance
+
+
+The instance name reported for this request.
+
+
+### instance_labels
+
+
+Mapping of label names to values to apply to this instance.
+
+
+### is_changed
+
+
+Whether this request has changed since the last time it was
+marked completed (if ever).
+
+
+### model_uuid
+
+
+The UUID of the model containing the application making this request.
+
+
+### relation_id
+
+
+The ID of the relation for the unit making the request.
+
+
+### mark_completed
+
+```python
+IntegrationRequest.mark_completed(self)
+```
+
+Mark this request as having been completed.
+
+
+### set_credentials
+
+```python
+IntegrationRequest.set_credentials(self, credentials)
+```
+
+Set the credentials for this request.
+
diff --git a/kubernetes-worker/hooks/relations/gcp-integration/docs/requires.md b/kubernetes-worker/hooks/relations/gcp-integration/docs/requires.md
new file mode 100644
index 0000000..36e23c2
--- /dev/null
+++ b/kubernetes-worker/hooks/relations/gcp-integration/docs/requires.md
@@ -0,0 +1,140 @@
+
+# requires
+
+
+This is the requires side of the interface layer, for use in charms that
+wish to request integration with GCP native features. The integration will
+be provided by the GCP integration charm, which allows the requiring charm
+to not require cloud credentials itself and not have a lot of GCP specific
+API code.
+
+The flags that are set by the requires side of this interface are:
+
+* **`endpoint.{endpoint_name}.joined`** This flag is set when the relation
+ has been joined, and the charm should then use the methods documented below
+ to request specific GCP features. This flag is automatically removed if
+ the relation is broken. It should not be removed by the charm.
+
+* **`endpoint.{endpoint_name}.ready`** This flag is set once the requested
+ features have been enabled for the GCP instance on which the charm is
+ running. This flag is automatically removed if new integration features
+ are requested. It should not be removed by the charm.
+
+
+## GCPIntegrationRequires
+
+```python
+GCPIntegrationRequires(self, *args, **kwargs)
+```
+
+Interface to request integration access.
+
+Note that due to resource limits and permissions granularity, policies are
+limited to being applied at the charm level. That means that, if any
+permissions are requested (i.e., any of the enable methods are called),
+what is granted will be the sum of those ever requested by any instance of
+the charm on this cloud.
+
+Labels, on the other hand, will be instance specific.
+
+Example usage:
+
+```python
+from charms.reactive import when, endpoint_from_flag
+
+@when('endpoint.gcp.joined')
+def request_gcp_integration():
+ gcp = endpoint_from_flag('endpoint.gcp.joined')
+ gcp.label_instance({
+ 'tag1': 'value1',
+ 'tag2': None,
+ })
+ gcp.request_load_balancer_management()
+ # ...
+
+@when('endpoint.gcp.ready')
+def gcp_integration_ready():
+ update_config_enable_gcp()
+```
+
+
+### instance
+
+
+This unit's instance name.
+
+
+### is_ready
+
+
+Whether or not the request for this instance has been completed.
+
+
+### zone
+
+
+The zone this unit is in.
+
+
+### label_instance
+
+```python
+GCPIntegrationRequires.label_instance(self, labels)
+```
+
+Request that the given labels be applied to this instance.
+
+__Parameters__
+
+- __`labels` (dict)__: Mapping of labels names to values.
+
+
+### enable_instance_inspection
+
+```python
+GCPIntegrationRequires.enable_instance_inspection(self)
+```
+
+Request the ability to inspect instances.
+
+
+### enable_network_management
+
+```python
+GCPIntegrationRequires.enable_network_management(self)
+```
+
+Request the ability to manage networking.
+
+
+### enable_security_management
+
+```python
+GCPIntegrationRequires.enable_security_management(self)
+```
+
+Request the ability to manage security (e.g., firewalls).
+
+
+### enable_block_storage_management
+
+```python
+GCPIntegrationRequires.enable_block_storage_management(self)
+```
+
+Request the ability to manage block storage.
+
+
+### enable_dns_management
+
+```python
+GCPIntegrationRequires.enable_dns_management(self)
+```
+
+Request the ability to manage DNS.
+
+
+### enable_object_storage_access
+
+```python
+GCPIntegrationRequires.enable_object_storage_access(self)
+```
+
+Request the ability to access object storage.
+
+
+### enable_object_storage_management
+
+```python
+GCPIntegrationRequires.enable_object_storage_management(self)
+```
+
+Request the ability to manage object storage.
+
diff --git a/kubernetes-worker/hooks/relations/gcp-integration/interface.yaml b/kubernetes-worker/hooks/relations/gcp-integration/interface.yaml
new file mode 100644
index 0000000..9966e3f
--- /dev/null
+++ b/kubernetes-worker/hooks/relations/gcp-integration/interface.yaml
@@ -0,0 +1,4 @@
+name: gcp-integration
+summary: Interface for connecting to the GCP integrator charm.
+version: 1
+maintainer: Cory Johns
diff --git a/kubernetes-worker/hooks/relations/gcp-integration/make_docs b/kubernetes-worker/hooks/relations/gcp-integration/make_docs
new file mode 100644
index 0000000..bd4e54e
--- /dev/null
+++ b/kubernetes-worker/hooks/relations/gcp-integration/make_docs
@@ -0,0 +1,20 @@
+#!.tox/py3/bin/python
+
+# Helper script: regenerate the markdown API docs under docs/ with pydocmd.
+# charmhelpers.core.hookenv.metadata is patched so requires.py/provides.py
+# can be imported outside of a real charm hook environment, where no
+# metadata.yaml is available.
+
+import sys
+from shutil import rmtree
+from unittest.mock import patch
+
+import pydocmd.__main__
+
+
+with patch('charmhelpers.core.hookenv.metadata') as metadata:
+    # Minimal metadata: just enough for the interface modules to resolve
+    # their 'gcp' endpoint on import.
+    metadata.return_value = {
+        'requires': {'gcp': {'interface': 'gcp-integration'}},
+        'provides': {'gcp': {'interface': 'gcp-integration'}},
+    }
+    sys.path.insert(0, '.')
+    print(sys.argv)
+    if len(sys.argv) == 1:
+        # Default to pydocmd's 'build' subcommand when none was given.
+        sys.argv.extend(['build'])
+    pydocmd.__main__.main()
+    # The generated docs land in docs/ (gens_dir); _build is only a
+    # staging area, so remove it.
+    rmtree('_build')
diff --git a/kubernetes-worker/hooks/relations/gcp-integration/provides.py b/kubernetes-worker/hooks/relations/gcp-integration/provides.py
new file mode 100644
index 0000000..ba34b0d
--- /dev/null
+++ b/kubernetes-worker/hooks/relations/gcp-integration/provides.py
@@ -0,0 +1,253 @@
+"""
+This is the provides side of the interface layer, for use only by the GCP
+integration charm itself.
+
+The flags that are set by the provides side of this interface are:
+
+* **`endpoint.{endpoint_name}.requested`** This flag is set when there is
+ a new or updated request by a remote unit for GCP integration features.
+ The GCP integration charm should then iterate over each request, perform
+ whatever actions are necessary to satisfy those requests, and then mark
+ them as complete.
+"""
+
+from operator import attrgetter
+
+from charms.reactive import Endpoint
+from charms.reactive import when
+from charms.reactive import toggle_flag, clear_flag
+
+
+class GCPIntegrationProvides(Endpoint):
+    """
+    Example usage:
+
+    ```python
+    from charms.reactive import when, endpoint_from_flag
+    from charms import layer
+
+    @when('endpoint.gcp.requests-pending')
+    def handle_requests():
+        gcp = endpoint_from_flag('endpoint.gcp.requests-pending')
+        for request in gcp.requests:
+            if request.instance_labels:
+                layer.gcp.label_instance(
+                    request.instance,
+                    request.zone,
+                    request.instance_labels)
+            if request.requested_load_balancer_management:
+                layer.gcp.enable_load_balancer_management(
+                    request.charm,
+                    request.instance,
+                    request.zone,
+                )
+            # ...
+        gcp.mark_completed()
+    ```
+    """
+
+    @when('endpoint.{endpoint_name}.changed')
+    def check_requests(self):
+        # Raise 'requests-pending' only while at least one joined unit has
+        # a new or updated request, then consume the 'changed' trigger so
+        # this handler doesn't re-fire until relation data changes again.
+        toggle_flag(self.expand_name('requests-pending'),
+                    len(self.requests) > 0)
+        clear_flag(self.expand_name('changed'))
+
+    @property
+    def requests(self):
+        """
+        A list of the new or updated #IntegrationRequests that
+        have been made.
+        """
+        # Cached on first access; mark_completed() resets the cache.
+        # Only requests whose is_changed is true are included.
+        if not hasattr(self, '_requests'):
+            all_requests = [IntegrationRequest(unit)
+                            for unit in self.all_joined_units]
+            is_changed = attrgetter('is_changed')
+            self._requests = list(filter(is_changed, all_requests))
+        return self._requests
+
+    @property
+    def relation_ids(self):
+        """
+        A list of the IDs of all established relations.
+        """
+        return [relation.relation_id for relation in self.relations]
+
+    def get_departed_charms(self):
+        """
+        Get a list of all charms that have had all units depart since the
+        last time this was called.
+        """
+        # A charm counts as departed only when no joined unit still
+        # reports its name.
+        joined_charms = {unit.received['charm']
+                         for unit in self.all_joined_units
+                         if unit.received['charm']}
+        departed_charms = [unit.received['charm']
+                           for unit in self.all_departed_units
+                           if unit.received['charm'] not in joined_charms]
+        self.all_departed_units.clear()
+        return departed_charms
+
+    def mark_completed(self):
+        """
+        Mark all requests as completed and remove the `requests-pending` flag.
+        """
+        for request in self.requests:
+            request.mark_completed()
+        clear_flag(self.expand_name('requests-pending'))
+        self._requests = []
+
+
+class IntegrationRequest:
+    """
+    A request for integration from a single remote unit.
+    """
+    def __init__(self, unit):
+        self._unit = unit
+
+    @property
+    def _to_publish(self):
+        # Data this side publishes back on the unit's relation.
+        return self._unit.relation.to_publish
+
+    @property
+    def _completed(self):
+        # Map of instance name -> nonce of the last completed request.
+        return self._to_publish.get('completed', {})
+
+    @property
+    def _requested(self):
+        # Opaque nonce published by the requires side; it changes on every
+        # new request (see GCPIntegrationRequires._request).
+        return self._unit.received['requested']
+
+    @property
+    def is_changed(self):
+        """
+        Whether this request has changed since the last time it was
+        marked completed (if ever).
+        """
+        # Ignore units that haven't sent their full identity yet.
+        if not all([self.charm, self.instance, self.zone, self._requested]):
+            return False
+        return self._completed.get(self.instance) != self._requested
+
+    def mark_completed(self):
+        """
+        Mark this request as having been completed.
+        """
+        # Record the nonce we just satisfied for this instance.
+        completed = self._completed
+        completed[self.instance] = self._requested
+        self._to_publish['completed'] = completed  # have to explicitly update
+
+    def set_credentials(self, credentials):
+        """
+        Set the credentials for this request.
+        """
+        self._unit.relation.to_publish['credentials'] = credentials
+
+    @property
+    def has_credentials(self):
+        """
+        Whether or not credentials have been set via `set_credentials`.
+        """
+        return 'credentials' in self._unit.relation.to_publish
+
+    @property
+    def relation_id(self):
+        """
+        The ID of the relation for the unit making the request.
+        """
+        return self._unit.relation.relation_id
+
+    @property
+    def unit_name(self):
+        """
+        The name of the unit making the request.
+        """
+        return self._unit.unit_name
+
+    @property
+    def application_name(self):
+        """
+        The name of the application making the request.
+        """
+        return self._unit.application_name
+
+    @property
+    def charm(self):
+        """
+        The charm name reported for this request.
+        """
+        return self._unit.received['charm']
+
+    @property
+    def instance(self):
+        """
+        The instance name reported for this request.
+        """
+        return self._unit.received['instance']
+
+    @property
+    def zone(self):
+        """
+        The zone reported for this request.
+        """
+        return self._unit.received['zone']
+
+    @property
+    def model_uuid(self):
+        """
+        The UUID of the model containing the application making this request.
+        """
+        return self._unit.received['model-uuid']
+
+    @property
+    def instance_labels(self):
+        """
+        Mapping of label names to values to apply to this instance.
+        """
+        # uses dict() here to make a copy, just to be safe
+        return dict(self._unit.received.get('instance-labels', {}))
+
+    @property
+    def requested_instance_inspection(self):
+        """
+        Flag indicating whether the ability to inspect instances was requested.
+        """
+        return bool(self._unit.received['enable-instance-inspection'])
+
+    @property
+    def requested_network_management(self):
+        """
+        Flag indicating whether the ability to manage networking was requested.
+        """
+        return bool(self._unit.received['enable-network-management'])
+
+    @property
+    def requested_security_management(self):
+        """
+        Flag indicating whether security management was requested.
+        """
+        return bool(self._unit.received['enable-security-management'])
+
+    @property
+    def requested_block_storage_management(self):
+        """
+        Flag indicating whether block storage management was requested.
+        """
+        return bool(self._unit.received['enable-block-storage-management'])
+
+    @property
+    def requested_dns_management(self):
+        """
+        Flag indicating whether DNS management was requested.
+        """
+        return bool(self._unit.received['enable-dns-management'])
+
+    @property
+    def requested_object_storage_access(self):
+        """
+        Flag indicating whether object storage access was requested.
+        """
+        return bool(self._unit.received['enable-object-storage-access'])
+
+    @property
+    def requested_object_storage_management(self):
+        """
+        Flag indicating whether object storage management was requested.
+        """
+        return bool(self._unit.received['enable-object-storage-management'])
diff --git a/kubernetes-worker/hooks/relations/gcp-integration/pydocmd.yml b/kubernetes-worker/hooks/relations/gcp-integration/pydocmd.yml
new file mode 100644
index 0000000..9ef5e78
--- /dev/null
+++ b/kubernetes-worker/hooks/relations/gcp-integration/pydocmd.yml
@@ -0,0 +1,16 @@
+site_name: 'GCP Integration Interface'
+
+generate:
+ - requires.md:
+ - requires
+ - requires.GCPIntegrationRequires+
+ - provides.md:
+ - provides
+ - provides.GCPIntegrationProvides+
+ - provides.IntegrationRequest+
+
+pages:
+ - Requires: requires.md
+ - Provides: provides.md
+
+gens_dir: docs
diff --git a/kubernetes-worker/hooks/relations/gcp-integration/requires.py b/kubernetes-worker/hooks/relations/gcp-integration/requires.py
new file mode 100644
index 0000000..bbd191f
--- /dev/null
+++ b/kubernetes-worker/hooks/relations/gcp-integration/requires.py
@@ -0,0 +1,227 @@
+"""
+This is the requires side of the interface layer, for use in charms that
+wish to request integration with GCP native features. The integration will
+be provided by the GCP integration charm, which allows the requiring charm
+to not require cloud credentials itself and not have a lot of GCP specific
+API code.
+
+The flags that are set by the requires side of this interface are:
+
+* **`endpoint.{endpoint_name}.joined`** This flag is set when the relation
+ has been joined, and the charm should then use the methods documented below
+ to request specific GCP features. This flag is automatically removed if
+ the relation is broken. It should not be removed by the charm.
+
+* **`endpoint.{endpoint_name}.ready`** This flag is set once the requested
+ features have been enabled for the GCP instance on which the charm is
+ running. This flag is automatically removed if new integration features
+ are requested. It should not be removed by the charm.
+"""
+
+
+import os
+import random
+import string
+from urllib.parse import urljoin
+from urllib.request import urlopen, Request
+
+from charmhelpers.core import hookenv
+from charmhelpers.core import unitdata
+
+from charms.reactive import Endpoint
+from charms.reactive import when, when_not
+from charms.reactive import clear_flag, toggle_flag
+
+
+# block size to read data from GCP metadata service
+# (realistically, just needs to be bigger than ~20 chars)
+READ_BLOCK_SIZE = 2048
+
+
+class GCPIntegrationRequires(Endpoint):
+ """
+ Interface to request integration access.
+
+ Note that due to resource limits and permissions granularity, policies are
+ limited to being applied at the charm level. That means that, if any
+ permissions are requested (i.e., any of the enable methods are called),
+ what is granted will be the sum of those ever requested by any instance of
+ the charm on this cloud.
+
+ Labels, on the other hand, will be instance specific.
+
+ Example usage:
+
+ ```python
+ from charms.reactive import when, endpoint_from_flag
+
+ @when('endpoint.gcp.joined')
+ def request_gcp_integration():
+ gcp = endpoint_from_flag('endpoint.gcp.joined')
+ gcp.label_instance({
+ 'tag1': 'value1',
+ 'tag2': None,
+ })
+ gcp.request_load_balancer_management()
+ # ...
+
+ @when('endpoint.gcp.ready')
+ def gcp_integration_ready():
+ update_config_enable_gcp()
+ ```
+ """
+ # https://cloud.google.com/compute/docs/storing-retrieving-metadata
+ _metadata_url = 'http://metadata.google.internal/computeMetadata/v1/'
+ _instance_url = urljoin(_metadata_url, 'instance/name')
+ _zone_url = urljoin(_metadata_url, 'instance/zone')
+ _metadata_headers = {'Metadata-Flavor': 'Google'}
+
+ def __init__(self, *args, **kwargs):
+ super().__init__(*args, **kwargs)
+ self._instance = None
+ self._zone = None
+
+ @property
+ def _received(self):
+ """
+ Helper to streamline access to received data since we expect to only
+ ever be connected to a single GCP integration application with a
+ single unit.
+ """
+ return self.relations[0].joined_units.received
+
+ @property
+ def _to_publish(self):
+ """
+ Helper to streamline access to received data since we expect to only
+ ever be connected to a single GCP integration application with a
+ single unit.
+ """
+ return self.relations[0].to_publish
+
+ @when('endpoint.{endpoint_name}.joined')
+ def send_instance_info(self):
+ self._to_publish['charm'] = hookenv.charm_name()
+ self._to_publish['instance'] = self.instance
+ self._to_publish['zone'] = self.zone
+ self._to_publish['model-uuid'] = os.environ['JUJU_MODEL_UUID']
+
+ @when('endpoint.{endpoint_name}.changed')
+ def check_ready(self):
+ # My middle name is ready. No, that doesn't sound right.
+ # I eat ready for breakfast.
+ toggle_flag(self.expand_name('ready'), self.is_ready)
+ clear_flag(self.expand_name('changed'))
+
+ @when_not('endpoint.{endpoint_name}.joined')
+ def remove_ready(self):
+ clear_flag(self.expand_name('ready'))
+
+ @property
+ def instance(self):
+ """
+ This unit's instance name.
+ """
+ if self._instance is None:
+ cache_key = self.expand_name('instance')
+ cached = unitdata.kv().get(cache_key)
+ if cached:
+ self._instance = cached
+ else:
+ req = Request(self._instance_url,
+ headers=self._metadata_headers)
+ with urlopen(req) as fd:
+ instance = fd.read(READ_BLOCK_SIZE).decode('utf8').strip()
+ self._instance = instance
+ unitdata.kv().set(cache_key, self._instance)
+ return self._instance
+
+ @property
+ def zone(self):
+ """
+ The zone this unit is in.
+ """
+ if self._zone is None:
+ cache_key = self.expand_name('zone')
+ cached = unitdata.kv().get(cache_key)
+ if cached:
+ self._zone = cached
+ else:
+ req = Request(self._zone_url,
+ headers=self._metadata_headers)
+ with urlopen(req) as fd:
+ zone = fd.read(READ_BLOCK_SIZE).decode('utf8').strip()
+ self._zone = zone.split('/')[-1]
+ unitdata.kv().set(cache_key, self._zone)
+ return self._zone
+
+ @property
+ def is_ready(self):
+ """
+ Whether or not the request for this instance has been completed.
+ """
+ requested = self._to_publish['requested']
+ completed = self._received.get('completed', {}).get(self.instance)
+ return requested and requested == completed
+
+ @property
+ def credentials(self):
+ return self._received['credentials']
+
+ def _request(self, keyvals):
+ alphabet = string.ascii_letters + string.digits
+ nonce = ''.join(random.choice(alphabet) for _ in range(8))
+ self._to_publish.update(keyvals)
+ self._to_publish['requested'] = nonce
+ clear_flag(self.expand_name('ready'))
+
+ def label_instance(self, labels):
+ """
+ Request that the given labels be applied to this instance.
+
+ # Parameters
+ `labels` (dict): Mapping of labels names to values.
+ """
+ self._request({'instance-labels': dict(labels)})
+
+ def enable_instance_inspection(self):
+ """
+ Request the ability to inspect instances.
+ """
+ self._request({'enable-instance-inspection': True})
+
+ def enable_network_management(self):
+ """
+ Request the ability to manage networking.
+ """
+ self._request({'enable-network-management': True})
+
+ def enable_security_management(self):
+ """
+ Request the ability to manage security (e.g., firewalls).
+ """
+ self._request({'enable-security-management': True})
+
+ def enable_block_storage_management(self):
+ """
+ Request the ability to manage block storage.
+ """
+ self._request({'enable-block-storage-management': True})
+
+ def enable_dns_management(self):
+ """
+ Request the ability to manage DNS.
+ """
+ self._request({'enable-dns': True})
+
+ def enable_object_storage_access(self):
+ """
+ Request the ability to access object storage.
+ """
+ self._request({'enable-object-storage-access': True})
+
+ def enable_object_storage_management(self):
+ """
+ Request the ability to manage object storage.
+ """
+ self._request({'enable-object-storage-management': True})
diff --git a/kubernetes-worker/hooks/relations/http/.gitignore b/kubernetes-worker/hooks/relations/http/.gitignore
new file mode 100644
index 0000000..3374ec2
--- /dev/null
+++ b/kubernetes-worker/hooks/relations/http/.gitignore
@@ -0,0 +1,5 @@
+# Emacs save files
+*~
+\#*\#
+.\#*
+
diff --git a/kubernetes-worker/hooks/relations/http/README.md b/kubernetes-worker/hooks/relations/http/README.md
new file mode 100644
index 0000000..3d7822a
--- /dev/null
+++ b/kubernetes-worker/hooks/relations/http/README.md
@@ -0,0 +1,68 @@
+# Overview
+
+This interface layer implements the basic form of the `http` interface protocol,
+which is used for things such as reverse-proxies, load-balanced servers, REST
+service discovery, et cetera.
+
+# Usage
+
+## Provides
+
+By providing the `http` interface, your charm is providing an HTTP server that
+can be load-balanced, reverse-proxied, used as a REST endpoint, etc.
+
+Your charm need only provide the port on which it is serving its content, as
+soon as the `{relation_name}.available` state is set:
+
+```python
+@when('website.available')
+def configure_website(website):
+ website.configure(port=hookenv.config('port'))
+```
+
+## Requires
+
+By requiring the `http` interface, your charm is consuming one or more HTTP
+servers, as a REST endpoint, to load-balance a set of servers, etc.
+
+Your charm should respond to the `{relation_name}.available` state, which
+indicates that there is at least one HTTP server connected.
+
+The `services()` method returns a list of available HTTP services and their
+associated hosts and ports.
+
+The return value is a list of dicts of the following form:
+
+```python
+[
+ {
+ 'service_name': name_of_service,
+ 'hosts': [
+ {
+ 'hostname': address_of_host,
+ 'port': port_for_host,
+ },
+ # ...
+ ],
+ },
+ # ...
+]
+```
+
+A trivial example of handling this interface would be:
+
+```python
+from charms.reactive.helpers import data_changed
+
+@when('reverseproxy.available')
+def update_reverse_proxy_config(reverseproxy):
+ services = reverseproxy.services()
+ if not data_changed('reverseproxy.services', services):
+ return
+ for service in services:
+ for host in service['hosts']:
+ hookenv.log('{} has a unit {}:{}'.format(
+                service['service_name'],
+ host['hostname'],
+ host['port']))
+```
diff --git a/kubernetes-worker/hooks/relations/http/__init__.py b/kubernetes-worker/hooks/relations/http/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/kubernetes-worker/hooks/relations/http/interface.yaml b/kubernetes-worker/hooks/relations/http/interface.yaml
new file mode 100644
index 0000000..54e7748
--- /dev/null
+++ b/kubernetes-worker/hooks/relations/http/interface.yaml
@@ -0,0 +1,4 @@
+name: http
+summary: Basic HTTP interface
+version: 1
+repo: https://git.launchpad.net/~bcsaller/charms/+source/http
diff --git a/kubernetes-worker/hooks/relations/http/provides.py b/kubernetes-worker/hooks/relations/http/provides.py
new file mode 100644
index 0000000..86fa9b3
--- /dev/null
+++ b/kubernetes-worker/hooks/relations/http/provides.py
@@ -0,0 +1,67 @@
+import json
+
+from charmhelpers.core import hookenv
+from charms.reactive import when, when_not
+from charms.reactive import set_flag, clear_flag
+from charms.reactive import Endpoint
+
+
+class HttpProvides(Endpoint):
+
+ @when('endpoint.{endpoint_name}.joined')
+ def joined(self):
+ set_flag(self.expand_name('{endpoint_name}.available'))
+
+ @when_not('endpoint.{endpoint_name}.joined')
+ def broken(self):
+ clear_flag(self.expand_name('{endpoint_name}.available'))
+
+ def get_ingress_address(self, rel_id=None):
+ # If no rel_id is provided, we fallback to the first one
+ if rel_id is None:
+ rel_id = self.relations[0].relation_id
+ return hookenv.ingress_address(rel_id, hookenv.local_unit())
+
+ def configure(self, port, private_address=None, hostname=None):
+ ''' configure the address(es). private_address and hostname can
+ be None, a single string address/hostname, or a list of addresses
+ and hostnames. Note that if a list is passed, it is assumed both
+ private_address and hostname are either lists or None '''
+ for relation in self.relations:
+ ingress_address = self.get_ingress_address(relation.relation_id)
+ if type(private_address) is list or type(hostname) is list:
+ # build 3 lists to zip together that are the same length
+ length = max(len(private_address), len(hostname))
+ p = [port] * length
+ a = private_address + [ingress_address] *\
+ (length - len(private_address))
+ h = hostname + [ingress_address] * (length - len(hostname))
+ zipped_list = zip(p, a, h)
+ # now build an array of dictionaries from that in the desired
+ # format for the interface
+ data_list = [{'hostname': h, 'port': p, 'private-address': a}
+ for p, a, h in zipped_list]
+ # for backwards compatibility, we just send a single entry
+ # and have an array of dictionaries in a field of that
+ # entry for the other entries.
+ data = data_list.pop(0)
+ data['extended_data'] = json.dumps(data_list)
+
+ relation.to_publish_raw.update(data)
+ else:
+ relation.to_publish_raw.update({
+ 'hostname': hostname or ingress_address,
+ 'private-address': private_address or ingress_address,
+ 'port': port,
+ })
+
+ def set_remote(self, **kwargs):
+ # NB: This method provides backwards compatibility for charms that
+ # called RelationBase.set_remote. Most commonly, this was done by
+ # charms that needed to pass reverse proxy stanzas to http proxies.
+ # This type of interaction with base relation classes is discouraged,
+ # and should be handled with logic encapsulated in appropriate
+ # interfaces. Eventually, this method will be deprecated in favor of
+ # that behavior.
+ for relation in self.relations:
+ relation.to_publish_raw.update(kwargs)
diff --git a/kubernetes-worker/hooks/relations/http/requires.py b/kubernetes-worker/hooks/relations/http/requires.py
new file mode 100644
index 0000000..17ea6b7
--- /dev/null
+++ b/kubernetes-worker/hooks/relations/http/requires.py
@@ -0,0 +1,76 @@
+import json
+
+from charms.reactive import when, when_not
+from charms.reactive import set_flag, clear_flag
+from charms.reactive import Endpoint
+
+
+class HttpRequires(Endpoint):
+    """
+    Requires side of the basic ``http`` interface.
+
+    Consumes one or more HTTP servers (for reverse-proxying,
+    load-balancing, etc.) and exposes them via :meth:`services`.
+    """
+
+    @when('endpoint.{endpoint_name}.changed')
+    def changed(self):
+        # Mark the endpoint available once any joined unit has published a
+        # port.  NOTE(review): the flag is not cleared here if all ports
+        # later disappear; only broken() below clears it — confirm that is
+        # the intended lifecycle.
+        if any(unit.received_raw['port'] for unit in self.all_joined_units):
+            set_flag(self.expand_name('{endpoint_name}.available'))
+
+    @when_not('endpoint.{endpoint_name}.joined')
+    def broken(self):
+        # No units remain on the relation; withdraw availability.
+        clear_flag(self.expand_name('{endpoint_name}.available'))
+
+    def services(self):
+        """
+        Returns a list of available HTTP services and their associated hosts
+        and ports.
+
+        The return value is a list of dicts of the following form::
+
+            [
+                {
+                    'service_name': name_of_service,
+                    'hosts': [
+                        {
+                            'hostname': address_of_host,
+                            'private-address': private_address_of_host,
+                            'port': port_for_host,
+                        },
+                        # ...
+                    ],
+                },
+                # ...
+            ]
+
+        Services with no usable host entries are omitted.
+        """
+        def build_service_host(data):
+            # A host entry is usable only with a hostname (falling back to
+            # private-address) AND a port; otherwise return None.
+            private_address = data['private-address']
+            host = data['hostname'] or private_address
+            if host and data['port']:
+                return (host, private_address, data['port'])
+            else:
+                return None
+
+        services = {}
+        for relation in self.relations:
+            service_name = relation.application_name
+            service = services.setdefault(service_name, {
+                'service_name': service_name,
+                'hosts': [],
+            })
+            # Collect hosts in a set of tuples to de-duplicate across units.
+            host_set = set()
+            for unit in relation.joined_units:
+                data = unit.received_raw
+                host = build_service_host(data)
+                if host:
+                    host_set.add(host)
+
+                # if we have extended data, add it
+                # ('extended_data' is a JSON-encoded list of extra host
+                # dicts sent by the provides side for backwards compat).
+                if 'extended_data' in data:
+                    for ed in json.loads(data['extended_data']):
+                        host = build_service_host(ed)
+                        if host:
+                            host_set.add(host)
+
+            # Sorted for a deterministic order across hook invocations.
+            service['hosts'] = [
+                {'hostname': h, 'private-address': pa, 'port': p}
+                for h, pa, p in sorted(host_set)
+            ]
+
+        ret = [s for s in services.values() if s['hosts']]
+        return ret
diff --git a/kubernetes-worker/hooks/relations/kube-control/.travis.yml b/kubernetes-worker/hooks/relations/kube-control/.travis.yml
new file mode 100644
index 0000000..d2be8be
--- /dev/null
+++ b/kubernetes-worker/hooks/relations/kube-control/.travis.yml
@@ -0,0 +1,9 @@
+language: python
+python:
+ - "3.5"
+ - "3.6"
+ - "3.7"
+install:
+ - pip install tox-travis
+script:
+ - tox
diff --git a/kubernetes-worker/hooks/relations/kube-control/README.md b/kubernetes-worker/hooks/relations/kube-control/README.md
new file mode 100644
index 0000000..6f9ecb7
--- /dev/null
+++ b/kubernetes-worker/hooks/relations/kube-control/README.md
@@ -0,0 +1,171 @@
+# kube-control interface
+
+This interface provides communication between master and workers in a
+Kubernetes cluster.
+
+
+## Provides (kubernetes-master side)
+
+
+### States
+
+* `kube-control.connected`
+
+ Enabled when a worker has joined the relation.
+
+* `kube-control.gpu.available`
+
+ Enabled when any worker has indicated that it is running in gpu mode.
+
+* `kube-control.departed`
+
+ Enabled when any worker has indicated that it is leaving the cluster.
+
+
+* `kube-control.auth.requested`
+
+ Enabled when an authentication credential is requested. This state is
+  temporary and will be removed once the unit's authentication request has
+  been fulfilled.
+
+### Methods
+
+* `kube_control.set_dns(port, domain, sdn_ip)`
+
+ Sends DNS info to the connected worker(s).
+
+
+* `kube_control.auth_user()`
+
+ Returns a list of the requested username and group requested for
+ authentication.
+
+* `kube_control.sign_auth_request(scope, user, kubelet_token, proxy_token, client_token)`
+
+ Sends authentication tokens to the unit scope for the requested user
+ and kube-proxy services.
+
+* `kube_control.set_cluster_tag(cluster_tag)`
+
+ Sends a tag used to identify resources that are part of the cluster to the
+ connected worker(s).
+
+* `kube_control.flush_departed()`
+
+ Returns the unit departing the kube_control relationship so you can do any
+ post removal cleanup. Such as removing authentication tokens for the unit.
+ Invoking this method will also remove the `kube-control.departed` state
+
+* `kube_control.set_registry_location(registry_location)`
+ Sends the container image registry location to the connected worker(s).
+
+### Examples
+
+```python
+
+@when('kube-control.connected')
+def send_dns(kube_control):
+ # send port, domain, sdn_ip to the remote side
+ kube_control.set_dns(53, "cluster.local", "10.1.0.10")
+
+@when('kube-control.gpu.available')
+def on_gpu_available(kube_control):
+    # The remote side is gpu-enabled, handle it somehow
+ assert kube_control.get_gpu() == True
+
+
+@when('kube-control.departed')
+@when('leadership.is_leader')
+def flush_auth_for_departed(kube_control):
+ ''' Unit has left the cluster and needs to have its authentication
+ tokens removed from the token registry '''
+ departing_unit = kube_control.flush_departed()
+
+```
+
+## Requires (kubernetes-worker side)
+
+
+### States
+
+* `kube-control.connected`
+
+ Enabled when a master has joined the relation.
+
+* `kube-control.dns.available`
+
+ Enabled when DNS info is available from the master.
+
+* `kube-control.auth.available`
+
+ Enabled when authentication credentials are present from the master.
+
+* `kube-control.cluster_tag.available`
+
+ Enabled when cluster tag is present from the master.
+
+* `kube-control.registry_location.available`
+
+ Enabled when registry location is present from the master.
+
+### Methods
+
+* `kube_control.get_dns()`
+
+ Returns a dictionary of DNS info sent by the master. The keys in the
+ dict are: domain, private-address, sdn-ip, port.
+
+* `kube_control.set_gpu(enabled=True)`
+
+ Tell the master that we are gpu-enabled.
+
+* `kube_control.get_auth_credentials(user)`
+
+ Returns a dict with the users authentication credentials.
+
+* `set_auth_request(kubelet, group='system:nodes')`
+
+ Issue an authentication request against the master to receive token based
+ auth credentials in return.
+
+* `kube_control.get_cluster_tag()`
+
+ Returns the cluster tag provided by the master.
+
+* `kube_control.get_registry_location()`
+
+ Returns the container image registry location provided by the master.
+
+### Examples
+
+```python
+
+@when('kube-control.dns.available')
+def on_dns_available(kube_control):
+ # Remote side has sent DNS info
+ dns = kube_control.get_dns()
+    print(dns['domain'])
+    print(dns['private-address'])
+    print(dns['sdn-ip'])
+    print(dns['port'])
+
+@when('kube-control.connected')
+def send_gpu(kube_control):
+ # Tell the master that we're gpu-enabled
+ kube_control.set_gpu(True)
+
+@when('kube-control.auth.available')
+def display_auth_tokens(kube_control):
+ # Remote side has sent auth info
+ auth = kube_control.get_auth_credentials('root')
+ print(auth['kubelet_token'])
+ print(auth['proxy_token'])
+ print(auth['client_token'])
+
+@when('kube-control.connected')
+@when_not('kube-control.auth.available')
+def request_auth_credentials(kube_control):
+ # Request an admin user with sudo level access named 'root'
+ kube_control.set_auth_request('root', group='system:masters')
+
+```
diff --git a/kubernetes-worker/hooks/relations/kube-control/__init__.py b/kubernetes-worker/hooks/relations/kube-control/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/kubernetes-worker/hooks/relations/kube-control/interface.yaml b/kubernetes-worker/hooks/relations/kube-control/interface.yaml
new file mode 100644
index 0000000..2f0b187
--- /dev/null
+++ b/kubernetes-worker/hooks/relations/kube-control/interface.yaml
@@ -0,0 +1,6 @@
+name: kube-control
+summary: Provides master-worker communication.
+version: 1
+maintainer: "Tim Van Steenburgh "
+ignore:
+- tests
diff --git a/kubernetes-worker/hooks/relations/kube-control/provides.py b/kubernetes-worker/hooks/relations/kube-control/provides.py
new file mode 100644
index 0000000..9d3a829
--- /dev/null
+++ b/kubernetes-worker/hooks/relations/kube-control/provides.py
@@ -0,0 +1,152 @@
+#!/usr/local/sbin/charm-env python3
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from charms.reactive import (
+ Endpoint,
+ toggle_flag,
+ set_flag,
+ data_changed
+)
+
+from charmhelpers.core import (
+ hookenv,
+ unitdata
+)
+
+
+DB = unitdata.kv()
+
+
+class KubeControlProvider(Endpoint):
+    """
+    Implements the kubernetes-master side of the kube-control interface.
+    """
+    def manage_flags(self):
+        # Mirror join state and worker GPU state into flags on every hook.
+        toggle_flag(self.expand_name('{endpoint_name}.connected'),
+                    self.is_joined)
+        toggle_flag(self.expand_name('{endpoint_name}.gpu.available'),
+                    self.is_joined and self._get_gpu())
+        # Raise a one-shot 'requests.changed' flag whenever the set of
+        # worker auth requests differs from what data_changed last saw.
+        requests_data_id = self.expand_name('{endpoint_name}.requests')
+        requests = self.auth_user()
+        if data_changed(requests_data_id, requests):
+            set_flag(self.expand_name('{endpoint_name}.requests.changed'))
+
+    def set_dns(self, port, domain, sdn_ip, enable_kube_dns):
+        """
+        Send DNS info to the remote units.
+
+        We'll need the port, domain, and sdn_ip of the dns service. If
+        sdn_ip is not required in your deployment, the units private-ip
+        is available implicitly.
+        """
+        for relation in self.relations:
+            relation.to_publish_raw.update({
+                'port': port,
+                'domain': domain,
+                'sdn-ip': sdn_ip,
+                'enable-kube-dns': enable_kube_dns,
+            })
+
+    def auth_user(self):
+        """
+        Return the kubelet_user value on the wire from the requestors.
+
+        Returns a sorted list of (unit_name, {'user': ..., 'group': ...})
+        tuples, one per joined unit; missing keys yield None values.
+        """
+        requests = []
+
+        for unit in self.all_joined_units:
+            requests.append(
+                (unit.unit_name,
+                    {'user': unit.received_raw.get('kubelet_user'),
+                     'group': unit.received_raw.get('auth_group')})
+            )
+
+        # Sorted so data_changed() in manage_flags sees a stable ordering.
+        requests.sort()
+        return requests
+
+    def sign_auth_request(self, scope, user, kubelet_token, proxy_token,
+                          client_token):
+        """
+        Send authorization tokens to the requesting unit.
+
+        Tokens are accumulated per-user in local unitdata under 'creds',
+        then the full mapping is re-published on every relation.
+        """
+        cred = {
+            'scope': scope,
+            'kubelet_token': kubelet_token,
+            'proxy_token': proxy_token,
+            'client_token': client_token
+        }
+
+        # Lazily initialize the persistent creds store.
+        if not DB.get('creds'):
+            DB.set('creds', {})
+
+        all_creds = DB.get('creds')
+        all_creds[user] = cred
+        DB.set('creds', all_creds)
+
+        for relation in self.relations:
+            # NOTE(review): publishes via to_publish (serialized), while
+            # clear_creds() below writes '' via to_publish_raw — readers
+            # must tolerate both; confirm this asymmetry is intended.
+            relation.to_publish.update({
+                'creds': all_creds
+            })
+
+    def clear_creds(self):
+        """
+        Clear creds from the relation. This is used by non-leader units to stop
+        advertising creds so that the leader can assume full control of them.
+        """
+        DB.unset('creds')
+        for relation in self.relations:
+            relation.to_publish_raw['creds'] = ''
+
+    def _get_gpu(self):
+        """
+        Return True if any remote worker is gpu-enabled.
+        """
+        for unit in self.all_joined_units:
+            # Raw relation data is string-typed, hence the 'True' comparison.
+            if unit.received_raw.get('gpu') == 'True':
+                hookenv.log('Unit {} has gpu enabled'.format(unit))
+                return True
+
+        return False
+
+    def set_cluster_tag(self, cluster_tag):
+        """
+        Send the cluster tag to the remote units.
+        """
+        for relation in self.relations:
+            relation.to_publish_raw.update({
+                'cluster-tag': cluster_tag
+            })
+
+    def set_registry_location(self, registry_location):
+        """
+        Send the registry location to the remote units.
+        """
+        for relation in self.relations:
+            relation.to_publish_raw.update({
+                'registry-location': registry_location
+            })
+
+    def set_cohort_keys(self, cohort_keys):
+        """
+        Send the cohort snapshot keys.
+        """
+        for relation in self.relations:
+            relation.to_publish['cohort-keys'] = cohort_keys
+
+    def set_default_cni(self, default_cni):
+        """
+        Send the default CNI. The default_cni value should be a string
+        containing the name of a related CNI application to use as the
+        default CNI. For example: "flannel" or "calico". If no default has
+        been chosen then "" can be sent instead.
+        """
+        for relation in self.relations:
+            relation.to_publish['default-cni'] = default_cni
diff --git a/kubernetes-worker/hooks/relations/kube-control/requires.py b/kubernetes-worker/hooks/relations/kube-control/requires.py
new file mode 100644
index 0000000..72ce1f6
--- /dev/null
+++ b/kubernetes-worker/hooks/relations/kube-control/requires.py
@@ -0,0 +1,149 @@
+#!/usr/local/sbin/charm-env python3
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from charms.reactive import (
+ Endpoint,
+ toggle_flag,
+)
+
+from charmhelpers.core.hookenv import log
+
+
+class KubeControlRequirer(Endpoint):
+    """
+    Implements the kubernetes-worker side of the kube-control interface.
+    """
+    def manage_flags(self):
+        """
+        Set states corresponding to the data we have.
+        """
+        toggle_flag(
+            self.expand_name('{endpoint_name}.connected'),
+            self.is_joined)
+        toggle_flag(
+            self.expand_name('{endpoint_name}.dns.available'),
+            self.is_joined and self.dns_ready())
+        toggle_flag(
+            self.expand_name('{endpoint_name}.auth.available'),
+            self.is_joined and self._has_auth_credentials())
+        toggle_flag(
+            self.expand_name('{endpoint_name}.cluster_tag.available'),
+            self.is_joined and self.get_cluster_tag())
+        toggle_flag(
+            self.expand_name('{endpoint_name}.registry_location.available'),
+            self.is_joined and self.get_registry_location())
+        toggle_flag(
+            self.expand_name('{endpoint_name}.cohort_keys.available'),
+            self.is_joined and self.cohort_keys)
+        # default-cni uses an explicit None check because "" (no default
+        # chosen) is a valid, falsy value.
+        toggle_flag(
+            self.expand_name('{endpoint_name}.default_cni.available'),
+            self.is_joined and self.get_default_cni() is not None)
+
+    def get_auth_credentials(self, user):
+        """
+        Return the authentication credentials for the given user, or None
+        if no master has provided credentials for that user yet.
+        """
+        # Merge 'creds' dicts from all masters; later units win on conflict.
+        rx = {}
+        for unit in self.all_joined_units:
+            rx.update(unit.received.get('creds', {}))
+        if not rx:
+            return None
+
+        if user in rx:
+            return {
+                'user': user,
+                'kubelet_token': rx[user]['kubelet_token'],
+                'proxy_token': rx[user]['proxy_token'],
+                'client_token': rx[user]['client_token']
+            }
+        else:
+            return None
+
+    def get_dns(self):
+        """
+        Return DNS info provided by the master.
+
+        Values come from raw relation data and are None when not yet set.
+        """
+        rx = self.all_joined_units.received_raw
+
+        return {
+            'port': rx.get('port'),
+            'domain': rx.get('domain'),
+            'sdn-ip': rx.get('sdn-ip'),
+            'enable-kube-dns': rx.get('enable-kube-dns'),
+        }
+
+    def dns_ready(self):
+        """
+        Return True if we have all DNS info from the master.
+        """
+        # NOTE(review): get_dns() always returns exactly these keys, so the
+        # set comparison is vacuously true; the effective check is only that
+        # enable-kube-dns is not None (port/domain may still be None) —
+        # confirm this is the intended readiness condition.
+        keys = ['port', 'domain', 'sdn-ip', 'enable-kube-dns']
+        dns_info = self.get_dns()
+        return (set(dns_info.keys()) == set(keys) and
+                dns_info['enable-kube-dns'] is not None)
+
+    def set_auth_request(self, kubelet, group='system:nodes'):
+        """
+        Tell the master that we are requesting auth, and to use this
+        hostname for the kubelet system account.
+
+        Param group - Determines the level of elevated privileges of the
+        requested user. Can be overridden to request sudo level access on the
+        cluster by changing to system:masters.
+        """
+        for relation in self.relations:
+            relation.to_publish_raw.update({
+                'kubelet_user': kubelet,
+                'auth_group': group
+            })
+
+    def set_gpu(self, enabled=True):
+        """
+        Tell the master that we're gpu-enabled (or not).
+        """
+        log('Setting gpu={} on kube-control relation'.format(enabled))
+        for relation in self.relations:
+            relation.to_publish_raw.update({
+                'gpu': enabled
+            })
+
+    def _has_auth_credentials(self):
+        """
+        Predicate method to signal we have authentication credentials.
+
+        Returns True, or (implicitly) None — both usable as a boolean.
+        """
+        if self.all_joined_units.received_raw.get('creds'):
+            return True
+
+    def get_cluster_tag(self):
+        """
+        Tag for identifying resources that are part of the cluster.
+        """
+        return self.all_joined_units.received_raw.get('cluster-tag')
+
+    def get_registry_location(self):
+        """
+        URL for container image registry.
+        """
+        return self.all_joined_units.received_raw.get('registry-location')
+
+    @property
+    def cohort_keys(self):
+        """
+        The cohort snapshot keys sent by the masters.
+        """
+        # 'received' (deserialized view) rather than received_raw.
+        return self.all_joined_units.received['cohort-keys']
+
+    def get_default_cni(self):
+        """
+        Default CNI network to use.
+
+        May be a CNI application name, "" for no default, or None if the
+        master has not sent the value yet.
+        """
+        return self.all_joined_units.received['default-cni']
diff --git a/kubernetes-worker/hooks/relations/kube-dns/README.md b/kubernetes-worker/hooks/relations/kube-dns/README.md
new file mode 100644
index 0000000..15ce8bb
--- /dev/null
+++ b/kubernetes-worker/hooks/relations/kube-dns/README.md
@@ -0,0 +1,21 @@
+# Kube-DNS
+
+This interface allows a DNS provider, such as CoreDNS, to provide name
+resolution for a Kubernetes cluster.
+
+(Note: this interface was previously used by the Kubernetes Master charm to
+communicate the DNS provider info to the Kubernetes Worker charm, but that
+usage was folded into the `kube-control` interface.)
+
+
+# Provides
+
+The provider should look for the `{endpoint_name}.connected` flag and call
+the `set_dns_info` method with the `domain`, `sdn_ip`, and `port` info (note:
+these must be provided as keyword arguments).
+
+# Requires
+
+The requirer should look for the `{endpoint_name}.available` flag and call the
+`details` method, which will return a dictionary with the `domain`, `sdn-ip`,
+and `port` keys.
diff --git a/kubernetes-worker/hooks/relations/kube-dns/__init__.py b/kubernetes-worker/hooks/relations/kube-dns/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/kubernetes-worker/hooks/relations/kube-dns/interface.yaml b/kubernetes-worker/hooks/relations/kube-dns/interface.yaml
new file mode 100644
index 0000000..2de32b0
--- /dev/null
+++ b/kubernetes-worker/hooks/relations/kube-dns/interface.yaml
@@ -0,0 +1,4 @@
+name: kube-dns
+summary: provides the kubernetes dns settings
+version: 1
+maintainer: "Charles Butler "
diff --git a/kubernetes-worker/hooks/relations/kube-dns/provides.py b/kubernetes-worker/hooks/relations/kube-dns/provides.py
new file mode 100644
index 0000000..a7199c3
--- /dev/null
+++ b/kubernetes-worker/hooks/relations/kube-dns/provides.py
@@ -0,0 +1,29 @@
+#!/usr/bin/python
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from charms.reactive import Endpoint, toggle_flag
+
+
+class KubeDNSProvider(Endpoint):
+    """Provides side of the kube-dns interface: publishes DNS provider
+    details (domain, sdn-ip, port) to related charms."""
+
+    def manage_flags(self):
+        # 'connected' simply mirrors whether any remote unit has joined.
+        toggle_flag(self.expand_name('{endpoint_name}.connected'),
+                    self.is_joined)
+
+    def set_dns_info(self, *, domain, sdn_ip, port):
+        '''Set the domain, sdn_ip, and port of the DNS provider.
+
+        All three are keyword-only arguments; they are published on every
+        relation under the keys domain, sdn-ip, and port.
+        '''
+        for relation in self.relations:
+            relation.to_publish_raw.update({
+                'domain': domain,
+                'sdn-ip': sdn_ip,
+                'port': port,
+            })
diff --git a/kubernetes-worker/hooks/relations/kube-dns/requires.py b/kubernetes-worker/hooks/relations/kube-dns/requires.py
new file mode 100644
index 0000000..9595c4a
--- /dev/null
+++ b/kubernetes-worker/hooks/relations/kube-dns/requires.py
@@ -0,0 +1,36 @@
+#!/usr/bin/python
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from charms.reactive import Endpoint, toggle_flag
+
+
+class KubeDNSRequireer(Endpoint):
+    """Requires side of the kube-dns interface: consumes DNS provider
+    details.
+
+    NOTE(review): the class name is misspelled ("Requireer"), but it is
+    the public name loaded by the framework — do not rename.
+    """
+    def manage_flags(self):
+        '''Set flags according to whether we have DNS provider details.'''
+        toggle_flag(self.expand_name('{endpoint_name}.available'),
+                    self.has_info())
+
+    def details(self):
+        '''Return the DNS provider details.
+
+        Keys: domain, sdn-ip, port.  Values are None until received.
+        '''
+        return {
+            'domain': self._get_value('domain'),
+            'sdn-ip': self._get_value('sdn-ip'),
+            'port': self._get_value('port'),
+        }
+
+    def has_info(self):
+        ''' Determine if we have all needed info (all values truthy). '''
+        return all(self.details().values())
+
+    def _get_value(self, key):
+        # Combined raw view over all joined units' relation data.
+        return self.all_joined_units.received_raw.get(key)
diff --git a/kubernetes-worker/hooks/relations/kubernetes-cni/.gitignore b/kubernetes-worker/hooks/relations/kubernetes-cni/.gitignore
new file mode 100644
index 0000000..e43b0f9
--- /dev/null
+++ b/kubernetes-worker/hooks/relations/kubernetes-cni/.gitignore
@@ -0,0 +1 @@
+.DS_Store
diff --git a/kubernetes-worker/hooks/relations/kubernetes-cni/.travis.yml b/kubernetes-worker/hooks/relations/kubernetes-cni/.travis.yml
new file mode 100644
index 0000000..d2be8be
--- /dev/null
+++ b/kubernetes-worker/hooks/relations/kubernetes-cni/.travis.yml
@@ -0,0 +1,9 @@
+language: python
+python:
+ - "3.5"
+ - "3.6"
+ - "3.7"
+install:
+ - pip install tox-travis
+script:
+ - tox
diff --git a/kubernetes-worker/hooks/relations/kubernetes-cni/README.md b/kubernetes-worker/hooks/relations/kubernetes-cni/README.md
new file mode 100644
index 0000000..e69de29
diff --git a/kubernetes-worker/hooks/relations/kubernetes-cni/__init__.py b/kubernetes-worker/hooks/relations/kubernetes-cni/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/kubernetes-worker/hooks/relations/kubernetes-cni/interface.yaml b/kubernetes-worker/hooks/relations/kubernetes-cni/interface.yaml
new file mode 100644
index 0000000..7e3c123
--- /dev/null
+++ b/kubernetes-worker/hooks/relations/kubernetes-cni/interface.yaml
@@ -0,0 +1,6 @@
+name: kubernetes-cni
+summary: Interface for relating various CNI implementations
+version: 0
+maintainer: "George Kraft "
+ignore:
+- tests
diff --git a/kubernetes-worker/hooks/relations/kubernetes-cni/provides.py b/kubernetes-worker/hooks/relations/kubernetes-cni/provides.py
new file mode 100644
index 0000000..0b4aada
--- /dev/null
+++ b/kubernetes-worker/hooks/relations/kubernetes-cni/provides.py
@@ -0,0 +1,85 @@
+#!/usr/bin/python
+
+from charmhelpers.core import hookenv
+from charms.reactive import Endpoint
+from charms.reactive import toggle_flag, is_flag_set, clear_flag, set_flag
+
+
+class CNIPluginProvider(Endpoint):
+    """Provides side of the kubernetes-cni interface: the Kubernetes end
+    that hands kube config info to CNI plugins and reads back their CNI
+    configuration."""
+
+    def manage_flags(self):
+        toggle_flag(self.expand_name('{endpoint_name}.connected'),
+                    self.is_joined)
+        toggle_flag(self.expand_name('{endpoint_name}.available'),
+                    self.config_available())
+        # Any relation-data change invalidates our previous configuration.
+        if is_flag_set(self.expand_name('endpoint.{endpoint_name}.changed')):
+            clear_flag(self.expand_name('{endpoint_name}.configured'))
+            clear_flag(self.expand_name('endpoint.{endpoint_name}.changed'))
+
+    def set_config(self, is_master, kubeconfig_path):
+        ''' Relays a dict of kubernetes configuration information. '''
+        for relation in self.relations:
+            relation.to_publish_raw.update({
+                'is_master': is_master,
+                'kubeconfig_path': kubeconfig_path
+            })
+        set_flag(self.expand_name('{endpoint_name}.configured'))
+
+    def config_available(self):
+        ''' Ensures all config from the CNI plugin is available. '''
+        # goal_state lists the applications that *should* be related, so we
+        # wait for config from every expected CNI app, not just those that
+        # have already joined.
+        goal_state = hookenv.goal_state()
+        # NOTE(review): the .get default is '' (iterates to nothing) rather
+        # than {} — harmless in effect, but confirm it was intentional.
+        related_apps = [
+            app for app in goal_state.get('relations', {}).get(self.endpoint_name, '')
+            if '/' not in app  # entries containing '/' are units, not apps
+        ]
+        if not related_apps:
+            return False
+        configs = self.get_configs()
+        # Every expected app must have sent both 'cidr' and 'cni-conf-file'.
+        return all(
+            'cidr' in config and 'cni-conf-file' in config
+            for config in [
+                configs.get(related_app, {}) for related_app in related_apps
+            ]
+        )
+
+    def get_config(self, default=None):
+        ''' Get CNI config for one related application.
+
+        If default is specified, and there is a related application with a
+        matching name, then that application is chosen. Otherwise, the
+        application is chosen alphabetically.
+
+        Whichever application is chosen, that application's CNI config is
+        returned.
+        '''
+        configs = self.get_configs()
+        if not configs:
+            return {}
+        elif default and default not in configs:
+            # Requested default is not related; warn and fall back to the
+            # alphabetical choice (recursive call with no default).
+            msg = 'relation not found for default CNI %s, ignoring' % default
+            hookenv.log(msg, level='WARN')
+            return self.get_config()
+        elif default:
+            return configs.get(default, {})
+        else:
+            return configs.get(sorted(configs)[0], {})
+
+    def get_configs(self):
+        ''' Get CNI configs for all related applications.
+
+        This returns a mapping of application names to CNI configs. Here's an
+        example return value:
+            {
+                'flannel': {
+                    'cidr': '10.1.0.0/16',
+                    'cni-conf-file': '10-flannel.conflist'
+                },
+                'calico': {
+                    'cidr': '192.168.0.0/16',
+                    'cni-conf-file': '10-calico.conflist'
+                }
+            }
+        '''
+        return {
+            relation.application_name: relation.joined_units.received_raw
+            for relation in self.relations if relation.application_name
+        }
diff --git a/kubernetes-worker/hooks/relations/kubernetes-cni/requires.py b/kubernetes-worker/hooks/relations/kubernetes-cni/requires.py
new file mode 100644
index 0000000..039b912
--- /dev/null
+++ b/kubernetes-worker/hooks/relations/kubernetes-cni/requires.py
@@ -0,0 +1,45 @@
+#!/usr/bin/python
+
+from charms.reactive import Endpoint
+from charms.reactive import when_any, when_not
+from charms.reactive import set_state, remove_state
+
+
+class CNIPluginClient(Endpoint):
+    """Requires side of the kubernetes-cni interface: the CNI plugin end
+    that receives kube config info and publishes its CNI configuration."""
+
+    @when_any('endpoint.{endpoint_name}.joined',
+              'endpoint.{endpoint_name}.changed')
+    def changed(self):
+        ''' Indicate the relation is connected, and if the relation data is
+        set it is also available. '''
+        set_state(self.expand_name('{endpoint_name}.connected'))
+        config = self.get_config()
+        # Raw relation data is string-typed, hence comparison with the
+        # strings 'True'/'False' rather than booleans.
+        if config['is_master'] == 'True':
+            set_state(self.expand_name('{endpoint_name}.is-master'))
+            set_state(self.expand_name('{endpoint_name}.configured'))
+        elif config['is_master'] == 'False':
+            set_state(self.expand_name('{endpoint_name}.is-worker'))
+            set_state(self.expand_name('{endpoint_name}.configured'))
+        else:
+            # is_master not sent yet (or unrecognized): not configured.
+            remove_state(self.expand_name('{endpoint_name}.configured'))
+        # Acknowledge the data change so this handler is not re-invoked.
+        remove_state(self.expand_name('endpoint.{endpoint_name}.changed'))
+
+    @when_not('endpoint.{endpoint_name}.joined')
+    def broken(self):
+        ''' Indicate the relation is no longer available and not connected. '''
+        remove_state(self.expand_name('{endpoint_name}.connected'))
+        remove_state(self.expand_name('{endpoint_name}.is-master'))
+        remove_state(self.expand_name('{endpoint_name}.is-worker'))
+        remove_state(self.expand_name('{endpoint_name}.configured'))
+
+    def get_config(self):
+        ''' Get the kubernetes configuration information (combined raw
+        relation data from all joined units). '''
+        return self.all_joined_units.received_raw
+
+    def set_config(self, cidr, cni_conf_file):
+        ''' Sets the CNI configuration information. '''
+        for relation in self.relations:
+            relation.to_publish_raw.update({
+                'cidr': cidr,
+                'cni-conf-file': cni_conf_file
+            })
diff --git a/kubernetes-worker/hooks/relations/mount/.gitignore b/kubernetes-worker/hooks/relations/mount/.gitignore
new file mode 100644
index 0000000..f3558c7
--- /dev/null
+++ b/kubernetes-worker/hooks/relations/mount/.gitignore
@@ -0,0 +1,105 @@
+# emacs files
+*~
+\#*\#
+
+# Byte-compiled / optimized / DLL files
+__pycache__/
+*.py[cod]
+*$py.class
+
+# C extensions
+*.so
+
+# Distribution / packaging
+.Python
+env/
+build/
+develop-eggs/
+dist/
+downloads/
+eggs/
+.eggs/
+lib/
+lib64/
+parts/
+sdist/
+var/
+wheels/
+*.egg-info/
+.installed.cfg
+*.egg
+
+# PyInstaller
+# Usually these files are written by a python script from a template
+# before PyInstaller builds the exe, so as to inject date/other infos into it.
+*.manifest
+*.spec
+
+# Installer logs
+pip-log.txt
+pip-delete-this-directory.txt
+
+# Unit test / coverage reports
+htmlcov/
+.tox/
+.coverage
+.coverage.*
+.cache
+nosetests.xml
+coverage.xml
+*.cover
+.hypothesis/
+
+# Translations
+*.mo
+*.pot
+
+# Django stuff:
+*.log
+local_settings.py
+
+# Flask stuff:
+instance/
+.webassets-cache
+
+# Scrapy stuff:
+.scrapy
+
+# Sphinx documentation
+docs/_build/
+
+# PyBuilder
+target/
+
+# Jupyter Notebook
+.ipynb_checkpoints
+
+# pyenv
+.python-version
+
+# celery beat schedule file
+celerybeat-schedule
+
+# SageMath parsed files
+*.sage.py
+
+# dotenv
+.env
+
+# virtualenv
+.venv
+venv/
+ENV/
+
+# Spyder project settings
+.spyderproject
+.spyproject
+
+# Rope project settings
+.ropeproject
+
+# mkdocs documentation
+/site
+
+# mypy
+.mypy_cache/
diff --git a/kubernetes-worker/hooks/relations/mount/LICENSE b/kubernetes-worker/hooks/relations/mount/LICENSE
new file mode 100644
index 0000000..261eeb9
--- /dev/null
+++ b/kubernetes-worker/hooks/relations/mount/LICENSE
@@ -0,0 +1,201 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/kubernetes-worker/hooks/relations/mount/README.md b/kubernetes-worker/hooks/relations/mount/README.md
new file mode 100644
index 0000000..99c2394
--- /dev/null
+++ b/kubernetes-worker/hooks/relations/mount/README.md
@@ -0,0 +1,2 @@
+# interface-mount
+Interface layer for connecting mounts to a charm, such as NFS
diff --git a/kubernetes-worker/hooks/relations/mount/__init__.py b/kubernetes-worker/hooks/relations/mount/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/kubernetes-worker/hooks/relations/mount/copyright b/kubernetes-worker/hooks/relations/mount/copyright
new file mode 100644
index 0000000..a91bdf1
--- /dev/null
+++ b/kubernetes-worker/hooks/relations/mount/copyright
@@ -0,0 +1,16 @@
+Format: http://dep.debian.net/deps/dep5/
+
+Files: *
+Copyright: Copyright 2018, Canonical Ltd., All Rights Reserved.
+License: Apache License 2.0
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+ .
+ http://www.apache.org/licenses/LICENSE-2.0
+ .
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/kubernetes-worker/hooks/relations/mount/interface.yaml b/kubernetes-worker/hooks/relations/mount/interface.yaml
new file mode 100644
index 0000000..ff68ab1
--- /dev/null
+++ b/kubernetes-worker/hooks/relations/mount/interface.yaml
@@ -0,0 +1,4 @@
+name: mount
+summary: Interface for mounting filesystems like NFS.
+version: 1
+maintainer: Mike Wilson
diff --git a/kubernetes-worker/hooks/relations/mount/provides.py b/kubernetes-worker/hooks/relations/mount/provides.py
new file mode 100644
index 0000000..b68b0a8
--- /dev/null
+++ b/kubernetes-worker/hooks/relations/mount/provides.py
@@ -0,0 +1,39 @@
+from charms.reactive import when_any
+from charms.reactive import set_flag, clear_flag
+from charms.reactive import Endpoint
+
+
+class MountProvides(Endpoint):
+    ''' Provides side of the mount interface (e.g. an NFS server charm).
+
+    Surfaces incoming mount requests and publishes the resulting mount
+    configuration back to the requiring side over raw relation data.
+    '''
+
+    @when_any('endpoint.{endpoint_name}.changed',
+              'endpoint.{endpoint_name}.departed')
+    def changed(self):
+        # Translate the framework's endpoint.* flags into a single
+        # charm-facing '{endpoint_name}.changed' flag, then acknowledge
+        # the framework flags so this handler is not re-triggered.
+        set_flag(self.expand_name('{endpoint_name}.changed'))
+        clear_flag(self.expand_name('endpoint.{endpoint_name}.changed'))
+        clear_flag(self.expand_name('endpoint.{endpoint_name}.departed'))
+
+    def get_mount_requests(self):
+        ''' Return one request dict per relation, each containing:
+        'identifier' (the relation id, usable with configure()),
+        'application_name' (the remote 'export_name' if published,
+        otherwise the remote application's name), and 'addresses'
+        (one address per joined unit; 'ingress-address' preferred,
+        falling back to 'private-address'). '''
+        return [{
+            'identifier': relation.relation_id,
+            'application_name': relation.joined_units.received_raw.get(
+                'export_name', relation.application_name),
+            'addresses': [
+                unit.received_raw.get('ingress-address',
+                                      unit.received_raw['private-address'])
+                for unit in relation.joined_units],
+        } for relation in self.relations]
+
+    def configure(self, responses):
+        ''' Publish mount configuration for each response.
+
+        Each response dict must contain 'identifier' (a relation id as
+        returned by get_mount_requests), 'mountpoint', 'fstype' and
+        'options'; 'export_name' and 'hostname' are optional and are
+        removed from the published data when absent from the response.
+        '''
+        for response in responses:
+            # NOTE(review): indexes self.relations by relation id string --
+            # relies on charms.reactive supporting id-based lookup here.
+            relation = self.relations[response['identifier']]
+            relation.to_publish_raw.update({
+                'mountpoint': response['mountpoint'],
+                'fstype': response['fstype'],
+                'options': response['options'],
+            })
+            for key in ('export_name', 'hostname'):
+                if key in response:
+                    relation.to_publish_raw[key] = response[key]
+                elif key in relation.to_publish_raw:
+                    # Clear previously published optional keys that the
+                    # caller no longer supplies.
+                    del relation.to_publish_raw[key]
+        clear_flag(self.expand_name('{endpoint_name}.changed'))
diff --git a/kubernetes-worker/hooks/relations/mount/requires.py b/kubernetes-worker/hooks/relations/mount/requires.py
new file mode 100644
index 0000000..6f503ed
--- /dev/null
+++ b/kubernetes-worker/hooks/relations/mount/requires.py
@@ -0,0 +1,71 @@
+from charms.reactive import when, when_not
+from charms.reactive import set_flag, clear_flag
+from charms.reactive import Endpoint
+
+
+class MountRequires(Endpoint):
+    ''' Requires side of the mount interface.
+
+    Sets '{endpoint_name}.joined' when a relation exists and
+    '{endpoint_name}.available' once at least one remote unit has
+    published a mountpoint; mounts() returns the usable mount data.
+    '''
+
+    @when('endpoint.{endpoint_name}.joined')
+    def joined(self):
+        set_flag(self.expand_name('{endpoint_name}.joined'))
+
+    @when('endpoint.{endpoint_name}.changed')
+    def changed(self):
+        # Available as soon as any joined unit has published a (truthy)
+        # mountpoint. NOTE(review): assumes received_raw yields a falsy
+        # value for units that have not yet set 'mountpoint' rather than
+        # raising -- confirm against charms.reactive's data view semantics.
+        if any(unit.received_raw['mountpoint']
+               for unit in self.all_joined_units):
+            set_flag(self.expand_name('{endpoint_name}.available'))
+
+    @when_not('endpoint.{endpoint_name}.joined')
+    def broken(self):
+        # Relation gone: clear both lifecycle flags.
+        clear_flag(self.expand_name('{endpoint_name}.joined'))
+        clear_flag(self.expand_name('{endpoint_name}.available'))
+
+    def set_export_name(self, export_name):
+        ''' Publish the desired export name on every relation; the
+        providing side uses it to label the export. '''
+        for relation in self.relations:
+            relation.to_publish_raw['export_name'] = export_name
+
+    def mounts(self):
+        """
+        Returns a list of available mounts and their associated data.
+
+        The return value is a list of dicts of the following form::
+
+            [
+                {
+                    'mount_name': name_of_mount,
+                    'mounts': [
+                        {
+                            'hostname': hostname,
+                            'mountpoint': mountpoint,
+                            'fstype': mounttype,
+                            'options': options
+                        },
+                        # ...
+                    ],
+                },
+                # ...
+            ]
+        """
+        # Group per-unit mount data by export name (falling back to the
+        # remote application name), keeping only complete entries.
+        mounts = {}
+        for relation in self.relations:
+            for unit in relation.joined_units:
+                mount_name = unit.received_raw.get(
+                    'export_name', relation.application_name)
+                mount = mounts.setdefault(mount_name, {
+                    'mount_name': mount_name,
+                    'mounts': [],
+                })
+                data = unit.received_raw
+                mountpoint = data['mountpoint']
+                fstype = data['fstype']
+                options = data['options']
+                # Prefer an explicitly published hostname; otherwise use
+                # the unit's private-address.
+                host = data['hostname'] or \
+                    data['private-address']
+                # Only report mounts with all required fields present.
+                if host and mountpoint and fstype and options:
+                    mount['mounts'].append({
+                        'hostname': host,
+                        'mountpoint': mountpoint,
+                        'fstype': fstype,
+                        'options': options
+                    })
+        # Drop groups that ended up with no complete mount entries.
+        return [m for m in mounts.values() if m['mounts']]
diff --git a/kubernetes-worker/hooks/relations/nrpe-external-master/README.md b/kubernetes-worker/hooks/relations/nrpe-external-master/README.md
new file mode 100644
index 0000000..e33deb8
--- /dev/null
+++ b/kubernetes-worker/hooks/relations/nrpe-external-master/README.md
@@ -0,0 +1,66 @@
+# nrpe-external-master interface
+
+Use this interface to register nagios checks in your charm layers.
+
+## Purpose
+
+This interface is designed to interoperate with the
+[nrpe-external-master](https://jujucharms.com/nrpe-external-master) subordinate charm.
+
+## How to use in your layers
+
+The event handler for `nrpe-external-master.available` is called with an object
+through which you can register your own custom nagios checks, when a relation
+is established with `nrpe-external-master:nrpe-external-master`.
+
+This object provides a method,
+
+_add_check_(args, name=_check_name_, description=_description_, context=_context_, unit=_unit_)
+
+which is called to register a nagios plugin check for your service.
+
+All arguments are required.
+
+*args* is a list of nagios plugin command line arguments, starting with the path to the plugin executable.
+
+*name* is the name of the check registered in nagios
+
+*description* is some text that describes what the check is for and what it does
+
+*context* is the nagios context name, something that identifies your application
+
+*unit* is `hookenv.local_unit()`
+
+The nrpe subordinate installs `check_http`, so you can use it like this:
+
+```
+@when('nrpe-external-master.available')
+def setup_nagios(nagios):
+ config = hookenv.config()
+ unit_name = hookenv.local_unit()
+ nagios.add_check(['/usr/lib/nagios/plugins/check_http',
+ '-I', '127.0.0.1', '-p', str(config['port']),
+ '-e', " 200 OK", '-u', '/publickey'],
+ name="check_http",
+ description="Verify my awesome service is responding",
+ context=config["nagios_context"],
+ unit=unit_name,
+ )
+```
+If your `nagios.add_check` defines a custom plugin, you will also need to restart the `nagios-nrpe-server` service.
+
+Consult the nagios documentation for more information on [how to write your own
+plugins](https://assets.nagios.com/downloads/nagioscore/docs/nagioscore/4/en/pluginapi.html)
+or [find one](https://www.nagios.org/projects/nagios-plugins/) that does what you need.
+
+## Example deployment
+
+```
+$ juju deploy your-awesome-charm
+$ juju deploy nrpe-external-master --config site-nagios.yaml
+$ juju add-relation your-awesome-charm nrpe-external-master
+```
+
+where `site-nagios.yaml` has the necessary configuration settings for the
+subordinate to connect to nagios.
+
diff --git a/kubernetes-worker/hooks/relations/nrpe-external-master/__init__.py b/kubernetes-worker/hooks/relations/nrpe-external-master/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/kubernetes-worker/hooks/relations/nrpe-external-master/interface.yaml b/kubernetes-worker/hooks/relations/nrpe-external-master/interface.yaml
new file mode 100644
index 0000000..859a423
--- /dev/null
+++ b/kubernetes-worker/hooks/relations/nrpe-external-master/interface.yaml
@@ -0,0 +1,3 @@
+name: nrpe-external-master
+summary: Nagios interface
+version: 1
diff --git a/kubernetes-worker/hooks/relations/nrpe-external-master/provides.py b/kubernetes-worker/hooks/relations/nrpe-external-master/provides.py
new file mode 100644
index 0000000..b6c7f0d
--- /dev/null
+++ b/kubernetes-worker/hooks/relations/nrpe-external-master/provides.py
@@ -0,0 +1,91 @@
+import datetime
+import os
+
+from charmhelpers.core import hookenv
+
+from charms.reactive import hook
+from charms.reactive import RelationBase
+from charms.reactive import scopes
+
+
+class NrpeExternalMasterProvides(RelationBase):
+    ''' Provides side of the nrpe-external-master interface (legacy
+    RelationBase API, global scope: all remote units share one
+    conversation).
+
+    Lets charm layers register nagios checks by writing NRPE command
+    files and nagios service-export files onto the local filesystem.
+    '''
+    scope = scopes.GLOBAL
+
+    @hook('{provides:nrpe-external-master}-relation-{joined,changed}')
+    def changed_nrpe(self):
+        # Relation established or data updated: expose availability.
+        self.set_state('{relation_name}.available')
+
+    @hook('{provides:nrpe-external-master}-relation-{broken,departed}')
+    def broken_nrpe(self):
+        self.remove_state('{relation_name}.available')
+
+    def add_check(self, args, name=None, description=None, context=None,
+                  servicegroups=None, unit=None):
+        ''' Register a nagios plugin check.
+
+        args: plugin command line, starting with the executable path.
+        name: check name used in nagios and in the generated filenames.
+        description: human-readable text for the service definition.
+        context: nagios context; overridden by the remote
+            'nagios_host_context' when published.
+        servicegroups: optional servicegroups value; defaults to context.
+        unit: unit name; defaults to hookenv.local_unit().
+
+        Side effects: writes /etc/nagios/nrpe.d/check_<name>.cfg and
+        /var/lib/nagios/export/service__<unit>_<name>.cfg, and records
+        both paths in local relation data for later cleanup by removed().
+        '''
+        # Accumulated list of files written so far, kept in local
+        # relation data so removed() can delete them.
+        nagios_files = self.get_local('nagios.check.files', [])
+
+        if not unit:
+            unit = hookenv.local_unit()
+        # Juju unit names contain '/', which is not filename-safe.
+        unit = unit.replace('/', '-')
+        context = self.get_remote('nagios_host_context', context)
+        host_name = self.get_remote('nagios_hostname',
+                                    '%s-%s' % (context, unit))
+
+        check_tmpl = """
+#---------------------------------------------------
+# This file is Juju managed
+#---------------------------------------------------
+command[%(check_name)s]=%(check_args)s
+"""
+        service_tmpl = """
+#---------------------------------------------------
+# This file is Juju managed
+#---------------------------------------------------
+define service {
+    use active-service
+    host_name %(host_name)s
+    service_description %(description)s
+    check_command check_nrpe!%(check_name)s
+    servicegroups %(servicegroups)s
+}
+"""
+        # NRPE command definition consumed by nagios-nrpe-server.
+        check_filename = "/etc/nagios/nrpe.d/check_%s.cfg" % (name)
+        with open(check_filename, "w") as fh:
+            fh.write(check_tmpl % {
+                'check_args': ' '.join(args),
+                'check_name': name,
+            })
+        nagios_files.append(check_filename)
+
+        # Service definition exported for the nagios master to pick up.
+        service_filename = "/var/lib/nagios/export/service__%s_%s.cfg" % (
+            unit, name)
+        with open(service_filename, "w") as fh:
+            fh.write(service_tmpl % {
+                'servicegroups': servicegroups or context,
+                'context': context,
+                'description': description,
+                'check_name': name,
+                'host_name': host_name,
+                'unit_name': unit,
+            })
+        nagios_files.append(service_filename)
+
+        self.set_local('nagios.check.files', nagios_files)
+
+    def removed(self):
+        ''' Delete every file recorded by add_check and reset the list.
+        Failures to unlink are logged and skipped (best effort). '''
+        files = self.get_local('nagios.check.files', [])
+        for f in files:
+            try:
+                os.unlink(f)
+            except Exception as e:
+                hookenv.log("failed to remove %s: %s" % (f, e))
+        self.set_local('nagios.check.files', [])
+        self.remove_state('{relation_name}.removed')
+
+    def added(self):
+        # Same action as an update: publish a fresh timestamp.
+        self.updated()
+
+    def updated(self):
+        ''' Publish a current timestamp to the remote side so it can
+        detect that local check data changed. '''
+        relation_info = {
+            'timestamp': datetime.datetime.now().isoformat(),
+        }
+        self.set_remote(**relation_info)
diff --git a/kubernetes-worker/hooks/relations/nrpe-external-master/requires.py b/kubernetes-worker/hooks/relations/nrpe-external-master/requires.py
new file mode 100644
index 0000000..e69de29
diff --git a/kubernetes-worker/hooks/relations/openstack-integration/.gitignore b/kubernetes-worker/hooks/relations/openstack-integration/.gitignore
new file mode 100644
index 0000000..5f9f2c5
--- /dev/null
+++ b/kubernetes-worker/hooks/relations/openstack-integration/.gitignore
@@ -0,0 +1,3 @@
+.tox
+__pycache__
+*.pyc
diff --git a/kubernetes-worker/hooks/relations/openstack-integration/LICENSE b/kubernetes-worker/hooks/relations/openstack-integration/LICENSE
new file mode 100644
index 0000000..d645695
--- /dev/null
+++ b/kubernetes-worker/hooks/relations/openstack-integration/LICENSE
@@ -0,0 +1,202 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/kubernetes-worker/hooks/relations/openstack-integration/README.md b/kubernetes-worker/hooks/relations/openstack-integration/README.md
new file mode 100644
index 0000000..ae021c2
--- /dev/null
+++ b/kubernetes-worker/hooks/relations/openstack-integration/README.md
@@ -0,0 +1,28 @@
+# Overview
+
+This layer encapsulates the `openstack-integration` interface communication
+protocol and provides an API for charms on either side of relations using this
+interface.
+
+## Usage
+
+In your charm's `layer.yaml`, ensure that `interface:openstack-integration` is
+included in the `includes` section:
+
+```yaml
+includes: ['layer:basic', 'interface:openstack-integration']
+```
+
+And in your charm's `metadata.yaml`, ensure that a relation endpoint is defined
+using the `openstack-integration` interface protocol:
+
+```yaml
+requires:
+ openstack:
+ interface: openstack-integration
+```
+
+For documentation on how to use the API for this interface, see:
+
+* [Requires API documentation](docs/requires.md)
+* [Provides API documentation](docs/provides.md) (this will only be used by the openstack-integrator charm)
diff --git a/kubernetes-worker/hooks/relations/openstack-integration/__init__.py b/kubernetes-worker/hooks/relations/openstack-integration/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/kubernetes-worker/hooks/relations/openstack-integration/copyright b/kubernetes-worker/hooks/relations/openstack-integration/copyright
new file mode 100644
index 0000000..a91bdf1
--- /dev/null
+++ b/kubernetes-worker/hooks/relations/openstack-integration/copyright
@@ -0,0 +1,16 @@
+Format: http://dep.debian.net/deps/dep5/
+
+Files: *
+Copyright: Copyright 2018, Canonical Ltd., All Rights Reserved.
+License: Apache License 2.0
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+ .
+ http://www.apache.org/licenses/LICENSE-2.0
+ .
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/kubernetes-worker/hooks/relations/openstack-integration/docs/provides.md b/kubernetes-worker/hooks/relations/openstack-integration/docs/provides.md
new file mode 100644
index 0000000..ee17ac6
--- /dev/null
+++ b/kubernetes-worker/hooks/relations/openstack-integration/docs/provides.md
@@ -0,0 +1,108 @@
+
provides
+
+
+This is the provides side of the interface layer, for use only by the
+OpenStack integration charm itself.
+
+The flags that are set by the provides side of this interface are:
+
+* **`endpoint.{endpoint_name}.requested`** This flag is set when there is
+ a new or updated request by a remote unit for OpenStack integration
+ features. The OpenStack integration charm should then iterate over each
+ request, perform whatever actions are necessary to satisfy those requests,
+ and then mark them as complete.
+
+
+
+
+A list of all of the [`IntegrationRequests`](#provides.OpenStackIntegrationProvides.all_requests.IntegrationRequests) that have been made.
+
+
new_requests
+
+
+A list of the new or updated [`IntegrationRequests`](#provides.OpenStackIntegrationProvides.new_requests.IntegrationRequests) that have been made.
+
+
mark_completed
+
+```python
+OpenStackIntegrationProvides.mark_completed()
+```
+
+Mark all requests as completed and remove the `requests-pending` flag.
+
+
IntegrationRequest
+
+```python
+IntegrationRequest(unit)
+```
+
+A request for integration from a single remote unit.
+
+
has_credentials
+
+
+Whether or not credentials have been set via `set_credentials`.
+
+
is_changed
+
+
+Whether this request has changed since the last time it was
+marked completed (if ever).
+
+
set_credentials
+
+```python
+IntegrationRequest.set_credentials(auth_url,
+ region,
+ username,
+ password,
+ user_domain_name,
+ project_domain_name,
+ project_name,
+ endpoint_tls_ca,
+ version=None)
+```
+
+Set the credentials for this request.
+
+
set_lbaas_config
+
+```python
+IntegrationRequest.set_lbaas_config(subnet_id,
+ floating_network_id,
+ lb_method,
+ manage_security_groups,
+ has_octavia=None)
+```
+
+Set the load-balancer-as-a-service config for this request.
+
+
set_block_storage_config
+
+```python
+IntegrationRequest.set_block_storage_config(bs_version, trust_device_path,
+ ignore_volume_az)
+```
+
+Set the block storage config for this request.
+
diff --git a/kubernetes-worker/hooks/relations/openstack-integration/docs/requires.md b/kubernetes-worker/hooks/relations/openstack-integration/docs/requires.md
new file mode 100644
index 0000000..510e292
--- /dev/null
+++ b/kubernetes-worker/hooks/relations/openstack-integration/docs/requires.md
@@ -0,0 +1,160 @@
+
requires
+
+
+This is the requires side of the interface layer, for use in charms that wish
+to request integration with OpenStack native features. The integration will be
+provided by the OpenStack integration charm, which allows the requiring charm
+to not require cloud credentials itself and not have a lot of OpenStack
+specific API code.
+
+The flags that are set by the requires side of this interface are:
+
+* **`endpoint.{endpoint_name}.joined`** This flag is set when the relation
+ has been joined, and the charm should then use the methods documented below
+ to request specific OpenStack features. This flag is automatically removed
+ if the relation is broken. It should not be removed by the charm.
+
+* **`endpoint.{endpoint_name}.ready`** This flag is set once the requested
+ features have been enabled for the OpenStack instance on which the charm is
+ running. This flag is automatically removed if new integration features are
+ requested. It should not be removed by the charm.
+
+* **`endpoint.{endpoint_name}.ready.changed`** This flag is set if the data
+ changes after the ready flag was set. This flag should be removed by the
+ charm once handled.
+
+
OpenStackIntegrationRequires
+
+```python
+OpenStackIntegrationRequires(endpoint_name, relation_ids=None)
+```
+
+Interface to request integration access.
+
+Note that due to resource limits and permissions granularity, policies are
+limited to being applied at the charm level. That means that, if any
+permissions are requested (i.e., any of the enable methods are called),
+what is granted will be the sum of those ever requested by any instance of
+the charm on this cloud.
+
+Labels, on the other hand, will be instance specific.
+
+Example usage:
+
+```python
+from charms.reactive import when, endpoint_from_flag
+
+@when('endpoint.openstack.ready')
+def openstack_integration_ready():
+ openstack = endpoint_from_flag('endpoint.openstack.ready')
+ update_config_enable_openstack(openstack)
+```
+
+
auth_url
+
+
+The authentication endpoint URL.
+
+
bs_version
+
+
+What block storage API version to use, `auto` if autodetection is
+desired, or `None` to use the default.
+
+
endpoint_tls_ca
+
+
+Optional base64-encoded CA certificate for the authentication endpoint,
+or None.
+
+
floating_network_id
+
+
+Optional floating network ID, or None.
+
+
has_octavia
+
+
+Whether the underlying OpenStack supports Octavia instead of
+Neutron-based LBaaS.
+
+Will either be True, False, or None if it could not be determined for
+some reason (typically due to connecting to an older integrator charm).
+
+
ignore_volume_az
+
+
+Whether to ignore availability zones when attaching Cinder volumes.
+
+Will be `True`, `False`, or `None`.
+
+
is_changed
+
+
+Whether or not the request for this instance has changed.
+
+
is_ready
+
+
+Whether or not the request for this instance has been completed.
+
+
lb_method
+
+
+Optional load-balancer method, or None.
+
+
manage_security_groups
+
+
+Whether or not the Load Balancer should automatically manage security
+group rules.
+
+Will be `True` or `False`.
+
+
password
+
+
+The password.
+
+
project_domain_name
+
+
+The project domain name.
+
+
project_name
+
+
+The project name, also known as the tenant name.
+
+
region
+
+
+The region name.
+
+
subnet_id
+
+
+Optional subnet ID to work in, or None.
+
+
trust_device_path
+
+
+Whether to trust the block device name provided by Ceph.
+
+Will be `True`, `False`, or `None`.
+
+
user_domain_name
+
+
+The user domain name.
+
+
username
+
+
+The username.
+
+
version
+
+
+Optional version number for the APIs or None.
+
diff --git a/kubernetes-worker/hooks/relations/openstack-integration/interface.yaml b/kubernetes-worker/hooks/relations/openstack-integration/interface.yaml
new file mode 100644
index 0000000..a94fed4
--- /dev/null
+++ b/kubernetes-worker/hooks/relations/openstack-integration/interface.yaml
@@ -0,0 +1,4 @@
+name: openstack-integration
+summary: Interface for connecting to the OpenStack integrator charm.
+version: 1
+maintainer: Cory Johns
diff --git a/kubernetes-worker/hooks/relations/openstack-integration/make_docs b/kubernetes-worker/hooks/relations/openstack-integration/make_docs
new file mode 100644
index 0000000..a09c66f
--- /dev/null
+++ b/kubernetes-worker/hooks/relations/openstack-integration/make_docs
@@ -0,0 +1,20 @@
+#!.tox/py3/bin/python
+
+import sys
+from shutil import rmtree
+from unittest.mock import patch
+
+import pydocmd.__main__
+
+
+with patch('charmhelpers.core.hookenv.metadata') as metadata:
+ metadata.return_value = {
+ 'requires': {'openstack': {'interface': 'openstack'}},
+ 'provides': {'openstack': {'interface': 'openstack'}},
+ }
+ sys.path.insert(0, '.')
+ print(sys.argv)
+ if len(sys.argv) == 1:
+ sys.argv.extend(['build'])
+ pydocmd.__main__.main()
+ rmtree('_build')
diff --git a/kubernetes-worker/hooks/relations/openstack-integration/provides.py b/kubernetes-worker/hooks/relations/openstack-integration/provides.py
new file mode 100644
index 0000000..7aa8146
--- /dev/null
+++ b/kubernetes-worker/hooks/relations/openstack-integration/provides.py
@@ -0,0 +1,152 @@
+"""
+This is the provides side of the interface layer, for use only by the
+OpenStack integration charm itself.
+
+The flags that are set by the provides side of this interface are:
+
+* **`endpoint.{endpoint_name}.requested`** This flag is set when there is
+ a new or updated request by a remote unit for OpenStack integration
+ features. The OpenStack integration charm should then iterate over each
+ request, perform whatever actions are necessary to satisfy those requests,
+ and then mark them as complete.
+"""
+
+from operator import attrgetter
+
+from charms.reactive import Endpoint
+from charms.reactive import when
+from charms.reactive import toggle_flag, clear_flag
+
+
+class OpenStackIntegrationProvides(Endpoint):
+ """
+ Example usage:
+
+ ```python
+ from charms.reactive import when, endpoint_from_flag
+ from charms import layer
+
+ @when('endpoint.openstack.requests-pending')
+ def handle_requests():
+ openstack = endpoint_from_flag('endpoint.openstack.requests-pending')
+ for request in openstack.requests:
+ request.set_credentials(layer.openstack.get_user_credentials())
+ openstack.mark_completed()
+ ```
+ """
+
+ @when('endpoint.{endpoint_name}.changed')
+ def check_requests(self):
+ toggle_flag(self.expand_name('requests-pending'),
+ len(self.all_requests) > 0)
+ clear_flag(self.expand_name('changed'))
+
+ @property
+ def all_requests(self):
+ """
+ A list of all of the #IntegrationRequests that have been made.
+ """
+ if not hasattr(self, '_all_requests'):
+ self._all_requests = [IntegrationRequest(unit)
+ for unit in self.all_joined_units]
+ return self._all_requests
+
+ @property
+ def new_requests(self):
+ """
+ A list of the new or updated #IntegrationRequests that have been made.
+ """
+ is_changed = attrgetter('is_changed')
+ return list(filter(is_changed, self.all_requests))
+
+ def mark_completed(self):
+ """
+ Mark all requests as completed and remove the `requests-pending` flag.
+ """
+ clear_flag(self.expand_name('requests-pending'))
+
+
+class IntegrationRequest:
+ """
+ A request for integration from a single remote unit.
+ """
+ def __init__(self, unit):
+ self._unit = unit
+
+ @property
+ def _to_publish(self):
+ return self._unit.relation.to_publish
+
+ @property
+ def is_changed(self):
+ """
+ Whether this request has changed since the last time it was
+ marked completed (if ever).
+ """
+ return not self.has_credentials
+
+ @property
+ def unit_name(self):
+ return self._unit.unit_name
+
+ def set_credentials(self,
+ auth_url,
+ region,
+ username,
+ password,
+ user_domain_name,
+ project_domain_name,
+ project_name,
+ endpoint_tls_ca,
+ version=None):
+ """
+ Set the credentials for this request.
+ """
+ self._unit.relation.to_publish.update({
+ 'auth_url': auth_url,
+ 'region': region,
+ 'username': username,
+ 'password': password,
+ 'user_domain_name': user_domain_name,
+ 'project_domain_name': project_domain_name,
+ 'project_name': project_name,
+ 'endpoint_tls_ca': endpoint_tls_ca,
+ 'version': version,
+ })
+
+ def set_lbaas_config(self,
+ subnet_id,
+ floating_network_id,
+ lb_method,
+ manage_security_groups,
+ has_octavia=None):
+ """
+ Set the load-balancer-as-a-service config for this request.
+ """
+ self._unit.relation.to_publish.update({
+ 'subnet_id': subnet_id,
+ 'floating_network_id': floating_network_id,
+ 'lb_method': lb_method,
+ 'manage_security_groups': manage_security_groups,
+ 'has_octavia': has_octavia,
+ })
+
+ def set_block_storage_config(self,
+ bs_version,
+ trust_device_path,
+ ignore_volume_az):
+ """
+ Set the block storage config for this request.
+ """
+ self._unit.relation.to_publish.update({
+ 'bs_version': bs_version,
+ 'trust_device_path': trust_device_path,
+ 'ignore_volume_az': ignore_volume_az,
+ })
+
+ @property
+ def has_credentials(self):
+ """
+ Whether or not credentials have been set via `set_credentials`.
+ """
+ return 'credentials' in self._unit.relation.to_publish
diff --git a/kubernetes-worker/hooks/relations/openstack-integration/pydocmd.yml b/kubernetes-worker/hooks/relations/openstack-integration/pydocmd.yml
new file mode 100644
index 0000000..aa0a286
--- /dev/null
+++ b/kubernetes-worker/hooks/relations/openstack-integration/pydocmd.yml
@@ -0,0 +1,16 @@
+site_name: 'OpenStack Integration Interface'
+
+generate:
+ - requires.md:
+ - requires
+ - requires.OpenStackIntegrationRequires+
+ - provides.md:
+ - provides
+ - provides.OpenStackIntegrationProvides+
+ - provides.IntegrationRequest+
+
+pages:
+ - Requires: requires.md
+ - Provides: provides.md
+
+gens_dir: docs
diff --git a/kubernetes-worker/hooks/relations/openstack-integration/requires.py b/kubernetes-worker/hooks/relations/openstack-integration/requires.py
new file mode 100644
index 0000000..420f767
--- /dev/null
+++ b/kubernetes-worker/hooks/relations/openstack-integration/requires.py
@@ -0,0 +1,254 @@
+"""
+This is the requires side of the interface layer, for use in charms that wish
+to request integration with OpenStack native features. The integration will be
+provided by the OpenStack integration charm, which allows the requiring charm
+to not require cloud credentials itself and not have a lot of OpenStack
+specific API code.
+
+The flags that are set by the requires side of this interface are:
+
+* **`endpoint.{endpoint_name}.joined`** This flag is set when the relation
+ has been joined, and the charm should then use the methods documented below
+ to request specific OpenStack features. This flag is automatically removed
+ if the relation is broken. It should not be removed by the charm.
+
+* **`endpoint.{endpoint_name}.ready`** This flag is set once the requested
+ features have been enabled for the OpenStack instance on which the charm is
+ running. This flag is automatically removed if new integration features are
+ requested. It should not be removed by the charm.
+
+* **`endpoint.{endpoint_name}.ready.changed`** This flag is set if the data
+ changes after the ready flag was set. This flag should be removed by the
+ charm once handled.
+"""
+
+
+from charms.reactive import Endpoint
+from charms.reactive import when, when_not
+from charms.reactive import set_flag, clear_flag, toggle_flag, is_flag_set
+from charms.reactive import data_changed
+
+
+class OpenStackIntegrationRequires(Endpoint):
+ """
+ Interface to request integration access.
+
+ Note that due to resource limits and permissions granularity, policies are
+ limited to being applied at the charm level. That means that, if any
+ permissions are requested (i.e., any of the enable methods are called),
+ what is granted will be the sum of those ever requested by any instance of
+ the charm on this cloud.
+
+ Labels, on the other hand, will be instance specific.
+
+ Example usage:
+
+ ```python
+ from charms.reactive import when, endpoint_from_flag
+
+ @when('endpoint.openstack.ready')
+ def openstack_integration_ready():
+ openstack = endpoint_from_flag('endpoint.openstack.ready')
+ update_config_enable_openstack(openstack)
+ ```
+ """
+
+ @property
+ def _received(self):
+ """
+ Helper to streamline access to received data since we expect to only
+ ever be connected to a single OpenStack integration application with a
+ single unit.
+ """
+ return self.relations[0].joined_units.received
+
+ @property
+ def _to_publish(self):
+ """
+ Helper to streamline access to received data since we expect to only
+ ever be connected to a single OpenStack integration application with a
+ single unit.
+ """
+ return self.relations[0].to_publish
+
+ @when('endpoint.{endpoint_name}.changed')
+ def check_ready(self):
+ # My middle name is ready. No, that doesn't sound right.
+ # I eat ready for breakfast.
+ was_ready = is_flag_set(self.expand_name('ready'))
+ toggle_flag(self.expand_name('ready'), self.is_ready)
+ if self.is_ready and was_ready and self.is_changed:
+ set_flag(self.expand_name('ready.changed'))
+ clear_flag(self.expand_name('changed'))
+
+ @when_not('endpoint.{endpoint_name}.joined')
+ def remove_ready(self):
+ clear_flag(self.expand_name('ready'))
+
+ @property
+ def is_ready(self):
+ """
+ Whether or not the request for this instance has been completed.
+ """
+ # Although more information can be passed, such as LBaaS access
+ # the minimum needed to be considered ready is defined here
+ return all(field is not None for field in [
+ self.auth_url,
+ self.username,
+ self.password,
+ self.user_domain_name,
+ self.project_domain_name,
+ self.project_name,
+ ])
+
+ @property
+ def is_changed(self):
+ """
+ Whether or not the request for this instance has changed.
+ """
+ return data_changed(self.expand_name('all-data'), [
+ self.auth_url,
+ self.region,
+ self.username,
+ self.password,
+ self.user_domain_name,
+ self.project_domain_name,
+ self.project_name,
+ self.endpoint_tls_ca,
+ self.subnet_id,
+ self.floating_network_id,
+ self.lb_method,
+ self.manage_security_groups,
+ ])
+
+ @property
+ def auth_url(self):
+ """
+ The authentication endpoint URL.
+ """
+ return self._received['auth_url']
+
+ @property
+ def region(self):
+ """
+ The region name.
+ """
+ return self._received['region']
+
+ @property
+ def username(self):
+ """
+ The username.
+ """
+ return self._received['username']
+
+ @property
+ def password(self):
+ """
+ The password.
+ """
+ return self._received['password']
+
+ @property
+ def user_domain_name(self):
+ """
+ The user domain name.
+ """
+ return self._received['user_domain_name']
+
+ @property
+ def project_domain_name(self):
+ """
+ The project domain name.
+ """
+ return self._received['project_domain_name']
+
+ @property
+ def project_name(self):
+ """
+        The project name, also known as the tenant name.
+ """
+ return self._received['project_name']
+
+ @property
+ def endpoint_tls_ca(self):
+ """
+ Optional base64-encoded CA certificate for the authentication endpoint,
+ or None.
+ """
+ return self._received['endpoint_tls_ca'] or None
+
+ @property
+ def version(self):
+ """
+ Optional version number for the APIs or None.
+ """
+ return self._received['version'] or None
+
+ @property
+ def subnet_id(self):
+ """
+ Optional subnet ID to work in, or None.
+ """
+ return self._received['subnet_id']
+
+ @property
+ def floating_network_id(self):
+ """
+ Optional floating network ID, or None.
+ """
+ return self._received['floating_network_id']
+
+ @property
+ def lb_method(self):
+ """
+ Optional load-balancer method, or None.
+ """
+ return self._received['lb_method']
+
+ @property
+ def manage_security_groups(self):
+ """
+ Whether or not the Load Balancer should automatically manage security
+ group rules.
+
+ Will be `True` or `False`.
+ """
+ return self._received['manage_security_groups'] or False
+
+ @property
+ def bs_version(self):
+ """
+ What block storage API version to use, `auto` if autodetection is
+ desired, or `None` to use the default.
+ """
+ return self._received['bs_version']
+
+ @property
+ def trust_device_path(self):
+ """
+ Whether to trust the block device name provided by Ceph.
+
+ Will be `True`, `False`, or `None`.
+ """
+ return self._received['trust_device_path']
+
+ @property
+ def ignore_volume_az(self):
+ """
+ Whether to ignore availability zones when attaching Cinder volumes.
+
+ Will be `True`, `False`, or `None`.
+ """
+ return self._received['ignore_volume_az']
+
+ @property
+ def has_octavia(self):
+ """
+ Whether the underlying OpenStack supports Octavia instead of
+ Neutron-based LBaaS.
+
+ Will either be True, False, or None if it could not be determined for
+ some reason (typically due to connecting to an older integrator charm).
+ """
+ return self._received['has_octavia']
diff --git a/kubernetes-worker/hooks/relations/tls-certificates/.gitignore b/kubernetes-worker/hooks/relations/tls-certificates/.gitignore
new file mode 100644
index 0000000..93813bc
--- /dev/null
+++ b/kubernetes-worker/hooks/relations/tls-certificates/.gitignore
@@ -0,0 +1,4 @@
+.tox
+__pycache__
+*.pyc
+_build
diff --git a/kubernetes-worker/hooks/relations/tls-certificates/README.md b/kubernetes-worker/hooks/relations/tls-certificates/README.md
new file mode 100644
index 0000000..733da6d
--- /dev/null
+++ b/kubernetes-worker/hooks/relations/tls-certificates/README.md
@@ -0,0 +1,90 @@
+# Interface tls-certificates
+
+This is a [Juju][] interface layer that enables a charm which requires TLS
+certificates to relate to a charm which can provide them, such as [Vault][] or
+[EasyRSA][].
+
+To get started please read the [Introduction to PKI][] which defines some PKI
+terms, concepts and processes used in this document.
+
+# Example Usage
+
+Let's say you have a charm which needs a server certificate for a service it
+provides to other charms and a client certificate for a database it consumes
+from another charm. The charm provides its own service on the `clients`
+relation endpoint, and it consumes the database on the `db` relation endpoint.
+
+First, you must define the relation endpoint in your charm's `metadata.yaml`:
+
+```yaml
+requires:
+ cert-provider:
+ interface: tls-certificates
+```
+
+Next, you must ensure the interface layer is included in your `layer.yaml`:
+
+```yaml
+includes:
+ - interface:tls-certificates
+```
+
+Then, in your reactive code, add the following, changing `update_certs` to
+handle the certificates however your charm needs:
+
+```python
+from charmhelpers.core import hookenv, host
+from charms.reactive import endpoint_from_flag
+
+
+@when('cert-provider.ca.changed')
+def install_root_ca_cert():
+ cert_provider = endpoint_from_flag('cert-provider.ca.available')
+ host.install_ca_cert(cert_provider.root_ca_cert)
+ clear_flag('cert-provider.ca.changed')
+
+
+@when('cert-provider.available')
+def request_certificates():
+ cert_provider = endpoint_from_flag('cert-provider.available')
+
+ # get ingress info
+ ingress_for_clients = hookenv.network_get('clients')['ingress-addresses']
+ ingress_for_db = hookenv.network_get('db')['ingress-addresses']
+
+ # use first ingress address as primary and any additional as SANs
+    server_cn, server_sans = ingress_for_clients[0], ingress_for_clients[1:]
+    client_cn, client_sans = ingress_for_db[0], ingress_for_db[1:]
+
+ # request a single server and single client cert; note that multiple certs
+ # of either type can be requested as long as they have unique common names
+ cert_provider.request_server_cert(server_cn, server_sans)
+ cert_provider.request_client_cert(client_cn, client_sans)
+
+
+@when('cert-provider.certs.changed')
+def update_certs():
+ cert_provider = endpoint_from_flag('cert-provider.available')
+ server_cert = cert_provider.server_certs[0] # only requested one
+ myserver.update_server_cert(server_cert.cert, server_cert.key)
+
+ client_cert = cert_provider.client_certs[0] # only requested one
+ myclient.update_client_cert(client_cert.cert, client_cert.key)
+ clear_flag('cert-provider.certs.changed')
+```
+
+
+# Reference
+
+ * [Requires](docs/requires.md)
+ * [Provides](docs/provides.md)
+
+# Contact Information
+
+Maintainer: Cory Johns <Cory.Johns@canonical.com>
+
+
+[Juju]: https://jujucharms.com
+[Vault]: https://jujucharms.com/u/openstack-charmers/vault
+[EasyRSA]: https://jujucharms.com/u/containers/easyrsa
+[Introduction to PKI]: https://github.com/OpenVPN/easy-rsa/blob/master/doc/Intro-To-PKI.md
diff --git a/kubernetes-worker/hooks/relations/tls-certificates/__init__.py b/kubernetes-worker/hooks/relations/tls-certificates/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/kubernetes-worker/hooks/relations/tls-certificates/docs/common.md b/kubernetes-worker/hooks/relations/tls-certificates/docs/common.md
new file mode 100644
index 0000000..25d0e08
--- /dev/null
+++ b/kubernetes-worker/hooks/relations/tls-certificates/docs/common.md
@@ -0,0 +1,51 @@
+
+
+Name of the application which the request came from.
+
+:returns: Name of application
+:rtype: str
+
+
cert
+
+
+The cert published for this request, if any.
+
+
cert_type
+
+
+Type of certificate, 'server' or 'client', being requested.
+
+
resolve_unit_name
+
+```python
+CertificateRequest.resolve_unit_name(unit)
+```
+Return name of unit associated with this request.
+
+unit_name should be provided in the relation data to ensure
+compatibility with cross-model relations. If the unit name
+is absent then fall back to unit_name attribute of the
+unit associated with this request.
+
+:param unit: Unit to extract name from
+:type unit: charms.reactive.endpoints.RelatedUnit
+:returns: Name of unit
+:rtype: str
+
+
Certificate
+
+```python
+Certificate(self, cert_type, common_name, cert, key)
+```
+
+Represents a created certificate and key.
+
+The ``cert_type``, ``common_name``, ``cert``, and ``key`` values can
+be accessed either as properties or as the contents of the dict.
+
diff --git a/kubernetes-worker/hooks/relations/tls-certificates/docs/provides.md b/kubernetes-worker/hooks/relations/tls-certificates/docs/provides.md
new file mode 100644
index 0000000..c213546
--- /dev/null
+++ b/kubernetes-worker/hooks/relations/tls-certificates/docs/provides.md
@@ -0,0 +1,212 @@
+
provides
+
+
+
TlsProvides
+
+```python
+TlsProvides(self, endpoint_name, relation_ids=None)
+```
+
+The provider's side of the interface protocol.
+
+The following flags may be set:
+
+ * `{endpoint_name}.available`
+ Whenever any clients are joined.
+
+ * `{endpoint_name}.certs.requested`
+ When there are new certificate requests of any kind to be processed.
+ The requests can be accessed via [new_requests][].
+
+ * `{endpoint_name}.server.certs.requested`
+ When there are new server certificate requests to be processed.
+ The requests can be accessed via [new_server_requests][].
+
+ * `{endpoint_name}.client.certs.requested`
+ When there are new client certificate requests to be processed.
+ The requests can be accessed via [new_client_requests][].
+
+[Certificate]: common.md#tls_certificates_common.Certificate
+[CertificateRequest]: common.md#tls_certificates_common.CertificateRequest
+[all_requests]: provides.md#provides.TlsProvides.all_requests
+[new_requests]: provides.md#provides.TlsProvides.new_requests
+[new_server_requests]: provides.md#provides.TlsProvides.new_server_requests
+[new_client_requests]: provides.md#provides.TlsProvides.new_client_requests
+
+
all_published_certs
+
+
+List of all [Certificate][] instances that this provider has published
+for all related applications.
+
+
all_requests
+
+
+List of all requests that have been made.
+
+Each will be an instance of [CertificateRequest][].
+
+Example usage:
+
+```python
+@when('certs.regen',
+ 'tls.certs.available')
+def regen_all_certs():
+ tls = endpoint_from_flag('tls.certs.available')
+ for request in tls.all_requests:
+ cert, key = generate_cert(request.cert_type,
+ request.common_name,
+ request.sans)
+ request.set_cert(cert, key)
+```
+
+
new_application_requests
+
+
+Filtered view of [new_requests][] that only includes application cert
+requests.
+
+Each will be an instance of [ApplicationCertificateRequest][].
+
+Example usage:
+
+```python
+@when('tls.application.certs.requested')
+def gen_application_certs():
+ tls = endpoint_from_flag('tls.application.certs.requested')
+ for request in tls.new_application_requests:
+ cert, key = generate_application_cert(request.common_name,
+ request.sans)
+ request.set_cert(cert, key)
+```
+
+
new_client_requests
+
+
+Filtered view of [new_requests][] that only includes client cert
+requests.
+
+Each will be an instance of [CertificateRequest][].
+
+Example usage:
+
+```python
+@when('tls.client.certs.requested')
+def gen_client_certs():
+ tls = endpoint_from_flag('tls.client.certs.requested')
+ for request in tls.new_client_requests:
+ cert, key = generate_client_cert(request.common_name,
+ request.sans)
+ request.set_cert(cert, key)
+```
+
+
new_requests
+
+
+Filtered view of [all_requests][] that only includes requests that
+haven't been handled.
+
+Each will be an instance of [CertificateRequest][].
+
+This collection can also be further filtered by request type using
+[new_server_requests][] or [new_client_requests][].
+
+Example usage:
+
+```python
+@when('tls.certs.requested')
+def gen_certs():
+ tls = endpoint_from_flag('tls.certs.requested')
+ for request in tls.new_requests:
+ cert, key = generate_cert(request.cert_type,
+ request.common_name,
+ request.sans)
+ request.set_cert(cert, key)
+```
+
+
new_server_requests
+
+
+Filtered view of [new_requests][] that only includes server cert
+requests.
+
+Each will be an instance of [CertificateRequest][].
+
+Example usage:
+
+```python
+@when('tls.server.certs.requested')
+def gen_server_certs():
+ tls = endpoint_from_flag('tls.server.certs.requested')
+ for request in tls.new_server_requests:
+ cert, key = generate_server_cert(request.common_name,
+ request.sans)
+ request.set_cert(cert, key)
+```
+
+
set_ca
+
+```python
+TlsProvides.set_ca(certificate_authority)
+```
+
+Publish the CA to all related applications.
+
+
set_chain
+
+```python
+TlsProvides.set_chain(chain)
+```
+
+Publish the chain of trust to all related applications.
+
+
set_client_cert
+
+```python
+TlsProvides.set_client_cert(cert, key)
+```
+
+Deprecated. This is only for backwards compatibility.
+
+Publish a globally shared client cert and key.
+
+
set_server_cert
+
+```python
+TlsProvides.set_server_cert(scope, cert, key)
+```
+
+Deprecated. Use one of the [new_requests][] collections and
+`request.set_cert()` instead.
+
+Set the server cert and key for the request identified by `scope`.
+
+
+
+```python
+TlsProvides.get_server_requests()
+```
+
+Deprecated. Use the [new_requests][] or [server_requests][]
+collections instead.
+
+One provider can have many requests to generate server certificates.
+Return a map of all server request objects indexed by a unique
+identifier.
+
diff --git a/kubernetes-worker/hooks/relations/tls-certificates/docs/requires.md b/kubernetes-worker/hooks/relations/tls-certificates/docs/requires.md
new file mode 100644
index 0000000..fdec902
--- /dev/null
+++ b/kubernetes-worker/hooks/relations/tls-certificates/docs/requires.md
@@ -0,0 +1,207 @@
+
requires
+
+
+
TlsRequires
+
+```python
+TlsRequires(self, endpoint_name, relation_ids=None)
+```
+
+The client's side of the interface protocol.
+
+The following flags may be set:
+
+ * `{endpoint_name}.available`
+ Whenever the relation is joined.
+
+ * `{endpoint_name}.ca.available`
+ When the root CA information is available via the [root_ca_cert][] and
+ [root_ca_chain][] properties.
+
+ * `{endpoint_name}.ca.changed`
+ When the root CA information has changed, whether because
+ they have just become available or if they were regenerated by the CA.
+ Once processed this flag should be removed by the charm.
+
+ * `{endpoint_name}.certs.available`
+ When the requested server or client certs are available.
+
+ * `{endpoint_name}.certs.changed`
+ When the requested server or client certs have changed, whether because
+ they have just become available or if they were regenerated by the CA.
+ Once processed this flag should be removed by the charm.
+
+ * `{endpoint_name}.server.certs.available`
+ When the server certificates requested by [request_server_cert][] are
+ available via the [server_certs][] collection.
+
+ * `{endpoint_name}.server.certs.changed`
+ When the requested server certificates have changed, whether because
+ they have just become available or if they were regenerated by the CA.
+ Once processed this flag should be removed by the charm.
+
+ * `{endpoint_name}.client.certs.available`
+ When the client certificates requested by [request_client_cert][] are
+ available via the [client_certs][] collection.
+
+ * `{endpoint_name}.client.certs.changed`
+ When the requested client certificates have changed, whether because
+ they have just become available or if they were regenerated by the CA.
+ Once processed this flag should be removed by the charm.
+
+The following flags have been deprecated:
+
+ * `{endpoint_name}.server.cert.available`
+ * `{endpoint_name}.client.cert.available`
+ * `{endpoint_name}.batch.cert.available`
+
+[Certificate]: common.md#tls_certificates_common.Certificate
+[CertificateRequest]: common.md#tls_certificates_common.CertificateRequest
+[root_ca_cert]: requires.md#requires.TlsRequires.root_ca_cert
+[root_ca_chain]: requires.md#requires.TlsRequires.root_ca_chain
+[request_server_cert]: requires.md#requires.TlsRequires.request_server_cert
+[request_client_cert]: requires.md#requires.TlsRequires.request_client_cert
+[server_certs]: requires.md#requires.TlsRequires.server_certs
+[server_certs_map]: requires.md#requires.TlsRequires.server_certs_map
+[client_certs]: requires.md#requires.TlsRequires.client_certs
+
+
application_certs
+
+
+List of [Certificate][] instances for all available application certs.
+
+
client_certs
+
+
+List of [Certificate][] instances for all available client certs.
+
+
client_certs_map
+
+
+Mapping of client [Certificate][] instances by their `common_name`.
+
+
root_ca_cert
+
+
+Root CA certificate.
+
+
root_ca_chain
+
+
+The chain of trust for the root CA.
+
+
server_certs
+
+
+List of [Certificate][] instances for all available server certs.
+
+
server_certs_map
+
+
+Mapping of server [Certificate][] instances by their `common_name`.
+
+
get_ca
+
+```python
+TlsRequires.get_ca()
+```
+
+Return the root CA certificate.
+
+Same as [root_ca_cert][].
+
+
get_chain
+
+```python
+TlsRequires.get_chain()
+```
+
+Return the chain of trust for the root CA.
+
+Same as [root_ca_chain][].
+
+
get_client_cert
+
+```python
+TlsRequires.get_client_cert()
+```
+
+Deprecated. Use [request_client_cert][] and the [client_certs][]
+collection instead.
+
+Return a globally shared client certificate and key.
+
+
get_server_cert
+
+```python
+TlsRequires.get_server_cert()
+```
+
+Deprecated. Use the [server_certs][] collection instead.
+
+Return the cert and key of the first server certificate requested.
+
+
get_batch_requests
+
+```python
+TlsRequires.get_batch_requests()
+```
+
+Deprecated. Use [server_certs_map][] instead.
+
+Mapping of server [Certificate][] instances by their `common_name`.
+
+
request_server_cert
+
+```python
+TlsRequires.request_server_cert(cn, sans=None, cert_name=None)
+```
+
+Request a server certificate and key be generated for the given
+common name (`cn`) and optional list of alternative names (`sans`).
+
+The `cert_name` is deprecated and not needed.
+
+This can be called multiple times to request more than one server
+certificate, although the common names must be unique. If called
+again with the same common name, it will be ignored.
+
+
+
+```python
+TlsRequires.request_server_certs()
+```
+
+Deprecated. Just use [request_server_cert][]; this does nothing.
+
+
request_client_cert
+
+```python
+TlsRequires.request_client_cert(cn, sans)
+```
+
+Request a client certificate and key be generated for the given
+common name (`cn`) and list of alternative names (`sans`).
+
+This can be called multiple times to request more than one client
+certificate, although the common names must be unique. If called
+again with the same common name, it will be ignored.
+
+
request_application_cert
+
+```python
+TlsRequires.request_application_cert(cn, sans)
+```
+
+Request an application certificate and key be generated for the given
+common name (`cn`) and list of alternative names (`sans`) of this
+unit and all peer units. All units will share a single certificate.
+
diff --git a/kubernetes-worker/hooks/relations/tls-certificates/interface.yaml b/kubernetes-worker/hooks/relations/tls-certificates/interface.yaml
new file mode 100644
index 0000000..beec53b
--- /dev/null
+++ b/kubernetes-worker/hooks/relations/tls-certificates/interface.yaml
@@ -0,0 +1,6 @@
+name: tls-certificates
+summary: |
+ A Transport Layer Security (TLS) charm layer that uses requires and provides
+ to exchange certificates.
+version: 1
+repo: https://github.com/juju-solutions/interface-tls-certificates
diff --git a/kubernetes-worker/hooks/relations/tls-certificates/make_docs b/kubernetes-worker/hooks/relations/tls-certificates/make_docs
new file mode 100644
index 0000000..2f2274a
--- /dev/null
+++ b/kubernetes-worker/hooks/relations/tls-certificates/make_docs
@@ -0,0 +1,23 @@
+#!.tox/py3/bin/python
+
+import sys
+import importlib
+from pathlib import Path
+from shutil import rmtree
+from unittest.mock import patch
+
+import pydocmd.__main__
+
+
+with patch('charmhelpers.core.hookenv.metadata') as metadata:
+ metadata.return_value = {
+ 'requires': {'cert': {'interface': 'tls-certificates'}},
+ 'provides': {'cert': {'interface': 'tls-certificates'}},
+ }
+ sys.path.append('..')
+ sys.modules[''] = importlib.import_module(Path.cwd().name)
+ print(sys.argv)
+ if len(sys.argv) == 1:
+ sys.argv.extend(['build'])
+ pydocmd.__main__.main()
+ rmtree('_build')
diff --git a/kubernetes-worker/hooks/relations/tls-certificates/provides.py b/kubernetes-worker/hooks/relations/tls-certificates/provides.py
new file mode 100644
index 0000000..0262baa
--- /dev/null
+++ b/kubernetes-worker/hooks/relations/tls-certificates/provides.py
@@ -0,0 +1,301 @@
+if not __package__:
+ # fix relative imports when building docs
+ import sys
+ __package__ = sys.modules[''].__name__
+
+from charms.reactive import Endpoint
+from charms.reactive import when, when_not
+from charms.reactive import set_flag, clear_flag, toggle_flag
+
+from .tls_certificates_common import (
+ ApplicationCertificateRequest,
+ CertificateRequest
+)
+
+
+class TlsProvides(Endpoint):
+ """
+ The provider's side of the interface protocol.
+
+ The following flags may be set:
+
+ * `{endpoint_name}.available`
+ Whenever any clients are joined.
+
+ * `{endpoint_name}.certs.requested`
+ When there are new certificate requests of any kind to be processed.
+ The requests can be accessed via [new_requests][].
+
+ * `{endpoint_name}.server.certs.requested`
+ When there are new server certificate requests to be processed.
+ The requests can be accessed via [new_server_requests][].
+
+ * `{endpoint_name}.client.certs.requested`
+ When there are new client certificate requests to be processed.
+ The requests can be accessed via [new_client_requests][].
+
+ [Certificate]: common.md#tls_certificates_common.Certificate
+ [CertificateRequest]: common.md#tls_certificates_common.CertificateRequest
+ [all_requests]: provides.md#provides.TlsProvides.all_requests
+ [new_requests]: provides.md#provides.TlsProvides.new_requests
+ [new_server_requests]: provides.md#provides.TlsProvides.new_server_requests
+ [new_client_requests]: provides.md#provides.TlsProvides.new_client_requests
+ """
+
+ @when('endpoint.{endpoint_name}.joined')
+ def joined(self):
+ set_flag(self.expand_name('{endpoint_name}.available'))
+ toggle_flag(self.expand_name('{endpoint_name}.certs.requested'),
+ self.new_requests)
+ toggle_flag(self.expand_name('{endpoint_name}.server.certs.requested'),
+ self.new_server_requests)
+ toggle_flag(self.expand_name('{endpoint_name}.client.certs.requested'),
+ self.new_client_requests)
+ toggle_flag(
+ self.expand_name('{endpoint_name}.application.certs.requested'),
+ self.new_application_requests)
+ # For backwards compatibility, set the old "cert" flags as well
+ toggle_flag(self.expand_name('{endpoint_name}.server.cert.requested'),
+ self.new_server_requests)
+ toggle_flag(self.expand_name('{endpoint_name}.client.cert.requested'),
+ self.new_client_requests)
+
+ @when_not('endpoint.{endpoint_name}.joined')
+ def broken(self):
+ clear_flag(self.expand_name('{endpoint_name}.available'))
+ clear_flag(self.expand_name('{endpoint_name}.certs.requested'))
+ clear_flag(self.expand_name('{endpoint_name}.server.certs.requested'))
+ clear_flag(self.expand_name('{endpoint_name}.client.certs.requested'))
+ clear_flag(
+ self.expand_name('{endpoint_name}.application.certs.requested'))
+
+ def set_ca(self, certificate_authority):
+ """
+ Publish the CA to all related applications.
+ """
+ for relation in self.relations:
+ # All the clients get the same CA, so send it to them.
+ relation.to_publish_raw['ca'] = certificate_authority
+
+ def set_chain(self, chain):
+ """
+ Publish the chain of trust to all related applications.
+ """
+ for relation in self.relations:
+ # All the clients get the same chain, so send it to them.
+ relation.to_publish_raw['chain'] = chain
+
+ def set_client_cert(self, cert, key):
+ """
+ Deprecated. This is only for backwards compatibility.
+
+ Publish a globally shared client cert and key.
+ """
+ for relation in self.relations:
+ relation.to_publish_raw.update({
+ 'client.cert': cert,
+ 'client.key': key,
+ })
+
+ def set_server_cert(self, scope, cert, key):
+ """
+ Deprecated. Use one of the [new_requests][] collections and
+ `request.set_cert()` instead.
+
+ Set the server cert and key for the request identified by `scope`.
+ """
+ request = self.get_server_requests()[scope]
+ request.set_cert(cert, key)
+
+ def set_server_multicerts(self, scope):
+ """
+ Deprecated. Done automatically.
+ """
+ pass
+
+ def add_server_cert(self, scope, cn, cert, key):
+ '''
+ Deprecated. Use `request.set_cert()` instead.
+ '''
+ self.set_server_cert(scope, cert, key)
+
+ def get_server_requests(self):
+ """
+ Deprecated. Use the [new_requests][] or [server_requests][]
+ collections instead.
+
+ One provider can have many requests to generate server certificates.
+ Return a map of all server request objects indexed by a unique
+ identifier.
+ """
+ return {req._key: req for req in self.new_server_requests}
+
+ @property
+ def all_requests(self):
+ """
+ List of all requests that have been made.
+
+ Each will be an instance of [CertificateRequest][].
+
+ Example usage:
+
+ ```python
+ @when('certs.regen',
+ 'tls.certs.available')
+ def regen_all_certs():
+ tls = endpoint_from_flag('tls.certs.available')
+ for request in tls.all_requests:
+ cert, key = generate_cert(request.cert_type,
+ request.common_name,
+ request.sans)
+ request.set_cert(cert, key)
+ ```
+ """
+ requests = []
+ for unit in self.all_joined_units:
+ # handle older single server cert request
+ if unit.received_raw['common_name']:
+ requests.append(CertificateRequest(
+ unit,
+ 'server',
+ unit.received_raw['certificate_name'],
+ unit.received_raw['common_name'],
+ unit.received['sans'],
+ ))
+
+ # handle multi server cert requests
+ reqs = unit.received['cert_requests'] or {}
+ for common_name, req in reqs.items():
+ requests.append(CertificateRequest(
+ unit,
+ 'server',
+ common_name,
+ common_name,
+ req['sans'],
+ ))
+
+ # handle client cert requests
+ reqs = unit.received['client_cert_requests'] or {}
+ for common_name, req in reqs.items():
+ requests.append(CertificateRequest(
+ unit,
+ 'client',
+ common_name,
+ common_name,
+ req['sans'],
+ ))
+ # handle application cert requests
+ reqs = unit.received['application_cert_requests'] or {}
+ for common_name, req in reqs.items():
+ requests.append(ApplicationCertificateRequest(
+ unit,
+ 'application',
+ common_name,
+ common_name,
+ req['sans']
+ ))
+ return requests
+
+ @property
+ def new_requests(self):
+ """
+ Filtered view of [all_requests][] that only includes requests that
+ haven't been handled.
+
+ Each will be an instance of [CertificateRequest][].
+
+ This collection can also be further filtered by request type using
+ [new_server_requests][] or [new_client_requests][].
+
+ Example usage:
+
+ ```python
+ @when('tls.certs.requested')
+ def gen_certs():
+ tls = endpoint_from_flag('tls.certs.requested')
+ for request in tls.new_requests:
+ cert, key = generate_cert(request.cert_type,
+ request.common_name,
+ request.sans)
+ request.set_cert(cert, key)
+ ```
+ """
+ return [req for req in self.all_requests if not req.is_handled]
+
+ @property
+ def new_server_requests(self):
+ """
+ Filtered view of [new_requests][] that only includes server cert
+ requests.
+
+ Each will be an instance of [CertificateRequest][].
+
+ Example usage:
+
+ ```python
+ @when('tls.server.certs.requested')
+ def gen_server_certs():
+ tls = endpoint_from_flag('tls.server.certs.requested')
+ for request in tls.new_server_requests:
+ cert, key = generate_server_cert(request.common_name,
+ request.sans)
+ request.set_cert(cert, key)
+ ```
+ """
+ return [req for req in self.new_requests if req.cert_type == 'server']
+
+ @property
+ def new_client_requests(self):
+ """
+ Filtered view of [new_requests][] that only includes client cert
+ requests.
+
+ Each will be an instance of [CertificateRequest][].
+
+ Example usage:
+
+ ```python
+ @when('tls.client.certs.requested')
+ def gen_client_certs():
+ tls = endpoint_from_flag('tls.client.certs.requested')
+ for request in tls.new_client_requests:
+ cert, key = generate_client_cert(request.common_name,
+ request.sans)
+ request.set_cert(cert, key)
+ ```
+ """
+ return [req for req in self.new_requests if req.cert_type == 'client']
+
+ @property
+ def new_application_requests(self):
+ """
+ Filtered view of [new_requests][] that only includes application cert
+ requests.
+
+ Each will be an instance of [ApplicationCertificateRequest][].
+
+ Example usage:
+
+ ```python
+ @when('tls.application.certs.requested')
+ def gen_application_certs():
+ tls = endpoint_from_flag('tls.application.certs.requested')
+ for request in tls.new_application_requests:
+ cert, key = generate_application_cert(request.common_name,
+ request.sans)
+ request.set_cert(cert, key)
+ ```
+
+ :returns: List of certificate requests.
+ :rtype: [CertificateRequest, ]
+ """
+ return [req for req in self.new_requests
+ if req.cert_type == 'application']
+
+ @property
+ def all_published_certs(self):
+ """
+ List of all [Certificate][] instances that this provider has published
+ for all related applications.
+ """
+ return [req.cert for req in self.all_requests if req.cert]
diff --git a/kubernetes-worker/hooks/relations/tls-certificates/pydocmd.yml b/kubernetes-worker/hooks/relations/tls-certificates/pydocmd.yml
new file mode 100644
index 0000000..c568913
--- /dev/null
+++ b/kubernetes-worker/hooks/relations/tls-certificates/pydocmd.yml
@@ -0,0 +1,19 @@
+site_name: 'TLS Certificates Interface'
+
+generate:
+ - requires.md:
+ - requires
+ - requires.TlsRequires+
+ - provides.md:
+ - provides
+ - provides.TlsProvides+
+ - common.md:
+ - tls_certificates_common.CertificateRequest+
+ - tls_certificates_common.Certificate+
+
+pages:
+ - Requires: requires.md
+ - Provides: provides.md
+ - Common: common.md
+
+gens_dir: docs
diff --git a/kubernetes-worker/hooks/relations/tls-certificates/requires.py b/kubernetes-worker/hooks/relations/tls-certificates/requires.py
new file mode 100644
index 0000000..951f953
--- /dev/null
+++ b/kubernetes-worker/hooks/relations/tls-certificates/requires.py
@@ -0,0 +1,342 @@
+if not __package__:
+ # fix relative imports when building docs
+ import sys
+ __package__ = sys.modules[''].__name__
+
+import uuid
+
+from charmhelpers.core import hookenv
+
+from charms.reactive import when, when_not
+from charms.reactive import set_flag, clear_flag, toggle_flag
+from charms.reactive import Endpoint
+from charms.reactive import data_changed
+
+from .tls_certificates_common import Certificate
+
+
+class TlsRequires(Endpoint):
+ """
+ The client's side of the interface protocol.
+
+ The following flags may be set:
+
+ * `{endpoint_name}.available`
+ Whenever the relation is joined.
+
+ * `{endpoint_name}.ca.available`
+ When the root CA information is available via the [root_ca_cert][] and
+ [root_ca_chain][] properties.
+
+ * `{endpoint_name}.ca.changed`
+ When the root CA information has changed, whether because
+ they have just become available or if they were regenerated by the CA.
+ Once processed this flag should be removed by the charm.
+
+ * `{endpoint_name}.certs.available`
+ When the requested server or client certs are available.
+
+ * `{endpoint_name}.certs.changed`
+ When the requested server or client certs have changed, whether because
+ they have just become available or if they were regenerated by the CA.
+ Once processed this flag should be removed by the charm.
+
+ * `{endpoint_name}.server.certs.available`
+ When the server certificates requested by [request_server_cert][] are
+ available via the [server_certs][] collection.
+
+ * `{endpoint_name}.server.certs.changed`
+ When the requested server certificates have changed, whether because
+ they have just become available or if they were regenerated by the CA.
+ Once processed this flag should be removed by the charm.
+
+ * `{endpoint_name}.client.certs.available`
+ When the client certificates requested by [request_client_cert][] are
+ available via the [client_certs][] collection.
+
+ * `{endpoint_name}.client.certs.changed`
+ When the requested client certificates have changed, whether because
+ they have just become available or if they were regenerated by the CA.
+ Once processed this flag should be removed by the charm.
+
+ The following flags have been deprecated:
+
+ * `{endpoint_name}.server.cert.available`
+ * `{endpoint_name}.client.cert.available`
+ * `{endpoint_name}.batch.cert.available`
+
+ [Certificate]: common.md#tls_certificates_common.Certificate
+ [CertificateRequest]: common.md#tls_certificates_common.CertificateRequest
+ [root_ca_cert]: requires.md#requires.TlsRequires.root_ca_cert
+ [root_ca_chain]: requires.md#requires.TlsRequires.root_ca_chain
+ [request_server_cert]: requires.md#requires.TlsRequires.request_server_cert
+ [request_client_cert]: requires.md#requires.TlsRequires.request_client_cert
+ [server_certs]: requires.md#requires.TlsRequires.server_certs
+ [server_certs_map]: requires.md#requires.TlsRequires.server_certs_map
+ [client_certs]: requires.md#requires.TlsRequires.client_certs
+ """
+
+ @when('endpoint.{endpoint_name}.joined')
+ def joined(self):
+ self.relations[0].to_publish_raw['unit_name'] = self._unit_name
+ prefix = self.expand_name('{endpoint_name}.')
+ ca_available = self.root_ca_cert
+ ca_changed = ca_available and data_changed(prefix + 'ca',
+ self.root_ca_cert)
+ server_available = self.server_certs
+ server_changed = server_available and data_changed(prefix + 'servers',
+ self.server_certs)
+ client_available = self.client_certs
+ client_changed = client_available and data_changed(prefix + 'clients',
+ self.client_certs)
+ certs_available = server_available or client_available
+ certs_changed = server_changed or client_changed
+
+ set_flag(prefix + 'available')
+ toggle_flag(prefix + 'ca.available', ca_available)
+ toggle_flag(prefix + 'ca.changed', ca_changed)
+ toggle_flag(prefix + 'server.certs.available', server_available)
+ toggle_flag(prefix + 'server.certs.changed', server_changed)
+ toggle_flag(prefix + 'client.certs.available', client_available)
+ toggle_flag(prefix + 'client.certs.changed', client_changed)
+ toggle_flag(prefix + 'certs.available', certs_available)
+ toggle_flag(prefix + 'certs.changed', certs_changed)
+ # deprecated
+ toggle_flag(prefix + 'server.cert.available', self.server_certs)
+ toggle_flag(prefix + 'client.cert.available', self.get_client_cert())
+ toggle_flag(prefix + 'batch.cert.available', self.server_certs)
+
+ @when_not('endpoint.{endpoint_name}.joined')
+ def broken(self):
+ prefix = self.expand_name('{endpoint_name}.')
+ clear_flag(prefix + 'available')
+ clear_flag(prefix + 'ca.available')
+ clear_flag(prefix + 'ca.changed')
+ clear_flag(prefix + 'server.certs.available')
+ clear_flag(prefix + 'server.certs.changed')
+ clear_flag(prefix + 'client.certs.available')
+ clear_flag(prefix + 'client.certs.changed')
+ clear_flag(prefix + 'certs.available')
+ clear_flag(prefix + 'certs.changed')
+ # deprecated
+ clear_flag(prefix + 'server.cert.available')
+ clear_flag(prefix + 'client.cert.available')
+ clear_flag(prefix + 'batch.cert.available')
+
+ @property
+ def _unit_name(self):
+ return hookenv.local_unit().replace('/', '_')
+
+ @property
+ def root_ca_cert(self):
+ """
+ Root CA certificate.
+ """
+ # only the leader of the provider should set the CA, or all units
+ # had better agree
+ return self.all_joined_units.received_raw['ca']
+
+ def get_ca(self):
+ """
+ Return the root CA certificate.
+
+ Same as [root_ca_cert][].
+ """
+ return self.root_ca_cert
+
+ @property
+ def root_ca_chain(self):
+ """
+ The chain of trust for the root CA.
+ """
+ # only the leader of the provider should set the CA, or all units
+ # had better agree
+ return self.all_joined_units.received_raw['chain']
+
+ def get_chain(self):
+ """
+ Return the chain of trust for the root CA.
+
+ Same as [root_ca_chain][].
+ """
+ return self.root_ca_chain
+
+ def get_client_cert(self):
+ """
+ Deprecated. Use [request_client_cert][] and the [client_certs][]
+ collection instead.
+
+ Return a globally shared client certificate and key.
+ """
+ data = self.all_joined_units.received_raw
+ return (data['client.cert'], data['client.key'])
+
+ def get_server_cert(self):
+ """
+ Deprecated. Use the [server_certs][] collection instead.
+
+ Return the cert and key of the first server certificate requested.
+ """
+ if not self.server_certs:
+ return (None, None)
+ cert = self.server_certs[0]
+ return (cert.cert, cert.key)
+
+ @property
+ def server_certs(self):
+ """
+ List of [Certificate][] instances for all available server certs.
+ """
+ certs = []
+ raw_data = self.all_joined_units.received_raw
+ json_data = self.all_joined_units.received
+
+ # for backwards compatibility, the first cert goes in its own fields
+ if self.relations:
+ common_name = self.relations[0].to_publish_raw['common_name']
+ cert = raw_data['{}.server.cert'.format(self._unit_name)]
+ key = raw_data['{}.server.key'.format(self._unit_name)]
+ if cert and key:
+ certs.append(Certificate('server',
+ common_name,
+ cert,
+ key))
+
+ # subsequent requests go in the collection
+ field = '{}.processed_requests'.format(self._unit_name)
+ certs_data = json_data[field] or {}
+ certs.extend(Certificate('server',
+ common_name,
+ cert['cert'],
+ cert['key'])
+ for common_name, cert in certs_data.items())
+ return certs
+
+ @property
+ def application_certs(self):
+ """
+ List containing the application Certificate cert.
+
+ :returns: A list containing one certificate
+ :rtype: [Certificate()]
+ """
+ certs = []
+ json_data = self.all_joined_units.received
+ field = '{}.processed_application_requests'.format(self._unit_name)
+ certs_data = json_data[field] or {}
+ app_cert_data = certs_data.get('app_data')
+ if app_cert_data:
+ certs = [Certificate(
+ 'server',
+ 'app_data',
+ app_cert_data['cert'],
+ app_cert_data['key'])]
+ return certs
+
+ @property
+ def server_certs_map(self):
+ """
+ Mapping of server [Certificate][] instances by their `common_name`.
+ """
+ return {cert.common_name: cert for cert in self.server_certs}
+
+ def get_batch_requests(self):
+ """
+ Deprecated. Use [server_certs_map][] instead.
+
+ Mapping of server [Certificate][] instances by their `common_name`.
+ """
+ return self.server_certs_map
+
+ @property
+ def client_certs(self):
+ """
+ List of [Certificate][] instances for all available client certs.
+ """
+ field = '{}.processed_client_requests'.format(self._unit_name)
+ certs_data = self.all_joined_units.received[field] or {}
+ return [Certificate('client',
+ common_name,
+ cert['cert'],
+ cert['key'])
+ for common_name, cert in certs_data.items()]
+
+ @property
+ def client_certs_map(self):
+ """
+ Mapping of client [Certificate][] instances by their `common_name`.
+ """
+ return {cert.common_name: cert for cert in self.client_certs}
+
+ def request_server_cert(self, cn, sans=None, cert_name=None):
+ """
+ Request a server certificate and key be generated for the given
+ common name (`cn`) and optional list of alternative names (`sans`).
+
+ The `cert_name` is deprecated and not needed.
+
+ This can be called multiple times to request more than one server
+ certificate, although the common names must be unique. If called
+ again with the same common name, it will be ignored.
+ """
+ if not self.relations:
+ return
+ # assume we'll only be connected to one provider
+ to_publish_json = self.relations[0].to_publish
+ to_publish_raw = self.relations[0].to_publish_raw
+ if to_publish_raw['common_name'] in (None, '', cn):
+ # for backwards compatibility, first request goes in its own fields
+ to_publish_raw['common_name'] = cn
+ to_publish_json['sans'] = sans or []
+ cert_name = to_publish_raw.get('certificate_name') or cert_name
+ if cert_name is None:
+ cert_name = str(uuid.uuid4())
+ to_publish_raw['certificate_name'] = cert_name
+ else:
+ # subsequent requests go in the collection
+ requests = to_publish_json.get('cert_requests', {})
+ requests[cn] = {'sans': sans or []}
+ to_publish_json['cert_requests'] = requests
+
+ def add_request_server_cert(self, cn, sans):
+ """
+ Deprecated. Use [request_server_cert][] instead.
+ """
+ self.request_server_cert(cn, sans)
+
+ def request_server_certs(self):
+ """
+ Deprecated. Just use [request_server_cert][]; this does nothing.
+ """
+ pass
+
+ def request_client_cert(self, cn, sans):
+ """
+ Request a client certificate and key be generated for the given
+ common name (`cn`) and list of alternative names (`sans`).
+
+ This can be called multiple times to request more than one client
+ certificate, although the common names must be unique. If called
+ again with the same common name, it will be ignored.
+ """
+ if not self.relations:
+ return
+ # assume we'll only be connected to one provider
+ to_publish_json = self.relations[0].to_publish
+ requests = to_publish_json.get('client_cert_requests', {})
+ requests[cn] = {'sans': sans}
+ to_publish_json['client_cert_requests'] = requests
+
+ def request_application_cert(self, cn, sans):
+ """
+ Request an application certificate and key be generated for the given
+ common name (`cn`) and list of alternative names (`sans`) of this
+ unit and all peer units. All units will share a single certificate.
+ """
+ if not self.relations:
+ return
+ # assume we'll only be connected to one provider
+ to_publish_json = self.relations[0].to_publish
+ requests = to_publish_json.get('application_cert_requests', {})
+ requests[cn] = {'sans': sans}
+ to_publish_json['application_cert_requests'] = requests
diff --git a/kubernetes-worker/hooks/relations/tls-certificates/tls_certificates_common.py b/kubernetes-worker/hooks/relations/tls-certificates/tls_certificates_common.py
new file mode 100644
index 0000000..99a2f8c
--- /dev/null
+++ b/kubernetes-worker/hooks/relations/tls-certificates/tls_certificates_common.py
@@ -0,0 +1,302 @@
+from charms.reactive import clear_flag, is_data_changed, data_changed
+
+
+class CertificateRequest(dict):
+ """
+ A single certificate request ('server' or 'client') made by a related
+ unit. The dict contents hold the request data; properties expose the
+ derived relation keys and any certificate already published for it.
+ """
+ def __init__(self, unit, cert_type, cert_name, common_name, sans):
+ self._unit = unit
+ self._cert_type = cert_type
+ super().__init__({
+ 'certificate_name': cert_name,
+ 'common_name': common_name,
+ 'sans': sans,
+ })
+
+ @property
+ def _key(self):
+ # identity used with (is_)data_changed to track SAN changes:
+ # relation id + unit name + common name
+ return '.'.join((self._unit.relation.relation_id,
+ self.unit_name,
+ self.common_name))
+
+ def resolve_unit_name(self, unit):
+ """Return name of unit associated with this request.
+
+ unit_name should be provided in the relation data to ensure
+ compatibility with cross-model relations. If the unit name
+ is absent then fall back to unit_name attribute of the
+ unit associated with this request.
+
+ :param unit: Unit to extract name from
+ :type unit: charms.reactive.endpoints.RelatedUnit
+ :returns: Name of unit
+ :rtype: str
+ """
+ unit_name = unit.received_raw['unit_name']
+ if not unit_name:
+ unit_name = unit.unit_name
+ return unit_name
+
+ @property
+ def unit_name(self):
+ """Name of this unit.
+
+ Slashes are replaced so the name is safe to use in relation keys.
+
+ :returns: Name of unit
+ :rtype: str
+ """
+ return self.resolve_unit_name(unit=self._unit).replace('/', '_')
+
+ @property
+ def application_name(self):
+ """Name of the application which the request came from.
+
+ :returns: Name of application
+ :rtype: str
+ """
+ return self.resolve_unit_name(unit=self._unit).split('/')[0]
+
+ @property
+ def cert_type(self):
+ """
+ Type of certificate, 'server' or 'client', being requested.
+ """
+ return self._cert_type
+
+ @property
+ def cert_name(self):
+ # the requested certificate name from the relation data
+ return self['certificate_name']
+
+ @property
+ def common_name(self):
+ # the requested common name (CN) from the relation data
+ return self['common_name']
+
+ @property
+ def sans(self):
+ # the requested subject alternative names from the relation data
+ return self['sans']
+
+ @property
+ def _publish_key(self):
+ # relation key under which processed certs for this unit are published
+ if self.cert_type == 'server':
+ return '{}.processed_requests'.format(self.unit_name)
+ elif self.cert_type == 'client':
+ return '{}.processed_client_requests'.format(self.unit_name)
+ raise ValueError('Unknown cert_type: {}'.format(self.cert_type))
+
+ @property
+ def _server_cert_key(self):
+ return '{}.server.cert'.format(self.unit_name)
+
+ @property
+ def _server_key_key(self):
+ return '{}.server.key'.format(self.unit_name)
+
+ @property
+ def _is_top_level_server_cert(self):
+ # legacy single server cert: requested via the unit's top-level
+ # 'common_name' field rather than the 'cert_requests' collection
+ return (self.cert_type == 'server' and
+ self.common_name == self._unit.received_raw['common_name'])
+
+ @property
+ def cert(self):
+ """
+ The cert published for this request, if any.
+ """
+ cert, key = None, None
+ if self._is_top_level_server_cert:
+ # legacy location: raw top-level keys on the relation
+ tpr = self._unit.relation.to_publish_raw
+ cert = tpr[self._server_cert_key]
+ key = tpr[self._server_key_key]
+ else:
+ tp = self._unit.relation.to_publish
+ certs_data = tp.get(self._publish_key, {})
+ cert_data = certs_data.get(self.common_name, {})
+ cert = cert_data.get('cert')
+ key = cert_data.get('key')
+ if cert and key:
+ return Certificate(self.cert_type, self.common_name, cert, key)
+ return None
+
+ @property
+ def is_handled(self):
+ # handled only if a cert was published and the requested SANs have
+ # not changed since it was issued
+ has_cert = self.cert is not None
+ same_sans = not is_data_changed(self._key,
+ sorted(set(self.sans or [])))
+ return has_cert and same_sans
+
+ def set_cert(self, cert, key):
+ """Publish the issued cert and key back on the relation."""
+ rel = self._unit.relation
+ if self._is_top_level_server_cert:
+ # backwards compatibility; if this is the cert that was requested
+ # as a single server cert, set it in the response as the single
+ # server cert
+ rel.to_publish_raw.update({
+ self._server_cert_key: cert,
+ self._server_key_key: key,
+ })
+ else:
+ data = rel.to_publish.get(self._publish_key, {})
+ data[self.common_name] = {
+ 'cert': cert,
+ 'key': key,
+ }
+ rel.to_publish[self._publish_key] = data
+ if not rel.endpoint.new_server_requests:
+ clear_flag(rel.endpoint.expand_name('{endpoint_name}.server'
+ '.cert.requested'))
+ if not rel.endpoint.new_requests:
+ clear_flag(rel.endpoint.expand_name('{endpoint_name}.'
+ 'certs.requested'))
+ data_changed(self._key, sorted(set(self.sans or [])))
+
+
+class ApplicationCertificateRequest(CertificateRequest):
+ """
+ A request for an application consistent certificate.
+
+ This is a request for a certificate that works for all units of an
+ application. All sans and cns are added together to produce one
+ certificate and the same certificate and key are sent to all the
+ units of an application. Only one ApplicationCertificateRequest
+ is needed per application.
+ """
+
+ @property
+ def _key(self):
+ """Key to identify this cert.
+
+ :returns: cert key
+ :rtype: str
+ """
+ return '{}.{}'.format(self._unit.relation.relation_id, 'app_cert')
+
+ @property
+ def cert(self):
+ """
+ The cert published for this request, if any.
+
+ :returns: Certificate
+ :rtype: Certificate or None
+ """
+ cert, key = None, None
+ tp = self._unit.relation.to_publish
+ certs_data = tp.get(self._publish_key, {})
+ cert_data = certs_data.get('app_data', {})
+ cert = cert_data.get('cert')
+ key = cert_data.get('key')
+ if cert and key:
+ return Certificate(self.cert_type, self.common_name, cert, key)
+ return None
+
+ @property
+ def is_handled(self):
+ """Whether the certificate has been handled.
+
+ :returns: If the cert has been handled
+ :rtype: bool
+ """
+ has_cert = self.cert is not None
+ same_sans = not is_data_changed(self._key,
+ sorted(set(self.sans or [])))
+ return has_cert and same_sans
+
+ @property
+ def sans(self):
+ """Generate a list of all sans from all units of application
+
+ Examine all units of the application and compile a list of
+ all sans. CNs are treated as additional SAN entries.
+
+ :returns: List of sans
+ :rtype: List[str]
+ """
+ _sans = []
+ for unit in self._unit.relation.units:
+ reqs = unit.received['application_cert_requests'] or {}
+ for cn, req in reqs.items():
+ _sans.append(cn)
+ _sans.extend(req['sans'])
+ return sorted(list(set(_sans)))
+
+ @property
+ def _request_key(self):
+ """Key used to request cert
+
+ :returns: Key used to request cert
+ :rtype: str
+ """
+ return 'application_cert_requests'
+
+ def derive_publish_key(self, unit=None):
+ """Derive the application cert publish key for a unit.
+
+ :param unit: Unit to extract name from
+ :type unit: charms.reactive.endpoints.RelatedUnit
+ :returns: publish key
+ :rtype: str
+ """
+ if not unit:
+ unit = self._unit
+ unit_name = self.resolve_unit_name(unit).replace('/', '_')
+ return '{}.processed_application_requests'.format(unit_name)
+
+ @property
+ def _publish_key(self):
+ """Key used to publish cert
+
+ :returns: Key used to publish cert
+ :rtype: str
+ """
+ return self.derive_publish_key(unit=self._unit)
+
+ def set_cert(self, cert, key):
+ """Send the cert and key to all units of the application
+
+ :param cert: TLS Certificate
+ :type cert: str
+ :param key: TLS Private Key
+ :type key: str
+ """
+ rel = self._unit.relation
+ for unit in self._unit.relation.units:
+ # publish under each unit's own key so every unit of the
+ # application receives the same cert and key
+ pub_key = self.derive_publish_key(unit=unit)
+ data = rel.to_publish.get(
+ pub_key,
+ {})
+ data['app_data'] = {
+ 'cert': cert,
+ 'key': key,
+ }
+ rel.to_publish[pub_key] = data
+ if not rel.endpoint.new_application_requests:
+ clear_flag(rel.endpoint.expand_name(
+ '{endpoint_name}.application.certs.requested'))
+ data_changed(self._key, sorted(set(self.sans or [])))
+
+
+class Certificate(dict):
+ """
+ Represents an issued certificate and its private key.
+
+ The ``cert_type``, ``common_name``, ``cert``, and ``key`` values can
+ be accessed either as properties or as the contents of the dict.
+ """
+ def __init__(self, cert_type, common_name, cert, key):
+ super().__init__({
+ 'cert_type': cert_type,
+ 'common_name': common_name,
+ 'cert': cert,
+ 'key': key,
+ })
+
+ @property
+ def cert_type(self):
+ # 'server' or 'client'
+ return self['cert_type']
+
+ @property
+ def common_name(self):
+ return self['common_name']
+
+ @property
+ def cert(self):
+ # certificate text as published by the provider (presumably PEM --
+ # NOTE(review): confirm encoding with the provider charm)
+ return self['cert']
+
+ @property
+ def key(self):
+ # matching private key as published by the provider
+ return self['key']
diff --git a/kubernetes-worker/hooks/relations/vsphere-integration/.gitignore b/kubernetes-worker/hooks/relations/vsphere-integration/.gitignore
new file mode 100644
index 0000000..5f9f2c5
--- /dev/null
+++ b/kubernetes-worker/hooks/relations/vsphere-integration/.gitignore
@@ -0,0 +1,3 @@
+.tox
+__pycache__
+*.pyc
diff --git a/kubernetes-worker/hooks/relations/vsphere-integration/LICENSE b/kubernetes-worker/hooks/relations/vsphere-integration/LICENSE
new file mode 100644
index 0000000..d645695
--- /dev/null
+++ b/kubernetes-worker/hooks/relations/vsphere-integration/LICENSE
@@ -0,0 +1,202 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/kubernetes-worker/hooks/relations/vsphere-integration/README.md b/kubernetes-worker/hooks/relations/vsphere-integration/README.md
new file mode 100644
index 0000000..28ff438
--- /dev/null
+++ b/kubernetes-worker/hooks/relations/vsphere-integration/README.md
@@ -0,0 +1,28 @@
+# Overview
+
+This layer encapsulates the `vsphere-integration` interface communication
+protocol and provides an API for charms on either side of relations using this
+interface.
+
+## Usage
+
+In your charm's `layer.yaml`, ensure that `interface:vsphere-integration` is
+included in the `includes` section:
+
+```yaml
+includes: ['layer:basic', 'interface:vsphere-integration']
+```
+
+And in your charm's `metadata.yaml`, ensure that a relation endpoint is defined
+using the `vsphere-integration` interface protocol:
+
+```yaml
+requires:
+ vsphere:
+ interface: vsphere-integration
+```
+
+For documentation on how to use the API for this interface, see:
+
+* [Requires API documentation](docs/requires.md)
+* [Provides API documentation](docs/provides.md) (this will only be used by the vsphere-integrator charm)
diff --git a/kubernetes-worker/hooks/relations/vsphere-integration/__init__.py b/kubernetes-worker/hooks/relations/vsphere-integration/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/kubernetes-worker/hooks/relations/vsphere-integration/copyright b/kubernetes-worker/hooks/relations/vsphere-integration/copyright
new file mode 100644
index 0000000..a91bdf1
--- /dev/null
+++ b/kubernetes-worker/hooks/relations/vsphere-integration/copyright
@@ -0,0 +1,16 @@
+Format: http://dep.debian.net/deps/dep5/
+
+Files: *
+Copyright: Copyright 2018, Canonical Ltd., All Rights Reserved.
+License: Apache License 2.0
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+ .
+ http://www.apache.org/licenses/LICENSE-2.0
+ .
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/kubernetes-worker/hooks/relations/vsphere-integration/docs/provides.md b/kubernetes-worker/hooks/relations/vsphere-integration/docs/provides.md
new file mode 100644
index 0000000..796b7e6
--- /dev/null
+++ b/kubernetes-worker/hooks/relations/vsphere-integration/docs/provides.md
@@ -0,0 +1,74 @@
+
+# provides
+
+
+This is the provides side of the interface layer, for use only by the
+vSphere integration charm itself.
+
+The flags that are set by the provides side of this interface are:
+
+* **`endpoint.{endpoint_name}.requests-pending`** This flag is set when there is
+ a new or updated request by a remote unit for vSphere integration
+ features. The vSphere integration charm should then iterate over each
+ request, perform whatever actions are necessary to satisfy those requests,
+ and then mark them as complete.
+
+
+
+
+A list of the new or updated `IntegrationRequests` that
+have been made.
+
+
+### mark_completed
+
+```python
+VsphereIntegrationProvides.mark_completed(self)
+```
+
+Remove the `requests-pending` flag, indicating the current requests are handled.
+
+
+## IntegrationRequest
+
+```python
+IntegrationRequest(self, unit)
+```
+
+A request for integration from a single remote unit.
+
+
+### has_credentials
+
+
+Whether or not credentials have been set via `set_credentials`.
+
+
+### is_changed
+
+
+Whether this request has changed since the last time it was
+marked completed (if ever).
+
+
+### set_credentials
+
+```python
+IntegrationRequest.set_credentials(self, vsphere_ip, user, password, datacenter)
+```
+
+Set the credentials for this request.
diff --git a/kubernetes-worker/hooks/relations/vsphere-integration/docs/requires.md b/kubernetes-worker/hooks/relations/vsphere-integration/docs/requires.md
new file mode 100644
index 0000000..0ce10a9
--- /dev/null
+++ b/kubernetes-worker/hooks/relations/vsphere-integration/docs/requires.md
@@ -0,0 +1,56 @@
+
+# requires
+
+
+This is the requires side of the interface layer, for use in charms that wish
+to request integration with vSphere native features. The integration will be
+provided by the vSphere integration charm, which allows the requiring charm
+to not require cloud credentials itself and not have a lot of vSphere
+specific API code.
+
+The flags that are set by the requires side of this interface are:
+
+* **`endpoint.{endpoint_name}.joined`** This flag is set when the relation
+ has been joined, and the charm should then use the methods documented below
+ to request specific vSphere features. This flag is automatically removed
+ if the relation is broken. It should not be removed by the charm.
+
+* **`endpoint.{endpoint_name}.ready`** This flag is set once the requested
+ features have been enabled for the vSphere instance on which the charm is
+ running. This flag is automatically removed if new integration features are
+ requested. It should not be removed by the charm.
+
+
+## VsphereIntegrationRequires
+
+```python
+VsphereIntegrationRequires(self, endpoint_name, relation_ids=None)
+```
+
+Interface to request integration access.
+
+Note that due to resource limits and permissions granularity, policies are
+limited to being applied at the charm level. That means that, if any
+permissions are requested (i.e., any of the enable methods are called),
+what is granted will be the sum of those ever requested by any instance of
+the charm on this cloud.
+
+Labels, on the other hand, will be instance specific.
+
+Example usage:
+
+```python
+from charms.reactive import when, endpoint_from_flag
+
+@when('endpoint.vsphere.ready')
+def vsphere_integration_ready():
+ vsphere = endpoint_from_flag('endpoint.vsphere.joined')
+ update_config_enable_vsphere(vsphere.vsphere_ip,
+ vsphere.user,
+ vsphere.password,
+ vsphere.datacenter,
+ vsphere.datastore)
+```
+
+
+### is_ready
+
+
+Whether or not the request for this instance has been completed.
diff --git a/kubernetes-worker/hooks/relations/vsphere-integration/interface.yaml b/kubernetes-worker/hooks/relations/vsphere-integration/interface.yaml
new file mode 100644
index 0000000..c4c0c07
--- /dev/null
+++ b/kubernetes-worker/hooks/relations/vsphere-integration/interface.yaml
@@ -0,0 +1,4 @@
+name: vsphere-integration
+summary: Interface for connecting to the VMware vSphere integrator charm.
+version: 1
+maintainer: Kevin Monroe
diff --git a/kubernetes-worker/hooks/relations/vsphere-integration/make_docs b/kubernetes-worker/hooks/relations/vsphere-integration/make_docs
new file mode 100644
index 0000000..04cf35b
--- /dev/null
+++ b/kubernetes-worker/hooks/relations/vsphere-integration/make_docs
@@ -0,0 +1,20 @@
+#!.tox/py3/bin/python
+
+import sys
+from shutil import rmtree
+from unittest.mock import patch
+
+import pydocmd.__main__
+
+
+# NOTE(review): hookenv.metadata is stubbed out, presumably so the interface
+# modules can be imported outside of a deployed charm -- confirm if changing.
+with patch('charmhelpers.core.hookenv.metadata') as metadata:
+ metadata.return_value = {
+ 'requires': {'vsphere': {'interface': 'vsphere'}},
+ 'provides': {'vsphere': {'interface': 'vsphere'}},
+ }
+ sys.path.insert(0, '.')
+ print(sys.argv)
+ if len(sys.argv) == 1:
+ # default to the 'build' command when none was given
+ sys.argv.extend(['build'])
+ pydocmd.__main__.main()
+ # clean up pydocmd's intermediate build directory
+ rmtree('_build')
diff --git a/kubernetes-worker/hooks/relations/vsphere-integration/provides.py b/kubernetes-worker/hooks/relations/vsphere-integration/provides.py
new file mode 100644
index 0000000..c3db1d8
--- /dev/null
+++ b/kubernetes-worker/hooks/relations/vsphere-integration/provides.py
@@ -0,0 +1,132 @@
+"""
+This is the provides side of the interface layer, for use only by the
+vSphere integration charm itself.
+
+The flags that are set by the provides side of this interface are:
+
+* **`endpoint.{endpoint_name}.requests-pending`** This flag is set when
+ there is a new or updated request by a remote unit for vSphere integration
+ features. The vSphere integration charm should then iterate over each
+ request, perform whatever actions are necessary to satisfy those requests,
+ and then mark them as complete.
+"""
+
+from operator import attrgetter
+
+from charms.reactive import Endpoint
+from charms.reactive import when
+from charms.reactive import toggle_flag, clear_flag
+
+
+class VsphereIntegrationProvides(Endpoint):
+ """
+ Example usage:
+
+ ```python
+ from charms.reactive import when, endpoint_from_flag
+ from charms import layer
+
+ @when('endpoint.vsphere.requests-pending')
+ def handle_requests():
+ vsphere = endpoint_from_flag('endpoint.vsphere.requests-pending')
+ for request in vsphere.new_requests:
+ request.set_credentials(layer.vsphere.get_vsphere_credentials())
+ request.set_config(layer.vsphere.get_vsphere_config())
+ vsphere.mark_completed()
+ ```
+ """
+
+ @when('endpoint.{endpoint_name}.changed')
+ def check_requests(self):
+ # re-evaluate whenever relation data changes; requests-pending is
+ # set iff at least one request is new or incomplete
+ toggle_flag(self.expand_name('requests-pending'),
+ len(self.new_requests) > 0)
+ clear_flag(self.expand_name('changed'))
+
+ @property
+ def all_requests(self):
+ """
+ A list of all the #IntegrationRequests that have been made.
+ """
+ return [IntegrationRequest(unit) for unit in self.all_joined_units]
+
+ @property
+ def new_requests(self):
+ """
+ A list of the new or updated #IntegrationRequests that have been made.
+ """
+ is_changed = attrgetter('is_changed')
+ return list(filter(is_changed, self.all_requests))
+
+ def mark_completed(self):
+ """
+ Remove the `requests-pending` flag.
+ """
+ clear_flag(self.expand_name('requests-pending'))
+
+
+class IntegrationRequest:
+ """
+ A request for integration from a single remote unit.
+ """
+ def __init__(self, unit):
+ self._unit = unit
+
+ @property
+ def _to_publish(self):
+ # relation data this side publishes back to the requesting unit
+ return self._unit.relation.to_publish
+
+ @property
+ def has_credentials(self):
+ """
+ Whether or not `set_credentials` has been called.
+ """
+ return {'vsphere_ip', 'user',
+ 'password', 'datacenter'}.issubset(self._to_publish)
+
+ @property
+ def has_config(self):
+ """
+ Whether or not `set_config` has been called.
+ """
+ return {'datastore', 'folder',
+ 'respool_path'}.issubset(self._to_publish)
+
+ @property
+ def is_changed(self):
+ """
+ Whether this request has changed since the last time it was
+ marked completed (if ever).
+ """
+ # a request is outstanding until both the credentials and the
+ # non-credential config have been published
+ return not (self.has_credentials and self.has_config)
+
+ @property
+ def unit_name(self):
+ """Name of the remote unit that made this request."""
+ return self._unit.unit_name
+
+ def set_credentials(self,
+ vsphere_ip,
+ user,
+ password,
+ datacenter):
+ """
+ Set the vsphere credentials for this request.
+ """
+ self._to_publish.update({
+ 'vsphere_ip': vsphere_ip,
+ 'user': user,
+ 'password': password,
+ 'datacenter': datacenter,
+ })
+
+ def set_config(self,
+ datastore,
+ folder,
+ respool_path):
+ """
+ Set the non-credential vsphere config for this request.
+ """
+ self._to_publish.update({
+ 'datastore': datastore,
+ 'folder': folder,
+ 'respool_path': respool_path,
+ })
diff --git a/kubernetes-worker/hooks/relations/vsphere-integration/pydocmd.yml b/kubernetes-worker/hooks/relations/vsphere-integration/pydocmd.yml
new file mode 100644
index 0000000..e1d5d4a
--- /dev/null
+++ b/kubernetes-worker/hooks/relations/vsphere-integration/pydocmd.yml
@@ -0,0 +1,16 @@
+site_name: 'VMware vSphere Integration Interface'
+
+generate:
+ - requires.md:
+ - requires
+ - requires.VsphereIntegrationRequires+
+ - provides.md:
+ - provides
+ - provides.VsphereIntegrationProvides+
+ - provides.IntegrationRequest+
+
+pages:
+ - Requires: requires.md
+ - Provides: provides.md
+
+gens_dir: docs
diff --git a/kubernetes-worker/hooks/relations/vsphere-integration/requires.py b/kubernetes-worker/hooks/relations/vsphere-integration/requires.py
new file mode 100644
index 0000000..d8b9cdb
--- /dev/null
+++ b/kubernetes-worker/hooks/relations/vsphere-integration/requires.py
@@ -0,0 +1,141 @@
+"""
+This is the requires side of the interface layer, for use in charms that wish
+to request integration with vSphere native features. The integration will be
+provided by the vSphere integration charm, which allows the requiring charm
+to not require cloud credentials itself and not have a lot of vSphere
+specific API code.
+
+The flags that are set by the requires side of this interface are:
+
+* **`endpoint.{endpoint_name}.joined`** This flag is set when the relation
+ has been joined, and the charm should then use the methods documented below
+ to request specific vSphere features. This flag is automatically removed
+ if the relation is broken. It should not be removed by the charm.
+
+* **`endpoint.{endpoint_name}.ready`** This flag is set once the requested
+ features have been enabled for the vSphere instance on which the charm is
+ running. This flag is automatically removed if new integration features are
+ requested. It should not be removed by the charm.
+
+* **`endpoint.{endpoint_name}.ready.changed`** This flag is set if the data
+ changes after the ready flag was set. This flag should be removed by the
+ charm once handled.
+"""
+
+
+from charms.reactive import Endpoint
+from charms.reactive import when, when_not
+from charms.reactive import clear_flag, is_flag_set, set_flag, toggle_flag
+from charms.reactive import data_changed
+
+
+class VsphereIntegrationRequires(Endpoint):
+ """
+ Interface to request integration access.
+
+ Note that due to resource limits and permissions granularity, policies are
+ limited to being applied at the charm level. That means that, if any
+ permissions are requested (i.e., any of the enable methods are called),
+ what is granted will be the sum of those ever requested by any instance of
+ the charm on this cloud.
+
+ Labels, on the other hand, will be instance specific.
+
+ Example usage:
+
+ ```python
+ from charms.reactive import when, endpoint_from_flag
+
+ @when('endpoint.vsphere.ready')
+ def vsphere_integration_ready():
+ vsphere = endpoint_from_flag('endpoint.vsphere.joined')
+ update_config_enable_vsphere(vsphere.vsphere_ip,
+ vsphere.user,
+ vsphere.password,
+ vsphere.datacenter,
+ vsphere.datastore,
+ vsphere.folder,
+ vsphere.respool_path)
+ ```
+ """
+
+ @property
+ def _received(self):
+ """
+ Helper to streamline access to received data.
+ """
+ return self.all_joined_units.received
+
+ @when('endpoint.{endpoint_name}.changed')
+ def check_ready(self):
+ """
+ Manage flags to signal when the endpoint is ready as well as noting
+ if changes have been made since it became ready.
+ """
+ was_ready = is_flag_set(self.expand_name('ready'))
+ toggle_flag(self.expand_name('ready'), self.is_ready)
+ if self.is_ready and was_ready and self.is_changed:
+ set_flag(self.expand_name('ready.changed'))
+ clear_flag(self.expand_name('changed'))
+
+ @when_not('endpoint.{endpoint_name}.joined')
+ def remove_ready(self):
+ clear_flag(self.expand_name('ready'))
+
+ @property
+ def is_ready(self):
+ """
+ Whether or not the request for this instance has been completed.
+ """
+ return all(field is not None for field in [
+ self.vsphere_ip,
+ self.user,
+ self.password,
+ self.datacenter,
+ self.datastore,
+ self.folder,
+ self.respool_path,
+ ])
+
+ @property
+ def is_changed(self):
+ """
+ Whether or not the request for this instance has changed.
+ """
+ return data_changed(self.expand_name('all-data'), [
+ self.vsphere_ip,
+ self.user,
+ self.password,
+ self.datacenter,
+ self.datastore,
+ self.folder,
+ self.respool_path,
+ ])
+
+ @property
+ def vsphere_ip(self):
+ return self._received['vsphere_ip']
+
+ @property
+ def user(self):
+ return self._received['user']
+
+ @property
+ def password(self):
+ return self._received['password']
+
+ @property
+ def datacenter(self):
+ return self._received['datacenter']
+
+ @property
+ def datastore(self):
+ return self._received['datastore']
+
+ @property
+ def folder(self):
+ return self._received['folder']
+
+ @property
+ def respool_path(self):
+ return self._received['respool_path']
diff --git a/kubernetes-worker/hooks/start b/kubernetes-worker/hooks/start
new file mode 100755
index 0000000..9858c6b
--- /dev/null
+++ b/kubernetes-worker/hooks/start
@@ -0,0 +1,22 @@
+#!/usr/bin/env python3
+
+# Load modules from $JUJU_CHARM_DIR/lib
+import sys
+sys.path.append('lib')
+
+from charms.layer import basic # noqa
+basic.bootstrap_charm_deps()
+
+from charmhelpers.core import hookenv # noqa
+hookenv.atstart(basic.init_config_states)
+hookenv.atexit(basic.clear_config_states)
+
+
+# This will load and run the appropriate @hook and other decorated
+# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive,
+# and $JUJU_CHARM_DIR/hooks/relations.
+#
+# See https://jujucharms.com/docs/stable/authors-charm-building
+# for more information on this pattern.
+from charms.reactive import main # noqa
+main()
diff --git a/kubernetes-worker/hooks/stop b/kubernetes-worker/hooks/stop
new file mode 100755
index 0000000..9858c6b
--- /dev/null
+++ b/kubernetes-worker/hooks/stop
@@ -0,0 +1,22 @@
+#!/usr/bin/env python3
+
+# Load modules from $JUJU_CHARM_DIR/lib
+import sys
+sys.path.append('lib')
+
+from charms.layer import basic # noqa
+basic.bootstrap_charm_deps()
+
+from charmhelpers.core import hookenv # noqa
+hookenv.atstart(basic.init_config_states)
+hookenv.atexit(basic.clear_config_states)
+
+
+# This will load and run the appropriate @hook and other decorated
+# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive,
+# and $JUJU_CHARM_DIR/hooks/relations.
+#
+# See https://jujucharms.com/docs/stable/authors-charm-building
+# for more information on this pattern.
+from charms.reactive import main # noqa
+main()
diff --git a/kubernetes-worker/hooks/update-status b/kubernetes-worker/hooks/update-status
new file mode 100755
index 0000000..9858c6b
--- /dev/null
+++ b/kubernetes-worker/hooks/update-status
@@ -0,0 +1,22 @@
+#!/usr/bin/env python3
+
+# Load modules from $JUJU_CHARM_DIR/lib
+import sys
+sys.path.append('lib')
+
+from charms.layer import basic # noqa
+basic.bootstrap_charm_deps()
+
+from charmhelpers.core import hookenv # noqa
+hookenv.atstart(basic.init_config_states)
+hookenv.atexit(basic.clear_config_states)
+
+
+# This will load and run the appropriate @hook and other decorated
+# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive,
+# and $JUJU_CHARM_DIR/hooks/relations.
+#
+# See https://jujucharms.com/docs/stable/authors-charm-building
+# for more information on this pattern.
+from charms.reactive import main # noqa
+main()
diff --git a/kubernetes-worker/hooks/upgrade-charm b/kubernetes-worker/hooks/upgrade-charm
new file mode 100755
index 0000000..9858c6b
--- /dev/null
+++ b/kubernetes-worker/hooks/upgrade-charm
@@ -0,0 +1,22 @@
+#!/usr/bin/env python3
+
+# Load modules from $JUJU_CHARM_DIR/lib
+import sys
+sys.path.append('lib')
+
+from charms.layer import basic # noqa
+basic.bootstrap_charm_deps()
+
+from charmhelpers.core import hookenv # noqa
+hookenv.atstart(basic.init_config_states)
+hookenv.atexit(basic.clear_config_states)
+
+
+# This will load and run the appropriate @hook and other decorated
+# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive,
+# and $JUJU_CHARM_DIR/hooks/relations.
+#
+# See https://jujucharms.com/docs/stable/authors-charm-building
+# for more information on this pattern.
+from charms.reactive import main # noqa
+main()
diff --git a/kubernetes-worker/hooks/vsphere-relation-broken b/kubernetes-worker/hooks/vsphere-relation-broken
new file mode 100755
index 0000000..9858c6b
--- /dev/null
+++ b/kubernetes-worker/hooks/vsphere-relation-broken
@@ -0,0 +1,22 @@
+#!/usr/bin/env python3
+
+# Load modules from $JUJU_CHARM_DIR/lib
+import sys
+sys.path.append('lib')
+
+from charms.layer import basic # noqa
+basic.bootstrap_charm_deps()
+
+from charmhelpers.core import hookenv # noqa
+hookenv.atstart(basic.init_config_states)
+hookenv.atexit(basic.clear_config_states)
+
+
+# This will load and run the appropriate @hook and other decorated
+# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive,
+# and $JUJU_CHARM_DIR/hooks/relations.
+#
+# See https://jujucharms.com/docs/stable/authors-charm-building
+# for more information on this pattern.
+from charms.reactive import main # noqa
+main()
diff --git a/kubernetes-worker/hooks/vsphere-relation-changed b/kubernetes-worker/hooks/vsphere-relation-changed
new file mode 100755
index 0000000..9858c6b
--- /dev/null
+++ b/kubernetes-worker/hooks/vsphere-relation-changed
@@ -0,0 +1,22 @@
+#!/usr/bin/env python3
+
+# Load modules from $JUJU_CHARM_DIR/lib
+import sys
+sys.path.append('lib')
+
+from charms.layer import basic # noqa
+basic.bootstrap_charm_deps()
+
+from charmhelpers.core import hookenv # noqa
+hookenv.atstart(basic.init_config_states)
+hookenv.atexit(basic.clear_config_states)
+
+
+# This will load and run the appropriate @hook and other decorated
+# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive,
+# and $JUJU_CHARM_DIR/hooks/relations.
+#
+# See https://jujucharms.com/docs/stable/authors-charm-building
+# for more information on this pattern.
+from charms.reactive import main # noqa
+main()
diff --git a/kubernetes-worker/hooks/vsphere-relation-created b/kubernetes-worker/hooks/vsphere-relation-created
new file mode 100755
index 0000000..9858c6b
--- /dev/null
+++ b/kubernetes-worker/hooks/vsphere-relation-created
@@ -0,0 +1,22 @@
+#!/usr/bin/env python3
+
+# Load modules from $JUJU_CHARM_DIR/lib
+import sys
+sys.path.append('lib')
+
+from charms.layer import basic # noqa
+basic.bootstrap_charm_deps()
+
+from charmhelpers.core import hookenv # noqa
+hookenv.atstart(basic.init_config_states)
+hookenv.atexit(basic.clear_config_states)
+
+
+# This will load and run the appropriate @hook and other decorated
+# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive,
+# and $JUJU_CHARM_DIR/hooks/relations.
+#
+# See https://jujucharms.com/docs/stable/authors-charm-building
+# for more information on this pattern.
+from charms.reactive import main # noqa
+main()
diff --git a/kubernetes-worker/hooks/vsphere-relation-departed b/kubernetes-worker/hooks/vsphere-relation-departed
new file mode 100755
index 0000000..9858c6b
--- /dev/null
+++ b/kubernetes-worker/hooks/vsphere-relation-departed
@@ -0,0 +1,22 @@
+#!/usr/bin/env python3
+
+# Load modules from $JUJU_CHARM_DIR/lib
+import sys
+sys.path.append('lib')
+
+from charms.layer import basic # noqa
+basic.bootstrap_charm_deps()
+
+from charmhelpers.core import hookenv # noqa
+hookenv.atstart(basic.init_config_states)
+hookenv.atexit(basic.clear_config_states)
+
+
+# This will load and run the appropriate @hook and other decorated
+# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive,
+# and $JUJU_CHARM_DIR/hooks/relations.
+#
+# See https://jujucharms.com/docs/stable/authors-charm-building
+# for more information on this pattern.
+from charms.reactive import main # noqa
+main()
diff --git a/kubernetes-worker/hooks/vsphere-relation-joined b/kubernetes-worker/hooks/vsphere-relation-joined
new file mode 100755
index 0000000..9858c6b
--- /dev/null
+++ b/kubernetes-worker/hooks/vsphere-relation-joined
@@ -0,0 +1,22 @@
+#!/usr/bin/env python3
+
+# Load modules from $JUJU_CHARM_DIR/lib
+import sys
+sys.path.append('lib')
+
+from charms.layer import basic # noqa
+basic.bootstrap_charm_deps()
+
+from charmhelpers.core import hookenv # noqa
+hookenv.atstart(basic.init_config_states)
+hookenv.atexit(basic.clear_config_states)
+
+
+# This will load and run the appropriate @hook and other decorated
+# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive,
+# and $JUJU_CHARM_DIR/hooks/relations.
+#
+# See https://jujucharms.com/docs/stable/authors-charm-building
+# for more information on this pattern.
+from charms.reactive import main # noqa
+main()
diff --git a/kubernetes-worker/icon.svg b/kubernetes-worker/icon.svg
new file mode 100644
index 0000000..dca16ee
--- /dev/null
+++ b/kubernetes-worker/icon.svg
@@ -0,0 +1,362 @@
+
+
+
+
diff --git a/kubernetes-worker/layer.yaml b/kubernetes-worker/layer.yaml
new file mode 100644
index 0000000..7f20f03
--- /dev/null
+++ b/kubernetes-worker/layer.yaml
@@ -0,0 +1,67 @@
+"includes":
+- "layer:options"
+- "layer:basic"
+- "interface:nrpe-external-master"
+- "layer:debug"
+- "interface:tls-certificates"
+- "layer:cis-benchmark"
+- "layer:coordinator"
+- "layer:kubernetes-common"
+- "interface:container-runtime"
+- "layer:snap"
+- "layer:leadership"
+- "layer:metrics"
+- "layer:nagios"
+- "layer:tls-client"
+- "layer:cdk-service-kicker"
+- "layer:kubernetes-master-worker-base"
+- "interface:http"
+- "interface:kubernetes-cni"
+- "interface:kube-dns"
+- "interface:kube-control"
+- "interface:aws-integration"
+- "interface:gcp-integration"
+- "interface:openstack-integration"
+- "interface:vsphere-integration"
+- "interface:azure-integration"
+- "interface:mount"
+"exclude": [".travis.yml", "tests", "tox.ini", "test-requirements.txt", "unit_tests",
+ ".tox", "__pycache__", "Makefile"]
+"options":
+ "coordinator":
+ # Absolute path to the charmhelpers.coordinator.BaseCoordinator to use.
+ "class": "charms.coordinator.SimpleCoordinator"
+ # Layer log level (debug, info, warning, error, critical)
+ "log_level": "info"
+ "basic":
+ "packages":
+ - "cifs-utils"
+ - "ceph-common"
+ - "nfs-common"
+ - "socat"
+ "python_packages": []
+ "use_venv": !!bool "true"
+ "include_system_packages": !!bool "false"
+ "tls-client":
+ "ca_certificate_path": "/root/cdk/ca.crt"
+ "server_certificate_path": ""
+ "server_key_path": ""
+ "client_certificate_path": ""
+ "client_key_path": ""
+ "cdk-service-kicker":
+ "services":
+ - "snap.kubelet.daemon"
+ - "snap.kube-proxy.daemon"
+ "debug": {}
+ "snap": {}
+ "leadership": {}
+ "nagios": {}
+ "cis-benchmark": {}
+ "kubernetes-common": {}
+ "kubernetes-master-worker-base": {}
+ "kubernetes-worker": {}
+"repo": "https://github.com/kubernetes/kubernetes.git"
+"config":
+ "deletes":
+ - "install_from_upstream"
+"is": "kubernetes-worker"
diff --git a/kubernetes-worker/lib/charms/coordinator.py b/kubernetes-worker/lib/charms/coordinator.py
new file mode 100644
index 0000000..b954b92
--- /dev/null
+++ b/kubernetes-worker/lib/charms/coordinator.py
@@ -0,0 +1,144 @@
+# Copyright 2015-2016 Canonical Ltd.
+#
+# This file is part of the Coordinator Layer for Juju.
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 3, as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranties of
+# MERCHANTABILITY, SATISFACTORY QUALITY, or FITNESS FOR A PARTICULAR
+# PURPOSE. See the GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+import importlib
+
+from charmhelpers.coordinator import BaseCoordinator
+from charmhelpers.core import hookenv
+from charms import reactive
+import charms.layer
+
+
+__all__ = ['coordinator', 'acquire']
+
+
+def acquire(lock):
+ """
+ Sets either the coordinator.granted.{lockname} or
+ coordinator.requested.{lockname} state.
+
+ Returns True if the lock could be immediately granted.
+
+ If locks cannot be granted immediately, they will be granted
+ in a future hook and the coordinator.granted.{lockname} state set.
+ """
+ global coordinator
+ if coordinator.acquire(lock):
+ s = 'coordinator.granted.{}'.format(lock)
+ if not reactive.is_state(s):
+ log('Granted {} lock'.format(lock), hookenv.DEBUG)
+ reactive.set_state('coordinator.granted.{}'.format(lock))
+ return True
+ else:
+ log('Requested {} lock'.format(lock), hookenv.DEBUG)
+ reactive.set_state('coordinator.requested.{}'.format(lock))
+ return False
+
+
+options = charms.layer.options('coordinator')
+
+
+def log(msg, level=hookenv.INFO):
+ lmap = {hookenv.DEBUG: 1,
+ hookenv.INFO: 2,
+ hookenv.WARNING: 3,
+ hookenv.ERROR: 4,
+ hookenv.CRITICAL: 5}
+ if lmap[level] >= lmap[options.get('log_level', 'DEBUG').upper()]:
+ hookenv.log('Coordinator: {}'.format(msg), level)
+
+
+class SimpleCoordinator(BaseCoordinator):
+ '''A simple BaseCoordinator that is suitable for almost all cases.
+
+ Only one unit at a time will be granted locks. All requests by that
+ unit will be granted. So only one unit may run tasks guarded by a lock,
+ and the lock name is irrelevant.
+ '''
+ def default_grant(self, lock, unit, granted, queue):
+ '''Grant locks to only one unit at a time, regardless of the lock name.
+
+ This lets us keep separate locks like join and restart,
+ while ensuring the operations do not occur on different nodes
+ at the same time.
+ '''
+ existing_grants = {k: v for k, v in self.grants.items() if v}
+
+ # Return True if this unit has already been granted any lock.
+ if existing_grants.get(unit):
+ self.msg('Granting {} to {} (existing grants)'.format(lock, unit),
+ hookenv.INFO)
+ return True
+
+ # Return False if another unit has been granted any lock.
+ if existing_grants:
+ self.msg('Not granting {} to {} (locks held by {})'
+ ''.format(lock, unit, ','.join(existing_grants.keys())),
+ hookenv.INFO)
+ return False
+
+ # Otherwise, return True if the unit is first in the queue for
+ # this named lock.
+ if queue[0] == unit:
+ self.msg('Granting {} to {} (first in queue)'
+ ''.format(lock, unit), hookenv.INFO)
+ return True
+ else:
+ self.msg('Not granting {} to {} (not first in queue)'
+ ''.format(lock, unit), hookenv.INFO)
+ return False
+
+ def msg(self, msg, level=hookenv.DEBUG):
+ '''Emit a message.'''
+ log(msg, level)
+
+ def _save_state(self):
+ # If the leader acquired a lock, and now released it,
+ # there may be outstanding requests in the queue from other
+ # units. We need to grant them now, as we have no guarantee
+ # of another hook running on the leader for some time (until
+ # update-status).
+ self.handle()
+ super(SimpleCoordinator, self)._save_state()
+
+
+def _instantiate():
+ default_name = 'charms.coordinator.SimpleCoordinator'
+ full_name = options.get('class', default_name)
+ components = full_name.split('.')
+ module = '.'.join(components[:-1])
+ name = components[-1]
+
+ if not module:
+ module = 'charms.coordinator'
+
+ class_ = getattr(importlib.import_module(module), name)
+
+ assert issubclass(class_, BaseCoordinator), \
+ '{} is not a BaseCoordinator subclass'.format(full_name)
+
+ try:
+ # The Coordinator layer defines its own peer relation, as it
+ # can't piggy back on an existing peer relation that may not
+ # exist.
+ return class_(peer_relation_name='coordinator')
+ finally:
+ log('Using {} coordinator'.format(full_name), hookenv.DEBUG)
+
+
+# Instantiate the BaseCoordinator singleton, which installs
+# its charmhelpers.core.atstart() hooks.
+coordinator = _instantiate()
diff --git a/kubernetes-worker/lib/charms/layer/__init__.py b/kubernetes-worker/lib/charms/layer/__init__.py
new file mode 100644
index 0000000..a8e0c64
--- /dev/null
+++ b/kubernetes-worker/lib/charms/layer/__init__.py
@@ -0,0 +1,60 @@
+import sys
+from importlib import import_module
+from pathlib import Path
+
+
+def import_layer_libs():
+ """
+ Ensure that all layer libraries are imported.
+
+ This makes it possible to do the following:
+
+ from charms import layer
+
+ layer.foo.do_foo_thing()
+
+ Note: This function must be called after bootstrap.
+ """
+ for module_file in Path('lib/charms/layer').glob('*'):
+ module_name = module_file.stem
+ if module_name in ('__init__', 'basic', 'execd') or not (
+ module_file.suffix == '.py' or module_file.is_dir()
+ ):
+ continue
+ import_module('charms.layer.{}'.format(module_name))
+
+
+# Terrible hack to support the old terrible interface.
+# Try to get people to call layer.options.get() instead so
+# that we can remove this garbage.
+# Cribbed from https://stackoverflow.com/a/48100440/4941864
+class OptionsBackwardsCompatibilityHack(sys.modules[__name__].__class__):
+ def __call__(self, section=None, layer_file=None):
+ if layer_file is None:
+ return self.get(section=section)
+ else:
+ return self.get(section=section,
+ layer_file=Path(layer_file))
+
+
+def patch_options_interface():
+ from charms.layer import options
+ if sys.version_info.minor >= 5:
+ options.__class__ = OptionsBackwardsCompatibilityHack
+ else:
+ # Py 3.4 doesn't support changing the __class__, so we have to do it
+ # another way. The last line is needed because we already have a
+ # reference that doesn't get updated with sys.modules.
+ name = options.__name__
+ hack = OptionsBackwardsCompatibilityHack(name)
+ hack.get = options.get
+ sys.modules[name] = hack
+ sys.modules[__name__].options = hack
+
+
+try:
+ patch_options_interface()
+except ImportError:
+ # This may fail if pyyaml hasn't been installed yet. But in that
+ # case, the bootstrap logic will try it again once it has.
+ pass
diff --git a/kubernetes-worker/lib/charms/layer/basic.py b/kubernetes-worker/lib/charms/layer/basic.py
new file mode 100644
index 0000000..7507203
--- /dev/null
+++ b/kubernetes-worker/lib/charms/layer/basic.py
@@ -0,0 +1,446 @@
+import os
+import sys
+import re
+import shutil
+from distutils.version import LooseVersion
+from pkg_resources import Requirement
+from glob import glob
+from subprocess import check_call, check_output, CalledProcessError
+from time import sleep
+
+from charms import layer
+from charms.layer.execd import execd_preinstall
+
+
+def _get_subprocess_env():
+ env = os.environ.copy()
+ env['LANG'] = env.get('LANG', 'C.UTF-8')
+ return env
+
+
+def get_series():
+ """
+ Return the series for a few known OSes.
+ Tested as of November 2019:
+ * centos6, centos7, rhel6.
+ * bionic
+ """
+ series = ""
+
+ # Looking for content in /etc/os-release
+ # works for ubuntu + some centos
+ if os.path.isfile('/etc/os-release'):
+ d = {}
+ with open('/etc/os-release', 'r') as rel:
+ for l in rel:
+ if not re.match(r'^\s*$', l):
+ k, v = l.split('=')
+ d[k.strip()] = v.strip().replace('"', '')
+ series = "{ID}{VERSION_ID}".format(**d)
+
+ # Looking for content in /etc/redhat-release
+ # works for redhat enterprise systems
+ elif os.path.isfile('/etc/redhat-release'):
+ with open('/etc/redhat-release', 'r') as redhatlsb:
+ # CentOS Linux release 7.7.1908 (Core)
+ line = redhatlsb.readline()
+ release = int(line.split("release")[1].split()[0][0])
+ series = "centos" + str(release)
+
+ # Looking for content in /etc/lsb-release
+ # works for ubuntu
+ elif os.path.isfile('/etc/lsb-release'):
+ d = {}
+ with open('/etc/lsb-release', 'r') as lsb:
+ for l in lsb:
+ k, v = l.split('=')
+ d[k.strip()] = v.strip()
+ series = d['DISTRIB_CODENAME']
+
+ # This is what happens if we can't figure out the OS.
+ else:
+ series = "unknown"
+ return series
+
+
+def bootstrap_charm_deps():
+ """
+ Set up the base charm dependencies so that the reactive system can run.
+ """
+ # execd must happen first, before any attempt to install packages or
+ # access the network, because sites use this hook to do bespoke
+ # configuration and install secrets so the rest of this bootstrap
+ # and the charm itself can actually succeed. This call does nothing
+ # unless the operator has created and populated $JUJU_CHARM_DIR/exec.d.
+ execd_preinstall()
+ # ensure that $JUJU_CHARM_DIR/bin is on the path, for helper scripts
+
+ series = get_series()
+
+ # OMG?! is build-essentials needed?
+ ubuntu_packages = ['python3-pip',
+ 'python3-setuptools',
+ 'python3-yaml',
+ 'python3-dev',
+ 'python3-wheel',
+ 'build-essential']
+
+ # I'm not going to "yum group info "Development Tools"
+ # omitting above madness
+ centos_packages = ['python3-pip',
+ 'python3-setuptools',
+ 'python3-devel',
+ 'python3-wheel']
+
+ packages_needed = []
+ if 'centos' in series:
+ packages_needed = centos_packages
+ else:
+ packages_needed = ubuntu_packages
+
+ charm_dir = os.environ['JUJU_CHARM_DIR']
+ os.environ['PATH'] += ':%s' % os.path.join(charm_dir, 'bin')
+ venv = os.path.abspath('../.venv')
+ vbin = os.path.join(venv, 'bin')
+ vpip = os.path.join(vbin, 'pip')
+ vpy = os.path.join(vbin, 'python')
+ hook_name = os.path.basename(sys.argv[0])
+ is_bootstrapped = os.path.exists('wheelhouse/.bootstrapped')
+ is_charm_upgrade = hook_name == 'upgrade-charm'
+ is_series_upgrade = hook_name == 'post-series-upgrade'
+ is_post_upgrade = os.path.exists('wheelhouse/.upgraded')
+ is_upgrade = (not is_post_upgrade and
+ (is_charm_upgrade or is_series_upgrade))
+ if is_bootstrapped and not is_upgrade:
+ # older subordinates might have downgraded charm-env, so we should
+ # restore it if necessary
+ install_or_update_charm_env()
+ activate_venv()
+ # the .upgrade file prevents us from getting stuck in a loop
+ # when re-execing to activate the venv; at this point, we've
+ # activated the venv, so it's safe to clear it
+ if is_post_upgrade:
+ os.unlink('wheelhouse/.upgraded')
+ return
+ if os.path.exists(venv):
+ try:
+ # focal installs or upgrades prior to PR 160 could leave the venv
+ # in a broken state which would prevent subsequent charm upgrades
+ _load_installed_versions(vpip)
+ except CalledProcessError:
+ is_broken_venv = True
+ else:
+ is_broken_venv = False
+ if is_upgrade or is_broken_venv:
+ # All upgrades should do a full clear of the venv, rather than
+ # just updating it, to bring in updates to Python itself
+ shutil.rmtree(venv)
+ if is_upgrade:
+ if os.path.exists('wheelhouse/.bootstrapped'):
+ os.unlink('wheelhouse/.bootstrapped')
+ # bootstrap wheelhouse
+ if os.path.exists('wheelhouse'):
+ pre_eoan = series in ('ubuntu12.04', 'precise',
+ 'ubuntu14.04', 'trusty',
+ 'ubuntu16.04', 'xenial',
+ 'ubuntu18.04', 'bionic')
+ pydistutils_lines = [
+ "[easy_install]\n",
+ "find_links = file://{}/wheelhouse/\n".format(charm_dir),
+ "no_index=True\n",
+ "index_url=\n", # deliberately nothing here; disables it.
+ ]
+ if pre_eoan:
+ pydistutils_lines.append("allow_hosts = ''\n")
+ with open('/root/.pydistutils.cfg', 'w') as fp:
+ # make sure that easy_install also only uses the wheelhouse
+ # (see https://github.com/pypa/pip/issues/410)
+ fp.writelines(pydistutils_lines)
+ if 'centos' in series:
+ yum_install(packages_needed)
+ else:
+ apt_install(packages_needed)
+ from charms.layer import options
+ cfg = options.get('basic')
+ # include packages defined in layer.yaml
+ if 'centos' in series:
+ yum_install(cfg.get('packages', []))
+ else:
+ apt_install(cfg.get('packages', []))
+ # if we're using a venv, set it up
+ if cfg.get('use_venv'):
+ if not os.path.exists(venv):
+ series = get_series()
+ if series in ('ubuntu12.04', 'precise',
+ 'ubuntu14.04', 'trusty'):
+ apt_install(['python-virtualenv'])
+ elif 'centos' in series:
+ yum_install(['python-virtualenv'])
+ else:
+ apt_install(['virtualenv'])
+ cmd = ['virtualenv', '-ppython3', '--never-download', venv]
+ if cfg.get('include_system_packages'):
+ cmd.append('--system-site-packages')
+ check_call(cmd, env=_get_subprocess_env())
+ os.environ['PATH'] = ':'.join([vbin, os.environ['PATH']])
+ pip = vpip
+ else:
+ pip = 'pip3'
+ # save a copy of system pip to prevent `pip3 install -U pip`
+ # from changing it
+ if os.path.exists('/usr/bin/pip'):
+ shutil.copy2('/usr/bin/pip', '/usr/bin/pip.save')
+ pre_install_pkgs = ['pip', 'setuptools', 'setuptools-scm']
+ # we bundle these packages to work around bugs in older versions (such
+ # as https://github.com/pypa/pip/issues/56), but if the system already
+ # provided a newer version, downgrading it can cause other problems
+ _update_if_newer(pip, pre_install_pkgs)
+ # install the rest of the wheelhouse deps (extract the pkg names into
+ # a set so that we can ignore the pre-install packages and let pip
+ # choose the best version in case there are multiple from layer
+ # conflicts)
+ pkgs = _load_wheelhouse_versions().keys() - set(pre_install_pkgs)
+ reinstall_flag = '--force-reinstall'
+ if not cfg.get('use_venv', True) and pre_eoan:
+ reinstall_flag = '--ignore-installed'
+ check_call([pip, 'install', '-U', reinstall_flag, '--no-index',
+ '--no-cache-dir', '-f', 'wheelhouse'] + list(pkgs),
+ env=_get_subprocess_env())
+ # re-enable installation from pypi
+ os.remove('/root/.pydistutils.cfg')
+
+ # install pyyaml for centos7, since, unlike the ubuntu image, the
+ # default image for centos doesn't include pyyaml; see the discussion:
+ # https://discourse.jujucharms.com/t/charms-for-centos-lets-begin
+ if 'centos' in series:
+ check_call([pip, 'install', '-U', 'pyyaml'],
+ env=_get_subprocess_env())
+
+ # install python packages from layer options
+ if cfg.get('python_packages'):
+ check_call([pip, 'install', '-U'] + cfg.get('python_packages'),
+ env=_get_subprocess_env())
+ if not cfg.get('use_venv'):
+ # restore system pip to prevent `pip3 install -U pip`
+ # from changing it
+ if os.path.exists('/usr/bin/pip.save'):
+ shutil.copy2('/usr/bin/pip.save', '/usr/bin/pip')
+ os.remove('/usr/bin/pip.save')
+ # setup wrappers to ensure envs are used for scripts
+ install_or_update_charm_env()
+ for wrapper in ('charms.reactive', 'charms.reactive.sh',
+ 'chlp', 'layer_option'):
+ src = os.path.join('/usr/local/sbin', 'charm-env')
+ dst = os.path.join('/usr/local/sbin', wrapper)
+ if not os.path.exists(dst):
+ os.symlink(src, dst)
+ if cfg.get('use_venv'):
+ shutil.copy2('bin/layer_option', vbin)
+ else:
+ shutil.copy2('bin/layer_option', '/usr/local/bin/')
+ # re-link the charm copy to the wrapper in case charms
+ # call bin/layer_option directly (as was the old pattern)
+ os.remove('bin/layer_option')
+ os.symlink('/usr/local/sbin/layer_option', 'bin/layer_option')
+ # flag us as having already bootstrapped so we don't do it again
+ open('wheelhouse/.bootstrapped', 'w').close()
+ if is_upgrade:
+ # flag us as having already upgraded so we don't do it again
+ open('wheelhouse/.upgraded', 'w').close()
+ # Ensure that the newly bootstrapped libs are available.
+ # Note: this only seems to be an issue with namespace packages.
+ # Non-namespace-package libs (e.g., charmhelpers) are available
+ # without having to reload the interpreter. :/
+ reload_interpreter(vpy if cfg.get('use_venv') else sys.argv[0])
+
+
+def _load_installed_versions(pip):
+ pip_freeze = check_output([pip, 'freeze']).decode('utf8')
+ versions = {}
+ for pkg_ver in pip_freeze.splitlines():
+ try:
+ req = Requirement.parse(pkg_ver)
+ except ValueError:
+ continue
+ versions.update({
+ req.project_name: LooseVersion(ver)
+ for op, ver in req.specs if op == '=='
+ })
+ return versions
+
+
+def _load_wheelhouse_versions():
+ versions = {}
+ for wheel in glob('wheelhouse/*'):
+ pkg, ver = os.path.basename(wheel).rsplit('-', 1)
+ # nb: LooseVersion ignores the file extension
+ versions[pkg.replace('_', '-')] = LooseVersion(ver)
+ return versions
+
+
+def _update_if_newer(pip, pkgs):
+ installed = _load_installed_versions(pip)
+ wheelhouse = _load_wheelhouse_versions()
+ for pkg in pkgs:
+ if pkg not in installed or wheelhouse[pkg] > installed[pkg]:
+ check_call([pip, 'install', '-U', '--no-index', '-f', 'wheelhouse',
+ pkg], env=_get_subprocess_env())
+
+
+def install_or_update_charm_env():
+ # On Trusty python3-pkg-resources is not installed
+ try:
+ from pkg_resources import parse_version
+ except ImportError:
+ apt_install(['python3-pkg-resources'])
+ from pkg_resources import parse_version
+
+ try:
+ installed_version = parse_version(
+ check_output(['/usr/local/sbin/charm-env',
+ '--version']).decode('utf8'))
+ except (CalledProcessError, FileNotFoundError):
+ installed_version = parse_version('0.0.0')
+ try:
+ bundled_version = parse_version(
+ check_output(['bin/charm-env',
+ '--version']).decode('utf8'))
+ except (CalledProcessError, FileNotFoundError):
+ bundled_version = parse_version('0.0.0')
+ if installed_version < bundled_version:
+ shutil.copy2('bin/charm-env', '/usr/local/sbin/')
+
+
+def activate_venv():
+ """
+ Activate the venv if enabled in ``layer.yaml``.
+
+ This is handled automatically for normal hooks, but actions might
+ need to invoke this manually, using something like:
+
+ # Load modules from $JUJU_CHARM_DIR/lib
+ import sys
+ sys.path.append('lib')
+
+ from charms.layer.basic import activate_venv
+ activate_venv()
+
+ This will ensure that modules installed in the charm's
+ virtual environment are available to the action.
+ """
+ from charms.layer import options
+ venv = os.path.abspath('../.venv')
+ vbin = os.path.join(venv, 'bin')
+ vpy = os.path.join(vbin, 'python')
+ use_venv = options.get('basic', 'use_venv')
+ if use_venv and '.venv' not in sys.executable:
+ # activate the venv
+ os.environ['PATH'] = ':'.join([vbin, os.environ['PATH']])
+ reload_interpreter(vpy)
+ layer.patch_options_interface()
+ layer.import_layer_libs()
+
+
+def reload_interpreter(python):
+ """
+ Reload the python interpreter to ensure that all deps are available.
+
+ Newly installed modules in namespace packages sometimes seem to
+ not be picked up by Python 3.
+ """
+ os.execve(python, [python] + list(sys.argv), os.environ)
+
+
+def apt_install(packages):
+ """
+ Install apt packages.
+
+ This ensures a consistent set of options that are often missed but
+ should really be set.
+ """
+ if isinstance(packages, (str, bytes)):
+ packages = [packages]
+
+ env = _get_subprocess_env()
+
+ if 'DEBIAN_FRONTEND' not in env:
+ env['DEBIAN_FRONTEND'] = 'noninteractive'
+
+ cmd = ['apt-get',
+ '--option=Dpkg::Options::=--force-confold',
+ '--assume-yes',
+ 'install']
+ for attempt in range(3):
+ try:
+ check_call(cmd + packages, env=env)
+ except CalledProcessError:
+ if attempt == 2: # third attempt
+ raise
+ try:
+ # sometimes apt-get update needs to be run
+ check_call(['apt-get', 'update'], env=env)
+ except CalledProcessError:
+ # sometimes it's a dpkg lock issue
+ pass
+ sleep(5)
+ else:
+ break
+
+
+def yum_install(packages):
+ """ Installs packages with yum.
+ This function largely mimics the apt_install function for consistency.
+ """
+ if packages:
+ env = os.environ.copy()
+ cmd = ['yum', '-y', 'install']
+ for attempt in range(3):
+ try:
+ check_call(cmd + packages, env=env)
+ except CalledProcessError:
+ if attempt == 2:
+ raise
+ try:
+ check_call(['yum', 'update'], env=env)
+ except CalledProcessError:
+ pass
+ sleep(5)
+ else:
+ break
+ else:
+ pass
+
+
+def init_config_states():
+ import yaml
+ from charmhelpers.core import hookenv
+ from charms.reactive import set_state
+ from charms.reactive import toggle_state
+ config = hookenv.config()
+ config_defaults = {}
+ config_defs = {}
+ config_yaml = os.path.join(hookenv.charm_dir(), 'config.yaml')
+ if os.path.exists(config_yaml):
+ with open(config_yaml) as fp:
+ config_defs = yaml.safe_load(fp).get('options', {})
+ config_defaults = {key: value.get('default')
+ for key, value in config_defs.items()}
+ for opt in config_defs.keys():
+ if config.changed(opt):
+ set_state('config.changed')
+ set_state('config.changed.{}'.format(opt))
+ toggle_state('config.set.{}'.format(opt), config.get(opt))
+ toggle_state('config.default.{}'.format(opt),
+ config.get(opt) == config_defaults[opt])
+
+
+def clear_config_states():
+ from charmhelpers.core import hookenv, unitdata
+ from charms.reactive import remove_state
+ config = hookenv.config()
+ remove_state('config.changed')
+ for opt in config.keys():
+ remove_state('config.changed.{}'.format(opt))
+ remove_state('config.set.{}'.format(opt))
+ remove_state('config.default.{}'.format(opt))
+ unitdata.kv().flush()
diff --git a/kubernetes-worker/lib/charms/layer/execd.py b/kubernetes-worker/lib/charms/layer/execd.py
new file mode 100644
index 0000000..438d9a1
--- /dev/null
+++ b/kubernetes-worker/lib/charms/layer/execd.py
@@ -0,0 +1,114 @@
+# Copyright 2014-2016 Canonical Limited.
+#
+# This file is part of layer-basic, the reactive base layer for Juju.
+#
+# charm-helpers is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License version 3 as
+# published by the Free Software Foundation.
+#
+# charm-helpers is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with charm-helpers.  If not, see <http://www.gnu.org/licenses/>.
+
+# This module may only import from the Python standard library.
+import os
+import sys
+import subprocess
+import time
+
+'''
+execd/preinstall
+
+Read the layer-basic docs for more info on how to use this feature.
+https://charmsreactive.readthedocs.io/en/latest/layer-basic.html#exec-d-support
+'''
+
+
def default_execd_dir():
    """Return the default exec.d directory inside the charm directory."""
    charm_dir = os.environ['JUJU_CHARM_DIR']
    return os.path.join(charm_dir, 'exec.d')
+
+
def execd_module_paths(execd_dir=None):
    """Yield the full path of every directory inside execd_dir.

    Falls back to the default exec.d directory when execd_dir is not
    given; yields nothing when the directory does not exist.
    """
    execd_dir = execd_dir or default_execd_dir()
    if not os.path.exists(execd_dir):
        return
    for entry in os.listdir(execd_dir):
        candidate = os.path.join(execd_dir, entry)
        if os.path.isdir(candidate):
            yield candidate
+
+
def execd_submodule_paths(command, execd_dir=None):
    """Yield the path of `command` inside each module that provides it.

    Only executable regular files are yielded.
    """
    for module in execd_module_paths(execd_dir):
        candidate = os.path.join(module, command)
        if os.path.isfile(candidate) and os.access(candidate, os.X_OK):
            yield candidate
+
+
def execd_sentinel_path(submodule_path):
    """Return the sentinel path that marks submodule_path as already run.

    The sentinel lives next to the module directory, named
    '.<module>_<submodule>.done'.
    """
    module_dir = os.path.dirname(submodule_path)
    sentinel_name = '.{}_{}.done'.format(os.path.basename(module_dir),
                                         os.path.basename(submodule_path))
    return os.path.join(os.path.dirname(module_dir), sentinel_name)
+
+
+def execd_run(command, execd_dir=None, stop_on_error=True, stderr=None):
+ """Run command for each module within execd_dir which defines it."""
+ if stderr is None:
+ stderr = sys.stdout
+ for submodule_path in execd_submodule_paths(command, execd_dir):
+ # Only run each execd once. We cannot simply run them in the
+ # install hook, as potentially storage hooks are run before that.
+ # We cannot rely on them being idempotent.
+ sentinel = execd_sentinel_path(submodule_path)
+ if os.path.exists(sentinel):
+ continue
+
+ try:
+ subprocess.check_call([submodule_path], stderr=stderr,
+ universal_newlines=True)
+ with open(sentinel, 'w') as f:
+ f.write('{} ran successfully {}\n'.format(submodule_path,
+ time.ctime()))
+ f.write('Removing this file will cause it to be run again\n')
+ except subprocess.CalledProcessError as e:
+ # Logs get the details. We can't use juju-log, as the
+ # output may be substantial and exceed command line
+ # length limits.
+ print("ERROR ({}) running {}".format(e.returncode, e.cmd),
+ file=stderr)
+ print("STDOUT<>> `get_version('kubelet')
+ (1, 6, 0)
+
+ """
+ cmd = '{} --version'.format(bin_name).split()
+ version_string = subprocess.check_output(cmd).decode('utf-8')
+ return tuple(int(q) for q in re.findall("[0-9]+", version_string)[:3])
+
+
def retry(times, delay_secs):
    """ Decorator for retrying a method call.

    Args:
        times: How many times should we retry before giving up
        delay_secs: Delay in secs

    Returns: A callable that would return the last call outcome
    """
    def retry_decorator(func):
        # func must return a truthy value on success, falsy on failure.
        def _wrapped(*args, **kwargs):
            outcome = func(*args, **kwargs)
            for _ in range(times):
                if outcome:
                    break
                sleep(delay_secs)
                outcome = func(*args, **kwargs)
            return outcome
        return _wrapped

    return retry_decorator
+
+
def calculate_resource_checksum(resource):
    ''' Calculate a checksum for a resource '''
    # When no resource is attached, this is the md5 of zero bytes.
    digest = hashlib.md5()
    path = hookenv.resource_get(resource)
    if path:
        with open(path, 'rb') as f:
            digest.update(f.read())
    return digest.hexdigest()
+
+
def get_resource_checksum_db_key(checksum_prefix, resource):
    ''' Convert a resource name to a resource checksum database key. '''
    return '{}{}'.format(checksum_prefix, resource)
+
+
def migrate_resource_checksums(checksum_prefix, snap_resources):
    ''' Migrate resource checksums from the old schema to the new one '''
    for resource in snap_resources:
        new_key = get_resource_checksum_db_key(checksum_prefix, resource)
        if db.get(new_key):
            continue
        path = hookenv.resource_get(resource)
        if path:
            # Carry over the checksum recorded by
            # charms.reactive.helpers.any_file_changed under the old key.
            old_checksum = db.get('reactive.files_changed.' + path)
            db.set(new_key, old_checksum)
        else:
            # No resource is attached. Previously, this meant no checksum
            # would be calculated and stored. But now we calculate it as if
            # it is a 0-byte resource, so let's go ahead and do that.
            db.set(new_key, hashlib.md5().hexdigest())
+
+
def check_resources_for_upgrade_needed(checksum_prefix, snap_resources):
    """Return True if any snap resource's checksum differs from the db."""
    hookenv.status_set('maintenance', 'Checking resources')
    return any(
        calculate_resource_checksum(resource) !=
        db.get(get_resource_checksum_db_key(checksum_prefix, resource))
        for resource in snap_resources)
+
+
def calculate_and_store_resource_checksums(checksum_prefix, snap_resources):
    """Record the current checksum of each snap resource in the database."""
    for resource in snap_resources:
        db.set(get_resource_checksum_db_key(checksum_prefix, resource),
               calculate_resource_checksum(resource))
+
+
def get_ingress_address(endpoint_name):
    """Return the preferred ingress address for the given endpoint.

    Falls back to the unit's private address on Juju versions without
    network spaces support.
    """
    try:
        network_info = hookenv.network_get(endpoint_name)
    except NotImplementedError:
        network_info = {}

    if not network_info or 'ingress-addresses' not in network_info:
        # Spaces-unaware Juju: the private address is the best we can do.
        return hookenv.unit_get('private-address')

    addresses = network_info['ingress-addresses']

    # Prefer non-fan IP addresses due to various issues, e.g.
    # https://bugs.launchpad.net/charm-gcp-integrator/+bug/1822997
    # Fan typically uses IPs in the 240.0.0.0/4 block, so sort those
    # last. Not technically correct, but good enough.
    try:
        def looks_like_fan(addr):
            return int(addr.partition('.')[0]) >= 240

        addresses = sorted(addresses, key=looks_like_fan)
    except Exception:
        hookenv.log(traceback.format_exc())

    return addresses[0]
+
+
def get_ingress_address6(endpoint_name):
    """Return the first IPv6 ingress address for endpoint_name, or None."""
    try:
        network_info = hookenv.network_get(endpoint_name)
    except NotImplementedError:
        # Older Juju without network-get support.
        network_info = {}

    if not network_info or 'ingress-addresses' not in network_info:
        # No spaces support or no binding: nothing to report.
        return None

    addresses = network_info['ingress-addresses']

    for addr in addresses:
        ip_addr = ipaddress.ip_interface(addr).ip
        if ip_addr.version == 6:
            return str(ip_addr)
    else:
        # NOTE(review): this 'else' appears to belong to the 'for' loop;
        # with no 'break' in the loop it always runs once the addresses
        # are exhausted, i.e. equivalent to a plain 'return None' after
        # the loop -- confirm the original indentation.
        return None
+
+
def service_restart(service_name):
    """Restart a host service, reporting progress via unit status."""
    message = 'Restarting {0} service'.format(service_name)
    hookenv.status_set('maintenance', message)
    host.service_restart(service_name)
+
+
def service_start(service_name):
    """Start a host service, logging the action.

    Bug fix: this previously called host.service_stop(), stopping the
    very service it logged it was starting.
    """
    hookenv.log('Starting {0} service.'.format(service_name))
    host.service_start(service_name)
+
+
def service_stop(service_name):
    """Stop a host service, logging the action first."""
    message = 'Stopping {0} service.'.format(service_name)
    hookenv.log(message)
    host.service_stop(service_name)
+
+
def arch():
    '''Return the package architecture as a string. Raise an exception if the
    architecture is not supported by kubernetes.'''
    # dpkg reports the architecture as bytes with a trailing newline;
    # strip it and decode to a string.
    raw = check_output(['dpkg', '--print-architecture'])
    return raw.rstrip().decode('utf-8')
+
+
def get_service_ip(service, namespace="kube-system", errors_fatal=True):
    """Return the clusterIP of a kubernetes service.

    When kubectl fails, re-raises if errors_fatal else returns None.
    """
    try:
        output = kubectl('get', 'service', '--namespace', namespace, service,
                         '--output', 'json')
    except CalledProcessError:
        if errors_fatal:
            raise
        return None
    return json.loads(output.decode())['spec']['clusterIP']
+
+
def kubectl(*args):
    ''' Run a kubectl cli command with a config file. Returns stdout and throws
    an error if the command fails. '''
    command = ['kubectl', '--kubeconfig=' + kubeclientconfig_path]
    command.extend(args)
    hookenv.log('Executing {}'.format(command))
    return check_output(command)
+
+
def kubectl_success(*args):
    ''' Runs kubectl with the given args. Returns True if successful, False if
    not. '''
    try:
        kubectl(*args)
    except CalledProcessError:
        return False
    return True
+
+
def kubectl_manifest(operation, manifest):
    ''' Wrap the kubectl creation command when using filepath resources
    :param operation - one of get, create, delete, replace
    :param manifest - filepath to the manifest
    '''
    if operation == 'delete':
        # Deletions are a special case: remove requested resources
        # immediately with --now.
        return kubectl_success(operation, '-f', manifest, '--now')
    if operation == 'create' and kubectl_success('get', '-f', manifest):
        # The definition already exists, so it's probably safe to assume
        # creation succeeded; guards against errors re-creating the same
        # manifest multiple times.
        hookenv.log('Skipping definition for {}'.format(manifest))
        return True
    # Execute the requested command that did not match any of the special
    # cases above.
    return kubectl_success(operation, '-f', manifest)
+
+
def get_node_name():
    """Return this node's kubernetes node name (lower-cased).

    AWS registers nodes under their FQDN; every other provider (and the
    no-provider case) uses the bare hostname.
    """
    kubelet_extra_args = parse_extra_args('kubelet-extra-args')
    cloud_provider = kubelet_extra_args.get('cloud-provider', '')
    # A ready cloud endpoint takes precedence over any extra-args value;
    # first match wins, mirroring the original elif chain.
    for endpoint, provider in (('aws', 'aws'),
                               ('gcp', 'gce'),
                               ('openstack', 'openstack'),
                               ('vsphere', 'vsphere'),
                               ('azure', 'azure')):
        if is_state('endpoint.{}.ready'.format(endpoint)):
            cloud_provider = provider
            break
    if cloud_provider == 'aws':
        return getfqdn().lower()
    return gethostname().lower()
+
+
def create_kubeconfig(kubeconfig, server, ca, key=None, certificate=None,
                      user='ubuntu', context='juju-context',
                      cluster='juju-cluster', password=None, token=None,
                      keystone=False, aws_iam_cluster_id=None):
    '''Create a configuration for Kubernetes based on path using the supplied
    arguments for values of the Kubernetes server, CA, key, certificate, user
    context and cluster.

    Exactly one auth mechanism must be usable: client key+certificate,
    password, or token (token and password are mutually exclusive).

    Raises:
        ValueError: if no auth mechanism is given, or both token and
            password are given.
    '''
    if not key and not certificate and not password and not token:
        raise ValueError('Missing authentication mechanism.')

    # token and password are mutually exclusive. Error early if both are
    # present. The developer has requested an impossible situation.
    # see: kubectl config set-credentials --help
    if token and password:
        raise ValueError('Token and Password are mutually exclusive.')
    # Create the config file with the address of the master server.
    cmd = 'kubectl config --kubeconfig={0} set-cluster {1} ' \
          '--server={2} --certificate-authority={3} --embed-certs=true'
    check_call(split(cmd.format(kubeconfig, cluster, server, ca)))
    # Delete old users
    cmd = 'kubectl config --kubeconfig={0} unset users'
    check_call(split(cmd.format(kubeconfig)))
    # Create the credentials using the client flags.
    cmd = 'kubectl config --kubeconfig={0} ' \
          'set-credentials {1} '.format(kubeconfig, user)

    if key and certificate:
        cmd = '{0} --client-key={1} --client-certificate={2} '\
              '--embed-certs=true'.format(cmd, key, certificate)
    if password:
        cmd = "{0} --username={1} --password={2}".format(cmd, user, password)
    # This is mutually exclusive from password. They will not work together.
    if token:
        cmd = "{0} --token={1}".format(cmd, token)
    check_call(split(cmd))
    # Create a default context with the cluster.
    cmd = 'kubectl config --kubeconfig={0} set-context {1} ' \
          '--cluster={2} --user={3}'
    check_call(split(cmd.format(kubeconfig, context, cluster, user)))
    # Make the config use this new context.
    cmd = 'kubectl config --kubeconfig={0} use-context {1}'
    check_call(split(cmd.format(kubeconfig, context)))
    if keystone:
        # create keystone user
        cmd = 'kubectl config --kubeconfig={0} ' \
              'set-credentials keystone-user'.format(kubeconfig)
        check_call(split(cmd))
        # create keystone context
        cmd = 'kubectl config --kubeconfig={0} ' \
              'set-context --cluster={1} ' \
              '--user=keystone-user keystone'.format(kubeconfig, cluster)
        check_call(split(cmd))
        # use keystone context
        cmd = 'kubectl config --kubeconfig={0} ' \
              'use-context keystone'.format(kubeconfig)
        check_call(split(cmd))
        # manually add exec command until kubectl can do it for us
        # (rewrite the empty keystone-user entry into an exec-based one)
        with open(kubeconfig, "r") as f:
            content = f.read()
        content = content.replace("""- name: keystone-user
  user: {}""", """- name: keystone-user
  user:
    exec:
      command: "/snap/bin/client-keystone-auth"
      apiVersion: "client.authentication.k8s.io/v1beta1"
""")
        with open(kubeconfig, "w") as f:
            f.write(content)
    if aws_iam_cluster_id:
        # create aws-iam context
        cmd = 'kubectl config --kubeconfig={0} ' \
              'set-context --cluster={1} ' \
              '--user=aws-iam-user aws-iam-authenticator'
        check_call(split(cmd.format(kubeconfig, cluster)))

        # append a user for aws-iam
        # NOTE(review): the "<>" below looks like a mangled placeholder
        # for the role ARN (intentionally left unusable, per the comment
        # further down) -- confirm against upstream before changing.
        cmd = 'kubectl --kubeconfig={0} config set-credentials ' \
              'aws-iam-user --exec-command=aws-iam-authenticator ' \
              '--exec-arg="token" --exec-arg="-i" --exec-arg="{1}" ' \
              '--exec-arg="-r" --exec-arg="<>" ' \
              '--exec-api-version=client.authentication.k8s.io/v1alpha1'
        check_call(split(cmd.format(kubeconfig, aws_iam_cluster_id)))

        # not going to use aws-iam context by default since we don't have
        # the desired arn. This will make the config not usable if copied.

        # cmd = 'kubectl config --kubeconfig={0} ' \
        #       'use-context aws-iam-authenticator'.format(kubeconfig)
        # check_call(split(cmd))
+
+
def parse_extra_args(config_key):
    """Parse a space-separated 'key=value' charm config option into a dict.

    Bare tokens without '=' are mapped to the string 'true'.
    """
    args = {}
    for element in hookenv.config().get(config_key, '').split():
        key, sep, value = element.partition('=')
        args[key] = value if sep else 'true'
    return args
+
+
def configure_kubernetes_service(key, service, base_args, extra_args_key):
    """Assemble args for a kubernetes snap service and apply via 'snap set'.

    Precedence (lowest to highest): base_args, user-supplied extra args
    from charm config, CIS-injected kv args.

    Args:
        key: prefix for the kv key that records previously-set snap args
        service: snap/service name (e.g. 'kube-proxy')
        base_args: dict of default arguments for the service
        extra_args_key: charm config key holding user extra args
    """
    db = unitdata.kv()

    prev_args_key = key + service
    prev_snap_args = db.get(prev_args_key) or {}

    extra_args = parse_extra_args(extra_args_key)

    args = {}
    args.update(base_args)
    args.update(extra_args)

    # CIS benchmark action may inject kv config to pass failing tests. Merge
    # these after the func args as they should take precedence.
    cis_args_key = 'cis-' + service
    cis_args = db.get(cis_args_key) or {}
    args.update(cis_args)

    # Remove any args with 'None' values (all k8s args are 'k=v') and
    # construct an arg string for use by 'snap set'.
    args = {k: v for k, v in args.items() if v is not None}
    args = ['--%s="%s"' % arg for arg in args.items()]
    args = ' '.join(args)

    snap_opts = {}
    for arg in prev_snap_args:
        # remove previous args by setting to null
        snap_opts[arg] = 'null'
    # All current args ride in the single 'args' snap option.
    snap_opts['args'] = args
    snap_opts = ['%s=%s' % opt for opt in snap_opts.items()]

    cmd = ['snap', 'set', service] + snap_opts
    check_call(cmd)

    # Now that we've started doing snap configuration through the "args"
    # option, we should never need to clear previous args again.
    db.set(prev_args_key, {})
+
+
+def _snap_common_path(component):
+ return Path('/var/snap/{}/common'.format(component))
+
+
def cloud_config_path(component):
    """Return the path of the component's cloud-config.conf file."""
    common = _snap_common_path(component)
    return common / 'cloud-config.conf'
+
+
def _gcp_creds_path(component):
    """Return the path of the component's GCP credentials file."""
    common = _snap_common_path(component)
    return common / 'gcp-creds.json'
+
+
def _daemon_env_path(component):
    """Return the path of the component's daemon environment file."""
    common = _snap_common_path(component)
    return common / 'environment'
+
+
def _cloud_endpoint_ca_path(component):
    """Return the path of the component's cloud endpoint CA certificate."""
    common = _snap_common_path(component)
    return common / 'cloud-endpoint-ca.crt'
+
+
def encryption_config_path():
    """Return the path of the apiserver's encryption config file."""
    common = _snap_common_path('kube-apiserver')
    return common / 'encryption' / 'encryption_config.yaml'
+
+
def write_gcp_snap_config(component):
    """Write GCP credentials and cloud-config for a component's snap.

    Also ensures the daemon environment file exports the credentials
    path so services use the creds env var instead of the metadata
    server.
    """
    # gcp requires additional credentials setup
    gcp = endpoint_from_flag('endpoint.gcp.ready')

    # Write the credentials file with owner-only permissions.
    creds_path = _gcp_creds_path(component)
    with creds_path.open('w') as fp:
        os.fchmod(fp.fileno(), 0o600)
        fp.write(gcp.credentials)

    # token-url = nil makes the services use the creds env var instead of
    # the metadata server; multizone makes the cluster multizone-aware.
    comp_cloud_config_path = cloud_config_path(component)
    comp_cloud_config_path.write_text('[Global]\n'
                                      'token-url = nil\n'
                                      'multizone = true\n')

    env_path = _daemon_env_path(component)
    if env_path.exists():
        daemon_env = env_path.read_text()
        if not daemon_env.endswith('\n'):
            daemon_env += '\n'
    else:
        daemon_env = ''
    if gcp_creds_env_key not in daemon_env:
        daemon_env += '{}={}\n'.format(gcp_creds_env_key, creds_path)
        env_path.parent.mkdir(parents=True, exist_ok=True)
        env_path.write_text(daemon_env)
+
+
def generate_openstack_cloud_config():
    """Build the OpenStack cloud-config file contents.

    Reads credentials and settings from the openstack integrator
    endpoint and returns INI-style text with [Global], [LoadBalancer]
    and (when any block-storage option is set) [BlockStorage] sections,
    terminated by a newline.
    """
    # openstack requires additional credentials setup
    openstack = endpoint_from_flag('endpoint.openstack.ready')

    lines = [
        '[Global]',
        'auth-url = {}'.format(openstack.auth_url),
        'region = {}'.format(openstack.region),
        'username = {}'.format(openstack.username),
        'password = {}'.format(openstack.password),
        'tenant-name = {}'.format(openstack.project_name),
        'domain-name = {}'.format(openstack.user_domain_name),
        'tenant-domain-name = {}'.format(openstack.project_domain_name),
    ]
    if openstack.endpoint_tls_ca:
        # Fixed path where the consumer of this config mounts the CA.
        lines.append('ca-file = /etc/config/endpoint-ca.cert')

    lines.extend([
        '',
        '[LoadBalancer]',
    ])

    if openstack.has_octavia in (True, None):
        # Newer integrator charm will detect whether underlying OpenStack has
        # Octavia enabled so we can set this intelligently. If we're still
        # related to an older integrator, though, default to assuming Octavia
        # is available.
        lines.append('use-octavia = true')
    else:
        lines.append('use-octavia = false')
        lines.append('lb-provider = haproxy')
    if openstack.subnet_id:
        lines.append('subnet-id = {}'.format(openstack.subnet_id))
    if openstack.floating_network_id:
        lines.append('floating-network-id = {}'.format(
            openstack.floating_network_id))
    if openstack.lb_method:
        lines.append('lb-method = {}'.format(
            openstack.lb_method))
    if openstack.manage_security_groups:
        lines.append('manage-security-groups = {}'.format(
            openstack.manage_security_groups))
    # Only emit the [BlockStorage] header when at least one option is set.
    if any([openstack.bs_version,
            openstack.trust_device_path,
            openstack.ignore_volume_az]):
        lines.append('')
        lines.append('[BlockStorage]')
    if openstack.bs_version is not None:
        lines.append('bs-version = {}'.format(openstack.bs_version))
    if openstack.trust_device_path is not None:
        lines.append('trust-device-path = {}'.format(
            openstack.trust_device_path))
    if openstack.ignore_volume_az is not None:
        lines.append('ignore-volume-az = {}'.format(
            openstack.ignore_volume_az))
    return '\n'.join(lines) + '\n'
+
+
def write_azure_snap_config(component):
    """Write the Azure cloud-config JSON for a component's snap."""
    azure = endpoint_from_flag('endpoint.azure.ready')
    config = {
        'useInstanceMetadata': True,
        'useManagedIdentityExtension': True,
        'subscriptionId': azure.subscription_id,
        'resourceGroup': azure.resource_group,
        'location': azure.resource_group_location,
        'vnetName': azure.vnet_name,
        'vnetResourceGroup': azure.vnet_resource_group,
        'subnetName': azure.subnet_name,
        'securityGroupName': azure.security_group_name,
        'loadBalancerSku': 'standard',
    }
    cloud_config_path(component).write_text(json.dumps(config))
+
+
def configure_kube_proxy(configure_prefix, api_servers, cluster_cidr,
                         bind_address=None):
    """Assemble kube-proxy options and apply them via snap configuration."""
    num_apis = len(api_servers)
    kube_proxy_opts = {
        'cluster-cidr': cluster_cidr,
        'kubeconfig': kubeproxyconfig_path,
        'logtostderr': 'true',
        'v': '0',
        # Spread units across the available API servers.
        'master': api_servers[get_unit_number() % num_apis],
        'hostname-override': get_node_name(),
    }
    if bind_address:
        kube_proxy_opts['bind-address'] = bind_address
    elif is_ipv6(cluster_cidr):
        kube_proxy_opts['bind-address'] = '::'

    if host.is_container():
        # conntrack limits can't be tuned from inside a container.
        kube_proxy_opts['conntrack-max-per-core'] = '0'

    if is_dual_stack(cluster_cidr):
        kube_proxy_opts['feature-gates'] = "IPv6DualStack=true"

    configure_kubernetes_service(configure_prefix, 'kube-proxy',
                                 kube_proxy_opts, 'proxy-extra-args')
+
+
def get_unit_number():
    """Return this unit's numeric index (the part after the '/')."""
    unit_name = hookenv.local_unit()
    return int(unit_name.split('/')[1])
+
+
def cluster_cidr():
    '''Return the cluster CIDR provided by the CNI, or None.'''
    cni = endpoint_from_flag('cni.available')
    if not cni:
        return None
    charm_config = hookenv.config()
    if 'default-cni' in charm_config:
        # Only the master charm exposes a default-cni config option.
        default_cni = charm_config['default-cni']
    else:
        # Workers learn the default CNI from the master via kube-control.
        kube_control = endpoint_from_flag('kube-control.dns.available')
        if not kube_control:
            return None
        default_cni = kube_control.get_default_cni()
    return cni.get_config(default=default_cni)['cidr']
+
+
def is_dual_stack(cidrs):
    '''Detect IPv4/IPv6 dual stack from CIDRs'''
    versions = {net.version for net in get_networks(cidrs)}
    return versions == {4, 6}
+
+
def is_ipv4(cidrs):
    '''Detect IPv4 from CIDRs (the original docstring wrongly said IPv6).'''
    return get_ipv4_network(cidrs) is not None
+
+
def is_ipv6(cidrs):
    '''Detect IPv6 from CIDRs'''
    net = get_ipv6_network(cidrs)
    return net is not None
+
+
def is_ipv6_preferred(cidrs):
    '''Detect if IPv6 is preferred from CIDRs (first CIDR is IPv6).

    Note: raises IndexError when cidrs is empty/falsy, since
    get_networks() then returns an empty list.
    '''
    return get_networks(cidrs)[0].version == 6
+
+
def get_networks(cidrs):
    '''Convert a comma-separated list of CIDRs to a list of networks.

    A falsy input yields an empty list.
    '''
    networks = []
    if cidrs:
        for cidr in cidrs.split(','):
            networks.append(ipaddress.ip_interface(cidr).network)
    return networks
+
+
def get_ipv4_network(cidrs):
    '''Get the IPv4 network from the given CIDRs or None'''
    by_version = {net.version: net for net in get_networks(cidrs)}
    return by_version.get(4)
+
+
def get_ipv6_network(cidrs):
    '''Get the IPv6 network from the given CIDRs or None'''
    by_version = {net.version: net for net in get_networks(cidrs)}
    return by_version.get(6)
+
+
def enable_ipv6_forwarding():
    '''Enable net.ipv6.conf.all.forwarding in sysctl if it is not already.'''
    # Setting the knob is idempotent, so it is set unconditionally rather
    # than read-then-write.
    check_call(['sysctl', 'net.ipv6.conf.all.forwarding=1'])
+
+
def get_bind_addrs(ipv4=True, ipv6=True):
    '''Get all global-scoped addresses that we might bind to.'''
    try:
        output = check_output(["ip", "-br", "addr", "show", "scope", "global"])
    except CalledProcessError:
        # stderr carries the details and goes to the log.
        hookenv.log('Unable to determine global addresses', hookenv.ERROR)
        return []

    # Skip virtual/overlay interfaces whose addresses we must not bind.
    ignore_interfaces = ('lxdbr', 'flannel', 'cni', 'virbr', 'docker')
    accept_versions = set()
    if ipv4:
        accept_versions.add(4)
    if ipv6:
        accept_versions.add(6)

    addrs = []
    for line in output.decode('utf8').splitlines():
        intf, state, *intf_addrs = line.split()
        ignored = any(intf.startswith(prefix)
                      for prefix in ignore_interfaces)
        if state != 'UP' or ignored:
            continue
        for addr in intf_addrs:
            ip_addr = ipaddress.ip_interface(addr).ip
            if ip_addr.version in accept_versions:
                addrs.append(str(ip_addr))
    return addrs
+
+
class InvalidVMwareHost(Exception):
    # Raised by _get_vmware_uuid when the DMI product serial does not
    # contain a "VMware-" marker.
    pass
+
+
def _get_vmware_uuid():
    """Read the VMware VM UUID from the DMI product serial.

    Returns 'UNKNOWN' when sysfs cannot be read; raises
    InvalidVMwareHost when the serial lacks the "VMware-" marker.
    """
    serial_id_file = '/sys/class/dmi/id/product_serial'
    # The serial id from VMWare VMs comes in following format:
    # VMware-42 28 13 f5 d4 20 71 61-5d b0 7b 96 44 0c cf 54
    try:
        with open(serial_id_file, 'r') as f:
            serial_string = f.read().strip()
        if "VMware-" not in serial_string:
            hookenv.log("Unable to find VMware ID in "
                        "product_serial: {}".format(serial_string))
            raise InvalidVMwareHost
        digits = serial_string.split("VMware-")[1]
        digits = digits.replace(" ", "").replace("-", "")
        # Reassemble into the canonical 8-4-4-4-12 UUID grouping.
        uuid = '-'.join([digits[0:8], digits[8:12], digits[12:16],
                         digits[16:20], digits[20:32]])
    except IOError as err:
        hookenv.log("Unable to read UUID from sysfs: {}".format(err))
        uuid = 'UNKNOWN'

    return uuid
+
diff --git a/kubernetes-worker/lib/charms/layer/nagios.py b/kubernetes-worker/lib/charms/layer/nagios.py
new file mode 100644
index 0000000..f6ad998
--- /dev/null
+++ b/kubernetes-worker/lib/charms/layer/nagios.py
@@ -0,0 +1,60 @@
+from pathlib import Path
+
+NAGIOS_PLUGINS_DIR = '/usr/lib/nagios/plugins'
+
+
def install_nagios_plugin_from_text(text, plugin_name):
    """ Install a nagios plugin.

    Args:
        text: Plugin source code (str)
        plugin_name: Name of the plugin in nagios

    Returns: Full path to installed plugin
    """
    dest_path = Path(NAGIOS_PLUGINS_DIR) / plugin_name
    # Idempotency matters in Juju: if the identical plugin is already
    # installed, leave it untouched. Different contents (changed options,
    # or another layer reusing the same filename) get overwritten below.
    if dest_path.exists() and dest_path.read_text() == text:
        return dest_path

    dest_path.write_text(text)
    dest_path.chmod(0o755)
    return dest_path
+
+
def install_nagios_plugin_from_file(source_file_path, plugin_name):
    """ Install a nagios plugin from a source file.

    Args:
        source_file_path: Path to plugin source file
        plugin_name: Name of the plugin in nagios

    Returns: Full path to installed plugin
    """
    text = Path(source_file_path).read_text()
    return install_nagios_plugin_from_text(text, plugin_name)
+
+
def remove_nagios_plugin(plugin_name):
    """ Remove a nagios plugin if it is installed.

    Args:
        plugin_name: Name of the plugin in nagios

    Returns: None
    """
    plugin_path = Path(NAGIOS_PLUGINS_DIR) / plugin_name
    if plugin_path.exists():
        plugin_path.unlink()
diff --git a/kubernetes-worker/lib/charms/layer/options.py b/kubernetes-worker/lib/charms/layer/options.py
new file mode 100644
index 0000000..d3f273f
--- /dev/null
+++ b/kubernetes-worker/lib/charms/layer/options.py
@@ -0,0 +1,26 @@
+import os
+from pathlib import Path
+
+import yaml
+
+
+_CHARM_PATH = Path(os.environ.get('JUJU_CHARM_DIR', '.'))
+_DEFAULT_FILE = _CHARM_PATH / 'layer.yaml'
+_CACHE = {}
+
+
def get(section=None, option=None, layer_file=_DEFAULT_FILE):
    """Read layer options from layer.yaml (cached per resolved file).

    With no arguments returns the whole options mapping; with a section
    returns that section's mapping; with section and option returns the
    single option value.

    Raises:
        ValueError: if option is given without section.
    """
    if option and not section:
        raise ValueError('Cannot specify option without section')

    layer_file = (_CHARM_PATH / layer_file).resolve()
    if layer_file not in _CACHE:
        with layer_file.open() as fp:
            _CACHE[layer_file] = yaml.safe_load(fp.read())

    data = _CACHE[layer_file].get('options', {})
    if section:
        data = data.get(section, {})
        if option:
            data = data.get(option)
    return data
diff --git a/kubernetes-worker/lib/charms/layer/snap.py b/kubernetes-worker/lib/charms/layer/snap.py
new file mode 100644
index 0000000..88b8d89
--- /dev/null
+++ b/kubernetes-worker/lib/charms/layer/snap.py
@@ -0,0 +1,455 @@
+# Copyright 2016-2019 Canonical Ltd.
+#
+# This file is part of the Snap layer for Juju.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import os
+import subprocess
+
+import tenacity
+import yaml
+
+from charmhelpers.core import hookenv
+from charms import layer
+from charms import reactive
+from charms.reactive.helpers import any_file_changed, data_changed
+from datetime import datetime, timedelta
+
+
+def get_installed_flag(snapname):
+ return "snap.installed.{}".format(snapname)
+
+
+def get_refresh_available_flag(snapname):
+ return "snap.refresh-available.{}".format(snapname)
+
+
+def get_local_flag(snapname):
+ return "snap.local.{}".format(snapname)
+
+
+def get_disabled_flag(snapname):
+ return "snap.disabled.{}".format(snapname)
+
+
+def install(snapname, **kw):
+ """Install a snap.
+
+    Snap will be installed from the corresponding resource if available,
+ otherwise from the Snap Store.
+
+ Sets the snap.installed.{snapname} flag.
+
+ If the snap.installed.{snapname} flag is already set then the refresh()
+ function is called.
+ """
+ installed_flag = get_installed_flag(snapname)
+ local_flag = get_local_flag(snapname)
+ if reactive.is_flag_set(installed_flag):
+ refresh(snapname, **kw)
+ else:
+ if hookenv.has_juju_version("2.0"):
+ res_path = _resource_get(snapname)
+ if res_path is False:
+ _install_store(snapname, **kw)
+ else:
+ _install_local(res_path, **kw)
+ reactive.set_flag(local_flag)
+ else:
+ _install_store(snapname, **kw)
+ reactive.set_flag(installed_flag)
+
+ # Installing any snap will first ensure that 'core' is installed. Set an
+ # appropriate flag for consumers that want to get/set core options.
+ core_installed = get_installed_flag("core")
+ if not reactive.is_flag_set(core_installed):
+ reactive.set_flag(core_installed)
+
+
+def is_installed(snapname):
+ return reactive.is_flag_set(get_installed_flag(snapname))
+
+
+def is_local(snapname):
+ return reactive.is_flag_set(get_local_flag(snapname))
+
+
+def get_installed_snaps():
+ """Return a list of snaps which are installed by this layer."""
+ flag_prefix = "snap.installed."
+ return [flag[len(flag_prefix) :] for flag in reactive.get_flags() if flag.startswith(flag_prefix)]
+
+
+def refresh(snapname, **kw):
+ """Update a snap.
+
+    Snap will be pulled from the corresponding resource if available
+ and reinstalled if it has changed. Otherwise a 'snap refresh' is
+ run updating the snap from the Snap Store, potentially switching
+ channel and changing confinement options.
+ """
+ # Note that once you upload a resource, you can't remove it.
+ # This means we don't need to cope with an operator switching
+ # from a resource provided to a store provided snap, because there
+ # is no way for them to do that. Well, actually the operator could
+ # upload a zero byte resource, but then we would need to uninstall
+ # the snap before reinstalling from the store and that has the
+ # potential for data loss.
+ local_flag = get_local_flag(snapname)
+ if hookenv.has_juju_version("2.0"):
+ res_path = _resource_get(snapname)
+ if res_path is False:
+ _refresh_store(snapname, **kw)
+ reactive.clear_flag(local_flag)
+ else:
+ _install_local(res_path, **kw)
+ reactive.set_flag(local_flag)
+ else:
+ _refresh_store(snapname, **kw)
+ reactive.clear_flag(local_flag)
+
+
+def remove(snapname):
+ hookenv.log("Removing snap {}".format(snapname))
+ subprocess.check_call(["snap", "remove", snapname])
+ reactive.clear_flag(get_installed_flag(snapname))
+
+
+def connect(plug, slot):
+ """Connect or reconnect a snap plug with a slot.
+
+ Each argument must be a two element tuple, corresponding to
+ the two arguments to the 'snap connect' command.
+ """
+ hookenv.log("Connecting {} to {}".format(plug, slot), hookenv.DEBUG)
+ subprocess.check_call(["snap", "connect", plug, slot])
+
+
+def connect_all():
+ """Connect or reconnect all interface connections defined in layer.yaml.
+
+ This method will fail if called before all referenced snaps have been
+ installed.
+ """
+ opts = layer.options("snap")
+ for snapname, snap_opts in opts.items():
+ for plug, slot in snap_opts.get("connect", []):
+ connect(plug, slot)
+
+
+def disable(snapname):
+ """Disables a snap in the system
+
+ Sets the snap.disabled.{snapname} flag
+
+ This method doesn't affect any snap flag if requested snap does not
+ exist
+ """
+ hookenv.log("Disabling {} snap".format(snapname))
+ if not reactive.is_flag_set(get_installed_flag(snapname)):
+ hookenv.log(
+ "Cannot disable {} snap because it is not installed".format(snapname),
+ hookenv.WARNING,
+ )
+ return
+
+ subprocess.check_call(["snap", "disable", snapname])
+ reactive.set_flag(get_disabled_flag(snapname))
+
+
+def enable(snapname):
+ """Enables a snap in the system
+
+ Clears the snap.disabled.{snapname} flag
+
+    This method doesn't affect any snap flag if requested snap does not
+ exist
+ """
+ hookenv.log("Enabling {} snap".format(snapname))
+ if not reactive.is_flag_set(get_installed_flag(snapname)):
+ hookenv.log(
+ "Cannot enable {} snap because it is not installed".format(snapname),
+ hookenv.WARNING,
+ )
+ return
+
+ subprocess.check_call(["snap", "enable", snapname])
+ reactive.clear_flag(get_disabled_flag(snapname))
+
+
+def restart(snapname):
+ """Restarts a snap in the system
+
+ This method doesn't affect any snap flag if requested snap does not
+ exist
+ """
+ hookenv.log("Restarting {} snap".format(snapname))
+ if not reactive.is_flag_set(get_installed_flag(snapname)):
+ hookenv.log(
+ "Cannot restart {} snap because it is not installed".format(snapname),
+ hookenv.WARNING,
+ )
+ return
+
+ subprocess.check_call(["snap", "restart", snapname])
+
+
+def set(snapname, key, value):
+ """Changes configuration options in a snap
+
+ This method will fail if snapname is not an installed snap
+ """
+ hookenv.log("Set config {}={} for snap {}".format(key, value, snapname))
+ if not reactive.is_flag_set(get_installed_flag(snapname)):
+ hookenv.log(
+ "Cannot set {} snap config because it is not installed".format(snapname),
+ hookenv.WARNING,
+ )
+ return
+
+ subprocess.check_call(["snap", "set", snapname, "{}={}".format(key, value)])
+
+
+def set_refresh_timer(timer=""):
+ """Set the system refresh.timer option (snapd 2.31+)
+
+ This method sets how often snapd will refresh installed snaps. Call with
+ an empty timer string to use the system default (currently 4x per day).
+ Use 'max' to schedule refreshes as far into the future as possible
+ (currently 1 month). Also accepts custom timer strings as defined in the
+ refresh.timer section here:
+ https://forum.snapcraft.io/t/system-options/87
+
+ This method does not validate custom strings and will lead to a
+ CalledProcessError if an invalid string is given.
+
+ :param: timer: empty string (default), 'max', or custom string
+ """
+ if timer == "max":
+ # A month from yesterday is the farthest we should delay to safely stay
+ # under the 1 month max. Translate that to a valid refresh.timer value.
+ # Examples:
+ # - Today is Friday the 13th, set the refresh timer to
+ # 'thu2' (Thursday the 12th is the 2nd thursday of the month).
+ # - Today is Tuesday the 1st, set the refresh timer to
+ # 'mon5' (Monday the [28..31] is the 5th monday of the month).
+ yesterday = datetime.now() - timedelta(1)
+ dow = yesterday.strftime("%a").lower()
+ # increment after int division because we want occurrence 1-5, not 0-4.
+ occurrence = yesterday.day // 7 + 1
+ timer = "{}{}".format(dow, occurrence)
+
+ # NB: 'system' became synonymous with 'core' in 2.32.5, but we use 'core'
+ # here to ensure max compatibility.
+ set(snapname="core", key="refresh.timer", value=timer)
+ subprocess.check_call(["systemctl", "restart", "snapd.service"])
+
+
+def get(snapname, key):
+ """Gets configuration options for a snap
+
+ This method returns the stripped output from the snap get command.
+ This method will fail if snapname is not an installed snap.
+ """
+ hookenv.log("Get config {} for snap {}".format(key, snapname))
+ if not reactive.is_flag_set(get_installed_flag(snapname)):
+ hookenv.log(
+ "Cannot get {} snap config because it is not installed".format(snapname),
+ hookenv.WARNING,
+ )
+ return
+
+ return subprocess.check_output(["snap", "get", snapname, key]).strip()
+
+
+def get_installed_version(snapname):
+ """Gets the installed version of a snapname.
+ This function will fail if snapname is not an installed snap.
+ """
+ cmd = ["snap", "info", snapname]
+ hookenv.log("Get installed key for snap {}".format(snapname))
+ if not reactive.is_flag_set(get_installed_flag(snapname)):
+ hookenv.log(
+ "Cannot get {} snap installed version because it is not installed".format(snapname),
+ hookenv.WARNING,
+ )
+ return
+ return subprocess.check_output(cmd).decode("utf-8", errors="replace").partition("installed:")[-1].split()[0]
+
+
+def get_installed_channel(snapname):
+ """Gets the tracking (channel) of a snapname.
+ This function will fail if snapname is not an installed snap.
+ """
+ cmd = ["snap", "info", snapname]
+ hookenv.log("Get channel for snap {}".format(snapname))
+ if not reactive.is_flag_set(get_installed_flag(snapname)):
+ hookenv.log(
+ "Cannot get snap tracking (channel) because it is not installed",
+ hookenv.WARNING,
+ )
+ return
+ return subprocess.check_output(cmd).decode("utf-8", errors="replace").partition("tracking:")[-1].split()[0]
+
+
+def _snap_args(
+ channel="stable",
+ devmode=False,
+ jailmode=False,
+ dangerous=False,
+ force_dangerous=False,
+ connect=None,
+ classic=False,
+ revision=None,
+):
+ yield "--channel={}".format(channel)
+ if devmode is True:
+ yield "--devmode"
+ if jailmode is True:
+ yield "--jailmode"
+ if force_dangerous is True or dangerous is True:
+ yield "--dangerous"
+ if classic is True:
+ yield "--classic"
+ if revision is not None:
+ yield "--revision={}".format(revision)
+
+
+def _install_local(path, **kw):
+ key = "snap.local.{}".format(path)
+ if data_changed(key, kw) or any_file_changed([path]):
+ cmd = ["snap", "install"]
+ cmd.extend(_snap_args(**kw))
+ cmd.append("--dangerous")
+ cmd.append(path)
+ hookenv.log("Installing {} from local resource".format(path))
+ subprocess.check_call(cmd)
+
+
+def _install_store(snapname, **kw):
+ """Install snap from store
+
+ :param snapname: Name of snap to install
+ :type snapname: str
+ :param kw: Keyword arguments to pass on to ``snap install``
+ :type kw: Dict[str, str]
+ :raises: subprocess.CalledProcessError
+ """
+ cmd = ["snap", "install"]
+ cmd.extend(_snap_args(**kw))
+ cmd.append(snapname)
+ hookenv.log("Installing {} from store".format(snapname))
+
+ for attempt in tenacity.Retrying(
+ wait=tenacity.wait_fixed(10), # seconds
+ stop=tenacity.stop_after_attempt(3),
+ reraise=True,
+ ):
+ with attempt:
+ try:
+ out = subprocess.check_output(cmd, stderr=subprocess.STDOUT)
+ hookenv.log(
+ 'Installation successful cmd="{}" output="{}"'.format(cmd, out),
+ level=hookenv.DEBUG,
+ )
+ reactive.clear_flag(get_local_flag(snapname))
+ except subprocess.CalledProcessError as cp:
+ hookenv.log(
+ 'Installation failed cmd="{}" returncode={} output="{}"'.format(cmd, cp.returncode, cp.output),
+ level=hookenv.ERROR,
+ )
+ raise
+
+
+def _refresh_store(snapname, **kw):
+ if not data_changed("snap.opts.{}".format(snapname), kw):
+ return
+
+ # --amend allows us to refresh from a local resource
+ cmd = ["snap", "refresh", "--amend"]
+ cmd.extend(_snap_args(**kw))
+ cmd.append(snapname)
+ hookenv.log("Refreshing {} from store".format(snapname))
+ out = subprocess.check_output(cmd, stderr=subprocess.STDOUT)
+ print(out)
+
+
+def _resource_get(snapname):
+ """Used to fetch the resource path of the given name.
+
+ This wrapper obtains a resource path and adds an additional
+ check to return False if the resource is zero length.
+ """
+ res_path = hookenv.resource_get(snapname)
+ if res_path and os.stat(res_path).st_size != 0:
+ return res_path
+ return False
+
+
+def get_available_refreshes():
+ """Return a list of snaps which have refreshes available."""
+ try:
+ out = subprocess.check_output(["snap", "refresh", "--list"]).decode("utf8")
+ except subprocess.CalledProcessError:
+ # If snap refresh fails for whatever reason, we should just return no
+ # refreshes available - LP:1869630.
+ return []
+
+ if out == "All snaps up to date.":
+ return []
+ else:
+ return [line.split()[0] for line in out.splitlines()[1:]]
+
+
+def is_refresh_available(snapname):
+ """Check whether a new revision is available for the given snap."""
+ return reactive.is_flag_set(get_refresh_available_flag(snapname))
+
+
+def _check_refresh_available(snapname):
+ return snapname in get_available_refreshes()
+
+
+def create_cohort_snapshot(snapname):
+ """Create a new cohort key for the given snap.
+
+ Cohort keys represent a snapshot of the revision of a snap at the time
+ the key was created. These keys can then be used on any machine to lock
+ the revision of the snap until a new cohort is joined (or the key expires,
+ after 90 days). This is used to maintain consistency of the revision of
+ the snap across units or applications, and to manage the refresh of the
+ snap in a controlled manner.
+
+ Returns a cohort key.
+ """
+ out = subprocess.check_output(["snap", "create-cohort", snapname])
+ data = yaml.safe_load(out.decode("utf8"))
+ return data["cohorts"][snapname]["cohort-key"]
+
+
+def join_cohort_snapshot(snapname, cohort_key):
+ """Refresh the snap into the given cohort.
+
+ If the snap was previously in a cohort, this will update the revision
+ to that of the new cohort snapshot. Note that this does not change the
+ channel that the snap is in, only the revision within that channel.
+ """
+ if is_local(snapname):
+ # joining a cohort can override a locally installed snap
+ hookenv.log("Skipping joining cohort for local snap: " "{}".format(snapname))
+ return
+ subprocess.check_output(["snap", "refresh", snapname, "--cohort", cohort_key])
+ # even though we just refreshed to the latest in the cohort, it's
+ # slightly possible that there's a newer rev available beyond the cohort
+ reactive.toggle_flag(get_refresh_available_flag(snapname), _check_refresh_available(snapname))
diff --git a/kubernetes-worker/lib/charms/layer/tls_client.py b/kubernetes-worker/lib/charms/layer/tls_client.py
new file mode 100644
index 0000000..b2980dc
--- /dev/null
+++ b/kubernetes-worker/lib/charms/layer/tls_client.py
@@ -0,0 +1,61 @@
+# Copyright 2016-2017 Canonical Ltd.
+#
+# This file is part of the tls-client layer for Juju.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from charmhelpers.core.hookenv import log
+from charmhelpers.core import unitdata
+
+from charms.reactive import remove_state
+from charms.reactive import endpoint_from_flag
+
+
+def reset_certificate_write_flag(cert_type):
+ """
+ Reset the certificate written flag so notification will work on the next
+    write. cert_type must be 'server', 'client', or 'ca' to indicate type of
+ certificate
+ """
+ if cert_type not in ['server', 'client', 'ca']:
+ log('Unknown certificate type!')
+ else:
+ remove_state('tls_client.{0}.certificate.written'.format(cert_type))
+
+
+def request_server_cert(common_name, sans=None, crt_path=None, key_path=None):
+ tls = endpoint_from_flag('certificates.available')
+ tls.request_server_cert(common_name, sans)
+ if not crt_path and not key_path:
+ return
+ kv = unitdata.kv()
+ cert_paths = kv.get('layer.tls-client.cert-paths', {})
+ cert_paths.setdefault('server', {})[common_name] = {
+ 'crt': str(crt_path),
+ 'key': str(key_path),
+ }
+ kv.set('layer.tls-client.cert-paths', cert_paths)
+
+
+def request_client_cert(common_name, sans=None, crt_path=None, key_path=None):
+ tls = endpoint_from_flag('certificates.available')
+ tls.request_client_cert(common_name, sans)
+ if not crt_path and not key_path:
+ return
+ kv = unitdata.kv()
+ cert_paths = kv.get('layer.tls-client.cert-paths', {})
+ cert_paths.setdefault('client', {})[common_name] = {
+ 'crt': str(crt_path),
+ 'key': str(key_path),
+ }
+ kv.set('layer.tls-client.cert-paths', cert_paths)
diff --git a/kubernetes-worker/lib/charms/leadership.py b/kubernetes-worker/lib/charms/leadership.py
new file mode 100644
index 0000000..d2a95fa
--- /dev/null
+++ b/kubernetes-worker/lib/charms/leadership.py
@@ -0,0 +1,68 @@
+# Copyright 2015-2016 Canonical Ltd.
+#
+# This file is part of the Leadership Layer for Juju.
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 3, as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranties of
+# MERCHANTABILITY, SATISFACTORY QUALITY, or FITNESS FOR A PARTICULAR
+# PURPOSE. See the GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+from charmhelpers.core import hookenv
+from charmhelpers.core import unitdata
+
+from charms import reactive
+from charms.reactive import not_unless
+
+
+__all__ = ['leader_get', 'leader_set']
+
+
+@not_unless('leadership.is_leader')
+def leader_set(*args, **kw):
+ '''Change leadership settings, per charmhelpers.core.hookenv.leader_set.
+
+ Settings may either be passed in as a single dictionary, or using
+ keyword arguments. All values must be strings.
+
+ The leadership.set.{key} reactive state will be set while the
+ leadership hook environment setting remains set.
+
+ Changed leadership settings will set the leadership.changed.{key}
+ and leadership.changed states. These states will remain set until
+ the following hook.
+
+ These state changes take effect immediately on the leader, and
+ in future hooks run on non-leaders. In this way both leaders and
+ non-leaders can share handlers, waiting on these states.
+ '''
+ if args:
+ if len(args) > 1:
+ raise TypeError('leader_set() takes 1 positional argument but '
+ '{} were given'.format(len(args)))
+ else:
+ settings = dict(args[0])
+ else:
+ settings = {}
+ settings.update(kw)
+ previous = unitdata.kv().getrange('leadership.settings.', strip=True)
+
+ for key, value in settings.items():
+ if value != previous.get(key):
+ reactive.set_state('leadership.changed.{}'.format(key))
+ reactive.set_state('leadership.changed')
+ reactive.helpers.toggle_state('leadership.set.{}'.format(key),
+ value is not None)
+ hookenv.leader_set(settings)
+ unitdata.kv().update(settings, prefix='leadership.settings.')
+
+
+def leader_get(attribute=None):
+ '''Return leadership settings, per charmhelpers.core.hookenv.leader_get.'''
+ return hookenv.leader_get(attribute)
diff --git a/kubernetes-worker/lib/debug_script.py b/kubernetes-worker/lib/debug_script.py
new file mode 100644
index 0000000..e156924
--- /dev/null
+++ b/kubernetes-worker/lib/debug_script.py
@@ -0,0 +1,8 @@
+import os
+
+dir = os.environ["DEBUG_SCRIPT_DIR"]
+
+
+def open_file(path, *args, **kwargs):
+ """ Open a file within the debug script dir """
+ return open(os.path.join(dir, path), *args, **kwargs)
diff --git a/kubernetes-worker/lxd-profile.yaml b/kubernetes-worker/lxd-profile.yaml
new file mode 100644
index 0000000..6b4babc
--- /dev/null
+++ b/kubernetes-worker/lxd-profile.yaml
@@ -0,0 +1,16 @@
+name: juju-default-k8s-deployment-0
+config:
+ linux.kernel_modules: ip_tables,ip6_tables,netlink_diag,nf_nat,overlay
+ raw.lxc: |
+ lxc.apparmor.profile=unconfined
+ lxc.mount.auto=proc:rw sys:rw
+ lxc.cgroup.devices.allow=a
+ lxc.cap.drop=
+ security.nesting: true
+ security.privileged: true
+description: ""
+devices:
+ aadisable:
+ path: /dev/kmsg
+ source: /dev/kmsg
+ type: unix-char
diff --git a/kubernetes-worker/metadata.yaml b/kubernetes-worker/metadata.yaml
new file mode 100644
index 0000000..65315cb
--- /dev/null
+++ b/kubernetes-worker/metadata.yaml
@@ -0,0 +1,92 @@
+"name": "kubernetes-worker"
+"summary": "The workload bearing units of a kubernetes cluster"
+"maintainers":
+- "Tim Van Steenburgh "
+- "George Kraft "
+- "Rye Terrell "
+- "Konstantinos Tsakalozos "
+- "Charles Butler "
+- "Matthew Bruzek "
+- "Mike Wilson "
+- "Joe Borg "
+"description": |
+ Kubernetes is an open-source platform for deploying, scaling, and operations
+ of application containers across a cluster of hosts. Kubernetes is portable
+ in that it works with public, private, and hybrid clouds. Extensible through
+ a pluggable infrastructure. Self healing in that it will automatically
+ restart and place containers on healthy nodes if a node ever goes away.
+"tags":
+- "misc"
+"series":
+- "focal"
+- "bionic"
+- "xenial"
+"requires":
+ "certificates":
+ "interface": "tls-certificates"
+ "kube-api-endpoint":
+ "interface": "http"
+ "kube-dns":
+ # kube-dns is deprecated. Its functionality has been rolled into the
+ # kube-control interface. The kube-dns relation will be removed in
+ # a future release.
+ "interface": "kube-dns"
+ "kube-control":
+ "interface": "kube-control"
+ "aws":
+ "interface": "aws-integration"
+ "gcp":
+ "interface": "gcp-integration"
+ "openstack":
+ "interface": "openstack-integration"
+ "vsphere":
+ "interface": "vsphere-integration"
+ "azure":
+ "interface": "azure-integration"
+ "nfs":
+ "interface": "mount"
+"provides":
+ "nrpe-external-master":
+ "interface": "nrpe-external-master"
+ "scope": "container"
+ "container-runtime":
+ "interface": "container-runtime"
+ "scope": "container"
+ "cni":
+ "interface": "kubernetes-cni"
+ "scope": "container"
+ "ingress-proxy":
+ "interface": "http"
+"peers":
+ "coordinator":
+ "interface": "coordinator"
+"resources":
+ "cni-amd64":
+ "type": "file"
+ "filename": "cni.tgz"
+ "description": "CNI plugins for amd64"
+ "cni-arm64":
+ "type": "file"
+ "filename": "cni.tgz"
+ "description": "CNI plugins for arm64"
+ "cni-s390x":
+ "type": "file"
+ "filename": "cni.tgz"
+ "description": "CNI plugins for s390x"
+ "core":
+ "type": "file"
+ "filename": "core.snap"
+ "description": "core snap"
+ "kubectl":
+ "type": "file"
+ "filename": "kubectl.snap"
+ "description": "kubectl snap"
+ "kubelet":
+ "type": "file"
+ "filename": "kubelet.snap"
+ "description": "kubelet snap"
+ "kube-proxy":
+ "type": "file"
+ "filename": "kube-proxy.snap"
+ "description": "kube-proxy snap"
+"subordinate": !!bool "false"
diff --git a/kubernetes-worker/metrics.yaml b/kubernetes-worker/metrics.yaml
new file mode 100644
index 0000000..0fcb3c1
--- /dev/null
+++ b/kubernetes-worker/metrics.yaml
@@ -0,0 +1,2 @@
+metrics:
+ juju-units: {}
diff --git a/kubernetes-worker/reactive/__init__.py b/kubernetes-worker/reactive/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/kubernetes-worker/reactive/cdk_service_kicker.py b/kubernetes-worker/reactive/cdk_service_kicker.py
new file mode 100644
index 0000000..f7fd33a
--- /dev/null
+++ b/kubernetes-worker/reactive/cdk_service_kicker.py
@@ -0,0 +1,32 @@
+import os
+import subprocess
+from charms import layer
+from charms.reactive import hook, when_not, remove_state, set_state
+from charmhelpers.core.templating import render
+
+
+@hook('upgrade-charm')
+def upgrade_charm():
+ remove_state('cdk-service-kicker.installed')
+
+
+@when_not('cdk-service-kicker.installed')
+def install_cdk_service_kicker():
+ ''' Installs the cdk-service-kicker service. Workaround for
+ https://github.com/juju-solutions/bundle-canonical-kubernetes/issues/357
+ '''
+ source = 'cdk-service-kicker'
+ dest = '/usr/bin/cdk-service-kicker'
+ services = layer.options('cdk-service-kicker').get('services')
+ context = {'services': ' '.join(services)}
+ render(source, dest, context)
+ os.chmod('/usr/bin/cdk-service-kicker', 0o775)
+
+ source = 'cdk-service-kicker.service'
+ dest = '/etc/systemd/system/cdk-service-kicker.service'
+ context = {}
+ render(source, dest, context)
+ command = ['systemctl', 'enable', 'cdk-service-kicker']
+ subprocess.check_call(command)
+
+ set_state('cdk-service-kicker.installed')
diff --git a/kubernetes-worker/reactive/coordinator.py b/kubernetes-worker/reactive/coordinator.py
new file mode 100644
index 0000000..474a95d
--- /dev/null
+++ b/kubernetes-worker/reactive/coordinator.py
@@ -0,0 +1,71 @@
+# Copyright 2015-2016 Canonical Ltd.
+#
+# This file is part of the Coordinator Layer for Juju.
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 3, as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranties of
+# MERCHANTABILITY, SATISFACTORY QUALITY, or FITNESS FOR A PARTICULAR
+# PURPOSE. See the GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+from charmhelpers.core import hookenv
+from charms.coordinator import coordinator, log
+import charms.reactive
+
+
+def initialize_coordinator_state():
+ '''
+ The coordinator.granted.{lockname} state will be set and the
+ coordinator.requested.{lockname} state removed for every lock
+ granted to the currently running hook.
+
+ The coordinator.requested.{lockname} state will remain set for locks
+ not yet granted
+ '''
+ log('Initializing coordinator layer')
+
+ requested = set(coordinator.requests.get(hookenv.local_unit(), {}).keys())
+ previously_requested = set(state.split('.', 2)[2]
+ for state in charms.reactive.bus.get_states()
+ if state.startswith('coordinator.requested.'))
+
+ granted = set(coordinator.grants.get(hookenv.local_unit(), {}).keys())
+ previously_granted = set(state.split('.', 2)[2]
+ for state in charms.reactive.bus.get_states()
+ if state.startswith('coordinator.granted.'))
+
+ # Set reactive state for requested locks.
+ for lock in requested:
+ log('Requested {} lock'.format(lock), hookenv.DEBUG)
+ charms.reactive.set_state('coordinator.requested.{}'.format(lock))
+
+ # Set reactive state for locks that have been granted.
+ for lock in granted:
+ log('Granted {} lock'.format(lock), hookenv.DEBUG)
+ charms.reactive.set_state('coordinator.granted.{}'.format(lock))
+
+ # Remove reactive state for locks that have been released.
+ for lock in (previously_granted - granted):
+ log('Dropped {} lock'.format(lock), hookenv.DEBUG)
+ charms.reactive.remove_state('coordinator.granted.{}'.format(lock))
+
+ # Remove requested state for locks no longer requested and not granted.
+ for lock in (previously_requested - requested - granted):
+ log('Request for {} lock was dropped'.format(lock), hookenv.DEBUG)
+ charms.reactive.remove_state('coordinator.requested.{}'.format(lock))
+
+
+# Per https://github.com/juju-solutions/charms.reactive/issues/33,
+# this module may be imported multiple times so ensure the
+# initialization hook is only registered once. I have to piggy back
+# onto the namespace of a module imported before reactive discovery
+# to do this.
+if not hasattr(charms.reactive, '_coordinator_registered'):
+ hookenv.atstart(initialize_coordinator_state)
+ charms.reactive._coordinator_registered = True
diff --git a/kubernetes-worker/reactive/kubernetes_master_worker_base.py b/kubernetes-worker/reactive/kubernetes_master_worker_base.py
new file mode 100644
index 0000000..0bb1de4
--- /dev/null
+++ b/kubernetes-worker/reactive/kubernetes_master_worker_base.py
@@ -0,0 +1,88 @@
+from charms.layer import snap
+from charms.leadership import (
+ leader_get,
+ leader_set
+)
+from charms.reactive import (
+ when,
+ when_not,
+ when_any,
+ data_changed
+)
+
+from charmhelpers.core import hookenv
+from charmhelpers.core.host import is_container
+from charmhelpers.core.sysctl import create as create_sysctl
+
+
+@when_any('kubernetes-master.snaps.installed',
+ 'kubernetes-worker.snaps.installed')
+@when('snap.refresh.set')
+@when('leadership.is_leader')
+def process_snapd_timer():
+ """
+ Set the snapd refresh timer on the leader so all cluster members
+ (present and future) will refresh near the same time.
+
+ :return: None
+ """
+ # Get the current snapd refresh timer; we know layer-snap has set this
+ # when the 'snap.refresh.set' flag is present.
+ timer = snap.get(
+ snapname='core', key='refresh.timer').decode('utf-8').strip()
+ if not timer:
+ # The core snap timer is empty. This likely means a subordinate timer
+ # reset ours. Try to set it back to a previously leader-set value,
+ # falling back to config if needed. Luckily, this should only happen
+ # during subordinate install, so this should remain stable afterward.
+ timer = leader_get('snapd_refresh') or hookenv.config('snapd_refresh')
+ snap.set_refresh_timer(timer)
+
+ # Ensure we have the timer known by snapd (it may differ from config).
+ timer = snap.get(
+ snapname='core', key='refresh.timer').decode('utf-8').strip()
+
+ # The first time through, data_changed will be true. Subsequent calls
+ # should only update leader data if something changed.
+ if data_changed('snapd_refresh', timer):
+ hookenv.log('setting leader snapd_refresh timer to: {}'.format(timer))
+ leader_set({'snapd_refresh': timer})
+
+
+@when_any('kubernetes-master.snaps.installed',
+ 'kubernetes-worker.snaps.installed')
+@when('snap.refresh.set')
+@when('leadership.changed.snapd_refresh')
+@when_not('leadership.is_leader')
+def set_snapd_timer():
+ """
+ Set the snapd refresh.timer on non-leader cluster members.
+
+ :return: None
+ """
+ # NB: This method should only be run when 'snap.refresh.set' is present.
+ # Layer-snap will always set a core refresh.timer, which may not be the
+ # same as our leader. Gating with 'snap.refresh.set' ensures layer-snap
+ # has finished and we are free to set our config to the leader's timer.
+ timer = leader_get('snapd_refresh') or '' # None will error
+ hookenv.log('setting snapd_refresh timer to: {}'.format(timer))
+ snap.set_refresh_timer(timer)
+
+
+@when('config.changed.sysctl')
+def write_sysctl():
+ """
+ :return: None
+ """
+ sysctl_settings = hookenv.config('sysctl')
+ if sysctl_settings and not is_container():
+ create_sysctl(
+ sysctl_settings,
+ '/etc/sysctl.d/50-kubernetes-charm.conf',
+ # Some keys in the config may not exist in /proc/sys/net/.
+ # For example, the conntrack module may not be loaded when
+            # using lxd drivers instead of kvm. In these cases, we
+ # simply ignore the missing keys, rather than making time
+ # consuming calls out to the filesystem to check for their
+ # existence.
+ ignore=True)
diff --git a/kubernetes-worker/reactive/kubernetes_worker.py b/kubernetes-worker/reactive/kubernetes_worker.py
new file mode 100644
index 0000000..a7fab9c
--- /dev/null
+++ b/kubernetes-worker/reactive/kubernetes_worker.py
@@ -0,0 +1,1544 @@
+#!/usr/bin/env python
+
+# Copyright 2015 The Kubernetes Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import os
+import re
+import shutil
+import subprocess
+import time
+import traceback
+import yaml
+
+from base64 import b64encode
+from subprocess import check_call, check_output
+from subprocess import CalledProcessError
+from socket import gethostname
+
+import charms.coordinator
+from charms import layer
+from charms.layer import snap
+from charms.reactive import hook
+from charms.reactive import endpoint_from_flag
+from charms.reactive import remove_state, clear_flag
+from charms.reactive import set_state, set_flag
+from charms.reactive import is_state, is_flag_set
+from charms.reactive import when, when_any, when_not, when_none
+from charms.reactive import data_changed, is_data_changed
+from charms.templating.jinja2 import render
+
+from charmhelpers.core import hookenv, unitdata
+from charmhelpers.core.host import service_stop, service_restart
+from charmhelpers.core.host import service_pause, service_resume
+from charmhelpers.contrib.charmsupport import nrpe
+
+from charms.layer import kubernetes_common
+
+from charms.layer.kubernetes_common import kubeclientconfig_path
+from charms.layer.kubernetes_common import migrate_resource_checksums
+from charms.layer.kubernetes_common import check_resources_for_upgrade_needed
+from charms.layer.kubernetes_common import calculate_and_store_resource_checksums # noqa
+from charms.layer.kubernetes_common import create_kubeconfig
+from charms.layer.kubernetes_common import kubectl
+from charms.layer.kubernetes_common import arch, get_node_name
+from charms.layer.kubernetes_common import configure_kubernetes_service
+from charms.layer.kubernetes_common import parse_extra_args
+from charms.layer.kubernetes_common import cloud_config_path
+from charms.layer.kubernetes_common import write_gcp_snap_config
+from charms.layer.kubernetes_common import write_azure_snap_config
+from charms.layer.kubernetes_common import kubeproxyconfig_path
+from charms.layer.kubernetes_common import configure_kube_proxy
+from charms.layer.kubernetes_common import get_version
+from charms.layer.kubernetes_common import ca_crt_path
+from charms.layer.kubernetes_common import server_crt_path
+from charms.layer.kubernetes_common import server_key_path
+from charms.layer.kubernetes_common import client_crt_path
+from charms.layer.kubernetes_common import client_key_path
+from charms.layer.kubernetes_common import get_unit_number
+from charms.layer.kubernetes_common import _get_vmware_uuid
+
+from charms.layer.nagios import install_nagios_plugin_from_text
+from charms.layer.nagios import remove_nagios_plugin
+
+# Override the default nagios shortname regex to allow periods, which we
+# need because our bin names contain them (e.g. 'snap.foo.daemon'). The
+# default regex in charmhelpers doesn't allow periods, but nagios itself does.
+nrpe.Check.shortname_re = r'[\.A-Za-z0-9-_]+$'
+nrpe_kubeconfig_path = '/var/lib/nagios/.kube/config'
+
+# Paths and names shared by the handlers below.
+kubeconfig_path = '/root/cdk/kubeconfig'
+gcp_creds_env_key = 'GOOGLE_APPLICATION_CREDENTIALS'
+# Snap resources whose checksums are tracked for upgrade detection.
+snap_resources = ['kubectl', 'kubelet', 'kube-proxy']
+worker_services = ('kubelet', 'kube-proxy')
+# Key prefixes used in the unitdata kv store.
+checksum_prefix = 'kubernetes-worker.resource-checksums.'
+configure_prefix = 'kubernetes-worker.prev_args.'
+cpu_manager_state = "/var/lib/kubelet/cpu_manager_state"
+
+# Snaps refreshed together via snapd cohorts (see the cohort handlers).
+cohort_snaps = ['kubectl', 'kubelet', 'kube-proxy']
+
+# Make snap-installed binaries (e.g. kubectl) resolvable from hook context.
+os.environ['PATH'] += os.pathsep + os.path.join(os.sep, 'snap', 'bin')
+db = unitdata.kv()
+
+
+@hook('upgrade-charm')
+def upgrade_charm():
+    """Migrate legacy state flags and force reconfiguration after a
+    charm upgrade."""
+    # migrate to new flags
+    if is_state('kubernetes-worker.restarted-for-cloud'):
+        remove_state('kubernetes-worker.restarted-for-cloud')
+        set_state('kubernetes-worker.cloud.ready')
+    if is_state('kubernetes-worker.cloud-request-sent'):
+        # minor change, just for consistency
+        remove_state('kubernetes-worker.cloud-request-sent')
+        set_state('kubernetes-worker.cloud.request-sent')
+
+    # Re-evaluate the install source once during this upgrade; the synthetic
+    # config-changed flag is dropped again when the hook exits.
+    set_state('config.changed.install_from_upstream')
+    hookenv.atexit(remove_state, 'config.changed.install_from_upstream')
+
+    cleanup_pre_snap_services()
+    migrate_resource_checksums(checksum_prefix, snap_resources)
+    if check_resources_for_upgrade_needed(checksum_prefix, snap_resources):
+        set_upgrade_needed()
+
+    # Remove the RC for nginx ingress if it exists
+    if hookenv.config().get('ingress'):
+        set_state('kubernetes-worker.remove-old-ingress')
+
+    # Remove gpu.enabled state so we can reconfigure gpu-related kubelet flags,
+    # since they can differ between k8s versions
+    if is_state('kubernetes-worker.gpu.enabled'):
+        remove_state('kubernetes-worker.gpu.enabled')
+        try:
+            disable_gpu()
+        except ApplyNodeLabelFailed:
+            # Removing node label failed. Probably the master is unavailable.
+            # Proceed with the upgrade in hope GPUs will still be there.
+            hookenv.log('Failed to remove GPU labels. Proceed with upgrade.')
+
+    if hookenv.config('ingress'):
+        set_state('kubernetes-worker.ingress.enabled')
+    else:
+        remove_state('kubernetes-worker.ingress.enabled')
+
+    # force certs to be updated
+    if all(is_state(flag) for flag in ('certificates.available',
+                                       'kube-control.connected',
+                                       'cni.available',
+                                       'kube-control.dns.available')):
+        send_data()
+
+    if is_state('kubernetes-worker.registry.configured'):
+        set_state('kubernetes-master-worker-base.registry.configured')
+        remove_state('kubernetes-worker.registry.configured')
+
+    # need to clear cni.available state if it's no longer accurate
+    if is_state('cni.available'):
+        cni = endpoint_from_flag('cni.available')
+        if not cni.config_available():
+            hookenv.log('cni.config_available() is False, clearing'
+                        + ' cni.available flag')
+            remove_state('cni.available')
+
+    # need to bump the kube-control relation in case
+    # kube-control.default_cni.available is not set when it should be
+    if is_state('kube-control.connected'):
+        kube_control = endpoint_from_flag('kube-control.connected')
+        kube_control.manage_flags()
+
+    # Clear derived state so the new charm revision rebuilds config, CNI
+    # plugin install, ingress and auth from scratch.
+    remove_state('kubernetes-worker.cni-plugins.installed')
+    remove_state('kubernetes-worker.config.created')
+    remove_state('kubernetes-worker.ingress.available')
+    remove_state('worker.auth.bootstrapped')
+    remove_state('nfs.configured')
+    set_state('kubernetes-worker.restart-needed')
+
+
+@hook('pre-series-upgrade')
+def pre_series_upgrade():
+    """Drain this node and pause the worker services ahead of an OS
+    series upgrade."""
+    # NB: We use --force here because unmanaged pods are going to die anyway
+    # when the node is shut down, and it's better to let drain cleanly
+    # terminate them. We use --delete-local-data because the dashboard, at
+    # least, uses local data (emptyDir); but local data is documented as being
+    # ephemeral anyway, so we can assume it should be ok.
+    kubectl('drain', get_node_name(), '--ignore-daemonsets', '--force',
+            '--delete-local-data')
+    service_pause('snap.kubelet.daemon')
+    service_pause('snap.kube-proxy.daemon')
+
+
+@hook('post-series-upgrade')
+def post_series_upgrade():
+    """Resume the worker services and mark the node schedulable again."""
+    service_resume('snap.kubelet.daemon')
+    service_resume('snap.kube-proxy.daemon')
+    kubectl('uncordon', get_node_name())
+
+
+@when('kubernetes-worker.remove-old-ingress')
+def remove_old_ingress():
+    """Delete ingress resources created by older charm revisions.
+
+    Idempotent (every delete uses --ignore-not-found). On kubectl failure
+    the state flag is left set so cleanup is retried on a later hook.
+    """
+    try:
+        kubectl('delete', 'rc', 'nginx-ingress-controller',
+                '--ignore-not-found')
+
+        # these moved into a different namespace for 1.12
+        kubectl('delete', 'rc', 'default-http-backend',
+                '--ignore-not-found')
+        kubectl('delete', 'svc', 'default-http-backend',
+                '--ignore-not-found')
+        kubectl('delete', 'ds', 'nginx-ingress-{}-controller'.format(
+            hookenv.service_name()), '--ignore-not-found')
+        kubectl('delete', 'serviceaccount',
+                'nginx-ingress-{}-serviceaccount'.format(
+                    hookenv.service_name()), '--ignore-not-found')
+        kubectl('delete', 'clusterrolebinding',
+                'nginx-ingress-clusterrole-nisa-{}-binding'.format(
+                    hookenv.service_name()), '--ignore-not-found')
+        kubectl('delete', 'configmap',
+                'nginx-load-balancer-{}-conf'.format(
+                    hookenv.service_name()), '--ignore-not-found')
+    except CalledProcessError:
+        # try again next time
+        return
+
+    remove_state('kubernetes-worker.remove-old-ingress')
+
+
+def set_upgrade_needed():
+    """Flag that the worker snaps need an upgrade.
+
+    The upgrade is also marked as 'specified' (i.e. proceeds without an
+    operator action) unless require-manual-upgrade is set and a channel
+    was previously configured.
+    """
+    set_state('kubernetes-worker.snaps.upgrade-needed')
+    config = hookenv.config()
+    previous_channel = config.previous('channel')
+    require_manual = config.get('require-manual-upgrade')
+    if previous_channel is None or not require_manual:
+        set_state('kubernetes-worker.snaps.upgrade-specified')
+
+
def cleanup_pre_snap_services():
    """Remove leftovers from the pre-snap (binary) installation method."""
    # Drop the obsolete state flag set by older charm revisions.
    remove_state('kubernetes-worker.components.installed')

    # Stop the legacy systemd services, if any are still running.
    for old_service in ('kubelet', 'kube-proxy'):
        hookenv.log('Stopping {0} service.'.format(old_service))
        service_stop(old_service)

    # Delete legacy unit files, defaults, binaries and config.
    stale_paths = (
        "/lib/systemd/system/kubelet.service",
        "/lib/systemd/system/kube-proxy.service",
        "/etc/default/kube-default",
        "/etc/default/kubelet",
        "/etc/default/kube-proxy",
        "/usr/local/bin/kubectl",
        "/usr/local/bin/kubelet",
        "/usr/local/bin/kube-proxy",
        "/etc/kubernetes",
    )
    for path in stale_paths:
        if os.path.isdir(path):
            hookenv.log("Removing directory: " + path)
            shutil.rmtree(path)
        elif os.path.isfile(path):
            hookenv.log("Removing file: " + path)
            os.remove(path)
+
+
+@when('config.changed.channel')
+def channel_changed():
+    # A new snap channel means the installed snaps may need refreshing.
+    set_upgrade_needed()
+
+
+@when('kubernetes-worker.snaps.upgrade-specified')
+def install_snaps():
+    """Install (or refresh) the worker snaps from the configured channel."""
+    channel = hookenv.config('channel')
+    hookenv.status_set('maintenance', 'Installing core snap')
+    snap.install('core')
+    hookenv.status_set('maintenance', 'Installing kubectl snap')
+    snap.install('kubectl', channel=channel, classic=True)
+    hookenv.status_set('maintenance', 'Installing kubelet snap')
+    snap.install('kubelet', channel=channel, classic=True)
+    hookenv.status_set('maintenance', 'Installing kube-proxy snap')
+    snap.install('kube-proxy', channel=channel, classic=True)
+    # Record checksums of the snap resources so a later upgrade-charm can
+    # detect changed resources.
+    calculate_and_store_resource_checksums(checksum_prefix, snap_resources)
+    set_state('kubernetes-worker.snaps.installed')
+    set_state('kubernetes-worker.restart-needed')
+    remove_state('kubernetes-worker.snaps.upgrade-needed')
+    remove_state('kubernetes-worker.snaps.upgrade-specified')
+
+
+@when('kubernetes-worker.snaps.installed',
+      'kube-control.cohort_keys.available')
+@when_none('coordinator.granted.cohort',
+           'coordinator.requested.cohort')
+def safely_join_cohort():
+    '''Coordinate the rollout of snap refreshes.
+
+    When cohort keys change, grab a lock so that only 1 unit in the
+    application joins the new cohort at a time. This allows us to roll out
+    snap refreshes without risking all units going down at once.
+    '''
+    kube_control = endpoint_from_flag('kube-control.cohort_keys.available')
+
+    cohort_keys = kube_control.cohort_keys
+    # Only request the lock when the keys actually changed; the grant
+    # arrives in a later hook via the coordinator.granted.cohort flag.
+    if is_data_changed('master-cohorts', cohort_keys):
+        clear_flag('kubernetes-worker.cohorts.joined')
+        charms.coordinator.acquire('cohort')
+
+
@when('kubernetes-worker.snaps.installed',
      'kube-control.cohort_keys.available',
      'coordinator.granted.cohort')
@when_not('kubernetes-worker.cohorts.joined')
def join_or_update_cohorts():
    '''Join or update a cohort snapshot.

    All units of this application (leader and followers) need to refresh
    their installed snaps to the current cohort snapshot. Each snap join is
    retried with increasing back-off; if a snap still cannot join, the unit
    flags the failure and returns so the lock can be re-acquired later.
    '''
    kube_control = endpoint_from_flag('kube-control.cohort_keys.available')
    cohort_keys = kube_control.cohort_keys
    # Delays (seconds) between successive join attempts for one snap.
    retry_delays = (5, 30)
    for snapname in cohort_snaps:
        hookenv.status_set('maintenance',
                           'Joining cohort for {}.'.format(snapname))
        cohort_key = cohort_keys[snapname]
        for attempt in range(len(retry_delays) + 1):
            try:
                snap.join_cohort_snapshot(snapname, cohort_key)
                hookenv.log('Joined cohort for {}'.format(snapname))
                break
            except subprocess.CalledProcessError:
                hookenv.log('Error joining cohort for {}'.format(snapname),
                            level=hookenv.ERROR)
                hookenv.status_set('maintenance',
                                   'Error joining cohort for {} (see logs), '
                                   'will retry.'.format(snapname))
                # Only back off when another attempt remains; previously we
                # also slept 60s after the FINAL failure, which just delayed
                # giving up without another retry.
                if attempt < len(retry_delays):
                    time.sleep(retry_delays[attempt])
        else:
            set_flag('kubernetes-worker.cohorts.failed')
            return
    # Update our cache of the cohort keys, now that they're successfully
    # applied.
    data_changed('master-cohorts', cohort_keys)
    set_flag('kubernetes-worker.cohorts.joined')
    clear_flag('kubernetes-worker.cohorts.failed')
+
+
+@when_none('coordinator.granted.cohort',
+           'coordinator.requested.cohort')
+@when('kubernetes-worker.cohorts.failed')
+def reaquire_coordinator_lock():
+    """Request the cohort lock again after a failed cohort join."""
+    # NOTE(review): 'reaquire' is a typo for 'reacquire'; left unchanged
+    # since renaming a registered handler is riskier than the typo —
+    # confirm nothing keys off the handler name before renaming.
+    # We can't do this in the same hook that the cohort join failed,
+    # because if we request the lock when we already have it, it's
+    # treated as a no-op and then dropped at the end of the hook.
+    charms.coordinator.acquire('cohort')
+
+
+@hook('stop')
+def shutdown():
+    ''' When this unit is destroyed:
+        - delete the current node
+        - stop the worker services
+    '''
+    try:
+        if os.path.isfile(kubeconfig_path):
+            kubectl('delete', 'node', get_node_name())
+    except CalledProcessError:
+        # Best effort: the API server may already be unreachable during
+        # teardown, so just log and carry on stopping services.
+        hookenv.log('Failed to unregister node.')
+    service_stop('snap.kubelet.daemon')
+    service_stop('snap.kube-proxy.daemon')
+
+
+@when('endpoint.container-runtime.available')
+@when_not('kubernetes-worker.cni-plugins.installed')
+def install_cni_plugins():
+    ''' Unpack the cni-plugins resource '''
+    # Get the resource via resource_get
+    try:
+        resource_name = 'cni-{}'.format(arch())
+        archive = hookenv.resource_get(resource_name)
+    except Exception:
+        message = 'Error fetching the cni resource.'
+        hookenv.log(message)
+        hookenv.status_set('blocked', message)
+        return
+
+    if not archive:
+        hookenv.log('Missing cni resource.')
+        hookenv.status_set('blocked', 'Missing cni resource.')
+        return
+
+    # Handle null resource publication, we check if filesize < 1mb
+    filesize = os.stat(archive).st_size
+    if filesize < 1000000:
+        hookenv.status_set('blocked', 'Incomplete cni resource.')
+        return
+
+    hookenv.status_set('maintenance', 'Unpacking cni resource.')
+
+    # Extract the gzipped plugin archive into the standard CNI bin dir.
+    unpack_path = '/opt/cni/bin'
+    os.makedirs(unpack_path, exist_ok=True)
+    cmd = ['tar', 'xfvz', archive, '-C', unpack_path]
+    hookenv.log(cmd)
+    check_call(cmd)
+
+    # Used by the "registry" action. The action is run on a single worker, but
+    # the registry pod can end up on any worker, so we need this directory on
+    # all the workers.
+    os.makedirs('/srv/registry', exist_ok=True)
+
+    set_state('kubernetes-worker.cni-plugins.installed')
+
+
+@when('kubernetes-worker.snaps.installed')
+def set_app_version():
+    ''' Declare the application version to juju '''
+    cmd = ['kubelet', '--version']
+    version = check_output(cmd)
+    # kubelet prints e.g. b'Kubernetes v1.21.0\n'; keep the part after
+    # ' v' and strip the trailing newline.
+    hookenv.application_version_set(version.split(b' v')[-1].rstrip())
+
+
+@hookenv.atexit
+def charm_status():
+    '''Update the status message with the current status of kubelet.
+
+    Runs at the end of every hook; the first matching blocked/waiting
+    condition wins (each branch returns).
+    '''
+    container_runtime_connected = \
+        is_state('endpoint.container-runtime.joined')
+    vsphere_joined = is_state('endpoint.vsphere.joined')
+    azure_joined = is_state('endpoint.azure.joined')
+    cloud_blocked = is_state('kubernetes-worker.cloud.blocked')
+
+    if is_state('upgrade.series.in-progress'):
+        hookenv.status_set('blocked',
+                           'Series upgrade in progress')
+        return
+    if not is_flag_set('certificates.available'):
+        hookenv.status_set('blocked', 'Missing relation to certificate authority.')
+        return
+    if not container_runtime_connected:
+        hookenv.status_set('blocked',
+                           'Connect a container runtime.')
+        return
+    if vsphere_joined and cloud_blocked:
+        hookenv.status_set('blocked',
+                           'vSphere integration requires K8s 1.12 or greater')
+        return
+    if azure_joined and cloud_blocked:
+        hookenv.status_set('blocked',
+                           'Azure integration requires K8s 1.11 or greater')
+        return
+    if is_state('kubernetes-worker.cloud.pending'):
+        hookenv.status_set('waiting', 'Waiting for cloud integration')
+        return
+    if is_state('kubernetes-worker.cohorts.failed'):
+        hookenv.status_set('waiting',
+                           'Failed to join snap cohorts (see logs), will retry.')
+        # NOTE(review): unlike the other branches there is no return here,
+        # so a later check may overwrite this status — confirm the
+        # fall-through is intentional.
+    if not is_state('kube-control.auth.available'):
+        hookenv.status_set('waiting', 'Waiting for cluster credentials.')
+        return
+    if not is_state('kube-control.dns.available'):
+        # During deployment the worker has to start kubelet without cluster dns
+        # configured. If this is the first unit online in a service pool
+        # waiting to self host the dns pod, and configure itself to query the
+        # dns service declared in the kube-system namespace
+        hookenv.status_set('waiting', 'Waiting for cluster DNS.')
+        return
+    if is_state('kubernetes-worker.snaps.upgrade-specified'):
+        hookenv.status_set('waiting', 'Upgrade pending')
+        return
+    if is_state('kubernetes-worker.snaps.upgrade-needed'):
+        hookenv.status_set('blocked',
+                           'Needs manual upgrade, run the upgrade action')
+        return
+    if is_state('kubernetes-worker.snaps.installed'):
+        update_kubelet_status()
+        return
+    else:
+        pass  # will have been set by snap layer or other handler
+
+
+def deprecated_extra_args():
+    '''Returns a list of tuples (config_key, arg) for args that have been set
+    via extra-args, but are deprecated.
+
+    This works by parsing help output, which can be brittle. Be cautious when
+    calling this.
+    '''
+    deprecated_args = []
+    services = [
+        # service config_key
+        ('kubelet', 'kubelet-extra-args'),
+        ('kube-proxy', 'proxy-extra-args')
+    ]
+    for service, config_key in services:
+        # Parse help output into a format we can check easily
+        cmd = [service, '-h']
+        output = subprocess.check_output(cmd, stderr=subprocess.STDOUT)
+        # Split help text on each flag's leading '--' and build a
+        # {flag-name: help-text} map for quick lookup.
+        sections = re.split(r'\n\s*--', output.decode('utf-8'))[1:]
+        partitioned_sections = [section.partition(' ') for section in sections]
+        arg_help = {part[0]: part[2] for part in partitioned_sections}
+
+        # Check extra-args against the help output
+        extra_args = parse_extra_args(config_key)
+        for arg in extra_args:
+            if arg not in arg_help:
+                # This is most likely a problem, though it could also be
+                # intentional use of a hidden arg. Let's just log a warning.
+                hookenv.log(
+                    '%s: %s is missing from help output' % (config_key, arg),
+                    level='WARNING'
+                )
+            elif 'DEPRECATED:' in arg_help[arg]:
+                deprecated_args.append((config_key, arg))
+    return deprecated_args
+
+
+def update_kubelet_status():
+    ''' There are different states that the kubelet can be in, where we are
+    waiting for dns, waiting for cluster turnup, or ready to serve
+    applications.'''
+    # deprecated_extra_args is brittle, be cautious
+    deprecated_args = []
+    try:
+        deprecated_args = deprecated_extra_args()
+    except Exception:
+        # this isn't vital, log it and move on
+        traceback.print_exc()
+    # Surface any deprecated extra-args so operators can fix their config.
+    if deprecated_args:
+        messages = ['%s: %s is deprecated' % arg for arg in deprecated_args]
+        for message in messages:
+            hookenv.log(message, level='WARNING')
+        status = messages[0]
+        if len(messages) > 1:
+            other_count = len(messages) - 1
+            status += " (+%d others, see juju debug-log)" % other_count
+        hookenv.status_set('blocked', status)
+        return
+
+    # Report 'waiting' until both snap daemons are active in systemd.
+    services = [
+        'kubelet',
+        'kube-proxy'
+    ]
+    failing_services = []
+    for service in services:
+        daemon = 'snap.{}.daemon'.format(service)
+        if not _systemctl_is_active(daemon):
+            failing_services.append(service)
+    if failing_services:
+        msg = 'Waiting for {} to start.'.format(','.join(failing_services))
+        hookenv.status_set('waiting', msg)
+        return
+
+    hookenv.status_set('active', 'Kubernetes worker running.')
+
+
+def get_node_ip():
+    '''Determines the preferred NodeIP value for this node.
+
+    Returns None while the cluster CIDR is not yet known; otherwise the
+    IPv6 or IPv4 ingress address of the kube-control relation, matching
+    the address family the cluster CIDR prefers.
+    '''
+    cluster_cidr = kubernetes_common.cluster_cidr()
+    if not cluster_cidr:
+        return None
+    if kubernetes_common.is_ipv6_preferred(cluster_cidr):
+        return kubernetes_common.get_ingress_address6('kube-control')
+    else:
+        return kubernetes_common.get_ingress_address('kube-control')
+
+
+@when('certificates.available', 'kube-control.connected',
+      'cni.available', 'kube-control.dns.available')
+def send_data():
+    '''Send the data that is required to create a server certificate for
+    this server.'''
+    # Use the public ip of this unit as the Common Name for the certificate.
+    common_name = hookenv.unit_public_ip()
+
+    ingress_ip = get_node_ip()
+    bind_addrs = kubernetes_common.get_bind_addrs()
+
+    # Create SANs that the tls layer will add to the server cert.
+    sans = [
+        hookenv.unit_public_ip(),
+        ingress_ip,
+        gethostname()
+    ] + bind_addrs
+
+    # Request a server cert with this information. sorted(set(...)) dedupes
+    # the SANs and keeps the request stable across hook invocations.
+    layer.tls_client.request_server_cert(common_name, sorted(set(sans)),
+                                         crt_path=server_crt_path,
+                                         key_path=server_key_path)
+
+    # Request a client cert for kubelet.
+    layer.tls_client.request_client_cert('system:kubelet',
+                                         crt_path=client_crt_path,
+                                         key_path=client_key_path)
+
+
@when('kube-api-endpoint.available', 'kube-control.dns.available',
      'cni.available', 'endpoint.container-runtime.available')
def watch_for_changes():
    ''' Watch for configuration changes and signal if we need to restart the
    worker services '''
    kube_api = endpoint_from_flag('kube-api-endpoint.available')
    kube_control = endpoint_from_flag('kube-control.dns.available')
    container_runtime = \
        endpoint_from_flag('endpoint.container-runtime.available')

    servers = get_kube_api_servers(kube_api)
    dns = kube_control.get_dns()
    cluster_cidr = kubernetes_common.cluster_cidr()
    container_runtime_name = container_runtime.get_runtime()
    container_runtime_socket = container_runtime.get_socket()
    container_runtime_nvidia = container_runtime.get_nvidia_enabled()

    # Mirror the runtime's nvidia support onto the nvidia.ready flag.
    if container_runtime_nvidia:
        set_state('nvidia.ready')
    else:
        remove_state('nvidia.ready')

    # Evaluate every data_changed() call unconditionally: each call also
    # records the new value in the cache. The previous 'or' chain
    # short-circuited after the first change, leaving later caches stale
    # and triggering a second, spurious restart on the next hook.
    changed = [
        data_changed('kube-api-servers', servers),
        data_changed('kube-dns', dns),
        data_changed('cluster-cidr', cluster_cidr),
        data_changed('container-runtime', container_runtime_name),
        data_changed('container-socket', container_runtime_socket),
    ]
    if any(changed):
        set_state('kubernetes-worker.restart-needed')
+
+
+@when('kubernetes-worker.snaps.installed', 'kube-api-endpoint.available',
+      'tls_client.ca.saved', 'tls_client.certs.saved',
+      'kube-control.dns.available', 'kube-control.auth.available',
+      'cni.available', 'kubernetes-worker.restart-needed',
+      'worker.auth.bootstrapped', 'endpoint.container-runtime.available',
+      'kube-control.default_cni.available')
+@when_not('kubernetes-worker.cloud.pending',
+          'kubernetes-worker.cloud.blocked',
+          'upgrade.series.in-progress')
+def start_worker():
+    ''' Start kubelet using the provided API and DNS info.'''
+    # Note that the DNS server doesn't necessarily exist at this point. We know
+    # what its IP will eventually be, though, so we can go ahead and configure
+    # kubelet with that info. This ensures that early pods are configured with
+    # the correct DNS even though the server isn't ready yet.
+    kube_api = endpoint_from_flag('kube-api-endpoint.available')
+    kube_control = endpoint_from_flag('kube-control.dns.available')
+
+    servers = get_kube_api_servers(kube_api)
+    dns = kube_control.get_dns()
+    ingress_ip = get_node_ip()
+    cluster_cidr = kubernetes_common.cluster_cidr()
+
+    if cluster_cidr is None:
+        hookenv.log('Waiting for cluster cidr.')
+        return
+
+    if kubernetes_common.is_ipv6(cluster_cidr):
+        kubernetes_common.enable_ipv6_forwarding()
+
+    # NOTE(review): caches the creds via data_changed; presumably consumed
+    # by an auth handler outside this view — verify before relying on it.
+    creds = db.get('credentials')
+    data_changed('kube-control.creds', creds)
+
+    # Spread workers across the available API servers by unit number.
+    create_config(servers[get_unit_number() % len(servers)], creds)
+    configure_default_cni()
+    configure_kubelet(dns, ingress_ip)
+    configure_kube_proxy(configure_prefix, servers,
+                         cluster_cidr)
+    set_state('kubernetes-worker.config.created')
+    restart_unit_services()
+    update_kubelet_status()
+    set_state('kubernetes-worker.label-config-required')
+    set_state('nrpe-external-master.reconfigure')
+    remove_state('kubernetes-worker.restart-needed')
+
+
+@when('cni.connected')
+@when_not('cni.configured')
+def configure_cni(cni):
+    ''' Set worker configuration on the CNI relation. This lets the CNI
+    subordinate know that we're the worker so it can respond accordingly. '''
+    # kubeconfig_path is the kubelet kubeconfig rendered by create_config().
+    cni.set_config(is_master=False, kubeconfig_path=kubeconfig_path)
+
+
+@when('config.changed.labels')
+def handle_labels_changed():
+    # Re-apply node labels whenever the 'labels' config option changes.
+    set_state('kubernetes-worker.label-config-required')
+
+
@when('kubernetes-worker.label-config-required',
      'kubernetes-worker.config.created')
def apply_node_labels():
    ''' Parse the labels configuration option and apply the labels to the
    node.

    Labels are space-separated key=value pairs; values may themselves
    contain '=' characters. Malformed items are logged and skipped.
    Labels removed from the config are also removed from the node.
    '''
    # Get the user's configured labels. Guard against an unset option so a
    # missing value doesn't crash the hook with an AttributeError.
    config = hookenv.config()
    user_labels = {}
    for item in (config.get('labels') or '').split(' '):
        if '=' in item:
            # Split on the first '=' only: a value like 'role=a=b' used to
            # raise ValueError (too many values to unpack) and kill the hook.
            key, val = item.split('=', 1)
            user_labels[key] = val
        else:
            hookenv.log('Skipping malformed option: {}.'.format(item))
    # Collect the current label state.
    current_labels = db.get('current_labels') or {}

    try:
        # Remove any labels that the user has removed from the config.
        for key in list(current_labels.keys()):
            if key not in user_labels:
                remove_label(key)
                del current_labels[key]
                db.set('current_labels', current_labels)

        # Add any new labels.
        for key, val in user_labels.items():
            set_label(key, val)
            current_labels[key] = val
            db.set('current_labels', current_labels)

        # Set the juju-application label.
        set_label('juju-application', hookenv.service_name())

        # Set the juju.io/cloud label.
        if is_state('endpoint.aws.ready'):
            set_label('juju.io/cloud', 'ec2')
        elif is_state('endpoint.gcp.ready'):
            set_label('juju.io/cloud', 'gce')
        elif is_state('endpoint.openstack.ready'):
            set_label('juju.io/cloud', 'openstack')
        elif is_state('endpoint.vsphere.ready'):
            set_label('juju.io/cloud', 'vsphere')
        elif is_state('endpoint.azure.ready'):
            set_label('juju.io/cloud', 'azure')
        else:
            remove_label('juju.io/cloud')
    except ApplyNodeLabelFailed as e:
        # Leave the flag set so labeling is retried on a later hook.
        hookenv.log(str(e))
        return

    # Label configuration complete.
    remove_state('kubernetes-worker.label-config-required')
+
+
+@when_any('config.changed.kubelet-extra-args',
+          'config.changed.proxy-extra-args',
+          'config.changed.kubelet-extra-config')
+def config_changed_requires_restart():
+    """Schedule a worker restart when any extra-args/extra-config change."""
+    # LP bug #1826833, always delete the state file when extra config changes
+    # since CPU manager doesn't support offlining and onlining of CPUs at
+    # runtime.
+    if os.path.isfile(cpu_manager_state):
+        hookenv.log("Removing file: " + cpu_manager_state)
+        os.remove(cpu_manager_state)
+    set_state('kubernetes-worker.restart-needed')
+
+
+@when_any('tls_client.certs.changed',
+          'tls_client.ca.written')
+def restart_for_certs():
+    # Certificates changed on disk; restart the services to pick them up,
+    # and clear the edge-trigger flags so this only fires once per change.
+    set_state('kubernetes-worker.restart-needed')
+    remove_state('tls_client.certs.changed')
+    remove_state('tls_client.ca.written')
+
+
+def create_config(server, creds):
+    '''Create a kubernetes configuration for the worker unit.
+
+    :param server: API server URL the kubeconfigs should point at.
+    :param creds: dict providing 'client_token', 'kubelet_token' and
+        'proxy_token' (read below).
+    '''
+    # Create kubernetes configuration in the default location for ubuntu.
+    create_kubeconfig('/home/ubuntu/.kube/config', server, ca_crt_path,
+                      token=creds['client_token'], user='ubuntu')
+    # Make the config dir readable by the ubuntu users so juju scp works.
+    cmd = ['chown', '-R', 'ubuntu:ubuntu', '/home/ubuntu/.kube']
+    check_call(cmd)
+    # Create kubernetes configuration in the default location for root.
+    create_kubeconfig(kubeclientconfig_path, server, ca_crt_path,
+                      token=creds['client_token'], user='root')
+    # Create kubernetes configuration for kubelet, and kube-proxy services.
+    create_kubeconfig(kubeconfig_path, server, ca_crt_path,
+                      token=creds['kubelet_token'], user='kubelet')
+    create_kubeconfig(kubeproxyconfig_path, server, ca_crt_path,
+                      token=creds['proxy_token'], user='kube-proxy')
+
+
def merge_kubelet_extra_config(config, extra_config):
    ''' Updates config to include the contents of extra_config. This is done
    recursively to allow deeply nested dictionaries to be merged.

    This is destructive: it modifies the config dict that is passed in.

    :param config: dict merged into (mutated in place).
    :param extra_config: dict whose values override/extend config; nested
        dicts are merged key-by-key rather than replaced wholesale.
    :return: the (mutated) config dict, for convenient chaining.
    '''
    for k, extra_config_value in extra_config.items():
        if isinstance(extra_config_value, dict):
            # Recurse so nested dicts are merged rather than replaced.
            config_value = config.setdefault(k, {})
            merge_kubelet_extra_config(config_value, extra_config_value)
        else:
            config[k] = extra_config_value
    return config
+
+
+def configure_kubelet(dns, ingress_ip):
+    """Assemble kubelet CLI options and (on k8s >= 1.10) its config file.
+
+    :param dns: dict from kube-control providing 'domain',
+        'enable-kube-dns' and 'sdn-ip' keys (see usage below).
+    :param ingress_ip: address advertised as this node's node-ip.
+    """
+    kubelet_opts = {}
+    kubelet_opts['kubeconfig'] = kubeconfig_path
+    kubelet_opts['network-plugin'] = 'cni'
+    kubelet_opts['v'] = '0'
+    kubelet_opts['logtostderr'] = 'true'
+    kubelet_opts['node-ip'] = ingress_ip
+
+    container_runtime = \
+        endpoint_from_flag('endpoint.container-runtime.available')
+
+    kubelet_opts['container-runtime'] = container_runtime.get_runtime()
+    if kubelet_opts['container-runtime'] == 'remote':
+        kubelet_opts['container-runtime-endpoint'] = container_runtime.get_socket()
+
+    # Cloud-provider specific flags.
+    kubelet_cloud_config_path = cloud_config_path('kubelet')
+    if is_state('endpoint.aws.ready'):
+        kubelet_opts['cloud-provider'] = 'aws'
+    elif is_state('endpoint.gcp.ready'):
+        kubelet_opts['cloud-provider'] = 'gce'
+        kubelet_opts['cloud-config'] = str(kubelet_cloud_config_path)
+    elif is_state('endpoint.openstack.ready'):
+        kubelet_opts['cloud-provider'] = 'external'
+    elif is_state('endpoint.vsphere.joined'):
+        # vsphere just needs to be joined on the worker (vs 'ready')
+        kubelet_opts['cloud-provider'] = 'vsphere'
+        # NB: vsphere maps node product-id to its uuid (no config file needed).
+        uuid = _get_vmware_uuid()
+        kubelet_opts['provider-id'] = 'vsphere://{}'.format(uuid)
+    elif is_state('endpoint.azure.ready'):
+        azure = endpoint_from_flag('endpoint.azure.ready')
+        kubelet_opts['cloud-provider'] = 'azure'
+        kubelet_opts['cloud-config'] = str(kubelet_cloud_config_path)
+        kubelet_opts['provider-id'] = azure.vm_id
+
+    if get_version('kubelet') >= (1, 10):
+        # Put together the KubeletConfiguration data
+        kubelet_config = {
+            'apiVersion': 'kubelet.config.k8s.io/v1beta1',
+            'kind': 'KubeletConfiguration',
+            'address': '0.0.0.0',
+            'authentication': {
+                'anonymous': {
+                    'enabled': False
+                },
+                'x509': {
+                    'clientCAFile': str(ca_crt_path)
+                }
+            },
+            # NB: authz webhook config tells the kubelet to ask the api server
+            # if a request is authorized; it is not related to the authn
+            # webhook config of the k8s master services.
+            'authorization': {
+                'mode': 'Webhook'
+            },
+            'clusterDomain': dns['domain'],
+            'failSwapOn': False,
+            'port': 10250,
+            'protectKernelDefaults': True,
+            'readOnlyPort': 0,
+            'tlsCertFile': str(server_crt_path),
+            'tlsPrivateKeyFile': str(server_key_path)
+        }
+        if dns['enable-kube-dns']:
+            kubelet_config['clusterDNS'] = [dns['sdn-ip']]
+
+        # Handle feature gates
+        feature_gates = {}
+        if get_version('kubelet') >= (1, 19):
+            # NB: required for CIS compliance
+            feature_gates['RotateKubeletServerCertificate'] = True
+        if is_state('kubernetes-worker.gpu.enabled'):
+            feature_gates['DevicePlugins'] = True
+        if feature_gates:
+            kubelet_config['featureGates'] = feature_gates
+        if kubernetes_common.is_dual_stack(kubernetes_common.cluster_cidr()):
+            feature_gates = kubelet_config.setdefault('featureGates', {})
+            feature_gates['IPv6DualStack'] = True
+
+        # Workaround for DNS on bionic
+        # https://github.com/juju-solutions/bundle-canonical-kubernetes/issues/655
+        resolv_path = os.path.realpath('/etc/resolv.conf')
+        if resolv_path == '/run/systemd/resolve/stub-resolv.conf':
+            kubelet_config['resolvConf'] = '/run/systemd/resolve/resolv.conf'
+
+        # Add kubelet-extra-config. This needs to happen last so that it
+        # overrides any config provided by the charm.
+        kubelet_extra_config = hookenv.config('kubelet-extra-config')
+        kubelet_extra_config = yaml.safe_load(kubelet_extra_config)
+        merge_kubelet_extra_config(kubelet_config, kubelet_extra_config)
+
+        # Render the file and configure Kubelet to use it
+        os.makedirs('/root/cdk/kubelet', exist_ok=True)
+        with open('/root/cdk/kubelet/config.yaml', 'w') as f:
+            f.write('# Generated by kubernetes-worker charm, do not edit\n')
+            yaml.dump(kubelet_config, f)
+        kubelet_opts['config'] = '/root/cdk/kubelet/config.yaml'
+    else:
+        # NOTE: This is for 1.9. Once we've dropped 1.9 support, we can remove
+        # this whole block and the parent if statement.
+        kubelet_opts['address'] = '0.0.0.0'
+        kubelet_opts['anonymous-auth'] = 'false'
+        kubelet_opts['client-ca-file'] = str(ca_crt_path)
+        kubelet_opts['cluster-domain'] = dns['domain']
+        kubelet_opts['fail-swap-on'] = 'false'
+        kubelet_opts['port'] = '10250'
+        kubelet_opts['tls-cert-file'] = str(server_crt_path)
+        kubelet_opts['tls-private-key-file'] = str(server_key_path)
+        if dns['enable-kube-dns']:
+            kubelet_opts['cluster-dns'] = dns['sdn-ip']
+        if is_state('kubernetes-worker.gpu.enabled'):
+            kubelet_opts['feature-gates'] = 'DevicePlugins=true'
+
+        # Workaround for DNS on bionic, for k8s 1.9
+        # https://github.com/juju-solutions/bundle-canonical-kubernetes/issues/655
+        resolv_path = os.path.realpath('/etc/resolv.conf')
+        if resolv_path == '/run/systemd/resolve/stub-resolv.conf':
+            kubelet_opts['resolv-conf'] = '/run/systemd/resolve/resolv.conf'
+
+    if get_version('kubelet') >= (1, 11):
+        kubelet_opts['dynamic-config-dir'] = '/root/cdk/kubelet/dynamic-config'
+
+    # If present, ensure kubelet gets the pause container from the configured
+    # registry. When not present, kubelet uses a default image location
+    # (currently k8s.gcr.io/pause:3.4.1).
+    registry_location = get_registry_location()
+    if registry_location:
+        kubelet_opts['pod-infra-container-image'] = \
+            '{}/pause:3.4.1'.format(registry_location)
+
+    configure_kubernetes_service(configure_prefix, 'kubelet', kubelet_opts,
+                                 'kubelet-extra-args')
+
+
+@when('config.changed.ingress')
+def toggle_ingress_state():
+ ''' Ingress is a toggled state. Remove ingress.available if set when
+ toggled '''
+ if hookenv.config('ingress'):
+ set_state('kubernetes-worker.ingress.enabled')
+ else:
+ remove_state('kubernetes-worker.ingress.enabled')
+
+
+@when_any('config.changed.default-backend-image',
+          'config.changed.ingress-ssl-chain-completion',
+          'config.changed.nginx-image',
+          'config.changed.ingress-ssl-passthrough',
+          'config.changed.ingress-default-ssl-certificate',
+          'config.changed.ingress-default-ssl-key')
+def reconfigure_ingress():
+    '''Clear the ingress.available flag after any ingress-related config
+    change so render_and_launch_ingress re-renders the manifests.'''
+    remove_state('kubernetes-worker.ingress.available')
+
+
+@when('kubernetes-worker.config.created', 'kubernetes-worker.ingress.enabled')
+@when_not('kubernetes-worker.ingress.available')
+def render_and_launch_ingress():
+    ''' Launch the Kubernetes ingress controller & default backend (404).
+
+    Renders and applies the ingress daemon set and the default HTTP
+    backend manifests, opens ports 80/443 and sets the
+    kubernetes-worker.ingress.available flag. On kubectl failure the
+    flag is left unset so this handler retries on the next update.
+    '''
+    config = hookenv.config()
+
+    # need to test this in case we get in
+    # here from a config change to the image
+    if not config.get('ingress'):
+        return
+
+    context = {}
+    context['arch'] = arch()
+    addon_path = '/root/cdk/addons/{}'
+    context['juju_application'] = hookenv.service_name()
+
+    # If present, workers will get the ingress containers from the configured
+    # registry. Otherwise, we'll set an appropriate upstream image registry.
+    registry_location = get_registry_location()
+
+    context['defaultbackend_image'] = config.get('default-backend-image')
+    if (context['defaultbackend_image'] == "" or
+            context['defaultbackend_image'] == "auto"):
+        if registry_location:
+            backend_registry = registry_location
+        else:
+            backend_registry = 'k8s.gcr.io'
+        # s390x/ppc64el upstream images lag the other arches, hence the
+        # differing versions below.
+        if context['arch'] == 's390x':
+            context['defaultbackend_image'] = \
+                "{}/defaultbackend-s390x:1.4".format(backend_registry)
+        elif context['arch'] == 'ppc64el':
+            context['defaultbackend_image'] = \
+                "{}/defaultbackend-ppc64le:1.5".format(backend_registry)
+        else:
+            context['defaultbackend_image'] = \
+                "{}/defaultbackend-{}:1.5".format(backend_registry, context['arch'])
+
+    # Render the ingress daemon set controller manifest
+    context['ssl_chain_completion'] = config.get(
+        'ingress-ssl-chain-completion')
+    context['enable_ssl_passthrough'] = config.get(
+        'ingress-ssl-passthrough')
+    context['default_ssl_certificate_option'] = None
+    if config.get('ingress-default-ssl-certificate') and config.get(
+            'ingress-default-ssl-key'):
+        # Cert/key are shipped base64-encoded inside a k8s secret.
+        context['default_ssl_certificate'] = b64encode(
+            config.get('ingress-default-ssl-certificate').encode(
+                'utf-8')).decode('utf-8')
+        context['default_ssl_key'] = b64encode(
+            config.get('ingress-default-ssl-key').encode('utf-8')).decode(
+                'utf-8')
+        default_certificate_option = (
+            '- --default-ssl-certificate='
+            '$(POD_NAMESPACE)/default-ssl-certificate')
+        context['default_ssl_certificate_option'] = default_certificate_option
+    context['ingress_image'] = config.get('nginx-image')
+    if context['ingress_image'] == "" or context['ingress_image'] == "auto":
+        if context['arch'] == 'ppc64el':
+            # multi-arch image doesn't include ppc64le, have to use an older version
+            context['ingress_uid'] = '33'
+            context['ingress_image'] = '/'.join([
+                registry_location or 'quay.io',
+                'kubernetes-ingress-controller/nginx-ingress-controller-ppc64le:0.20.0',
+            ])
+        else:
+            context['ingress_uid'] = '101'
+            context['ingress_image'] = '/'.join([
+                registry_location or 'us.gcr.io',
+                'k8s-artifacts-prod/ingress-nginx/controller:v0.45.0',
+            ])
+
+    # Pick manifest apiVersions compatible with the installed kubelet.
+    kubelet_version = get_version('kubelet')
+    if kubelet_version < (1, 9):
+        context['daemonset_api_version'] = 'extensions/v1beta1'
+        context['deployment_api_version'] = 'extensions/v1beta1'
+    elif kubelet_version < (1, 16):
+        context['daemonset_api_version'] = 'apps/v1beta2'
+        context['deployment_api_version'] = 'extensions/v1beta1'
+    else:
+        context['daemonset_api_version'] = 'apps/v1'
+        context['deployment_api_version'] = 'apps/v1'
+    context['use_forwarded_headers'] = "true" if config.get(
+        "ingress-use-forwarded-headers") else "false"
+
+    manifest = addon_path.format('ingress-daemon-set.yaml')
+    render('ingress-daemon-set.yaml', manifest, context)
+    hookenv.log('Creating the ingress daemon set.')
+    try:
+        kubectl('apply', '-f', manifest)
+    except CalledProcessError as e:
+        hookenv.log(e)
+        hookenv.log('Failed to create ingress controller. Will attempt again next update.')  # noqa
+        hookenv.close_port(80)
+        hookenv.close_port(443)
+        return
+
+    # Render the default http backend (404) deployment manifest
+    # needs to happen after ingress-daemon-set since that sets up the namespace
+    manifest = addon_path.format('default-http-backend.yaml')
+    render('default-http-backend.yaml', manifest, context)
+    hookenv.log('Creating the default http backend.')
+    try:
+        kubectl('apply', '-f', manifest)
+    except CalledProcessError as e:
+        hookenv.log(e)
+        hookenv.log('Failed to create default-http-backend. Will attempt again next update.')  # noqa
+        hookenv.close_port(80)
+        hookenv.close_port(443)
+        return
+
+    set_state('kubernetes-worker.ingress.available')
+    hookenv.open_port(80)
+    hookenv.open_port(443)
+
+
+@when('kubernetes-worker.config.created',
+ 'kubernetes-worker.ingress.available')
+@when_not('kubernetes-worker.ingress.enabled')
+def disable_ingress():
+ hookenv.log('Deleting the http backend and ingress.')
+ hookenv.close_port(80)
+ hookenv.close_port(443)
+ try:
+ kubectl('delete', '--ignore-not-found', '-f',
+ '/root/cdk/addons/default-http-backend.yaml')
+ kubectl('delete', '--ignore-not-found', '-f',
+ '/root/cdk/addons/ingress-daemon-set.yaml')
+ except CalledProcessError:
+ traceback.print_exc()
+ hookenv.log('Failed to disable ingress, waiting to retry')
+ return
+ remove_state('kubernetes-worker.ingress.available')
+
+
+def restart_unit_services():
+ '''Restart worker services.'''
+ hookenv.log('Restarting kubelet and kube-proxy.')
+ services = ['kube-proxy', 'kubelet']
+ for service in services:
+ service_restart('snap.%s.daemon' % service)
+
+
+def get_kube_api_servers(kube_api):
+ '''Return the kubernetes api server address and port for this
+ relationship.'''
+ hosts = []
+ # Iterate over every service from the relation object.
+ for service in kube_api.services():
+ for unit in service['hosts']:
+ hosts.append('https://{0}:{1}'.format(unit['hostname'],
+ unit['port']))
+ return hosts
+
+
+@when('kubernetes-worker.config.created')
+@when('nrpe-external-master.available')
+@when('kube-api-endpoint.available')
+@when('kube-control.auth.available')
+@when_any('config.changed.nagios_context',
+          'config.changed.nagios_servicegroups',
+          'nrpe-external-master.reconfigure')
+def update_nrpe_config():
+    '''Install/refresh NRPE checks for the worker services plus the
+    node-registration plugin, and (when credentials are cached in the
+    unit kv) write a kubeconfig for the nagios user.'''
+    services = ['snap.{}.daemon'.format(s) for s in worker_services]
+    data = render('nagios_plugin.py', context={'node_name': get_node_name()})
+    plugin_path = install_nagios_plugin_from_text(data,
+                                                  'check_k8s_worker.py')
+    hostname = nrpe.get_nagios_hostname()
+    current_unit = nrpe.get_nagios_unit_name()
+    nrpe_setup = nrpe.NRPE(hostname=hostname)
+    nrpe_setup.add_check("node",
+                         "Node registered with API Server",
+                         str(plugin_path))
+    nrpe.add_init_service_checks(nrpe_setup, services, current_unit)
+    nrpe_setup.write()
+
+    creds = db.get('credentials')
+    if creds:
+        kube_api = endpoint_from_flag('kube-api-endpoint.available')
+        servers = get_kube_api_servers(kube_api)
+        # Spread units across the available API servers.
+        server = servers[get_unit_number() % len(servers)]
+        create_kubeconfig(nrpe_kubeconfig_path, server, ca_crt_path,
+                          token=creds['client_token'], user='nagios')
+        # Make sure Nagios dirs are the correct permissions.
+        cmd = ['chown', '-R', 'nagios:nagios']
+        for p in ['/var/lib/nagios/', os.path.dirname(nrpe_kubeconfig_path)]:
+            if os.path.exists(p):
+                check_call(cmd + [p])
+
+    remove_state('nrpe-external-master.reconfigure')
+    set_state('nrpe-external-master.initial-config')
+
+
+@when_not('nrpe-external-master.available')
+@when('nrpe-external-master.initial-config')
+def remove_nrpe_config():
+    '''Remove all NRPE checks and the nagios plugin once the
+    nrpe-external-master relation has gone away.'''
+    remove_state('nrpe-external-master.initial-config')
+    remove_nagios_plugin('check_k8s_worker.py')
+
+    # The current nrpe-external-master interface doesn't handle a lot of logic,
+    # use the charm-helpers code for now.
+    hostname = nrpe.get_nagios_hostname()
+    nrpe_setup = nrpe.NRPE(hostname=hostname)
+
+    for service in worker_services:
+        nrpe_setup.remove_check(shortname=service)
+    nrpe_setup.remove_check(shortname='node')
+
+
+@when('nvidia.ready')
+@when('kubernetes-worker.config.created')
+@when_not('kubernetes-worker.gpu.enabled')
+def enable_gpu():
+    """Enable GPU usage on this node.
+
+    Labels the node gpu/cuda=true and flags a service restart; requires
+    snap channel >= 1.9/stable and a reachable NVIDIA driver.
+    """
+    if get_version('kubelet') < (1, 9):
+        hookenv.status_set(
+            'active',
+            'Upgrade to snap channel >= 1.9/stable to enable GPU support.'
+        )
+        return
+
+    hookenv.log('Enabling gpu mode')
+    try:
+        # Not sure why this is necessary, but if you don't run this, k8s will
+        # think that the node has 0 gpus (as shown by the output of
+        # `kubectl get nodes -o yaml`)
+        check_call(['nvidia-smi'])
+    except CalledProcessError as cpe:
+        # Leave the gpu.enabled flag unset so this retries next hook.
+        hookenv.log('Unable to communicate with the NVIDIA driver.')
+        hookenv.log(cpe)
+        return
+
+    set_label('gpu', 'true')
+    set_label('cuda', 'true')
+
+    set_state('kubernetes-worker.gpu.enabled')
+    set_state('kubernetes-worker.restart-needed')
+
+
+@when('kubernetes-worker.gpu.enabled')
+@when_not('nvidia.ready')
+@when_not('kubernetes-worker.restart-needed')
+def nvidia_departed():
+    """Cuda departed: clear the GPU labels/flag and schedule a restart."""
+    disable_gpu()
+    remove_state('kubernetes-worker.gpu.enabled')
+    set_state('kubernetes-worker.restart-needed')
+
+
+def disable_gpu():
+ """Disable GPU usage on this node.
+
+ """
+ hookenv.log('Disabling gpu mode')
+
+ # Remove node labels
+ remove_label('gpu')
+ remove_label('cuda')
+
+
+@when('kubernetes-worker.gpu.enabled')
+@when('kube-control.connected')
+def notify_master_gpu_enabled(kube_control):
+    """Notify kubernetes-master that we're gpu-enabled via the
+    kube-control relation."""
+    kube_control.set_gpu(True)
+
+
+@when_not('kubernetes-worker.gpu.enabled')
+@when('kube-control.connected')
+def notify_master_gpu_not_enabled(kube_control):
+    """Notify kubernetes-master that we're not gpu-enabled via the
+    kube-control relation."""
+    kube_control.set_gpu(False)
+
+
+@when('kube-control.connected')
+def request_kubelet_and_proxy_credentials(kube_control):
+    """ Request kubelet node authorization with a well formed kubelet user.
+    This also implies that we are requesting kube-proxy auth. """
+
+    # The kube-control interface is created to support RBAC.
+    # At this point we might as well do the right thing and return the hostname
+    # even if it will only be used when we enable RBAC
+    nodeuser = 'system:node:{}'.format(get_node_name().lower())
+    kube_control.set_auth_request(nodeuser)
+
+
+@when('kube-control.connected')
+def catch_change_in_creds(kube_control):
+    """Request a service restart in case credential updates were detected."""
+    nodeuser = 'system:node:{}'.format(get_node_name().lower())
+    creds = kube_control.get_auth_credentials(nodeuser)
+    if creds and creds['user'] == nodeuser:
+        # We need to cache the credentials here because if the
+        # master changes (master leader dies and replaced by a new one)
+        # the new master will have no recollection of our certs.
+        db.set('credentials', creds)
+        set_state('worker.auth.bootstrapped')
+        # Only schedule a restart when the creds actually changed.
+        if data_changed('kube-control.creds', creds):
+            set_state('kubernetes-worker.restart-needed')
+
+
+@when_not('kube-control.connected')
+def missing_kube_control():
+    """Inform the operator they need to add the kube-control relation.
+
+    If deploying via bundle this won't happen, but if operator is upgrading
+    a charm in a deployment that pre-dates the kube-control relation, it'll
+    be missing.
+    """
+    try:
+        goal_state = hookenv.goal_state()
+    except NotImplementedError:
+        # Older Juju controllers do not support goal-state.
+        goal_state = {}
+
+    if 'kube-control' in goal_state.get('relations', {}):
+        hookenv.status_set(
+            'waiting',
+            'Waiting for kubernetes-master to become ready')
+    else:
+        hookenv.status_set(
+            'blocked',
+            'Relate {}:kube-control kubernetes-master:kube-control'.format(
+                hookenv.service_name()))
+
+
+def _systemctl_is_active(application):
+ ''' Poll systemctl to determine if the application is running '''
+ cmd = ['systemctl', 'is-active', application]
+ try:
+ raw = check_output(cmd)
+ return b'active' in raw
+ except Exception:
+ return False
+
+
+class ApplyNodeLabelFailed(Exception):
+    """Raised when a kubectl node-label add/remove keeps failing past
+    the retry window (see persistent_call)."""
+    pass
+
+
+def persistent_call(cmd, retry_message):
+ deadline = time.time() + 180
+ while time.time() < deadline:
+ code = subprocess.call(cmd)
+ if code == 0:
+ return True
+ hookenv.log(retry_message)
+ time.sleep(1)
+ else:
+ return False
+
+
+def set_label(label, value):
+ nodename = get_node_name()
+ cmd = 'kubectl --kubeconfig={0} label node {1} {2}={3} --overwrite'
+ cmd = cmd.format(kubeconfig_path, nodename, label, value)
+ cmd = cmd.split()
+ retry = 'Failed to apply label %s=%s. Will retry.' % (label, value)
+ if not persistent_call(cmd, retry):
+ raise ApplyNodeLabelFailed(retry)
+
+
+def remove_label(label):
+ nodename = get_node_name()
+ cmd = 'kubectl --kubeconfig={0} label node {1} {2}-'
+ cmd = cmd.format(kubeconfig_path, nodename, label)
+ cmd = cmd.split()
+ retry = 'Failed to remove label {0}. Will retry.'.format(label)
+ if not persistent_call(cmd, retry):
+ raise ApplyNodeLabelFailed(retry)
+
+
+@when_any('endpoint.aws.joined',
+          'endpoint.gcp.joined',
+          'endpoint.openstack.joined',
+          'endpoint.vsphere.joined',
+          'endpoint.azure.joined')
+@when_not('kubernetes-worker.cloud.ready')
+def set_cloud_pending():
+    '''Mark cloud integration pending, or blocked when the installed
+    kubelet is too old for the joined cloud (vsphere needs 1.12+,
+    azure needs 1.11+).'''
+    k8s_version = get_version('kubelet')
+    k8s_1_11 = k8s_version >= (1, 11)
+    k8s_1_12 = k8s_version >= (1, 12)
+    vsphere_joined = is_state('endpoint.vsphere.joined')
+    azure_joined = is_state('endpoint.azure.joined')
+    if (vsphere_joined and not k8s_1_12) or (azure_joined and not k8s_1_11):
+        set_state('kubernetes-worker.cloud.blocked')
+    else:
+        remove_state('kubernetes-worker.cloud.blocked')
+    set_state('kubernetes-worker.cloud.pending')
+
+
+@when_any('endpoint.aws.joined',
+          'endpoint.gcp.joined',
+          'endpoint.azure.joined')
+@when('kube-control.cluster_tag.available')
+@when_not('kubernetes-worker.cloud.request-sent')
+def request_integration():
+    '''Ask the joined cloud integrator charm to tag/label this instance
+    with the cluster tag and enable the cloud features the worker
+    needs. Runs once per unit (guarded by cloud.request-sent).'''
+    hookenv.status_set('maintenance', 'requesting cloud integration')
+    kube_control = endpoint_from_flag('kube-control.cluster_tag.available')
+    cluster_tag = kube_control.get_cluster_tag()
+    if is_state('endpoint.aws.joined'):
+        cloud = endpoint_from_flag('endpoint.aws.joined')
+        cloud.tag_instance({
+            'kubernetes.io/cluster/{}'.format(cluster_tag): 'owned',
+        })
+        cloud.tag_instance_security_group({
+            'kubernetes.io/cluster/{}'.format(cluster_tag): 'owned',
+        })
+        cloud.tag_instance_subnet({
+            'kubernetes.io/cluster/{}'.format(cluster_tag): 'owned',
+        })
+        cloud.enable_object_storage_management(['kubernetes-*'])
+    elif is_state('endpoint.gcp.joined'):
+        cloud = endpoint_from_flag('endpoint.gcp.joined')
+        cloud.label_instance({
+            'k8s-io-cluster-name': cluster_tag,
+        })
+        cloud.enable_object_storage_management()
+    elif is_state('endpoint.azure.joined'):
+        cloud = endpoint_from_flag('endpoint.azure.joined')
+        cloud.tag_instance({
+            'k8s-io-cluster-name': cluster_tag,
+        })
+        cloud.enable_object_storage_management()
+    # Common to all clouds selected above.
+    cloud.enable_instance_inspection()
+    cloud.enable_dns_management()
+    set_state('kubernetes-worker.cloud.request-sent')
+    hookenv.status_set('waiting', 'Waiting for cloud integration')
+
+
+@when_none('endpoint.aws.joined',
+ 'endpoint.gcp.joined',
+ 'endpoint.openstack.joined',
+ 'endpoint.vsphere.joined',
+ 'endpoint.azure.joined')
+@when_any('kubernetes-worker.cloud.pending',
+ 'kubernetes-worker.cloud.request-sent',
+ 'kubernetes-worker.cloud.blocked',
+ 'kubernetes-worker.cloud.ready')
+def clear_cloud_flags():
+ remove_state('kubernetes-worker.cloud.pending')
+ remove_state('kubernetes-worker.cloud.request-sent')
+ remove_state('kubernetes-worker.cloud.blocked')
+ remove_state('kubernetes-worker.cloud.ready')
+ set_state('kubernetes-worker.restart-needed') # force restart
+
+
+@when_any('endpoint.aws.ready',
+          'endpoint.gcp.ready',
+          'endpoint.openstack.ready',
+          'endpoint.vsphere.ready',
+          'endpoint.azure.ready')
+@when_not('kubernetes-worker.cloud.blocked',
+          'kubernetes-worker.cloud.ready')
+def cloud_ready():
+    '''Finalize cloud integration: write any cloud-specific kubelet
+    snap config and flag a service restart.'''
+    remove_state('kubernetes-worker.cloud.pending')
+    if is_state('endpoint.gcp.ready'):
+        write_gcp_snap_config('kubelet')
+    elif is_state('endpoint.azure.ready'):
+        write_azure_snap_config('kubelet')
+    set_state('kubernetes-worker.cloud.ready')
+    set_state('kubernetes-worker.restart-needed')  # force restart
+
+
+def get_first_mount(mount_relation):
+ mount_relation_list = mount_relation.mounts()
+ if mount_relation_list and len(mount_relation_list) > 0:
+ # mount relation list is a list of the mount layer relations
+ # for now we just use the first one that is nfs
+ for mount in mount_relation_list:
+ # for now we just check the first mount and use that.
+ # the nfs charm only supports one for now.
+ if ('mounts' in mount and
+ mount['mounts'][0]['fstype'] == 'nfs'):
+ return mount['mounts'][0]
+ return None
+
+
+@when('nfs.available')
+def nfs_state_control(mount):
+    ''' Determine if we should remove the state that controls the re-render
+    and execution of the nfs-relation-changed event because there
+    are changes in the relationship data, and we should re-render any
+    configs '''
+
+    mount_data = get_first_mount(mount)
+    if mount_data:
+        # Only these four fields participate in change detection.
+        nfs_relation_data = {
+            'options': mount_data['options'],
+            'host': mount_data['hostname'],
+            'mountpoint': mount_data['mountpoint'],
+            'fstype': mount_data['fstype']
+        }
+
+        # Re-execute the rendering if the data has changed.
+        if data_changed('nfs-config', nfs_relation_data):
+            hookenv.log('reconfiguring nfs')
+            remove_state('nfs.configured')
+
+
+@when('nfs.available')
+@when_not('nfs.configured')
+def nfs_storage(mount):
+    '''NFS on kubernetes requires nfs config rendered into a deployment of
+    the nfs client provisioner. That will handle the persistent volume claims
+    with no persistent volume to back them.'''
+
+    mount_data = get_first_mount(mount)
+    if not mount_data:
+        return
+
+    # If present, use the configured registry to define the nfs image location.
+    registry_location = get_registry_location()
+    if registry_location:
+        mount_data['registry'] = registry_location
+
+    addon_path = '/root/cdk/addons/{}'
+    # Render the NFS deployment
+    manifest = addon_path.format('nfs-provisioner.yaml')
+    render('nfs-provisioner.yaml', manifest, mount_data)
+    hookenv.log('Creating the nfs provisioner.')
+    try:
+        kubectl('apply', '-f', manifest)
+    except CalledProcessError as e:
+        # Leave nfs.configured unset so this handler retries later.
+        hookenv.log(e)
+        hookenv.log('Failed to create nfs provisioner. Will attempt again next update.')  # noqa
+        return
+
+    set_state('nfs.configured')
+
+
+@when('kube-control.registry_location.available')
+def update_registry_location():
+    """Handle changes to the container image registry.
+
+    Monitor the image registry location. If it changes, manage flags to ensure
+    our image-related handlers will be invoked with an accurate registry.
+    """
+    registry_location = get_registry_location()
+
+    if registry_location:
+        runtime = endpoint_from_flag('endpoint.container-runtime.available')
+        if runtime:
+            # Construct and send the sandbox image (pause container) to our runtime
+            uri = '{}/pause:3.4.1'.format(registry_location)
+            runtime.set_config(
+                sandbox_image=uri
+            )
+
+    if data_changed('registry-location', registry_location):
+        # Force re-render of everything that embeds the registry.
+        remove_state('kubernetes-worker.config.created')
+        remove_state('kubernetes-worker.ingress.available')
+        remove_state('nfs.configured')
+        set_state('kubernetes-worker.restart-needed')
+
+
+def get_registry_location():
+ """Get the image registry from the kube-control relation.
+
+ If an image-registry has been configured on the k8s-master, it will be set
+ set on the kube-control relation. This function returns that value stripped
+ of any trailing slash. If the relation or registry location are missing,
+ this returns an empty string.
+ """
+ kube_control = endpoint_from_flag(
+ 'kube-control.registry_location.available')
+ if kube_control:
+ rel_registry = kube_control.get_registry_location()
+ registry = rel_registry.rstrip('/') if rel_registry else ""
+ else:
+ registry = ""
+
+ return registry
+
+
+def configure_default_cni():
+    """Set the default CNI configuration to be used by CNI clients
+    (kubelet, containerd).
+
+    CNI clients choose whichever CNI config in /etc/cni/net.d/ is
+    alphabetically first, so we accomplish this by creating a file named
+    /etc/cni/net.d/05-default.conflist, which is alphabetically earlier than
+    typical CNI config names, e.g. 10-flannel.conflist and 10-calico.conflist
+
+    The created 05-default.conflist file is a symlink to whichever CNI config
+    is actually going to be used.
+    """
+    # Clean up current default
+    cni_conf_dir = '/etc/cni/net.d'
+    for filename in os.listdir(cni_conf_dir):
+        if filename.startswith('05-default.'):
+            os.remove(cni_conf_dir + '/' + filename)
+
+    # Set new default
+    kube_control = endpoint_from_flag('kube-control.default_cni.available')
+    default_cni = kube_control.get_default_cni()
+    cni = endpoint_from_flag('cni.available')
+    cni_conf = cni.get_config(default=default_cni)
+    source = cni_conf['cni-conf-file']
+    # Keep the source file's extension (.conf / .conflist) on the link.
+    dest = cni_conf_dir + '/' + '05-default.' + source.split('.')[-1]
+    os.symlink(source, dest)
+
+
+@when('ingress-proxy.available')
+def configure_ingress_proxy(ingress_proxy):
+    '''Publish port 80 on the ingress-proxy relation.'''
+    ingress_proxy.configure(port='80')
diff --git a/kubernetes-worker/reactive/leadership.py b/kubernetes-worker/reactive/leadership.py
new file mode 100644
index 0000000..29c6f3a
--- /dev/null
+++ b/kubernetes-worker/reactive/leadership.py
@@ -0,0 +1,68 @@
+# Copyright 2015-2016 Canonical Ltd.
+#
+# This file is part of the Leadership Layer for Juju.
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 3, as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranties of
+# MERCHANTABILITY, SATISFACTORY QUALITY, or FITNESS FOR A PARTICULAR
+# PURPOSE. See the GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see .
+
+from charmhelpers.core import hookenv
+from charmhelpers.core import unitdata
+
+from charms import reactive
+from charms.leadership import leader_get, leader_set
+
+
+__all__ = ['leader_get', 'leader_set'] # Backwards compatibility
+
+
+def initialize_leadership_state():
+    '''Initialize leadership.* states from the hook environment.
+
+    Invoked by hookenv.atstart() so states are available in
+    @hook decorated handlers.
+    '''
+    is_leader = hookenv.is_leader()
+    if is_leader:
+        hookenv.log('Initializing Leadership Layer (is leader)')
+    else:
+        hookenv.log('Initializing Leadership Layer (is follower)')
+
+    reactive.helpers.toggle_state('leadership.is_leader', is_leader)
+
+    previous = unitdata.kv().getrange('leadership.settings.', strip=True)
+    current = hookenv.leader_get()
+
+    # Handle deletions.  A key seen last hook but absent now is mapped
+    # to None so the 'changed' flag fires and kv records the deletion.
+    for key in set(previous.keys()) - set(current.keys()):
+        current[key] = None
+
+    any_changed = False
+    for key, value in current.items():
+        reactive.helpers.toggle_state('leadership.changed.{}'.format(key),
+                                      value != previous.get(key))
+        if value != previous.get(key):
+            any_changed = True
+        reactive.helpers.toggle_state('leadership.set.{}'.format(key),
+                                      value is not None)
+    reactive.helpers.toggle_state('leadership.changed', any_changed)
+
+    unitdata.kv().update(current, prefix='leadership.settings.')
+
+
+# Per https://github.com/juju-solutions/charms.reactive/issues/33,
+# this module may be imported multiple times so ensure the
+# initialization hook is only registered once. I have to piggy back
+# onto the namespace of a module imported before reactive discovery
+# to do this (the guard attribute lives on the reactive module object,
+# which is shared across imports).
+if not hasattr(reactive, '_leadership_registered'):
+    hookenv.atstart(initialize_leadership_state)
+    reactive._leadership_registered = True
diff --git a/kubernetes-worker/reactive/snap.py b/kubernetes-worker/reactive/snap.py
new file mode 100644
index 0000000..1fda7b7
--- /dev/null
+++ b/kubernetes-worker/reactive/snap.py
@@ -0,0 +1,349 @@
+# Copyright 2016-2019 Canonical Ltd.
+#
+# This file is part of the Snap layer for Juju.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""
+charms.reactive helpers for dealing with Snap packages.
+"""
+from collections import OrderedDict
+from distutils.version import LooseVersion
+import os.path
+from os import uname
+import shutil
+import subprocess
+from textwrap import dedent
+import time
+from urllib.request import urlretrieve
+
+from charmhelpers.core import hookenv, host
+from charmhelpers.core.hookenv import ERROR
+from charmhelpers.core.host import write_file
+from charms import layer
+from charms import reactive
+from charms.layer import snap
+from charms.reactive import register_trigger, when, when_not, toggle_flag
+from charms.reactive.helpers import data_changed
+
+
+class UnsatisfiedMinimumVersionError(Exception):
+ def __init__(self, desired, actual):
+ super().__init__()
+ self.desired = desired
+ self.actual = actual
+
+ def __str__(self):
+ return "Could not install snapd >= {0.desired}, got {0.actual}".format(self)
+
+
+class InvalidBundleError(Exception):
+    # Error type for invalid snap bundles; the raise sites are outside
+    # this chunk, so the exact trigger conditions live elsewhere.
+    pass
+
+
+def sorted_snap_opts():
+ opts = layer.options("snap")
+ opts = sorted(opts.items(), key=lambda item: item[0] != "core")
+ opts = OrderedDict(opts)
+ return opts
+
+
+def install():
+    '''Install every snap declared in layer options (core first), then
+    (re)connect interfaces when the option set changed.'''
+    # Do nothing if we don't have kernel support yet
+    if not kernel_supported():
+        return
+
+    opts = sorted_snap_opts()
+    # supported-architectures is EXPERIMENTAL and undocumented.
+    # It probably should live in the base layer, blocking the charm
+    # during bootstrap if the arch is unsupported.
+    arch = uname().machine
+    for snapname, snap_opts in opts.items():
+        supported_archs = snap_opts.pop("supported-architectures", None)
+        if supported_archs and arch not in supported_archs:
+            # Note that this does *not* error. The charm will need to
+            # cope with the snaps it requested never getting installed,
+            # likely by doing its own check on supported-architectures.
+            hookenv.log(
+                "Snap {} not supported on {!r} architecture" "".format(snapname, arch),
+                ERROR,
+            )
+            continue
+        installed_flag = "snap.installed.{}".format(snapname)
+        if not reactive.is_flag_set(installed_flag):
+            snap.install(snapname, **snap_opts)
+    if data_changed("snap.install.opts", opts):
+        snap.connect_all()
+
+
+def check_refresh_available():
+ # Do nothing if we don't have kernel support yet
+ if not kernel_supported():
+ return
+
+ available_refreshes = snap.get_available_refreshes()
+ for snapname in snap.get_installed_snaps():
+ toggle_flag(snap.get_refresh_available_flag(snapname), snapname in available_refreshes)
+
+
+def refresh():
+ # Do nothing if we don't have kernel support yet
+ if not kernel_supported():
+ return
+
+ opts = sorted_snap_opts()
+ # supported-architectures is EXPERIMENTAL and undocumented.
+ # It probably should live in the base layer, blocking the charm
+ # during bootstrap if the arch is unsupported.
+ arch = uname()[4]
+ check_refresh_available()
+ for snapname, snap_opts in opts.items():
+ supported_archs = snap_opts.pop("supported-architectures", None)
+ if supported_archs and arch not in supported_archs:
+ continue
+ snap.refresh(snapname, **snap_opts)
+ snap.connect_all()
+
+
+@reactive.hook("upgrade-charm")
+def upgrade_charm():
+    # Refresh all managed snaps whenever the charm itself is upgraded.
+    refresh()
+
+
+def get_series():
+    """Return the Ubuntu release codename (e.g. 'trusty') via lsb_release."""
+    return subprocess.check_output(["lsb_release", "-sc"], universal_newlines=True).strip()
+
+
+def snapd_supported():
+ # snaps are not supported in trusty lxc containers.
+ if get_series() == "trusty" and host.is_container():
+ return False
+ return True # For all other cases, assume true.
+
+
+def kernel_supported():
+ kernel_version = uname().release
+
+ if LooseVersion(kernel_version) < LooseVersion("4.4"):
+ hookenv.log(
+ "Snaps do not work on kernel {}, a reboot "
+ "into a supported kernel (>4.4) is required"
+ "".format(kernel_version)
+ )
+ return False
+ return True
+
+
+def ensure_snapd():
+    '''Make sure snapd (plus container workarounds) is installed,
+    raising when this environment cannot support snaps at all.'''
+    if not snapd_supported():
+        hookenv.log("Snaps do not work in this environment", hookenv.ERROR)
+        raise Exception("Snaps do not work in this environment")
+
+    # I don't use the apt layer, because that would tie this layer
+    # too closely to apt packaging. Perhaps this is a snap-only system.
+    if not shutil.which("snap"):
+        os.environ["DEBIAN_FRONTEND"] = "noninteractive"
+        cmd = ["apt-get", "install", "-y", "snapd"]
+        # LP:1699986: Force install of systemd on Trusty.
+        if get_series() == "trusty":
+            cmd.append("systemd")
+        subprocess.check_call(cmd, universal_newlines=True)
+
+    # Work around lp:1628289. Remove this stanza once snapd depends
+    # on the necessary package and snaps work in lxd xenial containers
+    # without the workaround.
+    if host.is_container() and not shutil.which("squashfuse"):
+        os.environ["DEBIAN_FRONTEND"] = "noninteractive"
+        cmd = ["apt-get", "install", "-y", "squashfuse", "fuse"]
+        subprocess.check_call(cmd, universal_newlines=True)
+
+
+def proxy_settings():
+ proxy_vars = ("http_proxy", "https_proxy")
+ proxy_env = {key: value for key, value in os.environ.items() if key in proxy_vars}
+
+ snap_proxy = hookenv.config().get("snap_proxy")
+ if snap_proxy:
+ proxy_env["http_proxy"] = snap_proxy
+ proxy_env["https_proxy"] = snap_proxy
+ return proxy_env
+
+
+def update_snap_proxy():
+    '''Sync snapd's systemd proxy drop-in with the current proxy
+    settings, restarting snapd only when they actually changed.'''
+    # Do nothing if we don't have kernel support yet
+    if not kernel_supported():
+        return
+
+    # This is a hack based on
+    # https://bugs.launchpad.net/layer-snap/+bug/1533899/comments/1
+    # Do it properly when Bug #1533899 is addressed.
+    # Note we can't do this in a standard reactive handler as we need
+    # to ensure proxies are configured before attempting installs or
+    # updates.
+    proxy = proxy_settings()
+
+    override_dir = "/etc/systemd/system/snapd.service.d"
+    path = os.path.join(override_dir, "snap_layer_proxy.conf")
+    if not proxy and not os.path.exists(path):
+        return  # No proxy asked for and proxy never configured.
+
+    # It seems we cannot rely on this directory existing, so manually
+    # create it.
+    if not os.path.exists(override_dir):
+        host.mkdir(override_dir, perms=0o755)
+
+    if not data_changed("snap.proxy", proxy):
+        return  # Short circuit avoids unnecessary restarts.
+
+    if proxy:
+        create_snap_proxy_conf(path, proxy)
+    else:
+        remove_snap_proxy_conf(path)
+    subprocess.check_call(["systemctl", "daemon-reload"], universal_newlines=True)
+    # Give systemd a moment before bouncing snapd.
+    time.sleep(2)
+    subprocess.check_call(["systemctl", "restart", "snapd.service"], universal_newlines=True)
+
+
def create_snap_proxy_conf(path, proxy):
    """Write a systemd drop-in at path exporting the proxy mapping as
    Environment= lines for snapd."""
    host.mkdir(os.path.dirname(path))
    lines = ["# Managed by Juju", "[Service]"]
    for name, value in proxy.items():
        lines.append("Environment={}={}".format(name, value))
    body = "\n".join(lines) + "\n"
    host.write_file(path, body.encode())
+
+
def remove_snap_proxy_conf(path):
    """Delete the snapd proxy override file; a no-op when absent."""
    if not os.path.exists(path):
        return
    os.remove(path)
+
+
def ensure_path():
    """Append /snap/bin to $PATH if missing (Bug #1662856)."""
    entries = os.environ["PATH"].split(":")
    if "/snap/bin" not in entries:
        os.environ["PATH"] = os.environ["PATH"] + ":/snap/bin"
+
+
def _get_snapd_version():
    """Return the installed snapd version, parsed from `snap version`
    output (lines of "<component> <version>")."""
    output = subprocess.check_output(
        ["snap", "version"], stdin=subprocess.DEVNULL, universal_newlines=True
    )
    versions = {}
    for line in output.splitlines():
        component, version = line.split(None, 1)
        versions[component] = version
    return LooseVersion(versions["snapd"])
+
+
# Apt preferences stanza that de-prioritises the -proposed pocket so only
# explicitly requested packages (snapd, below) are pulled from it.
# Formatted with the Ubuntu series name, e.g. "xenial".
PREFERENCES = """\
Package: *
Pin: release a={}-proposed
Pin-Priority: 400
"""
+
+
def ensure_snapd_min_version(min_version):
    """Ensure snapd is at least min_version, upgrading it from the
    -proposed pocket when the installed version is too old.

    Raises UnsatisfiedMinimumVersionError if the required version still
    cannot be satisfied after the upgrade attempt.
    """
    snapd_version = _get_snapd_version()
    if snapd_version < LooseVersion(min_version):
        from charmhelpers.fetch import add_source, apt_update, apt_install

        # Temporary until LP:1735344 lands
        add_source("distro-proposed", fail_invalid=True)
        distro = get_series()
        # Pin -proposed low by default; packages must be requested from it
        # explicitly (see the apt_install below).
        write_file(
            "/etc/apt/preferences.d/proposed",
            PREFERENCES.format(distro),
        )
        apt_update()
        # explicitly install snapd from proposed
        apt_install("snapd/{}-proposed".format(distro))
        snapd_version = _get_snapd_version()
        if snapd_version < LooseVersion(min_version):
            # Fix: the original passed the bare name ERROR, which is not
            # defined in this module (everywhere else uses hookenv.ERROR)
            # and raised NameError instead of logging.
            hookenv.log("Failed to install snapd >= {}".format(min_version), hookenv.ERROR)
            raise UnsatisfiedMinimumVersionError(min_version, snapd_version)
+
+
def download_assertion_bundle(proxy_url):
    """Download proxy assertion bundle and store id"""
    endpoint = "{}/v2/auth/store/assertions".format(proxy_url)
    # urlretrieve saves the bundle to a temp file; the proxy reports its
    # store id in a response header.
    bundle_path, headers = urlretrieve(endpoint)
    return bundle_path, headers["X-Assertion-Store-Id"]
+
+
def configure_snap_store_proxy():
    """Point snapd at the snap store proxy named by the (deprecated)
    snap_proxy_url charm config, or revert to the default store when the
    config is cleared."""
    # Do nothing if we don't have kernel support yet
    if not kernel_supported():
        return

    if not reactive.is_flag_set("config.changed.snap_proxy_url"):
        return
    config = hookenv.config()
    if "snap_proxy_url" not in config:
        # The deprecated snap_proxy_url config items have been removed
        # from config.yaml. If the charm author hasn't added them back
        # explicitly, there is nothing to do. Juju is maintaining these
        # settings as model configuration.
        return
    snap_store_proxy_url = config.get("snap_proxy_url")
    if not snap_store_proxy_url and not config.previous("snap_proxy_url"):
        # Proxy url is not set, and was not set previous hook. Do nothing,
        # to avoid overwriting the Juju maintained setting.
        return
    # "snap set core proxy.store" needs snapd >= 2.30.
    ensure_snapd_min_version("2.30")
    if snap_store_proxy_url:
        bundle, store_id = download_assertion_bundle(snap_store_proxy_url)
        try:
            # Import the proxy's assertions so snapd trusts its store id.
            subprocess.check_output(
                ["snap", "ack", bundle],
                stdin=subprocess.DEVNULL,
                universal_newlines=True,
            )
        except subprocess.CalledProcessError as e:
            raise InvalidBundleError("snapd could not ack the proxy assertion: " + e.output)
    else:
        # An empty store id reverts snapd to the default store.
        store_id = ""

    try:
        subprocess.check_output(
            ["snap", "set", "core", "proxy.store={}".format(store_id)],
            stdin=subprocess.DEVNULL,
            universal_newlines=True,
        )
    except subprocess.CalledProcessError as e:
        raise InvalidBundleError("Proxy ID from header did not match store assertion: " + e.output)
+
+
# Re-run change_snapd_refresh whenever the snapd_refresh config changes.
register_trigger(when="config.changed.snapd_refresh", clear_flag="snap.refresh.set")


@when_not("snap.refresh.set")
@when("snap.installed.core")
def change_snapd_refresh():
    """Set the system refresh.timer option"""
    # refresh.timer requires snapd >= 2.31.
    ensure_snapd_min_version("2.31")
    timer = hookenv.config()["snapd_refresh"]
    was_set = reactive.is_flag_set("snap.refresh.was-set")
    # Apply a configured timer, or clear a previously applied one; when
    # neither holds there is nothing to change on the system.
    if timer or was_set:
        snap.set_refresh_timer(timer)
    # Remember whether a timer is currently applied so it can be cleared
    # if the config is later emptied, then mark this config handled.
    reactive.toggle_flag("snap.refresh.was-set", timer)
    reactive.set_flag("snap.refresh.set")
+
+
# Bootstrap. We don't use standard reactive handlers to ensure that
# everything is bootstrapped before any charm handlers are run.
# Order matters: snapd must be present and its proxy/store settings
# applied before any snap installs are attempted.
hookenv.atstart(hookenv.log, "Initializing Snap Layer")
hookenv.atstart(ensure_snapd)
hookenv.atstart(ensure_path)
hookenv.atstart(update_snap_proxy)
hookenv.atstart(configure_snap_store_proxy)
hookenv.atstart(install)
diff --git a/kubernetes-worker/reactive/tls_client.py b/kubernetes-worker/reactive/tls_client.py
new file mode 100644
index 0000000..afa2228
--- /dev/null
+++ b/kubernetes-worker/reactive/tls_client.py
@@ -0,0 +1,208 @@
+import os
+
+from pathlib import Path
+from subprocess import check_call
+
+from charms import layer
+from charms.reactive import hook
+from charms.reactive import set_state, remove_state
+from charms.reactive import when
+from charms.reactive import set_flag, clear_flag
+from charms.reactive import endpoint_from_flag
+from charms.reactive.helpers import data_changed
+
+from charmhelpers.core import hookenv, unitdata
+from charmhelpers.core.hookenv import log
+
+
@when('certificates.ca.available')
def store_ca(tls):
    '''Read the certificate authority from the relation object and install
    the ca on this system.'''
    # Get the CA from the relationship object.
    certificate_authority = tls.get_ca()
    if certificate_authority:
        layer_options = layer.options('tls-client')
        ca_path = layer_options.get('ca_certificate_path')
        # data_changed records the value, so call it unconditionally.
        changed = data_changed('certificate_authority', certificate_authority)
        if ca_path:
            if changed or not os.path.exists(ca_path):
                log('Writing CA certificate to {0}'.format(ca_path))
                # ensure we have a newline at the end of the certificate.
                # some things will blow up without one.
                # See https://bugs.launchpad.net/charm-kubernetes-master/+bug/1828034
                if not certificate_authority.endswith('\n'):
                    certificate_authority += '\n'
                _write_file(ca_path, certificate_authority)
                set_state('tls_client.ca.written')
            # .saved is re-set every run (upgrade-charm clears it).
            set_state('tls_client.ca.saved')
        if changed:
            # Update /etc/ssl/certs and generate ca-certificates.crt
            install_ca(certificate_authority)
+
+
@when('certificates.server.cert.available')
def store_server(tls):
    '''Read the server certificate and server key from the relation object
    and save them to the certificate directory.'''
    server_cert, server_key = tls.get_server_cert()
    chain = tls.get_chain()
    if chain:
        # Append the chain so consumers receive the full bundle.
        server_cert = server_cert + '\n' + chain
    if server_cert and server_key:
        layer_options = layer.options('tls-client')
        cert_path = layer_options.get('server_certificate_path')
        key_path = layer_options.get('server_key_path')
        # data_changed records values, so call both unconditionally.
        cert_changed = data_changed('server_certificate', server_cert)
        key_changed = data_changed('server_key', server_key)
        if cert_path:
            if cert_changed or not os.path.exists(cert_path):
                log('Writing server certificate to {0}'.format(cert_path))
                _write_file(cert_path, server_cert)
                set_state('tls_client.server.certificate.written')
            # .saved is re-set every run (upgrade-charm clears it).
            set_state('tls_client.server.certificate.saved')
        if key_path:
            if key_changed or not os.path.exists(key_path):
                log('Writing server key to {0}'.format(key_path))
                _write_file(key_path, server_key)
            set_state('tls_client.server.key.saved')
+
+
@when('certificates.client.cert.available')
def store_client(tls):
    '''Read the client certificate and client key from the relation object
    and copy them to the certificate directory.'''
    client_cert, client_key = tls.get_client_cert()
    chain = tls.get_chain()
    if chain:
        # Append the chain so consumers receive the full bundle.
        client_cert = client_cert + '\n' + chain
    if client_cert and client_key:
        layer_options = layer.options('tls-client')
        cert_path = layer_options.get('client_certificate_path')
        key_path = layer_options.get('client_key_path')
        # data_changed records values, so call both unconditionally.
        cert_changed = data_changed('client_certificate', client_cert)
        key_changed = data_changed('client_key', client_key)
        if cert_path:
            if cert_changed or not os.path.exists(cert_path):
                log('Writing client certificate to {0}'.format(cert_path))
                _write_file(cert_path, client_cert)
                set_state('tls_client.client.certificate.written')
            # .saved is re-set every run (upgrade-charm clears it).
            set_state('tls_client.client.certificate.saved')
        if key_path:
            if key_changed or not os.path.exists(key_path):
                log('Writing client key to {0}'.format(key_path))
                _write_file(key_path, client_key)
            set_state('tls_client.client.key.saved')
+
+
@when('certificates.certs.changed')
def update_certs():
    """Write any server/client certificates whose contents changed to the
    paths registered in unitdata, raising per-cert reactive flags so the
    charm can restart the affected services."""
    tls = endpoint_from_flag('certificates.certs.changed')
    # Paths registered by the charm: {'server'|'client': {common_name: {'crt': ..., 'key': ...}}}
    certs_paths = unitdata.kv().get('layer.tls-client.cert-paths', {})
    all_ready = True
    any_changed = False
    maps = {
        'server': tls.server_certs_map,
        'client': tls.client_certs_map,
    }

    if maps.get('client') == {}:
        log(
            'No client certs found using maps. Checking for global \
            client certificates.',
            'WARNING'
        )
        # Check for global certs,
        # Backwards compatibility https://bugs.launchpad.net/charm-kubernetes-master/+bug/1825819
        cert_pair = tls.get_client_cert()
        if cert_pair is not None:
            # Reuse the single global pair for every registered client name.
            for client_name in certs_paths.get('client', {}).keys():
                maps.get('client').update({
                    client_name: cert_pair
                })

    chain = tls.get_chain()
    for cert_type in ('server', 'client'):
        for common_name, paths in certs_paths.get(cert_type, {}).items():
            cert_pair = maps[cert_type].get(common_name)
            if not cert_pair:
                # Request not yet fulfilled by the CA.
                all_ready = False
                continue
            if not data_changed('layer.tls-client.'
                                '{}.{}'.format(cert_type, common_name), cert_pair):
                continue

            cert = None
            key = None
            # Older interface versions hand back a bare (cert, key) tuple;
            # newer ones return an object with .cert/.key attributes.
            if type(cert_pair) is not tuple:
                if paths['crt']:
                    cert = cert_pair.cert
                if paths['key']:
                    key = cert_pair.key
            else:
                cert, key = cert_pair

            if cert:
                if chain:
                    cert = cert + '\n' + chain
                _ensure_directory(paths['crt'])
                Path(paths['crt']).write_text(cert)

            if key:
                _ensure_directory(paths['key'])
                Path(paths['key']).write_text(key)

            any_changed = True
            # clear flags first to ensure they are re-triggered if left set
            clear_flag('tls_client.{}.certs.changed'.format(cert_type))
            clear_flag('tls_client.{}.cert.{}.changed'.format(cert_type,
                                                              common_name))
            set_flag('tls_client.{}.certs.changed'.format(cert_type))
            set_flag('tls_client.{}.cert.{}.changed'.format(cert_type,
                                                            common_name))
    if all_ready:
        set_flag('tls_client.certs.saved')
    if any_changed:
        # Bounce the aggregate flag so its handlers re-trigger.
        clear_flag('tls_client.certs.changed')
        set_flag('tls_client.certs.changed')
    # Acknowledge the interface's trigger flag.
    clear_flag('certificates.certs.changed')
+
+
def install_ca(certificate_authority):
    '''Install a certificate authority on the system by calling the
    update-ca-certificates command.'''
    if certificate_authority:
        # One CA file per Juju application, named after the service.
        name = hookenv.service_name()
        # Create a path to install CAs on Debian systems.
        ca_path = '/usr/local/share/ca-certificates/{0}.crt'.format(name)
        log('Writing CA certificate to {0}'.format(ca_path))
        _write_file(ca_path, certificate_authority)
        # Update the trusted CAs on this system (a time expensive operation).
        check_call(['update-ca-certificates'])
        log('Generated ca-certificates.crt for {0}'.format(name))
        set_state('tls_client.ca_installed')
+
+
@hook('upgrade-charm')
def remove_states():
    """Clear the .saved states on upgrade so the store_* handlers
    re-verify and re-write certificates for the new charm revision."""
    for state in ('tls_client.ca.saved',
                  'tls_client.server.certificate.saved',
                  'tls_client.server.key.saved',
                  'tls_client.client.certificate.saved',
                  'tls_client.client.key.saved'):
        remove_state(state)
+
+
+def _ensure_directory(path):
+ '''Ensure the parent directory exists creating directories if necessary.'''
+ directory = os.path.dirname(path)
+ if not os.path.isdir(directory):
+ os.makedirs(directory)
+ os.chmod(directory, 0o770)
+
+
def _write_file(path, content):
    '''Write content to path, creating parent directories as needed and
    locking the file down to mode 0o440.'''
    _ensure_directory(path)
    with open(path, 'w') as fh:
        fh.write(content)
    # Keys and certificates must not be world-readable.
    os.chmod(path, 0o440)
diff --git a/kubernetes-worker/registry-configmap.yaml b/kubernetes-worker/registry-configmap.yaml
new file mode 100644
index 0000000..b34c736
--- /dev/null
+++ b/kubernetes-worker/registry-configmap.yaml
@@ -0,0 +1,7 @@
+apiVersion: v1
+data:
+ proxy-body-size: 1024m
+kind: ConfigMap
+metadata:
+ name: nginx-configuration
+ namespace: ingress-nginx-kubernetes-worker
diff --git a/kubernetes-worker/requirements.txt b/kubernetes-worker/requirements.txt
new file mode 100644
index 0000000..55543d9
--- /dev/null
+++ b/kubernetes-worker/requirements.txt
@@ -0,0 +1,3 @@
+mock
+flake8
+pytest
diff --git a/kubernetes-worker/revision b/kubernetes-worker/revision
new file mode 100644
index 0000000..c227083
--- /dev/null
+++ b/kubernetes-worker/revision
@@ -0,0 +1 @@
+0
\ No newline at end of file
diff --git a/kubernetes-worker/script/bootstrap b/kubernetes-worker/script/bootstrap
new file mode 100644
index 0000000..b69771c
--- /dev/null
+++ b/kubernetes-worker/script/bootstrap
@@ -0,0 +1,8 @@
#!/bin/bash
# Install build prerequisites: docker (for building resources), the charm
# snap, and yq (used by script/upload to parse `charm push` output).

set -x

sudo apt update
sudo apt install -qyf docker.io
sudo snap install charm --classic
sudo snap install yq
diff --git a/kubernetes-worker/script/build b/kubernetes-worker/script/build
new file mode 100644
index 0000000..6bbbc48
--- /dev/null
+++ b/kubernetes-worker/script/build
@@ -0,0 +1,7 @@
#!/bin/bash
# Build the charm into $CHARM_BUILD_DIR (default /tmp/charms).
set -x

export PATH=/snap/bin:$PATH
# Default the build directory if the caller did not set it.
: "${CHARM_BUILD_DIR:=/tmp/charms}"

charm build -r --force -o "$CHARM_BUILD_DIR"
diff --git a/kubernetes-worker/script/upload b/kubernetes-worker/script/upload
new file mode 100644
index 0000000..548195b
--- /dev/null
+++ b/kubernetes-worker/script/upload
@@ -0,0 +1,50 @@
#!/bin/bash
# Push the built charm to the charm store, attach its resources, and
# optionally release it to $CHANNEL.
# Required env: NAMESPACE, CHARM. Optional: CHANNEL, CHARM_BUILD_DIR.
set -x

export PATH=/snap/bin:$PATH

: "${CHARM_BUILD_DIR:=/tmp/charms}"

# Bail out early when there is no charm store session.
charm whoami
RET=$?
if ((RET > 0)); then
    echo "Not logged into charmstore"
    exit 1
fi

function generate::attachments
{
    # Build and attach the CNI tarball resources.
    ./build-cni-resources.sh
    for resource in *.tgz; do
        charm attach cs:~"$NAMESPACE"/"$CHARM" --channel unpublished "${resource%.*}"="$resource"
    done

    # Snap resources are fetched from the snap store at deploy time, so
    # attach zero-byte placeholders to satisfy the resource schema.
    # Fix: quote the array expansion so element names can never be
    # word-split or glob-expanded.
    snap_placeholders=(core kubectl kubelet kube-proxy)
    mkdir -p placeholders
    for snap in "${snap_placeholders[@]}"; do
        touch placeholders/"$snap.snap"
        charm attach cs:~"$NAMESPACE"/"$CHARM" --channel unpublished "$snap"=placeholders/"$snap".snap
    done
}

function generate::resource::argument
{
    # Emit "--resource <name>-<revision>" pairs for every attached resource.
    py_script="
import sys
import json
resources_json = json.load(sys.stdin)
resource_map = []
for item in resources_json:
    resource_map.append(f\"--resource {item['Name']}-{item['Revision']}\")

print(' '.join(resource_map))
"
    charm list-resources cs:~"$NAMESPACE"/"$CHARM" --channel unpublished --format json | env python3 -c "$py_script"
}

URL=$(charm push "$CHARM_BUILD_DIR"/builds/"$CHARM"/. cs:~"$NAMESPACE"/"$CHARM" | yq r - url)
generate::attachments

if [ "$CHANNEL" != unpublished ]; then
    # Intentionally unquoted: the generated argument list must word-split.
    charm release "$URL" --channel "$CHANNEL" $(generate::resource::argument)
fi
diff --git a/kubernetes-worker/setup.py b/kubernetes-worker/setup.py
new file mode 100755
index 0000000..b30bff5
--- /dev/null
+++ b/kubernetes-worker/setup.py
@@ -0,0 +1,28 @@
#!/usr/bin/env python

import os
from setuptools import setup

# Directory containing this setup.py; used to locate the README.
here = os.path.abspath(os.path.dirname(__file__))

with open(os.path.join(here, "README.md")) as f:
    README = f.read()

# NOTE(review): the metadata (name, description, url) says "layer_snap"
# but this file lives in the kubernetes-worker charm — it looks copied
# from the layer-snap project; confirm it is intentional.
# NOTE(review): setuptools expects dotted package names ("charms.layer");
# verify "charms/layer" actually installs as intended.
setup(
    name="layer_snap",
    version="1.0.0",
    description="layer_snap",
    long_description=README,
    license="Apache License 2.0",
    classifiers=[
        "Development Status :: 5 - Production/Stable",
        "Intended Audience :: Developers",
        "Programming Language :: Python :: 3",
    ],
    url="https://git.launchpad.net/layer-snap",
    package_dir={"": "lib"},
    packages=["charms/layer"],
    include_package_data=True,
    zip_safe=False,
    install_requires=["charmhelpers", "charms.reactive"],
)
diff --git a/kubernetes-worker/templates/cdk-service-kicker b/kubernetes-worker/templates/cdk-service-kicker
new file mode 100644
index 0000000..26d3740
--- /dev/null
+++ b/kubernetes-worker/templates/cdk-service-kicker
@@ -0,0 +1,34 @@
#!/bin/sh
set -eu

# This service runs on boot to work around issues relating to LXD and snapd.

# Workaround for https://github.com/conjure-up/conjure-up/issues/1448
if [ -f '/proc/1/environ' ] && grep -q '^container=lxc' /proc/1/environ; then
    echo "lxc detected, applying snapd apparmor profiles"
    # Best-effort in a subshell with set +e: a profile-load failure must
    # not abort this script, which runs under set -e.
    (set +e
    apparmor_parser /var/lib/snapd/apparmor/profiles/*
    echo "apparmor_parser: exit status $?"
    )
else
    echo "lxc not detected, skipping snapd apparmor profiles"
fi

# Workaround for https://github.com/juju-solutions/bundle-canonical-kubernetes/issues/357
# Space-separated service list rendered in by the charm template.
services="{{services}}"

# Keep nudging services for 10 minutes after boot.
deadline="$(expr "$(date +%s)" + 600)"

while [ "$(date +%s)" -lt "$deadline" ]; do
    for service in $services; do
        echo "$service: checking"
        if ! systemctl is-active "$service"; then
            echo "$service: not active, restarting"
            systemctl restart "$service" || true
        fi
    done

    sleep 10
done

echo "deadline has passed, exiting gracefully"
diff --git a/kubernetes-worker/templates/cdk-service-kicker.service b/kubernetes-worker/templates/cdk-service-kicker.service
new file mode 100644
index 0000000..5c2105e
--- /dev/null
+++ b/kubernetes-worker/templates/cdk-service-kicker.service
@@ -0,0 +1,10 @@
+[Unit]
+Description=cdk-service-kicker
+
+[Service]
+ExecStart=/usr/bin/cdk-service-kicker
+Restart=on-failure
+Type=simple
+
+[Install]
+WantedBy=multi-user.target
diff --git a/kubernetes-worker/templates/default-http-backend.yaml b/kubernetes-worker/templates/default-http-backend.yaml
new file mode 100644
index 0000000..4f1969f
--- /dev/null
+++ b/kubernetes-worker/templates/default-http-backend.yaml
@@ -0,0 +1,62 @@
+apiVersion: {{ deployment_api_version }}
+kind: Deployment
+metadata:
+ name: default-http-backend-{{ juju_application }}
+ labels:
+ app.kubernetes.io/name: default-http-backend-{{ juju_application }}
+ app.kubernetes.io/part-of: ingress-nginx-{{ juju_application }}
+ cdk-{{ juju_application }}-ingress: "true"
+ cdk-restart-on-ca-change: "true"
+ namespace: ingress-nginx-{{ juju_application }}
+spec:
+ replicas: 1
+ selector:
+ matchLabels:
+ app.kubernetes.io/name: default-http-backend-{{ juju_application }}
+ app.kubernetes.io/part-of: ingress-nginx-{{ juju_application }}
+ template:
+ metadata:
+ labels:
+ app.kubernetes.io/name: default-http-backend-{{ juju_application }}
+ app.kubernetes.io/part-of: ingress-nginx-{{ juju_application }}
+ spec:
+ terminationGracePeriodSeconds: 60
+ containers:
+ - name: default-http-backend-{{ juju_application }}
+ # Any image is permissible as long as:
+ # 1. It serves a 404 page at /
+ # 2. It serves 200 on a /healthz endpoint
+ image: {{ defaultbackend_image }}
+ livenessProbe:
+ httpGet:
+ path: /healthz
+ port: 8080
+ scheme: HTTP
+ initialDelaySeconds: 30
+ timeoutSeconds: 5
+ ports:
+ - containerPort: 8080
+ resources:
+ limits:
+ cpu: 10m
+ memory: 20Mi
+ requests:
+ cpu: 10m
+ memory: 20Mi
+---
+apiVersion: v1
+kind: Service
+metadata:
+ name: default-http-backend-{{ juju_application }}
+ namespace: ingress-nginx-{{ juju_application }}
+ labels:
+ app.kubernetes.io/name: default-http-backend-{{ juju_application }}
+ app.kubernetes.io/part-of: ingress-nginx-{{ juju_application }}
+ cdk-{{ juju_application }}-ingress: "true"
+spec:
+ ports:
+ - port: 80
+ targetPort: 8080
+ selector:
+ app.kubernetes.io/name: default-http-backend-{{ juju_application }}
+ app.kubernetes.io/part-of: ingress-nginx-{{ juju_application }}
diff --git a/kubernetes-worker/templates/ingress-daemon-set.yaml b/kubernetes-worker/templates/ingress-daemon-set.yaml
new file mode 100644
index 0000000..646856b
--- /dev/null
+++ b/kubernetes-worker/templates/ingress-daemon-set.yaml
@@ -0,0 +1,300 @@
+apiVersion: v1
+kind: Namespace
+metadata:
+ name: ingress-nginx-{{ juju_application }}
+ labels:
+ cdk-{{ juju_application }}-ingress: "true"
+
+{%- if default_ssl_certificate_option %}
+---
+kind: Secret
+apiVersion: v1
+type: Opaque
+metadata:
+ name: default-ssl-certificate
+ namespace: ingress-nginx-{{ juju_application }}
+ labels:
+ app.kubernetes.io/name: ingress-nginx-{{ juju_application }}
+ app.kubernetes.io/part-of: ingress-nginx-{{ juju_application }}
+ cdk-{{ juju_application }}-ingress: "true"
+data:
+ tls.crt: {{ default_ssl_certificate }}
+ tls.key: {{ default_ssl_key }}
+{%- endif %}
+
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: nginx-configuration
+ namespace: ingress-nginx-{{ juju_application }}
+ labels:
+ app.kubernetes.io/name: ingress-nginx-{{ juju_application }}
+ app.kubernetes.io/part-of: ingress-nginx-{{ juju_application }}
+ cdk-{{ juju_application }}-ingress: "true"
+data:
+ use-forwarded-headers: "{{ use_forwarded_headers }}"
+
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: tcp-services
+ namespace: ingress-nginx-{{ juju_application }}
+ labels:
+ app.kubernetes.io/name: ingress-nginx-{{ juju_application }}
+ app.kubernetes.io/part-of: ingress-nginx-{{ juju_application }}
+ cdk-{{ juju_application }}-ingress: "true"
+
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: udp-services
+ namespace: ingress-nginx-{{ juju_application }}
+ labels:
+ app.kubernetes.io/name: ingress-nginx-{{ juju_application }}
+ app.kubernetes.io/part-of: ingress-nginx-{{ juju_application }}
+ cdk-{{ juju_application }}-ingress: "true"
+
+---
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ name: nginx-ingress-serviceaccount-{{ juju_application }}
+ namespace: ingress-nginx-{{ juju_application }}
+ labels:
+ app.kubernetes.io/name: ingress-nginx-{{ juju_application }}
+ app.kubernetes.io/part-of: ingress-nginx-{{ juju_application }}
+ cdk-{{ juju_application }}-ingress: "true"
+
+---
+apiVersion: rbac.authorization.k8s.io/v1beta1
+kind: ClusterRole
+metadata:
+ name: nginx-ingress-clusterrole-{{ juju_application }}
+ labels:
+ app.kubernetes.io/name: ingress-nginx-{{ juju_application }}
+ app.kubernetes.io/part-of: ingress-nginx-{{ juju_application }}
+ cdk-{{ juju_application }}-ingress: "true"
+rules:
+ - apiGroups:
+ - ""
+ resources:
+ - configmaps
+ - endpoints
+ - nodes
+ - pods
+ - secrets
+ verbs:
+ - list
+ - watch
+ - apiGroups:
+ - ""
+ resources:
+ - nodes
+ verbs:
+ - get
+ - apiGroups:
+ - ""
+ resources:
+ - services
+ verbs:
+ - get
+ - list
+ - watch
+ - apiGroups:
+ - ""
+ resources:
+ - events
+ verbs:
+ - create
+ - patch
+ - apiGroups:
+ - "extensions"
+ - "networking.k8s.io"
+ resources:
+ - ingresses
+ verbs:
+ - get
+ - list
+ - watch
+ - apiGroups:
+ - "extensions"
+ - "networking.k8s.io"
+ resources:
+ - ingresses/status
+ verbs:
+ - update
+---
+apiVersion: rbac.authorization.k8s.io/v1beta1
+kind: Role
+metadata:
+ name: nginx-ingress-role-{{ juju_application }}
+ namespace: ingress-nginx-{{ juju_application }}
+ labels:
+ app.kubernetes.io/name: ingress-nginx-{{ juju_application }}
+ app.kubernetes.io/part-of: ingress-nginx-{{ juju_application }}
+ cdk-{{ juju_application }}-ingress: "true"
+rules:
+ - apiGroups:
+ - ""
+ resources:
+ - configmaps
+ - pods
+ - secrets
+ - namespaces
+ verbs:
+ - get
+ - apiGroups:
+ - ""
+ resources:
+ - configmaps
+ resourceNames:
+ # Defaults to "-"
+ # Here: "-"
+ # This has to be adapted if you change either parameter
+ # when launching the nginx-ingress-controller.
+ - "ingress-controller-leader-nginx"
+ verbs:
+ - get
+ - update
+ - apiGroups:
+ - ""
+ resources:
+ - configmaps
+ verbs:
+ - create
+ - apiGroups:
+ - ""
+ resources:
+ - endpoints
+ verbs:
+ - get
+
+---
+apiVersion: rbac.authorization.k8s.io/v1beta1
+kind: RoleBinding
+metadata:
+ name: nginx-ingress-role-nisa-binding-{{ juju_application }}
+ namespace: ingress-nginx-{{ juju_application }}
+ labels:
+ app.kubernetes.io/name: ingress-nginx-{{ juju_application }}
+ app.kubernetes.io/part-of: ingress-nginx-{{ juju_application }}
+ cdk-{{ juju_application }}-ingress: "true"
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: Role
+ name: nginx-ingress-role-{{ juju_application }}
+subjects:
+ - kind: ServiceAccount
+ name: nginx-ingress-serviceaccount-{{ juju_application }}
+ namespace: ingress-nginx-{{ juju_application }}
+
+---
+apiVersion: rbac.authorization.k8s.io/v1beta1
+kind: ClusterRoleBinding
+metadata:
+ name: nginx-ingress-clusterrole-nisa-binding-{{ juju_application }}
+ labels:
+ app.kubernetes.io/name: ingress-nginx-{{ juju_application }}
+ app.kubernetes.io/part-of: ingress-nginx-{{ juju_application }}
+ cdk-{{ juju_application }}-ingress: "true"
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: nginx-ingress-clusterrole-{{ juju_application }}
+subjects:
+ - kind: ServiceAccount
+ name: nginx-ingress-serviceaccount-{{ juju_application }}
+ namespace: ingress-nginx-{{ juju_application }}
+
+---
+apiVersion: {{ daemonset_api_version }}
+kind: DaemonSet
+metadata:
+ name: nginx-ingress-controller-{{ juju_application }}
+ namespace: ingress-nginx-{{ juju_application }}
+ labels:
+ app.kubernetes.io/name: ingress-nginx-{{ juju_application }}
+ app.kubernetes.io/part-of: ingress-nginx-{{ juju_application }}
+ juju-application: nginx-ingress-{{ juju_application }}
+ cdk-{{ juju_application }}-ingress: "true"
+ cdk-restart-on-ca-change: "true"
+spec:
+ selector:
+ matchLabels:
+ app.kubernetes.io/name: ingress-nginx-{{ juju_application }}
+ app.kubernetes.io/part-of: ingress-nginx-{{ juju_application }}
+ template:
+ metadata:
+ labels:
+ app.kubernetes.io/name: ingress-nginx-{{ juju_application }}
+ app.kubernetes.io/part-of: ingress-nginx-{{ juju_application }}
+ annotations:
+ prometheus.io/port: "10254"
+ prometheus.io/scrape: "true"
+ spec:
+ serviceAccountName: nginx-ingress-serviceaccount-{{ juju_application }}
+ nodeSelector:
+ juju-application: {{ juju_application }}
+ terminationGracePeriodSeconds: 60
+ # hostPort doesn't work with CNI, so we have to use hostNetwork instead
+ # see https://github.com/kubernetes/kubernetes/issues/23920
+ hostNetwork: true
+ containers:
      - name: nginx-ingress-controller-{{ juju_application }}
+ image: {{ ingress_image }}
+ args:
+ - /nginx-ingress-controller
+ - --configmap=$(POD_NAMESPACE)/nginx-configuration
+ - --tcp-services-configmap=$(POD_NAMESPACE)/tcp-services
+ - --udp-services-configmap=$(POD_NAMESPACE)/udp-services
+ - --annotations-prefix=nginx.ingress.kubernetes.io
+ - --enable-ssl-chain-completion={{ ssl_chain_completion }}
+ - --enable-ssl-passthrough={{ enable_ssl_passthrough }}
+{%- if default_ssl_certificate_option %}
+ {{ default_ssl_certificate_option }}
+{%- endif %}
+ securityContext:
+ capabilities:
+ drop:
+ - ALL
+ add:
+ - NET_BIND_SERVICE
+ runAsUser: {{ ingress_uid }}
+ env:
+ - name: POD_NAME
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.name
+ - name: POD_NAMESPACE
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.namespace
+ ports:
+ - name: http
+ containerPort: 80
+ - name: https
+ containerPort: 443
+ livenessProbe:
+ failureThreshold: 3
+ httpGet:
+ path: /healthz
+ port: 10254
+ scheme: HTTP
+ initialDelaySeconds: 10
+ periodSeconds: 10
+ successThreshold: 1
+ timeoutSeconds: 1
+ readinessProbe:
+ failureThreshold: 3
+ httpGet:
+ path: /healthz
+ port: 10254
+ scheme: HTTP
+ periodSeconds: 10
+ successThreshold: 1
+ timeoutSeconds: 1
+
+---
diff --git a/kubernetes-worker/templates/microbot-example.yaml b/kubernetes-worker/templates/microbot-example.yaml
new file mode 100644
index 0000000..c89be53
--- /dev/null
+++ b/kubernetes-worker/templates/microbot-example.yaml
@@ -0,0 +1,66 @@
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ creationTimestamp: null
+ labels:
+ app: microbot
+ name: microbot
+spec:
+ replicas: {{ replicas }}
+ selector:
+ matchLabels:
+ app: microbot
+ strategy: {}
+ template:
+ metadata:
+ creationTimestamp: null
+ labels:
+ app: microbot
+ spec:
+ containers:
+ - image: {{ registry|default("docker.io") }}/cdkbot/microbot-{{ arch }}:latest
+ imagePullPolicy: ""
+ name: microbot
+ ports:
+ - containerPort: 80
+ livenessProbe:
+ httpGet:
+ path: /
+ port: 80
+ initialDelaySeconds: 5
+ timeoutSeconds: 30
+ resources: {}
+ restartPolicy: Always
+ serviceAccountName: ""
+status: {}
+---
+apiVersion: v1
+kind: Service
+metadata:
+ name: microbot
+ labels:
+ app: microbot
+spec:
+ ports:
+ - port: 80
+ protocol: TCP
+ targetPort: 80
+ selector:
+ app: microbot
+---
+apiVersion: networking.k8s.io/v1
+kind: Ingress
+metadata:
+ name: microbot-ingress
+spec:
+ rules:
+ - host: microbot.{{ public_address }}.nip.io
+ http:
+ paths:
+ - path: /
+ pathType: Prefix
+ backend:
+ service:
+ name: microbot
+ port:
+ number: 80
diff --git a/kubernetes-worker/templates/nagios_plugin.py b/kubernetes-worker/templates/nagios_plugin.py
new file mode 100644
index 0000000..958b4ed
--- /dev/null
+++ b/kubernetes-worker/templates/nagios_plugin.py
@@ -0,0 +1,93 @@
+#!/usr/bin/env python3
+
+# Copyright (C) 2019 Canonical Ltd.
+
+import nagios_plugin3
+import yaml
+from subprocess import check_output, CalledProcessError, PIPE
+
# Snaps whose presence is verified on this worker node.
snap_resources = ['kubectl', 'kubelet', 'kube-proxy']


def check_snaps_installed():
    """Confirm the snaps are installed, raise an error if not"""
    for snap_name in snap_resources:
        try:
            check_output(['snap', 'list', snap_name]).decode('UTF-8')
        except Exception:
            raise nagios_plugin3.CriticalError(
                '{} snap is not installed'.format(snap_name))
+
+
def check_node(node):
    """Inspect a node's status conditions and raise a nagios error when
    any is in an unexpected state.

    node: parsed `kubectl get no <name> -o yaml` mapping.
    Raises nagios_plugin3.CriticalError for a not-Ready node or a missing
    condition, nagios_plugin3.WarnError for resource-pressure conditions.
    """
    # Note: Keep the Ready check first since all checks will fail when not Ready
    checks = [{'name': 'Ready',
               'expected': 'True',
               'type': 'error',
               'error': 'Node Not Ready'},
              {'name': 'MemoryPressure',
               'expected': 'False',
               'type': 'warn',
               'error': 'Memory Pressure'},
              {'name': 'DiskPressure',
               'expected': 'False',
               'type': 'warn',
               'error': 'Disk Pressure'},
              {'name': 'PIDPressure',
               'expected': 'False',
               'type': 'warn',
               'error': 'PID Pressure'},
              ]
    msg = []
    error = False
    for check in checks:
        # find the status that matches
        for s in node['status']['conditions']:
            if s['type'] == check['name']:
                # does it match expectations? If not, toss it on the list
                # of errors so we don't show the first issue, but all.
                if s['status'].lower() != check['expected'].lower():
                    msg.append(check['error'])
                    if check['type'] == 'error':
                        error = True
                # Fix: always stop scanning once the matching condition is
                # handled. Previously only a passing condition broke out,
                # so a failing one fell through to the for-else below and
                # was misreported as "Unable to find status".
                break
        else:
            err_msg = 'Unable to find status for {}'.format(check['error'])
            raise nagios_plugin3.CriticalError(err_msg)

    if msg:
        # Fix: join the collected problems into one readable message
        # instead of passing the raw list to the exception.
        if error:
            raise nagios_plugin3.CriticalError(', '.join(msg))
        else:
            raise nagios_plugin3.WarnError(', '.join(msg))
+
+
def verify_node_registered_and_ready():
    """Fetch this node's object from the API server and run check_node
    on it.

    Raises nagios_plugin3.CriticalError when the node is not registered
    or kubectl output cannot be obtained/parsed.
    """
    node = None
    try:
        # {{node_name}} is substituted when the charm renders this template.
        cmd = [
            "/snap/bin/kubectl", "--kubeconfig", "/var/lib/nagios/.kube/config",
            "get", "no", "{{node_name}}", "-o=yaml"
        ]
        node = yaml.safe_load(check_output(cmd, stderr=PIPE))
    except CalledProcessError as e:
        err = e.stderr.decode('UTF-8')
        if "not found" in err:
            raise nagios_plugin3.CriticalError("Unable to find "
                                               "node registered on API server")
    # Any other kubectl failure (or empty/unparseable output) leaves node
    # unset and is reported generically here.
    if not node:
        raise nagios_plugin3.CriticalError("Unable to run kubectl "
                                           "and parse output")
    return check_node(node)
+
+
def main():
    """Run every probe via try_check and print the all-clear line."""
    for probe in (check_snaps_installed, verify_node_registered_and_ready):
        nagios_plugin3.try_check(probe)
    print("OK - No memory, disk, or PID pressure. Registered with API server")


if __name__ == "__main__":
    main()
diff --git a/kubernetes-worker/templates/nfs-provisioner.yaml b/kubernetes-worker/templates/nfs-provisioner.yaml
new file mode 100644
index 0000000..0f0c85c
--- /dev/null
+++ b/kubernetes-worker/templates/nfs-provisioner.yaml
@@ -0,0 +1,103 @@
+apiVersion: storage.k8s.io/v1
+kind: StorageClass
+metadata:
+ name: default
+ annotations:
+ storageclass.kubernetes.io/is-default-class: "true"
+provisioner: fuseim.pri/ifs
+---
+kind: Deployment
+apiVersion: apps/v1
+metadata:
+ name: nfs-client-provisioner
+ labels:
+ app: nfs-client-provisioner
+spec:
+ replicas: 1
+ selector:
+ matchLabels:
+ app: nfs-client-provisioner
+ strategy:
+ type: Recreate
+ template:
+ metadata:
+ labels:
+ app: nfs-client-provisioner
+ spec:
+ serviceAccountName: nfs-client-provisioner
+ containers:
+ - name: nfs-client-provisioner
+ image: {{registry|default('quay.io')}}/external_storage/nfs-client-provisioner:v3.1.0-k8s1.11
+ volumeMounts:
+ - name: nfs-client-root
+ mountPath: /persistentvolumes
+ env:
+ - name: PROVISIONER_NAME
+ value: fuseim.pri/ifs
+ - name: NFS_SERVER
+ value: {{ hostname }}
+ - name: NFS_PATH
+ value: {{ mountpoint }}
+ volumes:
+ - name: nfs-client-root
+ nfs:
+ server: {{ hostname }}
+ path: {{ mountpoint }}
+---
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ name: nfs-client-provisioner
+---
+kind: ClusterRole
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+ name: nfs-client-provisioner-runner
+rules:
+ - apiGroups: [""]
+ resources: ["persistentvolumes"]
+ verbs: ["get", "list", "watch", "create", "delete"]
+ - apiGroups: [""]
+ resources: ["persistentvolumeclaims"]
+ verbs: ["get", "list", "watch", "update"]
+ - apiGroups: ["storage.k8s.io"]
+ resources: ["storageclasses"]
+ verbs: ["get", "list", "watch"]
+ - apiGroups: [""]
+ resources: ["events"]
+ verbs: ["create", "update", "patch"]
+---
+kind: ClusterRoleBinding
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+ name: run-nfs-client-provisioner
+subjects:
+ - kind: ServiceAccount
+ name: nfs-client-provisioner
+ namespace: default
+roleRef:
+ kind: ClusterRole
+ name: nfs-client-provisioner-runner
+ apiGroup: rbac.authorization.k8s.io
+---
+kind: Role
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+ name: leader-locking-nfs-client-provisioner
+rules:
+ - apiGroups: [""]
+ resources: ["endpoints"]
+ verbs: ["get", "list", "watch", "create", "update", "patch"]
+---
+kind: RoleBinding
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+ name: leader-locking-nfs-client-provisioner
+subjects:
+ - kind: ServiceAccount
+ name: nfs-client-provisioner
+ namespace: default
+roleRef:
+ kind: Role
+ name: leader-locking-nfs-client-provisioner
+ apiGroup: rbac.authorization.k8s.io
diff --git a/kubernetes-worker/templates/registry.yaml b/kubernetes-worker/templates/registry.yaml
new file mode 100644
index 0000000..90c1ac7
--- /dev/null
+++ b/kubernetes-worker/templates/registry.yaml
@@ -0,0 +1,118 @@
+apiVersion: v1
+kind: Secret
+metadata:
+ name: registry-tls-data
+type: Opaque
+data:
+ tls.crt: {{ tlscert }}
+ tls.key: {{ tlskey }}
+---
+apiVersion: v1
+kind: Secret
+metadata:
+ name: registry-auth-data
+type: Opaque
+data:
+ htpasswd: {{ htpasswd }}
+---
+apiVersion: v1
+kind: ReplicationController
+metadata:
+ name: kube-registry-v0
+ labels:
+ k8s-app: kube-registry
+ version: v0
+ kubernetes.io/cluster-service: "true"
+spec:
+ replicas: 1
+ selector:
+ k8s-app: kube-registry
+ version: v0
+ template:
+ metadata:
+ labels:
+ k8s-app: kube-registry
+ version: v0
+ kubernetes.io/cluster-service: "true"
+ spec:
+ containers:
+ - name: registry
+ image: {{ registry|default("docker.io") }}/cdkbot/registry-{{ arch }}:2.6
+ resources:
+ # keep request = limit to keep this container in guaranteed class
+ limits:
+ cpu: 100m
+ memory: 100Mi
+ requests:
+ cpu: 100m
+ memory: 100Mi
+ env:
+ - name: REGISTRY_HTTP_ADDR
+ value: :5000
+ - name: REGISTRY_STORAGE_FILESYSTEM_ROOTDIRECTORY
+ value: /var/lib/registry
+ - name: REGISTRY_AUTH_HTPASSWD_REALM
+ value: basic_realm
+ - name: REGISTRY_AUTH_HTPASSWD_PATH
+ value: /auth/htpasswd
+ volumeMounts:
+ - name: image-store
+ mountPath: /var/lib/registry
+ - name: auth-dir
+ mountPath: /auth
+ ports:
+ - containerPort: 5000
+ name: registry
+ protocol: TCP
+ volumes:
+ - name: image-store
+ hostPath:
+ path: /srv/registry
+ - name: auth-dir
+ secret:
+ secretName: registry-auth-data
+---
+apiVersion: v1
+kind: Service
+metadata:
+ name: kube-registry
+ labels:
+ k8s-app: kube-registry
+ kubernetes.io/cluster-service: "true"
+ kubernetes.io/name: "KubeRegistry"
+spec:
+ selector:
+ k8s-app: kube-registry
+ type: LoadBalancer
+ ports:
+ - name: registry
+ port: 5000
+ protocol: TCP
+---
+apiVersion: v1
+kind: Secret
+metadata:
+ name: registry-access
+data:
+ .dockercfg: {{ dockercfg }}
+type: kubernetes.io/dockercfg
+{%- if ingress %}
+---
+apiVersion: extensions/v1beta1
+kind: Ingress
+metadata:
+ name: registry-ing
+spec:
+ tls:
+ - hosts:
+ - {{ domain }}
+ secretName: registry-tls-data
+ rules:
+ - host: {{ domain }}
+ http:
+ paths:
+ - backend:
+ serviceName: kube-registry
+ servicePort: 5000
+ path: /
+{% endif %}
diff --git a/kubernetes-worker/tox.ini b/kubernetes-worker/tox.ini
new file mode 100644
index 0000000..9350363
--- /dev/null
+++ b/kubernetes-worker/tox.ini
@@ -0,0 +1,18 @@
+[tox]
+skipsdist = True
+envlist=lint
+
+[flake8]
+max-complexity=10
+max-line-length=120
+ignore=E203,E402,W503,E231
+
+[testenv:lint]
+basepython=python3
+sitepackages=False
+deps=
+ flake8
+ black
+commands=
+ flake8 {posargs:lib/ reactive/}
+ black --line-length=120 {posargs:lib/ reactive/}
diff --git a/kubernetes-worker/version b/kubernetes-worker/version
new file mode 100644
index 0000000..1dea0b1
--- /dev/null
+++ b/kubernetes-worker/version
@@ -0,0 +1 @@
+e247aeff
\ No newline at end of file
diff --git a/kubernetes-worker/wheelhouse.txt b/kubernetes-worker/wheelhouse.txt
new file mode 100644
index 0000000..39d6a8c
--- /dev/null
+++ b/kubernetes-worker/wheelhouse.txt
@@ -0,0 +1,22 @@
+# layer:basic
+# pip is pinned to <19.0 to avoid https://github.com/pypa/pip/issues/6164
+# even when setuptools is installed before pip is upgraded, pip still ends up
+# seeing the older system-level setuptools if include_system_packages is true
+pip>=18.1,<19.0
+# pin Jinja2 and PyYAML to the last versions supporting python 3.4 for trusty
+Jinja2<=2.10.1
+PyYAML<=5.2
+setuptools<42
+setuptools-scm<=1.17.0
+charmhelpers>=0.4.0,<1.0.0
+charms.reactive>=0.1.0,<2.0.0
+wheel<0.34
+# pin netaddr to avoid pulling importlib-resources
+netaddr<=0.7.19
+
+# layer:snap
+tenacity
+
+# kubernetes-worker
+charms.templating.jinja2>=0.0.1,<2.0.0
+
diff --git a/kubernetes-worker/wheelhouse/Jinja2-2.10.1.tar.gz b/kubernetes-worker/wheelhouse/Jinja2-2.10.1.tar.gz
new file mode 100644
index 0000000..ffd1054
Binary files /dev/null and b/kubernetes-worker/wheelhouse/Jinja2-2.10.1.tar.gz differ
diff --git a/kubernetes-worker/wheelhouse/MarkupSafe-1.1.1.tar.gz b/kubernetes-worker/wheelhouse/MarkupSafe-1.1.1.tar.gz
new file mode 100644
index 0000000..a6dad8e
Binary files /dev/null and b/kubernetes-worker/wheelhouse/MarkupSafe-1.1.1.tar.gz differ
diff --git a/kubernetes-worker/wheelhouse/PyYAML-5.2.tar.gz b/kubernetes-worker/wheelhouse/PyYAML-5.2.tar.gz
new file mode 100644
index 0000000..666d12a
Binary files /dev/null and b/kubernetes-worker/wheelhouse/PyYAML-5.2.tar.gz differ
diff --git a/kubernetes-worker/wheelhouse/Tempita-0.5.2.tar.gz b/kubernetes-worker/wheelhouse/Tempita-0.5.2.tar.gz
new file mode 100644
index 0000000..755befc
Binary files /dev/null and b/kubernetes-worker/wheelhouse/Tempita-0.5.2.tar.gz differ
diff --git a/kubernetes-worker/wheelhouse/charmhelpers-0.20.21.tar.gz b/kubernetes-worker/wheelhouse/charmhelpers-0.20.21.tar.gz
new file mode 100644
index 0000000..ca65d07
Binary files /dev/null and b/kubernetes-worker/wheelhouse/charmhelpers-0.20.21.tar.gz differ
diff --git a/kubernetes-worker/wheelhouse/charms.reactive-1.4.1.tar.gz b/kubernetes-worker/wheelhouse/charms.reactive-1.4.1.tar.gz
new file mode 100644
index 0000000..03bc1fe
Binary files /dev/null and b/kubernetes-worker/wheelhouse/charms.reactive-1.4.1.tar.gz differ
diff --git a/kubernetes-worker/wheelhouse/charms.templating.jinja2-1.0.2.tar.gz b/kubernetes-worker/wheelhouse/charms.templating.jinja2-1.0.2.tar.gz
new file mode 100644
index 0000000..5c03a81
Binary files /dev/null and b/kubernetes-worker/wheelhouse/charms.templating.jinja2-1.0.2.tar.gz differ
diff --git a/kubernetes-worker/wheelhouse/netaddr-0.7.19.tar.gz b/kubernetes-worker/wheelhouse/netaddr-0.7.19.tar.gz
new file mode 100644
index 0000000..cc31d9d
Binary files /dev/null and b/kubernetes-worker/wheelhouse/netaddr-0.7.19.tar.gz differ
diff --git a/kubernetes-worker/wheelhouse/pbr-5.6.0.tar.gz b/kubernetes-worker/wheelhouse/pbr-5.6.0.tar.gz
new file mode 100644
index 0000000..0d5c965
Binary files /dev/null and b/kubernetes-worker/wheelhouse/pbr-5.6.0.tar.gz differ
diff --git a/kubernetes-worker/wheelhouse/pip-18.1.tar.gz b/kubernetes-worker/wheelhouse/pip-18.1.tar.gz
new file mode 100644
index 0000000..a18192d
Binary files /dev/null and b/kubernetes-worker/wheelhouse/pip-18.1.tar.gz differ
diff --git a/kubernetes-worker/wheelhouse/pyaml-20.4.0.tar.gz b/kubernetes-worker/wheelhouse/pyaml-20.4.0.tar.gz
new file mode 100644
index 0000000..0d5fd76
Binary files /dev/null and b/kubernetes-worker/wheelhouse/pyaml-20.4.0.tar.gz differ
diff --git a/kubernetes-worker/wheelhouse/setuptools-41.6.0.zip b/kubernetes-worker/wheelhouse/setuptools-41.6.0.zip
new file mode 100644
index 0000000..3345759
Binary files /dev/null and b/kubernetes-worker/wheelhouse/setuptools-41.6.0.zip differ
diff --git a/kubernetes-worker/wheelhouse/setuptools_scm-1.17.0.tar.gz b/kubernetes-worker/wheelhouse/setuptools_scm-1.17.0.tar.gz
new file mode 100644
index 0000000..43b16c7
Binary files /dev/null and b/kubernetes-worker/wheelhouse/setuptools_scm-1.17.0.tar.gz differ
diff --git a/kubernetes-worker/wheelhouse/six-1.15.0.tar.gz b/kubernetes-worker/wheelhouse/six-1.15.0.tar.gz
new file mode 100644
index 0000000..63329e4
Binary files /dev/null and b/kubernetes-worker/wheelhouse/six-1.15.0.tar.gz differ
diff --git a/kubernetes-worker/wheelhouse/tenacity-7.0.0.tar.gz b/kubernetes-worker/wheelhouse/tenacity-7.0.0.tar.gz
new file mode 100644
index 0000000..2050c4d
Binary files /dev/null and b/kubernetes-worker/wheelhouse/tenacity-7.0.0.tar.gz differ
diff --git a/kubernetes-worker/wheelhouse/wheel-0.33.6.tar.gz b/kubernetes-worker/wheelhouse/wheel-0.33.6.tar.gz
new file mode 100644
index 0000000..c922c4e
Binary files /dev/null and b/kubernetes-worker/wheelhouse/wheel-0.33.6.tar.gz differ