diff --git a/kubeapi-load-balancer.charm b/kubeapi-load-balancer.charm deleted file mode 100644 index 03b3358..0000000 Binary files a/kubeapi-load-balancer.charm and /dev/null differ diff --git a/kubeapi-load-balancer/.build.manifest b/kubeapi-load-balancer/.build.manifest new file mode 100644 index 0000000..5d5409f --- /dev/null +++ b/kubeapi-load-balancer/.build.manifest @@ -0,0 +1,976 @@ +{ + "layers": [ + { + "branch": "refs/heads/main\nrefs/heads/release_1.24", + "rev": "f491ebe32b503c9712d2f8cd602dcce18f4aab46", + "url": "layer:metrics" + }, + { + "branch": "refs/heads/main\nrefs/heads/release_1.24", + "rev": "fcdcea4e5de3e1556c24e6704607862d0ba00a56", + "url": "layer:options" + }, + { + "branch": "refs/heads/main\nrefs/heads/release_1.24", + "rev": "fb767dcf0786d1d5364199bb3b40bdc86518b45b", + "url": "layer:basic" + }, + { + "branch": "refs/heads/main\nrefs/heads/release_1.24", + "rev": "47dfcd4920ef6317850a4837ef0057ab0092a18e", + "url": "layer:nagios" + }, + { + "branch": "refs/heads/main\nrefs/heads/release_1.24", + "rev": "a7d7b6423db37a47611310039e6ed1929c0a2eab", + "url": "layer:status" + }, + { + "branch": "refs/heads/main\nrefs/heads/release_1.24", + "rev": "76bddfb640ab8767fc7e4a4b73a4a4e781948f34", + "url": "layer:apt" + }, + { + "branch": "refs/heads/main\nrefs/heads/release_1.24", + "rev": "672d27695b512e50f51777b1eb63c5ff157b3d9e", + "url": "layer:nginx" + }, + { + "branch": "refs/heads/main\nrefs/heads/release_1.24", + "rev": "527dd64fc4b9a6b0f8d80a3c2c0b865155050275", + "url": "layer:debug" + }, + { + "branch": "refs/heads/main\nrefs/heads/release_1.24", + "rev": "fb46dec78d390571753d21876bbba689bbbca9e4", + "url": "layer:tls-client" + }, + { + "branch": "refs/heads/release_1.24", + "rev": "b93fae0e73bb48074deb0062db204b621caa9f1f", + "url": "layer:kubernetes-common" + }, + { + "branch": "refs/heads/main\nrefs/heads/release_1.24", + "rev": "5b0926cdc45f511a0040b0b26f89bd174d5c81eb", + "url": "layer:hacluster" + }, + { + "branch": 
"refs/heads/main\nrefs/heads/release_1.24", + "rev": "cc5bd3f49b2fa5e6c3ab2336763c313ec8bf083f", + "url": "layer:leadership" + }, + { + "branch": "refs/heads/release_1.24", + "rev": "4db88333338916dac097568ad2610c3024320b05", + "url": "kubeapi-load-balancer" + }, + { + "branch": "refs/heads/main\nrefs/heads/release_1.24", + "rev": "95d744d1dbc4d86fb0462283c9371619bf5bbc24", + "url": "interface:nrpe-external-master" + }, + { + "branch": "refs/heads/main\nrefs/heads/release_1.24", + "rev": "632131b1f122daf6fb601fd4c9f1e4dbb1a92e09", + "url": "interface:http" + }, + { + "branch": "refs/heads/main\nrefs/heads/release_1.24", + "rev": "d9850016d930a6d507b9fd45e2598d327922b140", + "url": "interface:tls-certificates" + }, + { + "branch": "refs/heads/main\nrefs/heads/release_1.24", + "rev": "2b714e90b1b8845ce7390bb1dad5a56a65437907", + "url": "interface:hacluster" + }, + { + "branch": "refs/heads/main\nrefs/heads/release_1.24", + "rev": "5021f8a23f6e6e4cc449d2d02f2d8cb99763ec27", + "url": "interface:public-address" + } + ], + "signatures": { + ".build.manifest": [ + "build", + "dynamic", + "unchecked" + ], + ".github/workflows/main.yml": [ + "kubeapi-load-balancer", + "static", + "c457e9ca89018f53bd3b4e637bb8a7b5599e8748fd514547d4afd4137b908b0e" + ], + ".gitignore": [ + "kubeapi-load-balancer", + "static", + "58e67f82f991b0c2d359d93622964c7c4f963aff3f8e2b7224b69810606c6c42" + ], + "AUTHORS": [ + "layer:nginx", + "static", + "5e460cc5d7fe5ce6dc5c4e8eefc13159ee58874667baf9af3b5fa9b597a10fa2" + ], + "CONTRIBUTING.md": [ + "kubeapi-load-balancer", + "static", + "7155516596ae597b0b7065f0463ff69031d689c0fc565998b51c06d999129d5a" + ], + "LICENSE": [ + "kubeapi-load-balancer", + "static", + "58d1e17ffe5109a7ae296caafcadfdbe6a7d176f0bc4ab01e12a689b0499d8bd" + ], + "Makefile": [ + "layer:basic", + "static", + "b7ab3a34e5faf79b96a8632039a0ad0aa87f2a9b5f0ba604e007cafb22190301" + ], + "README.md": [ + "kubeapi-load-balancer", + "static", + 
"9efc40856c08af5871a051144d8c3bb518983a3bca118defbb81ad849b3f3c8d" + ], + "actions.yaml": [ + "layer:debug", + "dynamic", + "cea290e28bc78458ea4a56dcad39b9a880c67e4ba53b774ac46bd8778618c7b9" + ], + "actions/debug": [ + "layer:debug", + "static", + "db0a42dae4c5045b2c06385bf22209dfe0e2ded55822ef847d84b01d9ff2b046" + ], + "bin/charm-env": [ + "layer:basic", + "static", + "fb6a20fac4102a6a4b6ffe903fcf666998f9a95a3647e6f9af7a1eeb44e58fd5" + ], + "bin/layer_option": [ + "layer:options", + "static", + "e959bf29da4c5edff28b2602c24113c4df9e25cdc9f2aa3b5d46c8577b2a40cc" + ], + "config.yaml": [ + "kubeapi-load-balancer", + "dynamic", + "586a155cd5fb93090f379e3c1ec9d350b89d73c58ebad447b03e36a886010ba7" + ], + "copyright": [ + "kubeapi-load-balancer", + "static", + "badd4492d214890abd07b615f9e1a7a5ff3339b6c44655a826c746a9263ff00d" + ], + "copyright.layer-apt": [ + "layer:apt", + "static", + "5123b2d0220fefb4424a463216fb41a6dd7cfad49c9799ba7037f1e74a2fd6bc" + ], + "copyright.layer-basic": [ + "layer:basic", + "static", + "f6740d66fd60b60f2533d9fcb53907078d1e20920a0219afce7182e2a1c97629" + ], + "copyright.layer-leadership": [ + "layer:leadership", + "static", + "8ce407829378fc0f72ce44c7f624e4951c7ccb3db1cfb949bee026b701728cc9" + ], + "copyright.layer-metrics": [ + "layer:metrics", + "static", + "08509dcbade4c20761ba4382ef23c831744dbab1d4a8dd94a1c2b4d4e913334c" + ], + "copyright.layer-nagios": [ + "layer:nagios", + "static", + "47b2363574909e748bcc471d9004780ac084b301c154905654b5b6f088474749" + ], + "copyright.layer-nginx": [ + "layer:nginx", + "static", + "66b7d69f452f9203cbf702c57c58b16b359be9970781deb0e21893620dd52516" + ], + "copyright.layer-options": [ + "layer:options", + "static", + "f6740d66fd60b60f2533d9fcb53907078d1e20920a0219afce7182e2a1c97629" + ], + "copyright.layer-status": [ + "layer:status", + "static", + "7c0e36e618a8544faaaa3f8e0533c2f1f4a18bcacbdd8b99b537742e6b587d58" + ], + "debug-scripts/charm-unitdata": [ + "layer:debug", + "static", + 
"c952b9d31f3942e4e722cb3e70f5119707b69b8e76cc44e2e906bc6d9aef49b7" + ], + "debug-scripts/filesystem": [ + "layer:debug", + "static", + "d29cc8687f4422d024001c91b1ac756ee6bf8a2a125bc98db1199ba775eb8fd7" + ], + "debug-scripts/juju-logs": [ + "layer:debug", + "static", + "d260b35753a917368cb8c64c1312546a0a40ef49cba84c75bc6369549807c55e" + ], + "debug-scripts/juju-network-get": [ + "layer:debug", + "static", + "6d849a1f8e6569bd0d5ea38299f7937cb8b36a5f505e3532f6c756eabeb8b6c5" + ], + "debug-scripts/network": [ + "layer:debug", + "static", + "714afae5dcb45554ff1f05285501e3b7fcc656c8de51217e263b93dab25a9d2e" + ], + "debug-scripts/packages": [ + "layer:debug", + "static", + "e8177102dc2ca853cb9272c1257cf2cfd5253d2a074e602d07c8bc4ea8e27c75" + ], + "debug-scripts/sysctl": [ + "layer:debug", + "static", + "990035b320e09cc2228e1f2f880e795d51118b2959339eacddff9cbb74349c6a" + ], + "debug-scripts/systemd": [ + "layer:debug", + "static", + "23ddf533198bf5b1ce723acde31ada806aab8539292b514c721d8ec08af74106" + ], + "debug-scripts/tls-certs": [ + "layer:tls-client", + "static", + "ebf7f23ef6e39fb8e664bac2e9429e32aaeb673b4a51751724b835c007e85d3b" + ], + "docs/status.md": [ + "layer:status", + "static", + "975dec9f8c938196e102e954a80226bda293407c4e5ae857c118bf692154702a" + ], + "hooks/apiserver-relation-broken": [ + "layer:basic", + "dynamic", + "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7" + ], + "hooks/apiserver-relation-changed": [ + "layer:basic", + "dynamic", + "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7" + ], + "hooks/apiserver-relation-departed": [ + "layer:basic", + "dynamic", + "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7" + ], + "hooks/apiserver-relation-joined": [ + "layer:basic", + "dynamic", + "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7" + ], + "hooks/certificates-relation-broken": [ + "layer:basic", + "dynamic", + "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7" + ], + 
"hooks/certificates-relation-changed": [ + "layer:basic", + "dynamic", + "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7" + ], + "hooks/certificates-relation-departed": [ + "layer:basic", + "dynamic", + "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7" + ], + "hooks/certificates-relation-joined": [ + "layer:basic", + "dynamic", + "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7" + ], + "hooks/collect-metrics": [ + "layer:metrics", + "static", + "139fe18ce4cf2bed2155d3d0fce1c3b4cf1bc2598242cda42b3d772ec9bf8558" + ], + "hooks/config-changed": [ + "layer:basic", + "dynamic", + "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7" + ], + "hooks/ha-relation-broken": [ + "layer:basic", + "dynamic", + "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7" + ], + "hooks/ha-relation-changed": [ + "layer:basic", + "dynamic", + "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7" + ], + "hooks/ha-relation-departed": [ + "layer:basic", + "dynamic", + "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7" + ], + "hooks/ha-relation-joined": [ + "layer:basic", + "dynamic", + "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7" + ], + "hooks/hook.template": [ + "layer:basic", + "static", + "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7" + ], + "hooks/install": [ + "layer:basic", + "dynamic", + "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7" + ], + "hooks/lb-consumers-relation-broken": [ + "layer:basic", + "dynamic", + "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7" + ], + "hooks/lb-consumers-relation-changed": [ + "layer:basic", + "dynamic", + "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7" + ], + "hooks/lb-consumers-relation-departed": [ + "layer:basic", + "dynamic", + "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7" + ], + "hooks/lb-consumers-relation-joined": [ + 
"layer:basic", + "dynamic", + "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7" + ], + "hooks/leader-elected": [ + "layer:basic", + "dynamic", + "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7" + ], + "hooks/leader-settings-changed": [ + "layer:basic", + "dynamic", + "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7" + ], + "hooks/loadbalancer-relation-broken": [ + "layer:basic", + "dynamic", + "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7" + ], + "hooks/loadbalancer-relation-changed": [ + "layer:basic", + "dynamic", + "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7" + ], + "hooks/loadbalancer-relation-departed": [ + "layer:basic", + "dynamic", + "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7" + ], + "hooks/loadbalancer-relation-joined": [ + "layer:basic", + "dynamic", + "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7" + ], + "hooks/nrpe-external-master-relation-broken": [ + "layer:basic", + "dynamic", + "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7" + ], + "hooks/nrpe-external-master-relation-changed": [ + "layer:basic", + "dynamic", + "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7" + ], + "hooks/nrpe-external-master-relation-departed": [ + "layer:basic", + "dynamic", + "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7" + ], + "hooks/nrpe-external-master-relation-joined": [ + "layer:basic", + "dynamic", + "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7" + ], + "hooks/post-series-upgrade": [ + "layer:basic", + "dynamic", + "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7" + ], + "hooks/pre-series-upgrade": [ + "layer:basic", + "dynamic", + "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7" + ], + "hooks/relations/hacluster/.stestr.conf": [ + "interface:hacluster", + "static", + 
"46965969e6df6ac729b7dac68d57bc4e677e9f4d79d445be77f54ca3b9e58774" + ], + "hooks/relations/hacluster/README.md": [ + "interface:hacluster", + "static", + "7fad91e409c6e559cdb76d11c89c325531adc25679049a629a28c4f890755f1f" + ], + "hooks/relations/hacluster/__init__.py": [ + "interface:hacluster", + "static", + "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855" + ], + "hooks/relations/hacluster/copyright": [ + "interface:hacluster", + "static", + "7a296596102da98cecee289a195e00d6af44241911321699b3d4d4af93f11893" + ], + "hooks/relations/hacluster/interface.yaml": [ + "interface:hacluster", + "static", + "5f4e6c8d7b2884bdceeee422821f4db7163dbfa7994d86cb405ffef2c3dea43c" + ], + "hooks/relations/hacluster/interface_hacluster/__init__.py": [ + "interface:hacluster", + "static", + "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855" + ], + "hooks/relations/hacluster/interface_hacluster/common.py": [ + "interface:hacluster", + "static", + "eabe164702e7a98dd7e05e1ed34e556cfad4f43b37b015c8e21b51c84a316a2c" + ], + "hooks/relations/hacluster/requires.py": [ + "interface:hacluster", + "static", + "68cf3ed22af30e42f34fc70ca484e8e4eeaedac6410bd3f228677cc791e6f46c" + ], + "hooks/relations/hacluster/test-requirements.txt": [ + "interface:hacluster", + "static", + "63756e4b1c67bc161cee0d30d460dbb83911b2c064dc1c55454a30c1ab877616" + ], + "hooks/relations/http/.gitignore": [ + "interface:http", + "static", + "83b4ca18cc39800b1d260b5633cd0252e21501b21e7c33e718db44f1a68a09b8" + ], + "hooks/relations/http/README.md": [ + "interface:http", + "static", + "9c95320ad040745374fc03e972077f52c27e07eb0386ec93ae19bd50dca24c0d" + ], + "hooks/relations/http/__init__.py": [ + "interface:http", + "static", + "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855" + ], + "hooks/relations/http/interface.yaml": [ + "interface:http", + "static", + "d0b64038b85b7791ee4f3a42d73ffc8c208f206f73f899cbf33a519d12f9ad13" + ], + "hooks/relations/http/provides.py": [ + 
"interface:http", + "static", + "8c72cd8a5a6ea24f53b6dba11f4353c75265bfa7d3ecc2dd096c8963eab8c877" + ], + "hooks/relations/http/requires.py": [ + "interface:http", + "static", + "76cc886368eaf9c2403a6dc46b40531c3f4eaf67b08829f890c57cb645430abd" + ], + "hooks/relations/nrpe-external-master/README.md": [ + "interface:nrpe-external-master", + "static", + "d8ed3bc7334f6581b12b6091923f58e6f5ef62075a095a4e78fb8f434a948636" + ], + "hooks/relations/nrpe-external-master/__init__.py": [ + "interface:nrpe-external-master", + "static", + "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855" + ], + "hooks/relations/nrpe-external-master/interface.yaml": [ + "interface:nrpe-external-master", + "static", + "894f24ba56148044dae5b7febf874b427d199239bcbe1f2f55c3db06bb77b5f0" + ], + "hooks/relations/nrpe-external-master/provides.py": [ + "interface:nrpe-external-master", + "static", + "54e5400de99c051ecf6453776ad416b1cb8c6b73b34cbe2f41b617a8ed7b9daa" + ], + "hooks/relations/public-address/README.md": [ + "interface:public-address", + "static", + "7225effe61bfd8571447b8b685a2ecb52be17431b3066a5306330954c4cb064d" + ], + "hooks/relations/public-address/__init__.py": [ + "interface:public-address", + "static", + "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855" + ], + "hooks/relations/public-address/interface.yaml": [ + "interface:public-address", + "static", + "49d6777a54aa84c7d3be8d531be237564e90f2e4cb2be05ef5617a372a382340" + ], + "hooks/relations/public-address/provides.py": [ + "interface:public-address", + "static", + "7c99b0fe987d38773ed3e67c0378fdb78748c04d6895489cd4bca40aaeb051b2" + ], + "hooks/relations/public-address/requires.py": [ + "interface:public-address", + "static", + "d6a7c6c0762d29a5db19afb4cf82af50812988d5e19a3a48fcbe8b0f6fec12a5" + ], + "hooks/relations/tls-certificates/.gitignore": [ + "interface:tls-certificates", + "static", + "b485e74def213c534676224e655e9276b62d401ebc643508ddc545dd335cb6dc" + ], + 
"hooks/relations/tls-certificates/README.md": [ + "interface:tls-certificates", + "static", + "6851227de8fcca7edfd504159dbe3e3af31080af64df46f3d3b345da7630827a" + ], + "hooks/relations/tls-certificates/__init__.py": [ + "interface:tls-certificates", + "static", + "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855" + ], + "hooks/relations/tls-certificates/docs/common.md": [ + "interface:tls-certificates", + "static", + "5e91d6637fc0ccc50af2776de9e59a0f8098244b627816b2e18fabb266e980ff" + ], + "hooks/relations/tls-certificates/docs/provides.md": [ + "interface:tls-certificates", + "static", + "5c12dfca99b5c15ba10b4e7f7cff4cb4c9b621b198deba5f2397d3c837d035fe" + ], + "hooks/relations/tls-certificates/docs/requires.md": [ + "interface:tls-certificates", + "static", + "148dd1de163d75253f0a9d3c35e108dcaacbc9bdf97e47186743e6c82a67b62e" + ], + "hooks/relations/tls-certificates/interface.yaml": [ + "interface:tls-certificates", + "static", + "e412e54b1d327bad15a882f7f0bf996212090db576b863cc9cff7a68afc0e4fa" + ], + "hooks/relations/tls-certificates/make_docs": [ + "interface:tls-certificates", + "static", + "3671543bddc9d277171263310e404df3f11660429582cb27b39b7e7ec8757a37" + ], + "hooks/relations/tls-certificates/provides.py": [ + "interface:tls-certificates", + "static", + "be2a4b9a411c770989c529fd887070ad91649481a13f5239cfd8751f234b637c" + ], + "hooks/relations/tls-certificates/pydocmd.yml": [ + "interface:tls-certificates", + "static", + "48a233f60a89f87d56e9bc715e05766f5d39bbea2bc8741ed31f67b30c8cfcb8" + ], + "hooks/relations/tls-certificates/requires.py": [ + "interface:tls-certificates", + "static", + "442d773112079bc674d3e6be75b00323fcad7efd2f03613a1972b575dd438dba" + ], + "hooks/relations/tls-certificates/tls_certificates_common.py": [ + "interface:tls-certificates", + "static", + "068bd32ba69bfa514e1da386919d18b348ee678b40c372f275c9110f2cc4677c" + ], + "hooks/relations/tls-certificates/tox.ini": [ + "interface:tls-certificates", + "static", + 
"7ab8ab53e5ed98cfa7fb5c1d5009f84077a4bb76640ba64f561ef7ea3a702eab" + ], + "hooks/start": [ + "layer:basic", + "dynamic", + "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7" + ], + "hooks/stop": [ + "layer:basic", + "dynamic", + "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7" + ], + "hooks/update-status": [ + "layer:basic", + "dynamic", + "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7" + ], + "hooks/upgrade-charm": [ + "layer:basic", + "dynamic", + "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7" + ], + "hooks/website-relation-broken": [ + "layer:basic", + "dynamic", + "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7" + ], + "hooks/website-relation-changed": [ + "layer:basic", + "dynamic", + "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7" + ], + "hooks/website-relation-departed": [ + "layer:basic", + "dynamic", + "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7" + ], + "hooks/website-relation-joined": [ + "layer:basic", + "dynamic", + "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7" + ], + "icon.svg": [ + "kubeapi-load-balancer", + "static", + "92271bf7063cc3a85a6d0fe2841250cf9bf8cd72697f3655f03ada39f8aee029" + ], + "layer.yaml": [ + "kubeapi-load-balancer", + "dynamic", + "d7bac049bb8874aaab83bbe0339f1c1a4e726f27e548fa9705a0c890db70d5b2" + ], + "lib/.gitkeep": [ + "layer:nginx", + "static", + "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855" + ], + "lib/charms/apt.py": [ + "layer:apt", + "static", + "c7613992eb33ac94d83fbf02f467b614ea5112eaf561c4715def90989cefa531" + ], + "lib/charms/layer/__init__.py": [ + "layer:basic", + "static", + "dfe0d26c6bf409767de6e2546bc648f150e1b396243619bad3aa0553ab7e0e6f" + ], + "lib/charms/layer/basic.py": [ + "layer:basic", + "static", + "d120158e0c305a3b4529426a1a63a2f59af4f5730dccf3a59a9ffe1988494cee" + ], + "lib/charms/layer/execd.py": [ + "layer:basic", + "static", + 
"fda8bd491032db1db8ddaf4e99e7cc878c6fb5432efe1f91cadb5b34765d076d" + ], + "lib/charms/layer/hacluster.py": [ + "layer:hacluster", + "static", + "f58e0c1503187247f858ff3c9a1166d59107afd1557ba89e4878ec2e79304f8a" + ], + "lib/charms/layer/kubernetes_common.py": [ + "layer:kubernetes-common", + "static", + "bc89bd609a8e94102e00a192b7ae3caa813cca5e356536330494742bfdb6c4cb" + ], + "lib/charms/layer/nagios.py": [ + "layer:nagios", + "static", + "0246710bdbea844356007a64409907d93e6e94a289d83266e8b7c5d921fb3a6c" + ], + "lib/charms/layer/nginx.py": [ + "layer:nginx", + "static", + "5fea9e756b8e9ad09d0256d9f2a1e8e2169a97741af256653ca85b4412e40174" + ], + "lib/charms/layer/options.py": [ + "layer:options", + "static", + "8ae7a07d22542fc964f2d2bee8219d1c78a68dace70a1b38d36d4aea47b1c3b2" + ], + "lib/charms/layer/status.py": [ + "layer:status", + "static", + "d560a5e07b2e5f2b0f25f30e1f0278b06f3f90c01e4dbad5c83d71efc79018c6" + ], + "lib/charms/layer/tls_client.py": [ + "layer:tls-client", + "static", + "34531c3980777b661b913d77c432fc371ed10425473c2eb365b1dd5540c2ec6e" + ], + "lib/charms/leadership.py": [ + "layer:leadership", + "static", + "20ffcbbc08147506759726ad51567420659ffb8a2e0121079240b8706658e332" + ], + "lib/debug_script.py": [ + "layer:debug", + "static", + "a4d56f2d3e712b1b5cadb657c7195c6268d0aac6d228991049fd769e0ddaf453" + ], + "lib/nginxlib.py": [ + "layer:nginx", + "static", + "bae474acba0fbf9da21f1372dcda1dba848757c5e7cebb6fb22c29f04a67c0aa" + ], + "make_docs": [ + "layer:status", + "static", + "c990f55c8e879793a62ed8464ee3d7e0d7d2225fdecaf17af24b0df0e2daa8c1" + ], + "manifest.yaml": [ + "kubeapi-load-balancer", + "static", + "06bd2f274e54dccb127626d61d42fc2ac0b5a0d15da0713c3c36cd4363389d87" + ], + "metadata.yaml": [ + "kubeapi-load-balancer", + "dynamic", + "6861cfdcfbeead1cbb165aabbb34a7a5ec726ebe4862209af70fcba55a283caa" + ], + "metrics.yaml": [ + "kubeapi-load-balancer", + "static", + "94a5eb0b0966f8ba434d91ff1e9b99b1b4c3b3044657b236d4e742d3e0d57c47" + ], + 
"pydocmd.yml": [ + "layer:status", + "static", + "11d9293901f32f75f4256ae4ac2073b92ce1d7ef7b6c892ba9fbb98690a0b330" + ], + "pyproject.toml": [ + "layer:apt", + "static", + "19689509a5fb9bfc90ed1e873122ac0a90f22533b7f40055c38fdd587fe297de" + ], + "reactive/__init__.py": [ + "layer:leadership", + "static", + "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855" + ], + "reactive/apt.py": [ + "layer:apt", + "static", + "6fe40f18eb84a910a71a4acb7ec74856128de846de6029b4fc297a875692c837" + ], + "reactive/hacluster.py": [ + "layer:hacluster", + "static", + "7b56e9efc95ace190694e439eff210f0981811f89dc46a026a400e114f3f833d" + ], + "reactive/leadership.py": [ + "layer:leadership", + "static", + "e2b233cf861adc3b2d9e9c062134ce2f104953f03283cdddd88f49efee652e8f" + ], + "reactive/load_balancer.py": [ + "kubeapi-load-balancer", + "static", + "bca19a310482a2ebb5b5887341998b116a6fb6c63506bea25fcd3eabd3bc1574" + ], + "reactive/nginx.py": [ + "layer:nginx", + "static", + "046769111b72a5a5aa7bfd6362db988361719586bee4e9b40a472f33c0cf09a8" + ], + "reactive/status.py": [ + "layer:status", + "static", + "30207fc206f24e91def5252f1c7f7c8e23c0aed0e93076babf5e03c05296d207" + ], + "reactive/tls_client.py": [ + "layer:tls-client", + "static", + "08e850e401d2004523dca6b5e6bc47c33d558bf575dd55969491e11cd3ed98c8" + ], + "requirements.txt": [ + "layer:basic", + "static", + "a00f75d80849e5b4fc5ad2e7536f947c25b1a4044b341caa8ee87a92d3a4c804" + ], + "templates/.gitkeep": [ + "layer:nginx", + "static", + "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855" + ], + "templates/apilb.conf": [ + "kubeapi-load-balancer", + "static", + "2f84c6592f300bba3e197d22c0b43c24320ca5510e6d53cc8d5750d0061e1de6" + ], + "templates/cdk.auth-webhook-secret.yaml": [ + "layer:kubernetes-common", + "static", + "efaf34c12a5c961fa7843199070945ba05717b3656a0f3acc3327f45334bcaec" + ], + "templates/vhost.conf.ex": [ + "layer:nginx", + "static", + 
"f68c366c35a8487acb78da6f1086eeee33a3eccdbe5a524509039c0c41ad5d5a" + ], + "tests/data/charm.yaml": [ + "kubeapi-load-balancer", + "static", + "c20a3e6b0422cf2607ecbfcbf747e876e86abaa16381cbca3c987c4f65611bd4" + ], + "tests/data/ip_addr_json": [ + "layer:kubernetes-common", + "static", + "f129576a9e2c7738aca8669c642f123534eda63121ae450cec4cbda787b1eb06" + ], + "tests/functional/conftest.py": [ + "layer:kubernetes-common", + "static", + "fd53e0c38b4dda0c18096167889cd0d85b98b0a13225f9f8853261241e94078c" + ], + "tests/functional/test_k8s_common.py": [ + "layer:kubernetes-common", + "static", + "680a53724154771dd78422bbaf24b151788d86dd07960712c5d9e0d758499b50" + ], + "tests/integration/test_kubeapi-load-balancer_integration.py": [ + "kubeapi-load-balancer", + "static", + "ad9da65fe5b129dbf24adf0c2892f2405add076357273a8473602eb020540914" + ], + "tests/unit/conftest.py": [ + "kubeapi-load-balancer", + "static", + "b38bf2bc23b57be1345143e07e361e41f20cff55848e7ba3ce86400d61e16081" + ], + "tests/unit/test_k8s_common.py": [ + "layer:kubernetes-common", + "static", + "23e097e7f21e4f4f062caac0146bb85373e895a30be1be5667b90d0e84435882" + ], + "tests/unit/test_kubeapi_load_balancer.py": [ + "kubeapi-load-balancer", + "static", + "8c31c2541800259eab3461d0295ed0c76d763596b2a99a5ecdd683d65402517f" + ], + "tests/validate-wheelhouse.sh": [ + "kubeapi-load-balancer", + "static", + "1c74bea041866cf4bd75763190d3c512e1d63a19b04e35178a64b8c517bb3231" + ], + "tox.ini": [ + "kubeapi-load-balancer", + "static", + "db04c740dd6f024f68c7f24a1e7d4cf3c9d92332475e003ce91a24bc4c7e1002" + ], + "version": [ + "kubeapi-load-balancer", + "dynamic", + "d9a4b742c183b4dd1b9fbf1e74567e06d0d4aa5538814dfde937416b3ac1bcc9" + ], + "wheelhouse.txt": [ + "kubeapi-load-balancer", + "dynamic", + "9cabe75bb18bd698c311a37ba23e8b874196f29469fe6898f87800e7f16fc8fd" + ], + "wheelhouse/Jinja2-3.0.3.tar.gz": [ + "layer:basic", + "dynamic", + "611bb273cd68f3b993fabdc4064fc858c5b47a973cb5aa7999ec1ba405c87cd7" + ], + 
"wheelhouse/MarkupSafe-2.0.1.tar.gz": [ + "layer:basic", + "dynamic", + "594c67807fb16238b30c44bdf74f36c02cdf22d1c8cda91ef8a0ed8dabf5620a" + ], + "wheelhouse/PyYAML-5.3.1.tar.gz": [ + "layer:basic", + "dynamic", + "b8eac752c5e14d3eca0e6dd9199cd627518cb5ec06add0de9d32baeee6fe645d" + ], + "wheelhouse/cached-property-1.5.2.tar.gz": [ + "__pip__", + "dynamic", + "9fa5755838eecbb2d234c3aa390bd80fbd3ac6b6869109bfc1b499f7bd89a130" + ], + "wheelhouse/charmhelpers-1.2.1.tar.gz": [ + "layer:basic", + "dynamic", + "298bb9e90d9392e2b66d10a5199b1b2d459dc8d5434b897913325904989dd2d7" + ], + "wheelhouse/charms.reactive-1.5.2.tar.gz": [ + "layer:basic", + "dynamic", + "4cb67e15402b95e766877666f985d157b7e917dc6170ec6d922d79928aefa6b8" + ], + "wheelhouse/loadbalancer_interface-1.2.0.tar.gz": [ + "kubeapi-load-balancer", + "dynamic", + "f2b31a5bf25b0435eee696685af78082c8a93fbe85336755bea5b17392a584bd" + ], + "wheelhouse/marshmallow-3.14.1.tar.gz": [ + "__pip__", + "dynamic", + "4c05c1684e0e97fe779c62b91878f173b937fe097b356cd82f793464f5bc6138" + ], + "wheelhouse/marshmallow-enum-1.5.1.tar.gz": [ + "__pip__", + "dynamic", + "38e697e11f45a8e64b4a1e664000897c659b60aa57bfa18d44e226a9920b6e58" + ], + "wheelhouse/netaddr-0.7.19.tar.gz": [ + "layer:basic", + "dynamic", + "38aeec7cdd035081d3a4c306394b19d677623bf76fa0913f6695127c7753aefd" + ], + "wheelhouse/ops-1.5.5.tar.gz": [ + "__pip__", + "dynamic", + "07210019819daf35693585619d01b620f89640f8a9b4d50061f4d84f9fcf31e5" + ], + "wheelhouse/ops_reactive_interface-1.0.1.tar.gz": [ + "__pip__", + "dynamic", + "9ed351c42fc187299c23125975aa3dfee9f6aaae0c9d49bce8904ac079255dba" + ], + "wheelhouse/pbr-5.11.1.tar.gz": [ + "__pip__", + "dynamic", + "aefc51675b0b533d56bb5fd1c8c6c0522fe31896679882e1c4c63d5e4a0fccb3" + ], + "wheelhouse/pip-18.1.tar.gz": [ + "layer:basic", + "dynamic", + "c0a292bd977ef590379a3f05d7b7f65135487b67470f6281289a94e015650ea1" + ], + "wheelhouse/pyaml-21.10.1.tar.gz": [ + "__pip__", + "dynamic", + 
"c6519fee13bf06e3bb3f20cacdea8eba9140385a7c2546df5dbae4887f768383" + ], + "wheelhouse/setuptools-41.6.0.zip": [ + "layer:basic", + "dynamic", + "6afa61b391dcd16cb8890ec9f66cc4015a8a31a6e1c2b4e0c464514be1a3d722" + ], + "wheelhouse/setuptools_scm-1.17.0.tar.gz": [ + "layer:basic", + "dynamic", + "70a4cf5584e966ae92f54a764e6437af992ba42ac4bca7eb37cc5d02b98ec40a" + ], + "wheelhouse/toml-0.10.2.tar.gz": [ + "layer:nginx", + "dynamic", + "b3bda1d108d5dd99f4a20d24d9c348e91c4db7ab1b749200bded2f839ccbe68f" + ], + "wheelhouse/wheel-0.33.6.tar.gz": [ + "layer:basic", + "dynamic", + "10c9da68765315ed98850f8e048347c3eb06dd81822dc2ab1d4fde9dc9702646" + ] + } +} \ No newline at end of file diff --git a/kubeapi-load-balancer/.github/workflows/main.yml b/kubeapi-load-balancer/.github/workflows/main.yml new file mode 100644 index 0000000..06c6739 --- /dev/null +++ b/kubeapi-load-balancer/.github/workflows/main.yml @@ -0,0 +1,44 @@ +name: Test Suite +on: [pull_request] + +jobs: + call-inclusive-naming-check: + name: Inclusive naming + uses: canonical-web-and-design/Inclusive-naming/.github/workflows/woke.yaml@main + with: + fail-on-error: "true" + + validate-wheelhouse: + name: Validate Wheelhouse + uses: charmed-kubernetes/workflows/.github/workflows/validate-wheelhouse.yaml@main + + lint-unit: + name: Lint Unit + uses: charmed-kubernetes/workflows/.github/workflows/lint-unit.yaml@main + + integration-test: + name: Integration test with VMWare + runs-on: self-hosted + timeout-minutes: 360 + needs: + - call-inclusive-naming-check + - validate-wheelhouse + - lint-unit + steps: + - name: Check out code + uses: actions/checkout@v2 + - name: Setup Python + uses: actions/setup-python@v2 + with: + python-version: 3.8 + - name: Setup operator environment + uses: charmed-kubernetes/actions-operator@main + with: + provider: vsphere + credentials-yaml: ${{ secrets.CREDENTIALS_YAML }} + clouds-yaml: ${{ secrets.CLOUDS_YAML }} + juju-channel: 2.9/stable + bootstrap-constraints: "arch=amd64 
cores=2 mem=4G" + bootstrap-options: "${{ secrets.FOCAL_BOOTSTRAP_OPTIONS }} --model-default datastore=vsanDatastore --model-default primary-network=VLAN_2764" + - name: Run test + run: tox -e integration -- --basetemp=/home/ubuntu/pytest diff --git a/kubeapi-load-balancer/.gitignore b/kubeapi-load-balancer/.gitignore new file mode 100644 index 0000000..96d476a --- /dev/null +++ b/kubeapi-load-balancer/.gitignore @@ -0,0 +1,4 @@ +.tox/ +__pycache__/ +*.pyc +*.charm \ No newline at end of file diff --git a/kubeapi-load-balancer/AUTHORS b/kubeapi-load-balancer/AUTHORS new file mode 100644 index 0000000..60e3e7d --- /dev/null +++ b/kubeapi-load-balancer/AUTHORS @@ -0,0 +1,2 @@ +Adam Stokes +Marco Ceppi diff --git a/kubeapi-load-balancer/CONTRIBUTING.md b/kubeapi-load-balancer/CONTRIBUTING.md new file mode 100644 index 0000000..f198d7c --- /dev/null +++ b/kubeapi-load-balancer/CONTRIBUTING.md @@ -0,0 +1,37 @@ +# Contributor Guide + +This Juju charm is open source ([Apache License 2.0](./LICENSE)) and we actively seek any community contibutions +for code, suggestions and documentation. +This page details a few notes, workflows and suggestions for how to make contributions most effective and help us +all build a better charm - please give them a read before working on any contributions. + +## Licensing + +This charm has been created under the [Apache License 2.0](./LICENSE), which will cover any contributions you may +make to this project. Please familiarise yourself with the terms of the license. + +Additionally, this charm uses the Harmony CLA agreement. It’s the easiest way for you to give us permission to +use your contributions. +In effect, you’re giving us a license, but you still own the copyright — so you retain the right to modify your +code and use it in other projects. Please [sign the CLA here](https://ubuntu.com/legal/contributors/agreement) before +making any contributions. + +## Code of conduct + +We have adopted the Ubuntu code of Conduct. 
You can read this in full [here](https://ubuntu.com/community/code-of-conduct). + +## Contributing code + +To contribute code to this project, please use the following workflow: + +1. [Submit a bug](https://bugs.launchpad.net/charm-kubeapi-load-balancer/+filebug) to explain the need for and track the change. +2. Create a branch on your fork of the repo with your changes, including a unit test covering the new or modified code. +3. Submit a PR. The PR description should include a link to the bug on Launchpad. +4. Update the Launchpad bug to include a link to the PR and the `review-needed` tag. +5. Once reviewed and merged, the change will become available on the edge channel and assigned to an appropriate milestone + for further release according to priority. + +## Documentation + +Documentation for this charm is currently maintained as part of the Charmed Kubernetes docs. +See [this page](https://github.com/charmed-kubernetes/kubernetes-docs/blob/master/pages/k8s/charm-kubeapi-load-balancer.md) diff --git a/kubeapi-load-balancer/LICENSE b/kubeapi-load-balancer/LICENSE new file mode 100644 index 0000000..7a4a3ea --- /dev/null +++ b/kubeapi-load-balancer/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. 
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. 
You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. 
Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. \ No newline at end of file diff --git a/kubeapi-load-balancer/Makefile b/kubeapi-load-balancer/Makefile new file mode 100644 index 0000000..a1ad3a5 --- /dev/null +++ b/kubeapi-load-balancer/Makefile @@ -0,0 +1,24 @@ +#!/usr/bin/make + +all: lint unit_test + + +.PHONY: clean +clean: + @rm -rf .tox + +.PHONY: apt_prereqs +apt_prereqs: + @# Need tox, but don't install the apt version unless we have to (don't want to conflict with pip) + @which tox >/dev/null || (sudo apt-get install -y python-pip && sudo pip install tox) + +.PHONY: lint +lint: apt_prereqs + @tox --notest + @PATH=.tox/py34/bin:.tox/py35/bin flake8 $(wildcard hooks reactive lib unit_tests tests) + @charm proof + +.PHONY: unit_test +unit_test: apt_prereqs + @echo Starting tests... + tox diff --git a/kubeapi-load-balancer/README.md b/kubeapi-load-balancer/README.md new file mode 100644 index 0000000..1061d7b --- /dev/null +++ b/kubeapi-load-balancer/README.md @@ -0,0 +1,15 @@ +# kubeapi-load-balancer + +Simple NGINX reverse proxy to lend a hand in HA kubernetes-control-plane deployments. + + +This charm is a component of Charmed Kubernetes. For full information, +please visit the [official Charmed Kubernetes docs](https://www.ubuntu.com/kubernetes/docs/charm-kubeapi-load-balancer). 
+ +# Developers + +## Building the charm + +``` +make charm +``` diff --git a/kubeapi-load-balancer/actions.yaml b/kubeapi-load-balancer/actions.yaml new file mode 100644 index 0000000..8712b6b --- /dev/null +++ b/kubeapi-load-balancer/actions.yaml @@ -0,0 +1,2 @@ +"debug": + "description": "Collect debug data" diff --git a/kubeapi-load-balancer/actions/debug b/kubeapi-load-balancer/actions/debug new file mode 100755 index 0000000..8ba160e --- /dev/null +++ b/kubeapi-load-balancer/actions/debug @@ -0,0 +1,102 @@ +#!/usr/local/sbin/charm-env python3 + +import os +import subprocess +import tarfile +import tempfile +import traceback +from contextlib import contextmanager +from datetime import datetime +from charmhelpers.core.hookenv import action_set, local_unit + +archive_dir = None +log_file = None + + +@contextmanager +def archive_context(): + """ Open a context with a new temporary directory. + + When the context closes, the directory is archived, and the archive + location is added to Juju action output. """ + global archive_dir + global log_file + with tempfile.TemporaryDirectory() as temp_dir: + name = "debug-" + datetime.now().strftime("%Y%m%d%H%M%S") + archive_dir = os.path.join(temp_dir, name) + os.makedirs(archive_dir) + with open("%s/debug.log" % archive_dir, "w") as log_file: + yield + os.chdir(temp_dir) + tar_path = "/home/ubuntu/%s.tar.gz" % name + with tarfile.open(tar_path, "w:gz") as f: + f.add(name) + action_set({ + "path": tar_path, + "command": "juju scp %s:%s ." % (local_unit(), tar_path), + "message": " ".join([ + "Archive has been created on unit %s." % local_unit(), + "Use the juju scp command to copy it to your local machine." + ]) + }) + + +def log(msg): + """ Log a message that will be included in the debug archive. 
+ + Must be run within archive_context """ + timestamp = datetime.now().strftime("%Y-%m-%d %H:%M:%S") + for line in str(msg).splitlines(): + log_file.write(timestamp + " | " + line.rstrip() + "\n") + + +def run_script(script): + """ Run a single script. Must be run within archive_context """ + log("Running script: " + script) + script_dir = os.path.join(archive_dir, script) + os.makedirs(script_dir) + env = os.environ.copy() + env["PYTHONPATH"] = "lib" # allow same imports as reactive code + env["DEBUG_SCRIPT_DIR"] = script_dir + with open(script_dir + "/stdout", "w") as stdout: + with open(script_dir + "/stderr", "w") as stderr: + process = subprocess.Popen( + "debug-scripts/" + script, + stdout=stdout, stderr=stderr, env=env + ) + try: + exit_code = process.wait(timeout=300) + except subprocess.TimeoutExpired: + log("ERROR: still running, terminating") + process.terminate() + try: + exit_code = process.wait(timeout=10) + except subprocess.TimeoutExpired: + log("ERROR: still running, killing") + process.kill() + exit_code = process.wait(timeout=10) + if exit_code != 0: + log("ERROR: %s failed with exit code %d" % (script, exit_code)) + + +def run_all_scripts(): + """ Run all scripts. For the sake of robustness, log and ignore any + exceptions that occur. + + Must be run within archive_context """ + scripts = os.listdir("debug-scripts") + for script in scripts: + try: + run_script(script) + except: + log(traceback.format_exc()) + + +def main(): + """ Open an archive context and run all scripts. """ + with archive_context(): + run_all_scripts() + + +if __name__ == "__main__": + main() diff --git a/kubeapi-load-balancer/bin/charm-env b/kubeapi-load-balancer/bin/charm-env new file mode 100755 index 0000000..d211ce9 --- /dev/null +++ b/kubeapi-load-balancer/bin/charm-env @@ -0,0 +1,107 @@ +#!/bin/bash + +VERSION="1.0.0" + + +find_charm_dirs() { + # Hopefully, $JUJU_CHARM_DIR is set so which venv to use in unambiguous. 
+ if [[ -n "$JUJU_CHARM_DIR" || -n "$CHARM_DIR" ]]; then + if [[ -z "$JUJU_CHARM_DIR" ]]; then + # accept $CHARM_DIR to be more forgiving + export JUJU_CHARM_DIR="$CHARM_DIR" + fi + if [[ -z "$CHARM_DIR" ]]; then + # set CHARM_DIR as well to help with backwards compatibility + export CHARM_DIR="$JUJU_CHARM_DIR" + fi + return + fi + # Try to guess the value for JUJU_CHARM_DIR by looking for a non-subordinate + # (because there's got to be at least one principle) charm directory; + # if there are several, pick the first by alpha order. + agents_dir="/var/lib/juju/agents" + if [[ -d "$agents_dir" ]]; then + desired_charm="$1" + found_charm_dir="" + if [[ -n "$desired_charm" ]]; then + for charm_dir in $(/bin/ls -d "$agents_dir"/unit-*/charm); do + charm_name="$(grep -o '^['\''"]\?name['\''"]\?:.*' $charm_dir/metadata.yaml 2> /dev/null | sed -e 's/.*: *//' -e 's/['\''"]//g')" + if [[ "$charm_name" == "$desired_charm" ]]; then + if [[ -n "$found_charm_dir" ]]; then + >&2 echo "Ambiguous possibilities for JUJU_CHARM_DIR matching '$desired_charm'; please run within a Juju hook context" + exit 1 + fi + found_charm_dir="$charm_dir" + fi + done + if [[ -z "$found_charm_dir" ]]; then + >&2 echo "Unable to determine JUJU_CHARM_DIR matching '$desired_charm'; please run within a Juju hook context" + exit 1 + fi + export JUJU_CHARM_DIR="$found_charm_dir" + export CHARM_DIR="$found_charm_dir" + return + fi + # shellcheck disable=SC2126 + non_subordinates="$(grep -L 'subordinate"\?:.*true' "$agents_dir"/unit-*/charm/metadata.yaml | wc -l)" + if [[ "$non_subordinates" -gt 1 ]]; then + >&2 echo 'Ambiguous possibilities for JUJU_CHARM_DIR; please use --charm or run within a Juju hook context' + exit 1 + elif [[ "$non_subordinates" -eq 1 ]]; then + for charm_dir in $(/bin/ls -d "$agents_dir"/unit-*/charm); do + if grep -q 'subordinate"\?:.*true' "$charm_dir/metadata.yaml"; then + continue + fi + export JUJU_CHARM_DIR="$charm_dir" + export CHARM_DIR="$charm_dir" + return + done + fi + 
fi + >&2 echo 'Unable to determine JUJU_CHARM_DIR; please run within a Juju hook context' + exit 1 +} + +try_activate_venv() { + if [[ -d "$JUJU_CHARM_DIR/../.venv" ]]; then + . "$JUJU_CHARM_DIR/../.venv/bin/activate" + fi +} + +find_wrapped() { + PATH="${PATH/\/usr\/local\/sbin:}" which "$(basename "$0")" +} + + +if [[ "$1" == "--version" || "$1" == "-v" ]]; then + echo "$VERSION" + exit 0 +fi + + +# allow --charm option to hint which JUJU_CHARM_DIR to choose when ambiguous +# NB: --charm option must come first +# NB: option must be processed outside find_charm_dirs to modify $@ +charm_name="" +if [[ "$1" == "--charm" ]]; then + charm_name="$2" + shift; shift +fi + +find_charm_dirs "$charm_name" +try_activate_venv +export PYTHONPATH="$JUJU_CHARM_DIR/lib:$PYTHONPATH" + +if [[ "$(basename "$0")" == "charm-env" ]]; then + # being used as a shebang + exec "$@" +elif [[ "$0" == "$BASH_SOURCE" ]]; then + # being invoked as a symlink wrapping something to find in the venv + exec "$(find_wrapped)" "$@" +elif [[ "$(basename "$BASH_SOURCE")" == "charm-env" ]]; then + # being sourced directly; do nothing + /bin/true +else + # being sourced for wrapped bash helpers + . 
"$(find_wrapped)" +fi diff --git a/kubeapi-load-balancer/bin/layer_option b/kubeapi-load-balancer/bin/layer_option new file mode 100755 index 0000000..3253ef8 --- /dev/null +++ b/kubeapi-load-balancer/bin/layer_option @@ -0,0 +1,22 @@ +#!/usr/bin/env python3 + +import sys +import argparse +from charms import layer + + +parser = argparse.ArgumentParser(description='Access layer options.') +parser.add_argument('section', + help='the section, or layer, the option is from') +parser.add_argument('option', + help='the option to access') + +args = parser.parse_args() +value = layer.options.get(args.section, args.option) +if isinstance(value, bool): + sys.exit(0 if value else 1) +elif isinstance(value, list): + for val in value: + print(val) +else: + print(value) diff --git a/kubeapi-load-balancer/config.yaml b/kubeapi-load-balancer/config.yaml new file mode 100644 index 0000000..e6a860d --- /dev/null +++ b/kubeapi-load-balancer/config.yaml @@ -0,0 +1,91 @@ +"options": + "nagios_context": + "default": "juju" + "type": "string" + "description": | + Used by the nrpe subordinate charms. + A string that will be prepended to instance name to set the host name + in nagios. So for instance the hostname would be something like: + juju-myservice-0 + If you're running multiple environments with the same services in them + this allows you to differentiate between them. + "nagios_servicegroups": + "default": "" + "type": "string" + "description": | + A comma-separated list of nagios servicegroups. + If left empty, the nagios_context will be used as the servicegroup + "extra_packages": + "description": > + Space separated list of extra deb packages to install. + "type": "string" + "default": "" + "package_status": + "default": "install" + "type": "string" + "description": > + The status of service-affecting packages will be set to this + value in the dpkg database. Valid values are "install" and "hold". 
+ "install_sources": + "description": > + List of extra apt sources, per charm-helpers standard + format (a yaml list of strings encoded as a string). Each source + may be either a line that can be added directly to + sources.list(5), or in the form ppa:/ for adding + Personal Package Archives, or a distribution component to enable. + "type": "string" + "default": "" + "install_keys": + "description": > + List of signing keys for install_sources package sources, per + charmhelpers standard format (a yaml list of strings encoded as + a string). The keys should be the full ASCII armoured GPG public + keys. While GPG key ids are also supported and looked up on a + keyserver, operators should be aware that this mechanism is + insecure. null can be used if a standard package signing key is + used that will already be installed on the machine, and for PPA + sources where the package signing key is securely retrieved from + Launchpad. + "type": "string" + "default": "" + "port": + "type": "int" + "default": !!int "443" + "description": |- + The port to run the loadbalancer + "host": + "type": "string" + "default": "127.0.0.1" + "description": "listen address" + "ha-cluster-vip": + "type": "string" + "description": | + Virtual IP for the charm to use with the HA Cluster subordinate charm + Mutually exclusive with ha-cluster-dns. Multiple virtual IPs are + separated by spaces. + "default": "" + "ha-cluster-dns": + "type": "string" + "description": | + DNS entry to use with the HA Cluster subordinate charm. + Mutually exclusive with ha-cluster-vip. + "default": "" + "extra_sans": + "type": "string" + "default": "" + "description": | + Space-separated list of extra SAN entries to add to the x509 certificate + created for the load balancers. + "proxy_read_timeout": + "type": "int" + "default": !!int "600" + "description": "Timeout in seconds for reading a response from proxy server." 
+ "loadbalancer-ips": + "type": "string" + "description": | + Space seperated list of IP addresses of loadbalancers in front of control plane. + A common case for this is virtual IP addresses that are floated in front of the + kubeapi-load-balancer charm. The workers will alternate IP addresses from this + list to distribute load. If you have 2 IPs and 4 workers, each IP will be used + by 2 workers. + "default": "" diff --git a/kubeapi-load-balancer/copyright b/kubeapi-load-balancer/copyright new file mode 100644 index 0000000..ac5e525 --- /dev/null +++ b/kubeapi-load-balancer/copyright @@ -0,0 +1,13 @@ +Copyright 2016 The Kubernetes Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/kubeapi-load-balancer/copyright.layer-apt b/kubeapi-load-balancer/copyright.layer-apt new file mode 100644 index 0000000..0814dc1 --- /dev/null +++ b/kubeapi-load-balancer/copyright.layer-apt @@ -0,0 +1,15 @@ +Copyright 2015-2016 Canonical Ltd. + +This file is part of the Apt layer for Juju. + +This program is free software: you can redistribute it and/or modify +it under the terms of the GNU General Public License version 3, as +published by the Free Software Foundation. + +This program is distributed in the hope that it will be useful, but +WITHOUT ANY WARRANTY; without even the implied warranties of +MERCHANTABILITY, SATISFACTORY QUALITY, or FITNESS FOR A PARTICULAR +PURPOSE. See the GNU General Public License for more details. 
+ +You should have received a copy of the GNU General Public License +along with this program. If not, see . diff --git a/kubeapi-load-balancer/copyright.layer-basic b/kubeapi-load-balancer/copyright.layer-basic new file mode 100644 index 0000000..d4fdd18 --- /dev/null +++ b/kubeapi-load-balancer/copyright.layer-basic @@ -0,0 +1,16 @@ +Format: http://dep.debian.net/deps/dep5/ + +Files: * +Copyright: Copyright 2015-2017, Canonical Ltd., All Rights Reserved. +License: Apache License 2.0 + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + . + http://www.apache.org/licenses/LICENSE-2.0 + . + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/kubeapi-load-balancer/copyright.layer-leadership b/kubeapi-load-balancer/copyright.layer-leadership new file mode 100644 index 0000000..08b983f --- /dev/null +++ b/kubeapi-load-balancer/copyright.layer-leadership @@ -0,0 +1,15 @@ +Copyright 2015-2016 Canonical Ltd. + +This file is part of the Leadership Layer for Juju. + +This program is free software: you can redistribute it and/or modify +it under the terms of the GNU General Public License version 3, as +published by the Free Software Foundation. + +This program is distributed in the hope that it will be useful, but +WITHOUT ANY WARRANTY; without even the implied warranties of +MERCHANTABILITY, SATISFACTORY QUALITY, or FITNESS FOR A PARTICULAR +PURPOSE. See the GNU General Public License for more details. + +You should have received a copy of the GNU General Public License +along with this program. If not, see . 
diff --git a/kubeapi-load-balancer/copyright.layer-metrics b/kubeapi-load-balancer/copyright.layer-metrics new file mode 100644 index 0000000..2df15bd --- /dev/null +++ b/kubeapi-load-balancer/copyright.layer-metrics @@ -0,0 +1,13 @@ +Copyright 2016 Canonical Ltd + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/kubeapi-load-balancer/copyright.layer-nagios b/kubeapi-load-balancer/copyright.layer-nagios new file mode 100644 index 0000000..c80db95 --- /dev/null +++ b/kubeapi-load-balancer/copyright.layer-nagios @@ -0,0 +1,16 @@ +Format: http://dep.debian.net/deps/dep5/ + +Files: * +Copyright: Copyright 2016, Canonical Ltd. +License: GPL-3 + This program is free software: you can redistribute it and/or modify + it under the terms of the GNU General Public License version 3, as + published by the Free Software Foundation. + . + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranties of + MERCHANTABILITY, SATISFACTORY QUALITY, or FITNESS FOR A PARTICULAR + PURPOSE. See the GNU General Public License for more details. + . + You should have received a copy of the GNU General Public License + along with this program. If not, see . 
diff --git a/kubeapi-load-balancer/copyright.layer-nginx b/kubeapi-load-balancer/copyright.layer-nginx new file mode 100644 index 0000000..953f220 --- /dev/null +++ b/kubeapi-load-balancer/copyright.layer-nginx @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2015 Adam Stokes + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. diff --git a/kubeapi-load-balancer/copyright.layer-options b/kubeapi-load-balancer/copyright.layer-options new file mode 100644 index 0000000..d4fdd18 --- /dev/null +++ b/kubeapi-load-balancer/copyright.layer-options @@ -0,0 +1,16 @@ +Format: http://dep.debian.net/deps/dep5/ + +Files: * +Copyright: Copyright 2015-2017, Canonical Ltd., All Rights Reserved. +License: Apache License 2.0 + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + . + http://www.apache.org/licenses/LICENSE-2.0 + . 
+ Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/kubeapi-load-balancer/copyright.layer-status b/kubeapi-load-balancer/copyright.layer-status new file mode 100644 index 0000000..a91bdf1 --- /dev/null +++ b/kubeapi-load-balancer/copyright.layer-status @@ -0,0 +1,16 @@ +Format: http://dep.debian.net/deps/dep5/ + +Files: * +Copyright: Copyright 2018, Canonical Ltd., All Rights Reserved. +License: Apache License 2.0 + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + . + http://www.apache.org/licenses/LICENSE-2.0 + . + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/kubeapi-load-balancer/debug-scripts/charm-unitdata b/kubeapi-load-balancer/debug-scripts/charm-unitdata new file mode 100755 index 0000000..d2aac60 --- /dev/null +++ b/kubeapi-load-balancer/debug-scripts/charm-unitdata @@ -0,0 +1,12 @@ +#!/usr/local/sbin/charm-env python3 + +import debug_script +import json +from charmhelpers.core import unitdata + +kv = unitdata.kv() +data = kv.getrange("") + +with debug_script.open_file("unitdata.json", "w") as f: + json.dump(data, f, indent=2) + f.write("\n") diff --git a/kubeapi-load-balancer/debug-scripts/filesystem b/kubeapi-load-balancer/debug-scripts/filesystem new file mode 100755 index 0000000..c5ec6d8 --- /dev/null +++ b/kubeapi-load-balancer/debug-scripts/filesystem @@ -0,0 +1,17 @@ +#!/bin/sh +set -ux + +# report file system disk space usage +df -hT > $DEBUG_SCRIPT_DIR/df-hT +# estimate file space usage +du -h / 2>&1 > $DEBUG_SCRIPT_DIR/du-h +# list the mounted filesystems +mount > $DEBUG_SCRIPT_DIR/mount +# list the mounted systems with ascii trees +findmnt -A > $DEBUG_SCRIPT_DIR/findmnt +# list block devices +lsblk > $DEBUG_SCRIPT_DIR/lsblk +# list open files +lsof 2>&1 > $DEBUG_SCRIPT_DIR/lsof +# list local system locks +lslocks > $DEBUG_SCRIPT_DIR/lslocks diff --git a/kubeapi-load-balancer/debug-scripts/juju-logs b/kubeapi-load-balancer/debug-scripts/juju-logs new file mode 100755 index 0000000..d27c458 --- /dev/null +++ b/kubeapi-load-balancer/debug-scripts/juju-logs @@ -0,0 +1,4 @@ +#!/bin/sh +set -ux + +cp -v /var/log/juju/* $DEBUG_SCRIPT_DIR diff --git a/kubeapi-load-balancer/debug-scripts/juju-network-get b/kubeapi-load-balancer/debug-scripts/juju-network-get new file mode 100755 index 0000000..983c8c4 --- /dev/null +++ b/kubeapi-load-balancer/debug-scripts/juju-network-get @@ -0,0 +1,21 @@ +#!/usr/local/sbin/charm-env python3 + +import os +import subprocess +import yaml +import debug_script + +with open('metadata.yaml') as f: + metadata = yaml.load(f) + +relations = [] +for key in ['requires', 
'provides', 'peers']: + relations += list(metadata.get(key, {}).keys()) + +os.mkdir(os.path.join(debug_script.dir, 'relations')) + +for relation in relations: + path = 'relations/' + relation + with debug_script.open_file(path, 'w') as f: + cmd = ['network-get', relation] + subprocess.call(cmd, stdout=f, stderr=subprocess.STDOUT) diff --git a/kubeapi-load-balancer/debug-scripts/network b/kubeapi-load-balancer/debug-scripts/network new file mode 100755 index 0000000..944a355 --- /dev/null +++ b/kubeapi-load-balancer/debug-scripts/network @@ -0,0 +1,11 @@ +#!/bin/sh +set -ux + +ifconfig -a > $DEBUG_SCRIPT_DIR/ifconfig +cp -v /etc/resolv.conf $DEBUG_SCRIPT_DIR/resolv.conf +cp -v /etc/network/interfaces $DEBUG_SCRIPT_DIR/interfaces +netstat -planut > $DEBUG_SCRIPT_DIR/netstat +route -n > $DEBUG_SCRIPT_DIR/route +iptables-save > $DEBUG_SCRIPT_DIR/iptables-save +dig google.com > $DEBUG_SCRIPT_DIR/dig-google +ping -w 2 -i 0.1 google.com > $DEBUG_SCRIPT_DIR/ping-google diff --git a/kubeapi-load-balancer/debug-scripts/packages b/kubeapi-load-balancer/debug-scripts/packages new file mode 100755 index 0000000..b60a9cf --- /dev/null +++ b/kubeapi-load-balancer/debug-scripts/packages @@ -0,0 +1,7 @@ +#!/bin/sh +set -ux + +dpkg --list > $DEBUG_SCRIPT_DIR/dpkg-list +snap list > $DEBUG_SCRIPT_DIR/snap-list +pip2 list > $DEBUG_SCRIPT_DIR/pip2-list +pip3 list > $DEBUG_SCRIPT_DIR/pip3-list diff --git a/kubeapi-load-balancer/debug-scripts/sysctl b/kubeapi-load-balancer/debug-scripts/sysctl new file mode 100755 index 0000000..a86a6c8 --- /dev/null +++ b/kubeapi-load-balancer/debug-scripts/sysctl @@ -0,0 +1,4 @@ +#!/bin/sh +set -ux + +sysctl -a > $DEBUG_SCRIPT_DIR/sysctl diff --git a/kubeapi-load-balancer/debug-scripts/systemd b/kubeapi-load-balancer/debug-scripts/systemd new file mode 100755 index 0000000..8bb9b6f --- /dev/null +++ b/kubeapi-load-balancer/debug-scripts/systemd @@ -0,0 +1,9 @@ +#!/bin/sh +set -ux + +systemctl --all > $DEBUG_SCRIPT_DIR/systemctl +journalctl > 
$DEBUG_SCRIPT_DIR/journalctl +systemd-analyze time > $DEBUG_SCRIPT_DIR/systemd-analyze-time +systemd-analyze blame > $DEBUG_SCRIPT_DIR/systemd-analyze-blame +systemd-analyze critical-chain > $DEBUG_SCRIPT_DIR/systemd-analyze-critical-chain +systemd-analyze dump > $DEBUG_SCRIPT_DIR/systemd-analyze-dump diff --git a/kubeapi-load-balancer/debug-scripts/tls-certs b/kubeapi-load-balancer/debug-scripts/tls-certs new file mode 100755 index 0000000..2692e51 --- /dev/null +++ b/kubeapi-load-balancer/debug-scripts/tls-certs @@ -0,0 +1,21 @@ +#!/usr/local/sbin/charm-env python3 + +import os +import shutil +import traceback +import debug_script +from charms import layer + +options = layer.options.get('tls-client') + +def copy_cert(source_key, name): + try: + source = options[source_key] + dest = os.path.join(debug_script.dir, name) + shutil.copy(source, dest) + except Exception: + traceback.print_exc() + +copy_cert('client_certificate_path', 'client.crt') +copy_cert('server_certificate_path', 'server.crt') +copy_cert('ca_certificate_path', 'ca.crt') diff --git a/kubeapi-load-balancer/docs/status.md b/kubeapi-load-balancer/docs/status.md new file mode 100644 index 0000000..c6cceab --- /dev/null +++ b/kubeapi-load-balancer/docs/status.md @@ -0,0 +1,91 @@ +

WorkloadState

+ +```python +WorkloadState(self, /, *args, **kwargs) +``` + +Enum of the valid workload states. + +Valid options are: + + * `WorkloadState.MAINTENANCE` + * `WorkloadState.BLOCKED` + * `WorkloadState.WAITING` + * `WorkloadState.ACTIVE` + +

maintenance

+ +```python +maintenance(message) +``` + +Set the status to the `MAINTENANCE` state with the given operator message. + +__Parameters__ + +- __`message` (str)__: Message to convey to the operator. + +

maint

+ +```python +maint(message) +``` + +Shorthand alias for +[maintenance](status.md#charms.layer.status.maintenance). + +__Parameters__ + +- __`message` (str)__: Message to convey to the operator. + +

blocked

+ +```python +blocked(message) +``` + +Set the status to the `BLOCKED` state with the given operator message. + +__Parameters__ + +- __`message` (str)__: Message to convey to the operator. + +

waiting

+ +```python +waiting(message) +``` + +Set the status to the `WAITING` state with the given operator message. + +__Parameters__ + +- __`message` (str)__: Message to convey to the operator. + +

active

+ +```python +active(message) +``` + +Set the status to the `ACTIVE` state with the given operator message. + +__Parameters__ + +- __`message` (str)__: Message to convey to the operator. + +

status_set

+ +```python +status_set(workload_state, message) +``` + +Set the status to the given workload state with a message. + +__Parameters__ + +- __`workload_state` (WorkloadState or str)__: State of the workload. Should be + a [WorkloadState](status.md#charms.layer.status.WorkloadState) enum + member, or the string value of one of those members. +- __`message` (str)__: Message to convey to the operator. + diff --git a/kubeapi-load-balancer/hooks/apiserver-relation-broken b/kubeapi-load-balancer/hooks/apiserver-relation-broken new file mode 100755 index 0000000..9858c6b --- /dev/null +++ b/kubeapi-load-balancer/hooks/apiserver-relation-broken @@ -0,0 +1,22 @@ +#!/usr/bin/env python3 + +# Load modules from $JUJU_CHARM_DIR/lib +import sys +sys.path.append('lib') + +from charms.layer import basic # noqa +basic.bootstrap_charm_deps() + +from charmhelpers.core import hookenv # noqa +hookenv.atstart(basic.init_config_states) +hookenv.atexit(basic.clear_config_states) + + +# This will load and run the appropriate @hook and other decorated +# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive, +# and $JUJU_CHARM_DIR/hooks/relations. +# +# See https://jujucharms.com/docs/stable/authors-charm-building +# for more information on this pattern. 
+from charms.reactive import main # noqa +main() diff --git a/kubeapi-load-balancer/hooks/apiserver-relation-changed b/kubeapi-load-balancer/hooks/apiserver-relation-changed new file mode 100755 index 0000000..9858c6b --- /dev/null +++ b/kubeapi-load-balancer/hooks/apiserver-relation-changed @@ -0,0 +1,22 @@ +#!/usr/bin/env python3 + +# Load modules from $JUJU_CHARM_DIR/lib +import sys +sys.path.append('lib') + +from charms.layer import basic # noqa +basic.bootstrap_charm_deps() + +from charmhelpers.core import hookenv # noqa +hookenv.atstart(basic.init_config_states) +hookenv.atexit(basic.clear_config_states) + + +# This will load and run the appropriate @hook and other decorated +# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive, +# and $JUJU_CHARM_DIR/hooks/relations. +# +# See https://jujucharms.com/docs/stable/authors-charm-building +# for more information on this pattern. +from charms.reactive import main # noqa +main() diff --git a/kubeapi-load-balancer/hooks/apiserver-relation-departed b/kubeapi-load-balancer/hooks/apiserver-relation-departed new file mode 100755 index 0000000..9858c6b --- /dev/null +++ b/kubeapi-load-balancer/hooks/apiserver-relation-departed @@ -0,0 +1,22 @@ +#!/usr/bin/env python3 + +# Load modules from $JUJU_CHARM_DIR/lib +import sys +sys.path.append('lib') + +from charms.layer import basic # noqa +basic.bootstrap_charm_deps() + +from charmhelpers.core import hookenv # noqa +hookenv.atstart(basic.init_config_states) +hookenv.atexit(basic.clear_config_states) + + +# This will load and run the appropriate @hook and other decorated +# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive, +# and $JUJU_CHARM_DIR/hooks/relations. +# +# See https://jujucharms.com/docs/stable/authors-charm-building +# for more information on this pattern. 
+from charms.reactive import main # noqa +main() diff --git a/kubeapi-load-balancer/hooks/apiserver-relation-joined b/kubeapi-load-balancer/hooks/apiserver-relation-joined new file mode 100755 index 0000000..9858c6b --- /dev/null +++ b/kubeapi-load-balancer/hooks/apiserver-relation-joined @@ -0,0 +1,22 @@ +#!/usr/bin/env python3 + +# Load modules from $JUJU_CHARM_DIR/lib +import sys +sys.path.append('lib') + +from charms.layer import basic # noqa +basic.bootstrap_charm_deps() + +from charmhelpers.core import hookenv # noqa +hookenv.atstart(basic.init_config_states) +hookenv.atexit(basic.clear_config_states) + + +# This will load and run the appropriate @hook and other decorated +# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive, +# and $JUJU_CHARM_DIR/hooks/relations. +# +# See https://jujucharms.com/docs/stable/authors-charm-building +# for more information on this pattern. +from charms.reactive import main # noqa +main() diff --git a/kubeapi-load-balancer/hooks/certificates-relation-broken b/kubeapi-load-balancer/hooks/certificates-relation-broken new file mode 100755 index 0000000..9858c6b --- /dev/null +++ b/kubeapi-load-balancer/hooks/certificates-relation-broken @@ -0,0 +1,22 @@ +#!/usr/bin/env python3 + +# Load modules from $JUJU_CHARM_DIR/lib +import sys +sys.path.append('lib') + +from charms.layer import basic # noqa +basic.bootstrap_charm_deps() + +from charmhelpers.core import hookenv # noqa +hookenv.atstart(basic.init_config_states) +hookenv.atexit(basic.clear_config_states) + + +# This will load and run the appropriate @hook and other decorated +# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive, +# and $JUJU_CHARM_DIR/hooks/relations. +# +# See https://jujucharms.com/docs/stable/authors-charm-building +# for more information on this pattern. 
+from charms.reactive import main # noqa +main() diff --git a/kubeapi-load-balancer/hooks/certificates-relation-changed b/kubeapi-load-balancer/hooks/certificates-relation-changed new file mode 100755 index 0000000..9858c6b --- /dev/null +++ b/kubeapi-load-balancer/hooks/certificates-relation-changed @@ -0,0 +1,22 @@ +#!/usr/bin/env python3 + +# Load modules from $JUJU_CHARM_DIR/lib +import sys +sys.path.append('lib') + +from charms.layer import basic # noqa +basic.bootstrap_charm_deps() + +from charmhelpers.core import hookenv # noqa +hookenv.atstart(basic.init_config_states) +hookenv.atexit(basic.clear_config_states) + + +# This will load and run the appropriate @hook and other decorated +# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive, +# and $JUJU_CHARM_DIR/hooks/relations. +# +# See https://jujucharms.com/docs/stable/authors-charm-building +# for more information on this pattern. +from charms.reactive import main # noqa +main() diff --git a/kubeapi-load-balancer/hooks/certificates-relation-departed b/kubeapi-load-balancer/hooks/certificates-relation-departed new file mode 100755 index 0000000..9858c6b --- /dev/null +++ b/kubeapi-load-balancer/hooks/certificates-relation-departed @@ -0,0 +1,22 @@ +#!/usr/bin/env python3 + +# Load modules from $JUJU_CHARM_DIR/lib +import sys +sys.path.append('lib') + +from charms.layer import basic # noqa +basic.bootstrap_charm_deps() + +from charmhelpers.core import hookenv # noqa +hookenv.atstart(basic.init_config_states) +hookenv.atexit(basic.clear_config_states) + + +# This will load and run the appropriate @hook and other decorated +# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive, +# and $JUJU_CHARM_DIR/hooks/relations. +# +# See https://jujucharms.com/docs/stable/authors-charm-building +# for more information on this pattern. 
+from charms.reactive import main # noqa +main() diff --git a/kubeapi-load-balancer/hooks/certificates-relation-joined b/kubeapi-load-balancer/hooks/certificates-relation-joined new file mode 100755 index 0000000..9858c6b --- /dev/null +++ b/kubeapi-load-balancer/hooks/certificates-relation-joined @@ -0,0 +1,22 @@ +#!/usr/bin/env python3 + +# Load modules from $JUJU_CHARM_DIR/lib +import sys +sys.path.append('lib') + +from charms.layer import basic # noqa +basic.bootstrap_charm_deps() + +from charmhelpers.core import hookenv # noqa +hookenv.atstart(basic.init_config_states) +hookenv.atexit(basic.clear_config_states) + + +# This will load and run the appropriate @hook and other decorated +# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive, +# and $JUJU_CHARM_DIR/hooks/relations. +# +# See https://jujucharms.com/docs/stable/authors-charm-building +# for more information on this pattern. +from charms.reactive import main # noqa +main() diff --git a/kubeapi-load-balancer/hooks/collect-metrics b/kubeapi-load-balancer/hooks/collect-metrics new file mode 100755 index 0000000..8a27863 --- /dev/null +++ b/kubeapi-load-balancer/hooks/collect-metrics @@ -0,0 +1,46 @@ +#!/usr/bin/env python3 + +# Load modules from $CHARM_DIR/lib +import sys +sys.path.append('lib') + +import yaml +import os +from subprocess import check_output, check_call, CalledProcessError + + +def build_command(doc): + values = {} + metrics = doc.get("metrics", {}) + for metric, mdoc in metrics.items(): + if not mdoc: + continue + cmd = mdoc.get("command") + if cmd: + try: + value = check_output(cmd, shell=True, universal_newlines=True) + except CalledProcessError as e: + check_call(['juju-log', '-lERROR', + 'Error collecting metric {}:\n{}'.format( + metric, e.output)]) + continue + value = value.strip() + if value: + values[metric] = value + + if not values: + return None + command = ["add-metric"] + for metric, value in values.items(): + command.append("%s=%s" % (metric, value)) + 
return command + + +if __name__ == '__main__': + charm_dir = os.path.dirname(os.path.abspath(os.path.join(__file__, ".."))) + metrics_yaml = os.path.join(charm_dir, "metrics.yaml") + with open(metrics_yaml) as f: + doc = yaml.load(f) + command = build_command(doc) + if command: + check_call(command) diff --git a/kubeapi-load-balancer/hooks/config-changed b/kubeapi-load-balancer/hooks/config-changed new file mode 100755 index 0000000..9858c6b --- /dev/null +++ b/kubeapi-load-balancer/hooks/config-changed @@ -0,0 +1,22 @@ +#!/usr/bin/env python3 + +# Load modules from $JUJU_CHARM_DIR/lib +import sys +sys.path.append('lib') + +from charms.layer import basic # noqa +basic.bootstrap_charm_deps() + +from charmhelpers.core import hookenv # noqa +hookenv.atstart(basic.init_config_states) +hookenv.atexit(basic.clear_config_states) + + +# This will load and run the appropriate @hook and other decorated +# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive, +# and $JUJU_CHARM_DIR/hooks/relations. +# +# See https://jujucharms.com/docs/stable/authors-charm-building +# for more information on this pattern. +from charms.reactive import main # noqa +main() diff --git a/kubeapi-load-balancer/hooks/ha-relation-broken b/kubeapi-load-balancer/hooks/ha-relation-broken new file mode 100755 index 0000000..9858c6b --- /dev/null +++ b/kubeapi-load-balancer/hooks/ha-relation-broken @@ -0,0 +1,22 @@ +#!/usr/bin/env python3 + +# Load modules from $JUJU_CHARM_DIR/lib +import sys +sys.path.append('lib') + +from charms.layer import basic # noqa +basic.bootstrap_charm_deps() + +from charmhelpers.core import hookenv # noqa +hookenv.atstart(basic.init_config_states) +hookenv.atexit(basic.clear_config_states) + + +# This will load and run the appropriate @hook and other decorated +# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive, +# and $JUJU_CHARM_DIR/hooks/relations. 
+# +# See https://jujucharms.com/docs/stable/authors-charm-building +# for more information on this pattern. +from charms.reactive import main # noqa +main() diff --git a/kubeapi-load-balancer/hooks/ha-relation-changed b/kubeapi-load-balancer/hooks/ha-relation-changed new file mode 100755 index 0000000..9858c6b --- /dev/null +++ b/kubeapi-load-balancer/hooks/ha-relation-changed @@ -0,0 +1,22 @@ +#!/usr/bin/env python3 + +# Load modules from $JUJU_CHARM_DIR/lib +import sys +sys.path.append('lib') + +from charms.layer import basic # noqa +basic.bootstrap_charm_deps() + +from charmhelpers.core import hookenv # noqa +hookenv.atstart(basic.init_config_states) +hookenv.atexit(basic.clear_config_states) + + +# This will load and run the appropriate @hook and other decorated +# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive, +# and $JUJU_CHARM_DIR/hooks/relations. +# +# See https://jujucharms.com/docs/stable/authors-charm-building +# for more information on this pattern. +from charms.reactive import main # noqa +main() diff --git a/kubeapi-load-balancer/hooks/ha-relation-departed b/kubeapi-load-balancer/hooks/ha-relation-departed new file mode 100755 index 0000000..9858c6b --- /dev/null +++ b/kubeapi-load-balancer/hooks/ha-relation-departed @@ -0,0 +1,22 @@ +#!/usr/bin/env python3 + +# Load modules from $JUJU_CHARM_DIR/lib +import sys +sys.path.append('lib') + +from charms.layer import basic # noqa +basic.bootstrap_charm_deps() + +from charmhelpers.core import hookenv # noqa +hookenv.atstart(basic.init_config_states) +hookenv.atexit(basic.clear_config_states) + + +# This will load and run the appropriate @hook and other decorated +# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive, +# and $JUJU_CHARM_DIR/hooks/relations. +# +# See https://jujucharms.com/docs/stable/authors-charm-building +# for more information on this pattern. 
+from charms.reactive import main # noqa +main() diff --git a/kubeapi-load-balancer/hooks/ha-relation-joined b/kubeapi-load-balancer/hooks/ha-relation-joined new file mode 100755 index 0000000..9858c6b --- /dev/null +++ b/kubeapi-load-balancer/hooks/ha-relation-joined @@ -0,0 +1,22 @@ +#!/usr/bin/env python3 + +# Load modules from $JUJU_CHARM_DIR/lib +import sys +sys.path.append('lib') + +from charms.layer import basic # noqa +basic.bootstrap_charm_deps() + +from charmhelpers.core import hookenv # noqa +hookenv.atstart(basic.init_config_states) +hookenv.atexit(basic.clear_config_states) + + +# This will load and run the appropriate @hook and other decorated +# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive, +# and $JUJU_CHARM_DIR/hooks/relations. +# +# See https://jujucharms.com/docs/stable/authors-charm-building +# for more information on this pattern. +from charms.reactive import main # noqa +main() diff --git a/kubeapi-load-balancer/hooks/hook.template b/kubeapi-load-balancer/hooks/hook.template new file mode 100644 index 0000000..9858c6b --- /dev/null +++ b/kubeapi-load-balancer/hooks/hook.template @@ -0,0 +1,22 @@ +#!/usr/bin/env python3 + +# Load modules from $JUJU_CHARM_DIR/lib +import sys +sys.path.append('lib') + +from charms.layer import basic # noqa +basic.bootstrap_charm_deps() + +from charmhelpers.core import hookenv # noqa +hookenv.atstart(basic.init_config_states) +hookenv.atexit(basic.clear_config_states) + + +# This will load and run the appropriate @hook and other decorated +# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive, +# and $JUJU_CHARM_DIR/hooks/relations. +# +# See https://jujucharms.com/docs/stable/authors-charm-building +# for more information on this pattern. 
+from charms.reactive import main # noqa +main() diff --git a/kubeapi-load-balancer/hooks/install b/kubeapi-load-balancer/hooks/install new file mode 100755 index 0000000..9858c6b --- /dev/null +++ b/kubeapi-load-balancer/hooks/install @@ -0,0 +1,22 @@ +#!/usr/bin/env python3 + +# Load modules from $JUJU_CHARM_DIR/lib +import sys +sys.path.append('lib') + +from charms.layer import basic # noqa +basic.bootstrap_charm_deps() + +from charmhelpers.core import hookenv # noqa +hookenv.atstart(basic.init_config_states) +hookenv.atexit(basic.clear_config_states) + + +# This will load and run the appropriate @hook and other decorated +# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive, +# and $JUJU_CHARM_DIR/hooks/relations. +# +# See https://jujucharms.com/docs/stable/authors-charm-building +# for more information on this pattern. +from charms.reactive import main # noqa +main() diff --git a/kubeapi-load-balancer/hooks/lb-consumers-relation-broken b/kubeapi-load-balancer/hooks/lb-consumers-relation-broken new file mode 100755 index 0000000..9858c6b --- /dev/null +++ b/kubeapi-load-balancer/hooks/lb-consumers-relation-broken @@ -0,0 +1,22 @@ +#!/usr/bin/env python3 + +# Load modules from $JUJU_CHARM_DIR/lib +import sys +sys.path.append('lib') + +from charms.layer import basic # noqa +basic.bootstrap_charm_deps() + +from charmhelpers.core import hookenv # noqa +hookenv.atstart(basic.init_config_states) +hookenv.atexit(basic.clear_config_states) + + +# This will load and run the appropriate @hook and other decorated +# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive, +# and $JUJU_CHARM_DIR/hooks/relations. +# +# See https://jujucharms.com/docs/stable/authors-charm-building +# for more information on this pattern. 
+from charms.reactive import main # noqa +main() diff --git a/kubeapi-load-balancer/hooks/lb-consumers-relation-changed b/kubeapi-load-balancer/hooks/lb-consumers-relation-changed new file mode 100755 index 0000000..9858c6b --- /dev/null +++ b/kubeapi-load-balancer/hooks/lb-consumers-relation-changed @@ -0,0 +1,22 @@ +#!/usr/bin/env python3 + +# Load modules from $JUJU_CHARM_DIR/lib +import sys +sys.path.append('lib') + +from charms.layer import basic # noqa +basic.bootstrap_charm_deps() + +from charmhelpers.core import hookenv # noqa +hookenv.atstart(basic.init_config_states) +hookenv.atexit(basic.clear_config_states) + + +# This will load and run the appropriate @hook and other decorated +# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive, +# and $JUJU_CHARM_DIR/hooks/relations. +# +# See https://jujucharms.com/docs/stable/authors-charm-building +# for more information on this pattern. +from charms.reactive import main # noqa +main() diff --git a/kubeapi-load-balancer/hooks/lb-consumers-relation-departed b/kubeapi-load-balancer/hooks/lb-consumers-relation-departed new file mode 100755 index 0000000..9858c6b --- /dev/null +++ b/kubeapi-load-balancer/hooks/lb-consumers-relation-departed @@ -0,0 +1,22 @@ +#!/usr/bin/env python3 + +# Load modules from $JUJU_CHARM_DIR/lib +import sys +sys.path.append('lib') + +from charms.layer import basic # noqa +basic.bootstrap_charm_deps() + +from charmhelpers.core import hookenv # noqa +hookenv.atstart(basic.init_config_states) +hookenv.atexit(basic.clear_config_states) + + +# This will load and run the appropriate @hook and other decorated +# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive, +# and $JUJU_CHARM_DIR/hooks/relations. +# +# See https://jujucharms.com/docs/stable/authors-charm-building +# for more information on this pattern. 
+from charms.reactive import main # noqa +main() diff --git a/kubeapi-load-balancer/hooks/lb-consumers-relation-joined b/kubeapi-load-balancer/hooks/lb-consumers-relation-joined new file mode 100755 index 0000000..9858c6b --- /dev/null +++ b/kubeapi-load-balancer/hooks/lb-consumers-relation-joined @@ -0,0 +1,22 @@ +#!/usr/bin/env python3 + +# Load modules from $JUJU_CHARM_DIR/lib +import sys +sys.path.append('lib') + +from charms.layer import basic # noqa +basic.bootstrap_charm_deps() + +from charmhelpers.core import hookenv # noqa +hookenv.atstart(basic.init_config_states) +hookenv.atexit(basic.clear_config_states) + + +# This will load and run the appropriate @hook and other decorated +# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive, +# and $JUJU_CHARM_DIR/hooks/relations. +# +# See https://jujucharms.com/docs/stable/authors-charm-building +# for more information on this pattern. +from charms.reactive import main # noqa +main() diff --git a/kubeapi-load-balancer/hooks/leader-elected b/kubeapi-load-balancer/hooks/leader-elected new file mode 100755 index 0000000..9858c6b --- /dev/null +++ b/kubeapi-load-balancer/hooks/leader-elected @@ -0,0 +1,22 @@ +#!/usr/bin/env python3 + +# Load modules from $JUJU_CHARM_DIR/lib +import sys +sys.path.append('lib') + +from charms.layer import basic # noqa +basic.bootstrap_charm_deps() + +from charmhelpers.core import hookenv # noqa +hookenv.atstart(basic.init_config_states) +hookenv.atexit(basic.clear_config_states) + + +# This will load and run the appropriate @hook and other decorated +# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive, +# and $JUJU_CHARM_DIR/hooks/relations. +# +# See https://jujucharms.com/docs/stable/authors-charm-building +# for more information on this pattern. 
+from charms.reactive import main # noqa +main() diff --git a/kubeapi-load-balancer/hooks/leader-settings-changed b/kubeapi-load-balancer/hooks/leader-settings-changed new file mode 100755 index 0000000..9858c6b --- /dev/null +++ b/kubeapi-load-balancer/hooks/leader-settings-changed @@ -0,0 +1,22 @@ +#!/usr/bin/env python3 + +# Load modules from $JUJU_CHARM_DIR/lib +import sys +sys.path.append('lib') + +from charms.layer import basic # noqa +basic.bootstrap_charm_deps() + +from charmhelpers.core import hookenv # noqa +hookenv.atstart(basic.init_config_states) +hookenv.atexit(basic.clear_config_states) + + +# This will load and run the appropriate @hook and other decorated +# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive, +# and $JUJU_CHARM_DIR/hooks/relations. +# +# See https://jujucharms.com/docs/stable/authors-charm-building +# for more information on this pattern. +from charms.reactive import main # noqa +main() diff --git a/kubeapi-load-balancer/hooks/loadbalancer-relation-broken b/kubeapi-load-balancer/hooks/loadbalancer-relation-broken new file mode 100755 index 0000000..9858c6b --- /dev/null +++ b/kubeapi-load-balancer/hooks/loadbalancer-relation-broken @@ -0,0 +1,22 @@ +#!/usr/bin/env python3 + +# Load modules from $JUJU_CHARM_DIR/lib +import sys +sys.path.append('lib') + +from charms.layer import basic # noqa +basic.bootstrap_charm_deps() + +from charmhelpers.core import hookenv # noqa +hookenv.atstart(basic.init_config_states) +hookenv.atexit(basic.clear_config_states) + + +# This will load and run the appropriate @hook and other decorated +# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive, +# and $JUJU_CHARM_DIR/hooks/relations. +# +# See https://jujucharms.com/docs/stable/authors-charm-building +# for more information on this pattern. 
+from charms.reactive import main # noqa +main() diff --git a/kubeapi-load-balancer/hooks/loadbalancer-relation-changed b/kubeapi-load-balancer/hooks/loadbalancer-relation-changed new file mode 100755 index 0000000..9858c6b --- /dev/null +++ b/kubeapi-load-balancer/hooks/loadbalancer-relation-changed @@ -0,0 +1,22 @@ +#!/usr/bin/env python3 + +# Load modules from $JUJU_CHARM_DIR/lib +import sys +sys.path.append('lib') + +from charms.layer import basic # noqa +basic.bootstrap_charm_deps() + +from charmhelpers.core import hookenv # noqa +hookenv.atstart(basic.init_config_states) +hookenv.atexit(basic.clear_config_states) + + +# This will load and run the appropriate @hook and other decorated +# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive, +# and $JUJU_CHARM_DIR/hooks/relations. +# +# See https://jujucharms.com/docs/stable/authors-charm-building +# for more information on this pattern. +from charms.reactive import main # noqa +main() diff --git a/kubeapi-load-balancer/hooks/loadbalancer-relation-departed b/kubeapi-load-balancer/hooks/loadbalancer-relation-departed new file mode 100755 index 0000000..9858c6b --- /dev/null +++ b/kubeapi-load-balancer/hooks/loadbalancer-relation-departed @@ -0,0 +1,22 @@ +#!/usr/bin/env python3 + +# Load modules from $JUJU_CHARM_DIR/lib +import sys +sys.path.append('lib') + +from charms.layer import basic # noqa +basic.bootstrap_charm_deps() + +from charmhelpers.core import hookenv # noqa +hookenv.atstart(basic.init_config_states) +hookenv.atexit(basic.clear_config_states) + + +# This will load and run the appropriate @hook and other decorated +# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive, +# and $JUJU_CHARM_DIR/hooks/relations. +# +# See https://jujucharms.com/docs/stable/authors-charm-building +# for more information on this pattern. 
+from charms.reactive import main # noqa +main() diff --git a/kubeapi-load-balancer/hooks/loadbalancer-relation-joined b/kubeapi-load-balancer/hooks/loadbalancer-relation-joined new file mode 100755 index 0000000..9858c6b --- /dev/null +++ b/kubeapi-load-balancer/hooks/loadbalancer-relation-joined @@ -0,0 +1,22 @@ +#!/usr/bin/env python3 + +# Load modules from $JUJU_CHARM_DIR/lib +import sys +sys.path.append('lib') + +from charms.layer import basic # noqa +basic.bootstrap_charm_deps() + +from charmhelpers.core import hookenv # noqa +hookenv.atstart(basic.init_config_states) +hookenv.atexit(basic.clear_config_states) + + +# This will load and run the appropriate @hook and other decorated +# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive, +# and $JUJU_CHARM_DIR/hooks/relations. +# +# See https://jujucharms.com/docs/stable/authors-charm-building +# for more information on this pattern. +from charms.reactive import main # noqa +main() diff --git a/kubeapi-load-balancer/hooks/nrpe-external-master-relation-broken b/kubeapi-load-balancer/hooks/nrpe-external-master-relation-broken new file mode 100755 index 0000000..9858c6b --- /dev/null +++ b/kubeapi-load-balancer/hooks/nrpe-external-master-relation-broken @@ -0,0 +1,22 @@ +#!/usr/bin/env python3 + +# Load modules from $JUJU_CHARM_DIR/lib +import sys +sys.path.append('lib') + +from charms.layer import basic # noqa +basic.bootstrap_charm_deps() + +from charmhelpers.core import hookenv # noqa +hookenv.atstart(basic.init_config_states) +hookenv.atexit(basic.clear_config_states) + + +# This will load and run the appropriate @hook and other decorated +# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive, +# and $JUJU_CHARM_DIR/hooks/relations. +# +# See https://jujucharms.com/docs/stable/authors-charm-building +# for more information on this pattern. 
+from charms.reactive import main # noqa +main() diff --git a/kubeapi-load-balancer/hooks/nrpe-external-master-relation-changed b/kubeapi-load-balancer/hooks/nrpe-external-master-relation-changed new file mode 100755 index 0000000..9858c6b --- /dev/null +++ b/kubeapi-load-balancer/hooks/nrpe-external-master-relation-changed @@ -0,0 +1,22 @@ +#!/usr/bin/env python3 + +# Load modules from $JUJU_CHARM_DIR/lib +import sys +sys.path.append('lib') + +from charms.layer import basic # noqa +basic.bootstrap_charm_deps() + +from charmhelpers.core import hookenv # noqa +hookenv.atstart(basic.init_config_states) +hookenv.atexit(basic.clear_config_states) + + +# This will load and run the appropriate @hook and other decorated +# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive, +# and $JUJU_CHARM_DIR/hooks/relations. +# +# See https://jujucharms.com/docs/stable/authors-charm-building +# for more information on this pattern. +from charms.reactive import main # noqa +main() diff --git a/kubeapi-load-balancer/hooks/nrpe-external-master-relation-departed b/kubeapi-load-balancer/hooks/nrpe-external-master-relation-departed new file mode 100755 index 0000000..9858c6b --- /dev/null +++ b/kubeapi-load-balancer/hooks/nrpe-external-master-relation-departed @@ -0,0 +1,22 @@ +#!/usr/bin/env python3 + +# Load modules from $JUJU_CHARM_DIR/lib +import sys +sys.path.append('lib') + +from charms.layer import basic # noqa +basic.bootstrap_charm_deps() + +from charmhelpers.core import hookenv # noqa +hookenv.atstart(basic.init_config_states) +hookenv.atexit(basic.clear_config_states) + + +# This will load and run the appropriate @hook and other decorated +# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive, +# and $JUJU_CHARM_DIR/hooks/relations. +# +# See https://jujucharms.com/docs/stable/authors-charm-building +# for more information on this pattern. 
+from charms.reactive import main # noqa +main() diff --git a/kubeapi-load-balancer/hooks/nrpe-external-master-relation-joined b/kubeapi-load-balancer/hooks/nrpe-external-master-relation-joined new file mode 100755 index 0000000..9858c6b --- /dev/null +++ b/kubeapi-load-balancer/hooks/nrpe-external-master-relation-joined @@ -0,0 +1,22 @@ +#!/usr/bin/env python3 + +# Load modules from $JUJU_CHARM_DIR/lib +import sys +sys.path.append('lib') + +from charms.layer import basic # noqa +basic.bootstrap_charm_deps() + +from charmhelpers.core import hookenv # noqa +hookenv.atstart(basic.init_config_states) +hookenv.atexit(basic.clear_config_states) + + +# This will load and run the appropriate @hook and other decorated +# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive, +# and $JUJU_CHARM_DIR/hooks/relations. +# +# See https://jujucharms.com/docs/stable/authors-charm-building +# for more information on this pattern. +from charms.reactive import main # noqa +main() diff --git a/kubeapi-load-balancer/hooks/post-series-upgrade b/kubeapi-load-balancer/hooks/post-series-upgrade new file mode 100755 index 0000000..9858c6b --- /dev/null +++ b/kubeapi-load-balancer/hooks/post-series-upgrade @@ -0,0 +1,22 @@ +#!/usr/bin/env python3 + +# Load modules from $JUJU_CHARM_DIR/lib +import sys +sys.path.append('lib') + +from charms.layer import basic # noqa +basic.bootstrap_charm_deps() + +from charmhelpers.core import hookenv # noqa +hookenv.atstart(basic.init_config_states) +hookenv.atexit(basic.clear_config_states) + + +# This will load and run the appropriate @hook and other decorated +# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive, +# and $JUJU_CHARM_DIR/hooks/relations. +# +# See https://jujucharms.com/docs/stable/authors-charm-building +# for more information on this pattern. 
+from charms.reactive import main # noqa +main() diff --git a/kubeapi-load-balancer/hooks/pre-series-upgrade b/kubeapi-load-balancer/hooks/pre-series-upgrade new file mode 100755 index 0000000..9858c6b --- /dev/null +++ b/kubeapi-load-balancer/hooks/pre-series-upgrade @@ -0,0 +1,22 @@ +#!/usr/bin/env python3 + +# Load modules from $JUJU_CHARM_DIR/lib +import sys +sys.path.append('lib') + +from charms.layer import basic # noqa +basic.bootstrap_charm_deps() + +from charmhelpers.core import hookenv # noqa +hookenv.atstart(basic.init_config_states) +hookenv.atexit(basic.clear_config_states) + + +# This will load and run the appropriate @hook and other decorated +# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive, +# and $JUJU_CHARM_DIR/hooks/relations. +# +# See https://jujucharms.com/docs/stable/authors-charm-building +# for more information on this pattern. +from charms.reactive import main # noqa +main() diff --git a/kubeapi-load-balancer/hooks/relations/hacluster/.stestr.conf b/kubeapi-load-balancer/hooks/relations/hacluster/.stestr.conf new file mode 100644 index 0000000..5fcccac --- /dev/null +++ b/kubeapi-load-balancer/hooks/relations/hacluster/.stestr.conf @@ -0,0 +1,3 @@ +[DEFAULT] +test_path=./unit_tests +top_dir=./ diff --git a/kubeapi-load-balancer/hooks/relations/hacluster/README.md b/kubeapi-load-balancer/hooks/relations/hacluster/README.md new file mode 100644 index 0000000..e8147ac --- /dev/null +++ b/kubeapi-load-balancer/hooks/relations/hacluster/README.md @@ -0,0 +1,90 @@ +# Overview + +This interface handles the communication with the hacluster subordinate +charm using the `ha` interface protocol. + +# Usage + +## Requires + +The interface layer will set the following reactive states, as appropriate: + + * `{relation_name}.connected` The relation is established and ready for + the local charm to configure the hacluster subordinate charm. 
The + configuration of the resources to manage for the hacluster charm + can be managed via one of the following methods: + + * `manage_resources` method + * `bind_on` method + + Configuration of the managed resources within the hacluster can be + managed by passing `common.CRM` object definitions to the + `manage_resources` method. + + * `{relation_name}.available` The hacluster is up and ready. + +For example: +```python +from charms.reactive import when, when_not +from charms.reactive import set_state, remove_state + +from relations.hacluster.common import CRM + + +@when('ha.connected') +def cluster_connected(hacluster): + + resources = CRM() + resources.primitive('res_vip', 'ocf:IPAddr2', + params='ip=10.0.3.100 nic=eth0', + op='monitor interval="10s"') + resources.clone('cl_res_vip', 'res_vip') + + hacluster.bind_on(iface='eth0', mcastport=4430) + hacluster.manage_resources(resources) +``` + +Additionally, for more code clarity a custom object implements the interface +defined in common.ResourceDescriptor can be used to simplify the code for +reuse. + +For example: +```python +import ipaddress + +from relation.hacluster.common import CRM +from relation.hacluster.common import ResourceDescriptor + +class VirtualIP(ResourceDescriptor): + def __init__(self, vip, nic='eth0'): + self.vip = vip + self.nic = 'eth0' + + def configure_resource(self, crm): + ipaddr = ipaddress.ip_address(self.vip) + if isinstance(ipaddr, ipaddress.IPv4Address): + res_type = 'ocf:heartbeat:IPAddr2' + res_parms = 'ip={ip} nic={nic}'.format(ip=self.vip, + nic=self.nic) + else: + res_type = 'ocf:heartbeat:IPv6addr' + res_params = 'ipv6addr={ip} nic={nic}'.format(ip=self.vip, + nic=self.nic) + + crm.primitive('res_vip', res_type, params=res_params, + op='monitor interval="10s"') + crm.clone('cl_res_vip', 'res_vip') +``` + +Once the VirtualIP class above has been defined in charm code, it can make +the code a bit cleaner. 
The example above can thusly be written as: + +```python +@when('ha.connected') +def cluster_connected(hacluster): + resources = CRM() + resources.add(VirtualIP('10.0.3.100')) + + hacluster.bind_on(iface='eth0', mcastport=4430) + hacluster.manage_resources(resources) +``` diff --git a/kubeapi-load-balancer/hooks/relations/hacluster/__init__.py b/kubeapi-load-balancer/hooks/relations/hacluster/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/kubeapi-load-balancer/hooks/relations/hacluster/copyright b/kubeapi-load-balancer/hooks/relations/hacluster/copyright new file mode 100644 index 0000000..5a49dcb --- /dev/null +++ b/kubeapi-load-balancer/hooks/relations/hacluster/copyright @@ -0,0 +1,21 @@ +Format: http://www.debian.org/doc/packaging-manuals/copyright-format/1.0 + +Files: * +Copyright: 2015, Canonical Ltd. +License: Apache-2.0 + +License: Apache-2.0 + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + . + http://www.apache.org/licenses/LICENSE-2.0 + . + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + . + On Debian-based systems the full text of the Apache version 2.0 license + can be found in `/usr/share/common-licenses/Apache-2.0'. diff --git a/kubeapi-load-balancer/hooks/relations/hacluster/interface.yaml b/kubeapi-load-balancer/hooks/relations/hacluster/interface.yaml new file mode 100644 index 0000000..f03f3d7 --- /dev/null +++ b/kubeapi-load-balancer/hooks/relations/hacluster/interface.yaml @@ -0,0 +1,16 @@ +name: hacluster +summary: | + Provides the hacluster interface used for configuring Corosync + and Pacemaker services. 
+maintainer: OpenStack Charmers +ignore: + - '.gitignore' + - '.gitreview' + - '.testr.conf' + - 'test-requirements' + - 'tox.ini' + - 'unit_tests' + - '.zuul.yaml' + - 'setup.cfg' + - 'setup.py' + - '**/ops_ha_interface.py' diff --git a/kubeapi-load-balancer/hooks/relations/hacluster/interface_hacluster/__init__.py b/kubeapi-load-balancer/hooks/relations/hacluster/interface_hacluster/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/kubeapi-load-balancer/hooks/relations/hacluster/interface_hacluster/common.py b/kubeapi-load-balancer/hooks/relations/hacluster/interface_hacluster/common.py new file mode 100644 index 0000000..6e23d34 --- /dev/null +++ b/kubeapi-load-balancer/hooks/relations/hacluster/interface_hacluster/common.py @@ -0,0 +1,1008 @@ +#!/usr/bin/python +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import hashlib +import ipaddress +import json + + +class ResourceManagement(): + + def data_changed(self, data_id, data, hash_type='md5'): + raise NotImplementedError + + def get_local(self, key, default=None, scope=None): + raise NotImplementedError + + def set_local(self, key=None, value=None, data=None, scope=None, **kwdata): + raise NotImplementedError + + def set_remote(self, key=None, value=None, data=None, scope=None, + **kwdata): + raise NotImplementedError + + def is_clustered(self): + """Has the hacluster charm set clustered? + + The hacluster charm sets cluster=True when it determines it is ready. 
+ Check the relation data for clustered and force a boolean return. + + :returns: boolean + """ + clustered_values = self.get_remote_all('clustered') + if clustered_values: + # There is only ever one subordinate hacluster unit + clustered = clustered_values[0] + # Future versions of hacluster will return a bool + # Current versions return a string + if type(clustered) is bool: + return clustered + elif (clustered is not None and + (clustered.lower() == 'true' or + clustered.lower() == 'yes')): + return True + return False + + def bind_on(self, iface=None, mcastport=None): + relation_data = {} + if iface: + relation_data['corosync_bindiface'] = iface + if mcastport: + relation_data['corosync_mcastport'] = mcastport + + if relation_data and self.data_changed('hacluster-bind_on', + relation_data): + self.set_local(**relation_data) + self.set_remote(**relation_data) + + def manage_resources(self, crm): + """ + Request for the hacluster to manage the resources defined in the + crm object. + + res = CRM() + res.primitive('res_neutron_haproxy', 'lsb:haproxy', + op='monitor interval="5s"') + res.init_services('haproxy') + res.clone('cl_nova_haproxy', 'res_neutron_haproxy') + + hacluster.manage_resources(crm) + + :param crm: CRM() instance - Config object for Pacemaker resources + :returns: None + """ + relation_data = { + 'json_{}'.format(k): json.dumps(v, sort_keys=True) + for k, v in crm.items() + } + if self.data_changed('hacluster-manage_resources', relation_data): + self.set_local(**relation_data) + self.set_remote(**relation_data) + + def bind_resources(self, iface=None, mcastport=None): + """Inform the ha subordinate about each service it should manage. 
The + child class specifies the services via self.ha_resources + + :param iface: string - Network interface to bind to + :param mcastport: int - Multicast port corosync should use for cluster + management traffic + """ + if mcastport is None: + mcastport = 4440 + resources_dict = self.get_local('resources') + self.bind_on(iface=iface, mcastport=mcastport) + if resources_dict: + resources = CRM(**resources_dict) + self.manage_resources(resources) + + def delete_resource(self, resource_name): + resource_dict = self.get_local('resources') + if resource_dict: + resources = CRM(**resource_dict) + else: + resources = CRM() + resources.add_delete_resource(resource_name) + self.set_local(resources=resources) + + def add_vip(self, name, vip, iface=None, netmask=None): + """Add a VirtualIP object for each user specified vip to self.resources + + :param name: string - Name of service + :param vip: string - Virtual IP to be managed + :param iface: string - Network interface to bind vip to + :param netmask: string - Netmask for vip + :returns: None + """ + resource_dict = self.get_local('resources') + if resource_dict: + resources = CRM(**resource_dict) + else: + resources = CRM() + resources.add( + VirtualIP( + name, + vip, + nic=iface, + cidr=netmask,)) + + # Vip Group + group = 'grp_{}_vips'.format(name) + vip_res_group_members = [] + if resource_dict: + vip_resources = resource_dict.get('resources') + if vip_resources: + for vip_res in vip_resources: + if 'vip' in vip_res: + vip_res_group_members.append(vip_res) + resources.group(group, + *sorted(vip_res_group_members)) + + self.set_local(resources=resources) + + def remove_vip(self, name, vip, iface=None): + """Remove a virtual IP + + :param name: string - Name of service + :param vip: string - Virtual IP + :param iface: string - Network interface vip bound to + """ + if iface: + nic_name = iface + else: + nic_name = hashlib.sha1(vip.encode('UTF-8')).hexdigest()[:7] + self.delete_resource('res_{}_{}_vip'.format(name, 
nic_name)) + + def add_init_service(self, name, service, clone=True): + """Add a InitService object for haproxy to self.resources + + :param name: string - Name of service + :param service: string - Name service uses in init system + :returns: None + """ + resource_dict = self.get_local('resources') + if resource_dict: + resources = CRM(**resource_dict) + else: + resources = CRM() + resources.add( + InitService(name, service, clone)) + self.set_local(resources=resources) + + def remove_init_service(self, name, service): + """Remove an init service + + :param name: string - Name of service + :param service: string - Name of service used in init system + """ + res_key = 'res_{}_{}'.format( + name.replace('-', '_'), + service.replace('-', '_')) + self.delete_resource(res_key) + + def add_systemd_service(self, name, service, clone=True): + """Add a SystemdService object to self.resources + + :param name: string - Name of service + :param service: string - Name service uses in systemd + :returns: None + """ + resource_dict = self.get_local('resources') + if resource_dict: + resources = CRM(**resource_dict) + else: + resources = CRM() + resources.add( + SystemdService(name, service, clone)) + self.set_local(resources=resources) + + def remove_systemd_service(self, name, service): + """Remove a systemd service + + :param name: string - Name of service + :param service: string - Name of service used in systemd + """ + res_key = 'res_{}_{}'.format( + name.replace('-', '_'), + service.replace('-', '_')) + self.delete_resource(res_key) + + def add_dnsha(self, name, ip, fqdn, endpoint_type): + """Add a DNS entry to self.resources + + :param name: string - Name of service + :param ip: string - IP address dns entry should resolve to + :param fqdn: string - The DNS entry name + :param endpoint_type: string - Public, private, internal etc + :returns: None + """ + resource_dict = self.get_local('resources') + if resource_dict: + resources = CRM(**resource_dict) + else: + resources 
= CRM() + resources.add( + DNSEntry(name, ip, fqdn, endpoint_type)) + + # DNS Group + group = 'grp_{}_hostnames'.format(name) + dns_res_group_members = [] + if resource_dict: + dns_resources = resource_dict.get('resources') + if dns_resources: + for dns_res in dns_resources: + if 'hostname' in dns_res: + dns_res_group_members.append(dns_res) + resources.group(group, + *sorted(dns_res_group_members)) + + self.set_local(resources=resources) + + def remove_dnsha(self, name, endpoint_type): + """Remove a DNS entry + + :param name: string - Name of service + :param endpoint_type: string - Public, private, internal etc + :returns: None + """ + res_key = 'res_{}_{}_hostname'.format( + self.service_name.replace('-', '_'), + self.endpoint_type) + self.delete_resource(res_key) + + def add_colocation(self, name, score, colo_resources, node_attribute=None): + """Add a colocation directive + + :param name: string - Name of colocation directive + :param score: string - ALWAYS, INFINITY, NEVER, NEGATIVE_INFINITY}. See + CRM.colocation for more details + :param colo_resources: List[string] - List of resource names to + colocate + :param node_attribute: Colocate resources on a set of nodes with this + attribute and not necessarily on the same node. 
+ """ + node_config = {} + if node_attribute: + node_config = { + 'node_attribute': node_attribute} + resource_dict = self.get_local('resources') + if resource_dict: + resources = CRM(**resource_dict) + else: + resources = CRM() + resources.colocation( + name, + score, + *colo_resources, + **node_config) + self.set_local(resources=resources) + + def remove_colocation(self, name): + """Remove a colocation directive + + :param name: string - Name of colocation directive + """ + self.delete_resource(name) + + def get_remote_all(self, key, default=None): + """Return a list of all values presented by remote units for key""" + raise NotImplementedError + + +class CRM(dict): + """ + Configuration object for Pacemaker resources for the HACluster + interface. This class provides access to the supported resources + available in the 'crm configure' within the HACluster. + + See Also + -------- + More documentation is available regarding the definitions of + primitives, clones, and other pacemaker resources at the crmsh + site at http://crmsh.github.io/man + """ + + # Constants provided for ordering constraints (e.g. the kind value) + MANDATORY = "Mandatory" + OPTIONAL = "Optional" + SERIALIZE = "Serialize" + + # Constants defining weights of constraints + INFINITY = "inf" + NEG_INFINITY = "-inf" + + # Constaints aliased to their interpretations for constraints + ALWAYS = INFINITY + NEVER = NEG_INFINITY + + def __init__(self, *args, **kwargs): + self['resources'] = {} + self['delete_resources'] = [] + self['resource_params'] = {} + self['groups'] = {} + self['ms'] = {} + self['orders'] = {} + self['colocations'] = {} + self['clones'] = {} + self['locations'] = {} + self['init_services'] = [] + self['systemd_services'] = [] + super(CRM, self).__init__(*args, **kwargs) + + def primitive(self, name, agent, description=None, **kwargs): + """Configures a primitive resource within Pacemaker. + + A primitive is used to describe a resource which should be managed + by the cluster. 
Primitives consist of a name, the agent type, and + various configuration options to the primitive. For example: + + crm.primitive('www8', 'apache', + params='configfile=/etc/apache/www8.conf', + operations='$id-ref=apache_ops') + + will create the an apache primitive (resource) for the www8 service + hosted by the Apache HTTP server. The parameters specified can either + be provided individually (e.g. a string) or as an iterable. + + The following example shows how to specify multiple ops for a drbd + volume in a Master/Slave configuration:: + + ops = ['monitor role=Master interval=60s', + 'monitor role=Slave interval=300s'] + + crm.primitive('r0', 'ocf:linbit:drbd', + params='drbd_resource=r0', + op=ops) + + Additional arguments may be passed in as kwargs in which the key of + the kwarg is prepended to the value. + + Parameters + ---------- + name: str + the name of the primitive. + agent: str + the type of agent to use to monitor the primitive resource + (e.g. ocf:linbit:drbd). + description: str, optional, kwarg + a description about the resource + params: str or iterable, optional, kwarg + parameters which are provided to the resource agent + meta: str or iterable, optional, kwarg + metadata information for the primitive resource + utilization: str or iterable, optional, kwarg + utilization information for the primitive resource + operations: str or iterable, optional, kwarg + operations information for the primitive resource in id_spec + format (e.g. $id= or $id-ref=) + op: str or iterable, optional, kwarg + op information regarding the primitive resource. 
This takes the + form of ' [= = ...]' + + Returns + ------- + None + + See Also + -------- + http://crmsh.github.io/man/#cmdhelp_configure_primitive + """ + resources = self['resources'] + resources[name] = agent + + specs = '' + if description: + specs = specs + 'description="%s"' % description + + # Use the ordering specified in the crm manual + for key in 'params', 'meta', 'utilization', 'operations', 'op': + if key not in kwargs: + continue + specs = specs + (' %s' % self._parse(key, kwargs[key])) + + if specs: + self['resource_params'][name] = specs + + def _parse(self, prefix, data): + results = '' + if isinstance(data, str): + data = [data] + + first = True + for d in data: + if first: + results = results + ' ' + first = False + results = results + ('%s %s ' % (prefix, d)) + results = results.rstrip() + return results + + def clone(self, name, resource, description=None, **kwargs): + """Creates a resource which should run on all nodes. + + Parameters + ---------- + name: str + the name of the clone + resource: str + the name or id of the resource to clone + description: str, optional + text containing a description for the clone + meta: str or list of str, optional, kwarg + metadata attributes to assign to the clone + params: str or list of str, optional, kwarg + parameters to assign to the clone + + Returns + ------- + None + + See Also + -------- + http://crmsh.github.io/man/#cmdhelp_configure_clone + """ + clone_specs = resource + if description: + clone_specs = clone_specs + (' description="%s"' % description) + + for key in 'meta', 'params': + if key not in kwargs: + continue + value = kwargs[key] + if not value: + continue + clone_specs = clone_specs + (' %s' % self._parse(key, value)) + + self['clones'][name] = clone_specs + + def colocation(self, name, score=ALWAYS, *resources, **kwargs): + """Configures the colocation constraints of resources. + + Provides placement constraints regarding resources defined within + the cluster. 
Using the colocate function, resource affinity or + anti-affinity can be defined. + + For example, the following code ensures that the nova-console service + always runs where the cluster vip is running: + + crm.colocation('console_with_vip', ALWAYS, + 'nova-console', 'vip') + + The affinity or anti-affinity of resources relationships is be + expressed in the `score` parameter. A positive score indicates that + the resources should run on the same node.A score of INFINITY (or + ALWAYS) will ensure the resources are always run on the same node(s) + and a score of NEG_INFINITY (or NEVER) ensures that the resources are + never run on the same node(s). + + crm.colocation('never_apache_with_dummy', NEVER, + 'apache', 'dummy') + + Any *resources values which are provided are treated as resources which + the colocation constraint applies to. At least two resources must be + defined as part of the ordering constraint. + + The resources take the form of [:role]. If the + colocation constraint applies specifically to a role, this information + should be included int he resource supplied. + + Parameters + ---------- + id: str + id or name of the colocation constraint + score: str {ALWAYS, INFINITY, NEVER, NEGATIVE_INFINITY} or int + the score or weight of the colocation constraint. A positive value + will indicate that the resources should run on the same node. A + negative value indicates that the resources should run on separate + nodes. + resources: str or list + the list of resources which the colocation constraint applies to. + node_attribute: str, optional, kwarg + can be used to run the resources on a set of nodes, not just a + single node. 
+ + Returns + ------- + None + + See Also + -------- + http://crmsh.github.io/man/#cmdhelp_configure_colocation + """ + specs = '%s: %s' % (score, ' '.join(resources)) + if 'node_attribute' in kwargs: + specs = specs + (' node-attribute=%s' % kwargs['node_attribute']) + self['colocations'][name] = specs + + def group(self, name, *resources, **kwargs): + """Creates a group of resources within Pacemaker. + + The created group includes the list of resources provided in the list + of resources supplied. For example:: + + crm.group('grp_mysql', 'res_mysql_rbd', 'res_mysql_fs', + 'res_mysql_vip', 'res_mysqld') + + will create the 'grp_mysql' resource group consisting of the + res_mysql_rbd, res_mysql_fs, res_mysql_vip, and res_mysqld resources. + + Parameters + ---------- + name: str + the name of the group resource + resources: list of str + the names or ids of resources to include within the group. + description: str, optional, kwarg + text to describe the resource + meta: str or list of str, optional, kwarg + metadata attributes to assign to the group + params: str or list of str, optional, kwarg + parameters to assign to the group + + Returns + ------- + None + + See Also + -------- + http://crmsh.github.io/man/#cmdhelp_configure_group + """ + specs = ' '.join(resources) + if 'description' in kwargs: + specs = specs + (' description=%s"' % kwargs['description']) + + for key in 'meta', 'params': + if key not in kwargs: + continue + value = kwargs[key] + specs = specs + (' %s' % self._parse(key, value)) + + self['groups'][name] = specs + + def remove_deleted_resources(self): + """Work through the existing resources and remove any mention of ones + which have been marked for deletion.""" + for res in self['delete_resources']: + for key in self.keys(): + if key == 'delete_resources': + continue + if isinstance(self[key], dict) and res in self[key].keys(): + del self[key][res] + elif isinstance(self[key], list) and res in self[key]: + self[key].remove(res) + elif 
isinstance(self[key], tuple) and res in self[key]: + self[key] = tuple(x for x in self[key] if x != res) + + def delete_resource(self, *resources): + """Specify objects/resources to be deleted from within Pacemaker. This + is not additive, the list of resources is set to exaclty what was + passed in. + + Parameters + ---------- + resources: str or list + the name or id of the specific resource to delete. + + Returns + ------- + None + + See Also + -------- + http://crmsh.github.io/man/#cmdhelp_configure_delete + """ + self['delete_resources'] = resources + self.remove_deleted_resources() + + def add_delete_resource(self, resource): + """Specify an object/resource to delete from within Pacemaker. It can + be called multiple times to add additional resources to the deletion + list. + + Parameters + ---------- + resources: str + the name or id of the specific resource to delete. + + Returns + ------- + None + + See Also + -------- + http://crmsh.github.io/man/#cmdhelp_configure_delete + """ + if resource not in self['delete_resources']: + # NOTE(fnordahl): this unpleasant piece of code is regrettably + # necessary for Python3.4 (and trusty) compability see LP: #1814218 + # and LP: #1813982 + self['delete_resources'] = tuple( + self['delete_resources'] or ()) + (resource,) + self.remove_deleted_resources() + + def init_services(self, *resources): + """Specifies that the service(s) is an init or upstart service. + + Services (resources) which are noted as upstart services are + disabled, stopped, and left to pacemaker to manage the resource. + + Parameters + ---------- + resources: str or list of str, varargs + The resources which should be noted as init services. + + Returns + ------- + None + """ + self['init_services'] = resources + + def systemd_services(self, *resources): + """Specifies that the service(s) is a systemd service. + + Services (resources) which are noted as systemd services are + disabled, stopped, and left to pacemaker to manage the resource. 
+ + Parameters + ---------- + resources: str or list of str, varargs + The resources which should be noted as systemd services. + + Returns + ------- + None + """ + self['systemd_services'] = resources + + def ms(self, name, resource, description=None, **kwargs): + """Create a master/slave resource type. + + The following code provides an example of creating a master/slave + resource on drbd disk1:: + + crm.ms('disk1', 'drbd1', meta='notify=true globally-unique=false') + + Parameters + ---------- + name: str + the name or id of the master resource + resource: str + the name or id of the resource which now ha a master/slave + assocation tied to it. + description: str, optional + a textual description of the master resource + meta: str or list of strs, optional, kwargs + strings defining the metadata for the master/slave resource type + params: str or list of strs, optional, kwargs + parameter strings which should be passed to the master/slave + resource creation + + Returns + ------- + None + + See Also + -------- + http://crmsh.github.io/man/#cmdhelp_configure_ms + """ + specs = resource + if description: + specs = specs + (' description="%s"' % description) + + for key in 'meta', 'params': + if key not in kwargs: + continue + value = kwargs[key] + specs = specs + (' %s' % self._parse(key, value)) + + self['ms'][name] = specs + + def location(self, name, resource, **kwargs): + """Defines the preference of nodes for the given resource. + + The location constraitns consist of one or more rules which specify + a score to be awarded if the rules match. + + Parameters + ---------- + name: str + the name or id of the location constraint + resource: str + the name, id, resource, set, tag, or resoruce pattern defining the + set of resources which match the location placement constraint. 
+ attributes: str or list str, optional, kwarg + attributes which should be assigned to the location constraint + rule: str or list of str, optional, kwarg + the rule(s) which define the location constraint rules when + selecting a location to run the resource. + + Returns + ------- + None + + See Also + -------- + http://crmsh.github.io/man/#cmdhelp_configure_location + """ + specs = resource + + # Check if there are attributes assigned to the location and if so, + # format the spec string with the attributes + if 'attributes' in kwargs: + attrs = kwargs['attributes'] + if isinstance(attrs, str): + attrs = [attrs] + specs = specs + (' %s' % ' '.join(attrs)) + + if 'rule' in kwargs: + rules = kwargs['rule'] + specs = specs + (' %s' % self._parse('rule', rules)) + + self['locations'][name] = specs + + def order(self, name, score=None, *resources, **kwargs): + """Configures the ordering constraints of resources. + + Provides ordering constraints to resources defined in a Pacemaker + cluster which affect the way that resources are started, stopped, + promoted, etc. Basic ordering is provided by simply specifying the + ordering name and an ordered list of the resources which the ordering + constraint applies to. + + For example, the following code ensures that the apache resource is + started after the ClusterIP is started:: + + hacluster.order('apache-after-ip', 'ClusterIP', 'apache') + + By default, the ordering constraint will specify that the ordering + constraint is mandatory. The constraint behavior can be specified + using the 'score' keyword argument, e.g.:: + + hacluster.order('apache-after-ip', score=hacluster.OPTIONAL, + 'ClusterIP', 'apache') + + Any *resources values which are provided are treated as resources which + the ordering constraint applies to. At least two resources must be + defined as part of the ordering constraint. + + The resources take the form of [:]. 
If the + ordering constraint applies to a specific action for the resource, + this information should be included in the resource supplied. + + Parameters + ---------- + name: str + the id or name of the order constraint + resoures: str or list of strs in varargs format + the resources the ordering constraint applies to. The ordering + of the list of resources is used to provide the ordering. + score: {MANDATORY, OPTIONAL, SERIALIZED}, optional + the score of the ordering constraint. + symmetrical: boolean, optional, kwarg + when True, then the services for the resources will be stopped in + the reverse order. The default value for this is True. + + Returns + ------- + None + + See Also + -------- + http://crmsh.github.io/man/#cmdhelp_configure_order + """ + specs = '' + if score: + specs = '%s:' % score + + specs = specs + (' %s' % ' '.join(resources)) + if 'symmetrical' in kwargs: + specs = specs + (' symmetrical=' % kwargs['symmetrical']) + + self['orders'][name] = specs + + def add(self, resource_desc): + """Adds a resource descriptor object to the CRM configuration. + + Adds a `ResourceDescriptor` object to the CRM configuration which + understands how to configure the resource itself. The + `ResourceDescriptor` object needs to know how to interact with this + CRM class in order to properly configure the pacemaker resources. + + The minimum viable resource descriptor object will implement a method + which takes a reference parameter to this CRM in order to configure + itself. + + Parameters + ---------- + resource_desC: ResourceDescriptor + an object which provides an abstraction of a monitored resource + within pacemaker. + + Returns + ------- + None + """ + method = getattr(resource_desc, 'configure_resource', None) + if not callable(method): + raise ValueError('Invalid resource_desc. 
The "configure_resource"' + ' function has not been defined.') + + method(self) + + +class ResourceDescriptor(object): + """ + A ResourceDescriptor provides a logical resource or concept and knows + how to configure pacemaker. + """ + + def configure_resource(self, crm): + """Configures the logical resource(s) within the CRM. + + This is the callback method which is invoked by the CRM in order + to allow this ResourceDescriptor to fully configure the logical + resource. + + For example, a Virtual IP may provide a standard abstraction and + configure the specific details under the covers. + """ + pass + + +class InitService(ResourceDescriptor): + def __init__(self, service_name, init_service_name, clone=True): + """Class for managing init resource + + :param service_name: string - Name of service + :param init_service_name: string - Name service uses in init system + :param clone: bool - clone service across all units + :returns: None + """ + self.service_name = service_name + self.init_service_name = init_service_name + self.clone = clone + + def configure_resource(self, crm): + """"Configure new init system service resource in crm + + :param crm: CRM() instance - Config object for Pacemaker resources + :returns: None + """ + res_key = 'res_{}_{}'.format( + self.service_name.replace('-', '_'), + self.init_service_name.replace('-', '_')) + res_type = 'lsb:{}'.format(self.init_service_name) + _meta = 'migration-threshold="INFINITY" failure-timeout="5s"' + crm.primitive( + res_key, res_type, op='monitor interval="5s"', meta=_meta) + crm.init_services(self.init_service_name) + if self.clone: + clone_key = 'cl_{}'.format(res_key) + crm.clone(clone_key, res_key) + + +class VirtualIP(ResourceDescriptor): + def __init__(self, service_name, vip, nic=None, cidr=None): + """Class for managing VIP resource + + :param service_name: string - Name of service + :param vip: string - Virtual IP to be managed + :param nic: string - Network interface to bind vip to + :param cidr: 
string - Netmask for vip + :returns: None + """ + self.service_name = service_name + self.vip = vip + self.nic = nic + self.cidr = cidr + + def configure_resource(self, crm): + """Configure new vip resource in crm + + :param crm: CRM() instance - Config object for Pacemaker resources + :returns: None + """ + if self.nic: + vip_key = 'res_{}_{}_vip'.format(self.service_name, self.nic) + else: + vip_key = 'res_{}_{}_vip'.format( + self.service_name, + hashlib.sha1(self.vip.encode('UTF-8')).hexdigest()[:7]) + ipaddr = ipaddress.ip_address(self.vip) + if isinstance(ipaddr, ipaddress.IPv4Address): + res_type = 'ocf:heartbeat:IPaddr2' + res_params = 'ip="{}"'.format(self.vip) + else: + res_type = 'ocf:heartbeat:IPv6addr' + res_params = 'ipv6addr="{}"'.format(self.vip) + vip_params = 'ipv6addr' + vip_key = 'res_{}_{}_{}_vip'.format(self.service_name, self.nic, + vip_params) + + if self.nic: + res_params = '{} nic="{}"'.format(res_params, self.nic) + if self.cidr: + res_params = '{} cidr_netmask="{}"'.format(res_params, self.cidr) + # Monitor the VIP + _op_monitor = 'monitor timeout="20s" interval="10s" depth="0"' + _meta = 'migration-threshold="INFINITY" failure-timeout="5s"' + crm.primitive( + vip_key, res_type, params=res_params, op=_op_monitor, meta=_meta) + + +class DNSEntry(ResourceDescriptor): + + def __init__(self, service_name, ip, fqdn, endpoint_type): + """Class for managing DNS entries + + :param service_name: string - Name of service + :param ip: string - IP to point DNS entry at + :param fqdn: string - DNS Entry + :param endpoint_type: string - The type of the endpoint represented by + the DNS record eg public, admin etc + :returns: None + """ + self.service_name = service_name + self.ip = ip + self.fqdn = fqdn + self.endpoint_type = endpoint_type + + def configure_resource(self, crm, res_type='ocf:maas:dns'): + """Configure new DNS resource in crm + + :param crm: CRM() instance - Config object for Pacemaker resources + :param res_type: string - Corosync Open 
Cluster Framework resource + agent to use for DNS HA + :returns: None + """ + res_key = 'res_{}_{}_hostname'.format( + self.service_name.replace('-', '_'), + self.endpoint_type) + res_params = '' + if self.fqdn: + res_params = '{} fqdn="{}"'.format(res_params, self.fqdn) + if self.ip: + res_params = '{} ip_address="{}"'.format(res_params, self.ip) + crm.primitive(res_key, res_type, params=res_params) + + +class SystemdService(ResourceDescriptor): + def __init__(self, service_name, systemd_service_name, clone=True): + """Class for managing systemd resource + + :param service_name: string - Name of service + :param systemd_service_name: string - Name service uses in + systemd system + :param clone: bool - clone service across all units + :returns: None + """ + self.service_name = service_name + self.systemd_service_name = systemd_service_name + self.clone = clone + + def configure_resource(self, crm): + """"Configure new systemd system service resource in crm + + :param crm: CRM() instance - Config object for Pacemaker resources + :returns: None + """ + res_key = 'res_{}_{}'.format( + self.service_name.replace('-', '_'), + self.systemd_service_name.replace('-', '_')) + res_type = 'systemd:{}'.format(self.systemd_service_name) + _meta = 'migration-threshold="INFINITY" failure-timeout="5s"' + crm.primitive( + res_key, res_type, op='monitor interval="5s"', meta=_meta) + crm.systemd_services(self.systemd_service_name) + if self.clone: + clone_key = 'cl_{}'.format(res_key) + crm.clone(clone_key, res_key) diff --git a/kubeapi-load-balancer/hooks/relations/hacluster/requires.py b/kubeapi-load-balancer/hooks/relations/hacluster/requires.py new file mode 100644 index 0000000..395a658 --- /dev/null +++ b/kubeapi-load-balancer/hooks/relations/hacluster/requires.py @@ -0,0 +1,58 @@ +#!/usr/bin/python +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import relations.hacluster.interface_hacluster.common as common +from charms.reactive import hook +from charms.reactive import RelationBase +from charms.reactive import scopes +from charms.reactive.helpers import data_changed as rh_data_changed +from charmhelpers.core import hookenv + + +class HAClusterRequires(RelationBase, common.ResourceManagement): + # The hacluster charm is a subordinate charm and really only works + # for a single service to the HA Cluster relation, therefore set the + # expected scope to be GLOBAL. + scope = scopes.GLOBAL + + @hook('{requires:hacluster}-relation-joined') + def joined(self): + self.set_state('{relation_name}.connected') + + @hook('{requires:hacluster}-relation-changed') + def changed(self): + if self.is_clustered(): + self.set_state('{relation_name}.available') + else: + self.remove_state('{relation_name}.available') + + @hook('{requires:hacluster}-relation-{broken,departed}') + def departed(self): + self.remove_state('{relation_name}.available') + self.remove_state('{relation_name}.connected') + + def data_changed(self, data_id, data, hash_type='md5'): + return rh_data_changed(data_id, data, hash_type) + + def get_remote_all(self, key, default=None): + """Return a list of all values presented by remote units for key""" + values = [] + for conversation in self.conversations(): + for relation_id in conversation.relation_ids: + for unit in hookenv.related_units(relation_id): + value = hookenv.relation_get(key, + unit, + relation_id) or default + if value: + values.append(value) + return list(set(values)) diff --git 
a/kubeapi-load-balancer/hooks/relations/hacluster/test-requirements.txt b/kubeapi-load-balancer/hooks/relations/hacluster/test-requirements.txt new file mode 100644 index 0000000..12452e5 --- /dev/null +++ b/kubeapi-load-balancer/hooks/relations/hacluster/test-requirements.txt @@ -0,0 +1,7 @@ +# Lint and unit test requirements +flake8 +stestr>=2.2.0 +charms.reactive +coverage>=3.6 +netifaces +git+https://github.com/canonical/operator.git#egg=ops diff --git a/kubeapi-load-balancer/hooks/relations/http/.gitignore b/kubeapi-load-balancer/hooks/relations/http/.gitignore new file mode 100644 index 0000000..3374ec2 --- /dev/null +++ b/kubeapi-load-balancer/hooks/relations/http/.gitignore @@ -0,0 +1,5 @@ +# Emacs save files +*~ +\#*\# +.\#* + diff --git a/kubeapi-load-balancer/hooks/relations/http/README.md b/kubeapi-load-balancer/hooks/relations/http/README.md new file mode 100644 index 0000000..3d7822a --- /dev/null +++ b/kubeapi-load-balancer/hooks/relations/http/README.md @@ -0,0 +1,68 @@ +# Overview + +This interface layer implements the basic form of the `http` interface protocol, +which is used for things such as reverse-proxies, load-balanced servers, REST +service discovery, et cetera. + +# Usage + +## Provides + +By providing the `http` interface, your charm is providing an HTTP server that +can be load-balanced, reverse-proxied, used as a REST endpoint, etc. + +Your charm need only provide the port on which it is serving its content, as +soon as the `{relation_name}.available` state is set: + +```python +@when('website.available') +def configure_website(website): + website.configure(port=hookenv.config('port')) +``` + +## Requires + +By requiring the `http` interface, your charm is consuming one or more HTTP +servers, as a REST endpoint, to load-balance a set of servers, etc. + +Your charm should respond to the `{relation_name}.available` state, which +indicates that there is at least one HTTP server connected. 
+ +The `services()` method returns a list of available HTTP services and their +associated hosts and ports. + +The return value is a list of dicts of the following form: + +```python +[ + { + 'service_name': name_of_service, + 'hosts': [ + { + 'hostname': address_of_host, + 'port': port_for_host, + }, + # ... + ], + }, + # ... +] +``` + +A trivial example of handling this interface would be: + +```python +from charms.reactive.helpers import data_changed + +@when('reverseproxy.available') +def update_reverse_proxy_config(reverseproxy): + services = reverseproxy.services() + if not data_changed('reverseproxy.services', services): + return + for service in services: + for host in service['hosts']: + hookenv.log('{} has a unit {}:{}'.format( + service['service_name'], + host['hostname'], + host['port'])) +``` diff --git a/kubeapi-load-balancer/hooks/relations/http/__init__.py b/kubeapi-load-balancer/hooks/relations/http/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/kubeapi-load-balancer/hooks/relations/http/interface.yaml b/kubeapi-load-balancer/hooks/relations/http/interface.yaml new file mode 100644 index 0000000..54e7748 --- /dev/null +++ b/kubeapi-load-balancer/hooks/relations/http/interface.yaml @@ -0,0 +1,4 @@ +name: http +summary: Basic HTTP interface +version: 1 +repo: https://git.launchpad.net/~bcsaller/charms/+source/http diff --git a/kubeapi-load-balancer/hooks/relations/http/provides.py b/kubeapi-load-balancer/hooks/relations/http/provides.py new file mode 100644 index 0000000..86fa9b3 --- /dev/null +++ b/kubeapi-load-balancer/hooks/relations/http/provides.py @@ -0,0 +1,67 @@ +import json + +from charmhelpers.core import hookenv +from charms.reactive import when, when_not +from charms.reactive import set_flag, clear_flag +from charms.reactive import Endpoint + + +class HttpProvides(Endpoint): + + @when('endpoint.{endpoint_name}.joined') + def joined(self): + set_flag(self.expand_name('{endpoint_name}.available')) + + 
@when_not('endpoint.{endpoint_name}.joined') + def broken(self): + clear_flag(self.expand_name('{endpoint_name}.available')) + + def get_ingress_address(self, rel_id=None): + # If no rel_id is provided, we fallback to the first one + if rel_id is None: + rel_id = self.relations[0].relation_id + return hookenv.ingress_address(rel_id, hookenv.local_unit()) + + def configure(self, port, private_address=None, hostname=None): + ''' configure the address(es). private_address and hostname can + be None, a single string address/hostname, or a list of addresses + and hostnames. Note that if a list is passed, it is assumed both + private_address and hostname are either lists or None ''' + for relation in self.relations: + ingress_address = self.get_ingress_address(relation.relation_id) + if type(private_address) is list or type(hostname) is list: + # build 3 lists to zip together that are the same length + length = max(len(private_address), len(hostname)) + p = [port] * length + a = private_address + [ingress_address] *\ + (length - len(private_address)) + h = hostname + [ingress_address] * (length - len(hostname)) + zipped_list = zip(p, a, h) + # now build an array of dictionaries from that in the desired + # format for the interface + data_list = [{'hostname': h, 'port': p, 'private-address': a} + for p, a, h in zipped_list] + # for backwards compatibility, we just send a single entry + # and have an array of dictionaries in a field of that + # entry for the other entries. + data = data_list.pop(0) + data['extended_data'] = json.dumps(data_list) + + relation.to_publish_raw.update(data) + else: + relation.to_publish_raw.update({ + 'hostname': hostname or ingress_address, + 'private-address': private_address or ingress_address, + 'port': port, + }) + + def set_remote(self, **kwargs): + # NB: This method provides backwards compatibility for charms that + # called RelationBase.set_remote. 
Most commonly, this was done by + # charms that needed to pass reverse proxy stanzas to http proxies. + # This type of interaction with base relation classes is discouraged, + # and should be handled with logic encapsulated in appropriate + # interfaces. Eventually, this method will be deprecated in favor of + # that behavior. + for relation in self.relations: + relation.to_publish_raw.update(kwargs) diff --git a/kubeapi-load-balancer/hooks/relations/http/requires.py b/kubeapi-load-balancer/hooks/relations/http/requires.py new file mode 100644 index 0000000..17ea6b7 --- /dev/null +++ b/kubeapi-load-balancer/hooks/relations/http/requires.py @@ -0,0 +1,76 @@ +import json + +from charms.reactive import when, when_not +from charms.reactive import set_flag, clear_flag +from charms.reactive import Endpoint + + +class HttpRequires(Endpoint): + + @when('endpoint.{endpoint_name}.changed') + def changed(self): + if any(unit.received_raw['port'] for unit in self.all_joined_units): + set_flag(self.expand_name('{endpoint_name}.available')) + + @when_not('endpoint.{endpoint_name}.joined') + def broken(self): + clear_flag(self.expand_name('{endpoint_name}.available')) + + def services(self): + """ + Returns a list of available HTTP services and their associated hosts + and ports. + + The return value is a list of dicts of the following form:: + + [ + { + 'service_name': name_of_service, + 'hosts': [ + { + 'hostname': address_of_host, + 'private-address': private_address_of_host, + 'port': port_for_host, + }, + # ... + ], + }, + # ... 
+ ] + """ + def build_service_host(data): + private_address = data['private-address'] + host = data['hostname'] or private_address + if host and data['port']: + return (host, private_address, data['port']) + else: + return None + + services = {} + for relation in self.relations: + service_name = relation.application_name + service = services.setdefault(service_name, { + 'service_name': service_name, + 'hosts': [], + }) + host_set = set() + for unit in relation.joined_units: + data = unit.received_raw + host = build_service_host(data) + if host: + host_set.add(host) + + # if we have extended data, add it + if 'extended_data' in data: + for ed in json.loads(data['extended_data']): + host = build_service_host(ed) + if host: + host_set.add(host) + + service['hosts'] = [ + {'hostname': h, 'private-address': pa, 'port': p} + for h, pa, p in sorted(host_set) + ] + + ret = [s for s in services.values() if s['hosts']] + return ret diff --git a/kubeapi-load-balancer/hooks/relations/nrpe-external-master/README.md b/kubeapi-load-balancer/hooks/relations/nrpe-external-master/README.md new file mode 100644 index 0000000..e33deb8 --- /dev/null +++ b/kubeapi-load-balancer/hooks/relations/nrpe-external-master/README.md @@ -0,0 +1,66 @@ +# nrpe-external-master interface + +Use this interface to register nagios checks in your charm layers. + +## Purpose + +This interface is designed to interoperate with the +[nrpe-external-master](https://jujucharms.com/nrpe-external-master) subordinate charm. + +## How to use in your layers + +The event handler for `nrpe-external-master.available` is called with an object +through which you can register your own custom nagios checks, when a relation +is established with `nrpe-external-master:nrpe-external-master`. + +This object provides a method, + +_add_check_(args, name=_check_name_, description=_description_, context=_context_, unit=_unit_) + +which is called to register a nagios plugin check for your service. + +All arguments are required. 
+ +*args* is a list of nagios plugin command line arguments, starting with the path to the plugin executable. + +*name* is the name of the check registered in nagios + +*description* is some text that describes what the check is for and what it does + +*context* is the nagios context name, something that identifies your application + +*unit* is `hookenv.local_unit()` + +The nrpe subordinate installs `check_http`, so you can use it like this: + +``` +@when('nrpe-external-master.available') +def setup_nagios(nagios): + config = hookenv.config() + unit_name = hookenv.local_unit() + nagios.add_check(['/usr/lib/nagios/plugins/check_http', + '-I', '127.0.0.1', '-p', str(config['port']), + '-e', " 200 OK", '-u', '/publickey'], + name="check_http", + description="Verify my awesome service is responding", + context=config["nagios_context"], + unit=unit_name, + ) +``` +If your `nagios.add_check` defines a custom plugin, you will also need to restart the `nagios-nrpe-server` service. + +Consult the nagios documentation for more information on [how to write your own +plugins](https://assets.nagios.com/downloads/nagioscore/docs/nagioscore/4/en/pluginapi.html) +or [find one](https://www.nagios.org/projects/nagios-plugins/) that does what you need. + +## Example deployment + +``` +$ juju deploy your-awesome-charm +$ juju deploy nrpe-external-master --config site-nagios.yaml +$ juju add-relation your-awesome-charm nrpe-external-master +``` + +where `site-nagios.yaml` has the necessary configuration settings for the +subordinate to connect to nagios. 
+ diff --git a/kubeapi-load-balancer/hooks/relations/nrpe-external-master/__init__.py b/kubeapi-load-balancer/hooks/relations/nrpe-external-master/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/kubeapi-load-balancer/hooks/relations/nrpe-external-master/interface.yaml b/kubeapi-load-balancer/hooks/relations/nrpe-external-master/interface.yaml new file mode 100644 index 0000000..859a423 --- /dev/null +++ b/kubeapi-load-balancer/hooks/relations/nrpe-external-master/interface.yaml @@ -0,0 +1,3 @@ +name: nrpe-external-master +summary: Nagios interface +version: 1 diff --git a/kubeapi-load-balancer/hooks/relations/nrpe-external-master/provides.py b/kubeapi-load-balancer/hooks/relations/nrpe-external-master/provides.py new file mode 100644 index 0000000..b10f501 --- /dev/null +++ b/kubeapi-load-balancer/hooks/relations/nrpe-external-master/provides.py @@ -0,0 +1,62 @@ +import datetime + +from charms.reactive import hook +from charms.reactive import RelationBase +from charms.reactive import scopes + + +class NrpeExternalMasterProvides(RelationBase): + scope = scopes.GLOBAL + + @hook('{provides:nrpe-external-master}-relation-{joined,changed}') + def changed_nrpe(self): + self.set_state('{relation_name}.available') + + @hook('{provides:nrpe-external-master}-relation-{broken,departed}') + def broken_nrpe(self): + self.remove_state('{relation_name}.available') + + def add_check(self, args, name=None, description=None, context=None, + servicegroups=None, unit=None): + unit = unit.replace('/', '-') + check_tmpl = """ +#--------------------------------------------------- +# This file is Juju managed +#--------------------------------------------------- +command[%(check_name)s]=%(check_args)s +""" + service_tmpl = """ +#--------------------------------------------------- +# This file is Juju managed +#--------------------------------------------------- +define service { + use active-service + host_name %(context)s-%(unit_name)s + service_description 
%(description)s + check_command check_nrpe!%(check_name)s + servicegroups %(servicegroups)s +} +""" + check_filename = "/etc/nagios/nrpe.d/check_%s.cfg" % (name) + with open(check_filename, "w") as fh: + fh.write(check_tmpl % { + 'check_args': ' '.join(args), + 'check_name': name, + }) + service_filename = "/var/lib/nagios/export/service__%s_%s.cfg" % ( + unit, name) + with open(service_filename, "w") as fh: + fh.write(service_tmpl % { + 'servicegroups': servicegroups or context, + 'context': context, + 'description': description, + 'check_name': name, + 'unit_name': unit, + }) + + def updated(self): + relation_info = { + 'timestamp': datetime.datetime.now().isoformat(), + } + self.set_remote(**relation_info) + self.remove_state('{relation_name}.available') diff --git a/kubeapi-load-balancer/hooks/relations/public-address/README.md b/kubeapi-load-balancer/hooks/relations/public-address/README.md new file mode 100644 index 0000000..06be3ae --- /dev/null +++ b/kubeapi-load-balancer/hooks/relations/public-address/README.md @@ -0,0 +1,59 @@ +# Overview + +This interface layer implements a public address protocol useful for load +balancers and their subordinates. The load balancers (providers) set their +own public address and port, which is then available to the subordinates +(requirers). + +# Usage + +## Provides + +By providing the `public-address` interface, your charm is providing an HTTP +server that can load-balance for another HTTP based service. + +Your charm need only provide the address and port on which it is serving its +content, as soon as the `{relation_name}.available` state is set: + +```python +from charmhelpers.core import hookenv +@when('website.available') +def configure_website(website): + website.set_address_port(hookenv.unit_get('public-address'), hookenv.config('port')) +``` + +## Requires + +By requiring the `public-address` interface, your charm is consuming one or +more HTTP servers, to load-balance a set of servers, etc. 
+ +Your charm should respond to the `{relation_name}.available` state, which +indicates that there is at least one HTTP server connected. + +The `get_addresses_ports()` method returns a list of available addresses and +ports. + +The return value is a list of dicts of the following form: + +```python +[ + { + 'public-address': address_of_host, + 'port': port_for_host, + }, + # ... +] +``` + +A trivial example of handling this interface would be: + +```python +from charmhelpers.core import hookenv +@when('loadbalancer.available') +def update_reverse_proxy_config(loadbalancer): + hosts = loadbalancer.get_addresses_ports() + for host in hosts: + hookenv.log('The loadbalancer for this unit is {}:{}'.format( + host['public-address'], + host['port'])) +``` diff --git a/kubeapi-load-balancer/hooks/relations/public-address/__init__.py b/kubeapi-load-balancer/hooks/relations/public-address/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/kubeapi-load-balancer/hooks/relations/public-address/interface.yaml b/kubeapi-load-balancer/hooks/relations/public-address/interface.yaml new file mode 100644 index 0000000..c9849e4 --- /dev/null +++ b/kubeapi-load-balancer/hooks/relations/public-address/interface.yaml @@ -0,0 +1,4 @@ +name: public-address +summary: A basic interface to provide the public address for load balancers. 
+version: 1 +repo: https://github.com/juju-solutions/interface-public-address.git diff --git a/kubeapi-load-balancer/hooks/relations/public-address/provides.py b/kubeapi-load-balancer/hooks/relations/public-address/provides.py new file mode 100644 index 0000000..09b9915 --- /dev/null +++ b/kubeapi-load-balancer/hooks/relations/public-address/provides.py @@ -0,0 +1,60 @@ +import json + +from charms.reactive import toggle_flag +from charms.reactive import Endpoint + + +class PublicAdddressProvides(Endpoint): + + def manage_flags(self): + toggle_flag(self.expand_name('{endpoint_name}.available'), + self.is_joined) + + def set_address_port(self, address, port, relation=None): + if relation is None: + # no relation specified, so send the same data to everyone + relations = self.relations + else: + # specific relation given, so only send the data to that one + relations = [relation] + if type(address) is list: + # build 2 lists to zip together that are the same length + length = len(address) + p = [port] * length + combined = zip(address, p) + clients = [{'public-address': a, 'port': p} + for a, p in combined] + # for backwards compatibility, we just send a single entry + # and have an array of dictionaries in a field of that + # entry for the other entries. 
+ first = clients.pop(0) + first['extended_data'] = json.dumps(clients) + for relation in relations: + relation.to_publish_raw.update(first) + else: + for relation in relations: + relation.to_publish_raw.update({'public-address': address, + 'port': port}) + + @property + def requests(self): + return [Request(rel) for rel in self.relations] + + +class Request: + def __init__(self, rel): + self.rel = rel + + @property + def application_name(self): + return self.rel.application_name + + @property + def members(self): + return [(u.received_raw.get('ingress-address', + u.received_raw['private-address']), + u.received_raw.get('port', '6443')) + for u in self.rel.joined_units] + + def set_address_port(self, address, port): + self.rel.endpoint.set_address_port(address, port, self.rel) diff --git a/kubeapi-load-balancer/hooks/relations/public-address/requires.py b/kubeapi-load-balancer/hooks/relations/public-address/requires.py new file mode 100644 index 0000000..467d129 --- /dev/null +++ b/kubeapi-load-balancer/hooks/relations/public-address/requires.py @@ -0,0 +1,44 @@ +import json + +from charms.reactive import toggle_flag, Endpoint + + +class PublicAddressRequires(Endpoint): + def manage_flags(self): + toggle_flag(self.expand_name('{endpoint_name}.available'), + len(self.get_addresses_ports()) > 0) + + def set_backend_port(self, port): + """ + Set the port that the backend service is listening on. + + Defaults to 6443 if not set. + """ + for rel in self.relations: + rel.to_publish_raw['port'] = str(port) + + def get_addresses_ports(self): + '''Returns a list of available HTTP providers and their associated + public addresses and ports. + + The return value is a list of dicts of the following form:: + [ + { + 'public-address': address_for_frontend, + 'port': port_for_frontend, + }, + # ... 
+ ] + ''' + hosts = set() + for relation in self.relations: + for unit in relation.joined_units: + data = unit.received_raw + hosts.add((data['public-address'], data['port'])) + if 'extended_data' in data: + for ed in json.loads(data['extended_data']): + hosts.add((ed['public-address'], ed['port'])) + + return [{'public-address': pa, 'port': p} + for pa, p in sorted(host for host in hosts + if None not in host)] diff --git a/kubeapi-load-balancer/hooks/relations/tls-certificates/.gitignore b/kubeapi-load-balancer/hooks/relations/tls-certificates/.gitignore new file mode 100644 index 0000000..93813bc --- /dev/null +++ b/kubeapi-load-balancer/hooks/relations/tls-certificates/.gitignore @@ -0,0 +1,4 @@ +.tox +__pycache__ +*.pyc +_build diff --git a/kubeapi-load-balancer/hooks/relations/tls-certificates/README.md b/kubeapi-load-balancer/hooks/relations/tls-certificates/README.md new file mode 100644 index 0000000..733da6d --- /dev/null +++ b/kubeapi-load-balancer/hooks/relations/tls-certificates/README.md @@ -0,0 +1,90 @@ +# Interface tls-certificates + +This is a [Juju][] interface layer that enables a charm which requires TLS +certificates to relate to a charm which can provide them, such as [Vault][] or +[EasyRSA][] + +To get started please read the [Introduction to PKI][] which defines some PKI +terms, concepts and processes used in this document. + +# Example Usage + +Let's say you have a charm which needs a server certificate for a service it +provides to other charms and a client certificate for a database it consumes +from another charm. The charm provides its own service on the `clients` +relation endpoint, and it consumes the database on the `db` relation endpoint. 
+ +First, you must define the relation endpoint in your charm's `metadata.yaml`: + +```yaml +requires: + cert-provider: + interface: tls-certificates +``` + +Next, you must ensure the interface layer is included in your `layer.yaml`: + +```yaml +includes: + - interface:tls-certificates +``` + +Then, in your reactive code, add the following, changing `update_certs` to +handle the certificates however your charm needs: + +```python +from charmhelpers.core import hookenv, host +from charms.reactive import endpoint_from_flag + + +@when('cert-provider.ca.changed') +def install_root_ca_cert(): + cert_provider = endpoint_from_flag('cert-provider.ca.available') + host.install_ca_cert(cert_provider.root_ca_cert) + clear_flag('cert-provider.ca.changed') + + +@when('cert-provider.available') +def request_certificates(): + cert_provider = endpoint_from_flag('cert-provider.available') + + # get ingress info + ingress_for_clients = hookenv.network_get('clients')['ingress-addresses'] + ingress_for_db = hookenv.network_get('db')['ingress-addresses'] + + # use first ingress address as primary and any additional as SANs + server_cn, server_sans = ingress_for_clients[0], ingress_for_clients[:1] + client_cn, client_sans = ingress_for_db[0], ingress_for_db[:1] + + # request a single server and single client cert; note that multiple certs + # of either type can be requested as long as they have unique common names + cert_provider.request_server_cert(server_cn, server_sans) + cert_provider.request_client_cert(client_cn, client_sans) + + +@when('cert-provider.certs.changed') +def update_certs(): + cert_provider = endpoint_from_flag('cert-provider.available') + server_cert = cert_provider.server_certs[0] # only requested one + myserver.update_server_cert(server_cert.cert, server_cert.key) + + client_cert = cert_provider.client_certs[0] # only requested one + myclient.update_client_cert(client_cert.cert, client_cert.key) + clear_flag('cert-provider.certs.changed') +``` + + +# Reference + + 
* [Requires](docs/requires.md) + * [Provides](docs/provides.md) + +# Contact Information + +Maintainer: Cory Johns <Cory.Johns@canonical.com> + + +[Juju]: https://jujucharms.com +[Vault]: https://jujucharms.com/u/openstack-charmers/vault +[EasyRSA]: https://jujucharms.com/u/containers/easyrsa +[Introduction to PKI]: https://github.com/OpenVPN/easy-rsa/blob/master/doc/Intro-To-PKI.md diff --git a/kubeapi-load-balancer/hooks/relations/tls-certificates/__init__.py b/kubeapi-load-balancer/hooks/relations/tls-certificates/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/kubeapi-load-balancer/hooks/relations/tls-certificates/docs/common.md b/kubeapi-load-balancer/hooks/relations/tls-certificates/docs/common.md new file mode 100644 index 0000000..25d0e08 --- /dev/null +++ b/kubeapi-load-balancer/hooks/relations/tls-certificates/docs/common.md @@ -0,0 +1,51 @@ +

CertificateRequest

+ +```python +CertificateRequest(self, unit, cert_type, cert_name, common_name, sans) +``` + +

application_name

+ +Name of the application which the request came from. + +:returns: Name of application +:rtype: str + +

cert

+ + +The cert published for this request, if any. + +

cert_type

+ + +Type of certificate, 'server' or 'client', being requested. + +

resolve_unit_name

+ +```python +CertificateRequest.resolve_unit_name(unit) +``` +Return name of unit associated with this request. + +unit_name should be provided in the relation data to ensure +compatibility with cross-model relations. If the unit name +is absent then fall back to unit_name attribute of the +unit associated with this request. + +:param unit: Unit to extract name from +:type unit: charms.reactive.endpoints.RelatedUnit +:returns: Name of unit +:rtype: str + +

Certificate

+ +```python +Certificate(self, cert_type, common_name, cert, key) +``` + +Represents a created certificate and key. + +The ``cert_type``, ``common_name``, ``cert``, and ``key`` values can +be accessed either as properties or as the contents of the dict. + diff --git a/kubeapi-load-balancer/hooks/relations/tls-certificates/docs/provides.md b/kubeapi-load-balancer/hooks/relations/tls-certificates/docs/provides.md new file mode 100644 index 0000000..c213546 --- /dev/null +++ b/kubeapi-load-balancer/hooks/relations/tls-certificates/docs/provides.md @@ -0,0 +1,212 @@ +

provides

+ + +

TlsProvides

+ +```python +TlsProvides(self, endpoint_name, relation_ids=None) +``` + +The provider's side of the interface protocol. + +The following flags may be set: + + * `{endpoint_name}.available` + Whenever any clients are joined. + + * `{endpoint_name}.certs.requested` + When there are new certificate requests of any kind to be processed. + The requests can be accessed via [new_requests][]. + + * `{endpoint_name}.server.certs.requested` + When there are new server certificate requests to be processed. + The requests can be accessed via [new_server_requests][]. + + * `{endpoint_name}.client.certs.requested` + When there are new client certificate requests to be processed. + The requests can be accessed via [new_client_requests][]. + +[Certificate]: common.md#tls_certificates_common.Certificate +[CertificateRequest]: common.md#tls_certificates_common.CertificateRequest +[all_requests]: provides.md#provides.TlsProvides.all_requests +[new_requests]: provides.md#provides.TlsProvides.new_requests +[new_server_requests]: provides.md#provides.TlsProvides.new_server_requests +[new_client_requests]: provides.md#provides.TlsProvides.new_client_requests + +

all_published_certs

+ + +List of all [Certificate][] instances that this provider has published +for all related applications. + +

all_requests

+ + +List of all requests that have been made. + +Each will be an instance of [CertificateRequest][]. + +Example usage: + +```python +@when('certs.regen', + 'tls.certs.available') +def regen_all_certs(): + tls = endpoint_from_flag('tls.certs.available') + for request in tls.all_requests: + cert, key = generate_cert(request.cert_type, + request.common_name, + request.sans) + request.set_cert(cert, key) +``` + +

new_application_requests

+ + +Filtered view of [new_requests][] that only includes application cert +requests. + +Each will be an instance of [ApplicationCertificateRequest][]. + +Example usage: + +```python +@when('tls.application.certs.requested') +def gen_application_certs(): + tls = endpoint_from_flag('tls.application.certs.requested') + for request in tls.new_application_requests: + cert, key = generate_application_cert(request.common_name, + request.sans) + request.set_cert(cert, key) +``` + +

new_client_requests

+ + +Filtered view of [new_requests][] that only includes client cert +requests. + +Each will be an instance of [CertificateRequest][]. + +Example usage: + +```python +@when('tls.client.certs.requested') +def gen_client_certs(): + tls = endpoint_from_flag('tls.client.certs.requested') + for request in tls.new_client_requests: + cert, key = generate_client_cert(request.common_name, + request.sans) + request.set_cert(cert, key) +``` + +

new_requests

+ + +Filtered view of [all_requests][] that only includes requests that +haven't been handled. + +Each will be an instance of [CertificateRequest][]. + +This collection can also be further filtered by request type using +[new_server_requests][] or [new_client_requests][]. + +Example usage: + +```python +@when('tls.certs.requested') +def gen_certs(): + tls = endpoint_from_flag('tls.certs.requested') + for request in tls.new_requests: + cert, key = generate_cert(request.cert_type, + request.common_name, + request.sans) + request.set_cert(cert, key) +``` + +

new_server_requests

+ + +Filtered view of [new_requests][] that only includes server cert +requests. + +Each will be an instance of [CertificateRequest][]. + +Example usage: + +```python +@when('tls.server.certs.requested') +def gen_server_certs(): + tls = endpoint_from_flag('tls.server.certs.requested') + for request in tls.new_server_requests: + cert, key = generate_server_cert(request.common_name, + request.sans) + request.set_cert(cert, key) +``` + +

set_ca

+ +```python +TlsProvides.set_ca(certificate_authority) +``` + +Publish the CA to all related applications. + +

set_chain

+ +```python +TlsProvides.set_chain(chain) +``` + +Publish the chain of trust to all related applications. + +

set_client_cert

+ +```python +TlsProvides.set_client_cert(cert, key) +``` + +Deprecated. This is only for backwards compatibility. + +Publish a globally shared client cert and key. + +

set_server_cert

+ +```python +TlsProvides.set_server_cert(scope, cert, key) +``` + +Deprecated. Use one of the [new_requests][] collections and +`request.set_cert()` instead. + +Set the server cert and key for the request identified by `scope`. + +

set_server_multicerts

+ +```python +TlsProvides.set_server_multicerts(scope) +``` + +Deprecated. Done automatically. + +

add_server_cert

+ +```python +TlsProvides.add_server_cert(scope, cn, cert, key) +``` + +Deprecated. Use `request.set_cert()` instead. + +

get_server_requests

+ +```python +TlsProvides.get_server_requests() +``` + +Deprecated. Use the [new_requests][] or [server_requests][] +collections instead. + +One provider can have many requests to generate server certificates. +Return a map of all server request objects indexed by a unique +identifier. + diff --git a/kubeapi-load-balancer/hooks/relations/tls-certificates/docs/requires.md b/kubeapi-load-balancer/hooks/relations/tls-certificates/docs/requires.md new file mode 100644 index 0000000..fdec902 --- /dev/null +++ b/kubeapi-load-balancer/hooks/relations/tls-certificates/docs/requires.md @@ -0,0 +1,207 @@ +

requires

+ + +

TlsRequires

+ +```python +TlsRequires(self, endpoint_name, relation_ids=None) +``` + +The client's side of the interface protocol. + +The following flags may be set: + + * `{endpoint_name}.available` + Whenever the relation is joined. + + * `{endpoint_name}.ca.available` + When the root CA information is available via the [root_ca_cert][] and + [root_ca_chain][] properties. + + * `{endpoint_name}.ca.changed` + When the root CA information has changed, whether because + they have just become available or if they were regenerated by the CA. + Once processed this flag should be removed by the charm. + + * `{endpoint_name}.certs.available` + When the requested server or client certs are available. + + * `{endpoint_name}.certs.changed` + When the requested server or client certs have changed, whether because + they have just become available or if they were regenerated by the CA. + Once processed this flag should be removed by the charm. + + * `{endpoint_name}.server.certs.available` + When the server certificates requested by [request_server_cert][] are + available via the [server_certs][] collection. + + * `{endpoint_name}.server.certs.changed` + When the requested server certificates have changed, whether because + they have just become available or if they were regenerated by the CA. + Once processed this flag should be removed by the charm. + + * `{endpoint_name}.client.certs.available` + When the client certificates requested by [request_client_cert][] are + available via the [client_certs][] collection. + + * `{endpoint_name}.client.certs.changed` + When the requested client certificates have changed, whether because + they have just become available or if they were regenerated by the CA. + Once processed this flag should be removed by the charm. 
+ +The following flags have been deprecated: + + * `{endpoint_name}.server.cert.available` + * `{endpoint_name}.client.cert.available` + * `{endpoint_name}.batch.cert.available` + +[Certificate]: common.md#tls_certificates_common.Certificate +[CertificateRequest]: common.md#tls_certificates_common.CertificateRequest +[root_ca_cert]: requires.md#requires.TlsRequires.root_ca_cert +[root_ca_chain]: requires.md#requires.TlsRequires.root_ca_chain +[request_server_cert]: requires.md#requires.TlsRequires.request_server_cert +[request_client_cert]: requires.md#requires.TlsRequires.request_client_cert +[server_certs]: requires.md#requires.TlsRequires.server_certs +[server_certs_map]: requires.md#requires.TlsRequires.server_certs_map +[client_certs]: requires.md#requires.TlsRequires.server_certs + +

application_certs

+ + +List of [Certificate][] instances for all available application certs. + +

client_certs

+ + +List of [Certificate][] instances for all available client certs. + +

client_certs_map

+ + +Mapping of client [Certificate][] instances by their `common_name`. + +

root_ca_cert

+ + +Root CA certificate. + +

root_ca_chain

+ + +The chain of trust for the root CA. + +

server_certs

+ + +List of [Certificate][] instances for all available server certs. + +

server_certs_map

+ + +Mapping of server [Certificate][] instances by their `common_name`. + +

get_ca

+ +```python +TlsRequires.get_ca() +``` + +Return the root CA certificate. + +Same as [root_ca_cert][]. + +

get_chain

+ +```python +TlsRequires.get_chain() +``` + +Return the chain of trust for the root CA. + +Same as [root_ca_chain][]. + +

get_client_cert

+ +```python +TlsRequires.get_client_cert() +``` + +Deprecated. Use [request_client_cert][] and the [client_certs][] +collection instead. + +Return a globally shared client certificate and key. + +

get_server_cert

+ +```python +TlsRequires.get_server_cert() +``` + +Deprecated. Use the [server_certs][] collection instead. + +Return the cert and key of the first server certificate requested. + +

get_batch_requests

+ +```python +TlsRequires.get_batch_requests() +``` + +Deprecated. Use [server_certs_map][] instead. + +Mapping of server [Certificate][] instances by their `common_name`. + +

request_server_cert

+ +```python +TlsRequires.request_server_cert(cn, sans=None, cert_name=None) +``` + +Request a server certificate and key be generated for the given +common name (`cn`) and optional list of alternative names (`sans`). + +The `cert_name` is deprecated and not needed. + +This can be called multiple times to request more than one server +certificate, although the common names must be unique. If called +again with the same common name, it will be ignored. + +

add_request_server_cert

+ +```python +TlsRequires.add_request_server_cert(cn, sans) +``` + +Deprecated. Use [request_server_cert][] instead. + +

request_server_certs

+ +```python +TlsRequires.request_server_certs() +``` + +Deprecated. Just use [request_server_cert][]; this does nothing. + +

request_client_cert

+ +```python +TlsRequires.request_client_cert(cn, sans) +``` + +Request a client certificate and key be generated for the given +common name (`cn`) and list of alternative names (`sans`). + +This can be called multiple times to request more than one client +certificate, although the common names must be unique. If called +again with the same common name, it will be ignored. + +

request_application_cert

+ +```python +TlsRequires.request_application_cert(cn, sans) +``` + +Request an application certificate and key be generated for the given +common name (`cn`) and list of alternative names (`sans` ) of this +unit and all peer units. All units will share a single certificates. + diff --git a/kubeapi-load-balancer/hooks/relations/tls-certificates/interface.yaml b/kubeapi-load-balancer/hooks/relations/tls-certificates/interface.yaml new file mode 100644 index 0000000..beec53b --- /dev/null +++ b/kubeapi-load-balancer/hooks/relations/tls-certificates/interface.yaml @@ -0,0 +1,6 @@ +name: tls-certificates +summary: | + A Transport Layer Security (TLS) charm layer that uses requires and provides + to exchange certifcates. +version: 1 +repo: https://github.com/juju-solutions/interface-tls-certificates diff --git a/kubeapi-load-balancer/hooks/relations/tls-certificates/make_docs b/kubeapi-load-balancer/hooks/relations/tls-certificates/make_docs new file mode 100644 index 0000000..2f2274a --- /dev/null +++ b/kubeapi-load-balancer/hooks/relations/tls-certificates/make_docs @@ -0,0 +1,23 @@ +#!.tox/py3/bin/python + +import sys +import importlib +from pathlib import Path +from shutil import rmtree +from unittest.mock import patch + +import pydocmd.__main__ + + +with patch('charmhelpers.core.hookenv.metadata') as metadata: + metadata.return_value = { + 'requires': {'cert': {'interface': 'tls-certificates'}}, + 'provides': {'cert': {'interface': 'tls-certificates'}}, + } + sys.path.append('..') + sys.modules[''] = importlib.import_module(Path.cwd().name) + print(sys.argv) + if len(sys.argv) == 1: + sys.argv.extend(['build']) + pydocmd.__main__.main() + rmtree('_build') diff --git a/kubeapi-load-balancer/hooks/relations/tls-certificates/provides.py b/kubeapi-load-balancer/hooks/relations/tls-certificates/provides.py new file mode 100644 index 0000000..0262baa --- /dev/null +++ b/kubeapi-load-balancer/hooks/relations/tls-certificates/provides.py @@ -0,0 +1,301 @@ +if not 
__package__: + # fix relative imports when building docs + import sys + __package__ = sys.modules[''].__name__ + +from charms.reactive import Endpoint +from charms.reactive import when, when_not +from charms.reactive import set_flag, clear_flag, toggle_flag + +from .tls_certificates_common import ( + ApplicationCertificateRequest, + CertificateRequest +) + + +class TlsProvides(Endpoint): + """ + The provider's side of the interface protocol. + + The following flags may be set: + + * `{endpoint_name}.available` + Whenever any clients are joined. + + * `{endpoint_name}.certs.requested` + When there are new certificate requests of any kind to be processed. + The requests can be accessed via [new_requests][]. + + * `{endpoint_name}.server.certs.requested` + When there are new server certificate requests to be processed. + The requests can be accessed via [new_server_requests][]. + + * `{endpoint_name}.client.certs.requested` + When there are new client certificate requests to be processed. + The requests can be accessed via [new_client_requests][]. 
+ + [Certificate]: common.md#tls_certificates_common.Certificate + [CertificateRequest]: common.md#tls_certificates_common.CertificateRequest + [all_requests]: provides.md#provides.TlsProvides.all_requests + [new_requests]: provides.md#provides.TlsProvides.new_requests + [new_server_requests]: provides.md#provides.TlsProvides.new_server_requests + [new_client_requests]: provides.md#provides.TlsProvides.new_client_requests + """ + + @when('endpoint.{endpoint_name}.joined') + def joined(self): + set_flag(self.expand_name('{endpoint_name}.available')) + toggle_flag(self.expand_name('{endpoint_name}.certs.requested'), + self.new_requests) + toggle_flag(self.expand_name('{endpoint_name}.server.certs.requested'), + self.new_server_requests) + toggle_flag(self.expand_name('{endpoint_name}.client.certs.requested'), + self.new_client_requests) + toggle_flag( + self.expand_name('{endpoint_name}.application.certs.requested'), + self.new_application_requests) + # For backwards compatibility, set the old "cert" flags as well + toggle_flag(self.expand_name('{endpoint_name}.server.cert.requested'), + self.new_server_requests) + toggle_flag(self.expand_name('{endpoint_name}.client.cert.requested'), + self.new_client_requests) + + @when_not('endpoint.{endpoint_name}.joined') + def broken(self): + clear_flag(self.expand_name('{endpoint_name}.available')) + clear_flag(self.expand_name('{endpoint_name}.certs.requested')) + clear_flag(self.expand_name('{endpoint_name}.server.certs.requested')) + clear_flag(self.expand_name('{endpoint_name}.client.certs.requested')) + clear_flag( + self.expand_name('{endpoint_name}.application.certs.requested')) + + def set_ca(self, certificate_authority): + """ + Publish the CA to all related applications. + """ + for relation in self.relations: + # All the clients get the same CA, so send it to them. + relation.to_publish_raw['ca'] = certificate_authority + + def set_chain(self, chain): + """ + Publish the chain of trust to all related applications. 
+ """ + for relation in self.relations: + # All the clients get the same chain, so send it to them. + relation.to_publish_raw['chain'] = chain + + def set_client_cert(self, cert, key): + """ + Deprecated. This is only for backwards compatibility. + + Publish a globally shared client cert and key. + """ + for relation in self.relations: + relation.to_publish_raw.update({ + 'client.cert': cert, + 'client.key': key, + }) + + def set_server_cert(self, scope, cert, key): + """ + Deprecated. Use one of the [new_requests][] collections and + `request.set_cert()` instead. + + Set the server cert and key for the request identified by `scope`. + """ + request = self.get_server_requests()[scope] + request.set_cert(cert, key) + + def set_server_multicerts(self, scope): + """ + Deprecated. Done automatically. + """ + pass + + def add_server_cert(self, scope, cn, cert, key): + ''' + Deprecated. Use `request.set_cert()` instead. + ''' + self.set_server_cert(scope, cert, key) + + def get_server_requests(self): + """ + Deprecated. Use the [new_requests][] or [server_requests][] + collections instead. + + One provider can have many requests to generate server certificates. + Return a map of all server request objects indexed by a unique + identifier. + """ + return {req._key: req for req in self.new_server_requests} + + @property + def all_requests(self): + """ + List of all requests that have been made. + + Each will be an instance of [CertificateRequest][]. 
+ + Example usage: + + ```python + @when('certs.regen', + 'tls.certs.available') + def regen_all_certs(): + tls = endpoint_from_flag('tls.certs.available') + for request in tls.all_requests: + cert, key = generate_cert(request.cert_type, + request.common_name, + request.sans) + request.set_cert(cert, key) + ``` + """ + requests = [] + for unit in self.all_joined_units: + # handle older single server cert request + if unit.received_raw['common_name']: + requests.append(CertificateRequest( + unit, + 'server', + unit.received_raw['certificate_name'], + unit.received_raw['common_name'], + unit.received['sans'], + )) + + # handle mutli server cert requests + reqs = unit.received['cert_requests'] or {} + for common_name, req in reqs.items(): + requests.append(CertificateRequest( + unit, + 'server', + common_name, + common_name, + req['sans'], + )) + + # handle client cert requests + reqs = unit.received['client_cert_requests'] or {} + for common_name, req in reqs.items(): + requests.append(CertificateRequest( + unit, + 'client', + common_name, + common_name, + req['sans'], + )) + # handle application cert requests + reqs = unit.received['application_cert_requests'] or {} + for common_name, req in reqs.items(): + requests.append(ApplicationCertificateRequest( + unit, + 'application', + common_name, + common_name, + req['sans'] + )) + return requests + + @property + def new_requests(self): + """ + Filtered view of [all_requests][] that only includes requests that + haven't been handled. + + Each will be an instance of [CertificateRequest][]. + + This collection can also be further filtered by request type using + [new_server_requests][] or [new_client_requests][]. 
+ + Example usage: + + ```python + @when('tls.certs.requested') + def gen_certs(): + tls = endpoint_from_flag('tls.certs.requested') + for request in tls.new_requests: + cert, key = generate_cert(request.cert_type, + request.common_name, + request.sans) + request.set_cert(cert, key) + ``` + """ + return [req for req in self.all_requests if not req.is_handled] + + @property + def new_server_requests(self): + """ + Filtered view of [new_requests][] that only includes server cert + requests. + + Each will be an instance of [CertificateRequest][]. + + Example usage: + + ```python + @when('tls.server.certs.requested') + def gen_server_certs(): + tls = endpoint_from_flag('tls.server.certs.requested') + for request in tls.new_server_requests: + cert, key = generate_server_cert(request.common_name, + request.sans) + request.set_cert(cert, key) + ``` + """ + return [req for req in self.new_requests if req.cert_type == 'server'] + + @property + def new_client_requests(self): + """ + Filtered view of [new_requests][] that only includes client cert + requests. + + Each will be an instance of [CertificateRequest][]. + + Example usage: + + ```python + @when('tls.client.certs.requested') + def gen_client_certs(): + tls = endpoint_from_flag('tls.client.certs.requested') + for request in tls.new_client_requests: + cert, key = generate_client_cert(request.common_name, + request.sans) + request.set_cert(cert, key) + ``` + """ + return [req for req in self.new_requests if req.cert_type == 'client'] + + @property + def new_application_requests(self): + """ + Filtered view of [new_requests][] that only includes application cert + requests. + + Each will be an instance of [ApplicationCertificateRequest][]. 
+ + Example usage: + + ```python + @when('tls.application.certs.requested') + def gen_application_certs(): + tls = endpoint_from_flag('tls.application.certs.requested') + for request in tls.new_application_requests: + cert, key = generate_application_cert(request.common_name, + request.sans) + request.set_cert(cert, key) + ``` + + :returns: List of certificate requests. + :rtype: [CertificateRequest, ] + """ + return [req for req in self.new_requests + if req.cert_type == 'application'] + + @property + def all_published_certs(self): + """ + List of all [Certificate][] instances that this provider has published + for all related applications. + """ + return [req.cert for req in self.all_requests if req.cert] diff --git a/kubeapi-load-balancer/hooks/relations/tls-certificates/pydocmd.yml b/kubeapi-load-balancer/hooks/relations/tls-certificates/pydocmd.yml new file mode 100644 index 0000000..c568913 --- /dev/null +++ b/kubeapi-load-balancer/hooks/relations/tls-certificates/pydocmd.yml @@ -0,0 +1,19 @@ +site_name: 'TLS Certificates Interface' + +generate: + - requires.md: + - requires + - requires.TlsRequires+ + - provides.md: + - provides + - provides.TlsProvides+ + - common.md: + - tls_certificates_common.CertificateRequest+ + - tls_certificates_common.Certificate+ + +pages: + - Requires: requires.md + - Provides: provides.md + - Common: common.md + +gens_dir: docs diff --git a/kubeapi-load-balancer/hooks/relations/tls-certificates/requires.py b/kubeapi-load-balancer/hooks/relations/tls-certificates/requires.py new file mode 100644 index 0000000..951f953 --- /dev/null +++ b/kubeapi-load-balancer/hooks/relations/tls-certificates/requires.py @@ -0,0 +1,342 @@ +if not __package__: + # fix relative imports when building docs + import sys + __package__ = sys.modules[''].__name__ + +import uuid + +from charmhelpers.core import hookenv + +from charms.reactive import when, when_not +from charms.reactive import set_flag, clear_flag, toggle_flag +from charms.reactive import 
Endpoint +from charms.reactive import data_changed + +from .tls_certificates_common import Certificate + + +class TlsRequires(Endpoint): + """ + The client's side of the interface protocol. + + The following flags may be set: + + * `{endpoint_name}.available` + Whenever the relation is joined. + + * `{endpoint_name}.ca.available` + When the root CA information is available via the [root_ca_cert][] and + [root_ca_chain][] properties. + + * `{endpoint_name}.ca.changed` + When the root CA information has changed, whether because + they have just become available or if they were regenerated by the CA. + Once processed this flag should be removed by the charm. + + * `{endpoint_name}.certs.available` + When the requested server or client certs are available. + + * `{endpoint_name}.certs.changed` + When the requested server or client certs have changed, whether because + they have just become available or if they were regenerated by the CA. + Once processed this flag should be removed by the charm. + + * `{endpoint_name}.server.certs.available` + When the server certificates requested by [request_server_cert][] are + available via the [server_certs][] collection. + + * `{endpoint_name}.server.certs.changed` + When the requested server certificates have changed, whether because + they have just become available or if they were regenerated by the CA. + Once processed this flag should be removed by the charm. + + * `{endpoint_name}.client.certs.available` + When the client certificates requested by [request_client_cert][] are + available via the [client_certs][] collection. + + * `{endpoint_name}.client.certs.changed` + When the requested client certificates have changed, whether because + they have just become available or if they were regenerated by the CA. + Once processed this flag should be removed by the charm. 
+ + The following flags have been deprecated: + + * `{endpoint_name}.server.cert.available` + * `{endpoint_name}.client.cert.available` + * `{endpoint_name}.batch.cert.available` + + [Certificate]: common.md#tls_certificates_common.Certificate + [CertificateRequest]: common.md#tls_certificates_common.CertificateRequest + [root_ca_cert]: requires.md#requires.TlsRequires.root_ca_cert + [root_ca_chain]: requires.md#requires.TlsRequires.root_ca_chain + [request_server_cert]: requires.md#requires.TlsRequires.request_server_cert + [request_client_cert]: requires.md#requires.TlsRequires.request_client_cert + [server_certs]: requires.md#requires.TlsRequires.server_certs + [server_certs_map]: requires.md#requires.TlsRequires.server_certs_map + [client_certs]: requires.md#requires.TlsRequires.server_certs + """ + + @when('endpoint.{endpoint_name}.joined') + def joined(self): + self.relations[0].to_publish_raw['unit_name'] = self._unit_name + prefix = self.expand_name('{endpoint_name}.') + ca_available = self.root_ca_cert + ca_changed = ca_available and data_changed(prefix + 'ca', + self.root_ca_cert) + server_available = self.server_certs + server_changed = server_available and data_changed(prefix + 'servers', + self.server_certs) + client_available = self.client_certs + client_changed = client_available and data_changed(prefix + 'clients', + self.client_certs) + certs_available = server_available or client_available + certs_changed = server_changed or client_changed + + set_flag(prefix + 'available') + toggle_flag(prefix + 'ca.available', ca_available) + toggle_flag(prefix + 'ca.changed', ca_changed) + toggle_flag(prefix + 'server.certs.available', server_available) + toggle_flag(prefix + 'server.certs.changed', server_changed) + toggle_flag(prefix + 'client.certs.available', client_available) + toggle_flag(prefix + 'client.certs.changed', client_changed) + toggle_flag(prefix + 'certs.available', certs_available) + toggle_flag(prefix + 'certs.changed', certs_changed) + # 
deprecated + toggle_flag(prefix + 'server.cert.available', self.server_certs) + toggle_flag(prefix + 'client.cert.available', self.get_client_cert()) + toggle_flag(prefix + 'batch.cert.available', self.server_certs) + + @when_not('endpoint.{endpoint_name}.joined') + def broken(self): + prefix = self.expand_name('{endpoint_name}.') + clear_flag(prefix + 'available') + clear_flag(prefix + 'ca.available') + clear_flag(prefix + 'ca.changed') + clear_flag(prefix + 'server.certs.available') + clear_flag(prefix + 'server.certs.changed') + clear_flag(prefix + 'client.certs.available') + clear_flag(prefix + 'client.certs.changed') + clear_flag(prefix + 'certs.available') + clear_flag(prefix + 'certs.changed') + # deprecated + clear_flag(prefix + 'server.cert.available') + clear_flag(prefix + 'client.cert.available') + clear_flag(prefix + 'batch.cert.available') + + @property + def _unit_name(self): + return hookenv.local_unit().replace('/', '_') + + @property + def root_ca_cert(self): + """ + Root CA certificate. + """ + # only the leader of the provider should set the CA, or all units + # had better agree + return self.all_joined_units.received_raw['ca'] + + def get_ca(self): + """ + Return the root CA certificate. + + Same as [root_ca_cert][]. + """ + return self.root_ca_cert + + @property + def root_ca_chain(self): + """ + The chain of trust for the root CA. + """ + # only the leader of the provider should set the CA, or all units + # had better agree + return self.all_joined_units.received_raw['chain'] + + def get_chain(self): + """ + Return the chain of trust for the root CA. + + Same as [root_ca_chain][]. + """ + return self.root_ca_chain + + def get_client_cert(self): + """ + Deprecated. Use [request_client_cert][] and the [client_certs][] + collection instead. + + Return a globally shared client certificate and key. + """ + data = self.all_joined_units.received_raw + return (data['client.cert'], data['client.key']) + + def get_server_cert(self): + """ + Deprecated. 
Use the [server_certs][] collection instead. + + Return the cert and key of the first server certificate requested. + """ + if not self.server_certs: + return (None, None) + cert = self.server_certs[0] + return (cert.cert, cert.key) + + @property + def server_certs(self): + """ + List of [Certificate][] instances for all available server certs. + """ + certs = [] + raw_data = self.all_joined_units.received_raw + json_data = self.all_joined_units.received + + # for backwards compatibility, the first cert goes in its own fields + if self.relations: + common_name = self.relations[0].to_publish_raw['common_name'] + cert = raw_data['{}.server.cert'.format(self._unit_name)] + key = raw_data['{}.server.key'.format(self._unit_name)] + if cert and key: + certs.append(Certificate('server', + common_name, + cert, + key)) + + # subsequent requests go in the collection + field = '{}.processed_requests'.format(self._unit_name) + certs_data = json_data[field] or {} + certs.extend(Certificate('server', + common_name, + cert['cert'], + cert['key']) + for common_name, cert in certs_data.items()) + return certs + + @property + def application_certs(self): + """ + List containg the application Certificate cert. + + :returns: A list containing one certificate + :rtype: [Certificate()] + """ + certs = [] + json_data = self.all_joined_units.received + field = '{}.processed_application_requests'.format(self._unit_name) + certs_data = json_data[field] or {} + app_cert_data = certs_data.get('app_data') + if app_cert_data: + certs = [Certificate( + 'server', + 'app_data', + app_cert_data['cert'], + app_cert_data['key'])] + return certs + + @property + def server_certs_map(self): + """ + Mapping of server [Certificate][] instances by their `common_name`. + """ + return {cert.common_name: cert for cert in self.server_certs} + + def get_batch_requests(self): + """ + Deprecated. Use [server_certs_map][] instead. + + Mapping of server [Certificate][] instances by their `common_name`. 
+ """ + return self.server_certs_map + + @property + def client_certs(self): + """ + List of [Certificate][] instances for all available client certs. + """ + field = '{}.processed_client_requests'.format(self._unit_name) + certs_data = self.all_joined_units.received[field] or {} + return [Certificate('client', + common_name, + cert['cert'], + cert['key']) + for common_name, cert in certs_data.items()] + + @property + def client_certs_map(self): + """ + Mapping of client [Certificate][] instances by their `common_name`. + """ + return {cert.common_name: cert for cert in self.client_certs} + + def request_server_cert(self, cn, sans=None, cert_name=None): + """ + Request a server certificate and key be generated for the given + common name (`cn`) and optional list of alternative names (`sans`). + + The `cert_name` is deprecated and not needed. + + This can be called multiple times to request more than one server + certificate, although the common names must be unique. If called + again with the same common name, it will be ignored. + """ + if not self.relations: + return + # assume we'll only be connected to one provider + to_publish_json = self.relations[0].to_publish + to_publish_raw = self.relations[0].to_publish_raw + if to_publish_raw['common_name'] in (None, '', cn): + # for backwards compatibility, first request goes in its own fields + to_publish_raw['common_name'] = cn + to_publish_json['sans'] = sans or [] + cert_name = to_publish_raw.get('certificate_name') or cert_name + if cert_name is None: + cert_name = str(uuid.uuid4()) + to_publish_raw['certificate_name'] = cert_name + else: + # subsequent requests go in the collection + requests = to_publish_json.get('cert_requests', {}) + requests[cn] = {'sans': sans or []} + to_publish_json['cert_requests'] = requests + + def add_request_server_cert(self, cn, sans): + """ + Deprecated. Use [request_server_cert][] instead. 
+ """ + self.request_server_cert(cn, sans) + + def request_server_certs(self): + """ + Deprecated. Just use [request_server_cert][]; this does nothing. + """ + pass + + def request_client_cert(self, cn, sans): + """ + Request a client certificate and key be generated for the given + common name (`cn`) and list of alternative names (`sans`). + + This can be called multiple times to request more than one client + certificate, although the common names must be unique. If called + again with the same common name, it will be ignored. + """ + if not self.relations: + return + # assume we'll only be connected to one provider + to_publish_json = self.relations[0].to_publish + requests = to_publish_json.get('client_cert_requests', {}) + requests[cn] = {'sans': sans} + to_publish_json['client_cert_requests'] = requests + + def request_application_cert(self, cn, sans): + """ + Request an application certificate and key be generated for the given + common name (`cn`) and list of alternative names (`sans` ) of this + unit and all peer units. All units will share a single certificates. 
+ """ + if not self.relations: + return + # assume we'll only be connected to one provider + to_publish_json = self.relations[0].to_publish + requests = to_publish_json.get('application_cert_requests', {}) + requests[cn] = {'sans': sans} + to_publish_json['application_cert_requests'] = requests diff --git a/kubeapi-load-balancer/hooks/relations/tls-certificates/tls_certificates_common.py b/kubeapi-load-balancer/hooks/relations/tls-certificates/tls_certificates_common.py new file mode 100644 index 0000000..99a2f8c --- /dev/null +++ b/kubeapi-load-balancer/hooks/relations/tls-certificates/tls_certificates_common.py @@ -0,0 +1,302 @@ +from charms.reactive import clear_flag, is_data_changed, data_changed + + +class CertificateRequest(dict): + def __init__(self, unit, cert_type, cert_name, common_name, sans): + self._unit = unit + self._cert_type = cert_type + super().__init__({ + 'certificate_name': cert_name, + 'common_name': common_name, + 'sans': sans, + }) + + @property + def _key(self): + return '.'.join((self._unit.relation.relation_id, + self.unit_name, + self.common_name)) + + def resolve_unit_name(self, unit): + """Return name of unit associated with this request. + + unit_name should be provided in the relation data to ensure + compatability with cross-model relations. If the unit name + is absent then fall back to unit_name attribute of the + unit associated with this request. + + :param unit: Unit to extract name from + :type unit: charms.reactive.endpoints.RelatedUnit + :returns: Name of unit + :rtype: str + """ + unit_name = unit.received_raw['unit_name'] + if not unit_name: + unit_name = unit.unit_name + return unit_name + + @property + def unit_name(self): + """Name of this unit. + + :returns: Name of unit + :rtype: str + """ + return self.resolve_unit_name(unit=self._unit).replace('/', '_') + + @property + def application_name(self): + """Name of the application which the request came from. 
+ + :returns: Name of application + :rtype: str + """ + return self.resolve_unit_name(unit=self._unit).split('/')[0] + + @property + def cert_type(self): + """ + Type of certificate, 'server' or 'client', being requested. + """ + return self._cert_type + + @property + def cert_name(self): + return self['certificate_name'] + + @property + def common_name(self): + return self['common_name'] + + @property + def sans(self): + return self['sans'] + + @property + def _publish_key(self): + if self.cert_type == 'server': + return '{}.processed_requests'.format(self.unit_name) + elif self.cert_type == 'client': + return '{}.processed_client_requests'.format(self.unit_name) + raise ValueError('Unknown cert_type: {}'.format(self.cert_type)) + + @property + def _server_cert_key(self): + return '{}.server.cert'.format(self.unit_name) + + @property + def _server_key_key(self): + return '{}.server.key'.format(self.unit_name) + + @property + def _is_top_level_server_cert(self): + return (self.cert_type == 'server' and + self.common_name == self._unit.received_raw['common_name']) + + @property + def cert(self): + """ + The cert published for this request, if any. 
+ """ + cert, key = None, None + if self._is_top_level_server_cert: + tpr = self._unit.relation.to_publish_raw + cert = tpr[self._server_cert_key] + key = tpr[self._server_key_key] + else: + tp = self._unit.relation.to_publish + certs_data = tp.get(self._publish_key, {}) + cert_data = certs_data.get(self.common_name, {}) + cert = cert_data.get('cert') + key = cert_data.get('key') + if cert and key: + return Certificate(self.cert_type, self.common_name, cert, key) + return None + + @property + def is_handled(self): + has_cert = self.cert is not None + same_sans = not is_data_changed(self._key, + sorted(set(self.sans or []))) + return has_cert and same_sans + + def set_cert(self, cert, key): + rel = self._unit.relation + if self._is_top_level_server_cert: + # backwards compatibility; if this is the cert that was requested + # as a single server cert, set it in the response as the single + # server cert + rel.to_publish_raw.update({ + self._server_cert_key: cert, + self._server_key_key: key, + }) + else: + data = rel.to_publish.get(self._publish_key, {}) + data[self.common_name] = { + 'cert': cert, + 'key': key, + } + rel.to_publish[self._publish_key] = data + if not rel.endpoint.new_server_requests: + clear_flag(rel.endpoint.expand_name('{endpoint_name}.server' + '.cert.requested')) + if not rel.endpoint.new_requests: + clear_flag(rel.endpoint.expand_name('{endpoint_name}.' + 'certs.requested')) + data_changed(self._key, sorted(set(self.sans or []))) + + +class ApplicationCertificateRequest(CertificateRequest): + """ + A request for an application consistent certificate. + + This is a request for a certificate that works for all units of an + application. All sans and cns are added together to produce one + certificate and the same certificate and key are sent to all the + units of an application. Only one ApplicationCertificateRequest + is needed per application. + """ + + @property + def _key(self): + """Key to identify this cert. 
+ + :returns: cert key + :rtype: str + """ + return '{}.{}'.format(self._unit.relation.relation_id, 'app_cert') + + @property + def cert(self): + """ + The cert published for this request, if any. + + :returns: Certificate + :rtype: Certificate or None + """ + cert, key = None, None + tp = self._unit.relation.to_publish + certs_data = tp.get(self._publish_key, {}) + cert_data = certs_data.get('app_data', {}) + cert = cert_data.get('cert') + key = cert_data.get('key') + if cert and key: + return Certificate(self.cert_type, self.common_name, cert, key) + return None + + @property + def is_handled(self): + """Whether the certificate has been handled. + + :returns: If the cert has been handled + :rtype: bool + """ + has_cert = self.cert is not None + same_sans = not is_data_changed(self._key, + sorted(set(self.sans or []))) + return has_cert and same_sans + + @property + def sans(self): + """Generate a list of all sans from all units of application + + Examine all units of the application and compile a list of + all sans. CNs are treated as addition san entries. + + :returns: List of sans + :rtype: List[str] + """ + _sans = [] + for unit in self._unit.relation.units: + reqs = unit.received['application_cert_requests'] or {} + for cn, req in reqs.items(): + _sans.append(cn) + _sans.extend(req['sans']) + return sorted(list(set(_sans))) + + @property + def _request_key(self): + """Key used to request cert + + :returns: Key used to request cert + :rtype: str + """ + return 'application_cert_requests' + + def derive_publish_key(self, unit=None): + """Derive the application cert publish key for a unit. 
+ + :param unit: Unit to extract name from + :type unit: charms.reactive.endpoints.RelatedUnit + :returns: publish key + :rtype: str + """ + if not unit: + unit = self._unit + unit_name = self.resolve_unit_name(unit).replace('/', '_') + return '{}.processed_application_requests'.format(unit_name) + + @property + def _publish_key(self): + """Key used to publish cert + + :returns: Key used to publish cert + :rtype: str + """ + return self.derive_publish_key(unit=self._unit) + + def set_cert(self, cert, key): + """Send the cert and key to all units of the application + + :param cert: TLS Certificate + :type cert: str + :param key: TLS Private Key + :type cert: str + """ + rel = self._unit.relation + for unit in self._unit.relation.units: + pub_key = self.derive_publish_key(unit=unit) + data = rel.to_publish.get( + pub_key, + {}) + data['app_data'] = { + 'cert': cert, + 'key': key, + } + rel.to_publish[pub_key] = data + if not rel.endpoint.new_application_requests: + clear_flag(rel.endpoint.expand_name( + '{endpoint_name}.application.certs.requested')) + data_changed(self._key, sorted(set(self.sans or []))) + + +class Certificate(dict): + """ + Represents a created certificate and key. + + The ``cert_type``, ``common_name``, ``cert``, and ``key`` values can + be accessed either as properties or as the contents of the dict. 
+ """ + def __init__(self, cert_type, common_name, cert, key): + super().__init__({ + 'cert_type': cert_type, + 'common_name': common_name, + 'cert': cert, + 'key': key, + }) + + @property + def cert_type(self): + return self['cert_type'] + + @property + def common_name(self): + return self['common_name'] + + @property + def cert(self): + return self['cert'] + + @property + def key(self): + return self['key'] diff --git a/kubeapi-load-balancer/hooks/relations/tls-certificates/tox.ini b/kubeapi-load-balancer/hooks/relations/tls-certificates/tox.ini new file mode 100644 index 0000000..90de9d3 --- /dev/null +++ b/kubeapi-load-balancer/hooks/relations/tls-certificates/tox.ini @@ -0,0 +1,17 @@ +[tox] +envlist = py3 +skipsdist = true + +[testenv] +basepython=python3 +envdir={toxworkdir}/py3 +deps= + pytest + charms.reactive + pydoc-markdown + +[testenv:docs] +commands=python make_docs + +[flake8] +ignore=E402 diff --git a/kubeapi-load-balancer/hooks/start b/kubeapi-load-balancer/hooks/start new file mode 100755 index 0000000..9858c6b --- /dev/null +++ b/kubeapi-load-balancer/hooks/start @@ -0,0 +1,22 @@ +#!/usr/bin/env python3 + +# Load modules from $JUJU_CHARM_DIR/lib +import sys +sys.path.append('lib') + +from charms.layer import basic # noqa +basic.bootstrap_charm_deps() + +from charmhelpers.core import hookenv # noqa +hookenv.atstart(basic.init_config_states) +hookenv.atexit(basic.clear_config_states) + + +# This will load and run the appropriate @hook and other decorated +# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive, +# and $JUJU_CHARM_DIR/hooks/relations. +# +# See https://jujucharms.com/docs/stable/authors-charm-building +# for more information on this pattern. 
+from charms.reactive import main # noqa +main() diff --git a/kubeapi-load-balancer/hooks/stop b/kubeapi-load-balancer/hooks/stop new file mode 100755 index 0000000..9858c6b --- /dev/null +++ b/kubeapi-load-balancer/hooks/stop @@ -0,0 +1,22 @@ +#!/usr/bin/env python3 + +# Load modules from $JUJU_CHARM_DIR/lib +import sys +sys.path.append('lib') + +from charms.layer import basic # noqa +basic.bootstrap_charm_deps() + +from charmhelpers.core import hookenv # noqa +hookenv.atstart(basic.init_config_states) +hookenv.atexit(basic.clear_config_states) + + +# This will load and run the appropriate @hook and other decorated +# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive, +# and $JUJU_CHARM_DIR/hooks/relations. +# +# See https://jujucharms.com/docs/stable/authors-charm-building +# for more information on this pattern. +from charms.reactive import main # noqa +main() diff --git a/kubeapi-load-balancer/hooks/update-status b/kubeapi-load-balancer/hooks/update-status new file mode 100755 index 0000000..9858c6b --- /dev/null +++ b/kubeapi-load-balancer/hooks/update-status @@ -0,0 +1,22 @@ +#!/usr/bin/env python3 + +# Load modules from $JUJU_CHARM_DIR/lib +import sys +sys.path.append('lib') + +from charms.layer import basic # noqa +basic.bootstrap_charm_deps() + +from charmhelpers.core import hookenv # noqa +hookenv.atstart(basic.init_config_states) +hookenv.atexit(basic.clear_config_states) + + +# This will load and run the appropriate @hook and other decorated +# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive, +# and $JUJU_CHARM_DIR/hooks/relations. +# +# See https://jujucharms.com/docs/stable/authors-charm-building +# for more information on this pattern. 
+from charms.reactive import main # noqa +main() diff --git a/kubeapi-load-balancer/hooks/upgrade-charm b/kubeapi-load-balancer/hooks/upgrade-charm new file mode 100755 index 0000000..9858c6b --- /dev/null +++ b/kubeapi-load-balancer/hooks/upgrade-charm @@ -0,0 +1,22 @@ +#!/usr/bin/env python3 + +# Load modules from $JUJU_CHARM_DIR/lib +import sys +sys.path.append('lib') + +from charms.layer import basic # noqa +basic.bootstrap_charm_deps() + +from charmhelpers.core import hookenv # noqa +hookenv.atstart(basic.init_config_states) +hookenv.atexit(basic.clear_config_states) + + +# This will load and run the appropriate @hook and other decorated +# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive, +# and $JUJU_CHARM_DIR/hooks/relations. +# +# See https://jujucharms.com/docs/stable/authors-charm-building +# for more information on this pattern. +from charms.reactive import main # noqa +main() diff --git a/kubeapi-load-balancer/hooks/website-relation-broken b/kubeapi-load-balancer/hooks/website-relation-broken new file mode 100755 index 0000000..9858c6b --- /dev/null +++ b/kubeapi-load-balancer/hooks/website-relation-broken @@ -0,0 +1,22 @@ +#!/usr/bin/env python3 + +# Load modules from $JUJU_CHARM_DIR/lib +import sys +sys.path.append('lib') + +from charms.layer import basic # noqa +basic.bootstrap_charm_deps() + +from charmhelpers.core import hookenv # noqa +hookenv.atstart(basic.init_config_states) +hookenv.atexit(basic.clear_config_states) + + +# This will load and run the appropriate @hook and other decorated +# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive, +# and $JUJU_CHARM_DIR/hooks/relations. +# +# See https://jujucharms.com/docs/stable/authors-charm-building +# for more information on this pattern. 
+from charms.reactive import main # noqa +main() diff --git a/kubeapi-load-balancer/hooks/website-relation-changed b/kubeapi-load-balancer/hooks/website-relation-changed new file mode 100755 index 0000000..9858c6b --- /dev/null +++ b/kubeapi-load-balancer/hooks/website-relation-changed @@ -0,0 +1,22 @@ +#!/usr/bin/env python3 + +# Load modules from $JUJU_CHARM_DIR/lib +import sys +sys.path.append('lib') + +from charms.layer import basic # noqa +basic.bootstrap_charm_deps() + +from charmhelpers.core import hookenv # noqa +hookenv.atstart(basic.init_config_states) +hookenv.atexit(basic.clear_config_states) + + +# This will load and run the appropriate @hook and other decorated +# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive, +# and $JUJU_CHARM_DIR/hooks/relations. +# +# See https://jujucharms.com/docs/stable/authors-charm-building +# for more information on this pattern. +from charms.reactive import main # noqa +main() diff --git a/kubeapi-load-balancer/hooks/website-relation-departed b/kubeapi-load-balancer/hooks/website-relation-departed new file mode 100755 index 0000000..9858c6b --- /dev/null +++ b/kubeapi-load-balancer/hooks/website-relation-departed @@ -0,0 +1,22 @@ +#!/usr/bin/env python3 + +# Load modules from $JUJU_CHARM_DIR/lib +import sys +sys.path.append('lib') + +from charms.layer import basic # noqa +basic.bootstrap_charm_deps() + +from charmhelpers.core import hookenv # noqa +hookenv.atstart(basic.init_config_states) +hookenv.atexit(basic.clear_config_states) + + +# This will load and run the appropriate @hook and other decorated +# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive, +# and $JUJU_CHARM_DIR/hooks/relations. +# +# See https://jujucharms.com/docs/stable/authors-charm-building +# for more information on this pattern. 
+from charms.reactive import main # noqa +main() diff --git a/kubeapi-load-balancer/hooks/website-relation-joined b/kubeapi-load-balancer/hooks/website-relation-joined new file mode 100755 index 0000000..9858c6b --- /dev/null +++ b/kubeapi-load-balancer/hooks/website-relation-joined @@ -0,0 +1,22 @@ +#!/usr/bin/env python3 + +# Load modules from $JUJU_CHARM_DIR/lib +import sys +sys.path.append('lib') + +from charms.layer import basic # noqa +basic.bootstrap_charm_deps() + +from charmhelpers.core import hookenv # noqa +hookenv.atstart(basic.init_config_states) +hookenv.atexit(basic.clear_config_states) + + +# This will load and run the appropriate @hook and other decorated +# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive, +# and $JUJU_CHARM_DIR/hooks/relations. +# +# See https://jujucharms.com/docs/stable/authors-charm-building +# for more information on this pattern. +from charms.reactive import main # noqa +main() diff --git a/kubeapi-load-balancer/icon.svg b/kubeapi-load-balancer/icon.svg new file mode 100644 index 0000000..7f2998e --- /dev/null +++ b/kubeapi-load-balancer/icon.svg @@ -0,0 +1,412 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + image/svg+xml + + + + + + + + + + + + + + + + + + + + + + + diff --git a/kubeapi-load-balancer/layer.yaml b/kubeapi-load-balancer/layer.yaml new file mode 100644 index 0000000..0f3a5b3 --- /dev/null +++ b/kubeapi-load-balancer/layer.yaml @@ -0,0 +1,49 @@ +"includes": +- "layer:options" +- "layer:basic" +- "interface:nrpe-external-master" +- "layer:status" +- "layer:apt" +- "interface:http" +- "layer:debug" +- "interface:tls-certificates" +- "interface:hacluster" +- "layer:kubernetes-common" +- "layer:metrics" +- "layer:nagios" +- "layer:nginx" +- "layer:tls-client" +- "layer:hacluster" +- "interface:public-address" +- "layer:leadership" +"exclude": [".travis.yml", "tests", "tox.ini", 
"test-requirements.txt", "unit_tests"] +"options": + "apt": + "packages": + - "nginx-full" + "version_package": "" + "full_version": !!bool "false" + "keys": [] + "tls-client": + "ca_certificate_path": "/srv/kubernetes/ca.crt" + "server_certificate_path": "" + "server_key_path": "" + "client_certificate_path": "" + "client_key_path": "" + "hacluster": + "binding_address": "website" + "basic": + "packages": [] + "python_packages": [] + "use_venv": !!bool "true" + "include_system_packages": !!bool "false" + "nagios": {} + "status": + "patch-hookenv": !!bool "true" + "nginx": {} + "debug": {} + "kubernetes-common": {} + "leadership": {} + "kubeapi-load-balancer": {} +"repo": "https://github.com/kubernetes/kubernetes.git" +"is": "kubeapi-load-balancer" diff --git a/kubeapi-load-balancer/lib/.gitkeep b/kubeapi-load-balancer/lib/.gitkeep new file mode 100644 index 0000000..e69de29 diff --git a/kubeapi-load-balancer/lib/charms/apt.py b/kubeapi-load-balancer/lib/charms/apt.py new file mode 100644 index 0000000..14508c4 --- /dev/null +++ b/kubeapi-load-balancer/lib/charms/apt.py @@ -0,0 +1,209 @@ +# Copyright 2015-2020 Canonical Ltd. +# +# This file is part of the Apt layer for Juju. +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License version 3, as +# published by the Free Software Foundation. +# +# This program is distributed in the hope that it will be useful, but +# WITHOUT ANY WARRANTY; without even the implied warranties of +# MERCHANTABILITY, SATISFACTORY QUALITY, or FITNESS FOR A PARTICULAR +# PURPOSE. See the GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . + +''' +charms.reactive helpers for dealing with deb packages. + +Add apt package sources using add_source(). Queue deb packages for +installation with install(). 
Configure and work with your software +once the apt.installed.{packagename} flag is set. +''' +import itertools +import re +import subprocess + +from charmhelpers import fetch +from charmhelpers.core import hookenv, unitdata +from charms import layer, reactive +from charms.layer import status +from charms.reactive import flags + + +__all__ = ['add_source', 'update', 'queue_install', 'install_queued', 'installed', 'purge', 'ensure_package_status'] + + +def add_source(source, key=None): + '''Add an apt source. + + Sets the apt.needs_update flag. + + A source may be either a line that can be added directly to + sources.list(5), or in the form ppa:/ for adding + Personal Package Archives, or a distribution component to enable. + + The package signing key should be an ASCII armoured GPG key. While + GPG key ids are also supported, the retrieval mechanism is insecure. + There is no need to specify the package signing key for PPAs or for + the main Ubuntu archives. + ''' + # Maybe we should remember which sources have been added already + # so we don't waste time re-adding them. Is this time significant? + fetch.add_source(source, key) + reactive.set_flag('apt.needs_update') + + +def queue_install(packages, options=None): + """Queue one or more deb packages for install. + + The `apt.installed.{name}` flag is set once the package is installed. + + If a package has already been installed it will not be reinstalled. + + If a package has already been queued it will not be requeued, and + the install options will not be changed. + + Sets the apt.queued_installs flag. + """ + if isinstance(packages, str): + packages = [packages] + # Filter installed packages. + store = unitdata.kv() + queued_packages = store.getrange('apt.install_queue.', strip=True) + packages = { + package: options + for package in packages + if not (package in queued_packages or reactive.is_flag_set('apt.installed.' 
+ package)) + } + if packages: + unitdata.kv().update(packages, prefix='apt.install_queue.') + reactive.set_flag('apt.queued_installs') + + +def installed(): + '''Return the set of deb packages completed install''' + return set(flag.split('.', 2)[2] for flag in flags.get_flags() if flag.startswith('apt.installed.')) + + +def purge(packages): + """Purge one or more deb packages from the system""" + fetch.apt_purge(packages, fatal=True) + store = unitdata.kv() + store.unsetrange(packages, prefix='apt.install_queue.') + for package in packages: + reactive.clear_flag('apt.installed.{}'.format(package)) + + +def update(): + """Update the apt cache. + + Removes the apt.needs_update flag. + """ + status.maintenance('Updating apt cache') + fetch.apt_update(fatal=True) # Friends don't let friends set fatal=False + reactive.clear_flag('apt.needs_update') + + +def install_queued(): + '''Installs queued deb packages. + + Removes the apt.queued_installs flag and sets the apt.installed flag. + + On failure, sets the unit's workload status to 'blocked' and returns + False. Package installs remain queued. + + On success, sets the apt.installed.{packagename} flag for each + installed package and returns True. + ''' + store = unitdata.kv() + queue = sorted((options, package) for package, options in store.getrange('apt.install_queue.', strip=True).items()) + + installed = set() + for options, batch in itertools.groupby(queue, lambda x: x[0]): + packages = [b[1] for b in batch] + try: + status.maintenance('Installing {}'.format(','.join(packages))) + fetch.apt_install(packages, options, fatal=True) + store.unsetrange(packages, prefix='apt.install_queue.') + installed.update(packages) + except subprocess.CalledProcessError: + status.blocked('Unable to install packages {}'.format(','.join(packages))) + return False # Without setting reactive flag. 
+ + for package in installed: + reactive.set_flag('apt.installed.{}'.format(package)) + reactive.clear_flag('apt.queued_installs') + + reset_application_version() + + return True + + +def get_package_version(package, full_version=False): + '''Return the version of an installed package. + + If `full_version` is True, returns the full Debian package version. + Otherwise, returns the shorter 'upstream' version number. + ''' + # Don't use fetch.get_upstream_version, as it depends on python-apt + # and not available if the basic layer's use_site_packages option is off. + cmd = ['dpkg-query', '--show', r'--showformat=${Version}\n', package] + full = subprocess.check_output(cmd, universal_newlines=True).strip() + if not full_version: + # Attempt to strip off Debian style metadata from the end of the + # version number. + m = re.search(r'^([\d.a-z]+)', full, re.I) + if m is not None: + return m.group(1) + return full + + +def reset_application_version(): + '''Set the Juju application version, per settings in layer.yaml''' + # Reset the application version. We call this after installing + # packages to initialize the version. We also call this every + # hook, incase the version has changed (eg. Landscape upgraded + # the package). + opts = layer.options().get('apt', {}) + pkg = opts.get('version_package') + if pkg and pkg in installed(): + ver = get_package_version(pkg, opts.get('full_version', False)) + hookenv.application_version_set(ver) + + +def ensure_package_status(): + '''Hold or unhold packages per the package_status configuration option. + + All packages installed using this module and handlers are affected. + + An mechanism may be added in the future to override this for a + subset of installed packages. 
+ ''' + packages = installed() + if not packages: + return + config = hookenv.config() + package_status = config.get('package_status') or '' + changed = reactive.data_changed('apt.package_status', (package_status, sorted(packages))) + if changed: + if package_status == 'hold': + hookenv.log('Holding packages {}'.format(','.join(packages))) + fetch.apt_hold(packages) + else: + hookenv.log('Unholding packages {}'.format(','.join(packages))) + fetch.apt_unhold(packages) + reactive.clear_flag('apt.needs_hold') + + +def status_set(state, message): + '''DEPRECATED, set the unit's workload status. + + Set state == None to keep the same state and just change the message. + ''' + if state is None: + state = hookenv.status_get()[0] + if state not in ('active', 'waiting', 'blocked'): + state = 'maintenance' # Guess + status.status_set(state, message) diff --git a/kubeapi-load-balancer/lib/charms/layer/__init__.py b/kubeapi-load-balancer/lib/charms/layer/__init__.py new file mode 100644 index 0000000..a8e0c64 --- /dev/null +++ b/kubeapi-load-balancer/lib/charms/layer/__init__.py @@ -0,0 +1,60 @@ +import sys +from importlib import import_module +from pathlib import Path + + +def import_layer_libs(): + """ + Ensure that all layer libraries are imported. + + This makes it possible to do the following: + + from charms import layer + + layer.foo.do_foo_thing() + + Note: This function must be called after bootstrap. + """ + for module_file in Path('lib/charms/layer').glob('*'): + module_name = module_file.stem + if module_name in ('__init__', 'basic', 'execd') or not ( + module_file.suffix == '.py' or module_file.is_dir() + ): + continue + import_module('charms.layer.{}'.format(module_name)) + + +# Terrible hack to support the old terrible interface. +# Try to get people to call layer.options.get() instead so +# that we can remove this garbage. 
+# Cribbed from https://stackoverfLow.com/a/48100440/4941864 +class OptionsBackwardsCompatibilityHack(sys.modules[__name__].__class__): + def __call__(self, section=None, layer_file=None): + if layer_file is None: + return self.get(section=section) + else: + return self.get(section=section, + layer_file=Path(layer_file)) + + +def patch_options_interface(): + from charms.layer import options + if sys.version_info.minor >= 5: + options.__class__ = OptionsBackwardsCompatibilityHack + else: + # Py 3.4 doesn't support changing the __class__, so we have to do it + # another way. The last line is needed because we already have a + # reference that doesn't get updated with sys.modules. + name = options.__name__ + hack = OptionsBackwardsCompatibilityHack(name) + hack.get = options.get + sys.modules[name] = hack + sys.modules[__name__].options = hack + + +try: + patch_options_interface() +except ImportError: + # This may fail if pyyaml hasn't been installed yet. But in that + # case, the bootstrap logic will try it again once it has. + pass diff --git a/kubeapi-load-balancer/lib/charms/layer/basic.py b/kubeapi-load-balancer/lib/charms/layer/basic.py new file mode 100644 index 0000000..9122f7c --- /dev/null +++ b/kubeapi-load-balancer/lib/charms/layer/basic.py @@ -0,0 +1,508 @@ +import os +import sys +import re +import shutil +from distutils.version import LooseVersion +from pkg_resources import Requirement +from glob import glob +from subprocess import check_call, check_output, CalledProcessError +from time import sleep + +from charms import layer +from charms.layer.execd import execd_preinstall + + +def _get_subprocess_env(): + env = os.environ.copy() + env['LANG'] = env.get('LANG', 'C.UTF-8') + return env + + +def get_series(): + """ + Return series for a few known OS:es. + Tested as of 2019 november: + * centos6, centos7, rhel6. 
+ * bionic + """ + series = "" + + # Looking for content in /etc/os-release + # works for ubuntu + some centos + if os.path.isfile('/etc/os-release'): + d = {} + with open('/etc/os-release', 'r') as rel: + for l in rel: + if not re.match(r'^\s*$', l): + k, v = l.split('=') + d[k.strip()] = v.strip().replace('"', '') + series = "{ID}{VERSION_ID}".format(**d) + + # Looking for content in /etc/redhat-release + # works for redhat enterprise systems + elif os.path.isfile('/etc/redhat-release'): + with open('/etc/redhat-release', 'r') as redhatlsb: + # CentOS Linux release 7.7.1908 (Core) + line = redhatlsb.readline() + release = int(line.split("release")[1].split()[0][0]) + series = "centos" + str(release) + + # Looking for content in /etc/lsb-release + # works for ubuntu + elif os.path.isfile('/etc/lsb-release'): + d = {} + with open('/etc/lsb-release', 'r') as lsb: + for l in lsb: + k, v = l.split('=') + d[k.strip()] = v.strip() + series = d['DISTRIB_CODENAME'] + + # This is what happens if we cant figure out the OS. + else: + series = "unknown" + return series + + +def bootstrap_charm_deps(): + """ + Set up the base charm dependencies so that the reactive system can run. + """ + # execd must happen first, before any attempt to install packages or + # access the network, because sites use this hook to do bespoke + # configuration and install secrets so the rest of this bootstrap + # and the charm itself can actually succeed. This call does nothing + # unless the operator has created and populated $JUJU_CHARM_DIR/exec.d. + execd_preinstall() + # ensure that $JUJU_CHARM_DIR/bin is on the path, for helper scripts + + series = get_series() + + # OMG?! is build-essentials needed? 
+ ubuntu_packages = ['python3-pip', + 'python3-setuptools', + 'python3-yaml', + 'python3-dev', + 'python3-wheel', + 'build-essential'] + + # I'm not going to "yum group info "Development Tools" + # omitting above madness + centos_packages = ['python3-pip', + 'python3-setuptools', + 'python3-devel', + 'python3-wheel'] + + packages_needed = [] + if 'centos' in series: + packages_needed = centos_packages + else: + packages_needed = ubuntu_packages + + charm_dir = os.environ['JUJU_CHARM_DIR'] + os.environ['PATH'] += ':%s' % os.path.join(charm_dir, 'bin') + venv = os.path.abspath('../.venv') + vbin = os.path.join(venv, 'bin') + vpip = os.path.join(vbin, 'pip') + vpy = os.path.join(vbin, 'python') + hook_name = os.path.basename(sys.argv[0]) + is_bootstrapped = os.path.exists('wheelhouse/.bootstrapped') + is_charm_upgrade = hook_name == 'upgrade-charm' + is_series_upgrade = hook_name == 'post-series-upgrade' + is_post_upgrade = os.path.exists('wheelhouse/.upgraded') + is_upgrade = (not is_post_upgrade and + (is_charm_upgrade or is_series_upgrade)) + if is_bootstrapped and not is_upgrade: + # older subordinates might have downgraded charm-env, so we should + # restore it if necessary + install_or_update_charm_env() + activate_venv() + # the .upgrade file prevents us from getting stuck in a loop + # when re-execing to activate the venv; at this point, we've + # activated the venv, so it's safe to clear it + if is_post_upgrade: + os.unlink('wheelhouse/.upgraded') + return + if os.path.exists(venv): + try: + # focal installs or upgrades prior to PR 160 could leave the venv + # in a broken state which would prevent subsequent charm upgrades + _load_installed_versions(vpip) + except CalledProcessError: + is_broken_venv = True + else: + is_broken_venv = False + if is_upgrade or is_broken_venv: + # All upgrades should do a full clear of the venv, rather than + # just updating it, to bring in updates to Python itself + shutil.rmtree(venv) + if is_upgrade: + if 
os.path.exists('wheelhouse/.bootstrapped'): + os.unlink('wheelhouse/.bootstrapped') + # bootstrap wheelhouse + if os.path.exists('wheelhouse'): + pre_eoan = series in ('ubuntu12.04', 'precise', + 'ubuntu14.04', 'trusty', + 'ubuntu16.04', 'xenial', + 'ubuntu18.04', 'bionic') + pydistutils_lines = [ + "[easy_install]\n", + "find_links = file://{}/wheelhouse/\n".format(charm_dir), + "no_index=True\n", + "index_url=\n", # deliberately nothing here; disables it. + ] + if pre_eoan: + pydistutils_lines.append("allow_hosts = ''\n") + with open('/root/.pydistutils.cfg', 'w') as fp: + # make sure that easy_install also only uses the wheelhouse + # (see https://github.com/pypa/pip/issues/410) + fp.writelines(pydistutils_lines) + if 'centos' in series: + yum_install(packages_needed) + else: + apt_install(packages_needed) + from charms.layer import options + cfg = options.get('basic') + # include packages defined in layer.yaml + if 'centos' in series: + yum_install(cfg.get('packages', [])) + else: + apt_install(cfg.get('packages', [])) + # if we're using a venv, set it up + if cfg.get('use_venv'): + if not os.path.exists(venv): + series = get_series() + if series in ('ubuntu12.04', 'precise', + 'ubuntu14.04', 'trusty'): + apt_install(['python-virtualenv']) + elif 'centos' in series: + yum_install(['python-virtualenv']) + else: + apt_install(['virtualenv']) + cmd = ['virtualenv', '-ppython3', '--never-download', venv] + if cfg.get('include_system_packages'): + cmd.append('--system-site-packages') + check_call(cmd, env=_get_subprocess_env()) + os.environ['PATH'] = ':'.join([vbin, os.environ['PATH']]) + pip = vpip + else: + pip = 'pip3' + # save a copy of system pip to prevent `pip3 install -U pip` + # from changing it + if os.path.exists('/usr/bin/pip'): + shutil.copy2('/usr/bin/pip', '/usr/bin/pip.save') + pre_install_pkgs = ['pip', 'setuptools', 'setuptools-scm'] + # we bundle these packages to work around bugs in older versions (such + # as 
https://github.com/pypa/pip/issues/56), but if the system already + # provided a newer version, downgrading it can cause other problems + _update_if_newer(pip, pre_install_pkgs) + # install the rest of the wheelhouse deps (extract the pkg names into + # a set so that we can ignore the pre-install packages and let pip + # choose the best version in case there are multiple from layer + # conflicts) + _versions = _load_wheelhouse_versions() + _pkgs = _versions.keys() - set(pre_install_pkgs) + # Jinja2 3+ relies on MarkupSafe actually being installed prior to + # attempting to be installed from the wheelhouse. Thus, if MarkupSafe + # and/or wheel are in _pkgs, then install them first. + _pre_packages = [p for p in _pkgs if p in ('wheel', 'MarkupSafe')] + _pkgs = [p for p in _pkgs if p not in _pre_packages] + for _pkgs_set in (_pre_packages, _pkgs): + # add back the versions such that each package in pkgs is + # ==. + # This ensures that pip 20.3.4+ will install the packages from the + # wheelhouse without (erroneously) flagging an error. 
+ pkgs = _add_back_versions(_pkgs_set, _versions) + reinstall_flag = '--force-reinstall' + # if not cfg.get('use_venv', True) and pre_eoan: + if not cfg.get('use_venv', True): + reinstall_flag = '--ignore-installed' + check_call([pip, 'install', '-U', reinstall_flag, '--no-index', + '--no-cache-dir', '-f', 'wheelhouse'] + list(pkgs), + env=_get_subprocess_env()) + # re-enable installation from pypi + os.remove('/root/.pydistutils.cfg') + + # install pyyaml for centos7, since, unlike the ubuntu image, the + # default image for centos doesn't include pyyaml; see the discussion: + # https://discourse.jujucharms.com/t/charms-for-centos-lets-begin + if 'centos' in series: + check_call([pip, 'install', '-U', 'pyyaml'], + env=_get_subprocess_env()) + + # install python packages from layer options + if cfg.get('python_packages'): + check_call([pip, 'install', '-U'] + cfg.get('python_packages'), + env=_get_subprocess_env()) + if not cfg.get('use_venv'): + # restore system pip to prevent `pip3 install -U pip` + # from changing it + if os.path.exists('/usr/bin/pip.save'): + shutil.copy2('/usr/bin/pip.save', '/usr/bin/pip') + os.remove('/usr/bin/pip.save') + # setup wrappers to ensure envs are used for scripts + install_or_update_charm_env() + for wrapper in ('charms.reactive', 'charms.reactive.sh', + 'chlp', 'layer_option'): + src = os.path.join('/usr/local/sbin', 'charm-env') + dst = os.path.join('/usr/local/sbin', wrapper) + if not os.path.exists(dst): + os.symlink(src, dst) + if cfg.get('use_venv'): + shutil.copy2('bin/layer_option', vbin) + else: + shutil.copy2('bin/layer_option', '/usr/local/bin/') + # re-link the charm copy to the wrapper in case charms + # call bin/layer_option directly (as was the old pattern) + os.remove('bin/layer_option') + os.symlink('/usr/local/sbin/layer_option', 'bin/layer_option') + # flag us as having already bootstrapped so we don't do it again + open('wheelhouse/.bootstrapped', 'w').close() + if is_upgrade: + # flag us as having already 
def _load_installed_versions(pip):
    """Return the installed package versions reported by ``pip freeze``.

    :param pip: path to (or name of) the pip executable to query
    :returns: map of project name to LooseVersion for every requirement
        that ``pip freeze`` pins with ``==``; lines that do not parse as
        requirements are skipped
    """
    pip_freeze = check_output([pip, 'freeze']).decode('utf8')
    versions = {}
    for pkg_ver in pip_freeze.splitlines():
        try:
            req = Requirement.parse(pkg_ver)
        except ValueError:
            # e.g. editable installs or comment lines; not relevant here
            continue
        versions.update({
            req.project_name: LooseVersion(ver)
            for op, ver in req.specs if op == '=='
        })
    return versions


def _load_wheelhouse_versions():
    """Return the package versions bundled in the charm's wheelhouse.

    :returns: map of (dash-normalized) package name to LooseVersion,
        derived from the ``<pkg>-<version>.<ext>`` wheelhouse filenames
    """
    versions = {}
    for wheel in glob('wheelhouse/*'):
        pkg, ver = os.path.basename(wheel).rsplit('-', 1)
        # nb: LooseVersion ignores the file extension
        versions[pkg.replace('_', '-')] = LooseVersion(ver)
    return versions


def _add_back_versions(pkgs, versions):
    """Add back the version strings to each of the packages.

    The versions are LooseVersion() from _load_wheelhouse_versions(). This
    function strips the ".zip" or ".tar.gz" from the end of the version string
    and adds it back to the package in the form of <pkg>==<version>.

    If a package name is not a key in the versions dictionary, then it is
    returned in the list unchanged.

    :param pkgs: A list of package names
    :type pkgs: List[str]
    :param versions: A map of package to LooseVersion
    :type versions: Dict[str, LooseVersion]
    :returns: A list of (maybe) versioned packages
    :rtype: List[str]
    """
    def _strip_ext(s):
        """Strip a .zip or .tar.gz extension from the string, if present."""
        for ending in [".zip", ".tar.gz"]:
            if s.endswith(ending):
                return s[:-len(ending)]
        return s

    def _maybe_add_version(pkg):
        """Return ``pkg==<version>`` if pkg is in `versions`, else pkg."""
        try:
            return "{}=={}".format(pkg, _strip_ext(str(versions[pkg])))
        except KeyError:
            pass
        return pkg

    return [_maybe_add_version(pkg) for pkg in pkgs]


def _update_if_newer(pip, pkgs):
    """Install each package from the wheelhouse if the bundled copy is
    newer than (or missing from) the current installation.

    :param pip: pip executable to use
    :param pkgs: iterable of package names to consider
    """
    installed = _load_installed_versions(pip)
    wheelhouse = _load_wheelhouse_versions()
    for pkg in pkgs:
        if pkg not in wheelhouse:
            # Fix: the original indexed wheelhouse[pkg] unconditionally and
            # raised KeyError when a package was installed on the system but
            # not bundled; with --no-index an install would fail anyway, so
            # leave the installed copy alone.
            continue
        if pkg not in installed or wheelhouse[pkg] > installed[pkg]:
            check_call([pip, 'install', '-U', '--no-index', '-f', 'wheelhouse',
                        pkg], env=_get_subprocess_env())


def install_or_update_charm_env():
    """Install the bundled ``charm-env`` wrapper into /usr/local/sbin when
    it is newer than the copy already installed (or none is installed).
    """
    # On Trusty python3-pkg-resources is not installed
    try:
        from pkg_resources import parse_version
    except ImportError:
        apt_install(['python3-pkg-resources'])
        from pkg_resources import parse_version

    def _version_of(charm_env):
        # A missing or broken binary counts as 0.0.0 so the bundled copy
        # wins the comparison below.
        try:
            return parse_version(
                check_output([charm_env, '--version']).decode('utf8'))
        except (CalledProcessError, FileNotFoundError):
            return parse_version('0.0.0')

    if _version_of('/usr/local/sbin/charm-env') < _version_of('bin/charm-env'):
        shutil.copy2('bin/charm-env', '/usr/local/sbin/')
def activate_venv():
    """
    Activate the charm's venv when ``use_venv`` is enabled in ``layer.yaml``.

    Normal hooks get this automatically; actions running outside the hook
    machinery can invoke it manually after appending ``lib`` to ``sys.path``:

        from charms.layer.basic import activate_venv
        activate_venv()

    Ensures modules installed in the charm's virtual environment are
    importable by the caller.
    """
    from charms.layer import options
    venv_dir = os.path.abspath('../.venv')
    venv_bin = os.path.join(venv_dir, 'bin')
    if options.get('basic', 'use_venv') and '.venv' not in sys.executable:
        # put the venv first on PATH, then re-exec under its interpreter
        os.environ['PATH'] = ':'.join([venv_bin, os.environ['PATH']])
        reload_interpreter(os.path.join(venv_bin, 'python'))
    layer.patch_options_interface()
    layer.import_layer_libs()


def reload_interpreter(python):
    """
    Replace the current process with ``python`` running the same argv.

    Needed because freshly installed namespace packages sometimes are not
    picked up by the already-running interpreter.
    """
    os.execve(python, [python] + list(sys.argv), os.environ)


def apt_install(packages):
    """
    Install apt packages non-interactively, retrying up to three times.

    This ensures a consistent set of options that are often missed but
    should really be set.
    """
    if isinstance(packages, (str, bytes)):
        packages = [packages]

    env = _get_subprocess_env()
    # avoid debconf prompts hanging the hook
    env.setdefault('DEBIAN_FRONTEND', 'noninteractive')

    install_cmd = ['apt-get',
                   '--option=Dpkg::Options::=--force-confold',
                   '--assume-yes',
                   'install']
    for attempt in range(3):
        try:
            check_call(install_cmd + packages, env=env)
            break
        except CalledProcessError:
            if attempt == 2:  # third attempt
                raise
            try:
                # sometimes apt-get update needs to be run
                check_call(['apt-get', 'update'], env=env)
            except CalledProcessError:
                # sometimes it's a dpkg lock issue
                pass
            sleep(5)
def yum_install(packages):
    """Install packages with yum, retrying up to three times.

    This function largely mimics the apt_install function for consistency.
    """
    if not packages:
        return
    env = os.environ.copy()
    for attempt in range(3):
        try:
            check_call(['yum', '-y', 'install'] + packages, env=env)
            break
        except CalledProcessError:
            if attempt == 2:
                raise
            try:
                check_call(['yum', 'update'], env=env)
            except CalledProcessError:
                pass
            sleep(5)


def init_config_states():
    """Set/toggle the reactive ``config.*`` states for every option
    declared in the charm's config.yaml."""
    import yaml
    from charmhelpers.core import hookenv
    from charms.reactive import set_state
    from charms.reactive import toggle_state
    config = hookenv.config()
    config_defs = {}
    config_yaml = os.path.join(hookenv.charm_dir(), 'config.yaml')
    if os.path.exists(config_yaml):
        with open(config_yaml) as fp:
            config_defs = yaml.safe_load(fp).get('options', {})
    defaults = {name: definition.get('default')
                for name, definition in config_defs.items()}
    for opt in config_defs:
        if config.changed(opt):
            set_state('config.changed')
            set_state('config.changed.{}'.format(opt))
        toggle_state('config.set.{}'.format(opt), config.get(opt))
        toggle_state('config.default.{}'.format(opt),
                     config.get(opt) == defaults[opt])


def clear_config_states():
    """Remove all ``config.*`` reactive states and flush unit data."""
    from charmhelpers.core import hookenv, unitdata
    from charms.reactive import remove_state
    config = hookenv.config()
    remove_state('config.changed')
    for opt in config.keys():
        for pattern in ('config.changed.{}',
                        'config.set.{}',
                        'config.default.{}'):
            remove_state(pattern.format(opt))
    unitdata.kv().flush()
import remove_state + config = hookenv.config() + remove_state('config.changed') + for opt in config.keys(): + remove_state('config.changed.{}'.format(opt)) + remove_state('config.set.{}'.format(opt)) + remove_state('config.default.{}'.format(opt)) + unitdata.kv().flush() diff --git a/kubeapi-load-balancer/lib/charms/layer/execd.py b/kubeapi-load-balancer/lib/charms/layer/execd.py new file mode 100644 index 0000000..438d9a1 --- /dev/null +++ b/kubeapi-load-balancer/lib/charms/layer/execd.py @@ -0,0 +1,114 @@ +# Copyright 2014-2016 Canonical Limited. +# +# This file is part of layer-basic, the reactive base layer for Juju. +# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see . + +# This module may only import from the Python standard library. +import os +import sys +import subprocess +import time + +''' +execd/preinstall + +Read the layer-basic docs for more info on how to use this feature. 
def default_execd_dir():
    """Return the default ``exec.d`` directory inside the charm."""
    return os.path.join(os.environ['JUJU_CHARM_DIR'], 'exec.d')


def execd_module_paths(execd_dir=None):
    """Generate a list of full paths to modules within execd_dir."""
    execd_dir = execd_dir or default_execd_dir()

    if not os.path.exists(execd_dir):
        return

    for entry in os.listdir(execd_dir):
        candidate = os.path.join(execd_dir, entry)
        # only directories count as execd modules
        if os.path.isdir(candidate):
            yield candidate


def execd_submodule_paths(command, execd_dir=None):
    """Generate a list of full paths to the specified command within exec_dir.
    """
    for module_path in execd_module_paths(execd_dir):
        candidate = os.path.join(module_path, command)
        # must be an executable regular file
        if os.path.isfile(candidate) and os.access(candidate, os.X_OK):
            yield candidate


def execd_sentinel_path(submodule_path):
    """Return the path of the ``.done`` sentinel file recording that the
    given submodule has already been run."""
    module_path = os.path.dirname(submodule_path)
    return os.path.join(
        os.path.dirname(module_path),
        '.{}_{}.done'.format(os.path.basename(module_path),
                             os.path.basename(submodule_path)))
def retry(times, delay_secs):
    """Decorator for retrying a method call.

    Args:
        times: How many times should we retry before giving up
        delay_secs: Delay in secs

    Returns: A callable that would return the last call outcome
    """

    def retry_decorator(func):
        """Wrap ``func`` (which should return a truthy/falsy outcome)."""

        def _wrapped(*args, **kwargs):
            # initial attempt, then up to `times` retries with a delay
            outcome = func(*args, **kwargs)
            for _ in range(times):
                if outcome:
                    break
                sleep(delay_secs)
                outcome = func(*args, **kwargs)
            return outcome

        return _wrapped

    return retry_decorator


def calculate_resource_checksum(resource):
    """Calculate a checksum for a resource.

    Returns the MD5 hex digest of the attached resource's content, or the
    digest of zero bytes when no resource is attached.
    """
    digest = hashlib.md5()
    path = hookenv.resource_get(resource)
    if path:
        with open(path, "rb") as f:
            digest.update(f.read())
    return digest.hexdigest()


def get_resource_checksum_db_key(checksum_prefix, resource):
    """Convert a resource name to a resource checksum database key."""
    return checksum_prefix + resource


def migrate_resource_checksums(checksum_prefix, snap_resources):
    """Migrate resource checksums from the old schema to the new one."""
    for resource in snap_resources:
        new_key = get_resource_checksum_db_key(checksum_prefix, resource)
        if db.get(new_key):
            continue
        path = hookenv.resource_get(resource)
        if path:
            # old key from charms.reactive.helpers.any_file_changed
            old_key = "reactive.files_changed." + path
            db.set(new_key, db.get(old_key))
        else:
            # No resource is attached. Previously, this meant no checksum
            # would be calculated and stored. But now we calculate it as if
            # it is a 0-byte resource, so let's go ahead and do that.
            db.set(new_key, hashlib.md5().hexdigest())
def check_resources_for_upgrade_needed(checksum_prefix, snap_resources):
    """Return True if any resource's current checksum differs from the
    checksum stored in the unit database."""
    hookenv.status_set("maintenance", "Checking resources")
    return any(
        calculate_resource_checksum(resource)
        != db.get(get_resource_checksum_db_key(checksum_prefix, resource))
        for resource in snap_resources
    )


def calculate_and_store_resource_checksums(checksum_prefix, snap_resources):
    """Recompute and persist the checksum of every snap resource."""
    for resource in snap_resources:
        db.set(get_resource_checksum_db_key(checksum_prefix, resource),
               calculate_resource_checksum(resource))


def get_ingress_address(endpoint_name, ignore_addresses=None):
    """Return the preferred ingress address for ``endpoint_name``.

    Addresses bound to overlay/virtual interfaces are excluded, any
    ``ignore_addresses`` are filtered out, and fan-network addresses are
    de-prioritised.
    """
    try:
        network_info = hookenv.network_get(endpoint_name)
    except NotImplementedError:
        network_info = {}

    if not network_info or "ingress-addresses" not in network_info:
        # if they don't have ingress-addresses they are running a juju that
        # doesn't support spaces, so just return the private address
        return hookenv.unit_get("private-address")

    # IPs bound to these interface prefixes must never be advertised
    virtual_prefixes = ("vxlan", "kube", "wg", "docker", "cali", "virbr",
                        "cni", "flannel")
    excluded_ips = []
    for bind in network_info["bind-addresses"]:
        if bind["interface-name"].startswith(virtual_prefixes):
            excluded_ips.extend(entry["value"] for entry in bind["addresses"])

    addresses = [ip for ip in network_info["ingress-addresses"]
                 if ip not in excluded_ips]

    if ignore_addresses:
        hookenv.log("ingress-addresses before filtering: {}".format(addresses))
        addresses = [item for item in addresses
                     if item not in ignore_addresses]
        hookenv.log("ingress-addresses after filtering: {}".format(addresses))

    # Need to prefer non-fan IP addresses due to various issues, e.g.
    # https://bugs.launchpad.net/charm-gcp-integrator/+bug/1822997
    # Fan typically likes to use IPs in the 240.0.0.0/4 block, so we'll
    # prioritize those last. Not technically correct, but good enough.
    try:
        def _is_fan(a):
            return int(a.partition(".")[0]) >= 240
        addresses = sorted(addresses, key=_is_fan)
    except Exception:
        # e.g. IPv6 addresses make int() raise; keep the original order
        hookenv.log(traceback.format_exc())

    return addresses[0]
def get_ingress_address6(endpoint_name):
    """Return the first IPv6 ingress address for ``endpoint_name``,
    or None if the endpoint has none (or spaces are unsupported).

    NOTE(review): the collapsed source made the loop's else-clause
    ambiguous; this implementation scans every ingress address before
    giving up, which matches the function's stated purpose.
    """
    try:
        network_info = hookenv.network_get(endpoint_name)
    except NotImplementedError:
        network_info = {}

    if not network_info or "ingress-addresses" not in network_info:
        return None

    for addr in network_info["ingress-addresses"]:
        ip_addr = ipaddress.ip_interface(addr).ip
        if ip_addr.version == 6:
            return str(ip_addr)
    return None


def service_restart(service_name):
    """Restart a system service, reporting progress via unit status."""
    hookenv.status_set("maintenance", "Restarting {0} service".format(service_name))
    host.service_restart(service_name)


def service_start(service_name):
    """Start a system service.

    Fix: the original body called host.service_stop(), contradicting both
    the function name and the "Starting ..." log message (copy-paste from
    service_stop); it now calls host.service_start().
    """
    hookenv.log("Starting {0} service.".format(service_name))
    host.service_start(service_name)


def service_stop(service_name):
    """Stop a system service."""
    hookenv.log("Stopping {0} service.".format(service_name))
    host.service_stop(service_name)


def arch():
    """Return the dpkg package architecture for this machine as a string
    (e.g. "amd64").

    Note: despite the original docstring's claim, no exception is raised
    for architectures unsupported by kubernetes; dpkg's answer is returned
    as-is.
    """
    # Get the package architecture for this system.
    architecture = check_output(["dpkg", "--print-architecture"]).rstrip()
    # Convert the binary result into a string.
    architecture = architecture.decode("utf-8")
    return architecture
def get_service_ip(service, namespace="kube-system", errors_fatal=True):
    """Return the clusterIP of a Kubernetes service, or None when the
    lookup fails and ``errors_fatal`` is False."""
    try:
        output = kubectl(
            "get", "service", "--namespace", namespace, service, "--output", "json"
        )
    except CalledProcessError:
        if errors_fatal:
            raise
        return None
    return json.loads(output.decode())["spec"]["clusterIP"]


def kubectl(*args):
    """Run a kubectl cli command with a config file. Returns stdout and throws
    an error if the command fails."""
    command = ["kubectl", "--kubeconfig=" + kubeclientconfig_path, *args]
    hookenv.log("Executing {}".format(command))
    return check_output(command)


def kubectl_success(*args):
    """Runs kubectl with the given args. Returns True if successful, False if
    not."""
    try:
        kubectl(*args)
    except CalledProcessError:
        return False
    return True


def kubectl_manifest(operation, manifest):
    """Wrap the kubectl creation command when using filepath resources
    :param operation - one of get, create, delete, replace
    :param manifest - filepath to the manifest
    """
    # Deletions are a special case: remove requested resources immediately
    if operation == "delete":
        return kubectl_success(operation, "-f", manifest, "--now")
    # Guard against an error re-creating the same manifest multiple times
    if operation == "create" and kubectl_success("get", "-f", manifest):
        hookenv.log("Skipping definition for {}".format(manifest))
        return True
    # Execute the requested command that did not match a special case
    return kubectl_success(operation, "-f", manifest)
def get_node_name():
    """Return this node's name: the FQDN on AWS, the hostname elsewhere
    (both lowercased)."""
    kubelet_extra_args = parse_extra_args("kubelet-extra-args")
    cloud_provider = kubelet_extra_args.get("cloud-provider", "")
    # a ready cloud endpoint overrides the extra-args setting
    provider_flags = (
        ("endpoint.aws.ready", "aws"),
        ("endpoint.gcp.ready", "gce"),
        ("endpoint.openstack.ready", "openstack"),
        ("endpoint.vsphere.ready", "vsphere"),
        ("endpoint.azure.ready", "azure"),
    )
    for flag, provider in provider_flags:
        if is_state(flag):
            cloud_provider = provider
            break
    if cloud_provider == "aws":
        return getfqdn().lower()
    return gethostname().lower()


def create_kubeconfig(
    kubeconfig,
    server,
    ca,
    key=None,
    certificate=None,
    user="ubuntu",
    context="juju-context",
    cluster="juju-cluster",
    password=None,
    token=None,
    keystone=False,
    aws_iam_cluster_id=None,
):
    """Create a configuration for Kubernetes based on path using the supplied
    arguments for values of the Kubernetes server, CA, key, certificate, user
    context and cluster.

    The file is written to ``<kubeconfig>.new`` and only renamed over the
    existing file when its content actually changed.
    """
    # Validate the authentication arguments up front.
    if not any((key, certificate, password, token)):
        raise ValueError("Missing authentication mechanism.")
    if key and not certificate:
        raise ValueError("Missing certificate.")
    if certificate and not key:
        raise ValueError("Missing key.")
    if token and password:
        # token and password are mutually exclusive. Error early if both are
        # present. The developer has requested an impossible situation.
        # see: kubectl config set-credentials --help
        raise ValueError("Token and Password are mutually exclusive.")

    old_kubeconfig = Path(kubeconfig)
    new_kubeconfig = Path(str(kubeconfig) + ".new")

    def _run(command_string):
        # all kubectl config invocations are plain shell-style strings
        check_call(split(command_string))

    # Create the config file with the address of the master server.
    _run(
        "kubectl config --kubeconfig={0} set-cluster {1} "
        "--server={2} --certificate-authority={3} --embed-certs=true".format(
            new_kubeconfig, cluster, server, ca
        )
    )
    # Delete old users
    _run("kubectl config --kubeconfig={0} unset users".format(new_kubeconfig))
    # Create the credentials using the client flags.
    cred_cmd = "kubectl config --kubeconfig={0} set-credentials {1} ".format(
        new_kubeconfig, user
    )
    if key and certificate:
        cred_cmd = (
            "{0} --client-key={1} --client-certificate={2} "
            "--embed-certs=true".format(cred_cmd, key, certificate)
        )
    if password:
        cred_cmd = "{0} --username={1} --password={2}".format(cred_cmd, user, password)
    # This is mutually exclusive from password. They will not work together.
    if token:
        cred_cmd = "{0} --token={1}".format(cred_cmd, token)
    _run(cred_cmd)
    # Create a default context with the cluster and make it current.
    _run(
        "kubectl config --kubeconfig={0} set-context {1} "
        "--cluster={2} --user={3}".format(new_kubeconfig, context, cluster, user)
    )
    _run("kubectl config --kubeconfig={0} use-context {1}".format(
        new_kubeconfig, context))

    if keystone:
        # create keystone user / context and switch to it
        _run("kubectl config --kubeconfig={0} "
             "set-credentials keystone-user".format(new_kubeconfig))
        _run(
            "kubectl config --kubeconfig={0} "
            "set-context --cluster={1} "
            "--user=keystone-user keystone".format(new_kubeconfig, cluster)
        )
        _run("kubectl config --kubeconfig={0} "
             "use-context keystone".format(new_kubeconfig))
        # manually add exec command until kubectl can do it for us
        # NOTE(review): the exact whitespace of these YAML literals was lost
        # when the source was collapsed; reconstructed from standard
        # kubeconfig layout — confirm against the rendered file.
        with open(new_kubeconfig, "r") as f:
            content = f.read()
        content = content.replace(
            """- name: keystone-user
  user: {}""",
            """- name: keystone-user
  user:
    exec:
      command: "/snap/bin/client-keystone-auth"
      apiVersion: "client.authentication.k8s.io/v1beta1"
""",
        )
        with open(new_kubeconfig, "w") as f:
            f.write(content)

    if aws_iam_cluster_id:
        # create aws-iam context
        _run(
            "kubectl config --kubeconfig={0} "
            "set-context --cluster={1} "
            "--user=aws-iam-user aws-iam-authenticator".format(
                new_kubeconfig, cluster
            )
        )
        # append a user for aws-iam
        # NOTE(review): the role placeholder rendered as "<>" in the
        # collapsed source; it likely originally named an ARN placeholder —
        # confirm before relying on it.
        _run(
            "kubectl --kubeconfig={0} config set-credentials "
            "aws-iam-user --exec-command=aws-iam-authenticator "
            '--exec-arg="token" --exec-arg="-i" --exec-arg="{1}" '
            '--exec-arg="-r" --exec-arg="<>" '
            "--exec-api-version=client.authentication.k8s.io/v1alpha1".format(
                new_kubeconfig, aws_iam_cluster_id
            )
        )
        # not going to use aws-iam context by default since we don't have
        # the desired arn. This will make the config not usable if copied.
        # cmd = 'kubectl config --kubeconfig={0} ' \
        #       'use-context aws-iam-authenticator'.format(new_kubeconfig)
        # check_call(split(cmd))

    # Only replace the live config when the content actually changed.
    changed = (not old_kubeconfig.exists()
               or new_kubeconfig.read_text() != old_kubeconfig.read_text())
    if changed:
        new_kubeconfig.rename(old_kubeconfig)


def parse_extra_args(config_key):
    """Parse a space-separated ``k=v`` config option into a dict; bare
    tokens map to the string "true"."""
    args = {}
    for element in hookenv.config().get(config_key, "").split():
        key, sep, value = element.partition("=")
        args[key] = value if sep else "true"
    return args


def configure_kubernetes_service(key, service, base_args, extra_args_key):
    """Apply base + extra + CIS args to a Kubernetes snap via ``snap set``."""
    db = unitdata.kv()

    prev_args_key = key + service
    prev_snap_args = db.get(prev_args_key) or {}

    merged = {}
    merged.update(base_args)
    merged.update(parse_extra_args(extra_args_key))

    # CIS benchmark action may inject kv config to pass failing tests. Merge
    # these after the func args as they should take precedence.
    cis_args = db.get("cis-" + service) or {}
    merged.update(cis_args)

    # Remove any args with 'None' values (all k8s args are 'k=v') and
    # construct an arg string for use by 'snap set'.
    arg_string = " ".join(
        '--%s="%s"' % item for item in merged.items() if item[1] is not None
    )

    # remove previous args by setting them to null, then set the new string
    snap_opts = {arg: "null" for arg in prev_snap_args}
    snap_opts["args"] = arg_string
    check_call(["snap", "set", service]
               + ["%s=%s" % opt for opt in snap_opts.items()])

    # Now that we've started doing snap configuration through the "args"
    # option, we should never need to clear previous args again.
    db.set(prev_args_key, {})
def _snap_common_path(component):
    """Return the SNAP_COMMON directory for a snap component."""
    return Path("/var/snap/{}/common".format(component))


def cloud_config_path(component):
    """Path of the component's cloud-config file."""
    return _snap_common_path(component) / "cloud-config.conf"


def _gcp_creds_path(component):
    """Path of the component's GCP credentials file."""
    return _snap_common_path(component) / "gcp-creds.json"


def _daemon_env_path(component):
    """Path of the component's daemon environment file."""
    return _snap_common_path(component) / "environment"


def _cloud_endpoint_ca_path(component):
    """Path of the component's cloud endpoint CA certificate."""
    return _snap_common_path(component) / "cloud-endpoint-ca.crt"


def encryption_config_path():
    """Path of the kube-apiserver encryption-at-rest config."""
    return (_snap_common_path("kube-apiserver")
            / "encryption" / "encryption_config.yaml")


def write_gcp_snap_config(component):
    """Write GCP credentials and cloud-config for a snap component and
    expose the creds file via the daemon environment."""
    # gcp requires additional credentials setup
    gcp = endpoint_from_flag("endpoint.gcp.ready")
    creds_path = _gcp_creds_path(component)
    with creds_path.open("w") as fp:
        # credentials are secret; restrict before writing
        os.fchmod(fp.fileno(), 0o600)
        fp.write(gcp.credentials)

    # create a cloud-config file that sets token-url to nil to make the
    # services use the creds env var instead of the metadata server, as
    # well as making the cluster multizone
    cloud_config_path(component).write_text(
        "[Global]\ntoken-url = nil\nmultizone = true\n"
    )

    env_path = _daemon_env_path(component)
    daemon_env = env_path.read_text() if env_path.exists() else ""
    if daemon_env and not daemon_env.endswith("\n"):
        daemon_env += "\n"
    if gcp_creds_env_key not in daemon_env:
        daemon_env += "{}={}\n".format(gcp_creds_env_key, creds_path)
        env_path.parent.mkdir(parents=True, exist_ok=True)
        env_path.write_text(daemon_env)


def generate_openstack_cloud_config():
    """Render the OpenStack cloud-config file content from the related
    integrator endpoint's data."""
    # openstack requires additional credentials setup
    openstack = endpoint_from_flag("endpoint.openstack.ready")

    lines = [
        "[Global]",
        "auth-url = {}".format(openstack.auth_url),
        "region = {}".format(openstack.region),
        "username = {}".format(openstack.username),
        "password = {}".format(openstack.password),
        "tenant-name = {}".format(openstack.project_name),
        "domain-name = {}".format(openstack.user_domain_name),
        "tenant-domain-name = {}".format(openstack.project_domain_name),
    ]
    if openstack.endpoint_tls_ca:
        lines.append("ca-file = /etc/config/endpoint-ca.cert")

    lines.extend(["", "[LoadBalancer]"])

    # Newer integrator charm will detect whether underlying OpenStack has
    # Octavia enabled so we can set this intelligently. If we're still
    # related to an older integrator, though, default to assuming Octavia
    # is available.
    use_octavia = openstack.has_octavia in (True, None)
    if use_octavia:
        lines.append("use-octavia = true")
    else:
        lines.append("use-octavia = false")
        lines.append("lb-provider = haproxy")

    for template, value in (
        ("subnet-id = {}", openstack.subnet_id),
        ("floating-network-id = {}", openstack.floating_network_id),
        ("lb-method = {}", openstack.lb_method),
    ):
        if value:
            lines.append(template.format(value))
    if openstack.internal_lb:
        lines.append("internal-lb = true")
    if openstack.manage_security_groups:
        lines.append(
            "manage-security-groups = {}".format(openstack.manage_security_groups)
        )

    bs_values = (openstack.bs_version, openstack.trust_device_path,
                 openstack.ignore_volume_az)
    if any(bs_values):
        lines.append("")
        lines.append("[BlockStorage]")
        if openstack.bs_version is not None:
            lines.append("bs-version = {}".format(openstack.bs_version))
        if openstack.trust_device_path is not None:
            lines.append("trust-device-path = {}".format(openstack.trust_device_path))
        if openstack.ignore_volume_az is not None:
            lines.append("ignore-volume-az = {}".format(openstack.ignore_volume_az))
    return "\n".join(lines) + "\n"
def write_azure_snap_config(component):
    """Write the Azure cloud-config JSON for a snap component from the
    related azure endpoint's data."""
    azure = endpoint_from_flag("endpoint.azure.ready")
    azure_config = {
        "useInstanceMetadata": True,
        "useManagedIdentityExtension": azure.managed_identity,
        "subscriptionId": azure.subscription_id,
        "resourceGroup": azure.resource_group,
        "location": azure.resource_group_location,
        "vnetName": azure.vnet_name,
        "vnetResourceGroup": azure.vnet_resource_group,
        "subnetName": azure.subnet_name,
        "securityGroupName": azure.security_group_name,
        "loadBalancerSku": "standard",
        "securityGroupResourceGroup": azure.security_group_resource_group,
        "aadClientId": azure.aad_client_id,
        "aadClientSecret": azure.aad_client_secret,
        "tenantId": azure.tenant_id,
    }
    cloud_config_path(component).write_text(json.dumps(azure_config))
def configure_kube_proxy(
    configure_prefix, api_servers, cluster_cidr, bind_address=None
):
    """Build and apply the kube-proxy snap arguments.

    The master is picked from ``api_servers`` by unit number so units are
    spread across the available API servers.
    """
    opts = {
        "cluster-cidr": cluster_cidr,
        "kubeconfig": kubeproxyconfig_path,
        "logtostderr": "true",
        "v": "0",
        "master": api_servers[get_unit_number() % len(api_servers)],
        "hostname-override": get_node_name(),
    }
    if bind_address:
        opts["bind-address"] = bind_address
    elif is_ipv6(cluster_cidr):
        opts["bind-address"] = "::"

    if host.is_container():
        # conntrack tuning is not possible inside a container
        opts["conntrack-max-per-core"] = "0"

    feature_gates = []
    if is_dual_stack(cluster_cidr):
        feature_gates.append("IPv6DualStack=true")

    # only the first matching cloud endpoint contributes a CSI gate
    for flag, gate in (
        ("endpoint.aws.ready", "CSIMigrationAWS=false"),
        ("endpoint.gcp.ready", "CSIMigrationGCE=false"),
        ("endpoint.azure.ready", "CSIMigrationAzureDisk=false"),
    ):
        if is_state(flag):
            feature_gates.append(gate)
            break

    opts["feature-gates"] = ",".join(feature_gates)

    configure_kubernetes_service(
        configure_prefix, "kube-proxy", opts, "proxy-extra-args"
    )


def get_unit_number():
    """Return this unit's numeric index (the N in ``app/N``)."""
    unit_name = hookenv.local_unit()
    return int(unit_name.split("/")[1])
cluster_cidr(): + """Return the cluster CIDR provided by the CNI""" + cni = endpoint_from_flag("cni.available") + if not cni: + return None + config = hookenv.config() + if "default-cni" in config: + # master + default_cni = config["default-cni"] + else: + # worker + kube_control = endpoint_from_flag("kube-control.dns.available") + if not kube_control: + return None + default_cni = kube_control.get_default_cni() + return cni.get_config(default=default_cni)["cidr"] + + +def is_dual_stack(cidrs): + """Detect IPv4/IPv6 dual stack from CIDRs""" + return {net.version for net in get_networks(cidrs)} == {4, 6} + + +def is_ipv4(cidrs): + """Detect IPv6 from CIDRs""" + return get_ipv4_network(cidrs) is not None + + +def is_ipv6(cidrs): + """Detect IPv6 from CIDRs""" + return get_ipv6_network(cidrs) is not None + + +def is_ipv6_preferred(cidrs): + """Detect if IPv6 is preffered from CIDRs""" + return get_networks(cidrs)[0].version == 6 + + +def get_networks(cidrs): + """Convert a comma-separated list of CIDRs to a list of networks.""" + if not cidrs: + return [] + return [ipaddress.ip_interface(cidr).network for cidr in cidrs.split(",")] + + +def get_ipv4_network(cidrs): + """Get the IPv4 network from the given CIDRs or None""" + return {net.version: net for net in get_networks(cidrs)}.get(4) + + +def get_ipv6_network(cidrs): + """Get the IPv6 network from the given CIDRs or None""" + return {net.version: net for net in get_networks(cidrs)}.get(6) + + +def enable_ipv6_forwarding(): + """Enable net.ipv6.conf.all.forwarding in sysctl if it is not already.""" + check_call(["sysctl", "net.ipv6.conf.all.forwarding=1"]) + + +def _as_address(addr_str): + try: + return ipaddress.ip_address(addr_str) + except ValueError: + return None + + +def get_bind_addrs(ipv4=True, ipv6=True): + try: + output = check_output(["ip", "-j", "-br", "addr", "show", "scope", "global"]) + except CalledProcessError: + # stderr will have any details, and go to the log + hookenv.log("Unable to determine 
global addresses", hookenv.ERROR) + return [] + + ignore_interfaces = ("lxdbr", "flannel", "cni", "virbr", "docker") + accept_versions = set() + if ipv4: + accept_versions.add(4) + if ipv6: + accept_versions.add(6) + + addrs = [] + for addr in json.loads(output.decode("utf8")): + if addr["operstate"].upper() != "UP" or any( + addr["ifname"].startswith(prefix) for prefix in ignore_interfaces + ): + continue + + for ifc in addr["addr_info"]: + local_addr = _as_address(ifc.get("local")) + if local_addr and local_addr.version in accept_versions: + addrs.append(str(local_addr)) + + return addrs + + +class InvalidVMwareHost(Exception): + pass + + +def _get_vmware_uuid(): + serial_id_file = "/sys/class/dmi/id/product_serial" + # The serial id from VMWare VMs comes in following format: + # VMware-42 28 13 f5 d4 20 71 61-5d b0 7b 96 44 0c cf 54 + try: + with open(serial_id_file, "r") as f: + serial_string = f.read().strip() + if "VMware-" not in serial_string: + hookenv.log( + "Unable to find VMware ID in " + "product_serial: {}".format(serial_string) + ) + raise InvalidVMwareHost + serial_string = ( + serial_string.split("VMware-")[1].replace(" ", "").replace("-", "") + ) + uuid = "%s-%s-%s-%s-%s" % ( + serial_string[0:8], + serial_string[8:12], + serial_string[12:16], + serial_string[16:20], + serial_string[20:32], + ) + except IOError as err: + hookenv.log("Unable to read UUID from sysfs: {}".format(err)) + uuid = "UNKNOWN" + + return uuid + + +def token_generator(length=32): + """Generate a random token for use in account tokens. 
+ + param: length - the length of the token to generate + """ + alpha = string.ascii_letters + string.digits + token = "".join(random.SystemRandom().choice(alpha) for _ in range(length)) + return token + + +def get_secret_names(): + """Return a dict of 'username: secret_id' for Charmed Kubernetes users.""" + try: + output = kubectl( + "get", + "secrets", + "-n", + AUTH_SECRET_NS, + "--field-selector", + "type={}".format(AUTH_SECRET_TYPE), + "-o", + "json", + ).decode("UTF-8") + except (CalledProcessError, FileNotFoundError): + # The api server may not be up, or we may be trying to run kubelet before + # the snap is installed. Send back an empty dict. + hookenv.log("Unable to get existing secrets", level=hookenv.WARNING) + return {} + + secrets = json.loads(output) + secret_names = {} + if "items" in secrets: + for secret in secrets["items"]: + try: + secret_id = secret["metadata"]["name"] + username_b64 = secret["data"]["username"].encode("UTF-8") + except (KeyError, TypeError): + # CK secrets will have populated 'data', but not all secrets do + continue + secret_names[b64decode(username_b64).decode("UTF-8")] = secret_id + return secret_names + + +def generate_rfc1123(length=10): + """Generate a random string compliant with RFC 1123. 
+ + https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#dns-subdomain-names + + param: length - the length of the string to generate + """ + length = 253 if length > 253 else length + valid_chars = string.ascii_lowercase + string.digits + rand_str = "".join(random.SystemRandom().choice(valid_chars) for _ in range(length)) + return rand_str + + +def create_secret(token, username, user, groups=None): + secrets = get_secret_names() + if username in secrets: + # Use existing secret ID if one exists for our username + secret_id = secrets[username] + else: + # secret IDs must be unique and rfc1123 compliant + sani_name = re.sub("[^0-9a-z.-]+", "-", user.lower()) + secret_id = "auth-{}-{}".format(sani_name, generate_rfc1123(10)) + + # The authenticator expects tokens to be in the form user::token + token_delim = "::" + if token_delim not in token: + token = "{}::{}".format(user, token) + + context = { + "type": AUTH_SECRET_TYPE, + "secret_name": secret_id, + "secret_namespace": AUTH_SECRET_NS, + "user": b64encode(user.encode("UTF-8")).decode("utf-8"), + "username": b64encode(username.encode("UTF-8")).decode("utf-8"), + "password": b64encode(token.encode("UTF-8")).decode("utf-8"), + "groups": b64encode(groups.encode("UTF-8")).decode("utf-8") if groups else "", + } + with tempfile.NamedTemporaryFile() as tmp_manifest: + render("cdk.auth-webhook-secret.yaml", tmp_manifest.name, context=context) + + if kubectl_manifest("apply", tmp_manifest.name): + hookenv.log("Created secret for {}".format(username)) + return True + else: + hookenv.log("WARN: Unable to create secret for {}".format(username)) + return False + + +def get_secret_password(username): + """Get the password for the given user from the secret that CK created.""" + try: + output = kubectl( + "get", + "secrets", + "-n", + AUTH_SECRET_NS, + "--field-selector", + "type={}".format(AUTH_SECRET_TYPE), + "-o", + "json", + ).decode("UTF-8") + except CalledProcessError: + # NB: apiserver probably isn't 
up. This can happen on boostrap or upgrade + # while trying to build kubeconfig files. If we need the 'admin' token during + # this time, pull it directly out of the kubeconfig file if possible. + token = None + if username == "admin": + admin_kubeconfig = Path("/root/.kube/config") + if admin_kubeconfig.exists(): + data = yaml.safe_load(admin_kubeconfig.read_text()) + try: + token = data["users"][0]["user"]["token"] + except (KeyError, IndexError, TypeError): + pass + return token + except FileNotFoundError: + # New deployments may ask for a token before the kubectl snap is installed. + # Give them nothing! + return None + + secrets = json.loads(output) + if "items" in secrets: + for secret in secrets["items"]: + try: + data_b64 = secret["data"] + password_b64 = data_b64["password"].encode("UTF-8") + username_b64 = data_b64["username"].encode("UTF-8") + except (KeyError, TypeError): + # CK authn secrets will have populated 'data', but not all secrets do + continue + + password = b64decode(password_b64).decode("UTF-8") + secret_user = b64decode(username_b64).decode("UTF-8") + if username == secret_user: + return password + return None + + +def get_node_ip(): + """Determines the preferred NodeIP value for this node.""" + cidr = cluster_cidr() + if not cidr: + return None + if is_ipv6_preferred(cidr): + return get_ingress_address6("kube-control") + else: + return get_ingress_address("kube-control") + + +def merge_kubelet_extra_config(config, extra_config): + """Updates config to include the contents of extra_config. This is done + recursively to allow deeply nested dictionaries to be merged. + + This is destructive: it modifies the config dict that is passed in. 
+ """ + for k, extra_config_value in extra_config.items(): + if isinstance(extra_config_value, dict): + config_value = config.setdefault(k, {}) + merge_kubelet_extra_config(config_value, extra_config_value) + else: + config[k] = extra_config_value + + +def workaround_lxd_kernel_params(): + """ + Workaround for kubelet not starting in LXD when kernel params are not set + to the desired values. + """ + if host.is_container(): + hookenv.log("LXD detected, faking kernel params via bind mounts") + root_dir = "/root/cdk/lxd-kernel-params" + os.makedirs(root_dir, exist_ok=True) + # Kernel params taken from: + # https://github.com/kubernetes/kubernetes/blob/v1.22.0/pkg/kubelet/cm/container_manager_linux.go#L421-L426 + # https://github.com/kubernetes/kubernetes/blob/v1.22.0/pkg/util/sysctl/sysctl.go#L30-L64 + params = { + "vm.overcommit_memory": 1, + "vm.panic_on_oom": 0, + "kernel.panic": 10, + "kernel.panic_on_oops": 1, + "kernel.keys.root_maxkeys": 1000000, + "kernel.keys.root_maxbytes": 1000000 * 25, + } + for param, param_value in params.items(): + fake_param_path = root_dir + "/" + param + with open(fake_param_path, "w") as f: + f.write(str(param_value)) + real_param_path = "/proc/sys/" + param.replace(".", "/") + host.fstab_add(fake_param_path, real_param_path, "none", "bind") + subprocess.check_call(["mount", "-a"]) + else: + hookenv.log("LXD not detected, not faking kernel params") + + +def get_sandbox_image_uri(registry): + return "{}/pause:3.6".format(registry) + + +def configure_kubelet(dns_domain, dns_ip, registry, taints=None, has_xcp=False): + kubelet_opts = {} + kubelet_opts["kubeconfig"] = kubelet_kubeconfig_path + kubelet_opts["v"] = "0" + kubelet_opts["logtostderr"] = "true" + kubelet_opts["node-ip"] = get_node_ip() + + container_runtime = endpoint_from_flag("endpoint.container-runtime.available") + + kubelet_opts["container-runtime"] = container_runtime.get_runtime() + if kubelet_opts["container-runtime"] == "remote": + 
kubelet_opts["container-runtime-endpoint"] = container_runtime.get_socket() + + feature_gates = {} + + kubelet_cloud_config_path = cloud_config_path("kubelet") + if has_xcp: + kubelet_opts["cloud-provider"] = "external" + elif is_state("endpoint.aws.ready"): + kubelet_opts["cloud-provider"] = "aws" + feature_gates["CSIMigrationAWS"] = False + elif is_state("endpoint.gcp.ready"): + kubelet_opts["cloud-provider"] = "gce" + kubelet_opts["cloud-config"] = str(kubelet_cloud_config_path) + feature_gates["CSIMigrationGCE"] = False + elif is_state("endpoint.openstack.ready"): + kubelet_opts["cloud-provider"] = "external" + elif is_state("endpoint.vsphere.joined"): + # vsphere just needs to be joined on the worker (vs 'ready') + kubelet_opts["cloud-provider"] = "vsphere" + # NB: vsphere maps node product-id to its uuid (no config file needed). + uuid = _get_vmware_uuid() + kubelet_opts["provider-id"] = "vsphere://{}".format(uuid) + elif is_state("endpoint.azure.ready"): + azure = endpoint_from_flag("endpoint.azure.ready") + kubelet_opts["cloud-provider"] = "azure" + kubelet_opts["cloud-config"] = str(kubelet_cloud_config_path) + kubelet_opts["provider-id"] = azure.vm_id + feature_gates["CSIMigrationAzureDisk"] = False + + # Put together the KubeletConfiguration data + kubelet_config = { + "apiVersion": "kubelet.config.k8s.io/v1beta1", + "kind": "KubeletConfiguration", + "address": "0.0.0.0", + "authentication": { + "anonymous": {"enabled": False}, + "x509": {"clientCAFile": str(ca_crt_path)}, + }, + # NB: authz webhook config tells the kubelet to ask the api server + # if a request is authorized; it is not related to the authn + # webhook config of the k8s master services. 
+ "authorization": {"mode": "Webhook"}, + "clusterDomain": dns_domain, + "failSwapOn": False, + "port": 10250, + "protectKernelDefaults": True, + "readOnlyPort": 0, + "tlsCertFile": str(server_crt_path), + "tlsPrivateKeyFile": str(server_key_path), + } + if dns_ip: + kubelet_config["clusterDNS"] = [dns_ip] + + # Handle feature gates + if get_version("kubelet") >= (1, 19): + # NB: required for CIS compliance + feature_gates["RotateKubeletServerCertificate"] = True + if is_state("kubernetes-worker.gpu.enabled"): + feature_gates["DevicePlugins"] = True + if feature_gates: + kubelet_config["featureGates"] = feature_gates + if is_dual_stack(cluster_cidr()): + feature_gates = kubelet_config.setdefault("featureGates", {}) + feature_gates["IPv6DualStack"] = True + + # Workaround for DNS on bionic + # https://github.com/juju-solutions/bundle-canonical-kubernetes/issues/655 + resolv_path = os.path.realpath("/etc/resolv.conf") + if resolv_path == "/run/systemd/resolve/stub-resolv.conf": + kubelet_config["resolvConf"] = "/run/systemd/resolve/resolv.conf" + + # Add kubelet-extra-config. This needs to happen last so that it + # overrides any config provided by the charm. + kubelet_extra_config = hookenv.config("kubelet-extra-config") + kubelet_extra_config = yaml.safe_load(kubelet_extra_config) + merge_kubelet_extra_config(kubelet_config, kubelet_extra_config) + + # Render the file and configure Kubelet to use it + os.makedirs("/root/cdk/kubelet", exist_ok=True) + with open("/root/cdk/kubelet/config.yaml", "w") as f: + f.write("# Generated by kubernetes-worker charm, do not edit\n") + yaml.dump(kubelet_config, f) + kubelet_opts["config"] = "/root/cdk/kubelet/config.yaml" + + # If present, ensure kubelet gets the pause container from the configured + # registry. When not present, kubelet uses a default image location + # (currently k8s.gcr.io/pause:3.4.1). 
+ if registry: + kubelet_opts["pod-infra-container-image"] = get_sandbox_image_uri(registry) + + if taints: + kubelet_opts["register-with-taints"] = ",".join(taints) + + workaround_lxd_kernel_params() + + configure_kubernetes_service( + "kubernetes-common.prev-args.", "kubelet", kubelet_opts, "kubelet-extra-args" + ) + + +def configure_default_cni(default_cni): + """Set the default CNI configuration to be used by CNI clients + (kubelet, containerd). + + CNI clients choose whichever CNI config in /etc/cni/net.d/ is + alphabetically first, so we accomplish this by creating a file named + /etc/cni/net.d/05-default.conflist, which is alphabetically earlier than + typical CNI config names, e.g. 10-flannel.conflist and 10-calico.conflist + + The created 05-default.conflist file is a symlink to whichever CNI config + is actually going to be used. + """ + # Clean up current default + cni_conf_dir = "/etc/cni/net.d" + for filename in os.listdir(cni_conf_dir): + if filename.startswith("05-default."): + os.remove(cni_conf_dir + "/" + filename) + + # Set new default + cni = endpoint_from_flag("cni.available") + cni_conf = cni.get_config(default=default_cni) + source = cni_conf["cni-conf-file"] + dest = cni_conf_dir + "/" + "05-default." + source.split(".")[-1] + os.symlink(source, dest) diff --git a/kubeapi-load-balancer/lib/charms/layer/nagios.py b/kubeapi-load-balancer/lib/charms/layer/nagios.py new file mode 100644 index 0000000..f6ad998 --- /dev/null +++ b/kubeapi-load-balancer/lib/charms/layer/nagios.py @@ -0,0 +1,60 @@ +from pathlib import Path + +NAGIOS_PLUGINS_DIR = '/usr/lib/nagios/plugins' + + +def install_nagios_plugin_from_text(text, plugin_name): + """ Install a nagios plugin. 
+ + Args: + text: Plugin source code (str) + plugin_name: Name of the plugin in nagios + + Returns: Full path to installed plugin + """ + dest_path = Path(NAGIOS_PLUGINS_DIR) / plugin_name + if dest_path.exists(): + # we could complain here, test the files are the same contents, or + # just bail. Idempotency is a big deal in Juju, so I'd like to be + # ok with being called with the same file multiple times, but we + # certainly want to catch the case where multiple layers are using + # the same filename for their nagios checks. + dest = dest_path.read_text() + if dest == text: + # same file + return dest_path + # different file contents! + # maybe someone changed options or something so we need to write + # it again + + dest_path.write_text(text) + dest_path.chmod(0o755) + + return dest_path + + +def install_nagios_plugin_from_file(source_file_path, plugin_name): + """ Install a nagios plugin. + + Args: + source_file_path: Path to plugin source file + plugin_name: Name of the plugin in nagios + + Returns: Full path to installed plugin + """ + + return install_nagios_plugin_from_text(Path(source_file_path).read_text(), + plugin_name) + + +def remove_nagios_plugin(plugin_name): + """ Remove a nagios plugin. 
+ + Args: + plugin_name: Name of the plugin in nagios + + Returns: None + """ + dest_path = Path(NAGIOS_PLUGINS_DIR) / plugin_name + if dest_path.exists(): + dest_path.unlink() diff --git a/kubeapi-load-balancer/lib/charms/layer/nginx.py b/kubeapi-load-balancer/lib/charms/layer/nginx.py new file mode 100644 index 0000000..7194400 --- /dev/null +++ b/kubeapi-load-balancer/lib/charms/layer/nginx.py @@ -0,0 +1,74 @@ +from charmhelpers.core.templating import render +from charmhelpers.core import hookenv +from charmhelpers.core import host + +import toml +import os + + +def load_site(): + if not os.path.isfile('site.toml'): + return {} + + with open('site.toml') as fp: + conf = toml.loads(fp.read()) + + return conf + + +def get_app_path(): + site = load_site() + if 'app_path' in site: + return site['app_path'] + return '/srv/app' + + +def remove_default_site(): + """ + Remove the default enabled + site. + + :return: Boolean + """ + site_path = '/etc/nginx/sites-enabled/default' + if os.path.isfile(site_path): + os.remove(site_path) + host.service_reload('nginx', restart_on_failure=True) + return True + + return False + + +def configure_site(site, template, **kwargs): + """ configures vhost + + Arguments: + site: Site name + template: template to process in templates/ + **kwargs: additional dict items to append to template variables exposed + through the site.toml + """ + hookenv.status_set('maintenance', 'Configuring site {}'.format(site)) + + config = hookenv.config() + context = load_site() + context['host'] = config['host'] + context['port'] = config['port'] + context.update(**kwargs) + conf_path = '/etc/nginx/sites-available/{}'.format(site) + if os.path.exists(conf_path): + os.remove(conf_path) + render(source=template, + target=conf_path, + context=context) + + symlink_path = '/etc/nginx/sites-enabled/{}'.format(site) + if os.path.exists(symlink_path): + os.unlink(symlink_path) + os.symlink(conf_path, symlink_path) + hookenv.log('Wrote vhost config {} to 
{}'.format(context, template), + 'info') + + if not remove_default_site(): + host.service_reload('nginx', restart_on_failure=True) + hookenv.status_set('active', '') diff --git a/kubeapi-load-balancer/lib/charms/layer/options.py b/kubeapi-load-balancer/lib/charms/layer/options.py new file mode 100644 index 0000000..d3f273f --- /dev/null +++ b/kubeapi-load-balancer/lib/charms/layer/options.py @@ -0,0 +1,26 @@ +import os +from pathlib import Path + +import yaml + + +_CHARM_PATH = Path(os.environ.get('JUJU_CHARM_DIR', '.')) +_DEFAULT_FILE = _CHARM_PATH / 'layer.yaml' +_CACHE = {} + + +def get(section=None, option=None, layer_file=_DEFAULT_FILE): + if option and not section: + raise ValueError('Cannot specify option without section') + + layer_file = (_CHARM_PATH / layer_file).resolve() + if layer_file not in _CACHE: + with layer_file.open() as fp: + _CACHE[layer_file] = yaml.safe_load(fp.read()) + + data = _CACHE[layer_file].get('options', {}) + if section: + data = data.get(section, {}) + if option: + data = data.get(option) + return data diff --git a/kubeapi-load-balancer/lib/charms/layer/status.py b/kubeapi-load-balancer/lib/charms/layer/status.py new file mode 100644 index 0000000..95b2997 --- /dev/null +++ b/kubeapi-load-balancer/lib/charms/layer/status.py @@ -0,0 +1,189 @@ +import inspect +import errno +import subprocess +import yaml +from enum import Enum +from functools import wraps +from pathlib import Path + +from charmhelpers.core import hookenv +from charms import layer + + +_orig_call = subprocess.call +_statuses = {'_initialized': False, + '_finalized': False} + + +class WorkloadState(Enum): + """ + Enum of the valid workload states. 
+ + Valid options are: + + * `WorkloadState.MAINTENANCE` + * `WorkloadState.BLOCKED` + * `WorkloadState.WAITING` + * `WorkloadState.ACTIVE` + """ + # note: order here determines precedence of state + MAINTENANCE = 'maintenance' + BLOCKED = 'blocked' + WAITING = 'waiting' + ACTIVE = 'active' + + +def maintenance(message): + """ + Set the status to the `MAINTENANCE` state with the given operator message. + + # Parameters + `message` (str): Message to convey to the operator. + """ + status_set(WorkloadState.MAINTENANCE, message) + + +def maint(message): + """ + Shorthand alias for + [maintenance](status.md#charms.layer.status.maintenance). + + # Parameters + `message` (str): Message to convey to the operator. + """ + maintenance(message) + + +def blocked(message): + """ + Set the status to the `BLOCKED` state with the given operator message. + + # Parameters + `message` (str): Message to convey to the operator. + """ + status_set(WorkloadState.BLOCKED, message) + + +def waiting(message): + """ + Set the status to the `WAITING` state with the given operator message. + + # Parameters + `message` (str): Message to convey to the operator. + """ + status_set(WorkloadState.WAITING, message) + + +def active(message): + """ + Set the status to the `ACTIVE` state with the given operator message. + + # Parameters + `message` (str): Message to convey to the operator. + """ + status_set(WorkloadState.ACTIVE, message) + + +def status_set(workload_state, message): + """ + Set the status to the given workload state with a message. + + # Parameters + `workload_state` (WorkloadState or str): State of the workload. Should be + a [WorkloadState](status.md#charms.layer.status.WorkloadState) enum + member, or the string value of one of those members. + `message` (str): Message to convey to the operator. 
+ """ + if not isinstance(workload_state, WorkloadState): + workload_state = WorkloadState(workload_state) + if workload_state is WorkloadState.MAINTENANCE: + _status_set_immediate(workload_state, message) + return + layer = _find_calling_layer() + _statuses.setdefault(workload_state, []).append((layer, message)) + if not _statuses['_initialized'] or _statuses['_finalized']: + # We either aren't initialized, so the finalizer may never be run, + # or the finalizer has already run, so it won't run again. In either + # case, we need to manually invoke it to ensure the status gets set. + _finalize() + + +def _find_calling_layer(): + for frame in inspect.stack(): + # switch to .filename when trusty (Python 3.4) is EOL + fn = Path(frame[1]) + if fn.parent.stem not in ('reactive', 'layer', 'charms'): + continue + layer_name = fn.stem + if layer_name == 'status': + continue # skip our own frames + return layer_name + return None + + +def _initialize(): + if not _statuses['_initialized']: + if layer.options.get('status', 'patch-hookenv'): + _patch_hookenv() + hookenv.atexit(_finalize) + _statuses['_initialized'] = True + + +def _finalize(): + if _statuses['_initialized']: + # If we haven't been initialized, we can't truly be finalized. + # This makes things more efficient if an action sets a status + # but subsequently starts the reactive bus. 
+ _statuses['_finalized'] = True + charm_name = hookenv.charm_name() + charm_dir = Path(hookenv.charm_dir()) + with charm_dir.joinpath('layer.yaml').open() as fp: + includes = yaml.safe_load(fp.read()).get('includes', []) + layer_order = includes + [charm_name] + + for workload_state in WorkloadState: + if workload_state not in _statuses: + continue + if not _statuses[workload_state]: + continue + + def _get_key(record): + layer_name, message = record + if layer_name in layer_order: + return layer_order.index(layer_name) + else: + return 0 + + sorted_statuses = sorted(_statuses[workload_state], key=_get_key) + layer_name, message = sorted_statuses[-1] + _status_set_immediate(workload_state, message) + break + + +def _status_set_immediate(workload_state, message): + workload_state = workload_state.value + try: + hookenv.log('status-set: {}: {}'.format(workload_state, message), + hookenv.INFO) + ret = _orig_call(['status-set', workload_state, message]) + if ret == 0: + return + except OSError as e: + # ignore status-set not available on older controllers + if e.errno != errno.ENOENT: + raise + + +def _patch_hookenv(): + # we can't patch hookenv.status_set directly because other layers may have + # already imported it into their namespace, so we have to patch sp.call + subprocess.call = _patched_call + + +@wraps(_orig_call) +def _patched_call(cmd, *args, **kwargs): + if not isinstance(cmd, list) or cmd[0] != 'status-set': + return _orig_call(cmd, *args, **kwargs) + _, workload_state, message = cmd + status_set(workload_state, message) + return 0 # make hookenv.status_set not emit spurious failure logs diff --git a/kubeapi-load-balancer/lib/charms/layer/tls_client.py b/kubeapi-load-balancer/lib/charms/layer/tls_client.py new file mode 100644 index 0000000..b2980dc --- /dev/null +++ b/kubeapi-load-balancer/lib/charms/layer/tls_client.py @@ -0,0 +1,61 @@ +# Copyright 2016-2017 Canonical Ltd. +# +# This file is part of the tls-client layer for Juju. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from charmhelpers.core.hookenv import log +from charmhelpers.core import unitdata + +from charms.reactive import remove_state +from charms.reactive import endpoint_from_flag + + +def reset_certificate_write_flag(cert_type): + """ + Reset the certificate written flag so notification will work on the next + write cert_type must be 'server', 'client', or 'ca' to indicate type of + certificate + """ + if cert_type not in ['server', 'client', 'ca']: + log('Unknown certificate type!') + else: + remove_state('tls_client.{0}.certificate.written'.format(cert_type)) + + +def request_server_cert(common_name, sans=None, crt_path=None, key_path=None): + tls = endpoint_from_flag('certificates.available') + tls.request_server_cert(common_name, sans) + if not crt_path and not key_path: + return + kv = unitdata.kv() + cert_paths = kv.get('layer.tls-client.cert-paths', {}) + cert_paths.setdefault('server', {})[common_name] = { + 'crt': str(crt_path), + 'key': str(key_path), + } + kv.set('layer.tls-client.cert-paths', cert_paths) + + +def request_client_cert(common_name, sans=None, crt_path=None, key_path=None): + tls = endpoint_from_flag('certificates.available') + tls.request_client_cert(common_name, sans) + if not crt_path and not key_path: + return + kv = unitdata.kv() + cert_paths = kv.get('layer.tls-client.cert-paths', {}) + cert_paths.setdefault('client', {})[common_name] = { + 'crt': str(crt_path), + 'key': str(key_path), + } + 
kv.set('layer.tls-client.cert-paths', cert_paths) diff --git a/kubeapi-load-balancer/lib/charms/leadership.py b/kubeapi-load-balancer/lib/charms/leadership.py new file mode 100644 index 0000000..d2a95fa --- /dev/null +++ b/kubeapi-load-balancer/lib/charms/leadership.py @@ -0,0 +1,68 @@ +# Copyright 2015-2016 Canonical Ltd. +# +# This file is part of the Leadership Layer for Juju. +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License version 3, as +# published by the Free Software Foundation. +# +# This program is distributed in the hope that it will be useful, but +# WITHOUT ANY WARRANTY; without even the implied warranties of +# MERCHANTABILITY, SATISFACTORY QUALITY, or FITNESS FOR A PARTICULAR +# PURPOSE. See the GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . + +from charmhelpers.core import hookenv +from charmhelpers.core import unitdata + +from charms import reactive +from charms.reactive import not_unless + + +__all__ = ['leader_get', 'leader_set'] + + +@not_unless('leadership.is_leader') +def leader_set(*args, **kw): + '''Change leadership settings, per charmhelpers.core.hookenv.leader_set. + + Settings may either be passed in as a single dictionary, or using + keyword arguments. All values must be strings. + + The leadership.set.{key} reactive state will be set while the + leadership hook environment setting remains set. + + Changed leadership settings will set the leadership.changed.{key} + and leadership.changed states. These states will remain set until + the following hook. + + These state changes take effect immediately on the leader, and + in future hooks run on non-leaders. In this way both leaders and + non-leaders can share handlers, waiting on these states. 
+ ''' + if args: + if len(args) > 1: + raise TypeError('leader_set() takes 1 positional argument but ' + '{} were given'.format(len(args))) + else: + settings = dict(args[0]) + else: + settings = {} + settings.update(kw) + previous = unitdata.kv().getrange('leadership.settings.', strip=True) + + for key, value in settings.items(): + if value != previous.get(key): + reactive.set_state('leadership.changed.{}'.format(key)) + reactive.set_state('leadership.changed') + reactive.helpers.toggle_state('leadership.set.{}'.format(key), + value is not None) + hookenv.leader_set(settings) + unitdata.kv().update(settings, prefix='leadership.settings.') + + +def leader_get(attribute=None): + '''Return leadership settings, per charmhelpers.core.hookenv.leader_get.''' + return hookenv.leader_get(attribute) diff --git a/kubeapi-load-balancer/lib/debug_script.py b/kubeapi-load-balancer/lib/debug_script.py new file mode 100644 index 0000000..e156924 --- /dev/null +++ b/kubeapi-load-balancer/lib/debug_script.py @@ -0,0 +1,8 @@ +import os + +dir = os.environ["DEBUG_SCRIPT_DIR"] + + +def open_file(path, *args, **kwargs): + """ Open a file within the debug script dir """ + return open(os.path.join(dir, path), *args, **kwargs) diff --git a/kubeapi-load-balancer/lib/nginxlib.py b/kubeapi-load-balancer/lib/nginxlib.py new file mode 100644 index 0000000..1bd5e73 --- /dev/null +++ b/kubeapi-load-balancer/lib/nginxlib.py @@ -0,0 +1,4 @@ +from warnings import warn +from charms.layer.nginx import * # noqa + +warn('nginxlib is being deprecated, use charms.layer.nginx instead') diff --git a/kubeapi-load-balancer/make_docs b/kubeapi-load-balancer/make_docs new file mode 100644 index 0000000..dcd4c1f --- /dev/null +++ b/kubeapi-load-balancer/make_docs @@ -0,0 +1,20 @@ +#!.tox/py3/bin/python + +import os +import sys +from shutil import rmtree +from unittest.mock import patch + +import pydocmd.__main__ + + +with patch('charmhelpers.core.hookenv.metadata') as metadata: + sys.path.insert(0, 'lib') + 
sys.path.insert(1, 'reactive') + print(sys.argv) + if len(sys.argv) == 1: + sys.argv.extend(['build']) + pydocmd.__main__.main() + rmtree('_build') + if os.path.exists('.unit-state.db'): + os.remove('.unit-state.db') diff --git a/kubeapi-load-balancer/manifest.yaml b/kubeapi-load-balancer/manifest.yaml new file mode 100644 index 0000000..ae47caa --- /dev/null +++ b/kubeapi-load-balancer/manifest.yaml @@ -0,0 +1,27 @@ +analysis: + attributes: + - name: language + result: python + - name: framework + result: reactive +bases: +- architectures: + - amd64 + - s390x + - arm64 + channel: '20.04' + name: ubuntu +- architectures: + - amd64 + - s390x + - arm64 + channel: '22.04' + name: ubuntu +- architectures: + - amd64 + - s390x + - arm64 + channel: '18.04' + name: ubuntu +charmtool-started-at: '2023-05-22T18:49:53.204468Z' +charmtool-version: 2.8.2 diff --git a/kubeapi-load-balancer/metadata.yaml b/kubeapi-load-balancer/metadata.yaml new file mode 100644 index 0000000..be858c8 --- /dev/null +++ b/kubeapi-load-balancer/metadata.yaml @@ -0,0 +1,40 @@ +"name": "kubeapi-load-balancer" +"summary": |- + Nginx Load Balancer +"maintainers": +- "Tim Van Steenburgh " +- "George Kraft " +- "Rye Terrell " +- "Konstantinos Tsakalozos " +- "Charles Butler " +- "Matthew Bruzek " +"description": | + A round robin Nginx load balancer to distribute traffic for kubernetes apiservers. 
+"tags": +- "application" +- "nginx" +- "misc" +"series": +- "focal" +- "jammy" +- "bionic" +- "xenial" +"requires": + "certificates": + "interface": "tls-certificates" + "ha": + "interface": "hacluster" + "apiserver": + "interface": "http" +"provides": + "nrpe-external-master": + "interface": "nrpe-external-master" + "scope": "container" + "website": + "interface": "http" + "loadbalancer": + "interface": "public-address" + "lb-consumers": + "interface": "loadbalancer" +"docs": "https://discourse.charmhub.io/t/kubeapi-load-balancer-docs-index/6213" +"subordinate": !!bool "false" diff --git a/kubeapi-load-balancer/metrics.yaml b/kubeapi-load-balancer/metrics.yaml new file mode 100644 index 0000000..0fcb3c1 --- /dev/null +++ b/kubeapi-load-balancer/metrics.yaml @@ -0,0 +1,2 @@ +metrics: + juju-units: {} diff --git a/kubeapi-load-balancer/pydocmd.yml b/kubeapi-load-balancer/pydocmd.yml new file mode 100644 index 0000000..ab3b2ef --- /dev/null +++ b/kubeapi-load-balancer/pydocmd.yml @@ -0,0 +1,16 @@ +site_name: 'Status Management Layer' + +generate: + - status.md: + - charms.layer.status.WorkloadState + - charms.layer.status.maintenance + - charms.layer.status.maint + - charms.layer.status.blocked + - charms.layer.status.waiting + - charms.layer.status.active + - charms.layer.status.status_set + +pages: + - Status Management Layer: status.md + +gens_dir: docs diff --git a/kubeapi-load-balancer/pyproject.toml b/kubeapi-load-balancer/pyproject.toml new file mode 100644 index 0000000..db0dcd0 --- /dev/null +++ b/kubeapi-load-balancer/pyproject.toml @@ -0,0 +1,3 @@ +[tool.black] +line-length=120 +target-version=['py35'] diff --git a/kubeapi-load-balancer/reactive/__init__.py b/kubeapi-load-balancer/reactive/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/kubeapi-load-balancer/reactive/apt.py b/kubeapi-load-balancer/reactive/apt.py new file mode 100644 index 0000000..8832296 --- /dev/null +++ b/kubeapi-load-balancer/reactive/apt.py @@ -0,0 +1,158 @@ +# 
Copyright 2015-2020 Canonical Ltd. +# +# This file is part of the Apt layer for Juju. +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License version 3, as +# published by the Free Software Foundation. +# +# This program is distributed in the hope that it will be useful, but +# WITHOUT ANY WARRANTY; without even the implied warranties of +# MERCHANTABILITY, SATISFACTORY QUALITY, or FITNESS FOR A PARTICULAR +# PURPOSE. See the GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . + +''' +charms.reactive helpers for dealing with deb packages. + +Add apt package sources using add_source(). Queue deb packages for +installation with install(). Configure and work with your software +once the apt.installed.{packagename} flag is set. +''' +import os.path +import subprocess +import re + +from charmhelpers import fetch +from charmhelpers.core import hookenv +from charmhelpers.core.hookenv import DEBUG, ERROR, WARNING +from charms import layer +from charms.layer import status +from charms import reactive +from charms.reactive import when, when_not + +import charms.apt + + +@when('apt.needs_update') +def update(): + charms.apt.update() + + +@when('apt.queued_installs') +@when_not('apt.needs_update') +def install_queued(): + charms.apt.install_queued() + + +@when_not('apt.queued_installs') +def ensure_package_status(): + charms.apt.ensure_package_status() + + +def filter_installed_packages(packages): + # Don't use fetch.filter_installed_packages, as it depends on python-apt + # and not available if the basic layer's use_site_packages option is off + cmd = ['dpkg-query', '--show', r'--showformat=${Package}\n'] + installed = set(subprocess.check_output(cmd, universal_newlines=True).split()) + + # list of packages that are not installed + not_installed = set(packages) - installed + + # now we want to 
check for any regex in the installation of the packages + not_installed_iterable = not_installed.copy() + for pkg in not_installed_iterable: + # grab the pattern that we want to match against the packages + p = re.compile(pkg) + for pkg2 in installed: + matched = p.search(pkg2) + if matched: + not_installed.remove(pkg) + break + + return not_installed + + +def clear_removed_package_flags(): + """On hook startup, clear install flags for removed packages.""" + removed = filter_installed_packages(charms.apt.installed()) + if removed: + hookenv.log('{} missing packages ({})'.format(len(removed), ','.join(removed)), WARNING) + for package in removed: + reactive.clear_flag('apt.installed.{}'.format(package)) + + +def add_implicit_signing_keys(): + """Add keys specified in layer.yaml + + The charm can ship trusted keys, avoiding the need to specify + them in config.yaml. We need to add them before we attempt + to add any custom sources, or apt will block under Bionic + if we attempt to add a source before the key becomes trusted. + """ + opts = layer.options() + if 'apt' not in opts or 'keys' not in opts['apt']: + return + keys = opts['apt']['keys'] + for p in keys: + full_p = os.path.join(hookenv.charm_dir(), p) + if os.path.exists(full_p): + hookenv.log("Adding key {}".format(p), DEBUG) + subprocess.check_call( + ['apt-key', 'add', full_p], + stdin=subprocess.DEVNULL, + stdout=subprocess.DEVNULL, + stderr=subprocess.DEVNULL, + ) + else: + hookenv.log('Key {!r} does not exist'.format(full_p), ERROR) + + +def configure_sources(): + """Add user specified package sources from the service configuration. + + See charmhelpers.fetch.configure_sources for details. + """ + config = hookenv.config() + + # We don't have enums, so we need to validate this ourselves. + package_status = config.get('package_status') or '' + if package_status not in ('hold', 'install'): + status.blocked('Unknown package_status {}'.format(package_status)) + # Die before further hooks are run. 
This isn't very nice, but + # there is no other way to inform the operator that they have + # invalid configuration. + raise SystemExit(0) + + sources = config.get('install_sources') or '' + keys = config.get('install_keys') or '' + if reactive.helpers.data_changed('apt.configure_sources', (sources, keys)): + fetch.configure_sources(update=False, sources_var='install_sources', keys_var='install_keys') + reactive.set_flag('apt.needs_update') + + # Clumsy 'config.get() or' per Bug #1641362 + extra_packages = sorted((config.get('extra_packages') or '').split()) + if extra_packages: + charms.apt.queue_install(extra_packages) + + +def queue_layer_packages(): + """Add packages listed in build-time layer options.""" + # Both basic and apt layer. basic layer will have already installed + # its defined packages, but rescheduling it here gets the apt layer + # flag set and they will pinned as any other apt layer installed + # package. + opts = layer.options() + for section in ['basic', 'apt']: + if section in opts and 'packages' in opts[section]: + charms.apt.queue_install(opts[section]['packages']) + + +hookenv.atstart(hookenv.log, 'Initializing Apt Layer') +hookenv.atstart(clear_removed_package_flags) +hookenv.atstart(add_implicit_signing_keys) +hookenv.atstart(configure_sources) +hookenv.atstart(queue_layer_packages) +hookenv.atstart(charms.apt.reset_application_version) diff --git a/kubeapi-load-balancer/reactive/hacluster.py b/kubeapi-load-balancer/reactive/hacluster.py new file mode 100644 index 0000000..4560270 --- /dev/null +++ b/kubeapi-load-balancer/reactive/hacluster.py @@ -0,0 +1,110 @@ +from charms import layer + +from charms.reactive import hook +from charms.reactive import when, when_not, clear_flag, set_flag, is_flag_set +from charms.reactive import endpoint_from_flag + +from charms.layer.kubernetes_common import get_ingress_address + +from charmhelpers.core import hookenv +from charmhelpers.core import unitdata + +db = unitdata.kv() + + 
+@hook('upgrade-charm') +def do_upgrade(): + # bump the services from upstart to systemd. :-/ + hacluster = endpoint_from_flag('ha.connected') + if not hacluster: + return + + if not is_flag_set('layer-hacluster.upgraded-systemd'): + services = db.get('layer-hacluster.services', {'current_services': {}, + 'desired_services': {}, + 'deleted_services': {}}) + for name, service in services['current_services'].items(): + hookenv.log("changing service {} to systemd service".format(name)) + hacluster.remove_init_service(name, service) + hacluster.add_systemd_service(name, service) + + # change any pending lsb entries to systemd + for name, service in services['desired_services'].items(): + msg = "changing pending service {} to systemd service" + hookenv.log(msg.format(name)) + hacluster.remove_init_service(name, service) + hacluster.add_systemd_service(name, service) + + clear_flag('layer-hacluster.configured') + set_flag('layer-hacluster.upgraded-systemd') + + +@when('ha.connected') +@when_not('layer-hacluster.configured') +def configure_hacluster(): + """Configure HA resources in corosync""" + hacluster = endpoint_from_flag('ha.connected') + vips = hookenv.config('ha-cluster-vip').split() + dns_record = hookenv.config('ha-cluster-dns') + if vips and dns_record: + set_flag('layer-hacluster.dns_vip.invalid') + msg = "Unsupported configuration. 
" \ + "ha-cluster-vip and ha-cluster-dns cannot both be set", + hookenv.log(msg) + return + else: + clear_flag('layer-hacluster.dns_vip.invalid') + if vips: + for vip in vips: + hacluster.add_vip(hookenv.application_name(), vip) + elif dns_record: + layer_options = layer.options('hacluster') + binding_address = layer_options.get('binding_address') + ip = get_ingress_address(binding_address) + hacluster.add_dnsha(hookenv.application_name(), ip, dns_record, + 'public') + + services = db.get('layer-hacluster.services', {'current_services': {}, + 'desired_services': {}, + 'deleted_services': {}}) + for name, service in services['deleted_services'].items(): + hacluster.remove_systemd_service(name, service) + for name, service in services['desired_services'].items(): + hacluster.add_systemd_service(name, service) + services['current_services'][name] = service + + services['deleted_services'] = {} + services['desired_services'] = {} + + hacluster.bind_resources() + set_flag('layer-hacluster.configured') + + +@when('config.changed.ha-cluster-vip', + 'ha.connected') +def update_vips(): + hacluster = endpoint_from_flag('ha.connected') + config = hookenv.config() + original_vips = set(config.previous('ha-cluster-vip').split()) + new_vips = set(config['ha-cluster-vip'].split()) + old_vips = original_vips - new_vips + + for vip in old_vips: + hacluster.remove_vip(hookenv.application_name(), vip) + + clear_flag('layer-hacluster.configured') + + +@when('config.changed.ha-cluster-dns', + 'ha.connected') +def update_dns(): + hacluster = endpoint_from_flag('ha.connected') + config = hookenv.config() + original_dns = set(config.previous('ha-cluster-dns').split()) + new_dns = set(config['ha-cluster-dns'].split()) + old_dns = original_dns - new_dns + + for dns in old_dns: + hacluster.remove_dnsha(hookenv.application_name, 'public') + + clear_flag('layer-hacluster.configured') diff --git a/kubeapi-load-balancer/reactive/leadership.py b/kubeapi-load-balancer/reactive/leadership.py new 
file mode 100644 index 0000000..29c6f3a --- /dev/null +++ b/kubeapi-load-balancer/reactive/leadership.py @@ -0,0 +1,68 @@ +# Copyright 2015-2016 Canonical Ltd. +# +# This file is part of the Leadership Layer for Juju. +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License version 3, as +# published by the Free Software Foundation. +# +# This program is distributed in the hope that it will be useful, but +# WITHOUT ANY WARRANTY; without even the implied warranties of +# MERCHANTABILITY, SATISFACTORY QUALITY, or FITNESS FOR A PARTICULAR +# PURPOSE. See the GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . + +from charmhelpers.core import hookenv +from charmhelpers.core import unitdata + +from charms import reactive +from charms.leadership import leader_get, leader_set + + +__all__ = ['leader_get', 'leader_set'] # Backwards compatibility + + +def initialize_leadership_state(): + '''Initialize leadership.* states from the hook environment. + + Invoked by hookenv.atstart() so states are available in + @hook decorated handlers. + ''' + is_leader = hookenv.is_leader() + if is_leader: + hookenv.log('Initializing Leadership Layer (is leader)') + else: + hookenv.log('Initializing Leadership Layer (is follower)') + + reactive.helpers.toggle_state('leadership.is_leader', is_leader) + + previous = unitdata.kv().getrange('leadership.settings.', strip=True) + current = hookenv.leader_get() + + # Handle deletions. 
+ for key in set(previous.keys()) - set(current.keys()): + current[key] = None + + any_changed = False + for key, value in current.items(): + reactive.helpers.toggle_state('leadership.changed.{}'.format(key), + value != previous.get(key)) + if value != previous.get(key): + any_changed = True + reactive.helpers.toggle_state('leadership.set.{}'.format(key), + value is not None) + reactive.helpers.toggle_state('leadership.changed', any_changed) + + unitdata.kv().update(current, prefix='leadership.settings.') + + +# Per https://github.com/juju-solutions/charms.reactive/issues/33, +# this module may be imported multiple times so ensure the +# initialization hook is only registered once. I have to piggy back +# onto the namespace of a module imported before reactive discovery +# to do this. +if not hasattr(reactive, '_leadership_registered'): + hookenv.atstart(initialize_leadership_state) + reactive._leadership_registered = True diff --git a/kubeapi-load-balancer/reactive/load_balancer.py b/kubeapi-load-balancer/reactive/load_balancer.py new file mode 100644 index 0000000..43f502e --- /dev/null +++ b/kubeapi-load-balancer/reactive/load_balancer.py @@ -0,0 +1,386 @@ +#!/usr/bin/env python + +# Copyright 2015 The Kubernetes Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import itertools +import os +import socket +import subprocess + +from pathlib import Path + +from charms.reactive import when, when_any, when_not +from charms.reactive import set_flag, is_state +from charms.reactive import hook +from charms.reactive import clear_flag, endpoint_from_flag, endpoint_from_name +from charmhelpers.core import hookenv +from charmhelpers.core import host +from charmhelpers.contrib.charmsupport import nrpe + +from charms.layer import nginx +from charms.layer import tls_client +from charms.layer import status +from charms.layer import kubernetes_common +from charms.layer.hacluster import add_service_to_hacluster +from charms.layer.hacluster import remove_service_from_hacluster + +from subprocess import Popen +from subprocess import PIPE +from subprocess import STDOUT +from subprocess import CalledProcessError + +from typing import List + + +apilb_nginx = """/var/log/nginx.*.log { + daily + missingok + rotate 14 + compress + delaycompress + notifempty + create 0640 www-data adm + sharedscripts + prerotate + if [ -d /etc/logrotate.d/httpd-prerotate ]; then \\ + run-parts /etc/logrotate.d/httpd-prerotate; \\ + fi \\ + endscript + postrotate + invoke-rc.d nginx rotate >/dev/null 2>&1 + endscript +}""" + +cert_dir = Path("/srv/kubernetes/") +server_crt_path = cert_dir / "server.crt" +server_key_path = cert_dir / "server.key" + + +def _nrpe_external(flagname: str) -> str: + # wokeignore:rule=master + return f"nrpe-external-master.{flagname}" + + +@when("certificates.available") +def request_server_certificates(): + """Send the data that is required to create a server certificate for + this server.""" + # Use the public ip of this unit as the Common Name for the certificate. + common_name = hookenv.unit_public_ip() + + bind_ips = kubernetes_common.get_bind_addrs(ipv4=True, ipv6=True) + + # Create SANs that the tls layer will add to the server cert. 
+ sans = [ + # The CN field is checked as a hostname, so if it's an IP, it + # won't match unless also included in the SANs as an IP field. + common_name, + kubernetes_common.get_ingress_address("website"), + socket.gethostname(), + socket.getfqdn(), + ] + bind_ips + forced_lb_ips = hookenv.config("loadbalancer-ips").split() + if forced_lb_ips: + sans.extend(forced_lb_ips) + else: + hacluster = endpoint_from_flag("ha.connected") + if hacluster: + vips = hookenv.config("ha-cluster-vip").split() + dns_record = hookenv.config("ha-cluster-dns") + if vips: + sans.extend(vips) + elif dns_record: + sans.append(dns_record) + + # maybe they have extra names they want as SANs + extra_sans = hookenv.config("extra_sans") + if extra_sans and not extra_sans == "": + sans.extend(extra_sans.split()) + # Request a server cert with this information. + tls_client.request_server_cert( + common_name, + sorted(set(sans)), + crt_path=server_crt_path, + key_path=server_key_path, + ) + + +@when("certificates.server.cert.available", "nginx.available") +@when_any("tls_client.certs.changed", "tls_client.ca.written") +def kick_nginx(tls): + # certificate changed, so sighup nginx + hookenv.log("Certificate information changed, sending SIGHUP to nginx") + host.service_restart("nginx") + clear_flag("tls_client.certs.changed") + clear_flag("tls_client.ca.written") + + +@when("config.changed.port") +def close_old_port(): + config = hookenv.config() + old_port = config.previous("port") + if not old_port: + return + try: + hookenv.close_port(old_port) + except CalledProcessError: + hookenv.log("Port %d already closed, skipping." 
% old_port) + + +def maybe_write_apilb_logrotate_config(): + filename = "/etc/logrotate.d/apilb_nginx" + if not os.path.exists(filename): + # Set log rotation for apilb log file + with open(filename, "w+") as fp: + fp.write(apilb_nginx) + + +def allow_lb_consumers_to_read_requests(): + lb_consumers = endpoint_from_name("lb-consumers") + lb_consumers.follower_perms(read=True) + return lb_consumers + + +@when("nginx.available", "tls_client.certs.saved") +@when_any("endpoint.lb-consumers.joined", "apiserver.available") +@when_not("upgrade.series.in-progress") +def install_load_balancer(): + """Create the default vhost template for load balancing""" + apiserver = endpoint_from_name("apiserver") + lb_consumers = allow_lb_consumers_to_read_requests() + + if not (server_crt_path.exists() and server_key_path.exists()): + hookenv.log("Skipping due to missing cert") + return + if not (apiserver.services() or lb_consumers.all_requests): + hookenv.log("Skipping due to requests not ready") + return + + # At this point the cert and key exist, and they are owned by root. + chown = ["chown", "www-data:www-data", str(server_crt_path)] + + # Change the owner to www-data so the nginx process can read the cert. + subprocess.call(chown) + chown = ["chown", "www-data:www-data", str(server_key_path)] + + # Change the owner to www-data so the nginx process can read the key. 
+ subprocess.call(chown) + + servers = {} + if apiserver and apiserver.services(): + servers[hookenv.config("port")] = { + (h["hostname"], h["port"]) + for service in apiserver.services() + for h in service["hosts"] + } + for request in lb_consumers.all_requests: + for server_port in request.port_mapping.keys(): + service = servers.setdefault(server_port, set()) + service.update( + (backend, backend_port) + for backend, backend_port in itertools.product( + request.backends, request.port_mapping.values() + ) + ) + nginx.configure_site( + "apilb", + "apilb.conf", + servers=servers, + server_certificate=str(server_crt_path), + server_key=str(server_key_path), + proxy_read_timeout=hookenv.config("proxy_read_timeout"), + ) + + maybe_write_apilb_logrotate_config() + for listen_port in servers.keys(): + hookenv.open_port(listen_port) + status.active("Loadbalancer ready.") + + +@hook("upgrade-charm") +def upgrade_charm(): + if is_state("certificates.available") and is_state("website.available"): + request_server_certificates() + maybe_write_apilb_logrotate_config() + + +@hook("pre-series-upgrade") +def pre_series_upgrade(): + host.service_pause("nginx") + status.blocked("Series upgrade in progress") + + +@hook("post-series-upgrade") +def post_series_upgrade(): + host.service_resume("nginx") + + +@when("nginx.available") +def set_nginx_version(): + """Surface the currently deployed version of nginx to Juju""" + cmd = "nginx -v" + p = Popen(cmd, shell=True, stdin=PIPE, stdout=PIPE, stderr=STDOUT, close_fds=True) + raw = p.stdout.read() + # The version comes back as: + # nginx version: nginx/1.10.0 (Ubuntu) + version = raw.split(b"/")[-1].split(b" ")[0] + hookenv.application_version_set(version.rstrip()) + + +def _get_lb_addresses() -> List[str]: + forced_lb_ips = hookenv.config("loadbalancer-ips").split() + if forced_lb_ips: + return forced_lb_ips + + if endpoint_from_flag("ha.connected"): + # in the hacluster world, we dump the vip or the dns + # on every unit's data. 
This is because the + # kubernetes-control-plane charm just grabs the first + # one it sees and uses that ip/dns. + vips = hookenv.config("ha-cluster-vip").split() + if vips: + return vips + + dns_records = hookenv.config("ha-cluster-dns").split() + if dns_records: + return dns_records + return [] + + +def _get_lb_port(prefer_private=True): + lb_consumers = endpoint_from_name("lb-consumers") + + # prefer a port from the newer, more explicit relations + public = filter(lambda r: r.public, lb_consumers.all_requests) + private = filter(lambda r: not r.public, lb_consumers.all_requests) + lb_reqs = (private, public) if prefer_private else (public, private) + for lb_req in itertools.chain(*lb_reqs): + return list(lb_req.port_mapping)[0] + + # fall back to the config + return hookenv.config("port") + + +@when("endpoint.lb-consumers.joined", "leadership.is_leader") +def provide_lb_consumers(): + """Respond to any LB requests via the lb-consumers relation. + + This is used in favor for the more complex two relation setup using the + website and loadbalancer relations going forward. 
+ """ + lb_consumers = endpoint_from_name("lb-consumers") + lb_addresses = _get_lb_addresses() + for request in lb_consumers.all_requests: + response = request.response + if request.protocol not in ( + request.protocols.tcp, + request.protocols.http, + request.protocols.https, + ): + response.error_type = response.error_types.unsupported + response.error_fields = { + "protocol": "Protocol must be one of: tcp, http, https" + } + lb_consumers.send_response(request) + continue + if lb_addresses: + private_address = lb_addresses[0] + public_address = lb_addresses[0] + else: + network_info = hookenv.network_get("lb-consumers", str(request.relation.id)) + private_address = network_info["ingress-addresses"][0] + public_address = hookenv.unit_get("public-address") + if request.public: + response.address = public_address + else: + response.address = private_address + lb_consumers.send_response(request) + + +@when("website.available") +def provide_application_details(): + """re-use the nginx layer website relation to relay the hostname/port + to any consuming kubernetes-workers, or other units that require the + kubernetes API""" + website = endpoint_from_flag("website.available") + lb_addresses = _get_lb_addresses() + lb_port = _get_lb_port(prefer_private=True) + if lb_addresses: + website.configure( + port=lb_port, private_address=lb_addresses[0], hostname=lb_addresses[0] + ) + else: + website.configure(port=lb_port) + + +@when("loadbalancer.available") +def provide_loadbalancing(): + """Send the public address and port to the public-address interface, so + the subordinates can get the public address of this loadbalancer.""" + loadbalancer = endpoint_from_flag("loadbalancer.available") + lb_addresses = _get_lb_addresses() + lb_port = _get_lb_port(prefer_private=False) + if not lb_addresses: + lb_addresses = [hookenv.unit_get("public-address")] + loadbalancer.set_address_port(lb_addresses[0], lb_port) + + +@when(_nrpe_external("available")) 
+@when_not(_nrpe_external("initial-config")) +def initial_nrpe_config(nagios=None): + set_flag(_nrpe_external("initial-config")) + update_nrpe_config(nagios) + + +@when("nginx.available") +@when(_nrpe_external("available")) +@when_any("config.changed.nagios_context", "config.changed.nagios_servicegroups") +def update_nrpe_config(unused=None): + services = ("nginx",) + + hostname = nrpe.get_nagios_hostname() + current_unit = nrpe.get_nagios_unit_name() + nrpe_setup = nrpe.NRPE(hostname=hostname) + nrpe.add_init_service_checks(nrpe_setup, services, current_unit) + nrpe_setup.write() + + +@when_not(_nrpe_external("available")) +@when(_nrpe_external("initial-config")) +def remove_nrpe_config(nagios=None): + clear_flag(_nrpe_external("initial-config")) + + # List of systemd services for which the checks will be removed + services = ("nginx",) + + # use the charm-helpers code for now. + hostname = nrpe.get_nagios_hostname() + nrpe_setup = nrpe.NRPE(hostname=hostname) + + for service in services: + nrpe_setup.remove_check(shortname=service) + + +@when("nginx.available", "ha.connected") +def configure_hacluster(): + add_service_to_hacluster("nginx", "nginx") + set_flag("hacluster-configured") + + +@when_not("ha.connected") +@when("hacluster-configured") +def remove_hacluster(): + remove_service_from_hacluster("nginx", "nginx") + clear_flag("hacluster-configured") diff --git a/kubeapi-load-balancer/reactive/nginx.py b/kubeapi-load-balancer/reactive/nginx.py new file mode 100644 index 0000000..9a93006 --- /dev/null +++ b/kubeapi-load-balancer/reactive/nginx.py @@ -0,0 +1,33 @@ +from charms.reactive import ( + set_state, + when_not, + when +) + +from charms.layer import nginx + +from charmhelpers.core import hookenv + +config = hookenv.config() + + +# Handlers -------------------------------------------------------------------- +@when('apt.installed.nginx-full') +@when_not('nginx.available') +def nginx_ready(): + nginx.remove_default_site() + hookenv.status_set('active', 
'NGINX is ready') + set_state('nginx.available') + + +# Example website.available reaction ------------------------------------------ +""" +This example reaction for an application layer which consumes this nginx layer. +If left here then this reaction may overwrite your top-level reaction depending +on service names, ie., both nginx and ghost have the same reaction method, +however, nginx will execute since it's a higher precedence. + +@when('nginx.available', 'website.available') +def configure_website(website): + website.configure(port=config['port']) +""" diff --git a/kubeapi-load-balancer/reactive/status.py b/kubeapi-load-balancer/reactive/status.py new file mode 100644 index 0000000..2f33f3f --- /dev/null +++ b/kubeapi-load-balancer/reactive/status.py @@ -0,0 +1,4 @@ +from charms import layer + + +layer.status._initialize() diff --git a/kubeapi-load-balancer/reactive/tls_client.py b/kubeapi-load-balancer/reactive/tls_client.py new file mode 100644 index 0000000..afa2228 --- /dev/null +++ b/kubeapi-load-balancer/reactive/tls_client.py @@ -0,0 +1,208 @@ +import os + +from pathlib import Path +from subprocess import check_call + +from charms import layer +from charms.reactive import hook +from charms.reactive import set_state, remove_state +from charms.reactive import when +from charms.reactive import set_flag, clear_flag +from charms.reactive import endpoint_from_flag +from charms.reactive.helpers import data_changed + +from charmhelpers.core import hookenv, unitdata +from charmhelpers.core.hookenv import log + + +@when('certificates.ca.available') +def store_ca(tls): + '''Read the certificate authority from the relation object and install + the ca on this system.''' + # Get the CA from the relationship object. 
+ certificate_authority = tls.get_ca() + if certificate_authority: + layer_options = layer.options('tls-client') + ca_path = layer_options.get('ca_certificate_path') + changed = data_changed('certificate_authority', certificate_authority) + if ca_path: + if changed or not os.path.exists(ca_path): + log('Writing CA certificate to {0}'.format(ca_path)) + # ensure we have a newline at the end of the certificate. + # some things will blow up without one. + # See https://bugs.launchpad.net/charm-kubernetes-master/+bug/1828034 + if not certificate_authority.endswith('\n'): + certificate_authority += '\n' + _write_file(ca_path, certificate_authority) + set_state('tls_client.ca.written') + set_state('tls_client.ca.saved') + if changed: + # Update /etc/ssl/certs and generate ca-certificates.crt + install_ca(certificate_authority) + + +@when('certificates.server.cert.available') +def store_server(tls): + '''Read the server certificate and server key from the relation object + and save them to the certificate directory..''' + server_cert, server_key = tls.get_server_cert() + chain = tls.get_chain() + if chain: + server_cert = server_cert + '\n' + chain + if server_cert and server_key: + layer_options = layer.options('tls-client') + cert_path = layer_options.get('server_certificate_path') + key_path = layer_options.get('server_key_path') + cert_changed = data_changed('server_certificate', server_cert) + key_changed = data_changed('server_key', server_key) + if cert_path: + if cert_changed or not os.path.exists(cert_path): + log('Writing server certificate to {0}'.format(cert_path)) + _write_file(cert_path, server_cert) + set_state('tls_client.server.certificate.written') + set_state('tls_client.server.certificate.saved') + if key_path: + if key_changed or not os.path.exists(key_path): + log('Writing server key to {0}'.format(key_path)) + _write_file(key_path, server_key) + set_state('tls_client.server.key.saved') + + +@when('certificates.client.cert.available') +def 
store_client(tls): + '''Read the client certificate and client key from the relation object + and copy them to the certificate directory.''' + client_cert, client_key = tls.get_client_cert() + chain = tls.get_chain() + if chain: + client_cert = client_cert + '\n' + chain + if client_cert and client_key: + layer_options = layer.options('tls-client') + cert_path = layer_options.get('client_certificate_path') + key_path = layer_options.get('client_key_path') + cert_changed = data_changed('client_certificate', client_cert) + key_changed = data_changed('client_key', client_key) + if cert_path: + if cert_changed or not os.path.exists(cert_path): + log('Writing client certificate to {0}'.format(cert_path)) + _write_file(cert_path, client_cert) + set_state('tls_client.client.certificate.written') + set_state('tls_client.client.certificate.saved') + if key_path: + if key_changed or not os.path.exists(key_path): + log('Writing client key to {0}'.format(key_path)) + _write_file(key_path, client_key) + set_state('tls_client.client.key.saved') + + +@when('certificates.certs.changed') +def update_certs(): + tls = endpoint_from_flag('certificates.certs.changed') + certs_paths = unitdata.kv().get('layer.tls-client.cert-paths', {}) + all_ready = True + any_changed = False + maps = { + 'server': tls.server_certs_map, + 'client': tls.client_certs_map, + } + + if maps.get('client') == {}: + log( + 'No client certs found using maps. 
Checking for global \ + client certificates.', + 'WARNING' + ) + # Check for global certs, + # Backwards compatibility https://bugs.launchpad.net/charm-kubernetes-master/+bug/1825819 + cert_pair = tls.get_client_cert() + if cert_pair is not None: + for client_name in certs_paths.get('client', {}).keys(): + maps.get('client').update({ + client_name: cert_pair + }) + + chain = tls.get_chain() + for cert_type in ('server', 'client'): + for common_name, paths in certs_paths.get(cert_type, {}).items(): + cert_pair = maps[cert_type].get(common_name) + if not cert_pair: + all_ready = False + continue + if not data_changed('layer.tls-client.' + '{}.{}'.format(cert_type, common_name), cert_pair): + continue + + cert = None + key = None + if type(cert_pair) is not tuple: + if paths['crt']: + cert = cert_pair.cert + if paths['key']: + key = cert_pair.key + else: + cert, key = cert_pair + + if cert: + if chain: + cert = cert + '\n' + chain + _ensure_directory(paths['crt']) + Path(paths['crt']).write_text(cert) + + if key: + _ensure_directory(paths['key']) + Path(paths['key']).write_text(key) + + any_changed = True + # clear flags first to ensure they are re-triggered if left set + clear_flag('tls_client.{}.certs.changed'.format(cert_type)) + clear_flag('tls_client.{}.cert.{}.changed'.format(cert_type, + common_name)) + set_flag('tls_client.{}.certs.changed'.format(cert_type)) + set_flag('tls_client.{}.cert.{}.changed'.format(cert_type, + common_name)) + if all_ready: + set_flag('tls_client.certs.saved') + if any_changed: + clear_flag('tls_client.certs.changed') + set_flag('tls_client.certs.changed') + clear_flag('certificates.certs.changed') + + +def install_ca(certificate_authority): + '''Install a certificiate authority on the system by calling the + update-ca-certificates command.''' + if certificate_authority: + name = hookenv.service_name() + # Create a path to install CAs on Debian systems. 
+ ca_path = '/usr/local/share/ca-certificates/{0}.crt'.format(name) + log('Writing CA certificate to {0}'.format(ca_path)) + _write_file(ca_path, certificate_authority) + # Update the trusted CAs on this system (a time expensive operation). + check_call(['update-ca-certificates']) + log('Generated ca-certificates.crt for {0}'.format(name)) + set_state('tls_client.ca_installed') + + +@hook('upgrade-charm') +def remove_states(): + remove_state('tls_client.ca.saved') + remove_state('tls_client.server.certificate.saved') + remove_state('tls_client.server.key.saved') + remove_state('tls_client.client.certificate.saved') + remove_state('tls_client.client.key.saved') + + +def _ensure_directory(path): + '''Ensure the parent directory exists creating directories if necessary.''' + directory = os.path.dirname(path) + if not os.path.isdir(directory): + os.makedirs(directory) + os.chmod(directory, 0o770) + + +def _write_file(path, content): + '''Write the path to a file.''' + _ensure_directory(path) + with open(path, 'w') as stream: + stream.write(content) + os.chmod(path, 0o440) diff --git a/kubeapi-load-balancer/requirements.txt b/kubeapi-load-balancer/requirements.txt new file mode 100644 index 0000000..55543d9 --- /dev/null +++ b/kubeapi-load-balancer/requirements.txt @@ -0,0 +1,3 @@ +mock +flake8 +pytest diff --git a/kubeapi-load-balancer/revision b/kubeapi-load-balancer/revision new file mode 100644 index 0000000..c227083 --- /dev/null +++ b/kubeapi-load-balancer/revision @@ -0,0 +1 @@ +0 \ No newline at end of file diff --git a/kubeapi-load-balancer/templates/.gitkeep b/kubeapi-load-balancer/templates/.gitkeep new file mode 100644 index 0000000..e69de29 diff --git a/kubeapi-load-balancer/templates/apilb.conf b/kubeapi-load-balancer/templates/apilb.conf new file mode 100644 index 0000000..a14ba95 --- /dev/null +++ b/kubeapi-load-balancer/templates/apilb.conf @@ -0,0 +1,42 @@ +{% for server_port, backends in servers.items() -%} +upstream upstream_{{ server_port }} { + {%- 
for backend, backend_port in backends %} + server {{ backend }}:{{ backend_port }}; + {%- endfor %} +} + +server { + listen {{ server_port }} ssl http2; + listen [::]:{{ server_port }} ssl http2 ipv6only=on; + server_name server_{{ server_port }}; + + access_log /var/log/nginx.access.log; + error_log /var/log/nginx.error.log; + + ssl on; + ssl_session_cache builtin:1000 shared:SSL:10m; + ssl_certificate {{ server_certificate }}; + ssl_certificate_key {{ server_key }}; + ssl_ciphers HIGH:!aNULL:!eNULL:!EXPORT:!CAMELLIA:!DES:!MD5:!PSK:!RC4; + ssl_prefer_server_ciphers on; + + + location / { + proxy_buffering off; + proxy_http_version 1.1; + proxy_set_header Host $host; + proxy_set_header X-Real-IP $remote_addr; + proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; + proxy_set_header X-Forwarded-Proto $scheme; + proxy_set_header X-Forwarded-Proto-Version $http2; + proxy_set_header Upgrade $http_upgrade; + proxy_set_header Connection $http_connection; + proxy_set_header X-Stream-Protocol-Version $http_x_stream_protocol_version; + + add_header X-Stream-Protocol-Version $upstream_http_x_stream_protocol_version; + + proxy_pass https://upstream_{{ server_port }}; + proxy_read_timeout {{ proxy_read_timeout }}; + } +} +{%- endfor %} diff --git a/kubeapi-load-balancer/templates/cdk.auth-webhook-secret.yaml b/kubeapi-load-balancer/templates/cdk.auth-webhook-secret.yaml new file mode 100644 index 0000000..a12c402 --- /dev/null +++ b/kubeapi-load-balancer/templates/cdk.auth-webhook-secret.yaml @@ -0,0 +1,13 @@ +# Manifest for CK secrets that auth-webhook expects +--- +apiVersion: v1 +kind: Secret +metadata: + name: {{ secret_name }} + namespace: {{ secret_namespace }} +type: {{ type }} +data: + uid: {{ user }} + username: {{ username }} + password: {{ password }} + groups: '{{ groups }}' diff --git a/kubeapi-load-balancer/templates/vhost.conf.ex b/kubeapi-load-balancer/templates/vhost.conf.ex new file mode 100644 index 0000000..253be36 --- /dev/null +++ 
b/kubeapi-load-balancer/templates/vhost.conf.ex @@ -0,0 +1,18 @@ +server { + listen 80; + + server_name {{server_name}}; + + location / { + proxy_redirect off; + proxy_set_header X-Real-IP $remote_addr; + proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; + proxy_set_header X-Forwarded-Proto $scheme; + proxy_set_header Host $http_host; + proxy_set_header X-NginX-Proxy true; + proxy_set_header Connection ""; + proxy_http_version 1.1; + proxy_pass http://{{host}}:{{port}}; + + } +} diff --git a/kubeapi-load-balancer/tests/data/charm.yaml b/kubeapi-load-balancer/tests/data/charm.yaml new file mode 100644 index 0000000..127fd24 --- /dev/null +++ b/kubeapi-load-balancer/tests/data/charm.yaml @@ -0,0 +1,10 @@ +description: A minimal two-machine Kubernetes cluster, appropriate for development. +applications: + kubeapi-load-balancer: + charm: {{charm}} + num_units: 1 + expose: true +relations: +- [kubeapi-load-balancer:lb-consumers, kubernetes-control-plane:loadbalancer-internal] +- [kubeapi-load-balancer:lb-consumers, kubernetes-control-plane:loadbalancer-external] +- [kubeapi-load-balancer:certificates, easyrsa:client] diff --git a/kubeapi-load-balancer/tests/data/ip_addr_json b/kubeapi-load-balancer/tests/data/ip_addr_json new file mode 100644 index 0000000..9b10664 --- /dev/null +++ b/kubeapi-load-balancer/tests/data/ip_addr_json @@ -0,0 +1,30 @@ +[ + { + "ifname": "ens192", + "operstate": "UP", + "addr_info": [ + { + "local": "10.246.154.77", + "prefixlen": 24, + "metric": 100 + }, + {} + ] + }, + { + "ifname": "lxdbr0", + "operstate": "UP", + "addr_info": [ + { + "local": "10.111.246.1", + "prefixlen": 24 + } + ] + }, + { + "link_index": 4, + "ifname": "veth890e3a36", + "operstate": "UP", + "addr_info": [] + } +] \ No newline at end of file diff --git a/kubeapi-load-balancer/tests/functional/conftest.py b/kubeapi-load-balancer/tests/functional/conftest.py new file mode 100644 index 0000000..a92e249 --- /dev/null +++ 
b/kubeapi-load-balancer/tests/functional/conftest.py @@ -0,0 +1,4 @@ +import charms.unit_test + + +charms.unit_test.patch_reactive() diff --git a/kubeapi-load-balancer/tests/functional/test_k8s_common.py b/kubeapi-load-balancer/tests/functional/test_k8s_common.py new file mode 100644 index 0000000..4b867e6 --- /dev/null +++ b/kubeapi-load-balancer/tests/functional/test_k8s_common.py @@ -0,0 +1,90 @@ +from functools import partial + +import pytest +from unittest import mock +from charms.layer import kubernetes_common + + +class TestCreateKubeConfig: + @pytest.fixture(autouse=True) + def _files(self, tmp_path): + self.cfg_file = tmp_path / "config" + self.ca_file = tmp_path / "ca.crt" + self.ca_file.write_text("foo") + self.ckc = partial( + kubernetes_common.create_kubeconfig, + self.cfg_file, + "server", + self.ca_file, + ) + + def test_guard_clauses(self): + with pytest.raises(ValueError): + self.ckc() + assert not self.cfg_file.exists() + with pytest.raises(ValueError): + self.ckc(token="token", password="password") + assert not self.cfg_file.exists() + with pytest.raises(ValueError): + self.ckc(key="key") + assert not self.cfg_file.exists() + + def test_file_creation(self): + self.ckc(password="password") + assert self.cfg_file.exists() + cfg_data_1 = self.cfg_file.read_text() + assert cfg_data_1 + + def test_idempotency(self): + self.ckc(password="password") + cfg_data_1 = self.cfg_file.read_text() + self.ckc(password="password") + cfg_data_2 = self.cfg_file.read_text() + # Verify that calling w/ the same data keeps the same file contents. 
+ assert cfg_data_2 == cfg_data_1 + + def test_efficient_updates(self): + self.ckc(password="old_password") + cfg_stat_1 = self.cfg_file.stat() + self.ckc(password="old_password") + cfg_stat_2 = self.cfg_file.stat() + self.ckc(password="new_password") + cfg_stat_3 = self.cfg_file.stat() + # Verify that calling with the same data doesn't + # modify the file at all, but that new data does + assert cfg_stat_1.st_mtime == cfg_stat_2.st_mtime < cfg_stat_3.st_mtime + + def test_aws_iam(self): + self.ckc(password="password", aws_iam_cluster_id="aws-cluster") + assert self.cfg_file.exists() + cfg_data_1 = self.cfg_file.read_text() + assert "aws-cluster" in cfg_data_1 + + def test_keystone(self): + self.ckc(password="password", keystone=True) + assert self.cfg_file.exists() + cfg_data_1 = self.cfg_file.read_text() + assert "keystone-user" in cfg_data_1 + assert "exec" in cfg_data_1 + + def test_atomic_updates(self): + self.ckc(password="old_password") + with self.cfg_file.open("rt") as f: + # Perform a write in the middle of reading + self.ckc(password="new_password") + # Read data from existing FH after new data was written + cfg_data_1 = f.read() + # Read updated data + cfg_data_2 = self.cfg_file.read_text() + # Verify that the in-progress read didn't get any of the new data + assert cfg_data_1 != cfg_data_2 + assert "old_password" in cfg_data_1 + assert "new_password" in cfg_data_2 + + @mock.patch("charmhelpers.core.hookenv.network_get", autospec=True) + def test_get_ingress_address(self, network_get): + network_get.return_value = {"ingress-addresses": ["1.2.3.4", "5.6.7.8"]} + ingress = kubernetes_common.get_ingress_address("endpoint-name") + assert ingress == "1.2.3.4" + ingress = kubernetes_common.get_ingress_address("endpoint-name", ["1.2.3.4"]) + assert ingress == "5.6.7.8" diff --git a/kubeapi-load-balancer/tests/integration/test_kubeapi-load-balancer_integration.py b/kubeapi-load-balancer/tests/integration/test_kubeapi-load-balancer_integration.py new file mode 
100644 index 0000000..5057b63 --- /dev/null +++ b/kubeapi-load-balancer/tests/integration/test_kubeapi-load-balancer_integration.py @@ -0,0 +1,78 @@ +import logging +from pathlib import Path +import pytest +import shlex + + +log = logging.getLogger(__name__) + + +def _check_status_messages(ops_test): + """Validate that the status messages are correct.""" + expected_messages = { + "kubernetes-control-plane": "Kubernetes control-plane running.", + "kubernetes-worker": "Kubernetes worker running.", + "kubeapi-load-balancer": "Loadbalancer ready.", + } + for app, message in expected_messages.items(): + for unit in ops_test.model.applications[app].units: + assert unit.workload_status_message == message + + +@pytest.mark.abort_on_fail +async def test_build_and_deploy(ops_test): + charm = next(Path.cwd().glob("kubeapi*.charm"), None) + if not charm: + log.info("Build Charm...") + charm = await ops_test.build_charm(".") + + overlays = [ + ops_test.Bundle("kubernetes-core", channel="edge"), + Path("tests/data/charm.yaml"), + ] + bundle, *overlays = await ops_test.async_render_bundles( + *overlays, charm=charm.resolve() + ) + + log.info("Deploy Charm...") + model = ops_test.model_full_name + cmd = f"juju deploy -m {model} {bundle} " + " ".join( + f"--overlay={f}" for f in overlays + ) + rc, stdout, stderr = await ops_test.run(*shlex.split(cmd)) + assert rc == 0, f"Bundle deploy failed: {(stderr or stdout).strip()}" + + await ops_test.model.wait_for_idle(wait_for_active=True, timeout=60 * 60) + _check_status_messages(ops_test) + + +async def test_load_balancer_forced_address(ops_test): + """Validate that the first forced address is passed in lb-consumers relation.""" + api_lb = ops_test.model.applications["kubeapi-load-balancer"] + address = api_lb.units[0].data["public-address"] + await api_lb.set_config({"loadbalancer-ips": address}) + await ops_test.model.wait_for_idle(wait_for_active=True, timeout=10 * 60) + + try: + worker = 
ops_test.model.applications["kubernetes-worker"] + action = await worker.units[0].run( + "cat /root/cdk/kubeproxyconfig | grep server" + ) + result = await action.wait() + assert f"https://{address}" in result.results["stdout"] + finally: + await api_lb.reset_config(["loadbalancer-ips"]) + await ops_test.model.wait_for_idle(wait_for_active=True, timeout=10 * 60) + + +async def test_kube_api_endpoint(ops_test): + """Validate that using the old MITM-style relation works""" + k8s_cp = ops_test.model.applications["kubernetes-control-plane"] + worker = ops_test.model.applications["kubernetes-worker"] + await k8s_cp.remove_relation("loadbalancer-internal", "kubeapi-load-balancer") + await k8s_cp.remove_relation("loadbalancer-external", "kubeapi-load-balancer") + await k8s_cp.add_relation("kube-api-endpoint", "kubeapi-load-balancer") + await k8s_cp.add_relation("loadbalancer", "kubeapi-load-balancer") + await worker.add_relation("kube-api-endpoint", "kubeapi-load-balancer") + await ops_test.model.wait_for_idle(wait_for_active=True, timeout=30 * 60) + _check_status_messages(ops_test) diff --git a/kubeapi-load-balancer/tests/unit/conftest.py b/kubeapi-load-balancer/tests/unit/conftest.py new file mode 100644 index 0000000..712e8f1 --- /dev/null +++ b/kubeapi-load-balancer/tests/unit/conftest.py @@ -0,0 +1,4 @@ +import charms.unit_test + +charms.unit_test.patch_reactive() +charms.unit_test.patch_module("subprocess") diff --git a/kubeapi-load-balancer/tests/unit/test_k8s_common.py b/kubeapi-load-balancer/tests/unit/test_k8s_common.py new file mode 100644 index 0000000..5e4fc56 --- /dev/null +++ b/kubeapi-load-balancer/tests/unit/test_k8s_common.py @@ -0,0 +1,148 @@ +import json +import string +from subprocess import CalledProcessError +from pathlib import Path +from unittest.mock import Mock, patch +from charms.reactive import endpoint_from_flag + +from charms.layer import kubernetes_common as kc + + +def test_token_generator(): + alphanum = string.ascii_letters + 
string.digits + token = kc.token_generator(10) + assert len(token) == 10 + unknown_chars = set(token) - set(alphanum) + assert not unknown_chars + + +def test_get_secret_names(monkeypatch): + monkeypatch.setattr(kc, "kubectl", Mock()) + kc.kubectl.side_effect = [ + CalledProcessError(1, "none"), + FileNotFoundError, + "{}".encode("utf8"), + json.dumps( + { + "items": [ + { + "metadata": {"name": "secret-id"}, + "data": {"username": "dXNlcg=="}, + }, + ], + } + ).encode("utf8"), + ] + assert kc.get_secret_names() == {} + assert kc.get_secret_names() == {} + assert kc.get_secret_names() == {} + assert kc.get_secret_names() == {"user": "secret-id"} + + +def test_generate_rfc1123(): + alphanum = string.ascii_letters + string.digits + token = kc.generate_rfc1123(1000) + assert len(token) == 253 + unknown_chars = set(token) - set(alphanum) + assert not unknown_chars + + +def test_create_secret(monkeypatch): + monkeypatch.setattr(kc, "render", Mock()) + monkeypatch.setattr(kc, "kubectl_manifest", Mock()) + monkeypatch.setattr(kc, "get_secret_names", Mock()) + monkeypatch.setattr(kc, "generate_rfc1123", Mock()) + kc.kubectl_manifest.side_effect = [True, False] + kc.get_secret_names.side_effect = [{"username": "secret-id"}, {}] + kc.generate_rfc1123.return_value = "foo" + assert kc.create_secret("token", "username", "user", "groups") + assert kc.render.call_args[1]["context"] == { + "groups": "Z3JvdXBz", + "password": "dXNlcjo6dG9rZW4=", + "secret_name": "secret-id", + "secret_namespace": "kube-system", + "type": "juju.is/token-auth", + "user": "dXNlcg==", + "username": "dXNlcm5hbWU=", + } + assert not kc.create_secret("token", "username", "user", "groups") + assert kc.render.call_args[1]["context"] == { + "groups": "Z3JvdXBz", + "password": "dXNlcjo6dG9rZW4=", + "secret_name": "auth-user-foo", + "secret_namespace": "kube-system", + "type": "juju.is/token-auth", + "user": "dXNlcg==", + "username": "dXNlcm5hbWU=", + } + + +def test_get_secret_password(monkeypatch): + 
monkeypatch.setattr(kc, "kubectl", Mock()) + monkeypatch.setattr(kc, "Path", Mock()) + monkeypatch.setattr(kc, "yaml", Mock()) + kc.kubectl.side_effect = [ + CalledProcessError(1, "none"), + CalledProcessError(1, "none"), + CalledProcessError(1, "none"), + CalledProcessError(1, "none"), + CalledProcessError(1, "none"), + CalledProcessError(1, "none"), + FileNotFoundError, + json.dumps({}).encode("utf8"), + json.dumps({"items": []}).encode("utf8"), + json.dumps({"items": []}).encode("utf8"), + json.dumps({"items": [{}]}).encode("utf8"), + json.dumps({"items": [{"data": {}}]}).encode("utf8"), + json.dumps( + {"items": [{"data": {"username": "Ym9i", "password": "c2VjcmV0"}}]} + ).encode("utf8"), + json.dumps( + {"items": [{"data": {"username": "dXNlcm5hbWU=", "password": "c2VjcmV0"}}]} + ).encode("utf8"), + ] + kc.yaml.safe_load.side_effect = [ + {}, + {"users": None}, + {"users": []}, + {"users": [{"user": {}}]}, + {"users": [{"user": {"token": "secret"}}]}, + ] + assert kc.get_secret_password("username") is None + assert kc.get_secret_password("admin") is None + assert kc.get_secret_password("admin") is None + assert kc.get_secret_password("admin") is None + assert kc.get_secret_password("admin") is None + assert kc.get_secret_password("admin") == "secret" + assert kc.get_secret_password("username") is None + assert kc.get_secret_password("username") is None + assert kc.get_secret_password("username") is None + assert kc.get_secret_password("username") is None + assert kc.get_secret_password("username") is None + assert kc.get_secret_password("username") is None + assert kc.get_secret_password("username") is None + assert kc.get_secret_password("username") == "secret" + + +@patch("os.listdir") +@patch("os.remove") +@patch("os.symlink") +def test_configure_default_cni(os_symlink, os_remove, os_listdir): + os_listdir.return_value = ["05-default.conflist", "10-cni.conflist"] + cni = endpoint_from_flag("cni.available") + cni.get_config.return_value = { + "cidr": 
"192.168.0.0/24", + "cni-conf-file": "10-cni.conflist", + } + kc.configure_default_cni("test-cni") + os_remove.assert_called_once_with("/etc/cni/net.d/05-default.conflist") + os_symlink.assert_called_once_with( + "10-cni.conflist", "/etc/cni/net.d/05-default.conflist" + ) + + +def test_get_bind_addrs(): + response = Path("tests", "data", "ip_addr_json").read_bytes() + with patch.object(kc, "check_output", return_value=response): + addrs = kc.get_bind_addrs() + assert addrs == ["10.246.154.77"] diff --git a/kubeapi-load-balancer/tests/unit/test_kubeapi_load_balancer.py b/kubeapi-load-balancer/tests/unit/test_kubeapi_load_balancer.py new file mode 100644 index 0000000..4ea2763 --- /dev/null +++ b/kubeapi-load-balancer/tests/unit/test_kubeapi_load_balancer.py @@ -0,0 +1,17 @@ +from charmhelpers.core import host # patched + +from reactive import load_balancer as handlers + + +def test_series_upgrade(): + assert host.service_pause.call_count == 0 + assert host.service_resume.call_count == 0 + assert handlers.status.blocked.call_count == 0 + handlers.pre_series_upgrade() + assert host.service_pause.call_count == 1 + assert host.service_resume.call_count == 0 + assert handlers.status.blocked.call_count == 1 + handlers.post_series_upgrade() + assert host.service_pause.call_count == 1 + assert host.service_resume.call_count == 1 + assert handlers.status.blocked.call_count == 1 diff --git a/kubeapi-load-balancer/tests/validate-wheelhouse.sh b/kubeapi-load-balancer/tests/validate-wheelhouse.sh new file mode 100755 index 0000000..329dfca --- /dev/null +++ b/kubeapi-load-balancer/tests/validate-wheelhouse.sh @@ -0,0 +1,8 @@ +#!/bin/bash + +build_dir="$(mktemp -d)" +function cleanup { rm -rf "$build_dir"; } +trap cleanup EXIT + +charm build . 
--build-dir "$build_dir" +pip install -f "$build_dir/kubeapi-load-balancer/wheelhouse" --no-index --no-cache-dir "$build_dir"/kubeapi-load-balancer/wheelhouse/* diff --git a/kubeapi-load-balancer/tox.ini b/kubeapi-load-balancer/tox.ini new file mode 100644 index 0000000..cf08339 --- /dev/null +++ b/kubeapi-load-balancer/tox.ini @@ -0,0 +1,60 @@ +[tox] +skipsdist = True +envlist = lint,unit + +[testenv] +basepython = python3 +setenv = + PYTHONPATH={toxinidir} + +[testenv:unit] +deps = + pytest + ipdb + charms.unit_test +commands = pytest --tb native -s --show-capture=no --log-cli-level=INFO {posargs} {toxinidir}/tests/unit + +[testenv:integration] +deps = + pytest + pytest-operator + ipdb + juju < 3.1 +commands = pytest --tb native --show-capture=no --log-cli-level=INFO -s {posargs} {toxinidir}/tests/integration + +[testenv:format] +deps = + black +commands = + black {toxinidir}/reactive {toxinidir}/tests + +[testenv:lint] +deps = + flake8 + black + mypy +commands = + flake8 {toxinidir}/reactive {toxinidir}/tests + black --check {toxinidir}/reactive {toxinidir}/tests + mypy --config-file={toxinidir}/tox.ini {toxinidir}/reactive + +[testenv:validate-wheelhouse] +deps = +# Temporarily pin setuptools to avoid the breaking change from 58 until +# all dependencies we use have a chance to update. 
+# See: https://setuptools.readthedocs.io/en/latest/history.html#v58-0-0 +# and: https://github.com/pypa/setuptools/issues/2784#issuecomment-917663223 + setuptools<58 +allowlist_externals = {toxinidir}/tests/validate-wheelhouse.sh +commands = {toxinidir}/tests/validate-wheelhouse.sh + +[flake8] +max-line-length = 88 + +[mypy] + +[mypy-charms.*] +ignore_missing_imports = True + +[mypy-charmhelpers.*] +ignore_missing_imports = True diff --git a/kubeapi-load-balancer/version b/kubeapi-load-balancer/version new file mode 100644 index 0000000..2e628eb --- /dev/null +++ b/kubeapi-load-balancer/version @@ -0,0 +1 @@ +1.24+ck1-1-g4db8833 \ No newline at end of file diff --git a/kubeapi-load-balancer/wheelhouse.txt b/kubeapi-load-balancer/wheelhouse.txt new file mode 100644 index 0000000..42d41a7 --- /dev/null +++ b/kubeapi-load-balancer/wheelhouse.txt @@ -0,0 +1,37 @@ +# layer:basic +# pip is pinned to <19.0 to avoid https://github.com/pypa/pip/issues/6164 +# even with installing setuptools before upgrading pip ends up with pip seeing +# the older setuptools at the system level if include_system_packages is true +pip>=18.1,<19.0;python_version < '3.8' +pip;python_version >= '3.8' +# pin Jinja2, PyYAML and MarkupSafe to the last versions supporting python 3.5 +# for trusty +Jinja2==2.10;python_version >= '3.0' and python_version <= '3.4' # py3 trusty +Jinja2==2.11;python_version == '2.7' or python_version == '3.5' # py27, py35 +Jinja2;python_version >= '3.6' # py36 and on + +PyYAML==5.2;python_version >= '3.0' and python_version <= '3.4' # py3 trusty +PyYAML<5.4;python_version == '2.7' or python_version >= '3.5' # all else + +MarkupSafe<2.0.0;python_version < '3.6' +MarkupSafe<2.1.0;python_version == '3.6' # Just for python 3.6 +MarkupSafe;python_version >= '3.7' # newer pythons + +setuptools<42;python_version < '3.8' +setuptools;python_version >= '3.8' +setuptools-scm<=1.17.0;python_version < '3.8' +setuptools-scm;python_version >= '3.8' +flit_core;python_version >= '3.8' 
+charmhelpers>=0.4.0,<2.0.0 +charms.reactive>=0.1.0,<2.0.0 +wheel<0.34;python_version < '3.8' +wheel;python_version >= '3.8' +# pin netaddr to avoid pulling importlib-resources +netaddr<=0.7.19 + +# layer:nginx +toml + +# kubeapi-load-balancer +loadbalancer-interface + diff --git a/kubeapi-load-balancer/wheelhouse/Jinja2-3.0.3.tar.gz b/kubeapi-load-balancer/wheelhouse/Jinja2-3.0.3.tar.gz new file mode 100644 index 0000000..cb150bc Binary files /dev/null and b/kubeapi-load-balancer/wheelhouse/Jinja2-3.0.3.tar.gz differ diff --git a/kubeapi-load-balancer/wheelhouse/MarkupSafe-2.0.1.tar.gz b/kubeapi-load-balancer/wheelhouse/MarkupSafe-2.0.1.tar.gz new file mode 100644 index 0000000..7a37fc9 Binary files /dev/null and b/kubeapi-load-balancer/wheelhouse/MarkupSafe-2.0.1.tar.gz differ diff --git a/kubeapi-load-balancer/wheelhouse/PyYAML-5.3.1.tar.gz b/kubeapi-load-balancer/wheelhouse/PyYAML-5.3.1.tar.gz new file mode 100644 index 0000000..915d67b Binary files /dev/null and b/kubeapi-load-balancer/wheelhouse/PyYAML-5.3.1.tar.gz differ diff --git a/kubeapi-load-balancer/wheelhouse/cached-property-1.5.2.tar.gz b/kubeapi-load-balancer/wheelhouse/cached-property-1.5.2.tar.gz new file mode 100644 index 0000000..501f2c0 Binary files /dev/null and b/kubeapi-load-balancer/wheelhouse/cached-property-1.5.2.tar.gz differ diff --git a/kubeapi-load-balancer/wheelhouse/charmhelpers-1.2.1.tar.gz b/kubeapi-load-balancer/wheelhouse/charmhelpers-1.2.1.tar.gz new file mode 100644 index 0000000..78f281b Binary files /dev/null and b/kubeapi-load-balancer/wheelhouse/charmhelpers-1.2.1.tar.gz differ diff --git a/kubeapi-load-balancer/wheelhouse/charms.reactive-1.5.2.tar.gz b/kubeapi-load-balancer/wheelhouse/charms.reactive-1.5.2.tar.gz new file mode 100644 index 0000000..433c84f Binary files /dev/null and b/kubeapi-load-balancer/wheelhouse/charms.reactive-1.5.2.tar.gz differ diff --git a/kubeapi-load-balancer/wheelhouse/loadbalancer_interface-1.2.0.tar.gz 
b/kubeapi-load-balancer/wheelhouse/loadbalancer_interface-1.2.0.tar.gz new file mode 100644 index 0000000..da5a119 Binary files /dev/null and b/kubeapi-load-balancer/wheelhouse/loadbalancer_interface-1.2.0.tar.gz differ diff --git a/kubeapi-load-balancer/wheelhouse/marshmallow-3.14.1.tar.gz b/kubeapi-load-balancer/wheelhouse/marshmallow-3.14.1.tar.gz new file mode 100644 index 0000000..8030d97 Binary files /dev/null and b/kubeapi-load-balancer/wheelhouse/marshmallow-3.14.1.tar.gz differ diff --git a/kubeapi-load-balancer/wheelhouse/marshmallow-enum-1.5.1.tar.gz b/kubeapi-load-balancer/wheelhouse/marshmallow-enum-1.5.1.tar.gz new file mode 100644 index 0000000..642941a Binary files /dev/null and b/kubeapi-load-balancer/wheelhouse/marshmallow-enum-1.5.1.tar.gz differ diff --git a/kubeapi-load-balancer/wheelhouse/netaddr-0.7.19.tar.gz b/kubeapi-load-balancer/wheelhouse/netaddr-0.7.19.tar.gz new file mode 100644 index 0000000..cc31d9d Binary files /dev/null and b/kubeapi-load-balancer/wheelhouse/netaddr-0.7.19.tar.gz differ diff --git a/kubeapi-load-balancer/wheelhouse/ops-1.5.5.tar.gz b/kubeapi-load-balancer/wheelhouse/ops-1.5.5.tar.gz new file mode 100644 index 0000000..02a84ba Binary files /dev/null and b/kubeapi-load-balancer/wheelhouse/ops-1.5.5.tar.gz differ diff --git a/kubeapi-load-balancer/wheelhouse/ops_reactive_interface-1.0.1.tar.gz b/kubeapi-load-balancer/wheelhouse/ops_reactive_interface-1.0.1.tar.gz new file mode 100644 index 0000000..14f5ded Binary files /dev/null and b/kubeapi-load-balancer/wheelhouse/ops_reactive_interface-1.0.1.tar.gz differ diff --git a/kubeapi-load-balancer/wheelhouse/pbr-5.11.1.tar.gz b/kubeapi-load-balancer/wheelhouse/pbr-5.11.1.tar.gz new file mode 100644 index 0000000..6235267 Binary files /dev/null and b/kubeapi-load-balancer/wheelhouse/pbr-5.11.1.tar.gz differ diff --git a/kubeapi-load-balancer/wheelhouse/pip-18.1.tar.gz b/kubeapi-load-balancer/wheelhouse/pip-18.1.tar.gz new file mode 100644 index 0000000..a18192d Binary 
files /dev/null and b/kubeapi-load-balancer/wheelhouse/pip-18.1.tar.gz differ diff --git a/kubeapi-load-balancer/wheelhouse/pyaml-21.10.1.tar.gz b/kubeapi-load-balancer/wheelhouse/pyaml-21.10.1.tar.gz new file mode 100644 index 0000000..b19aad3 Binary files /dev/null and b/kubeapi-load-balancer/wheelhouse/pyaml-21.10.1.tar.gz differ diff --git a/kubeapi-load-balancer/wheelhouse/setuptools-41.6.0.zip b/kubeapi-load-balancer/wheelhouse/setuptools-41.6.0.zip new file mode 100644 index 0000000..3345759 Binary files /dev/null and b/kubeapi-load-balancer/wheelhouse/setuptools-41.6.0.zip differ diff --git a/kubeapi-load-balancer/wheelhouse/setuptools_scm-1.17.0.tar.gz b/kubeapi-load-balancer/wheelhouse/setuptools_scm-1.17.0.tar.gz new file mode 100644 index 0000000..43b16c7 Binary files /dev/null and b/kubeapi-load-balancer/wheelhouse/setuptools_scm-1.17.0.tar.gz differ diff --git a/kubeapi-load-balancer/wheelhouse/toml-0.10.2.tar.gz b/kubeapi-load-balancer/wheelhouse/toml-0.10.2.tar.gz new file mode 100644 index 0000000..41dd278 Binary files /dev/null and b/kubeapi-load-balancer/wheelhouse/toml-0.10.2.tar.gz differ diff --git a/kubeapi-load-balancer/wheelhouse/wheel-0.33.6.tar.gz b/kubeapi-load-balancer/wheelhouse/wheel-0.33.6.tar.gz new file mode 100644 index 0000000..c922c4e Binary files /dev/null and b/kubeapi-load-balancer/wheelhouse/wheel-0.33.6.tar.gz differ diff --git a/kubernetes-control-plane/.build.manifest b/kubernetes-control-plane/.build.manifest new file mode 100644 index 0000000..9ea672e --- /dev/null +++ b/kubernetes-control-plane/.build.manifest @@ -0,0 +1,2820 @@ +{ + "layers": [ + { + "branch": "refs/heads/main\nrefs/heads/stable", + "rev": "fcdcea4e5de3e1556c24e6704607862d0ba00a56", + "url": "layer:options" + }, + { + "branch": "refs/heads/main\nrefs/heads/stable", + "rev": "fb767dcf0786d1d5364199bb3b40bdc86518b45b", + "url": "layer:basic" + }, + { + "branch": "refs/heads/main\nrefs/heads/stable", + "rev": "a0e1f28e8bb9040eada9a7a73f66ee6a615704b7", + 
"url": "layer:snap" + }, + { + "branch": "refs/heads/main\nrefs/heads/stable", + "rev": "527dd64fc4b9a6b0f8d80a3c2c0b865155050275", + "url": "layer:debug" + }, + { + "branch": "refs/heads/main\nrefs/heads/stable", + "rev": "fb46dec78d390571753d21876bbba689bbbca9e4", + "url": "layer:tls-client" + }, + { + "branch": "refs/heads/main\nrefs/heads/stable", + "rev": "cc5bd3f49b2fa5e6c3ab2336763c313ec8bf083f", + "url": "layer:leadership" + }, + { + "branch": "refs/heads/main\nrefs/heads/stable", + "rev": "f491ebe32b503c9712d2f8cd602dcce18f4aab46", + "url": "layer:metrics" + }, + { + "branch": "refs/heads/main\nrefs/heads/stable", + "rev": "47dfcd4920ef6317850a4837ef0057ab0092a18e", + "url": "layer:nagios" + }, + { + "branch": "refs/heads/main\nrefs/heads/stable", + "rev": "b60102068c6f0ddbeaf8a308549a3e88cfa35688", + "url": "layer:cdk-service-kicker" + }, + { + "branch": "refs/heads/main\nrefs/heads/stable", + "rev": "195fa11c4b087cef044b9bd3a8b8d2b2540cb727", + "url": "layer:cis-benchmark" + }, + { + "branch": "refs/heads/main\nrefs/heads/stable", + "rev": "fa27fc93e0b08000963e83a6bfe49812d890dfcf", + "url": "layer:coordinator" + }, + { + "branch": "refs/heads/stable", + "rev": "b93fae0e73bb48074deb0062db204b621caa9f1f", + "url": "layer:kubernetes-common" + }, + { + "branch": "refs/heads/main\nrefs/heads/stable", + "rev": "85ede006f2588cf6f95a05f9287c4094ae1503c3", + "url": "layer:kubernetes-node-base" + }, + { + "branch": "refs/heads/main\nrefs/heads/stable", + "rev": "6aedcd2f267678648d0b905b49c24bdb9fc9e690", + "url": "layer:vault-kv" + }, + { + "branch": "refs/heads/main\nrefs/heads/stable", + "rev": "a7d7b6423db37a47611310039e6ed1929c0a2eab", + "url": "layer:status" + }, + { + "branch": "refs/heads/main\nrefs/heads/stable", + "rev": "76bddfb640ab8767fc7e4a4b73a4a4e781948f34", + "url": "layer:apt" + }, + { + "branch": "refs/heads/main\nrefs/heads/stable", + "rev": "2c4c16cd9e4254494d79aac1d17eacf1620d1b0f", + "url": "layer:vaultlocker" + }, + { + "branch": 
"refs/heads/main\nrefs/heads/stable", + "rev": "5b0926cdc45f511a0040b0b26f89bd174d5c81eb", + "url": "layer:hacluster" + }, + { + "branch": "refs/heads/stable", + "rev": "59313f55da9b45126d1577fe87afab7c8fe20409", + "url": "kubernetes-control-plane" + }, + { + "branch": "refs/heads/main\nrefs/heads/stable", + "rev": "d9850016d930a6d507b9fd45e2598d327922b140", + "url": "interface:tls-certificates" + }, + { + "branch": "refs/heads/main\nrefs/heads/stable", + "rev": "95d744d1dbc4d86fb0462283c9371619bf5bbc24", + "url": "interface:nrpe-external-master" + }, + { + "branch": "refs/heads/main\nrefs/heads/stable", + "rev": "6f927f10b97f45c566481cf57a29d433f17373e1", + "url": "interface:container-runtime" + }, + { + "branch": "refs/heads/main\nrefs/heads/stable", + "rev": "dceab99ac3739cc7265e386287f100f1bfebc47f", + "url": "interface:vault-kv" + }, + { + "branch": "refs/heads/main\nrefs/heads/stable", + "rev": "2b714e90b1b8845ce7390bb1dad5a56a65437907", + "url": "interface:hacluster" + }, + { + "branch": "refs/heads/main\nrefs/heads/stable", + "rev": "c1061a29297084fa53c2474ba371671186ff3389", + "url": "interface:ceph-admin" + }, + { + "rev": "3ca251fb01a7cd51d2a0ee2b9b66647c8ffe891f", + "url": "interface:ceph-client" + }, + { + "branch": "refs/heads/main\nrefs/heads/stable", + "rev": "44f244cbd08b86bf2b68bd71c3fb34c7c070c382", + "url": "interface:etcd" + }, + { + "branch": "refs/heads/main\nrefs/heads/stable", + "rev": "632131b1f122daf6fb601fd4c9f1e4dbb1a92e09", + "url": "interface:http" + }, + { + "branch": "refs/heads/main\nrefs/heads/stable", + "rev": "3ebfa8c70580aec7d9fcd2be1c74cef3457117f3", + "url": "interface:kubernetes-cni" + }, + { + "branch": "refs/heads/main\nrefs/heads/stable", + "rev": "9bc32742b7720a755ada9526424e5d80092e1536", + "url": "interface:kube-dns" + }, + { + "branch": "refs/heads/main\nrefs/heads/stable", + "rev": "ff5434d8353292057a591dddc5ca749aea2c3b5f", + "url": "interface:kube-control" + }, + { + "branch": "refs/heads/main\nrefs/heads/stable", 
+ "rev": "0a1abd4d936983f99ccc0f262d4ba8e6012169af", + "url": "interface:kube-masters" + }, + { + "branch": "refs/heads/main\nrefs/heads/stable", + "rev": "5021f8a23f6e6e4cc449d2d02f2d8cb99763ec27", + "url": "interface:public-address" + }, + { + "branch": "refs/heads/main\nrefs/heads/stable", + "rev": "d8d8c7ef17c99ad53383f3cabf4cf5c8191d16f7", + "url": "interface:aws-integration" + }, + { + "branch": "refs/heads/main\nrefs/heads/stable", + "rev": "d8f093cb2930edf5f93678253dca2da70b73b4fb", + "url": "interface:gcp-integration" + }, + { + "branch": "refs/heads/main\nrefs/heads/stable", + "rev": "aa365314041ccbd0018e7c73e0de39eed9be045f", + "url": "interface:openstack-integration" + }, + { + "branch": "refs/heads/main\nrefs/heads/stable", + "rev": "d5caea55ced6785f391215ee457c3a964eaf3f4b", + "url": "interface:vsphere-integration" + }, + { + "branch": "refs/heads/main\nrefs/heads/stable", + "rev": "8d2202e433d7c188de4df2fd4bddb355193e93ac", + "url": "interface:azure-integration" + }, + { + "branch": "refs/heads/main\nrefs/heads/stable", + "rev": "53e93b8820899f2251d207ed5d5c3b212ceb64de", + "url": "interface:keystone-credentials" + }, + { + "branch": "refs/heads/main\nrefs/heads/stable", + "rev": "3f775242c16d53243c993d7ba0c896169ad1639e", + "url": "interface:prometheus-manual" + }, + { + "branch": "refs/heads/main\nrefs/heads/stable", + "rev": "e64261e281f012a00d374c6779ec52e488cb8713", + "url": "interface:grafana-dashboard" + }, + { + "branch": "refs/heads/main\nrefs/heads/stable", + "rev": "f9dcae4931f6c64383713d7e567686c1bb662e3f", + "url": "interface:aws-iam" + } + ], + "signatures": { + ".build.manifest": [ + "build", + "dynamic", + "unchecked" + ], + ".github/workflows/main.yaml": [ + "layer:kubernetes-node-base", + "static", + "0e5670aa0458f545b05ee907c9f7e31c4bb78b7db68f982a6ce59996f55903a4" + ], + ".github/workflows/main.yml": [ + "kubernetes-control-plane", + "static", + "24a800c782617920f194bfea876adcc519b86f2923b3e27391e90316244b653c" + ], + 
".gitignore": [ + "kubernetes-control-plane", + "static", + "4dffb8b59bdeb567ff6b815bf7c33dfc02cd4a7d89a16bdac522b89e20e4672e" + ], + ".travis.yml": [ + "layer:cis-benchmark", + "static", + "b6dbe144aa288b8a89caf1119b9835b407b234c9b32a1c81013b12a0593a8be2" + ], + ".wokeignore": [ + "kubernetes-control-plane", + "static", + "756dcfcddff993c7fd6cf14a402d97e7cbd9faafa6339aa722d19eeceb6d1391" + ], + "CONTRIBUTING.md": [ + "kubernetes-control-plane", + "static", + "7f4f59165d99b6e3b84e077a6d62cfece793123096b09cc1333e770364e56504" + ], + "LICENSE": [ + "kubernetes-control-plane", + "static", + "58d1e17ffe5109a7ae296caafcadfdbe6a7d176f0bc4ab01e12a689b0499d8bd" + ], + "Makefile": [ + "layer:basic", + "static", + "b7ab3a34e5faf79b96a8632039a0ad0aa87f2a9b5f0ba604e007cafb22190301" + ], + "README.md": [ + "kubernetes-control-plane", + "static", + "f355510f457c12bb6c3f02a858561292ee4c6e299cc26b4bb7ba39cbcb29adb0" + ], + "actions.yaml": [ + "kubernetes-control-plane", + "dynamic", + "6871b201abbb65d1ab1e0235254b70b89a4a52f2c13c735d803d3ae6e2494d39" + ], + "actions/apply-manifest": [ + "kubernetes-control-plane", + "static", + "0d938fdd216d242bb731cb1a0dc07ca134d9335acebe2d0c13d14aacc3075153" + ], + "actions/cis-benchmark": [ + "layer:cis-benchmark", + "static", + "bb727abb091314e91274f2367eae173c3a5303f289d539e21ff0587d01a32de1" + ], + "actions/debug": [ + "layer:debug", + "static", + "db0a42dae4c5045b2c06385bf22209dfe0e2ded55822ef847d84b01d9ff2b046" + ], + "actions/get-kubeconfig": [ + "kubernetes-control-plane", + "static", + "0d938fdd216d242bb731cb1a0dc07ca134d9335acebe2d0c13d14aacc3075153" + ], + "actions/kubectl-actions.py": [ + "kubernetes-control-plane", + "static", + "0d938fdd216d242bb731cb1a0dc07ca134d9335acebe2d0c13d14aacc3075153" + ], + "actions/namespace-create": [ + "kubernetes-control-plane", + "static", + "fc25a90c3bdecc883028f789b5061980591a7bc26398666b8dc3e24e09c9be1c" + ], + "actions/namespace-delete": [ + "kubernetes-control-plane", + "static", + 
"fc25a90c3bdecc883028f789b5061980591a7bc26398666b8dc3e24e09c9be1c" + ], + "actions/namespace-list": [ + "kubernetes-control-plane", + "static", + "fc25a90c3bdecc883028f789b5061980591a7bc26398666b8dc3e24e09c9be1c" + ], + "actions/restart": [ + "kubernetes-control-plane", + "static", + "72cb46d4971f057fdbbc901599a735a7ce3d61e7ae9b2687c9e9b4cd478e26d0" + ], + "actions/upgrade": [ + "kubernetes-control-plane", + "static", + "048357986ce428c0dad3058a595e475d8ea0741627d77a18e70f9f9a8b3e63bf" + ], + "actions/user-create": [ + "kubernetes-control-plane", + "static", + "a8e850df8e3d2146d7ead992d2c155fe04defb64c6571ba31ce40d7411f8d148" + ], + "actions/user-delete": [ + "kubernetes-control-plane", + "static", + "a8e850df8e3d2146d7ead992d2c155fe04defb64c6571ba31ce40d7411f8d148" + ], + "actions/user-list": [ + "kubernetes-control-plane", + "static", + "a8e850df8e3d2146d7ead992d2c155fe04defb64c6571ba31ce40d7411f8d148" + ], + "actions/user_actions.py": [ + "kubernetes-control-plane", + "static", + "a8e850df8e3d2146d7ead992d2c155fe04defb64c6571ba31ce40d7411f8d148" + ], + "bin/charm-env": [ + "layer:basic", + "static", + "fb6a20fac4102a6a4b6ffe903fcf666998f9a95a3647e6f9af7a1eeb44e58fd5" + ], + "bin/layer_option": [ + "layer:options", + "static", + "e959bf29da4c5edff28b2602c24113c4df9e25cdc9f2aa3b5d46c8577b2a40cc" + ], + "build-cni-resources.sh": [ + "kubernetes-control-plane", + "static", + "2acb456393d677f9f3f259c5bf90a5c0155e0e8aed15dee22100d13034d1dc47" + ], + "config.yaml": [ + "kubernetes-control-plane", + "dynamic", + "1c77a762eb91b81fe97617248afbb3eada1b326931cbaf6560ebdddfafc166c9" + ], + "copyright": [ + "kubernetes-control-plane", + "static", + "1eedc4e165789729bc492abd80e34ac85dcb0ec429eebdf225129b9b0bfc3502" + ], + "copyright.layer-apt": [ + "layer:apt", + "static", + "5123b2d0220fefb4424a463216fb41a6dd7cfad49c9799ba7037f1e74a2fd6bc" + ], + "copyright.layer-basic": [ + "layer:basic", + "static", + "f6740d66fd60b60f2533d9fcb53907078d1e20920a0219afce7182e2a1c97629" + ], + 
"copyright.layer-coordinator": [ + "layer:coordinator", + "static", + "7d212a095a6143559fb51f26bc40c2ba24b977190f65c7e5c835104f54d5dfc5" + ], + "copyright.layer-leadership": [ + "layer:leadership", + "static", + "8ce407829378fc0f72ce44c7f624e4951c7ccb3db1cfb949bee026b701728cc9" + ], + "copyright.layer-metrics": [ + "layer:metrics", + "static", + "08509dcbade4c20761ba4382ef23c831744dbab1d4a8dd94a1c2b4d4e913334c" + ], + "copyright.layer-nagios": [ + "layer:nagios", + "static", + "47b2363574909e748bcc471d9004780ac084b301c154905654b5b6f088474749" + ], + "copyright.layer-options": [ + "layer:options", + "static", + "f6740d66fd60b60f2533d9fcb53907078d1e20920a0219afce7182e2a1c97629" + ], + "copyright.layer-snap": [ + "layer:snap", + "static", + "c71d239df91726fc519c6eb72d318ec65820627232b2f796219e87dcf35d0ab4" + ], + "copyright.layer-status": [ + "layer:status", + "static", + "7c0e36e618a8544faaaa3f8e0533c2f1f4a18bcacbdd8b99b537742e6b587d58" + ], + "copyright.layer-vault-kv": [ + "layer:vault-kv", + "static", + "7c0e36e618a8544faaaa3f8e0533c2f1f4a18bcacbdd8b99b537742e6b587d58" + ], + "copyright.layer-vaultlocker": [ + "layer:vaultlocker", + "static", + "7c0e36e618a8544faaaa3f8e0533c2f1f4a18bcacbdd8b99b537742e6b587d58" + ], + "debug-scripts/auth-webhook": [ + "kubernetes-control-plane", + "static", + "08185f547fe131bf7ecd7d65fd7cfaa246f613e9ccd6fcf666eb02f6d987a7e8" + ], + "debug-scripts/charm-unitdata": [ + "layer:debug", + "static", + "c952b9d31f3942e4e722cb3e70f5119707b69b8e76cc44e2e906bc6d9aef49b7" + ], + "debug-scripts/filesystem": [ + "layer:debug", + "static", + "d29cc8687f4422d024001c91b1ac756ee6bf8a2a125bc98db1199ba775eb8fd7" + ], + "debug-scripts/juju-logs": [ + "layer:debug", + "static", + "d260b35753a917368cb8c64c1312546a0a40ef49cba84c75bc6369549807c55e" + ], + "debug-scripts/juju-network-get": [ + "layer:debug", + "static", + "6d849a1f8e6569bd0d5ea38299f7937cb8b36a5f505e3532f6c756eabeb8b6c5" + ], + "debug-scripts/kubectl": [ + "kubernetes-control-plane", + 
"static", + "696848b11b760ab278b02b650ffda2adc8ba75c6701d574bdec0a7a1a75aea7e" + ], + "debug-scripts/kubernetes-master-services": [ + "kubernetes-control-plane", + "static", + "f9930483765f715098c7e6a6b21e08105aff7dea4ecddc68fb8b6480951242d3" + ], + "debug-scripts/network": [ + "layer:debug", + "static", + "714afae5dcb45554ff1f05285501e3b7fcc656c8de51217e263b93dab25a9d2e" + ], + "debug-scripts/packages": [ + "layer:debug", + "static", + "e8177102dc2ca853cb9272c1257cf2cfd5253d2a074e602d07c8bc4ea8e27c75" + ], + "debug-scripts/sysctl": [ + "layer:debug", + "static", + "990035b320e09cc2228e1f2f880e795d51118b2959339eacddff9cbb74349c6a" + ], + "debug-scripts/systemd": [ + "layer:debug", + "static", + "23ddf533198bf5b1ce723acde31ada806aab8539292b514c721d8ec08af74106" + ], + "debug-scripts/tls-certs": [ + "layer:tls-client", + "static", + "ebf7f23ef6e39fb8e664bac2e9429e32aaeb673b4a51751724b835c007e85d3b" + ], + "docs/README": [ + "kubernetes-control-plane", + "static", + "ea099038f01227b2907a915aa9e93d9ed73d85f9b446edcbe079c8a8de21e0cf" + ], + "docs/index.md": [ + "kubernetes-control-plane", + "static", + "90cfcf5db6784ef45e849f43a8d50c7ed2a7352552293a1f710f85316367adc7" + ], + "docs/status.md": [ + "layer:status", + "static", + "975dec9f8c938196e102e954a80226bda293407c4e5ae857c118bf692154702a" + ], + "docs/vault-kv.md": [ + "layer:vault-kv", + "static", + "96d97a5ff204f4ce12efdecea33c1a118deee383c2c067bfcce760b56e00c635" + ], + "docs/vaultlocker.md": [ + "layer:vaultlocker", + "static", + "a4dfe20b9ca14895d3b98658f5848dac61eefa62b2ea6f317ab2c2e65d151372" + ], + "exec.d/docker-compose/charm-pre-install": [ + "layer:kubernetes-node-base", + "static", + "32482c2a88209cbe512990db5fb4deabdcff88282bf7c7dd71a265383139fc77" + ], + "exec.d/vmware-patch/charm-pre-install": [ + "kubernetes-control-plane", + "static", + "9f98f70669ddd949ff83c7b408b678ae170bf41e4faa2828b4d66bd47acca93e" + ], + "hooks/aws-iam-relation-broken": [ + "layer:basic", + "dynamic", + 
"2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7" + ], + "hooks/aws-iam-relation-changed": [ + "layer:basic", + "dynamic", + "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7" + ], + "hooks/aws-iam-relation-created": [ + "layer:basic", + "dynamic", + "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7" + ], + "hooks/aws-iam-relation-departed": [ + "layer:basic", + "dynamic", + "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7" + ], + "hooks/aws-iam-relation-joined": [ + "layer:basic", + "dynamic", + "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7" + ], + "hooks/aws-relation-broken": [ + "layer:basic", + "dynamic", + "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7" + ], + "hooks/aws-relation-changed": [ + "layer:basic", + "dynamic", + "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7" + ], + "hooks/aws-relation-created": [ + "layer:basic", + "dynamic", + "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7" + ], + "hooks/aws-relation-departed": [ + "layer:basic", + "dynamic", + "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7" + ], + "hooks/aws-relation-joined": [ + "layer:basic", + "dynamic", + "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7" + ], + "hooks/azure-relation-broken": [ + "layer:basic", + "dynamic", + "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7" + ], + "hooks/azure-relation-changed": [ + "layer:basic", + "dynamic", + "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7" + ], + "hooks/azure-relation-created": [ + "layer:basic", + "dynamic", + "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7" + ], + "hooks/azure-relation-departed": [ + "layer:basic", + "dynamic", + "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7" + ], + "hooks/azure-relation-joined": [ + "layer:basic", + "dynamic", + 
"2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7" + ], + "hooks/ceph-client-relation-broken": [ + "layer:basic", + "dynamic", + "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7" + ], + "hooks/ceph-client-relation-changed": [ + "layer:basic", + "dynamic", + "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7" + ], + "hooks/ceph-client-relation-created": [ + "layer:basic", + "dynamic", + "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7" + ], + "hooks/ceph-client-relation-departed": [ + "layer:basic", + "dynamic", + "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7" + ], + "hooks/ceph-client-relation-joined": [ + "layer:basic", + "dynamic", + "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7" + ], + "hooks/ceph-storage-relation-broken": [ + "layer:basic", + "dynamic", + "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7" + ], + "hooks/ceph-storage-relation-changed": [ + "layer:basic", + "dynamic", + "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7" + ], + "hooks/ceph-storage-relation-created": [ + "layer:basic", + "dynamic", + "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7" + ], + "hooks/ceph-storage-relation-departed": [ + "layer:basic", + "dynamic", + "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7" + ], + "hooks/ceph-storage-relation-joined": [ + "layer:basic", + "dynamic", + "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7" + ], + "hooks/certificates-relation-broken": [ + "layer:basic", + "dynamic", + "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7" + ], + "hooks/certificates-relation-changed": [ + "layer:basic", + "dynamic", + "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7" + ], + "hooks/certificates-relation-created": [ + "layer:basic", + "dynamic", + "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7" + ], + 
"hooks/certificates-relation-departed": [ + "layer:basic", + "dynamic", + "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7" + ], + "hooks/certificates-relation-joined": [ + "layer:basic", + "dynamic", + "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7" + ], + "hooks/cni-relation-broken": [ + "layer:basic", + "dynamic", + "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7" + ], + "hooks/cni-relation-changed": [ + "layer:basic", + "dynamic", + "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7" + ], + "hooks/cni-relation-created": [ + "layer:basic", + "dynamic", + "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7" + ], + "hooks/cni-relation-departed": [ + "layer:basic", + "dynamic", + "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7" + ], + "hooks/cni-relation-joined": [ + "layer:basic", + "dynamic", + "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7" + ], + "hooks/collect-metrics": [ + "layer:metrics", + "static", + "139fe18ce4cf2bed2155d3d0fce1c3b4cf1bc2598242cda42b3d772ec9bf8558" + ], + "hooks/config-changed": [ + "layer:basic", + "dynamic", + "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7" + ], + "hooks/container-runtime-relation-broken": [ + "layer:basic", + "dynamic", + "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7" + ], + "hooks/container-runtime-relation-changed": [ + "layer:basic", + "dynamic", + "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7" + ], + "hooks/container-runtime-relation-created": [ + "layer:basic", + "dynamic", + "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7" + ], + "hooks/container-runtime-relation-departed": [ + "layer:basic", + "dynamic", + "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7" + ], + "hooks/container-runtime-relation-joined": [ + "layer:basic", + "dynamic", + 
"2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7" + ], + "hooks/coordinator-relation-broken": [ + "layer:basic", + "dynamic", + "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7" + ], + "hooks/coordinator-relation-changed": [ + "layer:coordinator", + "static", + "e5138d13492aa9a90379e8fce4a85c612481e7bc27a49958edbbfcaaf06f03a6" + ], + "hooks/coordinator-relation-created": [ + "layer:basic", + "dynamic", + "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7" + ], + "hooks/coordinator-relation-departed": [ + "layer:coordinator", + "static", + "e5138d13492aa9a90379e8fce4a85c612481e7bc27a49958edbbfcaaf06f03a6" + ], + "hooks/coordinator-relation-joined": [ + "layer:basic", + "dynamic", + "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7" + ], + "hooks/dns-provider-relation-broken": [ + "layer:basic", + "dynamic", + "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7" + ], + "hooks/dns-provider-relation-changed": [ + "layer:basic", + "dynamic", + "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7" + ], + "hooks/dns-provider-relation-created": [ + "layer:basic", + "dynamic", + "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7" + ], + "hooks/dns-provider-relation-departed": [ + "layer:basic", + "dynamic", + "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7" + ], + "hooks/dns-provider-relation-joined": [ + "layer:basic", + "dynamic", + "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7" + ], + "hooks/etcd-relation-broken": [ + "layer:basic", + "dynamic", + "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7" + ], + "hooks/etcd-relation-changed": [ + "layer:basic", + "dynamic", + "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7" + ], + "hooks/etcd-relation-created": [ + "layer:basic", + "dynamic", + "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7" + ], + "hooks/etcd-relation-departed": [ + 
"layer:basic", + "dynamic", + "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7" + ], + "hooks/etcd-relation-joined": [ + "layer:basic", + "dynamic", + "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7" + ], + "hooks/external-cloud-provider-relation-broken": [ + "layer:basic", + "dynamic", + "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7" + ], + "hooks/external-cloud-provider-relation-changed": [ + "layer:basic", + "dynamic", + "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7" + ], + "hooks/external-cloud-provider-relation-created": [ + "layer:basic", + "dynamic", + "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7" + ], + "hooks/external-cloud-provider-relation-departed": [ + "layer:basic", + "dynamic", + "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7" + ], + "hooks/external-cloud-provider-relation-joined": [ + "layer:basic", + "dynamic", + "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7" + ], + "hooks/gcp-relation-broken": [ + "layer:basic", + "dynamic", + "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7" + ], + "hooks/gcp-relation-changed": [ + "layer:basic", + "dynamic", + "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7" + ], + "hooks/gcp-relation-created": [ + "layer:basic", + "dynamic", + "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7" + ], + "hooks/gcp-relation-departed": [ + "layer:basic", + "dynamic", + "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7" + ], + "hooks/gcp-relation-joined": [ + "layer:basic", + "dynamic", + "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7" + ], + "hooks/grafana-relation-broken": [ + "layer:basic", + "dynamic", + "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7" + ], + "hooks/grafana-relation-changed": [ + "layer:basic", + "dynamic", + "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7" + 
], + "hooks/grafana-relation-created": [ + "layer:basic", + "dynamic", + "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7" + ], + "hooks/grafana-relation-departed": [ + "layer:basic", + "dynamic", + "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7" + ], + "hooks/grafana-relation-joined": [ + "layer:basic", + "dynamic", + "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7" + ], + "hooks/ha-relation-broken": [ + "layer:basic", + "dynamic", + "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7" + ], + "hooks/ha-relation-changed": [ + "layer:basic", + "dynamic", + "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7" + ], + "hooks/ha-relation-created": [ + "layer:basic", + "dynamic", + "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7" + ], + "hooks/ha-relation-departed": [ + "layer:basic", + "dynamic", + "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7" + ], + "hooks/ha-relation-joined": [ + "layer:basic", + "dynamic", + "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7" + ], + "hooks/hook.template": [ + "layer:basic", + "static", + "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7" + ], + "hooks/install": [ + "layer:basic", + "dynamic", + "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7" + ], + "hooks/keystone-credentials-relation-broken": [ + "layer:basic", + "dynamic", + "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7" + ], + "hooks/keystone-credentials-relation-changed": [ + "layer:basic", + "dynamic", + "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7" + ], + "hooks/keystone-credentials-relation-created": [ + "layer:basic", + "dynamic", + "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7" + ], + "hooks/keystone-credentials-relation-departed": [ + "layer:basic", + "dynamic", + "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7" + ], + 
"hooks/keystone-credentials-relation-joined": [ + "layer:basic", + "dynamic", + "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7" + ], + "hooks/kube-api-endpoint-relation-broken": [ + "layer:basic", + "dynamic", + "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7" + ], + "hooks/kube-api-endpoint-relation-changed": [ + "layer:basic", + "dynamic", + "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7" + ], + "hooks/kube-api-endpoint-relation-created": [ + "layer:basic", + "dynamic", + "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7" + ], + "hooks/kube-api-endpoint-relation-departed": [ + "layer:basic", + "dynamic", + "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7" + ], + "hooks/kube-api-endpoint-relation-joined": [ + "layer:basic", + "dynamic", + "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7" + ], + "hooks/kube-control-relation-broken": [ + "layer:basic", + "dynamic", + "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7" + ], + "hooks/kube-control-relation-changed": [ + "layer:basic", + "dynamic", + "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7" + ], + "hooks/kube-control-relation-created": [ + "layer:basic", + "dynamic", + "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7" + ], + "hooks/kube-control-relation-departed": [ + "layer:basic", + "dynamic", + "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7" + ], + "hooks/kube-control-relation-joined": [ + "layer:basic", + "dynamic", + "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7" + ], + "hooks/kube-masters-relation-broken": [ + "layer:basic", + "dynamic", + "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7" + ], + "hooks/kube-masters-relation-changed": [ + "layer:basic", + "dynamic", + "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7" + ], + "hooks/kube-masters-relation-created": [ + "layer:basic", + 
"dynamic", + "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7" + ], + "hooks/kube-masters-relation-departed": [ + "layer:basic", + "dynamic", + "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7" + ], + "hooks/kube-masters-relation-joined": [ + "layer:basic", + "dynamic", + "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7" + ], + "hooks/leader-elected": [ + "layer:coordinator", + "static", + "e5138d13492aa9a90379e8fce4a85c612481e7bc27a49958edbbfcaaf06f03a6" + ], + "hooks/leader-settings-changed": [ + "layer:coordinator", + "static", + "e5138d13492aa9a90379e8fce4a85c612481e7bc27a49958edbbfcaaf06f03a6" + ], + "hooks/loadbalancer-external-relation-broken": [ + "layer:basic", + "dynamic", + "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7" + ], + "hooks/loadbalancer-external-relation-changed": [ + "layer:basic", + "dynamic", + "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7" + ], + "hooks/loadbalancer-external-relation-created": [ + "layer:basic", + "dynamic", + "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7" + ], + "hooks/loadbalancer-external-relation-departed": [ + "layer:basic", + "dynamic", + "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7" + ], + "hooks/loadbalancer-external-relation-joined": [ + "layer:basic", + "dynamic", + "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7" + ], + "hooks/loadbalancer-internal-relation-broken": [ + "layer:basic", + "dynamic", + "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7" + ], + "hooks/loadbalancer-internal-relation-changed": [ + "layer:basic", + "dynamic", + "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7" + ], + "hooks/loadbalancer-internal-relation-created": [ + "layer:basic", + "dynamic", + "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7" + ], + "hooks/loadbalancer-internal-relation-departed": [ + "layer:basic", + "dynamic", + 
"2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7" + ], + "hooks/loadbalancer-internal-relation-joined": [ + "layer:basic", + "dynamic", + "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7" + ], + "hooks/loadbalancer-relation-broken": [ + "layer:basic", + "dynamic", + "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7" + ], + "hooks/loadbalancer-relation-changed": [ + "layer:basic", + "dynamic", + "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7" + ], + "hooks/loadbalancer-relation-created": [ + "layer:basic", + "dynamic", + "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7" + ], + "hooks/loadbalancer-relation-departed": [ + "layer:basic", + "dynamic", + "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7" + ], + "hooks/loadbalancer-relation-joined": [ + "layer:basic", + "dynamic", + "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7" + ], + "hooks/nrpe-external-master-relation-broken": [ + "layer:basic", + "dynamic", + "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7" + ], + "hooks/nrpe-external-master-relation-changed": [ + "layer:basic", + "dynamic", + "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7" + ], + "hooks/nrpe-external-master-relation-created": [ + "layer:basic", + "dynamic", + "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7" + ], + "hooks/nrpe-external-master-relation-departed": [ + "layer:basic", + "dynamic", + "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7" + ], + "hooks/nrpe-external-master-relation-joined": [ + "layer:basic", + "dynamic", + "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7" + ], + "hooks/openstack-relation-broken": [ + "layer:basic", + "dynamic", + "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7" + ], + "hooks/openstack-relation-changed": [ + "layer:basic", + "dynamic", + 
"2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7" + ], + "hooks/openstack-relation-created": [ + "layer:basic", + "dynamic", + "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7" + ], + "hooks/openstack-relation-departed": [ + "layer:basic", + "dynamic", + "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7" + ], + "hooks/openstack-relation-joined": [ + "layer:basic", + "dynamic", + "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7" + ], + "hooks/post-series-upgrade": [ + "layer:basic", + "dynamic", + "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7" + ], + "hooks/pre-series-upgrade": [ + "layer:basic", + "dynamic", + "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7" + ], + "hooks/prometheus-relation-broken": [ + "layer:basic", + "dynamic", + "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7" + ], + "hooks/prometheus-relation-changed": [ + "layer:basic", + "dynamic", + "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7" + ], + "hooks/prometheus-relation-created": [ + "layer:basic", + "dynamic", + "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7" + ], + "hooks/prometheus-relation-departed": [ + "layer:basic", + "dynamic", + "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7" + ], + "hooks/prometheus-relation-joined": [ + "layer:basic", + "dynamic", + "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7" + ], + "hooks/relations/aws-iam/LICENSE": [ + "interface:aws-iam", + "static", + "c71d239df91726fc519c6eb72d318ec65820627232b2f796219e87dcf35d0ab4" + ], + "hooks/relations/aws-iam/README.md": [ + "interface:aws-iam", + "static", + "ccb355baee91178627bfe675466095073b324116fd84859d248680a77d951d08" + ], + "hooks/relations/aws-iam/__init__.py": [ + "interface:aws-iam", + "static", + "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855" + ], + "hooks/relations/aws-iam/interface.yaml": [ + 
"interface:aws-iam", + "static", + "b44092fdbbbc9696169d85224acb4ea34a870ffb03b4a1c3e7d01719d1b47cf6" + ], + "hooks/relations/aws-iam/provides.py": [ + "interface:aws-iam", + "static", + "176d6cd61122bbc3f83a090a1d5c4ac95139e477079fcdefd7ded5863979a4ac" + ], + "hooks/relations/aws-iam/requires.py": [ + "interface:aws-iam", + "static", + "65fab5cb68b1ba2d2cee5c11e74a4ed0002321079af4019a8dde00f83d6c8188" + ], + "hooks/relations/aws-integration/.gitignore": [ + "interface:aws-integration", + "static", + "315971ad9cc5d6ada2391f0940e1800149b211a18be3c7a8f396735d7978702b" + ], + "hooks/relations/aws-integration/LICENSE": [ + "interface:aws-integration", + "static", + "cfc7749b96f63bd31c3c42b5c471bf756814053e847c10f3eb003417bc523d30" + ], + "hooks/relations/aws-integration/README.md": [ + "interface:aws-integration", + "static", + "1585d72b136158ce0741fc2ce0d7710c1ec55662f846afe2e768a4708c51057e" + ], + "hooks/relations/aws-integration/__init__.py": [ + "interface:aws-integration", + "static", + "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855" + ], + "hooks/relations/aws-integration/copyright": [ + "interface:aws-integration", + "static", + "7c0e36e618a8544faaaa3f8e0533c2f1f4a18bcacbdd8b99b537742e6b587d58" + ], + "hooks/relations/aws-integration/docs/provides.md": [ + "interface:aws-integration", + "static", + "a7669f49156173c27ede87105f6e65a07e1e5e41f3c154a24e1a82f307f65073" + ], + "hooks/relations/aws-integration/docs/requires.md": [ + "interface:aws-integration", + "static", + "09553e5f07f216e5234125fdf38a21af00ab11349cdb788b21703ae72b0aeed1" + ], + "hooks/relations/aws-integration/interface.yaml": [ + "interface:aws-integration", + "static", + "4449f48e5aaa99c0bb3e8e1c9833d11d3b20fc5f81ae1f15b6442af5ec873167" + ], + "hooks/relations/aws-integration/make_docs": [ + "interface:aws-integration", + "static", + "b471fefc7eaa5c377d47b2b63481d6c8f4c5e9d224428efe93c5abbd13a0817d" + ], + "hooks/relations/aws-integration/provides.py": [ + 
"interface:aws-integration", + "static", + "ee8f91b281d9112999f3d0e1d2ac17964fca3af5102fe5b072f3f3659b932ab7" + ], + "hooks/relations/aws-integration/pydocmd.yml": [ + "interface:aws-integration", + "static", + "8c242cde2b2517c74de8ad6b1b90d2f6d97b2eb86c54edaf2eb8a8f7d32913e8" + ], + "hooks/relations/aws-integration/requires.py": [ + "interface:aws-integration", + "static", + "3006d6a2607bc15507bec3e6144093c6938a51a22eee1f550d714ff702728c39" + ], + "hooks/relations/azure-integration/.gitignore": [ + "interface:azure-integration", + "static", + "9653f2820c79d92ac3518eedd0e1f43ffec128d5df9216c25d906fcba8ee46b8" + ], + "hooks/relations/azure-integration/LICENSE": [ + "interface:azure-integration", + "static", + "cfc7749b96f63bd31c3c42b5c471bf756814053e847c10f3eb003417bc523d30" + ], + "hooks/relations/azure-integration/README.md": [ + "interface:azure-integration", + "static", + "c7799dba9471709e086dcd2ea272ad7a6e33f5058d875ce2bf5b3a6939d4a1e7" + ], + "hooks/relations/azure-integration/__init__.py": [ + "interface:azure-integration", + "static", + "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855" + ], + "hooks/relations/azure-integration/copyright": [ + "interface:azure-integration", + "static", + "7c0e36e618a8544faaaa3f8e0533c2f1f4a18bcacbdd8b99b537742e6b587d58" + ], + "hooks/relations/azure-integration/docs/provides.md": [ + "interface:azure-integration", + "static", + "60ae63187cac32c00d9f462f1723c9487960c728beae871f1a409c92196cc1f5" + ], + "hooks/relations/azure-integration/docs/requires.md": [ + "interface:azure-integration", + "static", + "b01e313c8ce3d02093e851bd84d5e8b7ae77b300c4b06b5048bddc78c1ad3eb3" + ], + "hooks/relations/azure-integration/interface.yaml": [ + "interface:azure-integration", + "static", + "cea5bfd87c278bd3f2e8dc00e654930f06d2bd91ef731a063edea14b04d9128a" + ], + "hooks/relations/azure-integration/make_docs": [ + "interface:azure-integration", + "static", + "e76f4a64c2fdc4a9f97a57d6515b4a25f9404d7043f2792db5206bc44213927c" + 
], + "hooks/relations/azure-integration/provides.py": [ + "interface:azure-integration", + "static", + "33af701c7abd51e869de945c1f032749136c66560bb604e8e72521dc9d7e495b" + ], + "hooks/relations/azure-integration/pydocmd.yml": [ + "interface:azure-integration", + "static", + "4c17085efb4ec328891b49257413eed4d9a552eeea8e589509e48081effe51ed" + ], + "hooks/relations/azure-integration/requires.py": [ + "interface:azure-integration", + "static", + "2e60fecf8bc65d84124742d0833afc90d2e839f5dfa2923e8d1849063c51f47a" + ], + "hooks/relations/ceph-admin/.gitignore": [ + "interface:ceph-admin", + "static", + "38da8f2fbf99eb7b9ec38ea900ed13681803bbfa3482929cfeeaec86c591aa50" + ], + "hooks/relations/ceph-admin/README.md": [ + "interface:ceph-admin", + "static", + "805e4836c511fd78ac54e9377ac20430b736bcb96baf4d1106c6779c7c2ae4f4" + ], + "hooks/relations/ceph-admin/__init__.py": [ + "interface:ceph-admin", + "static", + "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855" + ], + "hooks/relations/ceph-admin/interface.yaml": [ + "interface:ceph-admin", + "static", + "c9dc8e16173423a4a13dbfa247c48d587c08097529a7060e7cd64b75ef53e19c" + ], + "hooks/relations/ceph-admin/requires.py": [ + "interface:ceph-admin", + "static", + "3ccb57e3d033b0f281a0ebc60d64e1bc43e6e3fd008ba089c36b40955731a372" + ], + "hooks/relations/ceph-client/README.md": [ + "interface:ceph-client", + "static", + "475c8bff2d3041b7e22f4870bb6c8d73ccd88a53f53471dddae8ec5572b6caa2" + ], + "hooks/relations/ceph-client/__init__.py": [ + "interface:ceph-client", + "static", + "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855" + ], + "hooks/relations/ceph-client/interface.yaml": [ + "interface:ceph-client", + "static", + "963f7b670b81d9ef1acc4c54a9ee4593f33c1864e199cfcb6cbf9deb15a0f0c4" + ], + "hooks/relations/ceph-client/lib/base_provides.py": [ + "interface:ceph-client", + "static", + "749435e1ea8794722f72838c97536090bc89f423c852040c2131dfb9dc71e0f8" + ], + 
"hooks/relations/ceph-client/lib/base_requires.py": [ + "interface:ceph-client", + "static", + "105fd680689b85516e0768da7e114dd5fc3b5fb7970ab7bb6d00122c81f7b3e1" + ], + "hooks/relations/ceph-client/provides.py": [ + "interface:ceph-client", + "static", + "ede8c70822bca0fd8ec5da9586ae390afa7e14878e158081fbe2c7ce8bc2f270" + ], + "hooks/relations/ceph-client/requires.py": [ + "interface:ceph-client", + "static", + "ddeebe898592169ffc8b54f8536ed1387981401cf43e40d90972d46bc5353dc6" + ], + "hooks/relations/container-runtime/.gitignore": [ + "interface:container-runtime", + "static", + "a2ebfecdb6c1b58267fbe97e6e2ac02c2b963df7673fc1047270f0f0cff16732" + ], + "hooks/relations/container-runtime/LICENSE": [ + "interface:container-runtime", + "static", + "c71d239df91726fc519c6eb72d318ec65820627232b2f796219e87dcf35d0ab4" + ], + "hooks/relations/container-runtime/README.md": [ + "interface:container-runtime", + "static", + "44273265818229d2c858c3af0e0eee3a7df05aaa9ab20d28c3872190d4b48611" + ], + "hooks/relations/container-runtime/__init__.py": [ + "interface:container-runtime", + "static", + "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855" + ], + "hooks/relations/container-runtime/interface.yaml": [ + "interface:container-runtime", + "static", + "e5343dcb11a6817a6050df4ea1c463eeaa0dd4777098566d4e27b056775426c6" + ], + "hooks/relations/container-runtime/provides.py": [ + "interface:container-runtime", + "static", + "4e818da222f507604179a828629787a1250083c847277f6b5b8e028cfbbb6d06" + ], + "hooks/relations/container-runtime/requires.py": [ + "interface:container-runtime", + "static", + "95285168b02f1f70be15c03098833a85e60fa1658ed72a46acd42e8e85ded761" + ], + "hooks/relations/coordinator/peers.py": [ + "layer:coordinator", + "static", + "d615c442396422a30a0c5f7639750d15bb59247ae5d9362c4f5dc8dd2cc7fff2" + ], + "hooks/relations/etcd/.gitignore": [ + "interface:etcd", + "static", + "cf237c7aff44efbe6e502e645c3e06da03a69d7bdeb43392108ef3348143417e" + ], + 
"hooks/relations/etcd/README.md": [ + "interface:etcd", + "static", + "93873d073f5f5302d352e09321aaf87458556e9730f89e1c682699c1d0db2386" + ], + "hooks/relations/etcd/__init__.py": [ + "interface:etcd", + "static", + "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855" + ], + "hooks/relations/etcd/interface.yaml": [ + "interface:etcd", + "static", + "ba9f723b57a434f7efb2c06abec4167cd412c16da5f496a477dd7691e9a715be" + ], + "hooks/relations/etcd/peers.py": [ + "interface:etcd", + "static", + "99419c3d139fb5bb90021e0482f9e7ac2cfb776fb7af79b46209c6a75b36e834" + ], + "hooks/relations/etcd/provides.py": [ + "interface:etcd", + "static", + "3db1f644ab669e2dec59d59b61de63b721bc05b38fe646e525fff8f0d60982f9" + ], + "hooks/relations/etcd/requires.py": [ + "interface:etcd", + "static", + "8ffc1a094807fd36a1d1428b0a07b2428074134d46086066ecd6c0acd9fcd13e" + ], + "hooks/relations/gcp-integration/.gitignore": [ + "interface:gcp-integration", + "static", + "9653f2820c79d92ac3518eedd0e1f43ffec128d5df9216c25d906fcba8ee46b8" + ], + "hooks/relations/gcp-integration/LICENSE": [ + "interface:gcp-integration", + "static", + "cfc7749b96f63bd31c3c42b5c471bf756814053e847c10f3eb003417bc523d30" + ], + "hooks/relations/gcp-integration/README.md": [ + "interface:gcp-integration", + "static", + "dab3f4a03f02dec0095883054780e3e3f1bf63262b06a9fd499364a3db8b1e97" + ], + "hooks/relations/gcp-integration/__init__.py": [ + "interface:gcp-integration", + "static", + "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855" + ], + "hooks/relations/gcp-integration/copyright": [ + "interface:gcp-integration", + "static", + "7c0e36e618a8544faaaa3f8e0533c2f1f4a18bcacbdd8b99b537742e6b587d58" + ], + "hooks/relations/gcp-integration/docs/provides.md": [ + "interface:gcp-integration", + "static", + "a67cda4094b4d601c8de63cf099ba2e83fecf3a8382e88f44e58b98be8872fa6" + ], + "hooks/relations/gcp-integration/docs/requires.md": [ + "interface:gcp-integration", + "static", + 
"d7e6d7dc90b74d35bf2bd10b00b3ba289ab856dc79ec51046508a85b9dda35a3" + ], + "hooks/relations/gcp-integration/interface.yaml": [ + "interface:gcp-integration", + "static", + "368e8ade9267b905dcb2e6843e7ed61bd6d246f0b0c18942e729f546d5db2260" + ], + "hooks/relations/gcp-integration/make_docs": [ + "interface:gcp-integration", + "static", + "5bf011da5045c31da97a67b8633d30ea90adc6c0d4d823f839fce6e07e5fe222" + ], + "hooks/relations/gcp-integration/provides.py": [ + "interface:gcp-integration", + "static", + "839f15cf978cf94343772889846ad3e2b8375372ef25ed08036207e5608b1f48" + ], + "hooks/relations/gcp-integration/pydocmd.yml": [ + "interface:gcp-integration", + "static", + "2d5a524cbde5ccf732b67382a85deb7c26dfb92315c30d26c2b2d5632a2a8f38" + ], + "hooks/relations/gcp-integration/requires.py": [ + "interface:gcp-integration", + "static", + "79c75c6c76b37bc5ac486ac2e14f853223c4c603850d2f231f187ab255cbdbf0" + ], + "hooks/relations/grafana-dashboard/.gitignore": [ + "interface:grafana-dashboard", + "static", + "5567034242cd31b5fb3a0d7e1f4cee8a2bb7454d4b35d4051f333145b09ff881" + ], + "hooks/relations/grafana-dashboard/LICENSE": [ + "interface:grafana-dashboard", + "static", + "cfc7749b96f63bd31c3c42b5c471bf756814053e847c10f3eb003417bc523d30" + ], + "hooks/relations/grafana-dashboard/README.md": [ + "interface:grafana-dashboard", + "static", + "d46e6c55423b4f0e28f803702632739582f3c0fad5d0427346f210eba8879685" + ], + "hooks/relations/grafana-dashboard/__init__.py": [ + "interface:grafana-dashboard", + "static", + "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855" + ], + "hooks/relations/grafana-dashboard/common.py": [ + "interface:grafana-dashboard", + "static", + "965f19c07d3475d7fe5a21235dc0cf1a27f11da9dad498d0cd1a51260b999aa3" + ], + "hooks/relations/grafana-dashboard/copyright": [ + "interface:grafana-dashboard", + "static", + "ee9809231ae81b9efc2b44b52aab2f6c8e4800319fdce5acad537b0eac556de4" + ], + "hooks/relations/grafana-dashboard/docs/common.md": [ + 
"interface:grafana-dashboard", + "static", + "ab69cc6e293b66175dfeee09707f8d02659ae5ba5b9aa4c441295a1025db12f7" + ], + "hooks/relations/grafana-dashboard/docs/provides.md": [ + "interface:grafana-dashboard", + "static", + "626b5655ce1e9f7733c86379fe67709e840b760046d899e5d761b034f94d939e" + ], + "hooks/relations/grafana-dashboard/docs/requires.md": [ + "interface:grafana-dashboard", + "static", + "4f78cff5a0395aff8477267e925066bfa93654eaeb4ba812c682f968171cca55" + ], + "hooks/relations/grafana-dashboard/interface.yaml": [ + "interface:grafana-dashboard", + "static", + "97e4c9a33360708668aa0330323fe9e9e5e95fa5a1e02d4f6b8e8dc60e155b52" + ], + "hooks/relations/grafana-dashboard/provides.py": [ + "interface:grafana-dashboard", + "static", + "cd63928094e6d34be92944ce65cb5b01ff9ba2bd9646036d006fa743a3c0fdb5" + ], + "hooks/relations/grafana-dashboard/requires.py": [ + "interface:grafana-dashboard", + "static", + "b071b9e66a3206351f563d7a4d160499b13a6af29d80930cb01720b5974e1dd2" + ], + "hooks/relations/hacluster/.stestr.conf": [ + "interface:hacluster", + "static", + "46965969e6df6ac729b7dac68d57bc4e677e9f4d79d445be77f54ca3b9e58774" + ], + "hooks/relations/hacluster/README.md": [ + "interface:hacluster", + "static", + "7fad91e409c6e559cdb76d11c89c325531adc25679049a629a28c4f890755f1f" + ], + "hooks/relations/hacluster/__init__.py": [ + "interface:hacluster", + "static", + "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855" + ], + "hooks/relations/hacluster/copyright": [ + "interface:hacluster", + "static", + "7a296596102da98cecee289a195e00d6af44241911321699b3d4d4af93f11893" + ], + "hooks/relations/hacluster/interface.yaml": [ + "interface:hacluster", + "static", + "5f4e6c8d7b2884bdceeee422821f4db7163dbfa7994d86cb405ffef2c3dea43c" + ], + "hooks/relations/hacluster/interface_hacluster/__init__.py": [ + "interface:hacluster", + "static", + "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855" + ], + 
"hooks/relations/hacluster/interface_hacluster/common.py": [ + "interface:hacluster", + "static", + "eabe164702e7a98dd7e05e1ed34e556cfad4f43b37b015c8e21b51c84a316a2c" + ], + "hooks/relations/hacluster/requires.py": [ + "interface:hacluster", + "static", + "68cf3ed22af30e42f34fc70ca484e8e4eeaedac6410bd3f228677cc791e6f46c" + ], + "hooks/relations/hacluster/test-requirements.txt": [ + "interface:hacluster", + "static", + "63756e4b1c67bc161cee0d30d460dbb83911b2c064dc1c55454a30c1ab877616" + ], + "hooks/relations/http/.gitignore": [ + "interface:http", + "static", + "83b4ca18cc39800b1d260b5633cd0252e21501b21e7c33e718db44f1a68a09b8" + ], + "hooks/relations/http/README.md": [ + "interface:http", + "static", + "9c95320ad040745374fc03e972077f52c27e07eb0386ec93ae19bd50dca24c0d" + ], + "hooks/relations/http/__init__.py": [ + "interface:http", + "static", + "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855" + ], + "hooks/relations/http/interface.yaml": [ + "interface:http", + "static", + "d0b64038b85b7791ee4f3a42d73ffc8c208f206f73f899cbf33a519d12f9ad13" + ], + "hooks/relations/http/provides.py": [ + "interface:http", + "static", + "8c72cd8a5a6ea24f53b6dba11f4353c75265bfa7d3ecc2dd096c8963eab8c877" + ], + "hooks/relations/http/requires.py": [ + "interface:http", + "static", + "76cc886368eaf9c2403a6dc46b40531c3f4eaf67b08829f890c57cb645430abd" + ], + "hooks/relations/keystone-credentials/.gitignore": [ + "interface:keystone-credentials", + "static", + "ddc61d479977d318682280fa2b18bcb6cb9a1b0e0e7897cea3d14d5c8d222e68" + ], + "hooks/relations/keystone-credentials/.gitreview": [ + "interface:keystone-credentials", + "static", + "79122a6758c1a504d6caa55ca329e9028caf5d9a52516a4a77be2a1e676d45c8" + ], + "hooks/relations/keystone-credentials/.stestr.conf": [ + "interface:keystone-credentials", + "static", + "46965969e6df6ac729b7dac68d57bc4e677e9f4d79d445be77f54ca3b9e58774" + ], + "hooks/relations/keystone-credentials/.zuul.yaml": [ + "interface:keystone-credentials", + 
"static", + "c240e43920d05095cf5a0a9aa648685676c12bdcbb3874b79bbec5b5e7b18b7c" + ], + "hooks/relations/keystone-credentials/__init__.py": [ + "interface:keystone-credentials", + "static", + "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855" + ], + "hooks/relations/keystone-credentials/copyright": [ + "interface:keystone-credentials", + "static", + "7a296596102da98cecee289a195e00d6af44241911321699b3d4d4af93f11893" + ], + "hooks/relations/keystone-credentials/interface.yaml": [ + "interface:keystone-credentials", + "static", + "daa50ddd8948bdd6d6f8838498aa4251219f3bbe23344a05477764e6fc5ca33f" + ], + "hooks/relations/keystone-credentials/provides.py": [ + "interface:keystone-credentials", + "static", + "67b853e714b2f43cbd671a4d6c1b85330938a6d8e24da9bf88236efcbe033499" + ], + "hooks/relations/keystone-credentials/requires.py": [ + "interface:keystone-credentials", + "static", + "92d591067b288de5336e6228a2c84be5839354bfd050d7ce84df62c03a813785" + ], + "hooks/relations/keystone-credentials/test-requirements.txt": [ + "interface:keystone-credentials", + "static", + "38a6e3c379a0689eb8f95d0107865847d528c020561669aad4287e1108df6ca7" + ], + "hooks/relations/kube-control/.travis.yml": [ + "interface:kube-control", + "static", + "c2bd1b88f26c88b883696cca155c28671359a256ed48b90a9ea724b376f2a829" + ], + "hooks/relations/kube-control/README.md": [ + "interface:kube-control", + "static", + "66ee58f59efceefa21f7f2d7f88c1d75c07a16bbec8d09a83a7fda6373eab421" + ], + "hooks/relations/kube-control/__init__.py": [ + "interface:kube-control", + "static", + "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855" + ], + "hooks/relations/kube-control/interface.yaml": [ + "interface:kube-control", + "static", + "07e3d781283ecbb59c780cc8e4aeb9f030f22d2db6c28d731b74a36ab126960d" + ], + "hooks/relations/kube-control/provides.py": [ + "interface:kube-control", + "static", + "5a99a8549e0c9b41fbc1800b39a1ac2df8c46e1d33ec6c295c6ab139cd28ed56" + ], + 
"hooks/relations/kube-control/requires.py": [ + "interface:kube-control", + "static", + "c5650e6db3d47b3770e72ddddc68bfd84b0a643866cf67495c148625179a2465" + ], + "hooks/relations/kube-dns/README.md": [ + "interface:kube-dns", + "static", + "f02265c0931c5582cbad911050ee1578c370e4ecaffdbf56d11505f97ce44fee" + ], + "hooks/relations/kube-dns/__init__.py": [ + "interface:kube-dns", + "static", + "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855" + ], + "hooks/relations/kube-dns/interface.yaml": [ + "interface:kube-dns", + "static", + "e4ca8faafe4cce43eed862d35346780df4cba4eb243baaf5aecd891514deb26d" + ], + "hooks/relations/kube-dns/provides.py": [ + "interface:kube-dns", + "static", + "f0ea4f0610779a70860d5257f0760f62ea2ec682c5f005ba5afff92c9824aa36" + ], + "hooks/relations/kube-dns/requires.py": [ + "interface:kube-dns", + "static", + "38b819b7ee98c3c38142d2cc8122dedd9d8c0f34767c5cc11392a564f38db370" + ], + "hooks/relations/kube-masters/README.md": [ + "interface:kube-masters", + "static", + "ba5816b187473639c31dade1cade2df0fcb25cb20aa70ad273d294694c3400f7" + ], + "hooks/relations/kube-masters/__init__.py": [ + "interface:kube-masters", + "static", + "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855" + ], + "hooks/relations/kube-masters/interface.yaml": [ + "interface:kube-masters", + "static", + "b7402e029bb06c33a98c137be525555fcaf9eff762004c8f27c873e0c70c87db" + ], + "hooks/relations/kube-masters/peers.py": [ + "interface:kube-masters", + "static", + "b44322e7af30c28592eef5f1c707189a98351f0a51400d68c9466cad811a4310" + ], + "hooks/relations/kubernetes-cni/.github/workflows/tests.yaml": [ + "interface:kubernetes-cni", + "static", + "d0015cd49675976ff87832f5ef7ea20ffca961786379c72bb6acdbdeddd9137c" + ], + "hooks/relations/kubernetes-cni/.gitignore": [ + "interface:kubernetes-cni", + "static", + "0594213ebf9c6ef87827b30405ee67d847f73f4185a865e0e5e9c0be9d29eabe" + ], + "hooks/relations/kubernetes-cni/README.md": [ + 
"interface:kubernetes-cni", + "static", + "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855" + ], + "hooks/relations/kubernetes-cni/__init__.py": [ + "interface:kubernetes-cni", + "static", + "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855" + ], + "hooks/relations/kubernetes-cni/interface.yaml": [ + "interface:kubernetes-cni", + "static", + "03affdaf7e879adfdf8c434aa31d40faa6d2872faa7dfd93a5d3a1ebae02487d" + ], + "hooks/relations/kubernetes-cni/provides.py": [ + "interface:kubernetes-cni", + "static", + "2da15a0d547c3d3a6fb4745078a54d61136362c343fdf8635de14dbf714ba264" + ], + "hooks/relations/kubernetes-cni/requires.py": [ + "interface:kubernetes-cni", + "static", + "2544a8ea5f5947f8b729a0db1efe9506d2bba819ba2798eba1437a6a725c17d4" + ], + "hooks/relations/nrpe-external-master/README.md": [ + "interface:nrpe-external-master", + "static", + "d8ed3bc7334f6581b12b6091923f58e6f5ef62075a095a4e78fb8f434a948636" + ], + "hooks/relations/nrpe-external-master/__init__.py": [ + "interface:nrpe-external-master", + "static", + "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855" + ], + "hooks/relations/nrpe-external-master/interface.yaml": [ + "interface:nrpe-external-master", + "static", + "894f24ba56148044dae5b7febf874b427d199239bcbe1f2f55c3db06bb77b5f0" + ], + "hooks/relations/nrpe-external-master/provides.py": [ + "interface:nrpe-external-master", + "static", + "54e5400de99c051ecf6453776ad416b1cb8c6b73b34cbe2f41b617a8ed7b9daa" + ], + "hooks/relations/openstack-integration/.gitignore": [ + "interface:openstack-integration", + "static", + "9653f2820c79d92ac3518eedd0e1f43ffec128d5df9216c25d906fcba8ee46b8" + ], + "hooks/relations/openstack-integration/LICENSE": [ + "interface:openstack-integration", + "static", + "cfc7749b96f63bd31c3c42b5c471bf756814053e847c10f3eb003417bc523d30" + ], + "hooks/relations/openstack-integration/README.md": [ + "interface:openstack-integration", + "static", + 
"ca58e21bd973f6e65f7a8a06b4aeabd50bf137ab6fab9c8defa8789b02df3aa5" + ], + "hooks/relations/openstack-integration/__init__.py": [ + "interface:openstack-integration", + "static", + "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855" + ], + "hooks/relations/openstack-integration/copyright": [ + "interface:openstack-integration", + "static", + "7c0e36e618a8544faaaa3f8e0533c2f1f4a18bcacbdd8b99b537742e6b587d58" + ], + "hooks/relations/openstack-integration/docs/provides.md": [ + "interface:openstack-integration", + "static", + "ec4b81da3dfeac892f94053d753b56e504f5fd9c6ec4e743efa40efade3aa651" + ], + "hooks/relations/openstack-integration/docs/requires.md": [ + "interface:openstack-integration", + "static", + "95424fe767a26e3208800b4099f8768212b0a72b989ee145f181b67d678e3bbe" + ], + "hooks/relations/openstack-integration/interface.yaml": [ + "interface:openstack-integration", + "static", + "11b07a41bd2e24765231c4b7c7218da15f2173398d8d73698ecb210e599d02f6" + ], + "hooks/relations/openstack-integration/make_docs": [ + "interface:openstack-integration", + "static", + "a564aac288cc0bf4ff14418a341f11b065988c2b64adf93ec451e09dd92dcea5" + ], + "hooks/relations/openstack-integration/provides.py": [ + "interface:openstack-integration", + "static", + "b057676b2d51e99d3df4c7b2699887394c20228aeed692cd64fa832fb84b392d" + ], + "hooks/relations/openstack-integration/pydocmd.yml": [ + "interface:openstack-integration", + "static", + "3568f8a3c1446dfd736f31050e2b470bf125cc41717d156a4b866c7ea53861be" + ], + "hooks/relations/openstack-integration/requires.py": [ + "interface:openstack-integration", + "static", + "2fb96bf45e0b24d2da57f56c640b163b5ee4df4d698f7481af6efa3470d16263" + ], + "hooks/relations/prometheus-manual/.gitignore": [ + "interface:prometheus-manual", + "static", + "5567034242cd31b5fb3a0d7e1f4cee8a2bb7454d4b35d4051f333145b09ff881" + ], + "hooks/relations/prometheus-manual/LICENSE": [ + "interface:prometheus-manual", + "static", + 
"cfc7749b96f63bd31c3c42b5c471bf756814053e847c10f3eb003417bc523d30" + ], + "hooks/relations/prometheus-manual/README.md": [ + "interface:prometheus-manual", + "static", + "506d4a334ebbe40905c76fc74e4ab5285d836ac28c7d1087b85b5a304960be2e" + ], + "hooks/relations/prometheus-manual/__init__.py": [ + "interface:prometheus-manual", + "static", + "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855" + ], + "hooks/relations/prometheus-manual/common.py": [ + "interface:prometheus-manual", + "static", + "013107b3bc8f148779ada8097db725ac9c3d22c605a5794cb8bae95cace9fa4c" + ], + "hooks/relations/prometheus-manual/copyright": [ + "interface:prometheus-manual", + "static", + "ee9809231ae81b9efc2b44b52aab2f6c8e4800319fdce5acad537b0eac556de4" + ], + "hooks/relations/prometheus-manual/docs/common.md": [ + "interface:prometheus-manual", + "static", + "91b9e9300a2fef2ce1112cdc57a224ee06ab513ea127edc8a59b6ce9c715cd25" + ], + "hooks/relations/prometheus-manual/docs/provides.md": [ + "interface:prometheus-manual", + "static", + "6b226c2587dbf5b304e6466f2b31bbb208512896b2ab057b11b646cf3501e292" + ], + "hooks/relations/prometheus-manual/docs/requires.md": [ + "interface:prometheus-manual", + "static", + "0100bdc38afd892336747eac005260bc9656ffc1a40f9fb0faef824ab07c1021" + ], + "hooks/relations/prometheus-manual/interface.yaml": [ + "interface:prometheus-manual", + "static", + "4a268318ee2adcc8a5a3482d49595d3805f94bf8976bd1ee4a4f7f9db89e472e" + ], + "hooks/relations/prometheus-manual/provides.py": [ + "interface:prometheus-manual", + "static", + "232917934637d8905ddcd448ce51c2c30dcb9217e043592be356d510c09190c4" + ], + "hooks/relations/prometheus-manual/requires.py": [ + "interface:prometheus-manual", + "static", + "0492a9f1037f39479f2e607162aa48ca67451e00124541a7d56f7e0a920903e0" + ], + "hooks/relations/public-address/README.md": [ + "interface:public-address", + "static", + "7225effe61bfd8571447b8b685a2ecb52be17431b3066a5306330954c4cb064d" + ], + 
"hooks/relations/public-address/__init__.py": [ + "interface:public-address", + "static", + "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855" + ], + "hooks/relations/public-address/interface.yaml": [ + "interface:public-address", + "static", + "49d6777a54aa84c7d3be8d531be237564e90f2e4cb2be05ef5617a372a382340" + ], + "hooks/relations/public-address/provides.py": [ + "interface:public-address", + "static", + "7c99b0fe987d38773ed3e67c0378fdb78748c04d6895489cd4bca40aaeb051b2" + ], + "hooks/relations/public-address/requires.py": [ + "interface:public-address", + "static", + "d6a7c6c0762d29a5db19afb4cf82af50812988d5e19a3a48fcbe8b0f6fec12a5" + ], + "hooks/relations/tls-certificates/.gitignore": [ + "interface:tls-certificates", + "static", + "b485e74def213c534676224e655e9276b62d401ebc643508ddc545dd335cb6dc" + ], + "hooks/relations/tls-certificates/README.md": [ + "interface:tls-certificates", + "static", + "6851227de8fcca7edfd504159dbe3e3af31080af64df46f3d3b345da7630827a" + ], + "hooks/relations/tls-certificates/__init__.py": [ + "interface:tls-certificates", + "static", + "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855" + ], + "hooks/relations/tls-certificates/docs/common.md": [ + "interface:tls-certificates", + "static", + "5e91d6637fc0ccc50af2776de9e59a0f8098244b627816b2e18fabb266e980ff" + ], + "hooks/relations/tls-certificates/docs/provides.md": [ + "interface:tls-certificates", + "static", + "5c12dfca99b5c15ba10b4e7f7cff4cb4c9b621b198deba5f2397d3c837d035fe" + ], + "hooks/relations/tls-certificates/docs/requires.md": [ + "interface:tls-certificates", + "static", + "148dd1de163d75253f0a9d3c35e108dcaacbc9bdf97e47186743e6c82a67b62e" + ], + "hooks/relations/tls-certificates/interface.yaml": [ + "interface:tls-certificates", + "static", + "e412e54b1d327bad15a882f7f0bf996212090db576b863cc9cff7a68afc0e4fa" + ], + "hooks/relations/tls-certificates/make_docs": [ + "interface:tls-certificates", + "static", + 
"3671543bddc9d277171263310e404df3f11660429582cb27b39b7e7ec8757a37" + ], + "hooks/relations/tls-certificates/provides.py": [ + "interface:tls-certificates", + "static", + "be2a4b9a411c770989c529fd887070ad91649481a13f5239cfd8751f234b637c" + ], + "hooks/relations/tls-certificates/pydocmd.yml": [ + "interface:tls-certificates", + "static", + "48a233f60a89f87d56e9bc715e05766f5d39bbea2bc8741ed31f67b30c8cfcb8" + ], + "hooks/relations/tls-certificates/requires.py": [ + "interface:tls-certificates", + "static", + "442d773112079bc674d3e6be75b00323fcad7efd2f03613a1972b575dd438dba" + ], + "hooks/relations/tls-certificates/tls_certificates_common.py": [ + "interface:tls-certificates", + "static", + "068bd32ba69bfa514e1da386919d18b348ee678b40c372f275c9110f2cc4677c" + ], + "hooks/relations/vault-kv/.gitignore": [ + "interface:vault-kv", + "static", + "996ad92a4713473baf27997a048901fdfa0039b9497bcc916f3f50b9000c1b96" + ], + "hooks/relations/vault-kv/README.md": [ + "interface:vault-kv", + "static", + "30082282d57b9a7c1d0bc0311ea0a2b9d50dd8f74829696b413524a4bbffb635" + ], + "hooks/relations/vault-kv/__init__.py": [ + "interface:vault-kv", + "static", + "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855" + ], + "hooks/relations/vault-kv/copyright": [ + "interface:vault-kv", + "static", + "d72972f963502ab390e2b3cdb72cc0f49afa0ef9e7d6e589607d260e6f9a577f" + ], + "hooks/relations/vault-kv/interface.yaml": [ + "interface:vault-kv", + "static", + "e021758bd6e3536c2cbc30f08354dd23c11e2a7cc4d3b93584d3646fa64c331d" + ], + "hooks/relations/vault-kv/provides.py": [ + "interface:vault-kv", + "static", + "82d6f62f8e92f12fe43a8803b17be29731c7e4e4b94ca53f6f141d2a3f0a5df4" + ], + "hooks/relations/vault-kv/requires.py": [ + "interface:vault-kv", + "static", + "eaa5e8eb962fcf9d3f655d88f3e27958ac3b2b87a16904bca7d426fb6136ac27" + ], + "hooks/relations/vault-kv/test-requirements.txt": [ + "interface:vault-kv", + "static", + 
"41b5d0f807a3166c534aa01f773dbdfbefcc9af37e369159a9dba6f0a8c75a78" + ], + "hooks/relations/vsphere-integration/.gitignore": [ + "interface:vsphere-integration", + "static", + "9653f2820c79d92ac3518eedd0e1f43ffec128d5df9216c25d906fcba8ee46b8" + ], + "hooks/relations/vsphere-integration/LICENSE": [ + "interface:vsphere-integration", + "static", + "cfc7749b96f63bd31c3c42b5c471bf756814053e847c10f3eb003417bc523d30" + ], + "hooks/relations/vsphere-integration/README.md": [ + "interface:vsphere-integration", + "static", + "8de815f0f938cb8f58c536899ed87e55aac507a782093bd50d50bd3c1d6add1c" + ], + "hooks/relations/vsphere-integration/__init__.py": [ + "interface:vsphere-integration", + "static", + "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855" + ], + "hooks/relations/vsphere-integration/copyright": [ + "interface:vsphere-integration", + "static", + "7c0e36e618a8544faaaa3f8e0533c2f1f4a18bcacbdd8b99b537742e6b587d58" + ], + "hooks/relations/vsphere-integration/docs/provides.md": [ + "interface:vsphere-integration", + "static", + "daa3c44a2df6d774adc60bde1160f1e307129be9d696f018eab4a7e713ee775a" + ], + "hooks/relations/vsphere-integration/docs/requires.md": [ + "interface:vsphere-integration", + "static", + "4e79bb1b151f1de63b423d39a6e1831efbb6f767fe5b84963162f62c6bbb9123" + ], + "hooks/relations/vsphere-integration/interface.yaml": [ + "interface:vsphere-integration", + "static", + "20295b882dfb9a1750d8e988eaa3383cd3109fae510785ba4e415d7fa9b118af" + ], + "hooks/relations/vsphere-integration/make_docs": [ + "interface:vsphere-integration", + "static", + "cd9d91049ee3c6e6148f4bd9204a34463dde905ce665cff25be014ffc1b81b89" + ], + "hooks/relations/vsphere-integration/provides.py": [ + "interface:vsphere-integration", + "static", + "8ccb09c4a3009b59caea227ef40395fb063d3e8ce983338060fb59bbe74138c0" + ], + "hooks/relations/vsphere-integration/pydocmd.yml": [ + "interface:vsphere-integration", + "static", + 
"9f8eb566569977f10955da67def28886737e80914ae000e4acfae1313d08f105" + ], + "hooks/relations/vsphere-integration/requires.py": [ + "interface:vsphere-integration", + "static", + "d56702f60037f06259752d3bd7882f7ee46f60a4ce7b6d1071520d69ec9351f9" + ], + "hooks/start": [ + "layer:basic", + "dynamic", + "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7" + ], + "hooks/stop": [ + "layer:basic", + "dynamic", + "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7" + ], + "hooks/update-status": [ + "layer:basic", + "dynamic", + "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7" + ], + "hooks/upgrade-charm": [ + "layer:basic", + "dynamic", + "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7" + ], + "hooks/vault-kv-relation-broken": [ + "layer:basic", + "dynamic", + "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7" + ], + "hooks/vault-kv-relation-changed": [ + "layer:basic", + "dynamic", + "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7" + ], + "hooks/vault-kv-relation-created": [ + "layer:basic", + "dynamic", + "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7" + ], + "hooks/vault-kv-relation-departed": [ + "layer:basic", + "dynamic", + "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7" + ], + "hooks/vault-kv-relation-joined": [ + "layer:basic", + "dynamic", + "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7" + ], + "hooks/vsphere-relation-broken": [ + "layer:basic", + "dynamic", + "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7" + ], + "hooks/vsphere-relation-changed": [ + "layer:basic", + "dynamic", + "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7" + ], + "hooks/vsphere-relation-created": [ + "layer:basic", + "dynamic", + "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7" + ], + "hooks/vsphere-relation-departed": [ + "layer:basic", + "dynamic", + 
"2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7" + ], + "hooks/vsphere-relation-joined": [ + "layer:basic", + "dynamic", + "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7" + ], + "icon.svg": [ + "kubernetes-control-plane", + "static", + "f06a2a469070d8b7325a8e648e021812f0f0dba7dfdffe0ca55113ac281f831f" + ], + "layer.yaml": [ + "kubernetes-control-plane", + "dynamic", + "79f672293369003c8d4e7c2c0793cfce083899ccf9e5bed817c5abdaf00f744a" + ], + "lib/charms/apt.py": [ + "layer:apt", + "static", + "c7613992eb33ac94d83fbf02f467b614ea5112eaf561c4715def90989cefa531" + ], + "lib/charms/coordinator.py": [ + "layer:coordinator", + "static", + "6dbacc87605be8efcbf19ec05341e4eb210327724495c79998a46947e034dbea" + ], + "lib/charms/layer/__init__.py": [ + "layer:basic", + "static", + "dfe0d26c6bf409767de6e2546bc648f150e1b396243619bad3aa0553ab7e0e6f" + ], + "lib/charms/layer/basic.py": [ + "layer:basic", + "static", + "d120158e0c305a3b4529426a1a63a2f59af4f5730dccf3a59a9ffe1988494cee" + ], + "lib/charms/layer/execd.py": [ + "layer:basic", + "static", + "fda8bd491032db1db8ddaf4e99e7cc878c6fb5432efe1f91cadb5b34765d076d" + ], + "lib/charms/layer/hacluster.py": [ + "layer:hacluster", + "static", + "f58e0c1503187247f858ff3c9a1166d59107afd1557ba89e4878ec2e79304f8a" + ], + "lib/charms/layer/kubernetes_common.py": [ + "layer:kubernetes-common", + "static", + "bc89bd609a8e94102e00a192b7ae3caa813cca5e356536330494742bfdb6c4cb" + ], + "lib/charms/layer/kubernetes_control_plane.py": [ + "kubernetes-control-plane", + "static", + "71e39cc61b8a6ee734c2073cf3b0488ec460a8028486f171d644aa7c6e879e28" + ], + "lib/charms/layer/kubernetes_node_base.py": [ + "layer:kubernetes-node-base", + "static", + "a7aee0b46a033497762d3e2d4e4308c56a3da72b693bf23d58c1bd4dcd9426d1" + ], + "lib/charms/layer/nagios.py": [ + "layer:nagios", + "static", + "0246710bdbea844356007a64409907d93e6e94a289d83266e8b7c5d921fb3a6c" + ], + "lib/charms/layer/options.py": [ + "layer:options", + 
"static", + "8ae7a07d22542fc964f2d2bee8219d1c78a68dace70a1b38d36d4aea47b1c3b2" + ], + "lib/charms/layer/snap.py": [ + "layer:snap", + "static", + "cac372a755d27c4aed87f2ad87e17d1bb5157f7e262ca6d249b1aac70a986a22" + ], + "lib/charms/layer/status.py": [ + "layer:status", + "static", + "d560a5e07b2e5f2b0f25f30e1f0278b06f3f90c01e4dbad5c83d71efc79018c6" + ], + "lib/charms/layer/tls_client.py": [ + "layer:tls-client", + "static", + "34531c3980777b661b913d77c432fc371ed10425473c2eb365b1dd5540c2ec6e" + ], + "lib/charms/layer/vault_kv.py": [ + "layer:vault-kv", + "static", + "9b7f9e90ceedf611078df5ca81bc5d95db863dd2bd8dd03420de4a43ca33f83f" + ], + "lib/charms/layer/vaultlocker.py": [ + "layer:vaultlocker", + "static", + "fc2ae363cc3c8a9b7d46b9ec1b96b53b97c357087a8de9ae90786586584b7eb5" + ], + "lib/charms/leadership.py": [ + "layer:leadership", + "static", + "20ffcbbc08147506759726ad51567420659ffb8a2e0121079240b8706658e332" + ], + "lib/debug_script.py": [ + "layer:debug", + "static", + "a4d56f2d3e712b1b5cadb657c7195c6268d0aac6d228991049fd769e0ddaf453" + ], + "lxd-profile.yaml": [ + "kubernetes-control-plane", + "static", + "e62700f1993721652d83756f89e1f8b33c5d0dec6fb27554f61aaf96ccd4e379" + ], + "make_docs": [ + "layer:vaultlocker", + "static", + "c990f55c8e879793a62ed8464ee3d7e0d7d2225fdecaf17af24b0df0e2daa8c1" + ], + "manifest.yaml": [ + "kubernetes-control-plane", + "static", + "ed0a9900c7c3eb181ac0734d3df18f9647bf380e2520dd490130fe8c52b63c21" + ], + "metadata.yaml": [ + "kubernetes-control-plane", + "dynamic", + "20f2a7bcfca785c1dbbc0cade3ffa8681f50035ae2778913b87a3710d12252bb" + ], + "metrics.yaml": [ + "kubernetes-control-plane", + "static", + "51805e00187180beb34a06c6c9d08b4a6889e02aec3e9b01043146f0002c8b51" + ], + "pydocmd.yml": [ + "layer:vaultlocker", + "static", + "145103565659638229fec4c2d6ad7161746a75f13167d1aa16c5cb66081faf82" + ], + "pyproject.toml": [ + "layer:apt", + "static", + "19689509a5fb9bfc90ed1e873122ac0a90f22533b7f40055c38fdd587fe297de" + ], + 
"reactive/__init__.py": [ + "layer:coordinator", + "static", + "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855" + ], + "reactive/apt.py": [ + "layer:apt", + "static", + "6fe40f18eb84a910a71a4acb7ec74856128de846de6029b4fc297a875692c837" + ], + "reactive/cdk_service_kicker.py": [ + "layer:cdk-service-kicker", + "static", + "cc2648443016a18324ecb26acb71d69c71610ba23df235f280383552136f7efc" + ], + "reactive/coordinator.py": [ + "layer:coordinator", + "static", + "18cda7ddf00ae0e47578d489fc3ebb376b4428cd0559797a87ddbead54360d02" + ], + "reactive/hacluster.py": [ + "layer:hacluster", + "static", + "7b56e9efc95ace190694e439eff210f0981811f89dc46a026a400e114f3f833d" + ], + "reactive/kubernetes_control_plane.py": [ + "kubernetes-control-plane", + "static", + "7f889bf824f218a52153743a0ee93f67f4c7de96804d58112ed656de2856ccce" + ], + "reactive/kubernetes_node_base.py": [ + "layer:kubernetes-node-base", + "static", + "ec71ca98f86d11552984054b3ebba9194c0cf71fbfb28b2a2a666afe11979d62" + ], + "reactive/leadership.py": [ + "layer:leadership", + "static", + "e2b233cf861adc3b2d9e9c062134ce2f104953f03283cdddd88f49efee652e8f" + ], + "reactive/snap.py": [ + "layer:snap", + "static", + "de11948e6a44a7186707266235d9fc133e59584c16a8d5d3be163dc0dd3bd46a" + ], + "reactive/status.py": [ + "layer:status", + "static", + "30207fc206f24e91def5252f1c7f7c8e23c0aed0e93076babf5e03c05296d207" + ], + "reactive/tls_client.py": [ + "layer:tls-client", + "static", + "08e850e401d2004523dca6b5e6bc47c33d558bf575dd55969491e11cd3ed98c8" + ], + "reactive/vault_kv.py": [ + "layer:vault-kv", + "static", + "3b7c88136e5cbb172ab84284ea54bbb40e3817a07b62f115b423b47b60d2fd39" + ], + "reactive/vaultlocker.py": [ + "layer:vaultlocker", + "static", + "28e31d57017933a3b7e44dfd1913bbb3525fa7910b9a43eb6ad320d098160a4f" + ], + "requirements.txt": [ + "layer:basic", + "static", + "a00f75d80849e5b4fc5ad2e7536f947c25b1a4044b341caa8ee87a92d3a4c804" + ], + "setup.py": [ + "layer:snap", + "static", + 
"b219c8c6cb138a2f70a8ef9136d1cc3fe6210bd1e28c99fccb5e7ae90d547164" + ], + "templates/cdk-service-kicker": [ + "layer:cdk-service-kicker", + "static", + "b17adff995310e14d1b510337efa0af0531b55e2c487210168829e0dc1a6f99b" + ], + "templates/cdk-service-kicker.service": [ + "layer:cdk-service-kicker", + "static", + "c2d3977fa89d453f0f13a8a823621c44bb642ec7392d8b7462b631864f665029" + ], + "templates/cdk.auth-webhook-secret.yaml": [ + "layer:kubernetes-common", + "static", + "efaf34c12a5c961fa7843199070945ba05717b3656a0f3acc3327f45334bcaec" + ], + "templates/cdk.master.auth-webhook-conf.yaml": [ + "kubernetes-control-plane", + "static", + "11df8c0c1a4157e7a552b864188df1dcdc99153a8b359667b640937251bad678" + ], + "templates/cdk.master.auth-webhook.logrotate": [ + "kubernetes-control-plane", + "static", + "c780038344a061fe3775ca03983702bf39cb8a30e6443ca7b6b3664cf5ac667b" + ], + "templates/cdk.master.auth-webhook.py": [ + "kubernetes-control-plane", + "static", + "f6d73401a45f7c5c3e72f237ab3553d460adf28589e99fefb0a25b955acd95cc" + ], + "templates/cdk.master.auth-webhook.service": [ + "kubernetes-control-plane", + "static", + "ec522a5763702457c957b40ff94798c116863c3fb8f6a5815a828c73e4c68e61" + ], + "templates/cdk.master.leader.file-watcher.path": [ + "kubernetes-control-plane", + "static", + "f5698867fafd661f270c41a7ddb3dccf14f974dff65decc59148888c4bc5a9d3" + ], + "templates/cdk.master.leader.file-watcher.service": [ + "kubernetes-control-plane", + "static", + "257879384d59766b822748fa44c5bcf6904954b0789dc93f034e7fbc79617ceb" + ], + "templates/cdk.master.leader.file-watcher.sh": [ + "kubernetes-control-plane", + "static", + "99a5398879580b219664447f58384244688fad9923f5aed7477403ac4cc813c5" + ], + "templates/ceph.conf": [ + "kubernetes-control-plane", + "static", + "0e8beeff0148ae579744beb63288db7c4e8da1d0342a97f562948d8fe89e1b12" + ], + "templates/create-namespace.yaml.j2": [ + "kubernetes-control-plane", + "static", + 
"8ace952fc7f873b0ec2c0f9843a3e39023e20da5397d799c442a78749aa96239" + ], + "templates/grafana/autoload/kubernetes.json": [ + "kubernetes-control-plane", + "static", + "085ee057337177c9ea31153b9c910eb7eabd6ca3536cc864ab19b047fd4b2e80" + ], + "templates/grafana/conditional/prometheus.json": [ + "kubernetes-control-plane", + "static", + "f066e89b01609616aca4da14439f51afe8bde0046d254273fb746a9ea582a3c5" + ], + "templates/grafana/conditional/telegraf.json": [ + "kubernetes-control-plane", + "static", + "845f3b66e6899693e53b09dca874d36d387e99c6714f525f6823ab27f81ef220" + ], + "templates/keystone-api-server-webhook.yaml": [ + "kubernetes-control-plane", + "static", + "57d856c3e55fbfddf08a3952c3a4864713c345f6a4fe42aa4861429c85af8de8" + ], + "templates/kube-keystone.sh": [ + "kubernetes-control-plane", + "static", + "f346f743809da597a37b6b3531cfb525de7cd7196827e818770ae57781f7f47b" + ], + "templates/kube-proxy-iptables-fix.sh": [ + "kubernetes-control-plane", + "static", + "62313fd28f76cfc6e5f2dd426c3ca7a728c91bf064d532e39d8e8fb51a115bbb" + ], + "templates/nagios_plugin.py": [ + "kubernetes-control-plane", + "static", + "0627dc0fe546a6262a9e8b0ca265d783d76ef18b546bb2966b4ff4114db1b392" + ], + "templates/prometheus/k8s-api-endpoints.yaml.j2": [ + "kubernetes-control-plane", + "static", + "78af8a158956011c8abfb11895fab3e67c2d7d6a092c09fa0b2ddead1ee9549b" + ], + "templates/prometheus/kube-state-metrics.yaml.j2": [ + "kubernetes-control-plane", + "static", + "cd01643061d21fc061fde4dacd28bd5dda9938ce531868c9c5a529a390ad29ec" + ], + "templates/prometheus/kube-state-telemetry.yaml.j2": [ + "kubernetes-control-plane", + "static", + "278e98c6abe9312053c08e72160181c968fcf447b3902bb608c4971deca7b192" + ], + "templates/prometheus/kubernetes-cadvisor.yaml.j2": [ + "kubernetes-control-plane", + "static", + "df55e745681353b08029262e4e806f6deed99add34c19950ccd0aa7dabcd226d" + ], + "templates/prometheus/kubernetes-nodes.yaml.j2": [ + "kubernetes-control-plane", + "static", + 
"6f22e3b7a6a87d7d50b1ad3099fccf08677acc434273f032952d8bf7f548c612" + ], + "templates/rbac-pod-security-policy.yaml": [ + "kubernetes-control-plane", + "static", + "b4e7b7c0976f1a0175c0e60b458e3e9d8bd486849033c4cff7d2684793aa603a" + ], + "templates/rbac-proxy.yaml": [ + "kubernetes-control-plane", + "static", + "abb77f196e008fc636c254c89672bb889ca34a91103972c11a5e2e59aa608400" + ], + "templates/service-always-restart.systemd-229.conf": [ + "kubernetes-control-plane", + "static", + "516958fbf8b9a05cc86f6700d0de7bdc6b2ba1847d69fbe1214e23b52e00b064" + ], + "templates/service-always-restart.systemd-latest.conf": [ + "kubernetes-control-plane", + "static", + "37de98817682363d48b3dd2b635f5cfb281533aaa9d3836d1af44f9d6a59984c" + ], + "templates/service-iptables-fix.service": [ + "kubernetes-control-plane", + "static", + "3f8a29c719c175e17a7a69756223babf0e2b56f8a8d69dbbd81e8e0889863669" + ], + "templates/system-monitoring-rbac-role.yaml": [ + "kubernetes-control-plane", + "static", + "a50f45a1e978ffeaf872f961c2f8ff95fbc144462baef42bcdda3c51da03f54f" + ], + "templates/vaultlocker-loop@.service": [ + "layer:vaultlocker", + "static", + "57d81403c04033d382094b3c8a60c4728eb0fad146746921fe7e770b4c49f758" + ], + "templates/vaultlocker.conf.j2": [ + "layer:vaultlocker", + "static", + "7428fcfb91731d37be14a0f8d4c5923cc95a28bd28579c5a013928ab147b0beb" + ], + "tests/data/ip_addr_json": [ + "layer:kubernetes-common", + "static", + "f129576a9e2c7738aca8669c642f123534eda63121ae450cec4cbda787b1eb06" + ], + "tests/functional/conftest.py": [ + "layer:kubernetes-common", + "static", + "fd53e0c38b4dda0c18096167889cd0d85b98b0a13225f9f8853261241e94078c" + ], + "tests/functional/test_k8s_common.py": [ + "layer:kubernetes-common", + "static", + "680a53724154771dd78422bbaf24b151788d86dd07960712c5d9e0d758499b50" + ], + "tests/unit/conftest.py": [ + "layer:vault-kv", + "static", + "56db93628f3d3e67763b536628a10efa3f694eedbcf9f60c48acf1bd87e2fa8f" + ], + "tests/unit/test_k8s_common.py": [ + 
"layer:kubernetes-common", + "static", + "23e097e7f21e4f4f062caac0146bb85373e895a30be1be5667b90d0e84435882" + ], + "tests/unit/test_layer.py": [ + "layer:kubernetes-node-base", + "static", + "67a2c3f0f8703e020bd92ea169f414e504d4af5c20cf8345ddb6a2d36d4ffa75" + ], + "tests/unit/test_reactive.py": [ + "layer:vault-kv", + "static", + "13d89ddaf82dadaa76ab3ed897a6dd4ef3917115d5be520e10544296796a14de" + ], + "tox.ini": [ + "layer:vaultlocker", + "static", + "716854030c843efa3afd32b8742807f1515b5fc705d8ae81aac9b447ffae411f" + ], + "version": [ + "kubernetes-control-plane", + "dynamic", + "a36d32d4b537bff7998870faf8069acd3e73541bab3bc95f15ba95ad12ec9e99" + ], + "wheelhouse.txt": [ + "kubernetes-control-plane", + "dynamic", + "8a918f7aad333d8462a2e623d1d1b6e18f6070df0a9b784842254fb034f0c35a" + ], + "wheelhouse/Jinja2-3.0.3.tar.gz": [ + "layer:basic", + "dynamic", + "611bb273cd68f3b993fabdc4064fc858c5b47a973cb5aa7999ec1ba405c87cd7" + ], + "wheelhouse/MarkupSafe-2.0.1.tar.gz": [ + "layer:basic", + "dynamic", + "594c67807fb16238b30c44bdf74f36c02cdf22d1c8cda91ef8a0ed8dabf5620a" + ], + "wheelhouse/PyYAML-5.3.1.tar.gz": [ + "layer:basic", + "dynamic", + "b8eac752c5e14d3eca0e6dd9199cd627518cb5ec06add0de9d32baeee6fe645d" + ], + "wheelhouse/aiohttp-3.7.4.post0.tar.gz": [ + "kubernetes-control-plane", + "dynamic", + "493d3299ebe5f5a7c66b9819eacdcfbbaaf1a8e84911ddffcdc48888497afecf" + ], + "wheelhouse/async-timeout-3.0.1.tar.gz": [ + "__pip__", + "dynamic", + "0c3c816a028d47f659d6ff5c745cb2acf1f966da1fe5c19c77a70282b25f4c5f" + ], + "wheelhouse/attrs-22.1.0.tar.gz": [ + "__pip__", + "dynamic", + "29adc2665447e5191d0e7c568fde78b21f9672d344281d0c6e1ab085429b22b6" + ], + "wheelhouse/cached-property-1.5.2.tar.gz": [ + "__pip__", + "dynamic", + "9fa5755838eecbb2d234c3aa390bd80fbd3ac6b6869109bfc1b499f7bd89a130" + ], + "wheelhouse/certifi-2022.6.15.tar.gz": [ + "__pip__", + "dynamic", + "84c85a9078b11105f04f3036a9482ae10e4621616db313fe045dd24743a0820d" + ], + 
"wheelhouse/chardet-4.0.0.tar.gz": [ + "__pip__", + "dynamic", + "0d6f53a15db4120f2b08c94f11e7d93d2c911ee118b6b30a04ec3ee8310179fa" + ], + "wheelhouse/charmhelpers-1.2.1.tar.gz": [ + "layer:basic", + "dynamic", + "298bb9e90d9392e2b66d10a5199b1b2d459dc8d5434b897913325904989dd2d7" + ], + "wheelhouse/charms.reactive-1.5.0.tar.gz": [ + "layer:basic", + "dynamic", + "b56484ed17f412c7738ff21e4ddc0e7c758af2288eac9fe521a86c8c31c1b150" + ], + "wheelhouse/charset-normalizer-2.0.12.tar.gz": [ + "__pip__", + "dynamic", + "2857e29ff0d34db842cd7ca3230549d1a697f96ee6d3fb071cfa6c7393832597" + ], + "wheelhouse/gunicorn-20.1.0.tar.gz": [ + "kubernetes-control-plane", + "dynamic", + "e0a968b5ba15f8a328fdfd7ab1fcb5af4470c28aaf7e55df02a99bc13138e6e8" + ], + "wheelhouse/hvac-0.11.2.tar.gz": [ + "layer:vault-kv", + "dynamic", + "f905c59d32d88d3f67571fe5a8a78de4659e04798ad809de439f667247d13626" + ], + "wheelhouse/idna-3.3.tar.gz": [ + "__pip__", + "dynamic", + "9d643ff0a55b762d5cdb124b8eaa99c66322e2157b69160bc32796e824360e6d" + ], + "wheelhouse/idna-ssl-1.1.0.tar.gz": [ + "__pip__", + "dynamic", + "a933e3bb13da54383f9e8f35dc4f9cb9eb9b3b78c6b36f311254d6d0d92c6c7c" + ], + "wheelhouse/loadbalancer_interface-1.1.1.tar.gz": [ + "kubernetes-control-plane", + "dynamic", + "c71d50bb66286d6e15a5f2975c0a316a3cd43c2042428258c96d1b4b95e5706b" + ], + "wheelhouse/marshmallow-3.14.1.tar.gz": [ + "__pip__", + "dynamic", + "4c05c1684e0e97fe779c62b91878f173b937fe097b356cd82f793464f5bc6138" + ], + "wheelhouse/marshmallow-enum-1.5.1.tar.gz": [ + "__pip__", + "dynamic", + "38e697e11f45a8e64b4a1e664000897c659b60aa57bfa18d44e226a9920b6e58" + ], + "wheelhouse/multidict-5.2.0.tar.gz": [ + "__pip__", + "dynamic", + "0dd1c93edb444b33ba2274b66f63def8a327d607c6c790772f448a53b6ea59ce" + ], + "wheelhouse/netaddr-0.7.19.tar.gz": [ + "layer:basic", + "dynamic", + "38aeec7cdd035081d3a4c306394b19d677623bf76fa0913f6695127c7753aefd" + ], + "wheelhouse/netifaces-0.11.0.tar.gz": [ + "layer:vault-kv", + "dynamic", + 
"043a79146eb2907edf439899f262b3dfe41717d34124298ed281139a8b93ca32" + ], + "wheelhouse/ops-1.5.0.tar.gz": [ + "__pip__", + "dynamic", + "1a73753a03d6816045d4a0b4942137e65d74a38da29fad975f7dfbd16e312b0d" + ], + "wheelhouse/ops_reactive_interface-1.0.1.tar.gz": [ + "__pip__", + "dynamic", + "9ed351c42fc187299c23125975aa3dfee9f6aaae0c9d49bce8904ac079255dba" + ], + "wheelhouse/pbr-5.9.0.tar.gz": [ + "__pip__", + "dynamic", + "e8dca2f4b43560edef58813969f52a56cef023146cbb8931626db80e6c1c4308" + ], + "wheelhouse/pip-18.1.tar.gz": [ + "layer:basic", + "dynamic", + "c0a292bd977ef590379a3f05d7b7f65135487b67470f6281289a94e015650ea1" + ], + "wheelhouse/psutil-5.9.1.tar.gz": [ + "layer:vault-kv", + "dynamic", + "57f1819b5d9e95cdfb0c881a8a5b7d542ed0b7c522d575706a80bedc848c8954" + ], + "wheelhouse/pyaml-21.10.1.tar.gz": [ + "__pip__", + "dynamic", + "c6519fee13bf06e3bb3f20cacdea8eba9140385a7c2546df5dbae4887f768383" + ], + "wheelhouse/requests-2.27.1.tar.gz": [ + "__pip__", + "dynamic", + "68d7c56fd5a8999887728ef304a6d12edc7be74f1cfa47714fc8b414525c9a61" + ], + "wheelhouse/setuptools-41.6.0.zip": [ + "layer:basic", + "dynamic", + "6afa61b391dcd16cb8890ec9f66cc4015a8a31a6e1c2b4e0c464514be1a3d722" + ], + "wheelhouse/setuptools_scm-1.17.0.tar.gz": [ + "layer:basic", + "dynamic", + "70a4cf5584e966ae92f54a764e6437af992ba42ac4bca7eb37cc5d02b98ec40a" + ], + "wheelhouse/six-1.16.0.tar.gz": [ + "__pip__", + "dynamic", + "1e61c37477a1626458e36f7b1d82aa5c9b094fa4802892072e49de9c60c4c926" + ], + "wheelhouse/tenacity-5.0.3.tar.gz": [ + "layer:snap", + "dynamic", + "24b7f302a1caa1801e58b39ea557129c095966e64e5b1ddad3c93a6cb033e38b" + ], + "wheelhouse/typing_extensions-3.10.0.2.tar.gz": [ + "kubernetes-control-plane", + "dynamic", + "49f75d16ff11f1cd258e1b988ccff82a3ca5570217d7ad8c5f48205dd99a677e" + ], + "wheelhouse/urllib3-1.26.11.tar.gz": [ + "__pip__", + "dynamic", + "ea6e8fb210b19d950fab93b60c9009226c63a28808bc8386e05301e25883ac0a" + ], + "wheelhouse/wheel-0.33.6.tar.gz": [ + "layer:basic", + 
"dynamic", + "10c9da68765315ed98850f8e048347c3eb06dd81822dc2ab1d4fde9dc9702646" + ], + "wheelhouse/yarl-1.7.2.tar.gz": [ + "__pip__", + "dynamic", + "45399b46d60c253327a460e99856752009fcee5f5d3c80b2f7c0cae1c38d56dd" + ] + } +} \ No newline at end of file diff --git a/kubernetes-control-plane/.github/workflows/main.yaml b/kubernetes-control-plane/.github/workflows/main.yaml new file mode 100644 index 0000000..ba25b2e --- /dev/null +++ b/kubernetes-control-plane/.github/workflows/main.yaml @@ -0,0 +1,31 @@ +name: Test Suite +on: [pull_request] + +jobs: + call-inclusive-naming-check: + name: Inclusive naming + uses: canonical-web-and-design/Inclusive-naming/.github/workflows/woke.yaml@main + with: + fail-on-error: "true" + + lint-unit: + name: Lint, Unit + runs-on: ubuntu-latest + strategy: + matrix: + python: [3.6, 3.7, 3.8, 3.9] + steps: + - name: Check out code + uses: actions/checkout@v2 + - name: Setup Python + uses: actions/setup-python@v2 + with: + python-version: ${{ matrix.python }} + - name: Install Dependencies + run: | + pip install tox + sudo snap install charm --classic + - name: Lint + run: tox -vve lint + - name: Unit Tests + run: tox -vve unit diff --git a/kubernetes-control-plane/.github/workflows/main.yml b/kubernetes-control-plane/.github/workflows/main.yml new file mode 100644 index 0000000..c51b73d --- /dev/null +++ b/kubernetes-control-plane/.github/workflows/main.yml @@ -0,0 +1,58 @@ +name: Test Suite +on: [pull_request] + +jobs: + call-inclusive-naming-check: + name: Inclusive naming + uses: canonical-web-and-design/Inclusive-naming/.github/workflows/woke.yaml@main + with: + fail-on-error: "true" + + lint-unit-wheelhouse: + name: Lint, Unit, Wheelhouse + runs-on: ubuntu-latest + strategy: + matrix: + python: + - "3.6" + - "3.7" + - "3.8" + - "3.9" + - "3.10" + steps: + - name: Check out code + uses: actions/checkout@v2 + - name: Setup Python + uses: actions/setup-python@v2 + with: + python-version: ${{ matrix.python }} + - name: Install 
Dependencies + run: | + pip install tox + sudo snap install charm --classic + - name: Lint + run: tox -vve lint + - name: Unit Tests + run: tox -vve unit + - name: Validate Wheelhouse + run: tox -vve validate-wheelhouse + integration-test: + name: Integration test with VMWare + runs-on: self-hosted + timeout-minutes: 360 + steps: + - name: Check out code + uses: actions/checkout@v2 + - name: Setup Python + uses: actions/setup-python@v2 + with: + python-version: 3.8 + - name: Setup operator environment + uses: charmed-kubernetes/actions-operator@main + with: + provider: vsphere + credentials-yaml: ${{ secrets.CREDENTIALS_YAML }} + clouds-yaml: ${{ secrets.CLOUDS_YAML }} + bootstrap-options: "--model-default datastore=vsanDatastore --model-default primary-network=VLAN_2763" + - name: Run test + run: tox -e integration diff --git a/kubernetes-control-plane/.gitignore b/kubernetes-control-plane/.gitignore new file mode 100644 index 0000000..6d319d1 --- /dev/null +++ b/kubernetes-control-plane/.gitignore @@ -0,0 +1,7 @@ +.tox/ +__pycache__/ +*.pyc +placeholders/ +*.charm +interfaces +layers diff --git a/kubernetes-control-plane/.travis.yml b/kubernetes-control-plane/.travis.yml new file mode 100644 index 0000000..66d8e1f --- /dev/null +++ b/kubernetes-control-plane/.travis.yml @@ -0,0 +1,7 @@ +language: python +python: + - "3.5" +install: + - pip install tox-travis +script: + - tox diff --git a/kubernetes-control-plane/.wokeignore b/kubernetes-control-plane/.wokeignore new file mode 100644 index 0000000..96c0ecc --- /dev/null +++ b/kubernetes-control-plane/.wokeignore @@ -0,0 +1 @@ +docs/ \ No newline at end of file diff --git a/kubernetes-control-plane/CONTRIBUTING.md b/kubernetes-control-plane/CONTRIBUTING.md new file mode 100644 index 0000000..94b69b7 --- /dev/null +++ b/kubernetes-control-plane/CONTRIBUTING.md @@ -0,0 +1,37 @@ +# Contributor Guide + +This Juju charm is open source ([Apache License 2.0](./LICENSE)) and we actively seek any community contibutions +for 
code, suggestions and documentation. +This page details a few notes, workflows and suggestions for how to make contributions most effective and help us +all build a better charm - please give them a read before working on any contributions. + +## Licensing + +This charm has been created under the [Apache License 2.0](./LICENSE), which will cover any contributions you may +make to this project. Please familiarise yourself with the terms of the license. + +Additionally, this charm uses the Harmony CLA agreement. It’s the easiest way for you to give us permission to +use your contributions. +In effect, you’re giving us a license, but you still own the copyright — so you retain the right to modify your +code and use it in other projects. Please [sign the CLA here](https://ubuntu.com/legal/contributors/agreement) before +making any contributions. + +## Code of conduct + +We have adopted the Ubuntu code of Conduct. You can read this in full [here](https://ubuntu.com/community/code-of-conduct). + +## Contributing code + +To contribute code to this project, please use the following workflow: + +1. [Submit a bug](https://bugs.launchpad.net/charm-kubernetes-control-plane/+filebug) to explain the need for and track the change. +2. Create a branch on your fork of the repo with your changes, including a unit test covering the new or modified code. +3. Submit a PR. The PR description should include a link to the bug on Launchpad. +4. Update the Launchpad bug to include a link to the PR and the `review-needed` tag. +5. Once reviewed and merged, the change will become available on the edge channel and assigned to an appropriate milestone + for further release according to priority. + +## Documentation + +Documentation for this charm is currently maintained as part of the Charmed Kubernetes docs. 
+See [this page](https://github.com/charmed-kubernetes/kubernetes-docs/blob/main/pages/k8s/charm-kubernetes-master.md) diff --git a/kubernetes-control-plane/LICENSE b/kubernetes-control-plane/LICENSE new file mode 100644 index 0000000..7a4a3ea --- /dev/null +++ b/kubernetes-control-plane/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). 
+ + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. 
Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative 
Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. \ No newline at end of file diff --git a/kubernetes-control-plane/Makefile b/kubernetes-control-plane/Makefile new file mode 100644 index 0000000..a1ad3a5 --- /dev/null +++ b/kubernetes-control-plane/Makefile @@ -0,0 +1,24 @@ +#!/usr/bin/make + +all: lint unit_test + + +.PHONY: clean +clean: + @rm -rf .tox + +.PHONY: apt_prereqs +apt_prereqs: + @# Need tox, but don't install the apt version unless we have to (don't want to conflict with pip) + @which tox >/dev/null || (sudo apt-get install -y python-pip && sudo pip install tox) + +.PHONY: lint +lint: apt_prereqs + @tox --notest + @PATH=.tox/py34/bin:.tox/py35/bin flake8 $(wildcard hooks reactive lib unit_tests tests) + @charm proof + +.PHONY: unit_test +unit_test: apt_prereqs + @echo Starting tests... 
+ tox diff --git a/kubernetes-control-plane/README.md b/kubernetes-control-plane/README.md new file mode 100644 index 0000000..b123660 --- /dev/null +++ b/kubernetes-control-plane/README.md @@ -0,0 +1,54 @@ +# Kubernetes-control-plane + +[Kubernetes](http://kubernetes.io/) is an open source system for managing +application containers across a cluster of hosts. The Kubernetes project was +started by Google in 2014, combining the experience of running production +workloads combined with best practices from the community. + +The Kubernetes project defines some new terms that may be unfamiliar to users +or operators. For more information please refer to the concept guide in the +[getting started guide](https://kubernetes.io/docs/home/). + +This charm is an encapsulation of the Kubernetes control plane processes and the +operations to run on any cloud for the entire lifecycle of the cluster except for +etcd, which is available in a separate charm. + +This charm is built from other charm layers using the Juju reactive framework. +The other layers focus on specific subset of operations making this layer +specific to operations of Kubernetes control-plane processes. + +# Charmed Kubernetes + +This charm is not fully functional when deployed by itself. It requires other +charms to model a complete Kubernetes cluster. A Kubernetes cluster needs a +distributed key value store such as [Etcd](https://coreos.com/etcd/) and the +kubernetes-worker charm which delivers the Kubernetes node services. A cluster +also requires a Software Defined Network (SDN), a Container Runtime such as +[containerd](https://charmhub.io/containerd), and Transport Layer +Security (TLS) so the components in a cluster communicate securely. + +Please take a look at the [Charmed Kubernetes](https://charmhub.io/charmed-kubernetes) +or the [Kubernetes core](https://charmhub.io/kubernetes-core) bundles for +examples of complete models of Kubernetes clusters. 
+ +For full install instructions, please see the [Charmed Kubernetes documentation](https://ubuntu.com/kubernetes/docs/quickstart). + +For details on configuring and operating this charm, see the [kubernetes-control-plane documentation](https://ubuntu.com/kubernetes/docs/charm-kubernetes-master) on the same site. + +# Developers + +## Building the charm + +``` +make charm +``` + +## Testing the charm + +``` +tox +``` + +Note that the unit tests use [`charms.unit_test`](https://pypi.org/project/charms.unit-test/) +so all charms.reactive helpers are automatically patched with fakes and little manual +patching needs to be done. Things like `set_flag` and `is_flag_set` can be used directly. diff --git a/kubernetes-control-plane/actions.yaml b/kubernetes-control-plane/actions.yaml new file mode 100644 index 0000000..0baad99 --- /dev/null +++ b/kubernetes-control-plane/actions.yaml @@ -0,0 +1,109 @@ +"debug": + "description": "Collect debug data" +"cis-benchmark": + "description": | + Run the CIS Kubernetes Benchmark against snap-based components. + "params": + "apply": + "type": "string" + "default": "none" + "description": | + Apply remediations to address benchmark failures. The default, 'none', + will not attempt to fix any reported failures. Set to 'conservative' + to resolve simple failures. Set to 'dangerous' to attempt to resolve + all failures. + + Note: Applying any remediation may result in an unusable cluster. + "config": + "type": "string" + "default": "https://github.com/charmed-kubernetes/kube-bench-config/archive/cis-1.23.zip#sha1=3cda2fc68b4ca36f69f5913bfc0b02576e7a3b3d" + "description": | + Archive containing configuration files to use when running kube-bench. + The default value is known to be compatible with snap components. When + using a custom URL, append '#=' to verify the + archive integrity when downloaded. 
+ "release": + "type": "string" + "default": "https://github.com/aquasecurity/kube-bench/releases/download/v0.6.8/kube-bench_0.6.8_linux_amd64.tar.gz#sha256=5f9c5231949bd022a6993f5297cc05bb80a1b7c36a43cefed0a8c8af26778863" + "description": | + Archive containing the 'kube-bench' binary to run. The default value + points to a stable upstream release. When using a custom URL, append + '#=' to verify the archive integrity when + downloaded. + + This may also be set to the special keyword 'upstream'. In this case, + the action will compile and use a local kube-bench binary built from + the master branch of the upstream repository: + https://github.com/aquasecurity/kube-bench + +"restart": + "description": "Restart the Kubernetes control-plane services on demand." +"namespace-list": + "description": "List existing k8s namespaces" +"namespace-create": + "description": "Create new namespace" + "params": + "name": + "type": "string" + "description": "Namespace name eg. staging" + "minLength": !!int "2" + "required": + - "name" +"namespace-delete": + "description": "Delete namespace" + "params": + "name": + "type": "string" + "description": "Namespace name eg. staging" + "minLength": !!int "2" + "required": + - "name" +"upgrade": + "description": "Upgrade the kubernetes snaps" + "params": + "fix-cluster-name": + "type": "boolean" + "default": !!bool "true" + "description": >- + If using the OpenStack cloud provider, whether to fix the cluster + name sent to it to include the cluster tag. This fixes an issue + with load balancers conflicting with other clusters in the same + project but will cause new load balancers to be created which will + require manual intervention to resolve. +"user-create": + "description": "Create a new user" + "params": + "name": + "type": "string" + "description": | + Username for the new user. This value must only contain alphanumeric + characters, ':', '@', '-' or '.'. 
+ "minLength": !!int "2" + "groups": + "type": "string" + "description": | + Optional comma-separated list of groups eg. 'system:masters,managers' + "required": + - "name" +"user-delete": + "description": "Delete an existing user" + "params": + "name": + "type": "string" + "description": "Username of the user to delete" + "minLength": !!int "2" + "required": + - "name" +"user-list": + "description": "List existing users" +"get-kubeconfig": + "description": "Retrieve Kubernetes cluster config, including credentials" +"apply-manifest": + "description": "Apply JSON formatted Kubernetes manifest to cluster" + "params": + "json": + "type": "string" + "description": "The content of the manifest to deploy in JSON format" + "minLength": !!int "2" + "required": + - "json" diff --git a/kubernetes-control-plane/actions/apply-manifest b/kubernetes-control-plane/actions/apply-manifest new file mode 100755 index 0000000..3d8bea4 --- /dev/null +++ b/kubernetes-control-plane/actions/apply-manifest @@ -0,0 +1,75 @@ +#!/usr/local/sbin/charm-env python3 +import os +import json +import tempfile +import subprocess +from charmhelpers.core.hookenv import action_get, action_set, action_fail, action_name + + +def _kubectl(args): + """ + Executes kubectl with args as arguments + """ + snap_bin = os.path.join(os.sep, "snap", "bin") + env = os.environ.copy() + env["PATH"] = os.pathsep.join([snap_bin, env["PATH"]]) + cmd = ["kubectl", "--kubeconfig=/home/ubuntu/config"] + cmd.extend(args) + return subprocess.check_output( + cmd, + env=env, + stderr=subprocess.STDOUT, + ) + + +def get_kubeconfig(): + """ + Read the kubeconfig on this control-plane unit and return it as JSON + """ + try: + result = _kubectl(["config", "view", "-o", "json", "--raw"]) + # JSON format verification + kubeconfig = json.dumps(json.loads(result)) + action_set({"kubeconfig": kubeconfig}) + except json.JSONDecodeError as e: + action_fail("Failed to parse kubeconfig: {}".format(str(e))) + except Exception as e: + 
action_fail("Failed to retrieve kubeconfig: {}".format(str(e))) + + +def apply_manifest(): + """ + Applies a user defined manifest with kubectl + """ + _, apply_path = tempfile.mkstemp(suffix=".json") + try: + manifest = json.loads(action_get("json")) + with open(apply_path, "w") as manifest_file: + json.dump(manifest, manifest_file) + output = _kubectl(["apply", "-f", apply_path]) + + action_set( + { + "summary": "Manifest applied.", + "output": output.decode("utf-8"), + } + ) + except subprocess.CalledProcessError as e: + action_fail( + "kubectl failed with exit code {} and message: {}".format( + e.returncode, e.output + ) + ) + except json.JSONDecodeError as e: + action_fail("Failed to parse JSON manifest: {}".format(str(e))) + except Exception as e: + action_fail("Failed to apply manifest: {}".format(str(e))) + finally: + os.unlink(apply_path) + + +action = action_name() +if action == "get-kubeconfig": + get_kubeconfig() +elif action == "apply-manifest": + apply_manifest() diff --git a/kubernetes-control-plane/actions/cis-benchmark b/kubernetes-control-plane/actions/cis-benchmark new file mode 100755 index 0000000..ed7d763 --- /dev/null +++ b/kubernetes-control-plane/actions/cis-benchmark @@ -0,0 +1,396 @@ +#!/usr/local/sbin/charm-env python3 +import os +import json +import shlex +import shutil +import subprocess +import sys +import tempfile +from pathlib import Path + +import charms.layer +import charms.reactive +from charmhelpers.core import hookenv, unitdata +from charmhelpers.fetch.archiveurl import ArchiveUrlFetchHandler +from charms.layer import snap +from charms.reactive import clear_flag, is_flag_set, set_flag + + +BENCH_HOME = "/home/ubuntu/kube-bench" +BENCH_BIN = "{}/kube-bench".format(BENCH_HOME) +BENCH_CFG = "{}/cfg-ck".format(BENCH_HOME) +GO_PKG = "github.com/aquasecurity/kube-bench" +RESULTS_DIR = "/home/ubuntu/kube-bench-results" + +# Remediation dicts associate a failing test with a tuple to fix it. 
+# Conservative fixes will probably leave the cluster in a good state. +# Dangerous fixes will likely break the cluster. +# Tuple examples: +# {'1.2.3': ('manual -- we don't know how to auto fix this', None, None)} +# {'1.2.3': ('cli', 'command to run', None)} +# {'1.2.3': ('kv', 'snap', {cfg_key: value})} +CONSERVATIVE = { + "0.0.0": ("cli", 'echo "this is fine"', None), + # etcd (no known failures with a default install) + # k8s-control-plane (no known failures with a default install) + # k8s-worker (no known failures with a default install) +} +ADMISSION_PLUGINS = { + "enable-admission-plugins": ( + "PersistentVolumeLabel", + "PodSecurityPolicy," "AlwaysPullImages", + "NodeRestriction", + ) +} +DANGEROUS = { + "0.0.0": ("cli", 'echo "this is fine"', None), + # etcd (no known warnings with a default install) + # k8s-control-plane + "1.1.21": ("cli", "chmod -R 600 /root/cdk/*.key", None), + "1.2.9": ("manual", None, None), + "1.2.11": ("kv", "kube-apiserver", ADMISSION_PLUGINS), + "1.2.25": ("manual", None, None), + "1.2.33": ("manual", None, None), + "1.2.34": ("manual", None, None), + # k8s-worker + "4.2.9": ("kv", "kubelet", {"event-qps": 0}), + "4.2.13": ( + "kv", + "kubelet", + { + "tls-cipher-suites": "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256," + "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256," + "TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305," + "TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384," + "TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305," + "TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384," + "TLS_RSA_WITH_AES_256_GCM_SHA384," + "TLS_RSA_WITH_AES_128_GCM_SHA256" + }, + ), +} + + +def _fail(msg): + """Fail the action with a given message.""" + hookenv.action_fail(msg) + sys.exit() + + +def _move_matching_parent(dirpath, filename, dest): + """Move a parent directory that contains a specific file. + + Helper function that walks a directory looking for a given file. If found, + the file's parent directory is moved to the given destination. 
+ + :param: dirpath: String path to search + :param: filename: String file to find + :param: dest: String destination of the found parent directory + """ + for root, _, files in os.walk(dirpath): + for name in files: + if name == filename: + hookenv.log("Moving {} to {}".format(root, dest)) + shutil.move(root, dest) + return + else: + _fail("Could not find {} in {}".format(filename, dirpath)) + + +def _restart_charm(): + """Set charm-specific flags and call reactive.main().""" + app = hookenv.charm_name() or "unknown" + if "master" in app: + hookenv.log("Restarting master") + clear_flag("kubernetes-master.components.started") + # or this app could have been upgrade to new flags + clear_flag("kubernetes-control-plane.components.started") + elif "control-plane" in app: + hookenv.log("Restarting control-plane") + clear_flag("kubernetes-control-plane.components.started") + elif "worker" in app: + hookenv.log("Restarting worker") + set_flag("kubernetes-worker.restart-needed") + elif "etcd" in app: + hookenv.log("No-op: etcd does not need to be restarted") + return + else: + _fail("Unable to determine the charm to restart: {}".format(app)) + + # Invoke reactive so the charm will react to the flags we just managed + charms.layer.import_layer_libs() + charms.reactive.main() + + +def install(release, config): + """Install kube-bench and related configuration. + + Release and configuration are set via action params. If installing an + upstream release, this method will also install 'go' if needed. 
+ + :param: release: Archive URI or 'upstream' + :param: config: Archive URI of configuration files + """ + if Path(BENCH_HOME).exists(): + shutil.rmtree(BENCH_HOME) + fetcher = ArchiveUrlFetchHandler() + + if release == "upstream": + Path(BENCH_HOME).mkdir(parents=True, exist_ok=True) + + # Setup the 'go' environment + env = os.environ.copy() + go_bin = shutil.which("go", path="{}:/snap/bin".format(env["PATH"])) + if not go_bin: + snap.install("go", channel="stable", classic=True) + go_bin = "/snap/bin/go" + go_cache = os.getenv("GOCACHE", "/var/snap/go/common/cache") + go_path = os.getenv("GOPATH", "/var/snap/go/common") + env["GOCACHE"] = go_cache + env["GOPATH"] = go_path + Path(go_path).mkdir(parents=True, exist_ok=True) + + # From https://github.com/aquasecurity/kube-bench#installing-from-sources + go_cmd = "{bin} get {pkg} " "github.com/golang/dep/cmd/dep".format( + bin=go_bin, pkg=GO_PKG + ) + try: + subprocess.check_call(shlex.split(go_cmd), cwd=go_path, env=env) + except subprocess.CalledProcessError: + _fail("Failed to run: {}".format(go_cmd)) + + go_cmd = "{bin} build -o {out} {base}/src/{pkg}".format( + bin=go_bin, out=BENCH_BIN, base=go_path, pkg=GO_PKG + ) + try: + subprocess.check_call(shlex.split(go_cmd), cwd=go_path, env=env) + except subprocess.CalledProcessError: + _fail("Failed to run: {}".format(go_cmd)) + else: + # Fetch the release URI and put it in the right place. + archive_path = fetcher.install(source=release) + # NB: We may not know the structure of the archive, but we know the + # directory containing 'kube-bench' belongs in our BENCH_HOME. + _move_matching_parent( + dirpath=archive_path, filename="kube-bench", dest=BENCH_HOME + ) + + # Fetch the config URI and put it in the right place. + archive_dir = fetcher.install(source=config) + # NB: We may not know the structure of the archive, but we know the + # directory containing 'config.yaml' belongs in our BENCH_CFG. 
+ _move_matching_parent(dirpath=archive_dir, filename="config.yaml", dest=BENCH_CFG) + + +def apply(remediations=None): + """Apply remediations to address benchmark failures. + + :param: remediations: either 'conservative' or 'dangerous' + """ + applied_fixes = 0 + danger = True if remediations == "dangerous" else False + db = unitdata.kv() + + json_log = report(log_format="json") + hookenv.log("Loading JSON from: {}".format(json_log)) + try: + with open(json_log, "r") as f: + full_json = json.load(f) + except Exception: + _fail("Failed to load: {}".format(json_log)) + + full_json = full_json.get("Controls")[0] if "Controls" in full_json else full_json + for test in full_json.get("tests", {}): + for result in test.get("results", {}): + test_num = result.get("test_number") + test_remediation = result.get("remediation") + test_status = result.get("status", "") + + if test_status.lower() in ("fail", "warn"): + test_remedy = CONSERVATIVE.get(test_num) + if not test_remedy and danger: + # no conservative remedy, check dangerous if user wants + test_remedy = DANGEROUS.get(test_num) + if isinstance(test_remedy, tuple): + if test_remedy[0] == "manual": + # we don't know how to autofix; log remediation text + hookenv.log( + "Test {}: unable to auto-apply remedy.\n" + "Manual steps:\n{}".format(test_num, test_remediation) + ) + elif test_remedy[0] == "cli": + cmd = shlex.split(test_remedy[1]) + try: + out = subprocess.check_output(cmd) + except subprocess.CalledProcessError: + _fail("Test {}: failed to run: {}".format(test_num, cmd)) + else: + hookenv.log( + "Test {}: applied remedy: {}\n" + "Output: {}".format(test_num, cmd, out) + ) + applied_fixes += 1 + elif test_remedy[0] == "kv": + cfg_key = "cis-" + test_remedy[1] + cfg = db.get(cfg_key) or {} + cfg.update(test_remedy[2]) + db.set(cfg_key, cfg) + + hookenv.log( + "Test {}: updated configuration: {}\n".format(test_num, cfg) + ) + applied_fixes += 1 + else: + hookenv.log("Test {}: remediation is 
missing".format(test_num)) + + # CLI and KV changes will require a charm restart; do it. + if applied_fixes > 0: + _restart_charm() + + msg = ( + 'Applied {} remediations. Re-run with "apply=none" to generate a ' "new report." + ).format(applied_fixes) + hookenv.action_set({"summary": msg}) + + +def reset(): + """Reset any remediations we applied to unitdata.kv(). + + This action does not track individual remediations to reset. Therefore, + this function unconditionally unsets all 'cis-' prefixed arguments that + this action may have set and restarts the relevant charm. + """ + db = unitdata.kv() + + db.unset("cis-kube-apiserver") + db.unset("cis-kube-scheduler") + db.unset("cis-kube-controller-manager") + db.unset("cis-kubelet") + _restart_charm() + + hookenv.action_set( + { + "summary": ( + "Reset is complete. Re-run with " + '"apply=none" to generate a new report.' + ) + } + ) + + +def report(log_format="text"): + """Run kube-bench and report results. + + By default, save the full plain-text results to our RESULTS_DIR and set + action output with a summary. This function can also save full results in + a machine-friendly json format. 
+ + :param: log_format: String determines if output is text or json + :returns: Path to results log + """ + Path(RESULTS_DIR).mkdir(parents=True, exist_ok=True) + + # Node type is different depending on the charm + app = hookenv.charm_name() or "unknown" + version = "cis-1.23" + if "master" in app: + target = "master" + if "control-plane" in app: + # must refer to this as upstream kube-bench tests do + # wokeignore:rule=master + target = "master" + elif "worker" in app: + target = "node" + elif "etcd" in app: + target = "etcd" + else: + _fail("Unable to determine the target to benchmark: {}".format(app)) + + # Commands and log names are different depending on the format + if log_format == "json": + log_prefix = "results-json-" + verbose_cmd = ( + "{bin} -D {cfg} --benchmark {ver} --json run " "--targets {target}" + ).format(bin=BENCH_BIN, cfg=BENCH_CFG, ver=version, target=target) + else: + log_prefix = "results-text-" + verbose_cmd = ( + "{bin} -D {cfg} --benchmark {ver} run " "--targets {target}" + ).format(bin=BENCH_BIN, cfg=BENCH_CFG, ver=version, target=target) + + summary_cmd = ( + "{bin} -D {cfg} --benchmark {ver} " + "--noremediations --noresults run --targets {target}" + ).format(bin=BENCH_BIN, cfg=BENCH_CFG, ver=version, target=target) + + # Store full results for future consumption + with tempfile.NamedTemporaryFile( + mode="w+b", prefix=log_prefix, dir=RESULTS_DIR, delete=False + ) as res_file: + try: + subprocess.call( + shlex.split(verbose_cmd), stdout=res_file, stderr=subprocess.DEVNULL + ) + except subprocess.CalledProcessError: + _fail("Failed to run: {}".format(verbose_cmd)) + else: + # remember the filename for later (and make it readable, why not?) + Path(res_file.name).chmod(0o644) + log = res_file.name + + # When making a summary, we also have a verbose report. Set action output + # so operators can see everything related to this run. 
+ try: + out = subprocess.check_output( + shlex.split(summary_cmd), universal_newlines=True, stderr=subprocess.DEVNULL + ) + except subprocess.CalledProcessError: + _fail("Failed to run: {}".format(summary_cmd)) + else: + fetch_cmd = "juju scp {unit}:{file} .".format( + unit=hookenv.local_unit(), file=log + ) + hookenv.action_set({"cmd": summary_cmd, "report": fetch_cmd, "summary": out}) + + return log or None + + +if __name__ == "__main__": + if not ( + is_flag_set("snap.installed.etcd") + or is_flag_set("kubernetes-master.snaps.installed") + or is_flag_set("kubernetes-control-plane.snaps.installed") + or is_flag_set("kubernetes-worker.snaps.installed") + or is_flag_set("kubernetes-node.snaps.installed") + ): + msg = "Snaps are not yet installed on this unit." + _fail(msg) + + # Validate action params + release = hookenv.action_get("release") or "upstream" + config = hookenv.action_get("config") + if not config: + msg = 'Missing "config" parameter' + _fail(msg) + remediations = hookenv.action_get("apply") + if remediations not in ["none", "conservative", "dangerous", "reset"]: + msg = 'Invalid "apply" parameter: {}'.format(remediations) + _fail(msg) + + # TODO: may want an option to overwrite an existing install + if Path(BENCH_BIN).exists() and Path(BENCH_CFG).exists(): + hookenv.log("{} exists; skipping install".format(BENCH_HOME)) + else: + hookenv.log("Installing benchmark from: {}".format(release)) + install(release, config) + + # Reset, remediate, or report + if remediations == "reset": + hookenv.log("Attempting to remove all remediations") + reset() + elif remediations != "none": + hookenv.log('Applying "{}" remediations'.format(remediations)) + apply(remediations) + else: + hookenv.log("Report only; no remediations were requested") + report(log_format="text") diff --git a/kubernetes-control-plane/actions/debug b/kubernetes-control-plane/actions/debug new file mode 100755 index 0000000..8ba160e --- /dev/null +++ b/kubernetes-control-plane/actions/debug @@ 
-0,0 +1,102 @@ +#!/usr/local/sbin/charm-env python3 + +import os +import subprocess +import tarfile +import tempfile +import traceback +from contextlib import contextmanager +from datetime import datetime +from charmhelpers.core.hookenv import action_set, local_unit + +archive_dir = None +log_file = None + + +@contextmanager +def archive_context(): + """ Open a context with a new temporary directory. + + When the context closes, the directory is archived, and the archive + location is added to Juju action output. """ + global archive_dir + global log_file + with tempfile.TemporaryDirectory() as temp_dir: + name = "debug-" + datetime.now().strftime("%Y%m%d%H%M%S") + archive_dir = os.path.join(temp_dir, name) + os.makedirs(archive_dir) + with open("%s/debug.log" % archive_dir, "w") as log_file: + yield + os.chdir(temp_dir) + tar_path = "/home/ubuntu/%s.tar.gz" % name + with tarfile.open(tar_path, "w:gz") as f: + f.add(name) + action_set({ + "path": tar_path, + "command": "juju scp %s:%s ." % (local_unit(), tar_path), + "message": " ".join([ + "Archive has been created on unit %s." % local_unit(), + "Use the juju scp command to copy it to your local machine." + ]) + }) + + +def log(msg): + """ Log a message that will be included in the debug archive. + + Must be run within archive_context """ + timestamp = datetime.now().strftime("%Y-%m-%d %H:%M:%S") + for line in str(msg).splitlines(): + log_file.write(timestamp + " | " + line.rstrip() + "\n") + + +def run_script(script): + """ Run a single script. 
Must be run within archive_context """ + log("Running script: " + script) + script_dir = os.path.join(archive_dir, script) + os.makedirs(script_dir) + env = os.environ.copy() + env["PYTHONPATH"] = "lib" # allow same imports as reactive code + env["DEBUG_SCRIPT_DIR"] = script_dir + with open(script_dir + "/stdout", "w") as stdout: + with open(script_dir + "/stderr", "w") as stderr: + process = subprocess.Popen( + "debug-scripts/" + script, + stdout=stdout, stderr=stderr, env=env + ) + try: + exit_code = process.wait(timeout=300) + except subprocess.TimeoutExpired: + log("ERROR: still running, terminating") + process.terminate() + try: + exit_code = process.wait(timeout=10) + except subprocess.TimeoutExpired: + log("ERROR: still running, killing") + process.kill() + exit_code = process.wait(timeout=10) + if exit_code != 0: + log("ERROR: %s failed with exit code %d" % (script, exit_code)) + + +def run_all_scripts(): + """ Run all scripts. For the sake of robustness, log and ignore any + exceptions that occur. + + Must be run within archive_context """ + scripts = os.listdir("debug-scripts") + for script in scripts: + try: + run_script(script) + except: + log(traceback.format_exc()) + + +def main(): + """ Open an archive context and run all scripts. 
""" + with archive_context(): + run_all_scripts() + + +if __name__ == "__main__": + main() diff --git a/kubernetes-control-plane/actions/get-kubeconfig b/kubernetes-control-plane/actions/get-kubeconfig new file mode 100755 index 0000000..3d8bea4 --- /dev/null +++ b/kubernetes-control-plane/actions/get-kubeconfig @@ -0,0 +1,75 @@ +#!/usr/local/sbin/charm-env python3 +import os +import json +import tempfile +import subprocess +from charmhelpers.core.hookenv import action_get, action_set, action_fail, action_name + + +def _kubectl(args): + """ + Executes kubectl with args as arguments + """ + snap_bin = os.path.join(os.sep, "snap", "bin") + env = os.environ.copy() + env["PATH"] = os.pathsep.join([snap_bin, env["PATH"]]) + cmd = ["kubectl", "--kubeconfig=/home/ubuntu/config"] + cmd.extend(args) + return subprocess.check_output( + cmd, + env=env, + stderr=subprocess.STDOUT, + ) + + +def get_kubeconfig(): + """ + Read the kubeconfig on this control-plane unit and return it as JSON + """ + try: + result = _kubectl(["config", "view", "-o", "json", "--raw"]) + # JSON format verification + kubeconfig = json.dumps(json.loads(result)) + action_set({"kubeconfig": kubeconfig}) + except json.JSONDecodeError as e: + action_fail("Failed to parse kubeconfig: {}".format(str(e))) + except Exception as e: + action_fail("Failed to retrieve kubeconfig: {}".format(str(e))) + + +def apply_manifest(): + """ + Applies a user defined manifest with kubectl + """ + _, apply_path = tempfile.mkstemp(suffix=".json") + try: + manifest = json.loads(action_get("json")) + with open(apply_path, "w") as manifest_file: + json.dump(manifest, manifest_file) + output = _kubectl(["apply", "-f", apply_path]) + + action_set( + { + "summary": "Manifest applied.", + "output": output.decode("utf-8"), + } + ) + except subprocess.CalledProcessError as e: + action_fail( + "kubectl failed with exit code {} and message: {}".format( + e.returncode, e.output + ) + ) + except json.JSONDecodeError as e: + 
action_fail("Failed to parse JSON manifest: {}".format(str(e))) + except Exception as e: + action_fail("Failed to apply manifest: {}".format(str(e))) + finally: + os.unlink(apply_path) + + +action = action_name() +if action == "get-kubeconfig": + get_kubeconfig() +elif action == "apply-manifest": + apply_manifest() diff --git a/kubernetes-control-plane/actions/kubectl-actions.py b/kubernetes-control-plane/actions/kubectl-actions.py new file mode 100755 index 0000000..3d8bea4 --- /dev/null +++ b/kubernetes-control-plane/actions/kubectl-actions.py @@ -0,0 +1,75 @@ +#!/usr/local/sbin/charm-env python3 +import os +import json +import tempfile +import subprocess +from charmhelpers.core.hookenv import action_get, action_set, action_fail, action_name + + +def _kubectl(args): + """ + Executes kubectl with args as arguments + """ + snap_bin = os.path.join(os.sep, "snap", "bin") + env = os.environ.copy() + env["PATH"] = os.pathsep.join([snap_bin, env["PATH"]]) + cmd = ["kubectl", "--kubeconfig=/home/ubuntu/config"] + cmd.extend(args) + return subprocess.check_output( + cmd, + env=env, + stderr=subprocess.STDOUT, + ) + + +def get_kubeconfig(): + """ + Read the kubeconfig on this control-plane unit and return it as JSON + """ + try: + result = _kubectl(["config", "view", "-o", "json", "--raw"]) + # JSON format verification + kubeconfig = json.dumps(json.loads(result)) + action_set({"kubeconfig": kubeconfig}) + except json.JSONDecodeError as e: + action_fail("Failed to parse kubeconfig: {}".format(str(e))) + except Exception as e: + action_fail("Failed to retrieve kubeconfig: {}".format(str(e))) + + +def apply_manifest(): + """ + Applies a user defined manifest with kubectl + """ + _, apply_path = tempfile.mkstemp(suffix=".json") + try: + manifest = json.loads(action_get("json")) + with open(apply_path, "w") as manifest_file: + json.dump(manifest, manifest_file) + output = _kubectl(["apply", "-f", apply_path]) + + action_set( + { + "summary": "Manifest applied.", + "output": 
output.decode("utf-8"), + } + ) + except subprocess.CalledProcessError as e: + action_fail( + "kubectl failed with exit code {} and message: {}".format( + e.returncode, e.output + ) + ) + except json.JSONDecodeError as e: + action_fail("Failed to parse JSON manifest: {}".format(str(e))) + except Exception as e: + action_fail("Failed to apply manifest: {}".format(str(e))) + finally: + os.unlink(apply_path) + + +action = action_name() +if action == "get-kubeconfig": + get_kubeconfig() +elif action == "apply-manifest": + apply_manifest() diff --git a/kubernetes-control-plane/actions/namespace-create b/kubernetes-control-plane/actions/namespace-create new file mode 100755 index 0000000..50e8650 --- /dev/null +++ b/kubernetes-control-plane/actions/namespace-create @@ -0,0 +1,60 @@ +#!/usr/local/sbin/charm-env python3 +import os +from yaml import safe_load as load +from charmhelpers.core.hookenv import ( + action_get, + action_set, + action_fail, + action_name +) +from charmhelpers.core.templating import render +from subprocess import check_output + + +os.environ['PATH'] += os.pathsep + os.path.join(os.sep, 'snap', 'bin') + + +def kubectl(args): + cmd = ["kubectl", "--kubeconfig=/home/ubuntu/config"] + cmd.extend(args) + return check_output(cmd) + + +def namespace_list(): + y = load(kubectl(['get', 'namespaces', '-o', 'yaml'])) + ns = [i['metadata']['name'] for i in y['items']] + action_set({'namespaces': ', '.join(ns)+'.'}) + return ns + + +def namespace_create(): + name = action_get('name') + if name in namespace_list(): + action_fail('Namespace "{}" already exists.'.format(name)) + return + + render('create-namespace.yaml.j2', '/etc/kubernetes/addons/create-namespace.yaml', + context={'name': name}) + kubectl(['create', '-f', '/etc/kubernetes/addons/create-namespace.yaml']) + action_set({'msg': 'Namespace "{}" created.'.format(name)}) + + +def namespace_delete(): + name = action_get('name') + if name in ['default', 'kube-system']: + action_fail('Not allowed to delete 
"{}".'.format(name)) + return + if name not in namespace_list(): + action_fail('Namespace "{}" does not exist.'.format(name)) + return + kubectl(['delete', 'ns/'+name]) + action_set({'msg': 'Namespace "{}" deleted.'.format(name)}) + + +action = action_name().replace('namespace-', '') +if action == 'create': + namespace_create() +elif action == 'list': + namespace_list() +elif action == 'delete': + namespace_delete() diff --git a/kubernetes-control-plane/actions/namespace-delete b/kubernetes-control-plane/actions/namespace-delete new file mode 100755 index 0000000..50e8650 --- /dev/null +++ b/kubernetes-control-plane/actions/namespace-delete @@ -0,0 +1,60 @@ +#!/usr/local/sbin/charm-env python3 +import os +from yaml import safe_load as load +from charmhelpers.core.hookenv import ( + action_get, + action_set, + action_fail, + action_name +) +from charmhelpers.core.templating import render +from subprocess import check_output + + +os.environ['PATH'] += os.pathsep + os.path.join(os.sep, 'snap', 'bin') + + +def kubectl(args): + cmd = ["kubectl", "--kubeconfig=/home/ubuntu/config"] + cmd.extend(args) + return check_output(cmd) + + +def namespace_list(): + y = load(kubectl(['get', 'namespaces', '-o', 'yaml'])) + ns = [i['metadata']['name'] for i in y['items']] + action_set({'namespaces': ', '.join(ns)+'.'}) + return ns + + +def namespace_create(): + name = action_get('name') + if name in namespace_list(): + action_fail('Namespace "{}" already exists.'.format(name)) + return + + render('create-namespace.yaml.j2', '/etc/kubernetes/addons/create-namespace.yaml', + context={'name': name}) + kubectl(['create', '-f', '/etc/kubernetes/addons/create-namespace.yaml']) + action_set({'msg': 'Namespace "{}" created.'.format(name)}) + + +def namespace_delete(): + name = action_get('name') + if name in ['default', 'kube-system']: + action_fail('Not allowed to delete "{}".'.format(name)) + return + if name not in namespace_list(): + action_fail('Namespace "{}" does not 
exist.'.format(name)) + return + kubectl(['delete', 'ns/'+name]) + action_set({'msg': 'Namespace "{}" deleted.'.format(name)}) + + +action = action_name().replace('namespace-', '') +if action == 'create': + namespace_create() +elif action == 'list': + namespace_list() +elif action == 'delete': + namespace_delete() diff --git a/kubernetes-control-plane/actions/namespace-list b/kubernetes-control-plane/actions/namespace-list new file mode 100755 index 0000000..50e8650 --- /dev/null +++ b/kubernetes-control-plane/actions/namespace-list @@ -0,0 +1,60 @@ +#!/usr/local/sbin/charm-env python3 +import os +from yaml import safe_load as load +from charmhelpers.core.hookenv import ( + action_get, + action_set, + action_fail, + action_name +) +from charmhelpers.core.templating import render +from subprocess import check_output + + +os.environ['PATH'] += os.pathsep + os.path.join(os.sep, 'snap', 'bin') + + +def kubectl(args): + cmd = ["kubectl", "--kubeconfig=/home/ubuntu/config"] + cmd.extend(args) + return check_output(cmd) + + +def namespace_list(): + y = load(kubectl(['get', 'namespaces', '-o', 'yaml'])) + ns = [i['metadata']['name'] for i in y['items']] + action_set({'namespaces': ', '.join(ns)+'.'}) + return ns + + +def namespace_create(): + name = action_get('name') + if name in namespace_list(): + action_fail('Namespace "{}" already exists.'.format(name)) + return + + render('create-namespace.yaml.j2', '/etc/kubernetes/addons/create-namespace.yaml', + context={'name': name}) + kubectl(['create', '-f', '/etc/kubernetes/addons/create-namespace.yaml']) + action_set({'msg': 'Namespace "{}" created.'.format(name)}) + + +def namespace_delete(): + name = action_get('name') + if name in ['default', 'kube-system']: + action_fail('Not allowed to delete "{}".'.format(name)) + return + if name not in namespace_list(): + action_fail('Namespace "{}" does not exist.'.format(name)) + return + kubectl(['delete', 'ns/'+name]) + action_set({'msg': 'Namespace "{}" deleted.'.format(name)}) 
+ + +action = action_name().replace('namespace-', '') +if action == 'create': + namespace_create() +elif action == 'list': + namespace_list() +elif action == 'delete': + namespace_delete() diff --git a/kubernetes-control-plane/actions/restart b/kubernetes-control-plane/actions/restart new file mode 100755 index 0000000..d130733 --- /dev/null +++ b/kubernetes-control-plane/actions/restart @@ -0,0 +1,14 @@ +#!/usr/bin/env bash + +set +ex + +# Restart the apiserver, controller-manager, and scheduler + +systemctl restart snap.kube-apiserver.daemon +action-set apiserver.status='restarted' + +systemctl restart snap.kube-controller-manager.daemon +action-set controller-manager.status='restarted' + +systemctl restart snap.kube-scheduler.daemon +action-set kube-scheduler.status='restarted' diff --git a/kubernetes-control-plane/actions/upgrade b/kubernetes-control-plane/actions/upgrade new file mode 100755 index 0000000..21b1685 --- /dev/null +++ b/kubernetes-control-plane/actions/upgrade @@ -0,0 +1,9 @@ +#!/bin/bash +set -eux + +if [[ "$(action-get fix-cluster-name)" == "true" ]]; then + charms.reactive set_state 'kubernetes-control-plane.cdk-addons.unique-cluster-tag' +fi + +charms.reactive set_state kubernetes-control-plane.upgrade-specified +exec hooks/config-changed diff --git a/kubernetes-control-plane/actions/user-create b/kubernetes-control-plane/actions/user-create new file mode 100755 index 0000000..d191aad --- /dev/null +++ b/kubernetes-control-plane/actions/user-create @@ -0,0 +1,108 @@ +#!/usr/local/sbin/charm-env python3 +import os +import re +import sys +from charmhelpers.core import hookenv +from charmhelpers.core.hookenv import action_get, action_set, action_fail, action_name +from charms import layer + +os.environ["PATH"] += os.pathsep + os.path.join(os.sep, "snap", "bin") + +# Import charm layers and start reactive +layer.import_layer_libs() +hookenv._run_atstart() + + +def protect_resources(name): + """Do not allow the action to operate on names used by 
Charmed Kubernetes.""" + protected_names = [ + "admin", + "system:kube-controller-manager", + "kube-controller-manager", + "system:kube-proxy", + "kube-proxy", + "system:kube-scheduler", + "kube-scheduler", + "system:monitoring", + ] + if name.startswith("kubelet") or name in protected_names: + action_fail('Not allowed to {} "{}".'.format(action, name)) + sys.exit(0) + + +def user_list(): + """Return a dict of 'username: secret_id' for Charmed Kubernetes users.""" + secrets = layer.kubernetes_common.get_secret_names() + action_set({"users": ", ".join(list(secrets))}) + return secrets + + +def user_create(): + user = action_get("name") + groups = action_get("groups") or "" + protect_resources(user) + + users = user_list() + if user in list(users): + action_fail('User "{}" already exists.'.format(user)) + return + + # Validate the name + if re.search("[^0-9A-Za-z:@.-]+", user): + msg = "User name may only contain alphanumeric characters, ':', '@', '-' or '.'" + action_fail(msg) + return + + # Create the secret + # TODO: make the token format less magical so it doesn't get out of + # sync with the function that creates secrets in kubernetes_control_plane.py. 
+ token = "{}::{}".format(user, layer.kubernetes_control_plane.token_generator()) + if not layer.kubernetes_control_plane.create_secret(token, user, user, groups): + action_fail("Failed to create secret for: {}".format(user)) + return + + # Create a kubeconfig + ca_crt = layer.kubernetes_common.ca_crt_path + kubeconfig_path = "/home/ubuntu/{}-kubeconfig".format(user) + endpoints = layer.kubernetes_control_plane.get_external_api_endpoints() + if not endpoints: + action_fail("Kubernetes client endpoints currently unavailable.") + return + public_server = layer.kubernetes_control_plane.get_api_urls(endpoints)[0] + + layer.kubernetes_common.create_kubeconfig( + kubeconfig_path, public_server, ca_crt, token=token, user=user + ) + os.chmod(kubeconfig_path, 0o644) + + # Tell the people what they've won + fetch_cmd = "juju scp {}:{} .".format(hookenv.local_unit(), kubeconfig_path) + action_set({"msg": 'User "{}" created.'.format(user)}) + action_set({"users": ", ".join(list(users) + [user])}) + action_set({"kubeconfig": fetch_cmd}) + + +def user_delete(): + user = action_get("name") + protect_resources(user) + + users = user_list() + if user not in list(users): + action_fail('User "{}" does not exist.'.format(user)) + return + + # Delete the secret + secret_id = users[user] + layer.kubernetes_control_plane.delete_secret(secret_id) + + action_set({"msg": 'User "{}" deleted.'.format(user)}) + action_set({"users": ", ".join(u for u in list(users) if u != user)}) + + +action = action_name().replace("user-", "") +if action == "create": + user_create() +elif action == "list": + user_list() +elif action == "delete": + user_delete() diff --git a/kubernetes-control-plane/actions/user-delete b/kubernetes-control-plane/actions/user-delete new file mode 100755 index 0000000..d191aad --- /dev/null +++ b/kubernetes-control-plane/actions/user-delete @@ -0,0 +1,108 @@ +#!/usr/local/sbin/charm-env python3 +import os +import re +import sys +from charmhelpers.core import hookenv +from 
charmhelpers.core.hookenv import action_get, action_set, action_fail, action_name +from charms import layer + +os.environ["PATH"] += os.pathsep + os.path.join(os.sep, "snap", "bin") + +# Import charm layers and start reactive +layer.import_layer_libs() +hookenv._run_atstart() + + +def protect_resources(name): + """Do not allow the action to operate on names used by Charmed Kubernetes.""" + protected_names = [ + "admin", + "system:kube-controller-manager", + "kube-controller-manager", + "system:kube-proxy", + "kube-proxy", + "system:kube-scheduler", + "kube-scheduler", + "system:monitoring", + ] + if name.startswith("kubelet") or name in protected_names: + action_fail('Not allowed to {} "{}".'.format(action, name)) + sys.exit(0) + + +def user_list(): + """Return a dict of 'username: secret_id' for Charmed Kubernetes users.""" + secrets = layer.kubernetes_common.get_secret_names() + action_set({"users": ", ".join(list(secrets))}) + return secrets + + +def user_create(): + user = action_get("name") + groups = action_get("groups") or "" + protect_resources(user) + + users = user_list() + if user in list(users): + action_fail('User "{}" already exists.'.format(user)) + return + + # Validate the name + if re.search("[^0-9A-Za-z:@.-]+", user): + msg = "User name may only contain alphanumeric characters, ':', '@', '-' or '.'" + action_fail(msg) + return + + # Create the secret + # TODO: make the token format less magical so it doesn't get out of + # sync with the function that creates secrets in kubernetes_control_plane.py. 
+ token = "{}::{}".format(user, layer.kubernetes_control_plane.token_generator()) + if not layer.kubernetes_control_plane.create_secret(token, user, user, groups): + action_fail("Failed to create secret for: {}".format(user)) + return + + # Create a kubeconfig + ca_crt = layer.kubernetes_common.ca_crt_path + kubeconfig_path = "/home/ubuntu/{}-kubeconfig".format(user) + endpoints = layer.kubernetes_control_plane.get_external_api_endpoints() + if not endpoints: + action_fail("Kubernetes client endpoints currently unavailable.") + return + public_server = layer.kubernetes_control_plane.get_api_urls(endpoints)[0] + + layer.kubernetes_common.create_kubeconfig( + kubeconfig_path, public_server, ca_crt, token=token, user=user + ) + os.chmod(kubeconfig_path, 0o644) + + # Tell the people what they've won + fetch_cmd = "juju scp {}:{} .".format(hookenv.local_unit(), kubeconfig_path) + action_set({"msg": 'User "{}" created.'.format(user)}) + action_set({"users": ", ".join(list(users) + [user])}) + action_set({"kubeconfig": fetch_cmd}) + + +def user_delete(): + user = action_get("name") + protect_resources(user) + + users = user_list() + if user not in list(users): + action_fail('User "{}" does not exist.'.format(user)) + return + + # Delete the secret + secret_id = users[user] + layer.kubernetes_control_plane.delete_secret(secret_id) + + action_set({"msg": 'User "{}" deleted.'.format(user)}) + action_set({"users": ", ".join(u for u in list(users) if u != user)}) + + +action = action_name().replace("user-", "") +if action == "create": + user_create() +elif action == "list": + user_list() +elif action == "delete": + user_delete() diff --git a/kubernetes-control-plane/actions/user-list b/kubernetes-control-plane/actions/user-list new file mode 100755 index 0000000..d191aad --- /dev/null +++ b/kubernetes-control-plane/actions/user-list @@ -0,0 +1,108 @@ +#!/usr/local/sbin/charm-env python3 +import os +import re +import sys +from charmhelpers.core import hookenv +from 
charmhelpers.core.hookenv import action_get, action_set, action_fail, action_name +from charms import layer + +os.environ["PATH"] += os.pathsep + os.path.join(os.sep, "snap", "bin") + +# Import charm layers and start reactive +layer.import_layer_libs() +hookenv._run_atstart() + + +def protect_resources(name): + """Do not allow the action to operate on names used by Charmed Kubernetes.""" + protected_names = [ + "admin", + "system:kube-controller-manager", + "kube-controller-manager", + "system:kube-proxy", + "kube-proxy", + "system:kube-scheduler", + "kube-scheduler", + "system:monitoring", + ] + if name.startswith("kubelet") or name in protected_names: + action_fail('Not allowed to {} "{}".'.format(action, name)) + sys.exit(0) + + +def user_list(): + """Return a dict of 'username: secret_id' for Charmed Kubernetes users.""" + secrets = layer.kubernetes_common.get_secret_names() + action_set({"users": ", ".join(list(secrets))}) + return secrets + + +def user_create(): + user = action_get("name") + groups = action_get("groups") or "" + protect_resources(user) + + users = user_list() + if user in list(users): + action_fail('User "{}" already exists.'.format(user)) + return + + # Validate the name + if re.search("[^0-9A-Za-z:@.-]+", user): + msg = "User name may only contain alphanumeric characters, ':', '@', '-' or '.'" + action_fail(msg) + return + + # Create the secret + # TODO: make the token format less magical so it doesn't get out of + # sync with the function that creates secrets in kubernetes_control_plane.py. 
+ token = "{}::{}".format(user, layer.kubernetes_control_plane.token_generator()) + if not layer.kubernetes_control_plane.create_secret(token, user, user, groups): + action_fail("Failed to create secret for: {}".format(user)) + return + + # Create a kubeconfig + ca_crt = layer.kubernetes_common.ca_crt_path + kubeconfig_path = "/home/ubuntu/{}-kubeconfig".format(user) + endpoints = layer.kubernetes_control_plane.get_external_api_endpoints() + if not endpoints: + action_fail("Kubernetes client endpoints currently unavailable.") + return + public_server = layer.kubernetes_control_plane.get_api_urls(endpoints)[0] + + layer.kubernetes_common.create_kubeconfig( + kubeconfig_path, public_server, ca_crt, token=token, user=user + ) + os.chmod(kubeconfig_path, 0o644) + + # Tell the people what they've won + fetch_cmd = "juju scp {}:{} .".format(hookenv.local_unit(), kubeconfig_path) + action_set({"msg": 'User "{}" created.'.format(user)}) + action_set({"users": ", ".join(list(users) + [user])}) + action_set({"kubeconfig": fetch_cmd}) + + +def user_delete(): + user = action_get("name") + protect_resources(user) + + users = user_list() + if user not in list(users): + action_fail('User "{}" does not exist.'.format(user)) + return + + # Delete the secret + secret_id = users[user] + layer.kubernetes_control_plane.delete_secret(secret_id) + + action_set({"msg": 'User "{}" deleted.'.format(user)}) + action_set({"users": ", ".join(u for u in list(users) if u != user)}) + + +action = action_name().replace("user-", "") +if action == "create": + user_create() +elif action == "list": + user_list() +elif action == "delete": + user_delete() diff --git a/kubernetes-control-plane/actions/user_actions.py b/kubernetes-control-plane/actions/user_actions.py new file mode 100755 index 0000000..d191aad --- /dev/null +++ b/kubernetes-control-plane/actions/user_actions.py @@ -0,0 +1,108 @@ +#!/usr/local/sbin/charm-env python3 +import os +import re +import sys +from charmhelpers.core import hookenv 
+from charmhelpers.core.hookenv import action_get, action_set, action_fail, action_name +from charms import layer + +os.environ["PATH"] += os.pathsep + os.path.join(os.sep, "snap", "bin") + +# Import charm layers and start reactive +layer.import_layer_libs() +hookenv._run_atstart() + + +def protect_resources(name): + """Do not allow the action to operate on names used by Charmed Kubernetes.""" + protected_names = [ + "admin", + "system:kube-controller-manager", + "kube-controller-manager", + "system:kube-proxy", + "kube-proxy", + "system:kube-scheduler", + "kube-scheduler", + "system:monitoring", + ] + if name.startswith("kubelet") or name in protected_names: + action_fail('Not allowed to {} "{}".'.format(action, name)) + sys.exit(0) + + +def user_list(): + """Return a dict of 'username: secret_id' for Charmed Kubernetes users.""" + secrets = layer.kubernetes_common.get_secret_names() + action_set({"users": ", ".join(list(secrets))}) + return secrets + + +def user_create(): + user = action_get("name") + groups = action_get("groups") or "" + protect_resources(user) + + users = user_list() + if user in list(users): + action_fail('User "{}" already exists.'.format(user)) + return + + # Validate the name + if re.search("[^0-9A-Za-z:@.-]+", user): + msg = "User name may only contain alphanumeric characters, ':', '@', '-' or '.'" + action_fail(msg) + return + + # Create the secret + # TODO: make the token format less magical so it doesn't get out of + # sync with the function that creates secrets in kubernetes_control_plane.py. 
+ token = "{}::{}".format(user, layer.kubernetes_control_plane.token_generator()) + if not layer.kubernetes_control_plane.create_secret(token, user, user, groups): + action_fail("Failed to create secret for: {}".format(user)) + return + + # Create a kubeconfig + ca_crt = layer.kubernetes_common.ca_crt_path + kubeconfig_path = "/home/ubuntu/{}-kubeconfig".format(user) + endpoints = layer.kubernetes_control_plane.get_external_api_endpoints() + if not endpoints: + action_fail("Kubernetes client endpoints currently unavailable.") + return + public_server = layer.kubernetes_control_plane.get_api_urls(endpoints)[0] + + layer.kubernetes_common.create_kubeconfig( + kubeconfig_path, public_server, ca_crt, token=token, user=user + ) + os.chmod(kubeconfig_path, 0o644) + + # Tell the people what they've won + fetch_cmd = "juju scp {}:{} .".format(hookenv.local_unit(), kubeconfig_path) + action_set({"msg": 'User "{}" created.'.format(user)}) + action_set({"users": ", ".join(list(users) + [user])}) + action_set({"kubeconfig": fetch_cmd}) + + +def user_delete(): + user = action_get("name") + protect_resources(user) + + users = user_list() + if user not in list(users): + action_fail('User "{}" does not exist.'.format(user)) + return + + # Delete the secret + secret_id = users[user] + layer.kubernetes_control_plane.delete_secret(secret_id) + + action_set({"msg": 'User "{}" deleted.'.format(user)}) + action_set({"users": ", ".join(u for u in list(users) if u != user)}) + + +action = action_name().replace("user-", "") +if action == "create": + user_create() +elif action == "list": + user_list() +elif action == "delete": + user_delete() diff --git a/kubernetes-control-plane/bin/charm-env b/kubernetes-control-plane/bin/charm-env new file mode 100755 index 0000000..d211ce9 --- /dev/null +++ b/kubernetes-control-plane/bin/charm-env @@ -0,0 +1,107 @@ +#!/bin/bash + +VERSION="1.0.0" + + +find_charm_dirs() { + # Hopefully, $JUJU_CHARM_DIR is set so which venv to use in unambiguous. 
+ if [[ -n "$JUJU_CHARM_DIR" || -n "$CHARM_DIR" ]]; then + if [[ -z "$JUJU_CHARM_DIR" ]]; then + # accept $CHARM_DIR to be more forgiving + export JUJU_CHARM_DIR="$CHARM_DIR" + fi + if [[ -z "$CHARM_DIR" ]]; then + # set CHARM_DIR as well to help with backwards compatibility + export CHARM_DIR="$JUJU_CHARM_DIR" + fi + return + fi + # Try to guess the value for JUJU_CHARM_DIR by looking for a non-subordinate + # (because there's got to be at least one principle) charm directory; + # if there are several, pick the first by alpha order. + agents_dir="/var/lib/juju/agents" + if [[ -d "$agents_dir" ]]; then + desired_charm="$1" + found_charm_dir="" + if [[ -n "$desired_charm" ]]; then + for charm_dir in $(/bin/ls -d "$agents_dir"/unit-*/charm); do + charm_name="$(grep -o '^['\''"]\?name['\''"]\?:.*' $charm_dir/metadata.yaml 2> /dev/null | sed -e 's/.*: *//' -e 's/['\''"]//g')" + if [[ "$charm_name" == "$desired_charm" ]]; then + if [[ -n "$found_charm_dir" ]]; then + >&2 echo "Ambiguous possibilities for JUJU_CHARM_DIR matching '$desired_charm'; please run within a Juju hook context" + exit 1 + fi + found_charm_dir="$charm_dir" + fi + done + if [[ -z "$found_charm_dir" ]]; then + >&2 echo "Unable to determine JUJU_CHARM_DIR matching '$desired_charm'; please run within a Juju hook context" + exit 1 + fi + export JUJU_CHARM_DIR="$found_charm_dir" + export CHARM_DIR="$found_charm_dir" + return + fi + # shellcheck disable=SC2126 + non_subordinates="$(grep -L 'subordinate"\?:.*true' "$agents_dir"/unit-*/charm/metadata.yaml | wc -l)" + if [[ "$non_subordinates" -gt 1 ]]; then + >&2 echo 'Ambiguous possibilities for JUJU_CHARM_DIR; please use --charm or run within a Juju hook context' + exit 1 + elif [[ "$non_subordinates" -eq 1 ]]; then + for charm_dir in $(/bin/ls -d "$agents_dir"/unit-*/charm); do + if grep -q 'subordinate"\?:.*true' "$charm_dir/metadata.yaml"; then + continue + fi + export JUJU_CHARM_DIR="$charm_dir" + export CHARM_DIR="$charm_dir" + return + done + fi + 
fi + >&2 echo 'Unable to determine JUJU_CHARM_DIR; please run within a Juju hook context' + exit 1 +} + +try_activate_venv() { + if [[ -d "$JUJU_CHARM_DIR/../.venv" ]]; then + . "$JUJU_CHARM_DIR/../.venv/bin/activate" + fi +} + +find_wrapped() { + PATH="${PATH/\/usr\/local\/sbin:}" which "$(basename "$0")" +} + + +if [[ "$1" == "--version" || "$1" == "-v" ]]; then + echo "$VERSION" + exit 0 +fi + + +# allow --charm option to hint which JUJU_CHARM_DIR to choose when ambiguous +# NB: --charm option must come first +# NB: option must be processed outside find_charm_dirs to modify $@ +charm_name="" +if [[ "$1" == "--charm" ]]; then + charm_name="$2" + shift; shift +fi + +find_charm_dirs "$charm_name" +try_activate_venv +export PYTHONPATH="$JUJU_CHARM_DIR/lib:$PYTHONPATH" + +if [[ "$(basename "$0")" == "charm-env" ]]; then + # being used as a shebang + exec "$@" +elif [[ "$0" == "$BASH_SOURCE" ]]; then + # being invoked as a symlink wrapping something to find in the venv + exec "$(find_wrapped)" "$@" +elif [[ "$(basename "$BASH_SOURCE")" == "charm-env" ]]; then + # being sourced directly; do nothing + /bin/true +else + # being sourced for wrapped bash helpers + . 
"$(find_wrapped)" +fi diff --git a/kubernetes-control-plane/bin/layer_option b/kubernetes-control-plane/bin/layer_option new file mode 100755 index 0000000..3253ef8 --- /dev/null +++ b/kubernetes-control-plane/bin/layer_option @@ -0,0 +1,22 @@ +#!/usr/bin/env python3 + +import sys +import argparse +from charms import layer + + +parser = argparse.ArgumentParser(description='Access layer options.') +parser.add_argument('section', + help='the section, or layer, the option is from') +parser.add_argument('option', + help='the option to access') + +args = parser.parse_args() +value = layer.options.get(args.section, args.option) +if isinstance(value, bool): + sys.exit(0 if value else 1) +elif isinstance(value, list): + for val in value: + print(val) +else: + print(value) diff --git a/kubernetes-control-plane/build-cni-resources.sh b/kubernetes-control-plane/build-cni-resources.sh new file mode 100755 index 0000000..0da8f1e --- /dev/null +++ b/kubernetes-control-plane/build-cni-resources.sh @@ -0,0 +1,45 @@ +#!/usr/bin/env bash + +set -eux + +# When changing CNI_VERSION, it should be updated in both +# charm-kubernetes-control-plane/build-cni-resources.sh and +# charm-kubernetes-worker/build-cni-resources.sh +CNI_VERSION="${CNI_VERSION:-v0.7.5}" +ARCH="${ARCH:-amd64 arm64 s390x}" + +build_script_commit="$(git show --oneline -q)" +temp_dir="$(readlink -f build-cni-resources.tmp)" +rm -rf "$temp_dir" +mkdir "$temp_dir" +(cd "$temp_dir" + git clone https://github.com/containernetworking/plugins.git cni-plugins \ + --branch "$CNI_VERSION" \ + --depth 1 + + # Grab the user id and group id of this current user. 
+ GROUP_ID=$(id -g) + USER_ID=$(id -u) + + for arch in $ARCH; do + echo "Building cni $CNI_VERSION for $arch" + rm -f cni-plugins/bin/* + docker run \ + --rm \ + -e GOOS=linux \ + -e GOARCH="$arch" \ + -v "$temp_dir"/cni-plugins:/cni \ + golang:1.15 \ + /bin/bash -c "cd /cni && ./build.sh && chown -R ${USER_ID}:${GROUP_ID} /cni" + + (cd cni-plugins/bin + echo "cni-$arch $CNI_VERSION" >> BUILD_INFO + echo "Built $(date)" >> BUILD_INFO + echo "build script commit: $build_script_commit" >> BUILD_INFO + echo "cni-plugins commit: $(git show --oneline -q)" >> BUILD_INFO + tar -czf "$temp_dir/cni-$arch.tgz" . + ) + done +) +mv "$temp_dir"/cni-*.tgz . +rm -rf "$temp_dir" diff --git a/kubernetes-control-plane/config.yaml b/kubernetes-control-plane/config.yaml new file mode 100644 index 0000000..79731f3 --- /dev/null +++ b/kubernetes-control-plane/config.yaml @@ -0,0 +1,511 @@ +# Copyright 2016 Canonical Ltd. +# +# This file is part of the Snap layer for Juju. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +"options": + # snap_proxy and snap_proxy_url have been deprecated for some time. + # If your charm still needs them, add these config items manually + # to your charm's config.yaml. + # snap_proxy: + # description: > + # DEPRECATED. Use snap-http-proxy and snap-https-proxy model configuration settings. + # HTTP/HTTPS web proxy for Snappy to use when accessing the snap store. 
+ # type: string + # default: "" + # snap_proxy_url: + # default: "" + # type: string + # description: > + # DEPRECATED. Use snap-store-proxy model configuration setting. + # The address of a Snap Store Proxy to use for snaps e.g. http://snap-proxy.example.com + "snapd_refresh": + "default": "max" + "type": "string" + "description": | + How often snapd handles updates for installed snaps. Setting an empty + string will check 4x per day. Set to "max" to delay the refresh as long + as possible. You may also set a custom string as described in the + 'refresh.timer' section here: + https://forum.snapcraft.io/t/system-options/87 + + DEPRECATED in 1.19: Manage installed snap versions with the snap-store-proxy model config. + See: https://snapcraft.io/snap-store-proxy and https://juju.is/docs/offline-mode-strategies#heading--snap-specific-proxy + "nagios_context": + "default": "juju" + "type": "string" + "description": | + Used by the nrpe subordinate charms. + A string that will be prepended to instance name to set the host name + in nagios. So for instance the hostname would be something like: + juju-myservice-0 + If you're running multiple environments with the same services in them + this allows you to differentiate between them. + "nagios_servicegroups": + "default": "" + "type": "string" + "description": | + A comma-separated list of nagios servicegroups. 
+ If left empty, the nagios_context will be used as the servicegroup + "sysctl": + "type": "string" + "default": "{net.ipv4.conf.all.forwarding: 1, net.ipv4.conf.all.rp_filter: 1,\ + \ net.ipv4.neigh.default.gc_thresh1: 128, net.ipv4.neigh.default.gc_thresh2:\ + \ 28672, net.ipv4.neigh.default.gc_thresh3: 32768, net.ipv6.neigh.default.gc_thresh1:\ + \ 128, net.ipv6.neigh.default.gc_thresh2: 28672, net.ipv6.neigh.default.gc_thresh3:\ + \ 32768, fs.inotify.max_user_instances: 8192, fs.inotify.max_user_watches: 1048576,\ + \ kernel.panic: 10, kernel.panic_on_oops: 1, vm.overcommit_memory: 1}" + "description": | + YAML formatted associative array of sysctl values, e.g.: + '{kernel.pid_max: 4194303}'. Note that kube-proxy handles + the conntrack settings. The proper way to alter them is to + use the proxy-extra-args config to set them, e.g.: + juju config kubernetes-control-plane proxy-extra-args="conntrack-min=1000000 conntrack-max-per-core=250000" + juju config kubernetes-worker proxy-extra-args="conntrack-min=1000000 conntrack-max-per-core=250000" + The proxy-extra-args conntrack-min and conntrack-max-per-core can be set to 0 to ignore + kube-proxy's settings and use the sysctl settings instead. Note the fundamental difference between + the setting of conntrack-max-per-core vs nf_conntrack_max. + "proxy-extra-args": + "type": "string" + "default": "" + "description": | + Space separated list of flags and key=value pairs that will be passed as arguments to + kube-proxy. For example a value like this: + runtime-config=batch/v2alpha1=true profiling=true + will result in kube-apiserver being run with the following options: + --runtime-config=batch/v2alpha1=true --profiling=true + "kubelet-extra-args": + "type": "string" + "default": "" + "description": | + Space separated list of flags and key=value pairs that will be passed as arguments to + kubelet. 
For example a value like this: + runtime-config=batch/v2alpha1=true profiling=true + will result in kubelet being run with the following options: + --runtime-config=batch/v2alpha1=true --profiling=true + Note: As of Kubernetes 1.10.x, many of Kubelet's args have been deprecated, and can + be set with kubelet-extra-config instead. + "kubelet-extra-config": + "default": "{}" + "type": "string" + "description": | + Extra configuration to be passed to kubelet. Any values specified in this + config will be merged into a KubeletConfiguration file that is passed to + the kubelet service via the --config flag. This can be used to override + values provided by the charm. + + Requires Kubernetes 1.10+. + + The value for this config must be a YAML mapping that can be safely + merged with a KubeletConfiguration file. For example: + {evictionHard: {memory.available: 200Mi}} + + For more information about KubeletConfiguration, see upstream docs: + https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ + "labels": + "type": "string" + "default": "" + "description": | + Labels can be used to organize and to select subsets of nodes in the + cluster. Declare node labels in key=value format, separated by spaces. + "extra_packages": + "description": > + Space separated list of extra deb packages to install. + "type": "string" + "default": "" + "package_status": + "default": "install" + "type": "string" + "description": > + The status of service-affecting packages will be set to this + value in the dpkg database. Valid values are "install" and "hold". + "install_sources": + "description": > + List of extra apt sources, per charm-helpers standard + format (a yaml list of strings encoded as a string). Each source + may be either a line that can be added directly to + sources.list(5), or in the form ppa:/ for adding + Personal Package Archives, or a distribution component to enable. 
+ "type": "string" + "default": "" + "install_keys": + "description": > + List of signing keys for install_sources package sources, per + charmhelpers standard format (a yaml list of strings encoded as + a string). The keys should be the full ASCII armoured GPG public + keys. While GPG key ids are also supported and looked up on a + keyserver, operators should be aware that this mechanism is + insecure. null can be used if a standard package signing key is + used that will already be installed on the machine, and for PPA + sources where the package signing key is securely retrieved from + Launchpad. + "type": "string" + "default": "" + "ha-cluster-vip": + "type": "string" + "description": | + Virtual IP for the charm to use with the HA Cluster subordinate charm + Mutually exclusive with ha-cluster-dns. Multiple virtual IPs are + separated by spaces. + "default": "" + "ha-cluster-dns": + "type": "string" + "description": | + DNS entry to use with the HA Cluster subordinate charm. + Mutually exclusive with ha-cluster-vip. + "default": "" + "audit-policy": + "type": "string" + "default": | + apiVersion: audit.k8s.io/v1 + kind: Policy + rules: + # Don't log read-only requests from the apiserver + - level: None + users: ["system:apiserver"] + verbs: ["get", "list", "watch"] + # Don't log kube-proxy watches + - level: None + users: ["system:kube-proxy"] + verbs: ["watch"] + resources: + - resources: ["endpoints", "services"] + # Don't log nodes getting their own status + - level: None + userGroups: ["system:nodes"] + verbs: ["get"] + resources: + - resources: ["nodes"] + # Don't log kube-controller-manager and kube-scheduler getting endpoints + - level: None + users: ["system:unsecured"] + namespaces: ["kube-system"] + verbs: ["get"] + resources: + - resources: ["endpoints"] + # Log everything else at the Request level. + - level: Request + omitStages: + - RequestReceived + "description": | + Audit policy passed to kube-apiserver via --audit-policy-file. 
+ For more info, please refer to the upstream documentation at + https://kubernetes.io/docs/tasks/debug-application-cluster/audit/ + "audit-webhook-config": + "type": "string" + "default": "" + "description": | + Audit webhook config passed to kube-apiserver via --audit-webhook-config-file. + For more info, please refer to the upstream documentation at + https://kubernetes.io/docs/tasks/debug-application-cluster/audit/ + "image-registry": + "type": "string" + "default": "rocks.canonical.com:443/cdk" + "description": | + Container image registry to use for CDK. This includes addons like the Kubernetes dashboard, + metrics server, ingress, and dns along with non-addon images including the pause + container and default backend image. + "enable-dashboard-addons": + "type": "boolean" + "default": !!bool "true" + "description": "Deploy the Kubernetes Dashboard" + "dns-provider": + "type": "string" + "default": "auto" + "description": | + DNS provider addon to use. Can be "auto", "core-dns", "kube-dns", or + "none". + + CoreDNS is only supported on Kubernetes 1.14+. + + When set to "auto", the behavior is as follows: + - New deployments of Kubernetes 1.14+ will use CoreDNS + - New deployments of Kubernetes 1.13 or older will use KubeDNS + - Upgraded deployments will continue to use whichever provider was + previously used. + "dns_domain": + "type": "string" + "default": "cluster.local" + "description": "The local domain for cluster dns" + "extra_sans": + "type": "string" + "default": "" + "description": | + Space-separated list of extra SAN entries to add to the x509 certificate + created for the control plane nodes. + "service-cidr": + "type": "string" + "default": "10.152.183.0/24" + "description": | + CIDR to use for Kubernetes services. After deployment it is + only possible to increase the size of the IP range. It is not possible to + change or shrink the address range after deployment. 
+ "allow-privileged": + "type": "string" + "default": "auto" + "description": | + Allow kube-apiserver to run in privileged mode. Supported values are + "true", "false", and "auto". If "true", kube-apiserver will run in + privileged mode by default. If "false", kube-apiserver will never run in + privileged mode. If "auto", kube-apiserver will not run in privileged + mode by default, but will switch to privileged mode if gpu hardware is + detected on a worker node. + "enable-nvidia-plugin": + "type": "string" + "default": "auto" + "description": | + Load the nvidia device plugin daemonset. Supported values are + "auto" and "false". When "auto", the daemonset will be loaded + only if GPUs are detected. When "false" the nvidia device plugin + will not be loaded. + "channel": + "type": "string" + "default": "1.24/stable" + "description": | + Snap channel to install Kubernetes control plane services from + "client_password": + "type": "string" + "default": "" + "description": | + Password to be used for admin user (leave empty for random password). + "api-extra-args": + "type": "string" + "default": "" + "description": | + Space separated list of flags and key=value pairs that will be passed as arguments to + kube-apiserver. For example a value like this: + runtime-config=batch/v2alpha1=true profiling=true + will result in kube-apiserver being run with the following options: + --runtime-config=batch/v2alpha1=true --profiling=true + "controller-manager-extra-args": + "type": "string" + "default": "" + "description": | + Space separated list of flags and key=value pairs that will be passed as arguments to + kube-controller-manager. 
For example a value like this: + runtime-config=batch/v2alpha1=true profiling=true + will result in kube-controller-manager being run with the following options: + --runtime-config=batch/v2alpha1=true --profiling=true + "scheduler-extra-args": + "type": "string" + "default": "" + "description": | + Space separated list of flags and key=value pairs that will be passed as arguments to + kube-scheduler. For example a value like this: + runtime-config=batch/v2alpha1=true profiling=true + will result in kube-scheduler being run with the following options: + --runtime-config=batch/v2alpha1=true --profiling=true + "authorization-mode": + "type": "string" + "default": "Node,RBAC" + "description": | + Comma separated authorization modes. Allowed values are + "RBAC", "Node", "Webhook", "ABAC", "AlwaysDeny" and "AlwaysAllow". + "require-manual-upgrade": + "type": "boolean" + "default": !!bool "true" + "description": | + When true, control plane nodes will not be upgraded until the user triggers + it manually by running the upgrade action. + "storage-backend": + "type": "string" + "default": "auto" + "description": | + The storage backend for kube-apiserver persistence. Can be "etcd2", "etcd3", or + "auto". Auto mode will select etcd3 on new installations, or etcd2 on upgrades. + "enable-metrics": + "type": "boolean" + "default": !!bool "true" + "description": | + If true the metrics server for Kubernetes will be deployed onto the cluster + managed entirely by kubernetes addons. Consider disabling this option and deploying + `kubernetes-metrics-server-operator` into a kubernetes model. + "default-storage": + "type": "string" + "default": "auto" + "description": | + The storage class to make the default storage class. Allowed values are "auto", + "none", "ceph-xfs", "ceph-ext4", "cephfs". Note: Only works in Kubernetes >= 1.10 + "cephfs-mounter": + "type": "string" + "default": "default" + "description": | + The client driver used for cephfs based storage. 
Options are "fuse", "kernel" and "default". + "keystone-policy": + "default": | + apiVersion: v1 + kind: ConfigMap + metadata: + name: k8s-auth-policy + namespace: kube-system + labels: + k8s-app: k8s-keystone-auth + data: + policies: | + [ + { + "resource": { + "verbs": ["get", "list", "watch"], + "resources": ["*"], + "version": "*", + "namespace": "*" + }, + "match": [ + { + "type": "role", + "values": ["k8s-viewers"] + }, + { + "type": "project", + "values": ["k8s"] + } + ] + }, + { + "resource": { + "verbs": ["*"], + "resources": ["*"], + "version": "*", + "namespace": "default" + }, + "match": [ + { + "type": "role", + "values": ["k8s-users"] + }, + { + "type": "project", + "values": ["k8s"] + } + ] + }, + { + "resource": { + "verbs": ["*"], + "resources": ["*"], + "version": "*", + "namespace": "*" + }, + "match": [ + { + "type": "role", + "values": ["k8s-admins"] + }, + { + "type": "project", + "values": ["k8s"] + } + ] + } + ] + "type": "string" + "description": | + Policy for Keystone authorization. This is used when a Keystone charm is + related to kubernetes-control-plane in order to provide authorization + for Keystone users on the Kubernetes cluster. + "enable-keystone-authorization": + "type": "boolean" + "default": !!bool "false" + "description": | + If true and the Keystone charm is related, users will authorize against + the Keystone server. Note that if related, users will always authenticate + against Keystone. + "keystone-ssl-ca": + "type": "string" + "description": | + Keystone certificate authority encoded in base64 for securing communications to Keystone. + For example: `juju config kubernetes-control-plane keystone-ssl-ca=$(base64 /path/to/ca.crt)` + "default": "" + "dashboard-auth": + "type": "string" + "description": | + Method of authentication for the Kubernetes dashboard. Allowed values are "auto", + "basic", and "token". 
If set to "auto", basic auth is used unless Keystone is + related to kubernetes-control-plane, in which case token auth is used. + + DEPRECATED: this option has no effect on Kubernetes 1.19 and above. + "default": "auto" + "loadbalancer-ips": + "type": "string" + "description": | + Space separated list of IP addresses of loadbalancers in front of the control plane. + These can be either virtual IP addresses that have been floated in front of the control + plane or the IP of a loadbalancer appliance such as an F5. Workers will alternate IP + addresses from this list to distribute load - for example If you have 2 IPs and 4 workers, + each IP will be used by 2 workers. Note that this will only work if kubeapi-load-balancer + is not in use and there is a relation between kubernetes-control-plane:kube-api-endpoint and + kubernetes-worker:kube-api-endpoint. If using the kubeapi-load-balancer, see the + loadbalancer-ips configuration variable on the kubeapi-load-balancer charm. + "default": "" + "default-cni": + "type": "string" + "description": | + Default CNI network to use when multiple CNI subordinates are related. + + The value of this config should be the application name of a related CNI + subordinate. For example: + + juju config kubernetes-control-plane default-cni=flannel + + If unspecified, then the default CNI network is chosen alphabetically. + "default": "" + "authn-webhook-endpoint": + "type": "string" + "default": "" + "description": | + Custom endpoint to check when authenticating kube-apiserver requests. + This must be an https url accessible by the kubernetes-control-plane units. For example: + + https://your.server:8443/authenticate + + When a JSON-serialized TokenReview object is POSTed to this endpoint, it must + respond with appropriate authentication details. 
For more info, please refer + to the upstream documentation at + https://kubernetes.io/docs/reference/access-authn-authz/authentication/#webhook-token-authentication + "pod-security-policy": + "type": "string" + "default": "" + "description": | + Default RBAC pod security policy [0] and privileged cluster roles formatted + as a YAML file as a string. + A good example of a PSP policy can be found here [1]. + + [0] https://kubernetes.io/docs/concepts/policy/pod-security-policy/ + [1] https://github.com/kubernetes/examples/blob/master/staging/podsecuritypolicy/rbac/policies.yaml + "register-with-taints": + "type": "string" + "default": "juju.is/kubernetes-control-plane=true:NoSchedule" + "description": | + Space-separated list of taints to apply to this node at registration time. + + This config is only used at deploy time when Kubelet first registers the + node with Kubernetes. To change node taints after deploy time, use kubectl + instead. + + For more information, see the upstream Kubernetes documentation about + taints: + https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/ + "api-aggregation-extension": + "type": "boolean" + "default": !!bool "true" + "description": | + Note: required if 'enable-metrics' is enabled. + + Configuring the aggregation layer allows the Kubernetes apiserver to be extended + with additional APIs, which are not part of the core Kubernetes APIs. + + For more information, see the upstream Kubernetes documentation about this + feature: + https://kubernetes.io/docs/tasks/extend-kubernetes/configure-aggregation-layer/#enable-kubernetes-apiserver-flags + + diff --git a/kubernetes-control-plane/copyright b/kubernetes-control-plane/copyright new file mode 100644 index 0000000..8aec8ec --- /dev/null +++ b/kubernetes-control-plane/copyright @@ -0,0 +1,13 @@ +Copyright 2016 The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. diff --git a/kubernetes-control-plane/copyright.layer-apt b/kubernetes-control-plane/copyright.layer-apt new file mode 100644 index 0000000..0814dc1 --- /dev/null +++ b/kubernetes-control-plane/copyright.layer-apt @@ -0,0 +1,15 @@ +Copyright 2015-2016 Canonical Ltd. + +This file is part of the Apt layer for Juju. + +This program is free software: you can redistribute it and/or modify +it under the terms of the GNU General Public License version 3, as +published by the Free Software Foundation. + +This program is distributed in the hope that it will be useful, but +WITHOUT ANY WARRANTY; without even the implied warranties of +MERCHANTABILITY, SATISFACTORY QUALITY, or FITNESS FOR A PARTICULAR +PURPOSE. See the GNU General Public License for more details. + +You should have received a copy of the GNU General Public License +along with this program. If not, see . diff --git a/kubernetes-control-plane/copyright.layer-basic b/kubernetes-control-plane/copyright.layer-basic new file mode 100644 index 0000000..d4fdd18 --- /dev/null +++ b/kubernetes-control-plane/copyright.layer-basic @@ -0,0 +1,16 @@ +Format: http://dep.debian.net/deps/dep5/ + +Files: * +Copyright: Copyright 2015-2017, Canonical Ltd., All Rights Reserved. +License: Apache License 2.0 + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + . 
+ http://www.apache.org/licenses/LICENSE-2.0 + . + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/kubernetes-control-plane/copyright.layer-coordinator b/kubernetes-control-plane/copyright.layer-coordinator new file mode 100644 index 0000000..b8518aa --- /dev/null +++ b/kubernetes-control-plane/copyright.layer-coordinator @@ -0,0 +1,15 @@ +Copyright 2015-2016 Canonical Ltd. + +This file is part of the Coordinator Layer for Juju. + +This program is free software: you can redistribute it and/or modify +it under the terms of the GNU General Public License version 3, as +published by the Free Software Foundation. + +This program is distributed in the hope that it will be useful, but +WITHOUT ANY WARRANTY; without even the implied warranties of +MERCHANTABILITY, SATISFACTORY QUALITY, or FITNESS FOR A PARTICULAR +PURPOSE. See the GNU General Public License for more details. + +You should have received a copy of the GNU General Public License +along with this program. If not, see . diff --git a/kubernetes-control-plane/copyright.layer-leadership b/kubernetes-control-plane/copyright.layer-leadership new file mode 100644 index 0000000..08b983f --- /dev/null +++ b/kubernetes-control-plane/copyright.layer-leadership @@ -0,0 +1,15 @@ +Copyright 2015-2016 Canonical Ltd. + +This file is part of the Leadership Layer for Juju. + +This program is free software: you can redistribute it and/or modify +it under the terms of the GNU General Public License version 3, as +published by the Free Software Foundation. 
+ +This program is distributed in the hope that it will be useful, but +WITHOUT ANY WARRANTY; without even the implied warranties of +MERCHANTABILITY, SATISFACTORY QUALITY, or FITNESS FOR A PARTICULAR +PURPOSE. See the GNU General Public License for more details. + +You should have received a copy of the GNU General Public License +along with this program. If not, see . diff --git a/kubernetes-control-plane/copyright.layer-metrics b/kubernetes-control-plane/copyright.layer-metrics new file mode 100644 index 0000000..2df15bd --- /dev/null +++ b/kubernetes-control-plane/copyright.layer-metrics @@ -0,0 +1,13 @@ +Copyright 2016 Canonical Ltd + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/kubernetes-control-plane/copyright.layer-nagios b/kubernetes-control-plane/copyright.layer-nagios new file mode 100644 index 0000000..c80db95 --- /dev/null +++ b/kubernetes-control-plane/copyright.layer-nagios @@ -0,0 +1,16 @@ +Format: http://dep.debian.net/deps/dep5/ + +Files: * +Copyright: Copyright 2016, Canonical Ltd. +License: GPL-3 + This program is free software: you can redistribute it and/or modify + it under the terms of the GNU General Public License version 3, as + published by the Free Software Foundation. + . + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranties of + MERCHANTABILITY, SATISFACTORY QUALITY, or FITNESS FOR A PARTICULAR + PURPOSE. See the GNU General Public License for more details. + . 
+ You should have received a copy of the GNU General Public License + along with this program. If not, see . diff --git a/kubernetes-control-plane/copyright.layer-options b/kubernetes-control-plane/copyright.layer-options new file mode 100644 index 0000000..d4fdd18 --- /dev/null +++ b/kubernetes-control-plane/copyright.layer-options @@ -0,0 +1,16 @@ +Format: http://dep.debian.net/deps/dep5/ + +Files: * +Copyright: Copyright 2015-2017, Canonical Ltd., All Rights Reserved. +License: Apache License 2.0 + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + . + http://www.apache.org/licenses/LICENSE-2.0 + . + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/kubernetes-control-plane/copyright.layer-snap b/kubernetes-control-plane/copyright.layer-snap new file mode 100644 index 0000000..261eeb9 --- /dev/null +++ b/kubernetes-control-plane/copyright.layer-snap @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. 
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. 
You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. 
Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/kubernetes-control-plane/copyright.layer-status b/kubernetes-control-plane/copyright.layer-status new file mode 100644 index 0000000..a91bdf1 --- /dev/null +++ b/kubernetes-control-plane/copyright.layer-status @@ -0,0 +1,16 @@ +Format: http://dep.debian.net/deps/dep5/ + +Files: * +Copyright: Copyright 2018, Canonical Ltd., All Rights Reserved. +License: Apache License 2.0 + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + . + http://www.apache.org/licenses/LICENSE-2.0 + . + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/kubernetes-control-plane/copyright.layer-vault-kv b/kubernetes-control-plane/copyright.layer-vault-kv new file mode 100644 index 0000000..a91bdf1 --- /dev/null +++ b/kubernetes-control-plane/copyright.layer-vault-kv @@ -0,0 +1,16 @@ +Format: http://dep.debian.net/deps/dep5/ + +Files: * +Copyright: Copyright 2018, Canonical Ltd., All Rights Reserved. 
+License: Apache License 2.0 + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + . + http://www.apache.org/licenses/LICENSE-2.0 + . + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/kubernetes-control-plane/copyright.layer-vaultlocker b/kubernetes-control-plane/copyright.layer-vaultlocker new file mode 100644 index 0000000..a91bdf1 --- /dev/null +++ b/kubernetes-control-plane/copyright.layer-vaultlocker @@ -0,0 +1,16 @@ +Format: http://dep.debian.net/deps/dep5/ + +Files: * +Copyright: Copyright 2018, Canonical Ltd., All Rights Reserved. +License: Apache License 2.0 + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + . + http://www.apache.org/licenses/LICENSE-2.0 + . + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/kubernetes-control-plane/debug-scripts/auth-webhook b/kubernetes-control-plane/debug-scripts/auth-webhook new file mode 100755 index 0000000..befa79e --- /dev/null +++ b/kubernetes-control-plane/debug-scripts/auth-webhook @@ -0,0 +1,7 @@ +#!/bin/sh +set -ux + +systemctl status cdk.master.auth-webhook.service > $DEBUG_SCRIPT_DIR/auth-webhook-systemctl-status + +AUTH_LOG=/root/cdk/auth-webhook/auth-webhook.log +test -f $AUTH_LOG && cp $AUTH_LOG $DEBUG_SCRIPT_DIR diff --git a/kubernetes-control-plane/debug-scripts/charm-unitdata b/kubernetes-control-plane/debug-scripts/charm-unitdata new file mode 100755 index 0000000..d2aac60 --- /dev/null +++ b/kubernetes-control-plane/debug-scripts/charm-unitdata @@ -0,0 +1,12 @@ +#!/usr/local/sbin/charm-env python3 + +import debug_script +import json +from charmhelpers.core import unitdata + +kv = unitdata.kv() +data = kv.getrange("") + +with debug_script.open_file("unitdata.json", "w") as f: + json.dump(data, f, indent=2) + f.write("\n") diff --git a/kubernetes-control-plane/debug-scripts/filesystem b/kubernetes-control-plane/debug-scripts/filesystem new file mode 100755 index 0000000..c5ec6d8 --- /dev/null +++ b/kubernetes-control-plane/debug-scripts/filesystem @@ -0,0 +1,17 @@ +#!/bin/sh +set -ux + +# report file system disk space usage +df -hT > $DEBUG_SCRIPT_DIR/df-hT +# estimate file space usage +du -h / 2>&1 > $DEBUG_SCRIPT_DIR/du-h +# list the mounted filesystems +mount > $DEBUG_SCRIPT_DIR/mount +# list the mounted systems with ascii trees +findmnt -A > $DEBUG_SCRIPT_DIR/findmnt +# list block devices +lsblk > $DEBUG_SCRIPT_DIR/lsblk +# list open files +lsof 2>&1 > $DEBUG_SCRIPT_DIR/lsof +# list local system locks +lslocks > $DEBUG_SCRIPT_DIR/lslocks diff --git a/kubernetes-control-plane/debug-scripts/juju-logs b/kubernetes-control-plane/debug-scripts/juju-logs new file mode 100755 index 0000000..d27c458 --- /dev/null +++ b/kubernetes-control-plane/debug-scripts/juju-logs @@ -0,0 +1,4 @@ +#!/bin/sh +set -ux + 
+cp -v /var/log/juju/* $DEBUG_SCRIPT_DIR diff --git a/kubernetes-control-plane/debug-scripts/juju-network-get b/kubernetes-control-plane/debug-scripts/juju-network-get new file mode 100755 index 0000000..983c8c4 --- /dev/null +++ b/kubernetes-control-plane/debug-scripts/juju-network-get @@ -0,0 +1,21 @@ +#!/usr/local/sbin/charm-env python3 + +import os +import subprocess +import yaml +import debug_script + +with open('metadata.yaml') as f: + metadata = yaml.load(f) + +relations = [] +for key in ['requires', 'provides', 'peers']: + relations += list(metadata.get(key, {}).keys()) + +os.mkdir(os.path.join(debug_script.dir, 'relations')) + +for relation in relations: + path = 'relations/' + relation + with debug_script.open_file(path, 'w') as f: + cmd = ['network-get', relation] + subprocess.call(cmd, stdout=f, stderr=subprocess.STDOUT) diff --git a/kubernetes-control-plane/debug-scripts/kubectl b/kubernetes-control-plane/debug-scripts/kubectl new file mode 100755 index 0000000..216231d --- /dev/null +++ b/kubernetes-control-plane/debug-scripts/kubectl @@ -0,0 +1,15 @@ +#!/bin/sh +set -ux + +export PATH=$PATH:/snap/bin + +alias kubectl="kubectl --kubeconfig=/root/.kube/config" + +kubectl cluster-info > $DEBUG_SCRIPT_DIR/cluster-info +kubectl cluster-info dump > $DEBUG_SCRIPT_DIR/cluster-info-dump +for obj in pods svc ingress secrets pv pvc rc; do + kubectl describe $obj --all-namespaces > $DEBUG_SCRIPT_DIR/describe-$obj +done +for obj in nodes; do + kubectl describe $obj > $DEBUG_SCRIPT_DIR/describe-$obj +done diff --git a/kubernetes-control-plane/debug-scripts/kubernetes-master-services b/kubernetes-control-plane/debug-scripts/kubernetes-master-services new file mode 100755 index 0000000..59d646b --- /dev/null +++ b/kubernetes-control-plane/debug-scripts/kubernetes-master-services @@ -0,0 +1,9 @@ +#!/bin/sh +set -ux + +for service in kube-apiserver kube-controller-manager kube-scheduler kube-proxy; do + systemctl status snap.$service.daemon > 
$DEBUG_SCRIPT_DIR/$service-systemctl-status + journalctl -u snap.$service.daemon > $DEBUG_SCRIPT_DIR/$service-journal +done + +# FIXME: grab snap config or something diff --git a/kubernetes-control-plane/debug-scripts/network b/kubernetes-control-plane/debug-scripts/network new file mode 100755 index 0000000..944a355 --- /dev/null +++ b/kubernetes-control-plane/debug-scripts/network @@ -0,0 +1,11 @@ +#!/bin/sh +set -ux + +ifconfig -a > $DEBUG_SCRIPT_DIR/ifconfig +cp -v /etc/resolv.conf $DEBUG_SCRIPT_DIR/resolv.conf +cp -v /etc/network/interfaces $DEBUG_SCRIPT_DIR/interfaces +netstat -planut > $DEBUG_SCRIPT_DIR/netstat +route -n > $DEBUG_SCRIPT_DIR/route +iptables-save > $DEBUG_SCRIPT_DIR/iptables-save +dig google.com > $DEBUG_SCRIPT_DIR/dig-google +ping -w 2 -i 0.1 google.com > $DEBUG_SCRIPT_DIR/ping-google diff --git a/kubernetes-control-plane/debug-scripts/packages b/kubernetes-control-plane/debug-scripts/packages new file mode 100755 index 0000000..b60a9cf --- /dev/null +++ b/kubernetes-control-plane/debug-scripts/packages @@ -0,0 +1,7 @@ +#!/bin/sh +set -ux + +dpkg --list > $DEBUG_SCRIPT_DIR/dpkg-list +snap list > $DEBUG_SCRIPT_DIR/snap-list +pip2 list > $DEBUG_SCRIPT_DIR/pip2-list +pip3 list > $DEBUG_SCRIPT_DIR/pip3-list diff --git a/kubernetes-control-plane/debug-scripts/sysctl b/kubernetes-control-plane/debug-scripts/sysctl new file mode 100755 index 0000000..a86a6c8 --- /dev/null +++ b/kubernetes-control-plane/debug-scripts/sysctl @@ -0,0 +1,4 @@ +#!/bin/sh +set -ux + +sysctl -a > $DEBUG_SCRIPT_DIR/sysctl diff --git a/kubernetes-control-plane/debug-scripts/systemd b/kubernetes-control-plane/debug-scripts/systemd new file mode 100755 index 0000000..8bb9b6f --- /dev/null +++ b/kubernetes-control-plane/debug-scripts/systemd @@ -0,0 +1,9 @@ +#!/bin/sh +set -ux + +systemctl --all > $DEBUG_SCRIPT_DIR/systemctl +journalctl > $DEBUG_SCRIPT_DIR/journalctl +systemd-analyze time > $DEBUG_SCRIPT_DIR/systemd-analyze-time +systemd-analyze blame > 
$DEBUG_SCRIPT_DIR/systemd-analyze-blame +systemd-analyze critical-chain > $DEBUG_SCRIPT_DIR/systemd-analyze-critical-chain +systemd-analyze dump > $DEBUG_SCRIPT_DIR/systemd-analyze-dump diff --git a/kubernetes-control-plane/debug-scripts/tls-certs b/kubernetes-control-plane/debug-scripts/tls-certs new file mode 100755 index 0000000..2692e51 --- /dev/null +++ b/kubernetes-control-plane/debug-scripts/tls-certs @@ -0,0 +1,21 @@ +#!/usr/local/sbin/charm-env python3 + +import os +import shutil +import traceback +import debug_script +from charms import layer + +options = layer.options.get('tls-client') + +def copy_cert(source_key, name): + try: + source = options[source_key] + dest = os.path.join(debug_script.dir, name) + shutil.copy(source, dest) + except Exception: + traceback.print_exc() + +copy_cert('client_certificate_path', 'client.crt') +copy_cert('server_certificate_path', 'server.crt') +copy_cert('ca_certificate_path', 'ca.crt') diff --git a/kubernetes-control-plane/docs/README b/kubernetes-control-plane/docs/README new file mode 100644 index 0000000..9973bb8 --- /dev/null +++ b/kubernetes-control-plane/docs/README @@ -0,0 +1 @@ +This docs directory is currently experimental. Please do not make changes to the docs here as any edits may be lost diff --git a/kubernetes-control-plane/docs/index.md b/kubernetes-control-plane/docs/index.md new file mode 100644 index 0000000..c539440 --- /dev/null +++ b/kubernetes-control-plane/docs/index.md @@ -0,0 +1,815 @@ + + +This charm is an encapsulation of the Kubernetes control plane processes and the +operations to run on any cloud for the entire lifecycle of the cluster. + +This charm is built from other charm layers using the Juju reactive framework. +The other layers focus on specific subset of operations making this layer +specific to operations of Kubernetes control plane processes. + +# Deployment + +This charm is not fully functional when deployed by itself. 
It requires other +charms to model a complete Kubernetes cluster. A Kubernetes cluster needs a +distributed key value store such as [Etcd](https://coreos.com/etcd/) and the +kubernetes-worker charm which delivers the Kubernetes node services. A cluster +requires a Software Defined Network (SDN), a Container Runtime such as +[containerd](https://charmhub.io/containerd), and Transport Layer +Security (TLS) so the components in a cluster communicate securely. + +Please take a look at the [Charmed Kubernetes]( https://charmhub.io/charmed-kubernetes) +or the [Kubernetes core](https://charmhub.io/kubernetes-core) bundles for +examples of complete models of Kubernetes clusters. + +# Resources + +The kubernetes-control-plane charm takes advantage of the [Juju Resources](https://juju.is/docs/sdk/resources) +feature to deliver the Kubernetes software. + +In deployments on public clouds Charmhub provides the resource to the +charm automatically with no user intervention. Some environments with strict +firewall rules may not be able to contact Charmhub. In these network +restricted environments the resource can be uploaded to the model by the Juju +operator. + +Additionally, if the firewall rules restrict access to the Snap Store, the +[Snap Store Proxy](https://ubuntu.com/kubernetes/docs/proxies) may be used to +provide the snaps. + +#### Snap Refresh + +The kubernetes resources used by this charm are snap packages. When not +specified during deployment, these resources come from the public store. By +default, the `snapd` daemon will refresh all snaps installed from the store +four (4) times per day. If there is a desire to further control this, the +[Snap Store Proxy](https://ubuntu.com/kubernetes/docs/proxies) should be used. + +## Configuration + +This charm supports some configuration options to set up a Kubernetes cluster +that works in your environment, detailed in the section below. 
+ +For some specific Kubernetes service configuration tasks, please also see the +section on [configuring K8s services](#k8s-services). + + + + + + +| name | type | Default | Description | +|------|--------|--------------|-------------------------------------------| +| allow-privileged | string | auto | [See notes](#allow-privileged-description) | +| api-extra-args | string | | [See notes](#api-extra-args-description) | +| audit-policy | string | [See notes](#audit-policy-default) | Audit policy passed to kube-apiserver via --audit-policy-file. For more info, please refer to the upstream documentation at https://kubernetes.io/docs/tasks/debug-application-cluster/audit/ | +| audit-webhook-config | string | | Audit webhook config passed to kube-apiserver via --audit-webhook-config-file. For more info, please refer to the upstream documentation at https://kubernetes.io/docs/tasks/debug-application-cluster/audit/ | +| authorization-mode | string | AlwaysAllow | Comma separated authorization modes. Allowed values are "RBAC", "Node", "Webhook", "ABAC", "AlwaysDeny" and "AlwaysAllow". | +| channel | string | 1.17/stable | Snap channel to install Kubernetes master services from | +| client_password | string | | Password to be used for admin user (leave empty for random password). | +| controller-manager-extra-args | string | | [See notes](#controller-manager-extra-args-description) | +| dashboard-auth | string | auto | [See notes](#dashboard-auth-description) | +| default-storage | string | auto | The storage class to make the default storage class. Allowed values are "auto", "none", "ceph-xfs", "ceph-ext4". 
Note: Only works in Kubernetes >= 1.10 | +| dns-provider | string | auto | [See notes](#dns-provider-description) | +| dns_domain | string | cluster.local | The local domain for cluster dns | +| enable-dashboard-addons | boolean | True | Deploy the Kubernetes Dashboard and Heapster addons | +| enable-keystone-authorization | boolean | False | If true and the Keystone charm is related, users will authorize against the Keystone server. Note that if related, users will always authenticate against Keystone. | +| enable-metrics | boolean | True | If true the metrics server for Kubernetes will be deployed onto the cluster. | +| enable-nvidia-plugin | string | auto | Load the nvidia device plugin daemonset. Supported values are "auto" and "false". When "auto", the daemonset will be loaded only if GPUs are detected. When "false" the nvidia device plugin will not be loaded. | +| extra_packages | string | | Space separated list of extra deb packages to install. | +| extra_sans | string | | Space-separated list of extra SAN entries to add to the x509 certificate created for the master nodes. | +| ha-cluster-dns | string | | DNS entry to use with the HA Cluster subordinate charm. Mutually exclusive with ha-cluster-vip. | +| ha-cluster-vip | string | | Virtual IP for the charm to use with the HA Cluster subordinate charm Mutually exclusive with ha-cluster-dns. Multiple virtual IPs are separated by spaces. | +| image-registry | string | [See notes](#image-registry-default) | Container image registry to use for CDK. This includes addons like the Kubernetes dashboard, metrics server, ingress, and dns along with non-addon images including the pause container and default backend image. | +| install_keys | string | | [See notes](#install_keys-description) | +| install_sources | string | | [See notes](#install_sources-description) | +| keystone-policy | string | [See notes](#keystone-policy-default) | Policy for Keystone authorization. 
This is used when a Keystone charm is related to kubernetes-master in order to provide authorization for Keystone users on the Kubernetes cluster. | +| keystone-ssl-ca | string | | Keystone certificate authority encoded in base64 for securing communications to Keystone. For example: `juju config kubernetes-master keystone-ssl-ca=$(base64 /path/to/ca.crt)` | +| loadbalancer-ips | string | | [See notes](#loadbalancer-ips-description) | +| nagios_context | string | juju | [See notes](#nagios_context-description) | +| nagios_servicegroups | string | | A comma-separated list of nagios servicegroups. If left empty, the nagios_context will be used as the servicegroup | +| package_status | string | install | The status of service-affecting packages will be set to this value in the dpkg database. Valid values are "install" and "hold". | +| proxy-extra-args | string | | [See notes](#proxy-extra-args-description) | +| require-manual-upgrade | boolean | True | When true, master nodes will not be upgraded until the user triggers it manually by running the upgrade action. | +| scheduler-extra-args | string | | [See notes](#scheduler-extra-args-description) | +| service-cidr | string | 10.152.183.0/24 | CIDR to user for Kubernetes services. Cannot be changed after deployment. | +| snap_proxy | string | | DEPRECATED. Use snap-http-proxy and snap-https-proxy model configuration settings. HTTP/HTTPS web proxy for Snappy to use when accessing the snap store. | +| snap_proxy_url | string | | DEPRECATED. Use snap-store-proxy model configuration setting. The address of a Snap Store Proxy to use for snaps e.g. http://snap-proxy.example.com | +| snapd_refresh | string | max | [See notes](#snapd_refresh-description) | +| storage-backend | string | auto | The storage backend for kube-apiserver persistence. Can be "etcd2", "etcd3", or "auto". Auto mode will select etcd3 on new installations, or etcd2 on upgrades. 
| +| sysctl | string | [See notes](#sysctl-default) | [See notes](#sysctl-description) | + +--- + +### allow-privileged + + + +**Description:** + +Allow kube-apiserver to run in privileged mode. Supported values are +"true", "false", and "auto". If "true", kube-apiserver will run in +privileged mode by default. If "false", kube-apiserver will never run in +privileged mode. If "auto", kube-apiserver will not run in privileged +mode by default, but will switch to privileged mode if gpu hardware is +detected on a worker node. + +[Back to table](#table-allow-privileged) + + +### api-extra-args + + + +**Description:** + +Space separated list of flags and key=value pairs that will be passed as arguments to +kube-apiserver. For example a value like this: + +``` + runtime-config=batch/v2alpha1=true profiling=true +``` + +will result in kube-apiserver being run with the following options: + --runtime-config=batch/v2alpha1=true --profiling=true + +[Back to table](#table-api-extra-args) + + +### audit-policy + + + +**Default:** + +``` +apiVersion: audit.k8s.io/v1beta1 +kind: Policy +rules: +# Don't log read-only requests from the apiserver +- level: None + users: ["system:apiserver"] + verbs: ["get", "list", "watch"] +# Don't log kube-proxy watches +- level: None + users: ["system:kube-proxy"] + verbs: ["watch"] + resources: + - resources: ["endpoints", "services"] +# Don't log nodes getting their own status +- level: None + userGroups: ["system:nodes"] + verbs: ["get"] + resources: + - resources: ["nodes"] +# Don't log kube-controller-manager and kube-scheduler getting endpoints +- level: None + users: ["system:unsecured"] + namespaces: ["kube-system"] + verbs: ["get"] + resources: + - resources: ["endpoints"] +# Log everything else at the Request level. 
+- level: Request + omitStages: + - RequestReceived + +``` + + +[Back to table](#table-audit-policy) + + +### controller-manager-extra-args + + + +**Description:** + +Space separated list of flags and key=value pairs that will be passed as arguments to +kube-controller-manager. For example a value like this: + +``` + runtime-config=batch/v2alpha1=true profiling=true +``` + +will result in kube-controller-manager being run with the following options: + --runtime-config=batch/v2alpha1=true --profiling=true + +[Back to table](#table-controller-manager-extra-args) + + +### dashboard-auth + + + +**Description:** + +Method of authentication for the Kubernetes dashboard. Allowed values are "auto", +"basic", and "token". If set to "auto", basic auth is used unless Keystone is +related to kubernetes-master, in which case token auth is used. + +[Back to table](#table-dashboard-auth) + + +### dns-provider + + + +**Description:** + +DNS provider addon to use. Can be "auto", "core-dns", "kube-dns", or +"none". + +CoreDNS is only supported on Kubernetes 1.14+. + +When set to "auto", the behavior is as follows: +- New deployments of Kubernetes 1.14+ will use CoreDNS +- New deployments of Kubernetes 1.13 or older will use KubeDNS +- Upgraded deployments will continue to use whichever provider was +previously used. + +[Back to table](#table-dns-provider) + + +### image-registry + + + +**Default:** + +``` +rocks.canonical.com:443/cdk +``` + + +[Back to table](#table-image-registry) + + +### install_keys + + + +**Description:** + +List of signing keys for install_sources package sources, per charmhelpers standard format (a yaml list of strings encoded as a string). The keys should be the full ASCII armoured GPG public keys. While GPG key ids are also supported and looked up on a keyserver, operators should be aware that this mechanism is insecure. 
null can be used if a standard package signing key is used that will already be installed on the machine, and for PPA sources where the package signing key is securely retrieved from Launchpad. + +[Back to table](#table-install_keys) + + +### install_sources + + + +**Description:** + +List of extra apt sources, per charm-helpers standard format (a yaml list of strings encoded as a string). Each source may be either a line that can be added directly to sources.list(5), or in the form ppa:/ for adding Personal Package Archives, or a distribution component to enable. + +[Back to table](#table-install_sources) + + +### keystone-policy + + + +**Default:** + +``` +apiVersion: v1 +kind: ConfigMap +metadata: + name: k8s-auth-policy + namespace: kube-system + labels: + k8s-app: k8s-keystone-auth +data: + policies: | + [ + { + "resource": { + "verbs": ["get", "list", "watch"], + "resources": ["*"], + "version": "*", + "namespace": "*" + }, + "match": [ + { + "type": "role", + "values": ["k8s-viewers"] + }, + { + "type": "project", + "values": ["k8s"] + } + ] + }, + { + "resource": { + "verbs": ["*"], + "resources": ["*"], + "version": "*", + "namespace": "default" + }, + "match": [ + { + "type": "role", + "values": ["k8s-users"] + }, + { + "type": "project", + "values": ["k8s"] + } + ] + }, + { + "resource": { + "verbs": ["*"], + "resources": ["*"], + "version": "*", + "namespace": "*" + }, + "match": [ + { + "type": "role", + "values": ["k8s-admins"] + }, + { + "type": "project", + "values": ["k8s"] + } + ] + } + ] + +``` + + +[Back to table](#table-keystone-policy) + + +### loadbalancer-ips + + + +**Description:** + +Space separated list of IP addresses of loadbalancers in front of the control plane. +These can be either virtual IP addresses that have been floated in front of the control +plane or the IP of a loadbalancer appliance such as an F5. 
Workers will alternate IP +addresses from this list to distribute load - for example If you have 2 IPs and 4 workers, +each IP will be used by 2 workers. Note that this will only work if kubeapi-load-balancer +is not in use and there is a relation between kubernetes-master:kube-api-endpoint and +kubernetes-worker:kube-api-endpoint. If using the kubeapi-load-balancer, see the +loadbalancer-ips configuration variable on the kubeapi-load-balancer charm. + +[Back to table](#table-loadbalancer-ips) + + +### nagios_context + + + +**Description:** + +Used by the nrpe subordinate charms. +A string that will be prepended to instance name to set the host name +in nagios. So for instance the hostname would be something like: + +``` + juju-myservice-0 +``` + +If you're running multiple environments with the same services in them +this allows you to differentiate between them. + +[Back to table](#table-nagios_context) + + +### proxy-extra-args + + + +**Description:** + +Space separated list of flags and key=value pairs that will be passed as arguments to +kube-proxy. For example a value like this: + +``` + runtime-config=batch/v2alpha1=true profiling=true +``` + +will result in kube-apiserver being run with the following options: + --runtime-config=batch/v2alpha1=true --profiling=true + +[Back to table](#table-proxy-extra-args) + + +### scheduler-extra-args + + + +**Description:** + +Space separated list of flags and key=value pairs that will be passed as arguments to +kube-scheduler. For example a value like this: + +``` + runtime-config=batch/v2alpha1=true profiling=true +``` + +will result in kube-scheduler being run with the following options: + --runtime-config=batch/v2alpha1=true --profiling=true + +[Back to table](#table-scheduler-extra-args) + + +### snapd_refresh + + + +**Description:** + +How often snapd handles updates for installed snaps. Setting an empty +string will check 4x per day. Set to "max" to delay the refresh as long +as possible. 
You may also set a custom string as described in the +'refresh.timer' section here: + https://forum.snapcraft.io/t/system-options/87 + +[Back to table](#table-snapd_refresh) + + +### sysctl + + + +**Default:** + +``` +{ net.ipv4.conf.all.forwarding : 1, net.ipv4.neigh.default.gc_thresh1 : 128, net.ipv4.neigh.default.gc_thresh2 : 28672, net.ipv4.neigh.default.gc_thresh3 : 32768, net.ipv6.neigh.default.gc_thresh1 : 128, net.ipv6.neigh.default.gc_thresh2 : 28672, net.ipv6.neigh.default.gc_thresh3 : 32768, fs.inotify.max_user_instances : 8192, fs.inotify.max_user_watches: 1048576 } +``` + + +[Back to table](#table-sysctl) + + + +**Description:** + +YAML formatted associative array of sysctl values, e.g.: +'{kernel.pid_max : 4194303 }'. Note that kube-proxy handles +the conntrack settings. The proper way to alter them is to +use the proxy-extra-args config to set them, e.g.: + +``` + juju config kubernetes-master proxy-extra-args="conntrack-min=1000000 conntrack-max-per-core=250000" + juju config kubernetes-worker proxy-extra-args="conntrack-min=1000000 conntrack-max-per-core=250000" +``` + +The proxy-extra-args conntrack-min and conntrack-max-per-core can be set to 0 to ignore +kube-proxy's settings and use the sysctl settings instead. Note the fundamental difference between +the setting of conntrack-max-per-core vs nf_conntrack_max. + +[Back to table](#table-sysctl) + + + + + + +# Configuring K8s services + +**Charmed Kubernetes** ships with sensible, tested default configurations to +ensure a reliable Kubernetes experience, but of course these can be changed to +reflect the purpose and resources of your cluster. +The configuration section above details all available configuration options, +this section deals with specific, commonly used settings. +You may wish to also read the [Addons page][] for information on the extra +services installed with **Charmed Kubernetes**. 
+ + +## IPVS (IP Virtual Server) + +IPVS implements transport-layer load balancing as part of the Linux kernel, and +can be used by the `kube-proxy` service to handle service routing. By default +`kube-proxy` uses a solution based on iptables, but this can cause a lot of +overhead in systems with large numbers of nodes. There is more information on +this in the upstream Kubernetes [IPVS deep dive][] documentation. + +IPVS is an extra option for kube-proxy, and can be enabled by changing the +configuration: + +``` +juju config kubernetes-master proxy-extra-args="proxy-mode=ipvs" +``` + +It is also necessary to change this configuration option on the worker: + +``` +juju config kubernetes-worker proxy-extra-args="proxy-mode=ipvs" +``` + +## Admission controls + +As with other aspects of the Kubernetes API, admission controls can be +enabled by adding extra values to the charm's +[api-extra-args](#api-extra-args-description) configuration. + +For admission controls, it may be useful to refer to the +[Kubernetes blog][blog-admission] for more information on the options, but +for example, to add the `PodSecurityPolicy` admission controller: + +1. Check any current config settings for `api-extra-args` (there are none by default): + ```bash + juju config kubernetes-master api-extra-args + ``` +2. Append the desired config option to the previous output and apply: + ```bash + juju config kubernetes-master api-extra-args="enable-admission-plugins=PodSecurityPolicy" + ``` + +Note that prior to Kubernetes 1.16 (kubernetes-master revision 778), the config +setting was `admission-control`, rather than `enable-admission-plugins`. + + +## Adding SANs and certificate regeneration + +As explained in the [Certificates and trust overview][certs-and-trust], the +[`extra_sans`](#table-extra_sans) configuration settings can be used to add +SANs and regenerate x509 certificate(s) for the API server running on the +Kubernetes master node(s), and for the load balancer. 
When this configuration is +changed, the master node(s) will regenerate its certificate and restart the API +server to update the certificate used for communication. Note: This is +disruptive and restarts the API server. + +The process is the same for both the `kubernetes-master` and the +`kubeapi-load-balancer`. The configuration option takes a space-separated list +of extra entries: + +```bash +juju config kubernetes-master extra_sans="master.mydomain.com lb.mydomain.com" +juju config kubeapi-load-balancer extra_sans="master.mydomain.com lb.mydomain.com" +``` +To clear the entries out of the certificate, use an empty string: + +```bash +juju config kubernetes-master extra_sans="" +juju config kubeapi-load-balancer extra_sans="" +``` + +## DNS for the cluster + +The DNS add-on allows pods to have DNS names in addition to IP addresses. +The Kubernetes cluster DNS server (based on the SkyDNS library) supports +forward lookups (A records), service lookups (SRV records) and reverse IP +address lookups (PTR records). More information about the DNS can be obtained +from the [Kubernetes DNS admin guide](http://kubernetes.io/docs/admin/dns/). + +# Actions + + + + +You can run an action with the following + +```bash +juju run-action kubernetes-master ACTION [parameters] [--wait] +``` +
+
+
+ apply-manifest +
+
+
+

+ Apply JSON formatted Kubernetes manifest to cluster +

+
+
+
+
+
+

+ This action has the following parameters: +

+
+
json
+

+ The content of the manifest to deploy in JSON format +

+

+ Default: +


+
+
+
+
+
+
+ cis-benchmark +
+
+
+

+ Run the CIS Kubernetes Benchmark against snap-based components. +

+
+
+
+
+
+

+ This action has the following parameters: +

+
+
apply
+

+ Apply remediations to address benchmark failures. The default, 'none', will not attempt to fix any reported failures. Set to 'conservative' to resolve simple failures. Set to 'dangerous' to attempt to resolve all failures. Note: Applying any remediation may result in an unusable cluster. +

+

+ Default: none +


+
config
+

+ Archive containing configuration files to use when running kube-bench. The default value is known to be compatible with snap components. When using a custom URL, append '#<hash_type>=<checksum>' to verify the archive integrity when downloaded. +

+

+ Default: https://github.com/charmed-kubernetes/kube-bench-c onfig/archive/cis-1.5.zip#sha1=cb8e78712ee5bfeab87 d0ed7c139a83e88915530 +


+
release
+

+ Set the kube-bench release to run. If set to 'upstream', the action will compile and use a local kube-bench binary built from the master branch of the upstream repository: https://github.com/aquasecurity/kube-bench This value may also be set to an accessible archive containing a pre-built kube-bench binary, for example: https://github.com/aquasecurity/kube- bench/releases/download/v0.0.34/kube-bench_0.0.34_ linux_amd64.tar.gz#sha256=f96d1fcfb84b18324f1299db 074d41ef324a25be5b944e79619ad1a079fca077 +

+

+ Default: https://github.com/aquasecurity/kube- bench/releases/download/v0.2.3/kube-bench_0.2.3_li nux_amd64.tar.gz#sha256=429a1db271689aafec009434de d1dea07a6685fee85a1deea638097c8512d548 +


+
+
+
+
+
+
+ debug +
+
+
+

+ Collect debug data +

+
+
+
+
+
+
+ get-kubeconfig +
+
+
+

+ Retrieve Kubernetes cluster config, including credentials +

+
+
+
+
+
+
+ namespace-create +
+
+
+

+ Create new namespace +

+
+
+
+
+
+

+ This action has the following parameters: +

+
+
name
+

+ Namespace name eg. staging +

+

+ Default: +


+
+
+
+
+
+
+ namespace-delete +
+
+
+

+ Delete namespace +

+
+
+
+
+
+

+ This action has the following parameters: +

+
+
name
+

+ Namespace name eg. staging +

+

+ Default: +


+
+
+
+
+
+
+ namespace-list +
+
+
+

+ List existing k8s namespaces +

+
+
+
+
+
+
+ restart +
+
+
+

+ Restart the Kubernetes master services on demand. +

+
+
+
+
+
+
+ upgrade +
+
+
+

+ Upgrade the kubernetes snaps +

+
+
+
+
+
+

+ This action has the following parameters: +

+
+
fix-cluster-name
+

+ If using the OpenStack cloud provider, whether to fix the cluster name sent to it to include the cluster tag. This fixes an issue with load balancers conflicting with other clusters in the same project but will cause new load balancers to be created which will require manual intervention to resolve. +

+

+ Default: True +


+
+
+
+ + + + + + +# More information + +- [Kubernetes github project](https://github.com/kubernetes/kubernetes) +- [Kubernetes issue tracker](https://github.com/kubernetes/kubernetes/issues) +- [Kubernetes documentation](http://kubernetes.io/docs/) +- [Kubernetes releases](https://github.com/kubernetes/kubernetes/releases) + + +[IPVS deep dive]: https://kubernetes.io/blog/2018/07/09/ipvs-based-in-cluster-load-balancing-deep-dive/ +[blog-admission]: https://kubernetes.io/blog/2019/03/21/a-guide-to-kubernetes-admission-controllers/ +[Addons page]: /kubernetes/docs/cdk-addons +[certs-and-trust]: /kubernetes/docs/certs-and-trust diff --git a/kubernetes-control-plane/docs/status.md b/kubernetes-control-plane/docs/status.md new file mode 100644 index 0000000..c6cceab --- /dev/null +++ b/kubernetes-control-plane/docs/status.md @@ -0,0 +1,91 @@ +

WorkloadState

+ +```python +WorkloadState(self, /, *args, **kwargs) +``` + +Enum of the valid workload states. + +Valid options are: + + * `WorkloadState.MAINTENANCE` + * `WorkloadState.BLOCKED` + * `WorkloadState.WAITING` + * `WorkloadState.ACTIVE` + +

maintenance

+ +```python +maintenance(message) +``` + +Set the status to the `MAINTENANCE` state with the given operator message. + +__Parameters__ + +- __`message` (str)__: Message to convey to the operator. + +

maint

+ +```python +maint(message) +``` + +Shorthand alias for +[maintenance](status.md#charms.layer.status.maintenance). + +__Parameters__ + +- __`message` (str)__: Message to convey to the operator. + +

blocked

+ +```python +blocked(message) +``` + +Set the status to the `BLOCKED` state with the given operator message. + +__Parameters__ + +- __`message` (str)__: Message to convey to the operator. + +

waiting

+ +```python +waiting(message) +``` + +Set the status to the `WAITING` state with the given operator message. + +__Parameters__ + +- __`message` (str)__: Message to convey to the operator. + +

active

+ +```python +active(message) +``` + +Set the status to the `ACTIVE` state with the given operator message. + +__Parameters__ + +- __`message` (str)__: Message to convey to the operator. + +

status_set

+ +```python +status_set(workload_state, message) +``` + +Set the status to the given workload state with a message. + +__Parameters__ + +- __`workload_state` (WorkloadState or str)__: State of the workload. Should be + a [WorkloadState](status.md#charms.layer.status.WorkloadState) enum + member, or the string value of one of those members. +- __`message` (str)__: Message to convey to the operator. + diff --git a/kubernetes-control-plane/docs/vault-kv.md b/kubernetes-control-plane/docs/vault-kv.md new file mode 100644 index 0000000..8408256 --- /dev/null +++ b/kubernetes-control-plane/docs/vault-kv.md @@ -0,0 +1,98 @@ +

charms.layer.vault_kv

+ + +

VaultNotReady

+ +```python +VaultNotReady(self, /, *args, **kwargs) +``` + +Exception indicating that Vault was accessed before it was ready. + +

VaultUnitKV

+ +```python +VaultUnitKV(self) +``` + +A simplified interface for storing data in Vault, with the data scoped to +the current unit. + +Keys must be strings, but data can be structured as long as it is +JSON-serializable. + +This class can be used as a dict, or you can use `self.get` and `self.set` +for a more KV-like interface. When values are set, via either style, they +are immediately persisted to Vault. Values are also cached in memory. + +Note: This class is a singleton. + +

VaultAppKV

+ +```python +VaultAppKV(self) +``` + +A simplified interface for storing data in Vault, with data shared by every +unit of the application. + +Keys must be strings, but data can be structured as long as it is +JSON-serializable. + +This class can be used as a dict, or you can use `self.get` and `self.set` +for a more KV-like interface. When values are set, via either style, they +are immediately persisted to Vault. Values are also cached in memory. + +Note: This class is a singleton. + +

is_changed

+ +```python +VaultAppKV.is_changed(self, key) +``` + +Determine if the value for the given key has changed since the last +time `self.update_hashes()` has been called. + +In order to detect changes, hashes of the values are also stored +in Vault. + +

update_hashes

+ +```python +VaultAppKV.update_hashes(self) +``` + +Update the hashes in Vault, thus marking all fields as unchanged. + +This is done automatically at exit. + +

get_vault_config

+ +```python +get_vault_config() +``` + +Get the config data needed for this application to access Vault. + +This is only needed if you're using another application, such as +VaultLocker, using the secrets backend provided by this layer. + +Returns a dictionary containing the following keys: + + * vault_url + * secret_backend + * role_id + * secret_id + +Note: This data is cached in [UnitData][] so anything with access to that +could access Vault as this application. + +If any of this data changes (such as the secret_id being rotated), this +layer will set the `layer.vault-kv.config.changed` flag. + +If this is called before the Vault relation is available, it will raise +`VaultNotReady`. + +[UnitData]: https://charm-helpers.readthedocs.io/en/latest/api/charmhelpers.core.unitdata.html + diff --git a/kubernetes-control-plane/docs/vaultlocker.md b/kubernetes-control-plane/docs/vaultlocker.md new file mode 100644 index 0000000..e30f255 --- /dev/null +++ b/kubernetes-control-plane/docs/vaultlocker.md @@ -0,0 +1,49 @@ +

charms.layer.vaultlocker

+ + +

encrypt_storage

+ +```python +encrypt_storage(storage_name, mountbase=None) +``` + +Set up encryption for the given Juju storage entry, and optionally create +and mount XFS filesystems on the encrypted storage entry location(s). + +Note that the storage entry **must** be defined with ``type: block``. + +If ``mountbase`` is not given, the location(s) will not be formatted or +mounted. When interacting with or mounting the location(s) manually, the +name returned by :func:`decrypted_device` called on the storage entry's +location should be used in place of the raw location. + +If the storage is defined as ``multiple``, the individual locations +will be mounted at ``{mountbase}/{storage_name}/{num}`` where ``{num}`` +is based on the storage ID. Otherwise, the storage will mounted at +``{mountbase}/{storage_name}``. + +

encrypt_device

+ +```python +encrypt_device(device, mountpoint=None) +``` + +Set up encryption for the given block device, and optionally create and +mount an XFS filesystem on the encrypted device. + +If ``mountpoint`` is not given, the device will not be formatted or +mounted. When interacting with or mounting the device manually, the +name returned by :func:`decrypted_device` called on the device name +should be used in place of the raw device name. + +

decrypted_device

+ +```python +decrypted_device(device) +``` + +Returns the mapped device name for the decrypted version of the encrypted +device. + +This mapped device name is what should be used for mounting the device. + diff --git a/kubernetes-control-plane/exec.d/docker-compose/charm-pre-install b/kubernetes-control-plane/exec.d/docker-compose/charm-pre-install new file mode 100644 index 0000000..f0202c5 --- /dev/null +++ b/kubernetes-control-plane/exec.d/docker-compose/charm-pre-install @@ -0,0 +1,4 @@ +#!/usr/bin/env bash + +# This stubs out charm-pre-install coming from layer-docker as a workaround for +# offline installs until https://github.com/juju/charm-tools/issues/301 is fixed. diff --git a/kubernetes-control-plane/exec.d/vmware-patch/charm-pre-install b/kubernetes-control-plane/exec.d/vmware-patch/charm-pre-install new file mode 100755 index 0000000..b5e6d97 --- /dev/null +++ b/kubernetes-control-plane/exec.d/vmware-patch/charm-pre-install @@ -0,0 +1,17 @@ +#!/usr/bin/env bash +MY_HOSTNAME=$(hostname) + +: ${JUJU_UNIT_NAME:=`uuidgen`} + + +if [ "${MY_HOSTNAME}" == "ubuntuguest" ]; then + juju-log "Detected broken vsphere integration. Applying hostname override" + + FRIENDLY_HOSTNAME=$(echo $JUJU_UNIT_NAME | tr / -) + juju-log "Setting hostname to $FRIENDLY_HOSTNAME" + if [ ! 
-f /etc/hostname.orig ]; then + mv /etc/hostname /etc/hostname.orig + fi + echo "${FRIENDLY_HOSTNAME}" > /etc/hostname + hostname $FRIENDLY_HOSTNAME +fi diff --git a/kubernetes-control-plane/hooks/aws-iam-relation-broken b/kubernetes-control-plane/hooks/aws-iam-relation-broken new file mode 100755 index 0000000..9858c6b --- /dev/null +++ b/kubernetes-control-plane/hooks/aws-iam-relation-broken @@ -0,0 +1,22 @@ +#!/usr/bin/env python3 + +# Load modules from $JUJU_CHARM_DIR/lib +import sys +sys.path.append('lib') + +from charms.layer import basic # noqa +basic.bootstrap_charm_deps() + +from charmhelpers.core import hookenv # noqa +hookenv.atstart(basic.init_config_states) +hookenv.atexit(basic.clear_config_states) + + +# This will load and run the appropriate @hook and other decorated +# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive, +# and $JUJU_CHARM_DIR/hooks/relations. +# +# See https://jujucharms.com/docs/stable/authors-charm-building +# for more information on this pattern. +from charms.reactive import main # noqa +main() diff --git a/kubernetes-control-plane/hooks/aws-iam-relation-changed b/kubernetes-control-plane/hooks/aws-iam-relation-changed new file mode 100755 index 0000000..9858c6b --- /dev/null +++ b/kubernetes-control-plane/hooks/aws-iam-relation-changed @@ -0,0 +1,22 @@ +#!/usr/bin/env python3 + +# Load modules from $JUJU_CHARM_DIR/lib +import sys +sys.path.append('lib') + +from charms.layer import basic # noqa +basic.bootstrap_charm_deps() + +from charmhelpers.core import hookenv # noqa +hookenv.atstart(basic.init_config_states) +hookenv.atexit(basic.clear_config_states) + + +# This will load and run the appropriate @hook and other decorated +# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive, +# and $JUJU_CHARM_DIR/hooks/relations. +# +# See https://jujucharms.com/docs/stable/authors-charm-building +# for more information on this pattern. 
+from charms.reactive import main # noqa +main() diff --git a/kubernetes-control-plane/hooks/aws-iam-relation-created b/kubernetes-control-plane/hooks/aws-iam-relation-created new file mode 100755 index 0000000..9858c6b --- /dev/null +++ b/kubernetes-control-plane/hooks/aws-iam-relation-created @@ -0,0 +1,22 @@ +#!/usr/bin/env python3 + +# Load modules from $JUJU_CHARM_DIR/lib +import sys +sys.path.append('lib') + +from charms.layer import basic # noqa +basic.bootstrap_charm_deps() + +from charmhelpers.core import hookenv # noqa +hookenv.atstart(basic.init_config_states) +hookenv.atexit(basic.clear_config_states) + + +# This will load and run the appropriate @hook and other decorated +# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive, +# and $JUJU_CHARM_DIR/hooks/relations. +# +# See https://jujucharms.com/docs/stable/authors-charm-building +# for more information on this pattern. +from charms.reactive import main # noqa +main() diff --git a/kubernetes-control-plane/hooks/aws-iam-relation-departed b/kubernetes-control-plane/hooks/aws-iam-relation-departed new file mode 100755 index 0000000..9858c6b --- /dev/null +++ b/kubernetes-control-plane/hooks/aws-iam-relation-departed @@ -0,0 +1,22 @@ +#!/usr/bin/env python3 + +# Load modules from $JUJU_CHARM_DIR/lib +import sys +sys.path.append('lib') + +from charms.layer import basic # noqa +basic.bootstrap_charm_deps() + +from charmhelpers.core import hookenv # noqa +hookenv.atstart(basic.init_config_states) +hookenv.atexit(basic.clear_config_states) + + +# This will load and run the appropriate @hook and other decorated +# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive, +# and $JUJU_CHARM_DIR/hooks/relations. +# +# See https://jujucharms.com/docs/stable/authors-charm-building +# for more information on this pattern. 
+from charms.reactive import main # noqa +main() diff --git a/kubernetes-control-plane/hooks/aws-iam-relation-joined b/kubernetes-control-plane/hooks/aws-iam-relation-joined new file mode 100755 index 0000000..9858c6b --- /dev/null +++ b/kubernetes-control-plane/hooks/aws-iam-relation-joined @@ -0,0 +1,22 @@ +#!/usr/bin/env python3 + +# Load modules from $JUJU_CHARM_DIR/lib +import sys +sys.path.append('lib') + +from charms.layer import basic # noqa +basic.bootstrap_charm_deps() + +from charmhelpers.core import hookenv # noqa +hookenv.atstart(basic.init_config_states) +hookenv.atexit(basic.clear_config_states) + + +# This will load and run the appropriate @hook and other decorated +# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive, +# and $JUJU_CHARM_DIR/hooks/relations. +# +# See https://jujucharms.com/docs/stable/authors-charm-building +# for more information on this pattern. +from charms.reactive import main # noqa +main() diff --git a/kubernetes-control-plane/hooks/aws-relation-broken b/kubernetes-control-plane/hooks/aws-relation-broken new file mode 100755 index 0000000..9858c6b --- /dev/null +++ b/kubernetes-control-plane/hooks/aws-relation-broken @@ -0,0 +1,22 @@ +#!/usr/bin/env python3 + +# Load modules from $JUJU_CHARM_DIR/lib +import sys +sys.path.append('lib') + +from charms.layer import basic # noqa +basic.bootstrap_charm_deps() + +from charmhelpers.core import hookenv # noqa +hookenv.atstart(basic.init_config_states) +hookenv.atexit(basic.clear_config_states) + + +# This will load and run the appropriate @hook and other decorated +# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive, +# and $JUJU_CHARM_DIR/hooks/relations. +# +# See https://jujucharms.com/docs/stable/authors-charm-building +# for more information on this pattern. 
+from charms.reactive import main # noqa +main() diff --git a/kubernetes-control-plane/hooks/aws-relation-changed b/kubernetes-control-plane/hooks/aws-relation-changed new file mode 100755 index 0000000..9858c6b --- /dev/null +++ b/kubernetes-control-plane/hooks/aws-relation-changed @@ -0,0 +1,22 @@ +#!/usr/bin/env python3 + +# Load modules from $JUJU_CHARM_DIR/lib +import sys +sys.path.append('lib') + +from charms.layer import basic # noqa +basic.bootstrap_charm_deps() + +from charmhelpers.core import hookenv # noqa +hookenv.atstart(basic.init_config_states) +hookenv.atexit(basic.clear_config_states) + + +# This will load and run the appropriate @hook and other decorated +# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive, +# and $JUJU_CHARM_DIR/hooks/relations. +# +# See https://jujucharms.com/docs/stable/authors-charm-building +# for more information on this pattern. +from charms.reactive import main # noqa +main() diff --git a/kubernetes-control-plane/hooks/aws-relation-created b/kubernetes-control-plane/hooks/aws-relation-created new file mode 100755 index 0000000..9858c6b --- /dev/null +++ b/kubernetes-control-plane/hooks/aws-relation-created @@ -0,0 +1,22 @@ +#!/usr/bin/env python3 + +# Load modules from $JUJU_CHARM_DIR/lib +import sys +sys.path.append('lib') + +from charms.layer import basic # noqa +basic.bootstrap_charm_deps() + +from charmhelpers.core import hookenv # noqa +hookenv.atstart(basic.init_config_states) +hookenv.atexit(basic.clear_config_states) + + +# This will load and run the appropriate @hook and other decorated +# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive, +# and $JUJU_CHARM_DIR/hooks/relations. +# +# See https://jujucharms.com/docs/stable/authors-charm-building +# for more information on this pattern. 
+from charms.reactive import main # noqa +main() diff --git a/kubernetes-control-plane/hooks/aws-relation-departed b/kubernetes-control-plane/hooks/aws-relation-departed new file mode 100755 index 0000000..9858c6b --- /dev/null +++ b/kubernetes-control-plane/hooks/aws-relation-departed @@ -0,0 +1,22 @@ +#!/usr/bin/env python3 + +# Load modules from $JUJU_CHARM_DIR/lib +import sys +sys.path.append('lib') + +from charms.layer import basic # noqa +basic.bootstrap_charm_deps() + +from charmhelpers.core import hookenv # noqa +hookenv.atstart(basic.init_config_states) +hookenv.atexit(basic.clear_config_states) + + +# This will load and run the appropriate @hook and other decorated +# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive, +# and $JUJU_CHARM_DIR/hooks/relations. +# +# See https://jujucharms.com/docs/stable/authors-charm-building +# for more information on this pattern. +from charms.reactive import main # noqa +main() diff --git a/kubernetes-control-plane/hooks/aws-relation-joined b/kubernetes-control-plane/hooks/aws-relation-joined new file mode 100755 index 0000000..9858c6b --- /dev/null +++ b/kubernetes-control-plane/hooks/aws-relation-joined @@ -0,0 +1,22 @@ +#!/usr/bin/env python3 + +# Load modules from $JUJU_CHARM_DIR/lib +import sys +sys.path.append('lib') + +from charms.layer import basic # noqa +basic.bootstrap_charm_deps() + +from charmhelpers.core import hookenv # noqa +hookenv.atstart(basic.init_config_states) +hookenv.atexit(basic.clear_config_states) + + +# This will load and run the appropriate @hook and other decorated +# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive, +# and $JUJU_CHARM_DIR/hooks/relations. +# +# See https://jujucharms.com/docs/stable/authors-charm-building +# for more information on this pattern. 
+from charms.reactive import main # noqa +main() diff --git a/kubernetes-control-plane/hooks/azure-relation-broken b/kubernetes-control-plane/hooks/azure-relation-broken new file mode 100755 index 0000000..9858c6b --- /dev/null +++ b/kubernetes-control-plane/hooks/azure-relation-broken @@ -0,0 +1,22 @@ +#!/usr/bin/env python3 + +# Load modules from $JUJU_CHARM_DIR/lib +import sys +sys.path.append('lib') + +from charms.layer import basic # noqa +basic.bootstrap_charm_deps() + +from charmhelpers.core import hookenv # noqa +hookenv.atstart(basic.init_config_states) +hookenv.atexit(basic.clear_config_states) + + +# This will load and run the appropriate @hook and other decorated +# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive, +# and $JUJU_CHARM_DIR/hooks/relations. +# +# See https://jujucharms.com/docs/stable/authors-charm-building +# for more information on this pattern. +from charms.reactive import main # noqa +main() diff --git a/kubernetes-control-plane/hooks/azure-relation-changed b/kubernetes-control-plane/hooks/azure-relation-changed new file mode 100755 index 0000000..9858c6b --- /dev/null +++ b/kubernetes-control-plane/hooks/azure-relation-changed @@ -0,0 +1,22 @@ +#!/usr/bin/env python3 + +# Load modules from $JUJU_CHARM_DIR/lib +import sys +sys.path.append('lib') + +from charms.layer import basic # noqa +basic.bootstrap_charm_deps() + +from charmhelpers.core import hookenv # noqa +hookenv.atstart(basic.init_config_states) +hookenv.atexit(basic.clear_config_states) + + +# This will load and run the appropriate @hook and other decorated +# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive, +# and $JUJU_CHARM_DIR/hooks/relations. +# +# See https://jujucharms.com/docs/stable/authors-charm-building +# for more information on this pattern. 
+from charms.reactive import main # noqa +main() diff --git a/kubernetes-control-plane/hooks/azure-relation-created b/kubernetes-control-plane/hooks/azure-relation-created new file mode 100755 index 0000000..9858c6b --- /dev/null +++ b/kubernetes-control-plane/hooks/azure-relation-created @@ -0,0 +1,22 @@ +#!/usr/bin/env python3 + +# Load modules from $JUJU_CHARM_DIR/lib +import sys +sys.path.append('lib') + +from charms.layer import basic # noqa +basic.bootstrap_charm_deps() + +from charmhelpers.core import hookenv # noqa +hookenv.atstart(basic.init_config_states) +hookenv.atexit(basic.clear_config_states) + + +# This will load and run the appropriate @hook and other decorated +# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive, +# and $JUJU_CHARM_DIR/hooks/relations. +# +# See https://jujucharms.com/docs/stable/authors-charm-building +# for more information on this pattern. +from charms.reactive import main # noqa +main() diff --git a/kubernetes-control-plane/hooks/azure-relation-departed b/kubernetes-control-plane/hooks/azure-relation-departed new file mode 100755 index 0000000..9858c6b --- /dev/null +++ b/kubernetes-control-plane/hooks/azure-relation-departed @@ -0,0 +1,22 @@ +#!/usr/bin/env python3 + +# Load modules from $JUJU_CHARM_DIR/lib +import sys +sys.path.append('lib') + +from charms.layer import basic # noqa +basic.bootstrap_charm_deps() + +from charmhelpers.core import hookenv # noqa +hookenv.atstart(basic.init_config_states) +hookenv.atexit(basic.clear_config_states) + + +# This will load and run the appropriate @hook and other decorated +# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive, +# and $JUJU_CHARM_DIR/hooks/relations. +# +# See https://jujucharms.com/docs/stable/authors-charm-building +# for more information on this pattern. 
+from charms.reactive import main # noqa +main() diff --git a/kubernetes-control-plane/hooks/azure-relation-joined b/kubernetes-control-plane/hooks/azure-relation-joined new file mode 100755 index 0000000..9858c6b --- /dev/null +++ b/kubernetes-control-plane/hooks/azure-relation-joined @@ -0,0 +1,22 @@ +#!/usr/bin/env python3 + +# Load modules from $JUJU_CHARM_DIR/lib +import sys +sys.path.append('lib') + +from charms.layer import basic # noqa +basic.bootstrap_charm_deps() + +from charmhelpers.core import hookenv # noqa +hookenv.atstart(basic.init_config_states) +hookenv.atexit(basic.clear_config_states) + + +# This will load and run the appropriate @hook and other decorated +# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive, +# and $JUJU_CHARM_DIR/hooks/relations. +# +# See https://jujucharms.com/docs/stable/authors-charm-building +# for more information on this pattern. +from charms.reactive import main # noqa +main() diff --git a/kubernetes-control-plane/hooks/ceph-client-relation-broken b/kubernetes-control-plane/hooks/ceph-client-relation-broken new file mode 100755 index 0000000..9858c6b --- /dev/null +++ b/kubernetes-control-plane/hooks/ceph-client-relation-broken @@ -0,0 +1,22 @@ +#!/usr/bin/env python3 + +# Load modules from $JUJU_CHARM_DIR/lib +import sys +sys.path.append('lib') + +from charms.layer import basic # noqa +basic.bootstrap_charm_deps() + +from charmhelpers.core import hookenv # noqa +hookenv.atstart(basic.init_config_states) +hookenv.atexit(basic.clear_config_states) + + +# This will load and run the appropriate @hook and other decorated +# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive, +# and $JUJU_CHARM_DIR/hooks/relations. +# +# See https://jujucharms.com/docs/stable/authors-charm-building +# for more information on this pattern. 
+from charms.reactive import main # noqa +main() diff --git a/kubernetes-control-plane/hooks/ceph-client-relation-changed b/kubernetes-control-plane/hooks/ceph-client-relation-changed new file mode 100755 index 0000000..9858c6b --- /dev/null +++ b/kubernetes-control-plane/hooks/ceph-client-relation-changed @@ -0,0 +1,22 @@ +#!/usr/bin/env python3 + +# Load modules from $JUJU_CHARM_DIR/lib +import sys +sys.path.append('lib') + +from charms.layer import basic # noqa +basic.bootstrap_charm_deps() + +from charmhelpers.core import hookenv # noqa +hookenv.atstart(basic.init_config_states) +hookenv.atexit(basic.clear_config_states) + + +# This will load and run the appropriate @hook and other decorated +# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive, +# and $JUJU_CHARM_DIR/hooks/relations. +# +# See https://jujucharms.com/docs/stable/authors-charm-building +# for more information on this pattern. +from charms.reactive import main # noqa +main() diff --git a/kubernetes-control-plane/hooks/ceph-client-relation-created b/kubernetes-control-plane/hooks/ceph-client-relation-created new file mode 100755 index 0000000..9858c6b --- /dev/null +++ b/kubernetes-control-plane/hooks/ceph-client-relation-created @@ -0,0 +1,22 @@ +#!/usr/bin/env python3 + +# Load modules from $JUJU_CHARM_DIR/lib +import sys +sys.path.append('lib') + +from charms.layer import basic # noqa +basic.bootstrap_charm_deps() + +from charmhelpers.core import hookenv # noqa +hookenv.atstart(basic.init_config_states) +hookenv.atexit(basic.clear_config_states) + + +# This will load and run the appropriate @hook and other decorated +# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive, +# and $JUJU_CHARM_DIR/hooks/relations. +# +# See https://jujucharms.com/docs/stable/authors-charm-building +# for more information on this pattern. 
+from charms.reactive import main # noqa +main() diff --git a/kubernetes-control-plane/hooks/ceph-client-relation-departed b/kubernetes-control-plane/hooks/ceph-client-relation-departed new file mode 100755 index 0000000..9858c6b --- /dev/null +++ b/kubernetes-control-plane/hooks/ceph-client-relation-departed @@ -0,0 +1,22 @@ +#!/usr/bin/env python3 + +# Load modules from $JUJU_CHARM_DIR/lib +import sys +sys.path.append('lib') + +from charms.layer import basic # noqa +basic.bootstrap_charm_deps() + +from charmhelpers.core import hookenv # noqa +hookenv.atstart(basic.init_config_states) +hookenv.atexit(basic.clear_config_states) + + +# This will load and run the appropriate @hook and other decorated +# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive, +# and $JUJU_CHARM_DIR/hooks/relations. +# +# See https://jujucharms.com/docs/stable/authors-charm-building +# for more information on this pattern. +from charms.reactive import main # noqa +main() diff --git a/kubernetes-control-plane/hooks/ceph-client-relation-joined b/kubernetes-control-plane/hooks/ceph-client-relation-joined new file mode 100755 index 0000000..9858c6b --- /dev/null +++ b/kubernetes-control-plane/hooks/ceph-client-relation-joined @@ -0,0 +1,22 @@ +#!/usr/bin/env python3 + +# Load modules from $JUJU_CHARM_DIR/lib +import sys +sys.path.append('lib') + +from charms.layer import basic # noqa +basic.bootstrap_charm_deps() + +from charmhelpers.core import hookenv # noqa +hookenv.atstart(basic.init_config_states) +hookenv.atexit(basic.clear_config_states) + + +# This will load and run the appropriate @hook and other decorated +# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive, +# and $JUJU_CHARM_DIR/hooks/relations. +# +# See https://jujucharms.com/docs/stable/authors-charm-building +# for more information on this pattern. 
+from charms.reactive import main # noqa +main() diff --git a/kubernetes-control-plane/hooks/ceph-storage-relation-broken b/kubernetes-control-plane/hooks/ceph-storage-relation-broken new file mode 100755 index 0000000..9858c6b --- /dev/null +++ b/kubernetes-control-plane/hooks/ceph-storage-relation-broken @@ -0,0 +1,22 @@ +#!/usr/bin/env python3 + +# Load modules from $JUJU_CHARM_DIR/lib +import sys +sys.path.append('lib') + +from charms.layer import basic # noqa +basic.bootstrap_charm_deps() + +from charmhelpers.core import hookenv # noqa +hookenv.atstart(basic.init_config_states) +hookenv.atexit(basic.clear_config_states) + + +# This will load and run the appropriate @hook and other decorated +# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive, +# and $JUJU_CHARM_DIR/hooks/relations. +# +# See https://jujucharms.com/docs/stable/authors-charm-building +# for more information on this pattern. +from charms.reactive import main # noqa +main() diff --git a/kubernetes-control-plane/hooks/ceph-storage-relation-changed b/kubernetes-control-plane/hooks/ceph-storage-relation-changed new file mode 100755 index 0000000..9858c6b --- /dev/null +++ b/kubernetes-control-plane/hooks/ceph-storage-relation-changed @@ -0,0 +1,22 @@ +#!/usr/bin/env python3 + +# Load modules from $JUJU_CHARM_DIR/lib +import sys +sys.path.append('lib') + +from charms.layer import basic # noqa +basic.bootstrap_charm_deps() + +from charmhelpers.core import hookenv # noqa +hookenv.atstart(basic.init_config_states) +hookenv.atexit(basic.clear_config_states) + + +# This will load and run the appropriate @hook and other decorated +# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive, +# and $JUJU_CHARM_DIR/hooks/relations. +# +# See https://jujucharms.com/docs/stable/authors-charm-building +# for more information on this pattern. 
+from charms.reactive import main # noqa +main() diff --git a/kubernetes-control-plane/hooks/ceph-storage-relation-created b/kubernetes-control-plane/hooks/ceph-storage-relation-created new file mode 100755 index 0000000..9858c6b --- /dev/null +++ b/kubernetes-control-plane/hooks/ceph-storage-relation-created @@ -0,0 +1,22 @@ +#!/usr/bin/env python3 + +# Load modules from $JUJU_CHARM_DIR/lib +import sys +sys.path.append('lib') + +from charms.layer import basic # noqa +basic.bootstrap_charm_deps() + +from charmhelpers.core import hookenv # noqa +hookenv.atstart(basic.init_config_states) +hookenv.atexit(basic.clear_config_states) + + +# This will load and run the appropriate @hook and other decorated +# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive, +# and $JUJU_CHARM_DIR/hooks/relations. +# +# See https://jujucharms.com/docs/stable/authors-charm-building +# for more information on this pattern. +from charms.reactive import main # noqa +main() diff --git a/kubernetes-control-plane/hooks/ceph-storage-relation-departed b/kubernetes-control-plane/hooks/ceph-storage-relation-departed new file mode 100755 index 0000000..9858c6b --- /dev/null +++ b/kubernetes-control-plane/hooks/ceph-storage-relation-departed @@ -0,0 +1,22 @@ +#!/usr/bin/env python3 + +# Load modules from $JUJU_CHARM_DIR/lib +import sys +sys.path.append('lib') + +from charms.layer import basic # noqa +basic.bootstrap_charm_deps() + +from charmhelpers.core import hookenv # noqa +hookenv.atstart(basic.init_config_states) +hookenv.atexit(basic.clear_config_states) + + +# This will load and run the appropriate @hook and other decorated +# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive, +# and $JUJU_CHARM_DIR/hooks/relations. +# +# See https://jujucharms.com/docs/stable/authors-charm-building +# for more information on this pattern. 
+from charms.reactive import main # noqa +main() diff --git a/kubernetes-control-plane/hooks/ceph-storage-relation-joined b/kubernetes-control-plane/hooks/ceph-storage-relation-joined new file mode 100755 index 0000000..9858c6b --- /dev/null +++ b/kubernetes-control-plane/hooks/ceph-storage-relation-joined @@ -0,0 +1,22 @@ +#!/usr/bin/env python3 + +# Load modules from $JUJU_CHARM_DIR/lib +import sys +sys.path.append('lib') + +from charms.layer import basic # noqa +basic.bootstrap_charm_deps() + +from charmhelpers.core import hookenv # noqa +hookenv.atstart(basic.init_config_states) +hookenv.atexit(basic.clear_config_states) + + +# This will load and run the appropriate @hook and other decorated +# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive, +# and $JUJU_CHARM_DIR/hooks/relations. +# +# See https://jujucharms.com/docs/stable/authors-charm-building +# for more information on this pattern. +from charms.reactive import main # noqa +main() diff --git a/kubernetes-control-plane/hooks/certificates-relation-broken b/kubernetes-control-plane/hooks/certificates-relation-broken new file mode 100755 index 0000000..9858c6b --- /dev/null +++ b/kubernetes-control-plane/hooks/certificates-relation-broken @@ -0,0 +1,22 @@ +#!/usr/bin/env python3 + +# Load modules from $JUJU_CHARM_DIR/lib +import sys +sys.path.append('lib') + +from charms.layer import basic # noqa +basic.bootstrap_charm_deps() + +from charmhelpers.core import hookenv # noqa +hookenv.atstart(basic.init_config_states) +hookenv.atexit(basic.clear_config_states) + + +# This will load and run the appropriate @hook and other decorated +# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive, +# and $JUJU_CHARM_DIR/hooks/relations. +# +# See https://jujucharms.com/docs/stable/authors-charm-building +# for more information on this pattern. 
+from charms.reactive import main # noqa +main() diff --git a/kubernetes-control-plane/hooks/certificates-relation-changed b/kubernetes-control-plane/hooks/certificates-relation-changed new file mode 100755 index 0000000..9858c6b --- /dev/null +++ b/kubernetes-control-plane/hooks/certificates-relation-changed @@ -0,0 +1,22 @@ +#!/usr/bin/env python3 + +# Load modules from $JUJU_CHARM_DIR/lib +import sys +sys.path.append('lib') + +from charms.layer import basic # noqa +basic.bootstrap_charm_deps() + +from charmhelpers.core import hookenv # noqa +hookenv.atstart(basic.init_config_states) +hookenv.atexit(basic.clear_config_states) + + +# This will load and run the appropriate @hook and other decorated +# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive, +# and $JUJU_CHARM_DIR/hooks/relations. +# +# See https://jujucharms.com/docs/stable/authors-charm-building +# for more information on this pattern. +from charms.reactive import main # noqa +main() diff --git a/kubernetes-control-plane/hooks/certificates-relation-created b/kubernetes-control-plane/hooks/certificates-relation-created new file mode 100755 index 0000000..9858c6b --- /dev/null +++ b/kubernetes-control-plane/hooks/certificates-relation-created @@ -0,0 +1,22 @@ +#!/usr/bin/env python3 + +# Load modules from $JUJU_CHARM_DIR/lib +import sys +sys.path.append('lib') + +from charms.layer import basic # noqa +basic.bootstrap_charm_deps() + +from charmhelpers.core import hookenv # noqa +hookenv.atstart(basic.init_config_states) +hookenv.atexit(basic.clear_config_states) + + +# This will load and run the appropriate @hook and other decorated +# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive, +# and $JUJU_CHARM_DIR/hooks/relations. +# +# See https://jujucharms.com/docs/stable/authors-charm-building +# for more information on this pattern. 
+from charms.reactive import main # noqa +main() diff --git a/kubernetes-control-plane/hooks/certificates-relation-departed b/kubernetes-control-plane/hooks/certificates-relation-departed new file mode 100755 index 0000000..9858c6b --- /dev/null +++ b/kubernetes-control-plane/hooks/certificates-relation-departed @@ -0,0 +1,22 @@ +#!/usr/bin/env python3 + +# Load modules from $JUJU_CHARM_DIR/lib +import sys +sys.path.append('lib') + +from charms.layer import basic # noqa +basic.bootstrap_charm_deps() + +from charmhelpers.core import hookenv # noqa +hookenv.atstart(basic.init_config_states) +hookenv.atexit(basic.clear_config_states) + + +# This will load and run the appropriate @hook and other decorated +# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive, +# and $JUJU_CHARM_DIR/hooks/relations. +# +# See https://jujucharms.com/docs/stable/authors-charm-building +# for more information on this pattern. +from charms.reactive import main # noqa +main() diff --git a/kubernetes-control-plane/hooks/certificates-relation-joined b/kubernetes-control-plane/hooks/certificates-relation-joined new file mode 100755 index 0000000..9858c6b --- /dev/null +++ b/kubernetes-control-plane/hooks/certificates-relation-joined @@ -0,0 +1,22 @@ +#!/usr/bin/env python3 + +# Load modules from $JUJU_CHARM_DIR/lib +import sys +sys.path.append('lib') + +from charms.layer import basic # noqa +basic.bootstrap_charm_deps() + +from charmhelpers.core import hookenv # noqa +hookenv.atstart(basic.init_config_states) +hookenv.atexit(basic.clear_config_states) + + +# This will load and run the appropriate @hook and other decorated +# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive, +# and $JUJU_CHARM_DIR/hooks/relations. +# +# See https://jujucharms.com/docs/stable/authors-charm-building +# for more information on this pattern. 
+from charms.reactive import main # noqa +main() diff --git a/kubernetes-control-plane/hooks/cni-relation-broken b/kubernetes-control-plane/hooks/cni-relation-broken new file mode 100755 index 0000000..9858c6b --- /dev/null +++ b/kubernetes-control-plane/hooks/cni-relation-broken @@ -0,0 +1,22 @@ +#!/usr/bin/env python3 + +# Load modules from $JUJU_CHARM_DIR/lib +import sys +sys.path.append('lib') + +from charms.layer import basic # noqa +basic.bootstrap_charm_deps() + +from charmhelpers.core import hookenv # noqa +hookenv.atstart(basic.init_config_states) +hookenv.atexit(basic.clear_config_states) + + +# This will load and run the appropriate @hook and other decorated +# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive, +# and $JUJU_CHARM_DIR/hooks/relations. +# +# See https://jujucharms.com/docs/stable/authors-charm-building +# for more information on this pattern. +from charms.reactive import main # noqa +main() diff --git a/kubernetes-control-plane/hooks/cni-relation-changed b/kubernetes-control-plane/hooks/cni-relation-changed new file mode 100755 index 0000000..9858c6b --- /dev/null +++ b/kubernetes-control-plane/hooks/cni-relation-changed @@ -0,0 +1,22 @@ +#!/usr/bin/env python3 + +# Load modules from $JUJU_CHARM_DIR/lib +import sys +sys.path.append('lib') + +from charms.layer import basic # noqa +basic.bootstrap_charm_deps() + +from charmhelpers.core import hookenv # noqa +hookenv.atstart(basic.init_config_states) +hookenv.atexit(basic.clear_config_states) + + +# This will load and run the appropriate @hook and other decorated +# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive, +# and $JUJU_CHARM_DIR/hooks/relations. +# +# See https://jujucharms.com/docs/stable/authors-charm-building +# for more information on this pattern. 
+from charms.reactive import main # noqa +main() diff --git a/kubernetes-control-plane/hooks/cni-relation-created b/kubernetes-control-plane/hooks/cni-relation-created new file mode 100755 index 0000000..9858c6b --- /dev/null +++ b/kubernetes-control-plane/hooks/cni-relation-created @@ -0,0 +1,22 @@ +#!/usr/bin/env python3 + +# Load modules from $JUJU_CHARM_DIR/lib +import sys +sys.path.append('lib') + +from charms.layer import basic # noqa +basic.bootstrap_charm_deps() + +from charmhelpers.core import hookenv # noqa +hookenv.atstart(basic.init_config_states) +hookenv.atexit(basic.clear_config_states) + + +# This will load and run the appropriate @hook and other decorated +# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive, +# and $JUJU_CHARM_DIR/hooks/relations. +# +# See https://jujucharms.com/docs/stable/authors-charm-building +# for more information on this pattern. +from charms.reactive import main # noqa +main() diff --git a/kubernetes-control-plane/hooks/cni-relation-departed b/kubernetes-control-plane/hooks/cni-relation-departed new file mode 100755 index 0000000..9858c6b --- /dev/null +++ b/kubernetes-control-plane/hooks/cni-relation-departed @@ -0,0 +1,22 @@ +#!/usr/bin/env python3 + +# Load modules from $JUJU_CHARM_DIR/lib +import sys +sys.path.append('lib') + +from charms.layer import basic # noqa +basic.bootstrap_charm_deps() + +from charmhelpers.core import hookenv # noqa +hookenv.atstart(basic.init_config_states) +hookenv.atexit(basic.clear_config_states) + + +# This will load and run the appropriate @hook and other decorated +# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive, +# and $JUJU_CHARM_DIR/hooks/relations. +# +# See https://jujucharms.com/docs/stable/authors-charm-building +# for more information on this pattern. 
+from charms.reactive import main # noqa +main() diff --git a/kubernetes-control-plane/hooks/cni-relation-joined b/kubernetes-control-plane/hooks/cni-relation-joined new file mode 100755 index 0000000..9858c6b --- /dev/null +++ b/kubernetes-control-plane/hooks/cni-relation-joined @@ -0,0 +1,22 @@ +#!/usr/bin/env python3 + +# Load modules from $JUJU_CHARM_DIR/lib +import sys +sys.path.append('lib') + +from charms.layer import basic # noqa +basic.bootstrap_charm_deps() + +from charmhelpers.core import hookenv # noqa +hookenv.atstart(basic.init_config_states) +hookenv.atexit(basic.clear_config_states) + + +# This will load and run the appropriate @hook and other decorated +# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive, +# and $JUJU_CHARM_DIR/hooks/relations. +# +# See https://jujucharms.com/docs/stable/authors-charm-building +# for more information on this pattern. +from charms.reactive import main # noqa +main() diff --git a/kubernetes-control-plane/hooks/collect-metrics b/kubernetes-control-plane/hooks/collect-metrics new file mode 100755 index 0000000..8a27863 --- /dev/null +++ b/kubernetes-control-plane/hooks/collect-metrics @@ -0,0 +1,46 @@ +#!/usr/bin/env python3 + +# Load modules from $CHARM_DIR/lib +import sys +sys.path.append('lib') + +import yaml +import os +from subprocess import check_output, check_call, CalledProcessError + + +def build_command(doc): + values = {} + metrics = doc.get("metrics", {}) + for metric, mdoc in metrics.items(): + if not mdoc: + continue + cmd = mdoc.get("command") + if cmd: + try: + value = check_output(cmd, shell=True, universal_newlines=True) + except CalledProcessError as e: + check_call(['juju-log', '-lERROR', + 'Error collecting metric {}:\n{}'.format( + metric, e.output)]) + continue + value = value.strip() + if value: + values[metric] = value + + if not values: + return None + command = ["add-metric"] + for metric, value in values.items(): + command.append("%s=%s" % (metric, value)) + return 
command + + +if __name__ == '__main__': + charm_dir = os.path.dirname(os.path.abspath(os.path.join(__file__, ".."))) + metrics_yaml = os.path.join(charm_dir, "metrics.yaml") + with open(metrics_yaml) as f: + doc = yaml.load(f) + command = build_command(doc) + if command: + check_call(command) diff --git a/kubernetes-control-plane/hooks/config-changed b/kubernetes-control-plane/hooks/config-changed new file mode 100755 index 0000000..9858c6b --- /dev/null +++ b/kubernetes-control-plane/hooks/config-changed @@ -0,0 +1,22 @@ +#!/usr/bin/env python3 + +# Load modules from $JUJU_CHARM_DIR/lib +import sys +sys.path.append('lib') + +from charms.layer import basic # noqa +basic.bootstrap_charm_deps() + +from charmhelpers.core import hookenv # noqa +hookenv.atstart(basic.init_config_states) +hookenv.atexit(basic.clear_config_states) + + +# This will load and run the appropriate @hook and other decorated +# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive, +# and $JUJU_CHARM_DIR/hooks/relations. +# +# See https://jujucharms.com/docs/stable/authors-charm-building +# for more information on this pattern. +from charms.reactive import main # noqa +main() diff --git a/kubernetes-control-plane/hooks/container-runtime-relation-broken b/kubernetes-control-plane/hooks/container-runtime-relation-broken new file mode 100755 index 0000000..9858c6b --- /dev/null +++ b/kubernetes-control-plane/hooks/container-runtime-relation-broken @@ -0,0 +1,22 @@ +#!/usr/bin/env python3 + +# Load modules from $JUJU_CHARM_DIR/lib +import sys +sys.path.append('lib') + +from charms.layer import basic # noqa +basic.bootstrap_charm_deps() + +from charmhelpers.core import hookenv # noqa +hookenv.atstart(basic.init_config_states) +hookenv.atexit(basic.clear_config_states) + + +# This will load and run the appropriate @hook and other decorated +# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive, +# and $JUJU_CHARM_DIR/hooks/relations. 
+# +# See https://jujucharms.com/docs/stable/authors-charm-building +# for more information on this pattern. +from charms.reactive import main # noqa +main() diff --git a/kubernetes-control-plane/hooks/container-runtime-relation-changed b/kubernetes-control-plane/hooks/container-runtime-relation-changed new file mode 100755 index 0000000..9858c6b --- /dev/null +++ b/kubernetes-control-plane/hooks/container-runtime-relation-changed @@ -0,0 +1,22 @@ +#!/usr/bin/env python3 + +# Load modules from $JUJU_CHARM_DIR/lib +import sys +sys.path.append('lib') + +from charms.layer import basic # noqa +basic.bootstrap_charm_deps() + +from charmhelpers.core import hookenv # noqa +hookenv.atstart(basic.init_config_states) +hookenv.atexit(basic.clear_config_states) + + +# This will load and run the appropriate @hook and other decorated +# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive, +# and $JUJU_CHARM_DIR/hooks/relations. +# +# See https://jujucharms.com/docs/stable/authors-charm-building +# for more information on this pattern. +from charms.reactive import main # noqa +main() diff --git a/kubernetes-control-plane/hooks/container-runtime-relation-created b/kubernetes-control-plane/hooks/container-runtime-relation-created new file mode 100755 index 0000000..9858c6b --- /dev/null +++ b/kubernetes-control-plane/hooks/container-runtime-relation-created @@ -0,0 +1,22 @@ +#!/usr/bin/env python3 + +# Load modules from $JUJU_CHARM_DIR/lib +import sys +sys.path.append('lib') + +from charms.layer import basic # noqa +basic.bootstrap_charm_deps() + +from charmhelpers.core import hookenv # noqa +hookenv.atstart(basic.init_config_states) +hookenv.atexit(basic.clear_config_states) + + +# This will load and run the appropriate @hook and other decorated +# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive, +# and $JUJU_CHARM_DIR/hooks/relations. 
+# +# See https://jujucharms.com/docs/stable/authors-charm-building +# for more information on this pattern. +from charms.reactive import main # noqa +main() diff --git a/kubernetes-control-plane/hooks/container-runtime-relation-departed b/kubernetes-control-plane/hooks/container-runtime-relation-departed new file mode 100755 index 0000000..9858c6b --- /dev/null +++ b/kubernetes-control-plane/hooks/container-runtime-relation-departed @@ -0,0 +1,22 @@ +#!/usr/bin/env python3 + +# Load modules from $JUJU_CHARM_DIR/lib +import sys +sys.path.append('lib') + +from charms.layer import basic # noqa +basic.bootstrap_charm_deps() + +from charmhelpers.core import hookenv # noqa +hookenv.atstart(basic.init_config_states) +hookenv.atexit(basic.clear_config_states) + + +# This will load and run the appropriate @hook and other decorated +# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive, +# and $JUJU_CHARM_DIR/hooks/relations. +# +# See https://jujucharms.com/docs/stable/authors-charm-building +# for more information on this pattern. +from charms.reactive import main # noqa +main() diff --git a/kubernetes-control-plane/hooks/container-runtime-relation-joined b/kubernetes-control-plane/hooks/container-runtime-relation-joined new file mode 100755 index 0000000..9858c6b --- /dev/null +++ b/kubernetes-control-plane/hooks/container-runtime-relation-joined @@ -0,0 +1,22 @@ +#!/usr/bin/env python3 + +# Load modules from $JUJU_CHARM_DIR/lib +import sys +sys.path.append('lib') + +from charms.layer import basic # noqa +basic.bootstrap_charm_deps() + +from charmhelpers.core import hookenv # noqa +hookenv.atstart(basic.init_config_states) +hookenv.atexit(basic.clear_config_states) + + +# This will load and run the appropriate @hook and other decorated +# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive, +# and $JUJU_CHARM_DIR/hooks/relations. 
+# +# See https://jujucharms.com/docs/stable/authors-charm-building +# for more information on this pattern. +from charms.reactive import main # noqa +main() diff --git a/kubernetes-control-plane/hooks/coordinator-relation-broken b/kubernetes-control-plane/hooks/coordinator-relation-broken new file mode 100755 index 0000000..9858c6b --- /dev/null +++ b/kubernetes-control-plane/hooks/coordinator-relation-broken @@ -0,0 +1,22 @@ +#!/usr/bin/env python3 + +# Load modules from $JUJU_CHARM_DIR/lib +import sys +sys.path.append('lib') + +from charms.layer import basic # noqa +basic.bootstrap_charm_deps() + +from charmhelpers.core import hookenv # noqa +hookenv.atstart(basic.init_config_states) +hookenv.atexit(basic.clear_config_states) + + +# This will load and run the appropriate @hook and other decorated +# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive, +# and $JUJU_CHARM_DIR/hooks/relations. +# +# See https://jujucharms.com/docs/stable/authors-charm-building +# for more information on this pattern. +from charms.reactive import main # noqa +main() diff --git a/kubernetes-control-plane/hooks/coordinator-relation-changed b/kubernetes-control-plane/hooks/coordinator-relation-changed new file mode 100755 index 0000000..fe39f65 --- /dev/null +++ b/kubernetes-control-plane/hooks/coordinator-relation-changed @@ -0,0 +1,18 @@ +#!/usr/bin/env python3 + +# Load modules from $CHARM_DIR/lib +import sys +sys.path.append('lib') + +from charms.layer.basic import bootstrap_charm_deps +bootstrap_charm_deps() + + +# This will load and run the appropriate @hook and other decorated +# handlers from $CHARM_DIR/reactive, $CHARM_DIR/hooks/reactive, +# and $CHARM_DIR/hooks/relations. +# +# See https://jujucharms.com/docs/stable/authors-charm-building +# for more information on this pattern. 
+from charms.reactive import main +main() diff --git a/kubernetes-control-plane/hooks/coordinator-relation-created b/kubernetes-control-plane/hooks/coordinator-relation-created new file mode 100755 index 0000000..9858c6b --- /dev/null +++ b/kubernetes-control-plane/hooks/coordinator-relation-created @@ -0,0 +1,22 @@ +#!/usr/bin/env python3 + +# Load modules from $JUJU_CHARM_DIR/lib +import sys +sys.path.append('lib') + +from charms.layer import basic # noqa +basic.bootstrap_charm_deps() + +from charmhelpers.core import hookenv # noqa +hookenv.atstart(basic.init_config_states) +hookenv.atexit(basic.clear_config_states) + + +# This will load and run the appropriate @hook and other decorated +# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive, +# and $JUJU_CHARM_DIR/hooks/relations. +# +# See https://jujucharms.com/docs/stable/authors-charm-building +# for more information on this pattern. +from charms.reactive import main # noqa +main() diff --git a/kubernetes-control-plane/hooks/coordinator-relation-departed b/kubernetes-control-plane/hooks/coordinator-relation-departed new file mode 100755 index 0000000..fe39f65 --- /dev/null +++ b/kubernetes-control-plane/hooks/coordinator-relation-departed @@ -0,0 +1,18 @@ +#!/usr/bin/env python3 + +# Load modules from $CHARM_DIR/lib +import sys +sys.path.append('lib') + +from charms.layer.basic import bootstrap_charm_deps +bootstrap_charm_deps() + + +# This will load and run the appropriate @hook and other decorated +# handlers from $CHARM_DIR/reactive, $CHARM_DIR/hooks/reactive, +# and $CHARM_DIR/hooks/relations. +# +# See https://jujucharms.com/docs/stable/authors-charm-building +# for more information on this pattern. 
+from charms.reactive import main +main() diff --git a/kubernetes-control-plane/hooks/coordinator-relation-joined b/kubernetes-control-plane/hooks/coordinator-relation-joined new file mode 100755 index 0000000..9858c6b --- /dev/null +++ b/kubernetes-control-plane/hooks/coordinator-relation-joined @@ -0,0 +1,22 @@ +#!/usr/bin/env python3 + +# Load modules from $JUJU_CHARM_DIR/lib +import sys +sys.path.append('lib') + +from charms.layer import basic # noqa +basic.bootstrap_charm_deps() + +from charmhelpers.core import hookenv # noqa +hookenv.atstart(basic.init_config_states) +hookenv.atexit(basic.clear_config_states) + + +# This will load and run the appropriate @hook and other decorated +# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive, +# and $JUJU_CHARM_DIR/hooks/relations. +# +# See https://jujucharms.com/docs/stable/authors-charm-building +# for more information on this pattern. +from charms.reactive import main # noqa +main() diff --git a/kubernetes-control-plane/hooks/dns-provider-relation-broken b/kubernetes-control-plane/hooks/dns-provider-relation-broken new file mode 100755 index 0000000..9858c6b --- /dev/null +++ b/kubernetes-control-plane/hooks/dns-provider-relation-broken @@ -0,0 +1,22 @@ +#!/usr/bin/env python3 + +# Load modules from $JUJU_CHARM_DIR/lib +import sys +sys.path.append('lib') + +from charms.layer import basic # noqa +basic.bootstrap_charm_deps() + +from charmhelpers.core import hookenv # noqa +hookenv.atstart(basic.init_config_states) +hookenv.atexit(basic.clear_config_states) + + +# This will load and run the appropriate @hook and other decorated +# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive, +# and $JUJU_CHARM_DIR/hooks/relations. +# +# See https://jujucharms.com/docs/stable/authors-charm-building +# for more information on this pattern. 
+from charms.reactive import main # noqa +main() diff --git a/kubernetes-control-plane/hooks/dns-provider-relation-changed b/kubernetes-control-plane/hooks/dns-provider-relation-changed new file mode 100755 index 0000000..9858c6b --- /dev/null +++ b/kubernetes-control-plane/hooks/dns-provider-relation-changed @@ -0,0 +1,22 @@ +#!/usr/bin/env python3 + +# Load modules from $JUJU_CHARM_DIR/lib +import sys +sys.path.append('lib') + +from charms.layer import basic # noqa +basic.bootstrap_charm_deps() + +from charmhelpers.core import hookenv # noqa +hookenv.atstart(basic.init_config_states) +hookenv.atexit(basic.clear_config_states) + + +# This will load and run the appropriate @hook and other decorated +# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive, +# and $JUJU_CHARM_DIR/hooks/relations. +# +# See https://jujucharms.com/docs/stable/authors-charm-building +# for more information on this pattern. +from charms.reactive import main # noqa +main() diff --git a/kubernetes-control-plane/hooks/dns-provider-relation-created b/kubernetes-control-plane/hooks/dns-provider-relation-created new file mode 100755 index 0000000..9858c6b --- /dev/null +++ b/kubernetes-control-plane/hooks/dns-provider-relation-created @@ -0,0 +1,22 @@ +#!/usr/bin/env python3 + +# Load modules from $JUJU_CHARM_DIR/lib +import sys +sys.path.append('lib') + +from charms.layer import basic # noqa +basic.bootstrap_charm_deps() + +from charmhelpers.core import hookenv # noqa +hookenv.atstart(basic.init_config_states) +hookenv.atexit(basic.clear_config_states) + + +# This will load and run the appropriate @hook and other decorated +# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive, +# and $JUJU_CHARM_DIR/hooks/relations. +# +# See https://jujucharms.com/docs/stable/authors-charm-building +# for more information on this pattern. 
+from charms.reactive import main # noqa +main() diff --git a/kubernetes-control-plane/hooks/dns-provider-relation-departed b/kubernetes-control-plane/hooks/dns-provider-relation-departed new file mode 100755 index 0000000..9858c6b --- /dev/null +++ b/kubernetes-control-plane/hooks/dns-provider-relation-departed @@ -0,0 +1,22 @@ +#!/usr/bin/env python3 + +# Load modules from $JUJU_CHARM_DIR/lib +import sys +sys.path.append('lib') + +from charms.layer import basic # noqa +basic.bootstrap_charm_deps() + +from charmhelpers.core import hookenv # noqa +hookenv.atstart(basic.init_config_states) +hookenv.atexit(basic.clear_config_states) + + +# This will load and run the appropriate @hook and other decorated +# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive, +# and $JUJU_CHARM_DIR/hooks/relations. +# +# See https://jujucharms.com/docs/stable/authors-charm-building +# for more information on this pattern. +from charms.reactive import main # noqa +main() diff --git a/kubernetes-control-plane/hooks/dns-provider-relation-joined b/kubernetes-control-plane/hooks/dns-provider-relation-joined new file mode 100755 index 0000000..9858c6b --- /dev/null +++ b/kubernetes-control-plane/hooks/dns-provider-relation-joined @@ -0,0 +1,22 @@ +#!/usr/bin/env python3 + +# Load modules from $JUJU_CHARM_DIR/lib +import sys +sys.path.append('lib') + +from charms.layer import basic # noqa +basic.bootstrap_charm_deps() + +from charmhelpers.core import hookenv # noqa +hookenv.atstart(basic.init_config_states) +hookenv.atexit(basic.clear_config_states) + + +# This will load and run the appropriate @hook and other decorated +# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive, +# and $JUJU_CHARM_DIR/hooks/relations. +# +# See https://jujucharms.com/docs/stable/authors-charm-building +# for more information on this pattern. 
+from charms.reactive import main # noqa +main() diff --git a/kubernetes-control-plane/hooks/etcd-relation-broken b/kubernetes-control-plane/hooks/etcd-relation-broken new file mode 100755 index 0000000..9858c6b --- /dev/null +++ b/kubernetes-control-plane/hooks/etcd-relation-broken @@ -0,0 +1,22 @@ +#!/usr/bin/env python3 + +# Load modules from $JUJU_CHARM_DIR/lib +import sys +sys.path.append('lib') + +from charms.layer import basic # noqa +basic.bootstrap_charm_deps() + +from charmhelpers.core import hookenv # noqa +hookenv.atstart(basic.init_config_states) +hookenv.atexit(basic.clear_config_states) + + +# This will load and run the appropriate @hook and other decorated +# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive, +# and $JUJU_CHARM_DIR/hooks/relations. +# +# See https://jujucharms.com/docs/stable/authors-charm-building +# for more information on this pattern. +from charms.reactive import main # noqa +main() diff --git a/kubernetes-control-plane/hooks/etcd-relation-changed b/kubernetes-control-plane/hooks/etcd-relation-changed new file mode 100755 index 0000000..9858c6b --- /dev/null +++ b/kubernetes-control-plane/hooks/etcd-relation-changed @@ -0,0 +1,22 @@ +#!/usr/bin/env python3 + +# Load modules from $JUJU_CHARM_DIR/lib +import sys +sys.path.append('lib') + +from charms.layer import basic # noqa +basic.bootstrap_charm_deps() + +from charmhelpers.core import hookenv # noqa +hookenv.atstart(basic.init_config_states) +hookenv.atexit(basic.clear_config_states) + + +# This will load and run the appropriate @hook and other decorated +# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive, +# and $JUJU_CHARM_DIR/hooks/relations. +# +# See https://jujucharms.com/docs/stable/authors-charm-building +# for more information on this pattern. 
+from charms.reactive import main # noqa +main() diff --git a/kubernetes-control-plane/hooks/etcd-relation-created b/kubernetes-control-plane/hooks/etcd-relation-created new file mode 100755 index 0000000..9858c6b --- /dev/null +++ b/kubernetes-control-plane/hooks/etcd-relation-created @@ -0,0 +1,22 @@ +#!/usr/bin/env python3 + +# Load modules from $JUJU_CHARM_DIR/lib +import sys +sys.path.append('lib') + +from charms.layer import basic # noqa +basic.bootstrap_charm_deps() + +from charmhelpers.core import hookenv # noqa +hookenv.atstart(basic.init_config_states) +hookenv.atexit(basic.clear_config_states) + + +# This will load and run the appropriate @hook and other decorated +# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive, +# and $JUJU_CHARM_DIR/hooks/relations. +# +# See https://jujucharms.com/docs/stable/authors-charm-building +# for more information on this pattern. +from charms.reactive import main # noqa +main() diff --git a/kubernetes-control-plane/hooks/etcd-relation-departed b/kubernetes-control-plane/hooks/etcd-relation-departed new file mode 100755 index 0000000..9858c6b --- /dev/null +++ b/kubernetes-control-plane/hooks/etcd-relation-departed @@ -0,0 +1,22 @@ +#!/usr/bin/env python3 + +# Load modules from $JUJU_CHARM_DIR/lib +import sys +sys.path.append('lib') + +from charms.layer import basic # noqa +basic.bootstrap_charm_deps() + +from charmhelpers.core import hookenv # noqa +hookenv.atstart(basic.init_config_states) +hookenv.atexit(basic.clear_config_states) + + +# This will load and run the appropriate @hook and other decorated +# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive, +# and $JUJU_CHARM_DIR/hooks/relations. +# +# See https://jujucharms.com/docs/stable/authors-charm-building +# for more information on this pattern. 
+from charms.reactive import main # noqa +main() diff --git a/kubernetes-control-plane/hooks/etcd-relation-joined b/kubernetes-control-plane/hooks/etcd-relation-joined new file mode 100755 index 0000000..9858c6b --- /dev/null +++ b/kubernetes-control-plane/hooks/etcd-relation-joined @@ -0,0 +1,22 @@ +#!/usr/bin/env python3 + +# Load modules from $JUJU_CHARM_DIR/lib +import sys +sys.path.append('lib') + +from charms.layer import basic # noqa +basic.bootstrap_charm_deps() + +from charmhelpers.core import hookenv # noqa +hookenv.atstart(basic.init_config_states) +hookenv.atexit(basic.clear_config_states) + + +# This will load and run the appropriate @hook and other decorated +# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive, +# and $JUJU_CHARM_DIR/hooks/relations. +# +# See https://jujucharms.com/docs/stable/authors-charm-building +# for more information on this pattern. +from charms.reactive import main # noqa +main() diff --git a/kubernetes-control-plane/hooks/external-cloud-provider-relation-broken b/kubernetes-control-plane/hooks/external-cloud-provider-relation-broken new file mode 100755 index 0000000..9858c6b --- /dev/null +++ b/kubernetes-control-plane/hooks/external-cloud-provider-relation-broken @@ -0,0 +1,22 @@ +#!/usr/bin/env python3 + +# Load modules from $JUJU_CHARM_DIR/lib +import sys +sys.path.append('lib') + +from charms.layer import basic # noqa +basic.bootstrap_charm_deps() + +from charmhelpers.core import hookenv # noqa +hookenv.atstart(basic.init_config_states) +hookenv.atexit(basic.clear_config_states) + + +# This will load and run the appropriate @hook and other decorated +# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive, +# and $JUJU_CHARM_DIR/hooks/relations. +# +# See https://jujucharms.com/docs/stable/authors-charm-building +# for more information on this pattern. 
+from charms.reactive import main # noqa +main() diff --git a/kubernetes-control-plane/hooks/external-cloud-provider-relation-changed b/kubernetes-control-plane/hooks/external-cloud-provider-relation-changed new file mode 100755 index 0000000..9858c6b --- /dev/null +++ b/kubernetes-control-plane/hooks/external-cloud-provider-relation-changed @@ -0,0 +1,22 @@ +#!/usr/bin/env python3 + +# Load modules from $JUJU_CHARM_DIR/lib +import sys +sys.path.append('lib') + +from charms.layer import basic # noqa +basic.bootstrap_charm_deps() + +from charmhelpers.core import hookenv # noqa +hookenv.atstart(basic.init_config_states) +hookenv.atexit(basic.clear_config_states) + + +# This will load and run the appropriate @hook and other decorated +# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive, +# and $JUJU_CHARM_DIR/hooks/relations. +# +# See https://jujucharms.com/docs/stable/authors-charm-building +# for more information on this pattern. +from charms.reactive import main # noqa +main() diff --git a/kubernetes-control-plane/hooks/external-cloud-provider-relation-created b/kubernetes-control-plane/hooks/external-cloud-provider-relation-created new file mode 100755 index 0000000..9858c6b --- /dev/null +++ b/kubernetes-control-plane/hooks/external-cloud-provider-relation-created @@ -0,0 +1,22 @@ +#!/usr/bin/env python3 + +# Load modules from $JUJU_CHARM_DIR/lib +import sys +sys.path.append('lib') + +from charms.layer import basic # noqa +basic.bootstrap_charm_deps() + +from charmhelpers.core import hookenv # noqa +hookenv.atstart(basic.init_config_states) +hookenv.atexit(basic.clear_config_states) + + +# This will load and run the appropriate @hook and other decorated +# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive, +# and $JUJU_CHARM_DIR/hooks/relations. +# +# See https://jujucharms.com/docs/stable/authors-charm-building +# for more information on this pattern. 
+from charms.reactive import main # noqa +main() diff --git a/kubernetes-control-plane/hooks/external-cloud-provider-relation-departed b/kubernetes-control-plane/hooks/external-cloud-provider-relation-departed new file mode 100755 index 0000000..9858c6b --- /dev/null +++ b/kubernetes-control-plane/hooks/external-cloud-provider-relation-departed @@ -0,0 +1,22 @@ +#!/usr/bin/env python3 + +# Load modules from $JUJU_CHARM_DIR/lib +import sys +sys.path.append('lib') + +from charms.layer import basic # noqa +basic.bootstrap_charm_deps() + +from charmhelpers.core import hookenv # noqa +hookenv.atstart(basic.init_config_states) +hookenv.atexit(basic.clear_config_states) + + +# This will load and run the appropriate @hook and other decorated +# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive, +# and $JUJU_CHARM_DIR/hooks/relations. +# +# See https://jujucharms.com/docs/stable/authors-charm-building +# for more information on this pattern. +from charms.reactive import main # noqa +main() diff --git a/kubernetes-control-plane/hooks/external-cloud-provider-relation-joined b/kubernetes-control-plane/hooks/external-cloud-provider-relation-joined new file mode 100755 index 0000000..9858c6b --- /dev/null +++ b/kubernetes-control-plane/hooks/external-cloud-provider-relation-joined @@ -0,0 +1,22 @@ +#!/usr/bin/env python3 + +# Load modules from $JUJU_CHARM_DIR/lib +import sys +sys.path.append('lib') + +from charms.layer import basic # noqa +basic.bootstrap_charm_deps() + +from charmhelpers.core import hookenv # noqa +hookenv.atstart(basic.init_config_states) +hookenv.atexit(basic.clear_config_states) + + +# This will load and run the appropriate @hook and other decorated +# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive, +# and $JUJU_CHARM_DIR/hooks/relations. +# +# See https://jujucharms.com/docs/stable/authors-charm-building +# for more information on this pattern. 
+from charms.reactive import main # noqa +main() diff --git a/kubernetes-control-plane/hooks/gcp-relation-broken b/kubernetes-control-plane/hooks/gcp-relation-broken new file mode 100755 index 0000000..9858c6b --- /dev/null +++ b/kubernetes-control-plane/hooks/gcp-relation-broken @@ -0,0 +1,22 @@ +#!/usr/bin/env python3 + +# Load modules from $JUJU_CHARM_DIR/lib +import sys +sys.path.append('lib') + +from charms.layer import basic # noqa +basic.bootstrap_charm_deps() + +from charmhelpers.core import hookenv # noqa +hookenv.atstart(basic.init_config_states) +hookenv.atexit(basic.clear_config_states) + + +# This will load and run the appropriate @hook and other decorated +# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive, +# and $JUJU_CHARM_DIR/hooks/relations. +# +# See https://jujucharms.com/docs/stable/authors-charm-building +# for more information on this pattern. +from charms.reactive import main # noqa +main() diff --git a/kubernetes-control-plane/hooks/gcp-relation-changed b/kubernetes-control-plane/hooks/gcp-relation-changed new file mode 100755 index 0000000..9858c6b --- /dev/null +++ b/kubernetes-control-plane/hooks/gcp-relation-changed @@ -0,0 +1,22 @@ +#!/usr/bin/env python3 + +# Load modules from $JUJU_CHARM_DIR/lib +import sys +sys.path.append('lib') + +from charms.layer import basic # noqa +basic.bootstrap_charm_deps() + +from charmhelpers.core import hookenv # noqa +hookenv.atstart(basic.init_config_states) +hookenv.atexit(basic.clear_config_states) + + +# This will load and run the appropriate @hook and other decorated +# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive, +# and $JUJU_CHARM_DIR/hooks/relations. +# +# See https://jujucharms.com/docs/stable/authors-charm-building +# for more information on this pattern. 
+from charms.reactive import main # noqa +main() diff --git a/kubernetes-control-plane/hooks/gcp-relation-created b/kubernetes-control-plane/hooks/gcp-relation-created new file mode 100755 index 0000000..9858c6b --- /dev/null +++ b/kubernetes-control-plane/hooks/gcp-relation-created @@ -0,0 +1,22 @@ +#!/usr/bin/env python3 + +# Load modules from $JUJU_CHARM_DIR/lib +import sys +sys.path.append('lib') + +from charms.layer import basic # noqa +basic.bootstrap_charm_deps() + +from charmhelpers.core import hookenv # noqa +hookenv.atstart(basic.init_config_states) +hookenv.atexit(basic.clear_config_states) + + +# This will load and run the appropriate @hook and other decorated +# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive, +# and $JUJU_CHARM_DIR/hooks/relations. +# +# See https://jujucharms.com/docs/stable/authors-charm-building +# for more information on this pattern. +from charms.reactive import main # noqa +main() diff --git a/kubernetes-control-plane/hooks/gcp-relation-departed b/kubernetes-control-plane/hooks/gcp-relation-departed new file mode 100755 index 0000000..9858c6b --- /dev/null +++ b/kubernetes-control-plane/hooks/gcp-relation-departed @@ -0,0 +1,22 @@ +#!/usr/bin/env python3 + +# Load modules from $JUJU_CHARM_DIR/lib +import sys +sys.path.append('lib') + +from charms.layer import basic # noqa +basic.bootstrap_charm_deps() + +from charmhelpers.core import hookenv # noqa +hookenv.atstart(basic.init_config_states) +hookenv.atexit(basic.clear_config_states) + + +# This will load and run the appropriate @hook and other decorated +# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive, +# and $JUJU_CHARM_DIR/hooks/relations. +# +# See https://jujucharms.com/docs/stable/authors-charm-building +# for more information on this pattern. 
+from charms.reactive import main # noqa +main() diff --git a/kubernetes-control-plane/hooks/gcp-relation-joined b/kubernetes-control-plane/hooks/gcp-relation-joined new file mode 100755 index 0000000..9858c6b --- /dev/null +++ b/kubernetes-control-plane/hooks/gcp-relation-joined @@ -0,0 +1,22 @@ +#!/usr/bin/env python3 + +# Load modules from $JUJU_CHARM_DIR/lib +import sys +sys.path.append('lib') + +from charms.layer import basic # noqa +basic.bootstrap_charm_deps() + +from charmhelpers.core import hookenv # noqa +hookenv.atstart(basic.init_config_states) +hookenv.atexit(basic.clear_config_states) + + +# This will load and run the appropriate @hook and other decorated +# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive, +# and $JUJU_CHARM_DIR/hooks/relations. +# +# See https://jujucharms.com/docs/stable/authors-charm-building +# for more information on this pattern. +from charms.reactive import main # noqa +main() diff --git a/kubernetes-control-plane/hooks/grafana-relation-broken b/kubernetes-control-plane/hooks/grafana-relation-broken new file mode 100755 index 0000000..9858c6b --- /dev/null +++ b/kubernetes-control-plane/hooks/grafana-relation-broken @@ -0,0 +1,22 @@ +#!/usr/bin/env python3 + +# Load modules from $JUJU_CHARM_DIR/lib +import sys +sys.path.append('lib') + +from charms.layer import basic # noqa +basic.bootstrap_charm_deps() + +from charmhelpers.core import hookenv # noqa +hookenv.atstart(basic.init_config_states) +hookenv.atexit(basic.clear_config_states) + + +# This will load and run the appropriate @hook and other decorated +# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive, +# and $JUJU_CHARM_DIR/hooks/relations. +# +# See https://jujucharms.com/docs/stable/authors-charm-building +# for more information on this pattern. 
+from charms.reactive import main # noqa +main() diff --git a/kubernetes-control-plane/hooks/grafana-relation-changed b/kubernetes-control-plane/hooks/grafana-relation-changed new file mode 100755 index 0000000..9858c6b --- /dev/null +++ b/kubernetes-control-plane/hooks/grafana-relation-changed @@ -0,0 +1,22 @@ +#!/usr/bin/env python3 + +# Load modules from $JUJU_CHARM_DIR/lib +import sys +sys.path.append('lib') + +from charms.layer import basic # noqa +basic.bootstrap_charm_deps() + +from charmhelpers.core import hookenv # noqa +hookenv.atstart(basic.init_config_states) +hookenv.atexit(basic.clear_config_states) + + +# This will load and run the appropriate @hook and other decorated +# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive, +# and $JUJU_CHARM_DIR/hooks/relations. +# +# See https://jujucharms.com/docs/stable/authors-charm-building +# for more information on this pattern. +from charms.reactive import main # noqa +main() diff --git a/kubernetes-control-plane/hooks/grafana-relation-created b/kubernetes-control-plane/hooks/grafana-relation-created new file mode 100755 index 0000000..9858c6b --- /dev/null +++ b/kubernetes-control-plane/hooks/grafana-relation-created @@ -0,0 +1,22 @@ +#!/usr/bin/env python3 + +# Load modules from $JUJU_CHARM_DIR/lib +import sys +sys.path.append('lib') + +from charms.layer import basic # noqa +basic.bootstrap_charm_deps() + +from charmhelpers.core import hookenv # noqa +hookenv.atstart(basic.init_config_states) +hookenv.atexit(basic.clear_config_states) + + +# This will load and run the appropriate @hook and other decorated +# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive, +# and $JUJU_CHARM_DIR/hooks/relations. +# +# See https://jujucharms.com/docs/stable/authors-charm-building +# for more information on this pattern. 
+from charms.reactive import main # noqa +main() diff --git a/kubernetes-control-plane/hooks/grafana-relation-departed b/kubernetes-control-plane/hooks/grafana-relation-departed new file mode 100755 index 0000000..9858c6b --- /dev/null +++ b/kubernetes-control-plane/hooks/grafana-relation-departed @@ -0,0 +1,22 @@ +#!/usr/bin/env python3 + +# Load modules from $JUJU_CHARM_DIR/lib +import sys +sys.path.append('lib') + +from charms.layer import basic # noqa +basic.bootstrap_charm_deps() + +from charmhelpers.core import hookenv # noqa +hookenv.atstart(basic.init_config_states) +hookenv.atexit(basic.clear_config_states) + + +# This will load and run the appropriate @hook and other decorated +# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive, +# and $JUJU_CHARM_DIR/hooks/relations. +# +# See https://jujucharms.com/docs/stable/authors-charm-building +# for more information on this pattern. +from charms.reactive import main # noqa +main() diff --git a/kubernetes-control-plane/hooks/grafana-relation-joined b/kubernetes-control-plane/hooks/grafana-relation-joined new file mode 100755 index 0000000..9858c6b --- /dev/null +++ b/kubernetes-control-plane/hooks/grafana-relation-joined @@ -0,0 +1,22 @@ +#!/usr/bin/env python3 + +# Load modules from $JUJU_CHARM_DIR/lib +import sys +sys.path.append('lib') + +from charms.layer import basic # noqa +basic.bootstrap_charm_deps() + +from charmhelpers.core import hookenv # noqa +hookenv.atstart(basic.init_config_states) +hookenv.atexit(basic.clear_config_states) + + +# This will load and run the appropriate @hook and other decorated +# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive, +# and $JUJU_CHARM_DIR/hooks/relations. +# +# See https://jujucharms.com/docs/stable/authors-charm-building +# for more information on this pattern. 
+from charms.reactive import main # noqa +main() diff --git a/kubernetes-control-plane/hooks/ha-relation-broken b/kubernetes-control-plane/hooks/ha-relation-broken new file mode 100755 index 0000000..9858c6b --- /dev/null +++ b/kubernetes-control-plane/hooks/ha-relation-broken @@ -0,0 +1,22 @@ +#!/usr/bin/env python3 + +# Load modules from $JUJU_CHARM_DIR/lib +import sys +sys.path.append('lib') + +from charms.layer import basic # noqa +basic.bootstrap_charm_deps() + +from charmhelpers.core import hookenv # noqa +hookenv.atstart(basic.init_config_states) +hookenv.atexit(basic.clear_config_states) + + +# This will load and run the appropriate @hook and other decorated +# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive, +# and $JUJU_CHARM_DIR/hooks/relations. +# +# See https://jujucharms.com/docs/stable/authors-charm-building +# for more information on this pattern. +from charms.reactive import main # noqa +main() diff --git a/kubernetes-control-plane/hooks/ha-relation-changed b/kubernetes-control-plane/hooks/ha-relation-changed new file mode 100755 index 0000000..9858c6b --- /dev/null +++ b/kubernetes-control-plane/hooks/ha-relation-changed @@ -0,0 +1,22 @@ +#!/usr/bin/env python3 + +# Load modules from $JUJU_CHARM_DIR/lib +import sys +sys.path.append('lib') + +from charms.layer import basic # noqa +basic.bootstrap_charm_deps() + +from charmhelpers.core import hookenv # noqa +hookenv.atstart(basic.init_config_states) +hookenv.atexit(basic.clear_config_states) + + +# This will load and run the appropriate @hook and other decorated +# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive, +# and $JUJU_CHARM_DIR/hooks/relations. +# +# See https://jujucharms.com/docs/stable/authors-charm-building +# for more information on this pattern. 
+from charms.reactive import main # noqa +main() diff --git a/kubernetes-control-plane/hooks/ha-relation-created b/kubernetes-control-plane/hooks/ha-relation-created new file mode 100755 index 0000000..9858c6b --- /dev/null +++ b/kubernetes-control-plane/hooks/ha-relation-created @@ -0,0 +1,22 @@ +#!/usr/bin/env python3 + +# Load modules from $JUJU_CHARM_DIR/lib +import sys +sys.path.append('lib') + +from charms.layer import basic # noqa +basic.bootstrap_charm_deps() + +from charmhelpers.core import hookenv # noqa +hookenv.atstart(basic.init_config_states) +hookenv.atexit(basic.clear_config_states) + + +# This will load and run the appropriate @hook and other decorated +# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive, +# and $JUJU_CHARM_DIR/hooks/relations. +# +# See https://jujucharms.com/docs/stable/authors-charm-building +# for more information on this pattern. +from charms.reactive import main # noqa +main() diff --git a/kubernetes-control-plane/hooks/ha-relation-departed b/kubernetes-control-plane/hooks/ha-relation-departed new file mode 100755 index 0000000..9858c6b --- /dev/null +++ b/kubernetes-control-plane/hooks/ha-relation-departed @@ -0,0 +1,22 @@ +#!/usr/bin/env python3 + +# Load modules from $JUJU_CHARM_DIR/lib +import sys +sys.path.append('lib') + +from charms.layer import basic # noqa +basic.bootstrap_charm_deps() + +from charmhelpers.core import hookenv # noqa +hookenv.atstart(basic.init_config_states) +hookenv.atexit(basic.clear_config_states) + + +# This will load and run the appropriate @hook and other decorated +# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive, +# and $JUJU_CHARM_DIR/hooks/relations. +# +# See https://jujucharms.com/docs/stable/authors-charm-building +# for more information on this pattern. 
+from charms.reactive import main # noqa +main() diff --git a/kubernetes-control-plane/hooks/ha-relation-joined b/kubernetes-control-plane/hooks/ha-relation-joined new file mode 100755 index 0000000..9858c6b --- /dev/null +++ b/kubernetes-control-plane/hooks/ha-relation-joined @@ -0,0 +1,22 @@ +#!/usr/bin/env python3 + +# Load modules from $JUJU_CHARM_DIR/lib +import sys +sys.path.append('lib') + +from charms.layer import basic # noqa +basic.bootstrap_charm_deps() + +from charmhelpers.core import hookenv # noqa +hookenv.atstart(basic.init_config_states) +hookenv.atexit(basic.clear_config_states) + + +# This will load and run the appropriate @hook and other decorated +# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive, +# and $JUJU_CHARM_DIR/hooks/relations. +# +# See https://jujucharms.com/docs/stable/authors-charm-building +# for more information on this pattern. +from charms.reactive import main # noqa +main() diff --git a/kubernetes-control-plane/hooks/hook.template b/kubernetes-control-plane/hooks/hook.template new file mode 100644 index 0000000..9858c6b --- /dev/null +++ b/kubernetes-control-plane/hooks/hook.template @@ -0,0 +1,22 @@ +#!/usr/bin/env python3 + +# Load modules from $JUJU_CHARM_DIR/lib +import sys +sys.path.append('lib') + +from charms.layer import basic # noqa +basic.bootstrap_charm_deps() + +from charmhelpers.core import hookenv # noqa +hookenv.atstart(basic.init_config_states) +hookenv.atexit(basic.clear_config_states) + + +# This will load and run the appropriate @hook and other decorated +# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive, +# and $JUJU_CHARM_DIR/hooks/relations. +# +# See https://jujucharms.com/docs/stable/authors-charm-building +# for more information on this pattern. 
+from charms.reactive import main # noqa +main() diff --git a/kubernetes-control-plane/hooks/install b/kubernetes-control-plane/hooks/install new file mode 100755 index 0000000..9858c6b --- /dev/null +++ b/kubernetes-control-plane/hooks/install @@ -0,0 +1,22 @@ +#!/usr/bin/env python3 + +# Load modules from $JUJU_CHARM_DIR/lib +import sys +sys.path.append('lib') + +from charms.layer import basic # noqa +basic.bootstrap_charm_deps() + +from charmhelpers.core import hookenv # noqa +hookenv.atstart(basic.init_config_states) +hookenv.atexit(basic.clear_config_states) + + +# This will load and run the appropriate @hook and other decorated +# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive, +# and $JUJU_CHARM_DIR/hooks/relations. +# +# See https://jujucharms.com/docs/stable/authors-charm-building +# for more information on this pattern. +from charms.reactive import main # noqa +main() diff --git a/kubernetes-control-plane/hooks/keystone-credentials-relation-broken b/kubernetes-control-plane/hooks/keystone-credentials-relation-broken new file mode 100755 index 0000000..9858c6b --- /dev/null +++ b/kubernetes-control-plane/hooks/keystone-credentials-relation-broken @@ -0,0 +1,22 @@ +#!/usr/bin/env python3 + +# Load modules from $JUJU_CHARM_DIR/lib +import sys +sys.path.append('lib') + +from charms.layer import basic # noqa +basic.bootstrap_charm_deps() + +from charmhelpers.core import hookenv # noqa +hookenv.atstart(basic.init_config_states) +hookenv.atexit(basic.clear_config_states) + + +# This will load and run the appropriate @hook and other decorated +# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive, +# and $JUJU_CHARM_DIR/hooks/relations. +# +# See https://jujucharms.com/docs/stable/authors-charm-building +# for more information on this pattern. 
+from charms.reactive import main # noqa +main() diff --git a/kubernetes-control-plane/hooks/keystone-credentials-relation-changed b/kubernetes-control-plane/hooks/keystone-credentials-relation-changed new file mode 100755 index 0000000..9858c6b --- /dev/null +++ b/kubernetes-control-plane/hooks/keystone-credentials-relation-changed @@ -0,0 +1,22 @@ +#!/usr/bin/env python3 + +# Load modules from $JUJU_CHARM_DIR/lib +import sys +sys.path.append('lib') + +from charms.layer import basic # noqa +basic.bootstrap_charm_deps() + +from charmhelpers.core import hookenv # noqa +hookenv.atstart(basic.init_config_states) +hookenv.atexit(basic.clear_config_states) + + +# This will load and run the appropriate @hook and other decorated +# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive, +# and $JUJU_CHARM_DIR/hooks/relations. +# +# See https://jujucharms.com/docs/stable/authors-charm-building +# for more information on this pattern. +from charms.reactive import main # noqa +main() diff --git a/kubernetes-control-plane/hooks/keystone-credentials-relation-created b/kubernetes-control-plane/hooks/keystone-credentials-relation-created new file mode 100755 index 0000000..9858c6b --- /dev/null +++ b/kubernetes-control-plane/hooks/keystone-credentials-relation-created @@ -0,0 +1,22 @@ +#!/usr/bin/env python3 + +# Load modules from $JUJU_CHARM_DIR/lib +import sys +sys.path.append('lib') + +from charms.layer import basic # noqa +basic.bootstrap_charm_deps() + +from charmhelpers.core import hookenv # noqa +hookenv.atstart(basic.init_config_states) +hookenv.atexit(basic.clear_config_states) + + +# This will load and run the appropriate @hook and other decorated +# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive, +# and $JUJU_CHARM_DIR/hooks/relations. +# +# See https://jujucharms.com/docs/stable/authors-charm-building +# for more information on this pattern. 
+from charms.reactive import main # noqa +main() diff --git a/kubernetes-control-plane/hooks/keystone-credentials-relation-departed b/kubernetes-control-plane/hooks/keystone-credentials-relation-departed new file mode 100755 index 0000000..9858c6b --- /dev/null +++ b/kubernetes-control-plane/hooks/keystone-credentials-relation-departed @@ -0,0 +1,22 @@ +#!/usr/bin/env python3 + +# Load modules from $JUJU_CHARM_DIR/lib +import sys +sys.path.append('lib') + +from charms.layer import basic # noqa +basic.bootstrap_charm_deps() + +from charmhelpers.core import hookenv # noqa +hookenv.atstart(basic.init_config_states) +hookenv.atexit(basic.clear_config_states) + + +# This will load and run the appropriate @hook and other decorated +# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive, +# and $JUJU_CHARM_DIR/hooks/relations. +# +# See https://jujucharms.com/docs/stable/authors-charm-building +# for more information on this pattern. +from charms.reactive import main # noqa +main() diff --git a/kubernetes-control-plane/hooks/keystone-credentials-relation-joined b/kubernetes-control-plane/hooks/keystone-credentials-relation-joined new file mode 100755 index 0000000..9858c6b --- /dev/null +++ b/kubernetes-control-plane/hooks/keystone-credentials-relation-joined @@ -0,0 +1,22 @@ +#!/usr/bin/env python3 + +# Load modules from $JUJU_CHARM_DIR/lib +import sys +sys.path.append('lib') + +from charms.layer import basic # noqa +basic.bootstrap_charm_deps() + +from charmhelpers.core import hookenv # noqa +hookenv.atstart(basic.init_config_states) +hookenv.atexit(basic.clear_config_states) + + +# This will load and run the appropriate @hook and other decorated +# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive, +# and $JUJU_CHARM_DIR/hooks/relations. +# +# See https://jujucharms.com/docs/stable/authors-charm-building +# for more information on this pattern. 
+from charms.reactive import main # noqa +main() diff --git a/kubernetes-control-plane/hooks/kube-api-endpoint-relation-broken b/kubernetes-control-plane/hooks/kube-api-endpoint-relation-broken new file mode 100755 index 0000000..9858c6b --- /dev/null +++ b/kubernetes-control-plane/hooks/kube-api-endpoint-relation-broken @@ -0,0 +1,22 @@ +#!/usr/bin/env python3 + +# Load modules from $JUJU_CHARM_DIR/lib +import sys +sys.path.append('lib') + +from charms.layer import basic # noqa +basic.bootstrap_charm_deps() + +from charmhelpers.core import hookenv # noqa +hookenv.atstart(basic.init_config_states) +hookenv.atexit(basic.clear_config_states) + + +# This will load and run the appropriate @hook and other decorated +# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive, +# and $JUJU_CHARM_DIR/hooks/relations. +# +# See https://jujucharms.com/docs/stable/authors-charm-building +# for more information on this pattern. +from charms.reactive import main # noqa +main() diff --git a/kubernetes-control-plane/hooks/kube-api-endpoint-relation-changed b/kubernetes-control-plane/hooks/kube-api-endpoint-relation-changed new file mode 100755 index 0000000..9858c6b --- /dev/null +++ b/kubernetes-control-plane/hooks/kube-api-endpoint-relation-changed @@ -0,0 +1,22 @@ +#!/usr/bin/env python3 + +# Load modules from $JUJU_CHARM_DIR/lib +import sys +sys.path.append('lib') + +from charms.layer import basic # noqa +basic.bootstrap_charm_deps() + +from charmhelpers.core import hookenv # noqa +hookenv.atstart(basic.init_config_states) +hookenv.atexit(basic.clear_config_states) + + +# This will load and run the appropriate @hook and other decorated +# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive, +# and $JUJU_CHARM_DIR/hooks/relations. +# +# See https://jujucharms.com/docs/stable/authors-charm-building +# for more information on this pattern. 
+from charms.reactive import main # noqa +main() diff --git a/kubernetes-control-plane/hooks/kube-api-endpoint-relation-created b/kubernetes-control-plane/hooks/kube-api-endpoint-relation-created new file mode 100755 index 0000000..9858c6b --- /dev/null +++ b/kubernetes-control-plane/hooks/kube-api-endpoint-relation-created @@ -0,0 +1,22 @@ +#!/usr/bin/env python3 + +# Load modules from $JUJU_CHARM_DIR/lib +import sys +sys.path.append('lib') + +from charms.layer import basic # noqa +basic.bootstrap_charm_deps() + +from charmhelpers.core import hookenv # noqa +hookenv.atstart(basic.init_config_states) +hookenv.atexit(basic.clear_config_states) + + +# This will load and run the appropriate @hook and other decorated +# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive, +# and $JUJU_CHARM_DIR/hooks/relations. +# +# See https://jujucharms.com/docs/stable/authors-charm-building +# for more information on this pattern. +from charms.reactive import main # noqa +main() diff --git a/kubernetes-control-plane/hooks/kube-api-endpoint-relation-departed b/kubernetes-control-plane/hooks/kube-api-endpoint-relation-departed new file mode 100755 index 0000000..9858c6b --- /dev/null +++ b/kubernetes-control-plane/hooks/kube-api-endpoint-relation-departed @@ -0,0 +1,22 @@ +#!/usr/bin/env python3 + +# Load modules from $JUJU_CHARM_DIR/lib +import sys +sys.path.append('lib') + +from charms.layer import basic # noqa +basic.bootstrap_charm_deps() + +from charmhelpers.core import hookenv # noqa +hookenv.atstart(basic.init_config_states) +hookenv.atexit(basic.clear_config_states) + + +# This will load and run the appropriate @hook and other decorated +# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive, +# and $JUJU_CHARM_DIR/hooks/relations. +# +# See https://jujucharms.com/docs/stable/authors-charm-building +# for more information on this pattern. 
+from charms.reactive import main # noqa +main() diff --git a/kubernetes-control-plane/hooks/kube-api-endpoint-relation-joined b/kubernetes-control-plane/hooks/kube-api-endpoint-relation-joined new file mode 100755 index 0000000..9858c6b --- /dev/null +++ b/kubernetes-control-plane/hooks/kube-api-endpoint-relation-joined @@ -0,0 +1,22 @@ +#!/usr/bin/env python3 + +# Load modules from $JUJU_CHARM_DIR/lib +import sys +sys.path.append('lib') + +from charms.layer import basic # noqa +basic.bootstrap_charm_deps() + +from charmhelpers.core import hookenv # noqa +hookenv.atstart(basic.init_config_states) +hookenv.atexit(basic.clear_config_states) + + +# This will load and run the appropriate @hook and other decorated +# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive, +# and $JUJU_CHARM_DIR/hooks/relations. +# +# See https://jujucharms.com/docs/stable/authors-charm-building +# for more information on this pattern. +from charms.reactive import main # noqa +main() diff --git a/kubernetes-control-plane/hooks/kube-control-relation-broken b/kubernetes-control-plane/hooks/kube-control-relation-broken new file mode 100755 index 0000000..9858c6b --- /dev/null +++ b/kubernetes-control-plane/hooks/kube-control-relation-broken @@ -0,0 +1,22 @@ +#!/usr/bin/env python3 + +# Load modules from $JUJU_CHARM_DIR/lib +import sys +sys.path.append('lib') + +from charms.layer import basic # noqa +basic.bootstrap_charm_deps() + +from charmhelpers.core import hookenv # noqa +hookenv.atstart(basic.init_config_states) +hookenv.atexit(basic.clear_config_states) + + +# This will load and run the appropriate @hook and other decorated +# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive, +# and $JUJU_CHARM_DIR/hooks/relations. +# +# See https://jujucharms.com/docs/stable/authors-charm-building +# for more information on this pattern. 
+from charms.reactive import main # noqa +main() diff --git a/kubernetes-control-plane/hooks/kube-control-relation-changed b/kubernetes-control-plane/hooks/kube-control-relation-changed new file mode 100755 index 0000000..9858c6b --- /dev/null +++ b/kubernetes-control-plane/hooks/kube-control-relation-changed @@ -0,0 +1,22 @@ +#!/usr/bin/env python3 + +# Load modules from $JUJU_CHARM_DIR/lib +import sys +sys.path.append('lib') + +from charms.layer import basic # noqa +basic.bootstrap_charm_deps() + +from charmhelpers.core import hookenv # noqa +hookenv.atstart(basic.init_config_states) +hookenv.atexit(basic.clear_config_states) + + +# This will load and run the appropriate @hook and other decorated +# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive, +# and $JUJU_CHARM_DIR/hooks/relations. +# +# See https://jujucharms.com/docs/stable/authors-charm-building +# for more information on this pattern. +from charms.reactive import main # noqa +main() diff --git a/kubernetes-control-plane/hooks/kube-control-relation-created b/kubernetes-control-plane/hooks/kube-control-relation-created new file mode 100755 index 0000000..9858c6b --- /dev/null +++ b/kubernetes-control-plane/hooks/kube-control-relation-created @@ -0,0 +1,22 @@ +#!/usr/bin/env python3 + +# Load modules from $JUJU_CHARM_DIR/lib +import sys +sys.path.append('lib') + +from charms.layer import basic # noqa +basic.bootstrap_charm_deps() + +from charmhelpers.core import hookenv # noqa +hookenv.atstart(basic.init_config_states) +hookenv.atexit(basic.clear_config_states) + + +# This will load and run the appropriate @hook and other decorated +# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive, +# and $JUJU_CHARM_DIR/hooks/relations. +# +# See https://jujucharms.com/docs/stable/authors-charm-building +# for more information on this pattern. 
+from charms.reactive import main # noqa +main() diff --git a/kubernetes-control-plane/hooks/kube-control-relation-departed b/kubernetes-control-plane/hooks/kube-control-relation-departed new file mode 100755 index 0000000..9858c6b --- /dev/null +++ b/kubernetes-control-plane/hooks/kube-control-relation-departed @@ -0,0 +1,22 @@ +#!/usr/bin/env python3 + +# Load modules from $JUJU_CHARM_DIR/lib +import sys +sys.path.append('lib') + +from charms.layer import basic # noqa +basic.bootstrap_charm_deps() + +from charmhelpers.core import hookenv # noqa +hookenv.atstart(basic.init_config_states) +hookenv.atexit(basic.clear_config_states) + + +# This will load and run the appropriate @hook and other decorated +# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive, +# and $JUJU_CHARM_DIR/hooks/relations. +# +# See https://jujucharms.com/docs/stable/authors-charm-building +# for more information on this pattern. +from charms.reactive import main # noqa +main() diff --git a/kubernetes-control-plane/hooks/kube-control-relation-joined b/kubernetes-control-plane/hooks/kube-control-relation-joined new file mode 100755 index 0000000..9858c6b --- /dev/null +++ b/kubernetes-control-plane/hooks/kube-control-relation-joined @@ -0,0 +1,22 @@ +#!/usr/bin/env python3 + +# Load modules from $JUJU_CHARM_DIR/lib +import sys +sys.path.append('lib') + +from charms.layer import basic # noqa +basic.bootstrap_charm_deps() + +from charmhelpers.core import hookenv # noqa +hookenv.atstart(basic.init_config_states) +hookenv.atexit(basic.clear_config_states) + + +# This will load and run the appropriate @hook and other decorated +# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive, +# and $JUJU_CHARM_DIR/hooks/relations. +# +# See https://jujucharms.com/docs/stable/authors-charm-building +# for more information on this pattern. 
+from charms.reactive import main # noqa +main() diff --git a/kubernetes-control-plane/hooks/kube-masters-relation-broken b/kubernetes-control-plane/hooks/kube-masters-relation-broken new file mode 100755 index 0000000..9858c6b --- /dev/null +++ b/kubernetes-control-plane/hooks/kube-masters-relation-broken @@ -0,0 +1,22 @@ +#!/usr/bin/env python3 + +# Load modules from $JUJU_CHARM_DIR/lib +import sys +sys.path.append('lib') + +from charms.layer import basic # noqa +basic.bootstrap_charm_deps() + +from charmhelpers.core import hookenv # noqa +hookenv.atstart(basic.init_config_states) +hookenv.atexit(basic.clear_config_states) + + +# This will load and run the appropriate @hook and other decorated +# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive, +# and $JUJU_CHARM_DIR/hooks/relations. +# +# See https://jujucharms.com/docs/stable/authors-charm-building +# for more information on this pattern. +from charms.reactive import main # noqa +main() diff --git a/kubernetes-control-plane/hooks/kube-masters-relation-changed b/kubernetes-control-plane/hooks/kube-masters-relation-changed new file mode 100755 index 0000000..9858c6b --- /dev/null +++ b/kubernetes-control-plane/hooks/kube-masters-relation-changed @@ -0,0 +1,22 @@ +#!/usr/bin/env python3 + +# Load modules from $JUJU_CHARM_DIR/lib +import sys +sys.path.append('lib') + +from charms.layer import basic # noqa +basic.bootstrap_charm_deps() + +from charmhelpers.core import hookenv # noqa +hookenv.atstart(basic.init_config_states) +hookenv.atexit(basic.clear_config_states) + + +# This will load and run the appropriate @hook and other decorated +# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive, +# and $JUJU_CHARM_DIR/hooks/relations. +# +# See https://jujucharms.com/docs/stable/authors-charm-building +# for more information on this pattern. 
+from charms.reactive import main # noqa +main() diff --git a/kubernetes-control-plane/hooks/kube-masters-relation-created b/kubernetes-control-plane/hooks/kube-masters-relation-created new file mode 100755 index 0000000..9858c6b --- /dev/null +++ b/kubernetes-control-plane/hooks/kube-masters-relation-created @@ -0,0 +1,22 @@ +#!/usr/bin/env python3 + +# Load modules from $JUJU_CHARM_DIR/lib +import sys +sys.path.append('lib') + +from charms.layer import basic # noqa +basic.bootstrap_charm_deps() + +from charmhelpers.core import hookenv # noqa +hookenv.atstart(basic.init_config_states) +hookenv.atexit(basic.clear_config_states) + + +# This will load and run the appropriate @hook and other decorated +# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive, +# and $JUJU_CHARM_DIR/hooks/relations. +# +# See https://jujucharms.com/docs/stable/authors-charm-building +# for more information on this pattern. +from charms.reactive import main # noqa +main() diff --git a/kubernetes-control-plane/hooks/kube-masters-relation-departed b/kubernetes-control-plane/hooks/kube-masters-relation-departed new file mode 100755 index 0000000..9858c6b --- /dev/null +++ b/kubernetes-control-plane/hooks/kube-masters-relation-departed @@ -0,0 +1,22 @@ +#!/usr/bin/env python3 + +# Load modules from $JUJU_CHARM_DIR/lib +import sys +sys.path.append('lib') + +from charms.layer import basic # noqa +basic.bootstrap_charm_deps() + +from charmhelpers.core import hookenv # noqa +hookenv.atstart(basic.init_config_states) +hookenv.atexit(basic.clear_config_states) + + +# This will load and run the appropriate @hook and other decorated +# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive, +# and $JUJU_CHARM_DIR/hooks/relations. +# +# See https://jujucharms.com/docs/stable/authors-charm-building +# for more information on this pattern. 
+from charms.reactive import main # noqa +main() diff --git a/kubernetes-control-plane/hooks/kube-masters-relation-joined b/kubernetes-control-plane/hooks/kube-masters-relation-joined new file mode 100755 index 0000000..9858c6b --- /dev/null +++ b/kubernetes-control-plane/hooks/kube-masters-relation-joined @@ -0,0 +1,22 @@ +#!/usr/bin/env python3 + +# Load modules from $JUJU_CHARM_DIR/lib +import sys +sys.path.append('lib') + +from charms.layer import basic # noqa +basic.bootstrap_charm_deps() + +from charmhelpers.core import hookenv # noqa +hookenv.atstart(basic.init_config_states) +hookenv.atexit(basic.clear_config_states) + + +# This will load and run the appropriate @hook and other decorated +# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive, +# and $JUJU_CHARM_DIR/hooks/relations. +# +# See https://jujucharms.com/docs/stable/authors-charm-building +# for more information on this pattern. +from charms.reactive import main # noqa +main() diff --git a/kubernetes-control-plane/hooks/leader-elected b/kubernetes-control-plane/hooks/leader-elected new file mode 100755 index 0000000..fe39f65 --- /dev/null +++ b/kubernetes-control-plane/hooks/leader-elected @@ -0,0 +1,18 @@ +#!/usr/bin/env python3 + +# Load modules from $CHARM_DIR/lib +import sys +sys.path.append('lib') + +from charms.layer.basic import bootstrap_charm_deps +bootstrap_charm_deps() + + +# This will load and run the appropriate @hook and other decorated +# handlers from $CHARM_DIR/reactive, $CHARM_DIR/hooks/reactive, +# and $CHARM_DIR/hooks/relations. +# +# See https://jujucharms.com/docs/stable/authors-charm-building +# for more information on this pattern. 
+from charms.reactive import main +main() diff --git a/kubernetes-control-plane/hooks/leader-settings-changed b/kubernetes-control-plane/hooks/leader-settings-changed new file mode 100755 index 0000000..fe39f65 --- /dev/null +++ b/kubernetes-control-plane/hooks/leader-settings-changed @@ -0,0 +1,18 @@ +#!/usr/bin/env python3 + +# Load modules from $CHARM_DIR/lib +import sys +sys.path.append('lib') + +from charms.layer.basic import bootstrap_charm_deps +bootstrap_charm_deps() + + +# This will load and run the appropriate @hook and other decorated +# handlers from $CHARM_DIR/reactive, $CHARM_DIR/hooks/reactive, +# and $CHARM_DIR/hooks/relations. +# +# See https://jujucharms.com/docs/stable/authors-charm-building +# for more information on this pattern. +from charms.reactive import main +main() diff --git a/kubernetes-control-plane/hooks/loadbalancer-external-relation-broken b/kubernetes-control-plane/hooks/loadbalancer-external-relation-broken new file mode 100755 index 0000000..9858c6b --- /dev/null +++ b/kubernetes-control-plane/hooks/loadbalancer-external-relation-broken @@ -0,0 +1,22 @@ +#!/usr/bin/env python3 + +# Load modules from $JUJU_CHARM_DIR/lib +import sys +sys.path.append('lib') + +from charms.layer import basic # noqa +basic.bootstrap_charm_deps() + +from charmhelpers.core import hookenv # noqa +hookenv.atstart(basic.init_config_states) +hookenv.atexit(basic.clear_config_states) + + +# This will load and run the appropriate @hook and other decorated +# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive, +# and $JUJU_CHARM_DIR/hooks/relations. +# +# See https://jujucharms.com/docs/stable/authors-charm-building +# for more information on this pattern. 
+from charms.reactive import main # noqa +main() diff --git a/kubernetes-control-plane/hooks/loadbalancer-external-relation-changed b/kubernetes-control-plane/hooks/loadbalancer-external-relation-changed new file mode 100755 index 0000000..9858c6b --- /dev/null +++ b/kubernetes-control-plane/hooks/loadbalancer-external-relation-changed @@ -0,0 +1,22 @@ +#!/usr/bin/env python3 + +# Load modules from $JUJU_CHARM_DIR/lib +import sys +sys.path.append('lib') + +from charms.layer import basic # noqa +basic.bootstrap_charm_deps() + +from charmhelpers.core import hookenv # noqa +hookenv.atstart(basic.init_config_states) +hookenv.atexit(basic.clear_config_states) + + +# This will load and run the appropriate @hook and other decorated +# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive, +# and $JUJU_CHARM_DIR/hooks/relations. +# +# See https://jujucharms.com/docs/stable/authors-charm-building +# for more information on this pattern. +from charms.reactive import main # noqa +main() diff --git a/kubernetes-control-plane/hooks/loadbalancer-external-relation-created b/kubernetes-control-plane/hooks/loadbalancer-external-relation-created new file mode 100755 index 0000000..9858c6b --- /dev/null +++ b/kubernetes-control-plane/hooks/loadbalancer-external-relation-created @@ -0,0 +1,22 @@ +#!/usr/bin/env python3 + +# Load modules from $JUJU_CHARM_DIR/lib +import sys +sys.path.append('lib') + +from charms.layer import basic # noqa +basic.bootstrap_charm_deps() + +from charmhelpers.core import hookenv # noqa +hookenv.atstart(basic.init_config_states) +hookenv.atexit(basic.clear_config_states) + + +# This will load and run the appropriate @hook and other decorated +# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive, +# and $JUJU_CHARM_DIR/hooks/relations. +# +# See https://jujucharms.com/docs/stable/authors-charm-building +# for more information on this pattern. 
+from charms.reactive import main # noqa +main() diff --git a/kubernetes-control-plane/hooks/loadbalancer-external-relation-departed b/kubernetes-control-plane/hooks/loadbalancer-external-relation-departed new file mode 100755 index 0000000..9858c6b --- /dev/null +++ b/kubernetes-control-plane/hooks/loadbalancer-external-relation-departed @@ -0,0 +1,22 @@ +#!/usr/bin/env python3 + +# Load modules from $JUJU_CHARM_DIR/lib +import sys +sys.path.append('lib') + +from charms.layer import basic # noqa +basic.bootstrap_charm_deps() + +from charmhelpers.core import hookenv # noqa +hookenv.atstart(basic.init_config_states) +hookenv.atexit(basic.clear_config_states) + + +# This will load and run the appropriate @hook and other decorated +# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive, +# and $JUJU_CHARM_DIR/hooks/relations. +# +# See https://jujucharms.com/docs/stable/authors-charm-building +# for more information on this pattern. +from charms.reactive import main # noqa +main() diff --git a/kubernetes-control-plane/hooks/loadbalancer-external-relation-joined b/kubernetes-control-plane/hooks/loadbalancer-external-relation-joined new file mode 100755 index 0000000..9858c6b --- /dev/null +++ b/kubernetes-control-plane/hooks/loadbalancer-external-relation-joined @@ -0,0 +1,22 @@ +#!/usr/bin/env python3 + +# Load modules from $JUJU_CHARM_DIR/lib +import sys +sys.path.append('lib') + +from charms.layer import basic # noqa +basic.bootstrap_charm_deps() + +from charmhelpers.core import hookenv # noqa +hookenv.atstart(basic.init_config_states) +hookenv.atexit(basic.clear_config_states) + + +# This will load and run the appropriate @hook and other decorated +# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive, +# and $JUJU_CHARM_DIR/hooks/relations. +# +# See https://jujucharms.com/docs/stable/authors-charm-building +# for more information on this pattern. 
+from charms.reactive import main # noqa +main() diff --git a/kubernetes-control-plane/hooks/loadbalancer-internal-relation-broken b/kubernetes-control-plane/hooks/loadbalancer-internal-relation-broken new file mode 100755 index 0000000..9858c6b --- /dev/null +++ b/kubernetes-control-plane/hooks/loadbalancer-internal-relation-broken @@ -0,0 +1,22 @@ +#!/usr/bin/env python3 + +# Load modules from $JUJU_CHARM_DIR/lib +import sys +sys.path.append('lib') + +from charms.layer import basic # noqa +basic.bootstrap_charm_deps() + +from charmhelpers.core import hookenv # noqa +hookenv.atstart(basic.init_config_states) +hookenv.atexit(basic.clear_config_states) + + +# This will load and run the appropriate @hook and other decorated +# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive, +# and $JUJU_CHARM_DIR/hooks/relations. +# +# See https://jujucharms.com/docs/stable/authors-charm-building +# for more information on this pattern. +from charms.reactive import main # noqa +main() diff --git a/kubernetes-control-plane/hooks/loadbalancer-internal-relation-changed b/kubernetes-control-plane/hooks/loadbalancer-internal-relation-changed new file mode 100755 index 0000000..9858c6b --- /dev/null +++ b/kubernetes-control-plane/hooks/loadbalancer-internal-relation-changed @@ -0,0 +1,22 @@ +#!/usr/bin/env python3 + +# Load modules from $JUJU_CHARM_DIR/lib +import sys +sys.path.append('lib') + +from charms.layer import basic # noqa +basic.bootstrap_charm_deps() + +from charmhelpers.core import hookenv # noqa +hookenv.atstart(basic.init_config_states) +hookenv.atexit(basic.clear_config_states) + + +# This will load and run the appropriate @hook and other decorated +# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive, +# and $JUJU_CHARM_DIR/hooks/relations. +# +# See https://jujucharms.com/docs/stable/authors-charm-building +# for more information on this pattern. 
+from charms.reactive import main # noqa +main() diff --git a/kubernetes-control-plane/hooks/loadbalancer-internal-relation-created b/kubernetes-control-plane/hooks/loadbalancer-internal-relation-created new file mode 100755 index 0000000..9858c6b --- /dev/null +++ b/kubernetes-control-plane/hooks/loadbalancer-internal-relation-created @@ -0,0 +1,22 @@ +#!/usr/bin/env python3 + +# Load modules from $JUJU_CHARM_DIR/lib +import sys +sys.path.append('lib') + +from charms.layer import basic # noqa +basic.bootstrap_charm_deps() + +from charmhelpers.core import hookenv # noqa +hookenv.atstart(basic.init_config_states) +hookenv.atexit(basic.clear_config_states) + + +# This will load and run the appropriate @hook and other decorated +# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive, +# and $JUJU_CHARM_DIR/hooks/relations. +# +# See https://jujucharms.com/docs/stable/authors-charm-building +# for more information on this pattern. +from charms.reactive import main # noqa +main() diff --git a/kubernetes-control-plane/hooks/loadbalancer-internal-relation-departed b/kubernetes-control-plane/hooks/loadbalancer-internal-relation-departed new file mode 100755 index 0000000..9858c6b --- /dev/null +++ b/kubernetes-control-plane/hooks/loadbalancer-internal-relation-departed @@ -0,0 +1,22 @@ +#!/usr/bin/env python3 + +# Load modules from $JUJU_CHARM_DIR/lib +import sys +sys.path.append('lib') + +from charms.layer import basic # noqa +basic.bootstrap_charm_deps() + +from charmhelpers.core import hookenv # noqa +hookenv.atstart(basic.init_config_states) +hookenv.atexit(basic.clear_config_states) + + +# This will load and run the appropriate @hook and other decorated +# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive, +# and $JUJU_CHARM_DIR/hooks/relations. +# +# See https://jujucharms.com/docs/stable/authors-charm-building +# for more information on this pattern. 
+from charms.reactive import main # noqa +main() diff --git a/kubernetes-control-plane/hooks/loadbalancer-internal-relation-joined b/kubernetes-control-plane/hooks/loadbalancer-internal-relation-joined new file mode 100755 index 0000000..9858c6b --- /dev/null +++ b/kubernetes-control-plane/hooks/loadbalancer-internal-relation-joined @@ -0,0 +1,22 @@ +#!/usr/bin/env python3 + +# Load modules from $JUJU_CHARM_DIR/lib +import sys +sys.path.append('lib') + +from charms.layer import basic # noqa +basic.bootstrap_charm_deps() + +from charmhelpers.core import hookenv # noqa +hookenv.atstart(basic.init_config_states) +hookenv.atexit(basic.clear_config_states) + + +# This will load and run the appropriate @hook and other decorated +# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive, +# and $JUJU_CHARM_DIR/hooks/relations. +# +# See https://jujucharms.com/docs/stable/authors-charm-building +# for more information on this pattern. +from charms.reactive import main # noqa +main() diff --git a/kubernetes-control-plane/hooks/loadbalancer-relation-broken b/kubernetes-control-plane/hooks/loadbalancer-relation-broken new file mode 100755 index 0000000..9858c6b --- /dev/null +++ b/kubernetes-control-plane/hooks/loadbalancer-relation-broken @@ -0,0 +1,22 @@ +#!/usr/bin/env python3 + +# Load modules from $JUJU_CHARM_DIR/lib +import sys +sys.path.append('lib') + +from charms.layer import basic # noqa +basic.bootstrap_charm_deps() + +from charmhelpers.core import hookenv # noqa +hookenv.atstart(basic.init_config_states) +hookenv.atexit(basic.clear_config_states) + + +# This will load and run the appropriate @hook and other decorated +# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive, +# and $JUJU_CHARM_DIR/hooks/relations. +# +# See https://jujucharms.com/docs/stable/authors-charm-building +# for more information on this pattern. 
+from charms.reactive import main # noqa +main() diff --git a/kubernetes-control-plane/hooks/loadbalancer-relation-changed b/kubernetes-control-plane/hooks/loadbalancer-relation-changed new file mode 100755 index 0000000..9858c6b --- /dev/null +++ b/kubernetes-control-plane/hooks/loadbalancer-relation-changed @@ -0,0 +1,22 @@ +#!/usr/bin/env python3 + +# Load modules from $JUJU_CHARM_DIR/lib +import sys +sys.path.append('lib') + +from charms.layer import basic # noqa +basic.bootstrap_charm_deps() + +from charmhelpers.core import hookenv # noqa +hookenv.atstart(basic.init_config_states) +hookenv.atexit(basic.clear_config_states) + + +# This will load and run the appropriate @hook and other decorated +# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive, +# and $JUJU_CHARM_DIR/hooks/relations. +# +# See https://jujucharms.com/docs/stable/authors-charm-building +# for more information on this pattern. +from charms.reactive import main # noqa +main() diff --git a/kubernetes-control-plane/hooks/loadbalancer-relation-created b/kubernetes-control-plane/hooks/loadbalancer-relation-created new file mode 100755 index 0000000..9858c6b --- /dev/null +++ b/kubernetes-control-plane/hooks/loadbalancer-relation-created @@ -0,0 +1,22 @@ +#!/usr/bin/env python3 + +# Load modules from $JUJU_CHARM_DIR/lib +import sys +sys.path.append('lib') + +from charms.layer import basic # noqa +basic.bootstrap_charm_deps() + +from charmhelpers.core import hookenv # noqa +hookenv.atstart(basic.init_config_states) +hookenv.atexit(basic.clear_config_states) + + +# This will load and run the appropriate @hook and other decorated +# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive, +# and $JUJU_CHARM_DIR/hooks/relations. +# +# See https://jujucharms.com/docs/stable/authors-charm-building +# for more information on this pattern. 
+from charms.reactive import main # noqa +main() diff --git a/kubernetes-control-plane/hooks/loadbalancer-relation-departed b/kubernetes-control-plane/hooks/loadbalancer-relation-departed new file mode 100755 index 0000000..9858c6b --- /dev/null +++ b/kubernetes-control-plane/hooks/loadbalancer-relation-departed @@ -0,0 +1,22 @@ +#!/usr/bin/env python3 + +# Load modules from $JUJU_CHARM_DIR/lib +import sys +sys.path.append('lib') + +from charms.layer import basic # noqa +basic.bootstrap_charm_deps() + +from charmhelpers.core import hookenv # noqa +hookenv.atstart(basic.init_config_states) +hookenv.atexit(basic.clear_config_states) + + +# This will load and run the appropriate @hook and other decorated +# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive, +# and $JUJU_CHARM_DIR/hooks/relations. +# +# See https://jujucharms.com/docs/stable/authors-charm-building +# for more information on this pattern. +from charms.reactive import main # noqa +main() diff --git a/kubernetes-control-plane/hooks/loadbalancer-relation-joined b/kubernetes-control-plane/hooks/loadbalancer-relation-joined new file mode 100755 index 0000000..9858c6b --- /dev/null +++ b/kubernetes-control-plane/hooks/loadbalancer-relation-joined @@ -0,0 +1,22 @@ +#!/usr/bin/env python3 + +# Load modules from $JUJU_CHARM_DIR/lib +import sys +sys.path.append('lib') + +from charms.layer import basic # noqa +basic.bootstrap_charm_deps() + +from charmhelpers.core import hookenv # noqa +hookenv.atstart(basic.init_config_states) +hookenv.atexit(basic.clear_config_states) + + +# This will load and run the appropriate @hook and other decorated +# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive, +# and $JUJU_CHARM_DIR/hooks/relations. +# +# See https://jujucharms.com/docs/stable/authors-charm-building +# for more information on this pattern. 
+from charms.reactive import main # noqa +main() diff --git a/kubernetes-control-plane/hooks/nrpe-external-master-relation-broken b/kubernetes-control-plane/hooks/nrpe-external-master-relation-broken new file mode 100755 index 0000000..9858c6b --- /dev/null +++ b/kubernetes-control-plane/hooks/nrpe-external-master-relation-broken @@ -0,0 +1,22 @@ +#!/usr/bin/env python3 + +# Load modules from $JUJU_CHARM_DIR/lib +import sys +sys.path.append('lib') + +from charms.layer import basic # noqa +basic.bootstrap_charm_deps() + +from charmhelpers.core import hookenv # noqa +hookenv.atstart(basic.init_config_states) +hookenv.atexit(basic.clear_config_states) + + +# This will load and run the appropriate @hook and other decorated +# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive, +# and $JUJU_CHARM_DIR/hooks/relations. +# +# See https://jujucharms.com/docs/stable/authors-charm-building +# for more information on this pattern. +from charms.reactive import main # noqa +main() diff --git a/kubernetes-control-plane/hooks/nrpe-external-master-relation-changed b/kubernetes-control-plane/hooks/nrpe-external-master-relation-changed new file mode 100755 index 0000000..9858c6b --- /dev/null +++ b/kubernetes-control-plane/hooks/nrpe-external-master-relation-changed @@ -0,0 +1,22 @@ +#!/usr/bin/env python3 + +# Load modules from $JUJU_CHARM_DIR/lib +import sys +sys.path.append('lib') + +from charms.layer import basic # noqa +basic.bootstrap_charm_deps() + +from charmhelpers.core import hookenv # noqa +hookenv.atstart(basic.init_config_states) +hookenv.atexit(basic.clear_config_states) + + +# This will load and run the appropriate @hook and other decorated +# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive, +# and $JUJU_CHARM_DIR/hooks/relations. +# +# See https://jujucharms.com/docs/stable/authors-charm-building +# for more information on this pattern. 
+from charms.reactive import main # noqa +main() diff --git a/kubernetes-control-plane/hooks/nrpe-external-master-relation-created b/kubernetes-control-plane/hooks/nrpe-external-master-relation-created new file mode 100755 index 0000000..9858c6b --- /dev/null +++ b/kubernetes-control-plane/hooks/nrpe-external-master-relation-created @@ -0,0 +1,22 @@ +#!/usr/bin/env python3 + +# Load modules from $JUJU_CHARM_DIR/lib +import sys +sys.path.append('lib') + +from charms.layer import basic # noqa +basic.bootstrap_charm_deps() + +from charmhelpers.core import hookenv # noqa +hookenv.atstart(basic.init_config_states) +hookenv.atexit(basic.clear_config_states) + + +# This will load and run the appropriate @hook and other decorated +# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive, +# and $JUJU_CHARM_DIR/hooks/relations. +# +# See https://jujucharms.com/docs/stable/authors-charm-building +# for more information on this pattern. +from charms.reactive import main # noqa +main() diff --git a/kubernetes-control-plane/hooks/nrpe-external-master-relation-departed b/kubernetes-control-plane/hooks/nrpe-external-master-relation-departed new file mode 100755 index 0000000..9858c6b --- /dev/null +++ b/kubernetes-control-plane/hooks/nrpe-external-master-relation-departed @@ -0,0 +1,22 @@ +#!/usr/bin/env python3 + +# Load modules from $JUJU_CHARM_DIR/lib +import sys +sys.path.append('lib') + +from charms.layer import basic # noqa +basic.bootstrap_charm_deps() + +from charmhelpers.core import hookenv # noqa +hookenv.atstart(basic.init_config_states) +hookenv.atexit(basic.clear_config_states) + + +# This will load and run the appropriate @hook and other decorated +# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive, +# and $JUJU_CHARM_DIR/hooks/relations. +# +# See https://jujucharms.com/docs/stable/authors-charm-building +# for more information on this pattern. 
+from charms.reactive import main # noqa +main() diff --git a/kubernetes-control-plane/hooks/nrpe-external-master-relation-joined b/kubernetes-control-plane/hooks/nrpe-external-master-relation-joined new file mode 100755 index 0000000..9858c6b --- /dev/null +++ b/kubernetes-control-plane/hooks/nrpe-external-master-relation-joined @@ -0,0 +1,22 @@ +#!/usr/bin/env python3 + +# Load modules from $JUJU_CHARM_DIR/lib +import sys +sys.path.append('lib') + +from charms.layer import basic # noqa +basic.bootstrap_charm_deps() + +from charmhelpers.core import hookenv # noqa +hookenv.atstart(basic.init_config_states) +hookenv.atexit(basic.clear_config_states) + + +# This will load and run the appropriate @hook and other decorated +# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive, +# and $JUJU_CHARM_DIR/hooks/relations. +# +# See https://jujucharms.com/docs/stable/authors-charm-building +# for more information on this pattern. +from charms.reactive import main # noqa +main() diff --git a/kubernetes-control-plane/hooks/openstack-relation-broken b/kubernetes-control-plane/hooks/openstack-relation-broken new file mode 100755 index 0000000..9858c6b --- /dev/null +++ b/kubernetes-control-plane/hooks/openstack-relation-broken @@ -0,0 +1,22 @@ +#!/usr/bin/env python3 + +# Load modules from $JUJU_CHARM_DIR/lib +import sys +sys.path.append('lib') + +from charms.layer import basic # noqa +basic.bootstrap_charm_deps() + +from charmhelpers.core import hookenv # noqa +hookenv.atstart(basic.init_config_states) +hookenv.atexit(basic.clear_config_states) + + +# This will load and run the appropriate @hook and other decorated +# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive, +# and $JUJU_CHARM_DIR/hooks/relations. +# +# See https://jujucharms.com/docs/stable/authors-charm-building +# for more information on this pattern. 
+from charms.reactive import main # noqa +main() diff --git a/kubernetes-control-plane/hooks/openstack-relation-changed b/kubernetes-control-plane/hooks/openstack-relation-changed new file mode 100755 index 0000000..9858c6b --- /dev/null +++ b/kubernetes-control-plane/hooks/openstack-relation-changed @@ -0,0 +1,22 @@ +#!/usr/bin/env python3 + +# Load modules from $JUJU_CHARM_DIR/lib +import sys +sys.path.append('lib') + +from charms.layer import basic # noqa +basic.bootstrap_charm_deps() + +from charmhelpers.core import hookenv # noqa +hookenv.atstart(basic.init_config_states) +hookenv.atexit(basic.clear_config_states) + + +# This will load and run the appropriate @hook and other decorated +# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive, +# and $JUJU_CHARM_DIR/hooks/relations. +# +# See https://jujucharms.com/docs/stable/authors-charm-building +# for more information on this pattern. +from charms.reactive import main # noqa +main() diff --git a/kubernetes-control-plane/hooks/openstack-relation-created b/kubernetes-control-plane/hooks/openstack-relation-created new file mode 100755 index 0000000..9858c6b --- /dev/null +++ b/kubernetes-control-plane/hooks/openstack-relation-created @@ -0,0 +1,22 @@ +#!/usr/bin/env python3 + +# Load modules from $JUJU_CHARM_DIR/lib +import sys +sys.path.append('lib') + +from charms.layer import basic # noqa +basic.bootstrap_charm_deps() + +from charmhelpers.core import hookenv # noqa +hookenv.atstart(basic.init_config_states) +hookenv.atexit(basic.clear_config_states) + + +# This will load and run the appropriate @hook and other decorated +# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive, +# and $JUJU_CHARM_DIR/hooks/relations. +# +# See https://jujucharms.com/docs/stable/authors-charm-building +# for more information on this pattern. 
+from charms.reactive import main # noqa +main() diff --git a/kubernetes-control-plane/hooks/openstack-relation-departed b/kubernetes-control-plane/hooks/openstack-relation-departed new file mode 100755 index 0000000..9858c6b --- /dev/null +++ b/kubernetes-control-plane/hooks/openstack-relation-departed @@ -0,0 +1,22 @@ +#!/usr/bin/env python3 + +# Load modules from $JUJU_CHARM_DIR/lib +import sys +sys.path.append('lib') + +from charms.layer import basic # noqa +basic.bootstrap_charm_deps() + +from charmhelpers.core import hookenv # noqa +hookenv.atstart(basic.init_config_states) +hookenv.atexit(basic.clear_config_states) + + +# This will load and run the appropriate @hook and other decorated +# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive, +# and $JUJU_CHARM_DIR/hooks/relations. +# +# See https://jujucharms.com/docs/stable/authors-charm-building +# for more information on this pattern. +from charms.reactive import main # noqa +main() diff --git a/kubernetes-control-plane/hooks/openstack-relation-joined b/kubernetes-control-plane/hooks/openstack-relation-joined new file mode 100755 index 0000000..9858c6b --- /dev/null +++ b/kubernetes-control-plane/hooks/openstack-relation-joined @@ -0,0 +1,22 @@ +#!/usr/bin/env python3 + +# Load modules from $JUJU_CHARM_DIR/lib +import sys +sys.path.append('lib') + +from charms.layer import basic # noqa +basic.bootstrap_charm_deps() + +from charmhelpers.core import hookenv # noqa +hookenv.atstart(basic.init_config_states) +hookenv.atexit(basic.clear_config_states) + + +# This will load and run the appropriate @hook and other decorated +# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive, +# and $JUJU_CHARM_DIR/hooks/relations. +# +# See https://jujucharms.com/docs/stable/authors-charm-building +# for more information on this pattern. 
+from charms.reactive import main # noqa +main() diff --git a/kubernetes-control-plane/hooks/post-series-upgrade b/kubernetes-control-plane/hooks/post-series-upgrade new file mode 100755 index 0000000..9858c6b --- /dev/null +++ b/kubernetes-control-plane/hooks/post-series-upgrade @@ -0,0 +1,22 @@ +#!/usr/bin/env python3 + +# Load modules from $JUJU_CHARM_DIR/lib +import sys +sys.path.append('lib') + +from charms.layer import basic # noqa +basic.bootstrap_charm_deps() + +from charmhelpers.core import hookenv # noqa +hookenv.atstart(basic.init_config_states) +hookenv.atexit(basic.clear_config_states) + + +# This will load and run the appropriate @hook and other decorated +# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive, +# and $JUJU_CHARM_DIR/hooks/relations. +# +# See https://jujucharms.com/docs/stable/authors-charm-building +# for more information on this pattern. +from charms.reactive import main # noqa +main() diff --git a/kubernetes-control-plane/hooks/pre-series-upgrade b/kubernetes-control-plane/hooks/pre-series-upgrade new file mode 100755 index 0000000..9858c6b --- /dev/null +++ b/kubernetes-control-plane/hooks/pre-series-upgrade @@ -0,0 +1,22 @@ +#!/usr/bin/env python3 + +# Load modules from $JUJU_CHARM_DIR/lib +import sys +sys.path.append('lib') + +from charms.layer import basic # noqa +basic.bootstrap_charm_deps() + +from charmhelpers.core import hookenv # noqa +hookenv.atstart(basic.init_config_states) +hookenv.atexit(basic.clear_config_states) + + +# This will load and run the appropriate @hook and other decorated +# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive, +# and $JUJU_CHARM_DIR/hooks/relations. +# +# See https://jujucharms.com/docs/stable/authors-charm-building +# for more information on this pattern. 
+from charms.reactive import main # noqa +main() diff --git a/kubernetes-control-plane/hooks/prometheus-relation-broken b/kubernetes-control-plane/hooks/prometheus-relation-broken new file mode 100755 index 0000000..9858c6b --- /dev/null +++ b/kubernetes-control-plane/hooks/prometheus-relation-broken @@ -0,0 +1,22 @@ +#!/usr/bin/env python3 + +# Load modules from $JUJU_CHARM_DIR/lib +import sys +sys.path.append('lib') + +from charms.layer import basic # noqa +basic.bootstrap_charm_deps() + +from charmhelpers.core import hookenv # noqa +hookenv.atstart(basic.init_config_states) +hookenv.atexit(basic.clear_config_states) + + +# This will load and run the appropriate @hook and other decorated +# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive, +# and $JUJU_CHARM_DIR/hooks/relations. +# +# See https://jujucharms.com/docs/stable/authors-charm-building +# for more information on this pattern. +from charms.reactive import main # noqa +main() diff --git a/kubernetes-control-plane/hooks/prometheus-relation-changed b/kubernetes-control-plane/hooks/prometheus-relation-changed new file mode 100755 index 0000000..9858c6b --- /dev/null +++ b/kubernetes-control-plane/hooks/prometheus-relation-changed @@ -0,0 +1,22 @@ +#!/usr/bin/env python3 + +# Load modules from $JUJU_CHARM_DIR/lib +import sys +sys.path.append('lib') + +from charms.layer import basic # noqa +basic.bootstrap_charm_deps() + +from charmhelpers.core import hookenv # noqa +hookenv.atstart(basic.init_config_states) +hookenv.atexit(basic.clear_config_states) + + +# This will load and run the appropriate @hook and other decorated +# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive, +# and $JUJU_CHARM_DIR/hooks/relations. +# +# See https://jujucharms.com/docs/stable/authors-charm-building +# for more information on this pattern. 
+from charms.reactive import main # noqa +main() diff --git a/kubernetes-control-plane/hooks/prometheus-relation-created b/kubernetes-control-plane/hooks/prometheus-relation-created new file mode 100755 index 0000000..9858c6b --- /dev/null +++ b/kubernetes-control-plane/hooks/prometheus-relation-created @@ -0,0 +1,22 @@ +#!/usr/bin/env python3 + +# Load modules from $JUJU_CHARM_DIR/lib +import sys +sys.path.append('lib') + +from charms.layer import basic # noqa +basic.bootstrap_charm_deps() + +from charmhelpers.core import hookenv # noqa +hookenv.atstart(basic.init_config_states) +hookenv.atexit(basic.clear_config_states) + + +# This will load and run the appropriate @hook and other decorated +# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive, +# and $JUJU_CHARM_DIR/hooks/relations. +# +# See https://jujucharms.com/docs/stable/authors-charm-building +# for more information on this pattern. +from charms.reactive import main # noqa +main() diff --git a/kubernetes-control-plane/hooks/prometheus-relation-departed b/kubernetes-control-plane/hooks/prometheus-relation-departed new file mode 100755 index 0000000..9858c6b --- /dev/null +++ b/kubernetes-control-plane/hooks/prometheus-relation-departed @@ -0,0 +1,22 @@ +#!/usr/bin/env python3 + +# Load modules from $JUJU_CHARM_DIR/lib +import sys +sys.path.append('lib') + +from charms.layer import basic # noqa +basic.bootstrap_charm_deps() + +from charmhelpers.core import hookenv # noqa +hookenv.atstart(basic.init_config_states) +hookenv.atexit(basic.clear_config_states) + + +# This will load and run the appropriate @hook and other decorated +# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive, +# and $JUJU_CHARM_DIR/hooks/relations. +# +# See https://jujucharms.com/docs/stable/authors-charm-building +# for more information on this pattern. 
+from charms.reactive import main # noqa +main() diff --git a/kubernetes-control-plane/hooks/prometheus-relation-joined b/kubernetes-control-plane/hooks/prometheus-relation-joined new file mode 100755 index 0000000..9858c6b --- /dev/null +++ b/kubernetes-control-plane/hooks/prometheus-relation-joined @@ -0,0 +1,22 @@ +#!/usr/bin/env python3 + +# Load modules from $JUJU_CHARM_DIR/lib +import sys +sys.path.append('lib') + +from charms.layer import basic # noqa +basic.bootstrap_charm_deps() + +from charmhelpers.core import hookenv # noqa +hookenv.atstart(basic.init_config_states) +hookenv.atexit(basic.clear_config_states) + + +# This will load and run the appropriate @hook and other decorated +# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive, +# and $JUJU_CHARM_DIR/hooks/relations. +# +# See https://jujucharms.com/docs/stable/authors-charm-building +# for more information on this pattern. +from charms.reactive import main # noqa +main() diff --git a/kubernetes-control-plane/hooks/relations/aws-iam/LICENSE b/kubernetes-control-plane/hooks/relations/aws-iam/LICENSE new file mode 100644 index 0000000..261eeb9 --- /dev/null +++ b/kubernetes-control-plane/hooks/relations/aws-iam/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. 
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. 
You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. 
Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/kubernetes-control-plane/hooks/relations/aws-iam/README.md b/kubernetes-control-plane/hooks/relations/aws-iam/README.md new file mode 100644 index 0000000..7d6989b --- /dev/null +++ b/kubernetes-control-plane/hooks/relations/aws-iam/README.md @@ -0,0 +1,47 @@ +# aws-iam interface + +This interface provides communication between +[kubernetes-control-plane](https://github.com/charmed-kubernetes/charm-kubernetes-master) +and [aws-iam](https://github.com/charmed-kubernetes/charm-aws-iam) +subordinate. + +It allows the requires side, aws-iam, to know when the api server is +up and available and to tell the api server when the webhook.yaml +file is written so that it may restart and use the webhook. + +## Provides (kubernetes-control-plane side) + +### States + * `aws-iam.available` + Indicates that there are one or more units on the other side + of the relation + * `aws-iam.ready` + Indicates that the webhook status has been set. This is used + to indicate it is time to restart the API server to pick up + the webhook config on the Kubernetes side. +### Methods + * `get_cluster_id` + The AWS-IAM charm generates a random cluster ID for the cluster + that is needed in the kubectl configuration file. This is + retrieved from the relation here. + * `set_api_server_status` + This is set to indicate if the Kubernetes API server is up and + ready for connections. 
This is needed because the aws-iam charm + needs to set up the service it will use in order to add the IP + to the extra sans in the ssl certificate used to secure + communication between the control-plane and the service. + +## Requires (aws-iam side) + +### States + * `aws-iam.available` + Indicates that there are one or more units on the other + side of the relation +### Methods + * `set_cluster_id` + The AWS-IAM charm generates a random cluster ID for the + cluster that is needed in the kubectl configuration file. + This is passed over the relation here. + * `set_webhook_status` + Called to set that the webhook configuration has been written + to disk. \ No newline at end of file diff --git a/kubernetes-control-plane/hooks/relations/aws-iam/__init__.py b/kubernetes-control-plane/hooks/relations/aws-iam/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/kubernetes-control-plane/hooks/relations/aws-iam/interface.yaml b/kubernetes-control-plane/hooks/relations/aws-iam/interface.yaml new file mode 100644 index 0000000..5a0e0dc --- /dev/null +++ b/kubernetes-control-plane/hooks/relations/aws-iam/interface.yaml @@ -0,0 +1,4 @@ +name: aws-iam +summary: Used to integrate AWS IAM into kubernetes-control-plane charm +version: 1 +maintainer: "Mike Wilson " diff --git a/kubernetes-control-plane/hooks/relations/aws-iam/provides.py b/kubernetes-control-plane/hooks/relations/aws-iam/provides.py new file mode 100644 index 0000000..5e464fe --- /dev/null +++ b/kubernetes-control-plane/hooks/relations/aws-iam/provides.py @@ -0,0 +1,35 @@ +from charms.reactive import Endpoint +from charms.reactive import toggle_flag + + +# kubernetes-control-plane side +class AWSIAMProvides(Endpoint): + + # called automagically before any decorated handlers, but after + # flags are set + def manage_flags(self): + # we want to make sure all the templates and stuff are written + # and pods started before we switch the API server over to + # use the webhook. 
This is critical for the webhook template + # since the API server will crash if the file isn't there. + toggle_flag(self.expand_name('endpoint.{endpoint_name}.available'), + self.is_joined) + toggle_flag(self.expand_name('endpoint.{endpoint_name}.ready'), + self.is_joined and all(unit.received['webhook_status'] + for unit in self.all_joined_units)) + + def get_cluster_id(self): + """ Gets randomly generated cluster ID. """ + + return self.all_joined_units.received['cluster_id'] + + def set_api_server_status(self, status): + """ Sets the status of the Kubernetes API server. + + Args: + status: Boolean value. True when API server is started + and ready to receive requests. + """ + + for relation in self.relations: + relation.to_publish['api_server_state'] = status diff --git a/kubernetes-control-plane/hooks/relations/aws-iam/requires.py b/kubernetes-control-plane/hooks/relations/aws-iam/requires.py new file mode 100644 index 0000000..960c265 --- /dev/null +++ b/kubernetes-control-plane/hooks/relations/aws-iam/requires.py @@ -0,0 +1,36 @@ +from charms.reactive import Endpoint +from charms.reactive import toggle_flag + + +# aws-iam side +class AWSIAMRequires(Endpoint): + + # called automagically before any decorated handlers, but after + # flags are set + def manage_flags(self): + # kubectl is used to deploy the webhook pod. This means that + # the api server needs to be up in order to do that. So we + # wait until the cluster is up before trying. + toggle_flag(self.expand_name('endpoint.{endpoint_name}.available'), + self.is_joined and all(unit.received['api_server_state'] + for unit in self.all_joined_units)) + + def set_webhook_status(self, status): + """ Sets the status of the webhook configuration file. + + Args: + status: Boolean value. True when webhook configuration has been + written to disk and the API server can be configured to + pick that up and restart. 
+ """ + for relation in self.relations: + relation.to_publish['webhook_status'] = status + + def set_cluster_id(self, id): + """ Sets the randomly generated cluster id. The cluster ID is just + a unique value to identify this cluster for AWS-IAM. It is needed + by the API server for the kubectl configuration file. + """ + + for relation in self.relations: + relation.to_publish['cluster_id'] = id diff --git a/kubernetes-control-plane/hooks/relations/aws-integration/.gitignore b/kubernetes-control-plane/hooks/relations/aws-integration/.gitignore new file mode 100644 index 0000000..ba1431e --- /dev/null +++ b/kubernetes-control-plane/hooks/relations/aws-integration/.gitignore @@ -0,0 +1,2 @@ +.tox +__pycache__ diff --git a/kubernetes-control-plane/hooks/relations/aws-integration/LICENSE b/kubernetes-control-plane/hooks/relations/aws-integration/LICENSE new file mode 100644 index 0000000..d645695 --- /dev/null +++ b/kubernetes-control-plane/hooks/relations/aws-integration/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. 
+ + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. 
You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. 
Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/kubernetes-control-plane/hooks/relations/aws-integration/README.md b/kubernetes-control-plane/hooks/relations/aws-integration/README.md new file mode 100644 index 0000000..59abfcf --- /dev/null +++ b/kubernetes-control-plane/hooks/relations/aws-integration/README.md @@ -0,0 +1,28 @@ +# Overview + +This layer encapsulates the `aws-integration` interface communciation protocol +and provides an API for charms on either side of relations using this +interface. 
+ +## Usage + +In your charm's `layer.yaml`, ensure that `interface:aws-integration` is +included in the `includes` section: + +```yaml +includes: ['layer:basic', 'interface:aws-integration'] +``` + +And in your charm's `metadata.yaml`, ensure that a relation endpoint is defined +using the `aws-integration` interface protocol: + +```yaml +requires: + aws: + interface: aws-integration +``` + +For documentation on how to use the API for this interface, see: + +* [Requires API documentation](docs/requires.md) +* [Provides API documentation](docs/provides.md) (this will only be used by the aws-integrator charm) diff --git a/kubernetes-control-plane/hooks/relations/aws-integration/__init__.py b/kubernetes-control-plane/hooks/relations/aws-integration/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/kubernetes-control-plane/hooks/relations/aws-integration/copyright b/kubernetes-control-plane/hooks/relations/aws-integration/copyright new file mode 100644 index 0000000..a91bdf1 --- /dev/null +++ b/kubernetes-control-plane/hooks/relations/aws-integration/copyright @@ -0,0 +1,16 @@ +Format: http://dep.debian.net/deps/dep5/ + +Files: * +Copyright: Copyright 2018, Canonical Ltd., All Rights Reserved. +License: Apache License 2.0 + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + . + http://www.apache.org/licenses/LICENSE-2.0 + . + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/kubernetes-control-plane/hooks/relations/aws-integration/docs/provides.md b/kubernetes-control-plane/hooks/relations/aws-integration/docs/provides.md new file mode 100644 index 0000000..57ecb25 --- /dev/null +++ b/kubernetes-control-plane/hooks/relations/aws-integration/docs/provides.md @@ -0,0 +1,179 @@ +

provides

+ + +This is the provides side of the interface layer, for use only by the AWS +integrator charm itself. + +The flags that are set by the provides side of this interface are: + +* **`endpoint.{endpoint_name}.requested`** This flag is set when there is + a new or updated request by a remote unit for AWS integration features. + The AWS integration charm should then iterate over each request, perform + whatever actions are necessary to satisfy those requests, and then mark + them as complete. + +

AWSIntegrationProvides

+ +```python +AWSIntegrationProvides(self, endpoint_name, relation_ids=None) +``` + +Example usage: + +```python +from charms.reactive import when, endpoint_from_flag +from charms import layer + +@when('endpoint.aws.requested') +def handle_requests(): + aws = endpoint_from_flag('endpoint.aws.requested') + for request in aws.requests: + if request.instance_tags: + tag_instance( + request.instance_id, + request.region, + request.instance_tags) + if request.requested_load_balancer_management: + layer.aws.enable_load_balancer_management( + request.application_name, + request.instance_id, + request.region, + ) + # ... + request.mark_completed() +``` + +

application_names

+ + +Set of names of all applications that are still joined. + +

requests

+ + +A list of the new or updated `IntegrationRequests` that +have been made. + +

unit_instances

+ + +Mapping of unit names to instance IDs and regions for all joined units. + +

IntegrationRequest

+ +```python +IntegrationRequest(self, unit) +``` + +A request for integration from a single remote unit. + +

application_name

+ + +The name of the application making the request. + +

changed

+ + +Whether this request has changed since the last time it was +marked completed. + +

hash

+ + +SHA hash of the data for this request. + +

instance_id

+ + +The instance ID reported for this request. + +

instance_security_group_tags

+ + +Mapping of tag names to values (or `None`) to apply to this instance's +machine-specific security group (firewall). + +

instance_subnet_tags

+ + +Mapping of tag names to values (or `None`) to apply to this instance's +subnet. + +

instance_tags

+ + +Mapping of tag names to values (or `None`) to apply to this instance. + +

object_storage_access_patterns

+ + +List of patterns to which to restrict object storage access. + +

object_storage_management_patterns

+ + +List of patterns to which to restrict object storage management. + +

region

+ + +The region reported for this request. + +

requested_block_storage_management

+ + +Flag indicating whether block storage management was requested. + +

requested_dns_management

+ + +Flag indicating whether DNS management was requested. + +

requested_instance_inspection

+ + +Flag indicating whether the ability to inspect instances was requested. + +

requested_load_balancer_management

+ + +Flag indicating whether load balancer management was requested. + +

requested_network_management

+ + +Flag indicating whether the ability to manage networking (firewalls, +subnets, etc) was requested. + +

requested_object_storage_access

+ + +Flag indicating whether object storage access was requested. + +

requested_object_storage_management

+ + +Flag indicating whether object storage management was requested. + +

unit_name

+ + +The name of the unit making the request. + +

mark_completed

+ +```python +IntegrationRequest.mark_completed(self) +``` + +Mark this request as having been completed. + +

clear

+ +```python +IntegrationRequest.clear(self) +``` + +Clear this request's cached data. + diff --git a/kubernetes-control-plane/hooks/relations/aws-integration/docs/requires.md b/kubernetes-control-plane/hooks/relations/aws-integration/docs/requires.md new file mode 100644 index 0000000..41607f4 --- /dev/null +++ b/kubernetes-control-plane/hooks/relations/aws-integration/docs/requires.md @@ -0,0 +1,178 @@ +

requires

+ + +This is the requires side of the interface layer, for use in charms that +wish to request integration with AWS native features. The integration will +be provided by the AWS integration charm, which allows the requiring charm +to not require cloud credentials itself and not have a lot of AWS specific +API code. + +The flags that are set by the requires side of this interface are: + +* **`endpoint.{endpoint_name}.joined`** This flag is set when the relation + has been joined, and the charm should then use the methods documented below + to request specific AWS features. This flag is automatically removed if + the relation is broken. It should not be removed by the charm. + +* **`endpoint.{endpoint_name}.ready`** This flag is set once the requested + features have been enabled for the AWS instance on which the charm is + running. This flag is automatically removed if new integration features + are requested. It should not be removed by the charm. + +

AWSIntegrationRequires

+ +```python +AWSIntegrationRequires(self, *args, **kwargs) +``` + +Example usage: + +```python +from charms.reactive import when, endpoint_from_flag + +@when('endpoint.aws.joined') +def request_aws_integration(): + aws = endpoint_from_flag('endpoint.aws.joined') + aws.request_instance_tags({ + 'tag1': 'value1', + 'tag2': None, + }) + aws.request_load_balancer_management() + # ... + +@when('endpoint.aws.ready') +def aws_integration_ready(): + update_config_enable_aws() +``` + +

instance_id

+ + +This unit's instance-id. + +

region

+ + +The region this unit is in. + +

tag_instance

+ +```python +AWSIntegrationRequires.tag_instance(self, tags) +``` + +Request that the given tags be applied to this instance. + +__Parameters__ + +- __`tags` (dict)__: Mapping of tag names to values (or `None`). + +

tag_instance_security_group

+ +```python +AWSIntegrationRequires.tag_instance_security_group(self, tags) +``` + +Request that the given tags be applied to this instance's +machine-specific security group (firewall) created by Juju. + +__Parameters__ + +- __`tags` (dict)__: Mapping of tag names to values (or `None`). + +

tag_instance_subnet

+ +```python +AWSIntegrationRequires.tag_instance_subnet(self, tags) +``` + +Request that the given tags be applied to this instance's subnet. + +__Parameters__ + +- __`tags` (dict)__: Mapping of tag names to values (or `None`). + +

enable_acm_readonly

+ +```python +AWSIntegrationRequires.enable_acm_readonly(self) +``` + +Request readonly for ACM. + +

enable_acm_fullaccess

+ +```python +AWSIntegrationRequires.enable_acm_fullaccess(self) +``` + +Request fullaccess for ACM. + +

enable_instance_inspection

+ +```python +AWSIntegrationRequires.enable_instance_inspection(self) +``` + +Request the ability to inspect instances. + +

enable_network_management

+ +```python +AWSIntegrationRequires.enable_network_management(self) +``` + +Request the ability to manage networking (firewalls, subnets, etc). + +

enable_load_balancer_management

+ +```python +AWSIntegrationRequires.enable_load_balancer_management(self) +``` + +Request the ability to manage load balancers. + +

enable_block_storage_management

+ +```python +AWSIntegrationRequires.enable_block_storage_management(self) +``` + +Request the ability to manage block storage. + +

enable_dns_management

+ +```python +AWSIntegrationRequires.enable_dns_management(self) +``` + +Request the ability to manage DNS. + +

enable_object_storage_access

+ +```python +AWSIntegrationRequires.enable_object_storage_access(self, patterns=None) +``` + +Request the ability to access object storage. + +__Parameters__ + +- __`patterns` (list)__: If given, restrict access to the resources matching + the patterns. If patterns do not start with the S3 ARN prefix +- __(`arn__:aws:s3:::`), it will be prepended. + +

enable_object_storage_management

+ +```python +AWSIntegrationRequires.enable_object_storage_management(self, patterns=None) +``` + +Request the ability to manage object storage. + +__Parameters__ + +- __`patterns` (list)__: If given, restrict management to the resources + matching the patterns. If patterns do not start with the S3 ARN +- __prefix (`arn__:aws:s3:::`), it will be prepended. + diff --git a/kubernetes-control-plane/hooks/relations/aws-integration/interface.yaml b/kubernetes-control-plane/hooks/relations/aws-integration/interface.yaml new file mode 100644 index 0000000..fe3da6d --- /dev/null +++ b/kubernetes-control-plane/hooks/relations/aws-integration/interface.yaml @@ -0,0 +1,4 @@ +name: aws-integration +summary: Interface for connecting to the AWS integrator charm. +version: 1 +maintainer: Cory Johns diff --git a/kubernetes-control-plane/hooks/relations/aws-integration/make_docs b/kubernetes-control-plane/hooks/relations/aws-integration/make_docs new file mode 100644 index 0000000..72b69c2 --- /dev/null +++ b/kubernetes-control-plane/hooks/relations/aws-integration/make_docs @@ -0,0 +1,20 @@ +#!.tox/py3/bin/python + +import sys +from shutil import rmtree +from unittest.mock import patch + +import pydocmd.__main__ + + +with patch('charmhelpers.core.hookenv.metadata') as metadata: + metadata.return_value = { + 'requires': {'aws': {'interface': 'aws-integration'}}, + 'provides': {'aws': {'interface': 'aws-integration'}}, + } + sys.path.insert(0, '.') + print(sys.argv) + if len(sys.argv) == 1: + sys.argv.extend(['build']) + pydocmd.__main__.main() + rmtree('_build') diff --git a/kubernetes-control-plane/hooks/relations/aws-integration/provides.py b/kubernetes-control-plane/hooks/relations/aws-integration/provides.py new file mode 100644 index 0000000..ae94211 --- /dev/null +++ b/kubernetes-control-plane/hooks/relations/aws-integration/provides.py @@ -0,0 +1,288 @@ +""" +This is the provides side of the interface layer, for use only by the AWS +integrator charm itself. 
+ +The flags that are set by the provides side of this interface are: + +* **`endpoint.{endpoint_name}.requested`** This flag is set when there is + a new or updated request by a remote unit for AWS integration features. + The AWS integration charm should then iterate over each request, perform + whatever actions are necessary to satisfy those requests, and then mark + them as complete. +""" + +import json +from hashlib import sha256 + +from charmhelpers.core import unitdata + +from charms.reactive import Endpoint +from charms.reactive import when +from charms.reactive import toggle_flag, clear_flag + + +class AWSIntegrationProvides(Endpoint): + """ + Example usage: + + ```python + from charms.reactive import when, endpoint_from_flag + from charms import layer + + @when('endpoint.aws.requested') + def handle_requests(): + aws = endpoint_from_flag('endpoint.aws.requested') + for request in aws.requests: + if request.instance_tags: + tag_instance( + request.instance_id, + request.region, + request.instance_tags) + if request.requested_load_balancer_management: + layer.aws.enable_load_balancer_management( + request.application_name, + request.instance_id, + request.region, + ) + # ... + request.mark_completed() + ``` + """ + + @when('endpoint.{endpoint_name}.changed') + def check_requests(self): + requests = self.requests + toggle_flag(self.expand_name('requested'), len(requests) > 0) + clear_flag(self.expand_name('changed')) + + @when('endpoint.{endpoint_name}.departed') + def cleanup(self): + for unit in self.all_departed_units: + request = IntegrationRequest(unit) + request.clear() + self.all_departed_units.clear() + clear_flag(self.expand_name('departed')) + + @property + def requests(self): + """ + A list of the new or updated #IntegrationRequests that + have been made. 
+ """ + return [request for request in self.all_requests if request.changed] + + @property + def all_requests(self): + """ + A list of all the #IntegrationRequests that have been made, + even if unchanged. + """ + return [IntegrationRequest(unit) for unit in self.all_joined_units] + + @property + def application_names(self): + """ + Set of names of all applications that are still joined. + """ + return {unit.application_name for unit in self.all_joined_units} + + @property + def unit_instances(self): + """ + Mapping of unit names to instance IDs and regions for all joined units. + """ + return { + unit.unit_name: { + 'instance-id': unit.received['instance-id'], + 'region': unit.received['region'], + } for unit in self.all_joined_units + } + + +class IntegrationRequest: + """ + A request for integration from a single remote unit. + """ + def __init__(self, unit): + self._unit = unit + self._hash = sha256(json.dumps(dict(unit.received), + sort_keys=True).encode('utf8') + ).hexdigest() + + @property + def hash(self): + """ + SHA hash of the data for this request. + """ + return self._hash + + @property + def _hash_key(self): + endpoint = self._unit.relation.endpoint + return endpoint.expand_name('request.{}'.format(self.instance_id)) + + @property + def changed(self): + """ + Whether this request has changed since the last time it was + marked completed. + """ + if not (self.instance_id and self._requested): + return False + saved_hash = unitdata.kv().get(self._hash_key) + result = saved_hash != self.hash + return result + + def mark_completed(self): + """ + Mark this request as having been completed. + """ + completed = self._unit.relation.to_publish.get('completed', {}) + completed[self.instance_id] = self.hash + unitdata.kv().set(self._hash_key, self.hash) + self._unit.relation.to_publish['completed'] = completed + + def clear(self): + """ + Clear this request's cached data. 
+ """ + unitdata.kv().unset(self._hash_key) + + @property + def unit_name(self): + """ + The name of the unit making the request. + """ + return self._unit.unit_name + + @property + def application_name(self): + """ + The name of the application making the request. + """ + return self._unit.application_name + + @property + def _requested(self): + return self._unit.received['requested'] + + @property + def instance_id(self): + """ + The instance ID reported for this request. + """ + return self._unit.received['instance-id'] + + @property + def region(self): + """ + The region reported for this request. + """ + return self._unit.received['region'] + + @property + def instance_tags(self): + """ + Mapping of tag names to values (or `None`) to apply to this instance. + """ + # uses dict() here to make a copy, just to be safe + return dict(self._unit.received.get('instance-tags', {})) + + @property + def instance_security_group_tags(self): + """ + Mapping of tag names to values (or `None`) to apply to this instance's + machine-specific security group (firewall). + """ + # uses dict() here to make a copy, just to be safe + return dict(self._unit.received.get('instance-security-group-tags', + {})) + + @property + def instance_subnet_tags(self): + """ + Mapping of tag names to values (or `None`) to apply to this instance's + subnet. + """ + # uses dict() here to make a copy, just to be safe + return dict(self._unit.received.get('instance-subnet-tags', {})) + + @property + def requested_instance_inspection(self): + """ + Flag indicating whether the ability to inspect instances was requested. + """ + return bool(self._unit.received['enable-instance-inspection']) + + @property + def requested_acm_readonly(self): + """ + Flag indicating whether acm readonly was requested. + """ + return bool(self._unit.received['enable-acm-readonly']) + + @property + def requested_acm_fullaccess(self): + """ + Flag indicating whether acm fullaccess was requested. 
+ """ + return bool(self._unit.received['enable-acm-fullaccess']) + + @property + def requested_network_management(self): + """ + Flag indicating whether the ability to manage networking (firewalls, + subnets, etc) was requested. + """ + return bool(self._unit.received['enable-network-management']) + + @property + def requested_load_balancer_management(self): + """ + Flag indicating whether load balancer management was requested. + """ + return bool(self._unit.received['enable-load-balancer-management']) + + @property + def requested_block_storage_management(self): + """ + Flag indicating whether block storage management was requested. + """ + return bool(self._unit.received['enable-block-storage-management']) + + @property + def requested_dns_management(self): + """ + Flag indicating whether DNS management was requested. + """ + return bool(self._unit.received['enable-dns-management']) + + @property + def requested_object_storage_access(self): + """ + Flag indicating whether object storage access was requested. + """ + return bool(self._unit.received['enable-object-storage-access']) + + @property + def object_storage_access_patterns(self): + """ + List of patterns to which to restrict object storage access. + """ + return list( + self._unit.received['object-storage-access-patterns'] or []) + + @property + def requested_object_storage_management(self): + """ + Flag indicating whether object storage management was requested. + """ + return bool(self._unit.received['enable-object-storage-management']) + + @property + def object_storage_management_patterns(self): + """ + List of patterns to which to restrict object storage management. 
+ """ + return list( + self._unit.received['object-storage-management-patterns'] or []) diff --git a/kubernetes-control-plane/hooks/relations/aws-integration/pydocmd.yml b/kubernetes-control-plane/hooks/relations/aws-integration/pydocmd.yml new file mode 100644 index 0000000..70a2e75 --- /dev/null +++ b/kubernetes-control-plane/hooks/relations/aws-integration/pydocmd.yml @@ -0,0 +1,16 @@ +site_name: 'AWS Integration Interface' + +generate: + - requires.md: + - requires + - requires.AWSIntegrationRequires+ + - provides.md: + - provides + - provides.AWSIntegrationProvides+ + - provides.IntegrationRequest+ + +pages: + - Requires: requires.md + - Provides: provides.md + +gens_dir: docs diff --git a/kubernetes-control-plane/hooks/relations/aws-integration/requires.py b/kubernetes-control-plane/hooks/relations/aws-integration/requires.py new file mode 100644 index 0000000..c457e02 --- /dev/null +++ b/kubernetes-control-plane/hooks/relations/aws-integration/requires.py @@ -0,0 +1,262 @@ +""" +This is the requires side of the interface layer, for use in charms that +wish to request integration with AWS native features. The integration will +be provided by the AWS integration charm, which allows the requiring charm +to not require cloud credentials itself and not have a lot of AWS specific +API code. + +The flags that are set by the requires side of this interface are: + +* **`endpoint.{endpoint_name}.joined`** This flag is set when the relation + has been joined, and the charm should then use the methods documented below + to request specific AWS features. This flag is automatically removed if + the relation is broken. It should not be removed by the charm. + +* **`endpoint.{endpoint_name}.ready`** This flag is set once the requested + features have been enabled for the AWS instance on which the charm is + running. This flag is automatically removed if new integration features + are requested. It should not be removed by the charm. 
+""" + + +import json +import string +from hashlib import sha256 +from urllib.parse import urljoin +from urllib.request import urlopen + +from charmhelpers.core import unitdata + +from charms.reactive import Endpoint +from charms.reactive import when, when_not +from charms.reactive import clear_flag, toggle_flag + + +# block size to read data from AWS metadata service +# (realistically, just needs to be bigger than ~20 chars) +READ_BLOCK_SIZE = 2048 + + +class AWSIntegrationRequires(Endpoint): + """ + Example usage: + + ```python + from charms.reactive import when, endpoint_from_flag + + @when('endpoint.aws.joined') + def request_aws_integration(): + aws = endpoint_from_flag('endpoint.aws.joined') + aws.request_instance_tags({ + 'tag1': 'value1', + 'tag2': None, + }) + aws.request_load_balancer_management() + # ... + + @when('endpoint.aws.ready') + def aws_integration_ready(): + update_config_enable_aws() + ``` + """ + # the IP is the AWS metadata service, documented here: + # https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-instance-metadata.html + _metadata_url = 'http://169.254.169.254/latest/meta-data/' + _instance_id_url = urljoin(_metadata_url, 'instance-id') + _az_url = urljoin(_metadata_url, 'placement/availability-zone') + + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + self._instance_id = None + self._region = None + + @property + def _received(self): + """ + Helper to streamline access to received data since we expect to only + ever be connected to a single AWS integration application with a + single unit. + """ + return self.relations[0].joined_units.received + + @property + def _to_publish(self): + """ + Helper to streamline access to received data since we expect to only + ever be connected to a single AWS integration application with a + single unit. 
+ """ + return self.relations[0].to_publish + + @when('endpoint.{endpoint_name}.joined') + def send_instance_info(self): + self._to_publish['instance-id'] = self.instance_id + self._to_publish['region'] = self.region + + @when('endpoint.{endpoint_name}.changed') + def check_ready(self): + completed = self._received.get('completed', {}) + actual_hash = completed.get(self.instance_id) + # My middle name is ready. No, that doesn't sound right. + # I eat ready for breakfast. + toggle_flag(self.expand_name('ready'), + self._requested and actual_hash == self._expected_hash) + clear_flag(self.expand_name('changed')) + + @when_not('endpoint.{endpoint_name}.joined') + def remove_ready(self): + clear_flag(self.expand_name('ready')) + + @property + def instance_id(self): + """ + This unit's instance-id. + """ + if self._instance_id is None: + cache_key = self.expand_name('instance-id') + cached = unitdata.kv().get(cache_key) + if cached: + self._instance_id = cached + else: + with urlopen(self._instance_id_url) as fd: + self._instance_id = fd.read(READ_BLOCK_SIZE).decode('utf8') + unitdata.kv().set(cache_key, self._instance_id) + return self._instance_id + + @property + def region(self): + """ + The region this unit is in. 
+ """ + if self._region is None: + cache_key = self.expand_name('region') + cached = unitdata.kv().get(cache_key) + if cached: + self._region = cached + else: + with urlopen(self._az_url) as fd: + az = fd.read(READ_BLOCK_SIZE).decode('utf8') + self._region = az.rstrip(string.ascii_lowercase) + unitdata.kv().set(cache_key, self._region) + return self._region + + @property + def _expected_hash(self): + return sha256(json.dumps(dict(self._to_publish), + sort_keys=True).encode('utf8')).hexdigest() + + @property + def _requested(self): + # whether or not a request has been issued + return self._to_publish['requested'] + + def _request(self, keyvals): + self._to_publish.update(keyvals) + self._to_publish['requested'] = True + clear_flag(self.expand_name('ready')) + + def tag_instance(self, tags): + """ + Request that the given tags be applied to this instance. + + # Parameters + `tags` (dict): Mapping of tag names to values (or `None`). + """ + self._request({'instance-tags': dict(tags)}) + + def tag_instance_security_group(self, tags): + """ + Request that the given tags be applied to this instance's + machine-specific security group (firewall) created by Juju. + + # Parameters + `tags` (dict): Mapping of tag names to values (or `None`). + """ + self._request({'instance-security-group-tags': dict(tags)}) + + def tag_instance_subnet(self, tags): + """ + Request that the given tags be applied to this instance's subnet. + + # Parameters + `tags` (dict): Mapping of tag names to values (or `None`). + """ + self._request({'instance-subnet-tags': dict(tags)}) + + def enable_acm_readonly(self): + """ + Request readonly for ACM. + """ + self._request({'enable-acm-readonly': True}) + + def enable_acm_fullaccess(self): + """ + Request fullaccess for ACM. + """ + self._request({'enable-acm-fullaccess': True}) + + def enable_instance_inspection(self): + """ + Request the ability to inspect instances. 
+ """ + self._request({'enable-instance-inspection': True}) + + def enable_network_management(self): + """ + Request the ability to manage networking (firewalls, subnets, etc). + """ + self._request({'enable-network-management': True}) + + def enable_load_balancer_management(self): + """ + Request the ability to manage load balancers. + """ + self._request({'enable-load-balancer-management': True}) + + def enable_block_storage_management(self): + """ + Request the ability to manage block storage. + """ + self._request({'enable-block-storage-management': True}) + + def enable_dns_management(self): + """ + Request the ability to manage DNS. + """ + self._request({'enable-dns-management': True}) + + def enable_object_storage_access(self, patterns=None): + """ + Request the ability to access object storage. + + # Parameters + `patterns` (list): If given, restrict access to the resources matching + the patterns. If patterns do not start with the S3 ARN prefix + (`arn:aws:s3:::`), it will be prepended. + """ + if patterns: + for i, pattern in enumerate(patterns): + if not pattern.startswith('arn:aws:s3:::'): + patterns[i] = 'arn:aws:s3:::{}'.format(pattern) + self._request({ + 'enable-object-storage-access': True, + 'object-storage-access-patterns': patterns, + }) + + def enable_object_storage_management(self, patterns=None): + """ + Request the ability to manage object storage. + + # Parameters + `patterns` (list): If given, restrict management to the resources + matching the patterns. If patterns do not start with the S3 ARN + prefix (`arn:aws:s3:::`), it will be prepended. 
+ """ + if patterns: + for i, pattern in enumerate(patterns): + if not pattern.startswith('arn:aws:s3:::'): + patterns[i] = 'arn:aws:s3:::{}'.format(pattern) + self._request({ + 'enable-object-storage-management': True, + 'object-storage-management-patterns': patterns, + }) diff --git a/kubernetes-control-plane/hooks/relations/azure-integration/.gitignore b/kubernetes-control-plane/hooks/relations/azure-integration/.gitignore new file mode 100644 index 0000000..5f9f2c5 --- /dev/null +++ b/kubernetes-control-plane/hooks/relations/azure-integration/.gitignore @@ -0,0 +1,3 @@ +.tox +__pycache__ +*.pyc diff --git a/kubernetes-control-plane/hooks/relations/azure-integration/LICENSE b/kubernetes-control-plane/hooks/relations/azure-integration/LICENSE new file mode 100644 index 0000000..d645695 --- /dev/null +++ b/kubernetes-control-plane/hooks/relations/azure-integration/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. 
+ + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. 
You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. 
Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/kubernetes-control-plane/hooks/relations/azure-integration/README.md b/kubernetes-control-plane/hooks/relations/azure-integration/README.md new file mode 100644 index 0000000..ddcae26 --- /dev/null +++ b/kubernetes-control-plane/hooks/relations/azure-integration/README.md @@ -0,0 +1,28 @@ +# Overview + +This layer encapsulates the `azure-integration` interface communciation +protocol and provides an API for charms on either side of relations using this +interface. 
+ +## Usage + +In your charm's `layer.yaml`, ensure that `interface:azure-integration` is +included in the `includes` section: + +```yaml +includes: ['layer:basic', 'interface:azure-integration'] +``` + +And in your charm's `metadata.yaml`, ensure that a relation endpoint is defined +using the `azure-integration` interface protocol: + +```yaml +requires: + azure: + interface: azure-integration +``` + +For documentation on how to use the API for this interface, see: + +* [Requires API documentation](docs/requires.md) +* [Provides API documentation](docs/provides.md) (this will only be used by the azure-integrator charm) diff --git a/kubernetes-control-plane/hooks/relations/azure-integration/__init__.py b/kubernetes-control-plane/hooks/relations/azure-integration/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/kubernetes-control-plane/hooks/relations/azure-integration/copyright b/kubernetes-control-plane/hooks/relations/azure-integration/copyright new file mode 100644 index 0000000..a91bdf1 --- /dev/null +++ b/kubernetes-control-plane/hooks/relations/azure-integration/copyright @@ -0,0 +1,16 @@ +Format: http://dep.debian.net/deps/dep5/ + +Files: * +Copyright: Copyright 2018, Canonical Ltd., All Rights Reserved. +License: Apache License 2.0 + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + . + http://www.apache.org/licenses/LICENSE-2.0 + . + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/kubernetes-control-plane/hooks/relations/azure-integration/docs/provides.md b/kubernetes-control-plane/hooks/relations/azure-integration/docs/provides.md new file mode 100644 index 0000000..4348dff --- /dev/null +++ b/kubernetes-control-plane/hooks/relations/azure-integration/docs/provides.md @@ -0,0 +1,175 @@ +

provides

+ + +This is the provides side of the interface layer, for use only by the Azure +integrator charm itself. + +The flags that are set by the provides side of this interface are: + +* **`endpoint.{endpoint_name}.requested`** This flag is set when there is + a new or updated request by a remote unit for Azure integration features. + The Azure integration charm should then iterate over each request, perform + whatever actions are necessary to satisfy those requests, and then mark + them as complete. + +

AzureIntegrationProvides

+ +```python +AzureIntegrationProvides(self, endpoint_name, relation_ids=None) +``` + +Example usage: + +```python +from charms.reactive import when, endpoint_from_flag +from charms import layer + +@when('endpoint.azure.requests-pending') +def handle_requests(): + azure = endpoint_from_flag('endpoint.azure.requests-pending') + for request in azure.requests: + if request.instance_tags: + layer.azure.tag_instance( + request.vm_name, + request.resource_group, + request.instance_tags) + if request.requested_load_balancer_management: + layer.azure.enable_load_balancer_management( + request.charm, + request.vm_name, + request.resource_group, + ) + # ... + azure.mark_completed() +``` + +

relation_ids

+ + +A list of the IDs of all established relations. + +

requests

+ + +A list of the new or updated `IntegrationRequests` that +have been made. + +

get_departed_charms

+ +```python +AzureIntegrationProvides.get_departed_charms(self) +``` + +Get a list of all charms that have had all units depart since the +last time this was called. + +

mark_completed

+ +```python +AzureIntegrationProvides.mark_completed(self) +``` + +Mark all requests as completed and remove the `requests-pending` flag. + +

IntegrationRequest

+ +```python +IntegrationRequest(self, unit) +``` + +A request for integration from a single remote unit. + +

application_name

+ + +The name of the application making the request. + +

charm

+ + +The charm name reported for this request. + +

instance_tags

+ + +Mapping of tag names to values to apply to this instance. + +

is_changed

+ + +Whether this request has changed since the last time it was +marked completed (if ever). + +

model_uuid

+ + +The UUID of the model containing the application making this request. + +

relation_id

+ + +The ID of the relation for the unit making the request. + +

requested_block_storage_management

+ + +Flag indicating whether block storage management was requested. + +

requested_dns_management

+ + +Flag indicating whether DNS management was requested. + +

requested_instance_inspection

+ + +Flag indicating whether the ability to inspect instances was requested. + +

requested_network_management

+ + +Flag indicating whether the ability to manage networking was requested. + +

requested_object_storage_access

+ + +Flag indicating whether object storage access was requested. + +

requested_object_storage_management

+ + +Flag indicating whether object storage management was requested. + +

requested_security_management

+ + +Flag indicating whether security management was requested. + +

resource_group

+ + +The resource group reported for this request. + +

unit_name

+ + +The name of the unit making the request. + +

vm_id

+ + +The instance ID reported for this request. + +

vm_name

+ + +The instance name reported for this request. + +

mark_completed

+ +```python +IntegrationRequest.mark_completed(self) +``` + +Mark this request as having been completed. + diff --git a/kubernetes-control-plane/hooks/relations/azure-integration/docs/requires.md b/kubernetes-control-plane/hooks/relations/azure-integration/docs/requires.md new file mode 100644 index 0000000..608b4ee --- /dev/null +++ b/kubernetes-control-plane/hooks/relations/azure-integration/docs/requires.md @@ -0,0 +1,145 @@ +

requires

+ + +This is the requires side of the interface layer, for use in charms that +wish to request integration with Azure native features. The integration will +be provided by the Azure integrator charm, which allows the requiring charm +to not require cloud credentials itself and not have a lot of Azure specific +API code. + +The flags that are set by the requires side of this interface are: + +* **`endpoint.{endpoint_name}.joined`** This flag is set when the relation + has been joined, and the charm should then use the methods documented below + to request specific Azure features. This flag is automatically removed if + the relation is broken. It should not be removed by the charm. + +* **`endpoint.{endpoint_name}.ready`** This flag is set once the requested + features have been enabled for the Azure instance on which the charm is + running. This flag is automatically removed if new integration features + are requested. It should not be removed by the charm. + +

AzureIntegrationRequires

+ +```python +AzureIntegrationRequires(self, *args, **kwargs) +``` + +Interface to request integration access. + +Note that due to resource limits and permissions granularity, policies are +limited to being applied at the charm level. That means that, if any +permissions are requested (i.e., any of the enable methods are called), +what is granted will be the sum of those ever requested by any instance of +the charm on this cloud. + +Labels, on the other hand, will be instance specific. + +Example usage: + +```python +from charms.reactive import when, endpoint_from_flag + +@when('endpoint.azure.joined') +def request_azure_integration(): + azure = endpoint_from_flag('endpoint.azure.joined') + azure.tag_instance({ + 'tag1': 'value1', + 'tag2': None, + }) + azure.enable_loadbalancer_management() + # ... + +@when('endpoint.azure.ready') +def azure_integration_ready(): + update_config_enable_azure() +``` + +

is_ready

+ + +Whether or not the request for this instance has been completed. + +

resource_group

+ + +The resource group this unit is in. + +

vm_id

+ + +This unit's instance ID. + +

vm_name

+ + +This unit's instance name. + +

tag_instance

+ +```python +AzureIntegrationRequires.tag_instance(self, tags) +``` + +Request that the given tags be applied to this instance. + +__Parameters__ + +- __`tags` (dict)__: Mapping of tags names to values. + +

enable_instance_inspection

+ +```python +AzureIntegrationRequires.enable_instance_inspection(self) +``` + +Request the ability to inspect instances. + +

enable_network_management

+ +```python +AzureIntegrationRequires.enable_network_management(self) +``` + +Request the ability to manage networking. + +

enable_security_management

+ +```python +AzureIntegrationRequires.enable_security_management(self) +``` + +Request the ability to manage security (e.g., firewalls). + +

enable_block_storage_management

+ +```python +AzureIntegrationRequires.enable_block_storage_management(self) +``` + +Request the ability to manage block storage. + +

enable_dns_management

+ +```python +AzureIntegrationRequires.enable_dns_management(self) +``` + +Request the ability to manage DNS. + +

enable_object_storage_access

+ +```python +AzureIntegrationRequires.enable_object_storage_access(self) +``` + +Request the ability to access object storage. + +

enable_object_storage_management

+ +```python +AzureIntegrationRequires.enable_object_storage_management(self) +``` + +Request the ability to manage object storage. + diff --git a/kubernetes-control-plane/hooks/relations/azure-integration/interface.yaml b/kubernetes-control-plane/hooks/relations/azure-integration/interface.yaml new file mode 100644 index 0000000..a77a7cb --- /dev/null +++ b/kubernetes-control-plane/hooks/relations/azure-integration/interface.yaml @@ -0,0 +1,4 @@ +name: azure-integration +summary: Interface for connecting to the Azure integrator charm. +version: 1 +maintainer: Cory Johns diff --git a/kubernetes-control-plane/hooks/relations/azure-integration/make_docs b/kubernetes-control-plane/hooks/relations/azure-integration/make_docs new file mode 100644 index 0000000..84df5ee --- /dev/null +++ b/kubernetes-control-plane/hooks/relations/azure-integration/make_docs @@ -0,0 +1,20 @@ +#!.tox/py3/bin/python + +import sys +from shutil import rmtree +from unittest.mock import patch + +import pydocmd.__main__ + + +with patch('charmhelpers.core.hookenv.metadata') as metadata: + metadata.return_value = { + 'requires': {'azure': {'interface': 'azure-integration'}}, + 'provides': {'azure': {'interface': 'azure-integration'}}, + } + sys.path.insert(0, '.') + print(sys.argv) + if len(sys.argv) == 1: + sys.argv.extend(['build']) + pydocmd.__main__.main() + rmtree('_build') diff --git a/kubernetes-control-plane/hooks/relations/azure-integration/provides.py b/kubernetes-control-plane/hooks/relations/azure-integration/provides.py new file mode 100644 index 0000000..5ff7d3a --- /dev/null +++ b/kubernetes-control-plane/hooks/relations/azure-integration/provides.py @@ -0,0 +1,275 @@ +""" +This is the provides side of the interface layer, for use only by the Azure +integrator charm itself. 
+ +The flags that are set by the provides side of this interface are: + +* **`endpoint.{endpoint_name}.requested`** This flag is set when there is + a new or updated request by a remote unit for Azure integration features. + The Azure integration charm should then iterate over each request, perform + whatever actions are necessary to satisfy those requests, and then mark + them as complete. +""" + +from operator import attrgetter + +from charms.reactive import Endpoint +from charms.reactive import when +from charms.reactive import toggle_flag, clear_flag + + +class AzureIntegrationProvides(Endpoint): + """ + Example usage: + + ```python + from charms.reactive import when, endpoint_from_flag + from charms import layer + + @when('endpoint.azure.requests-pending') + def handle_requests(): + azure = endpoint_from_flag('endpoint.azure.requests-pending') + for request in azure.requests: + if request.instance_tags: + layer.azure.tag_instance( + request.vm_name, + request.resource_group, + request.instance_tags) + if request.requested_load_balancer_management: + layer.azure.enable_load_balancer_management( + request.charm, + request.vm_name, + request.resource_group, + ) + # ... + azure.mark_completed() + ``` + """ + + @when('endpoint.{endpoint_name}.changed') + def check_requests(self): + toggle_flag(self.expand_name('requests-pending'), + len(self.requests) > 0) + clear_flag(self.expand_name('changed')) + + @property + def requests(self): + """ + A list of the new or updated #IntegrationRequests that + have been made. + """ + if not hasattr(self, '_requests'): + all_requests = [IntegrationRequest(unit) + for unit in self.all_joined_units] + is_changed = attrgetter('is_changed') + self._requests = list(filter(is_changed, all_requests)) + return self._requests + + @property + def relation_ids(self): + """ + A list of the IDs of all established relations. 
+ """ + return [relation.relation_id for relation in self.relations] + + def get_departed_charms(self): + """ + Get a list of all charms that have had all units depart since the + last time this was called. + """ + joined_charms = {unit.received['charm'] + for unit in self.all_joined_units + if unit.received['charm']} + departed_charms = [unit.received['charm'] + for unit in self.all_departed_units + if unit.received['charm'] not in joined_charms] + self.all_departed_units.clear() + return departed_charms + + def mark_completed(self): + """ + Mark all requests as completed and remove the `requests-pending` flag. + """ + for request in self.requests: + request.mark_completed() + clear_flag(self.expand_name('requests-pending')) + self._requests = [] + + +class IntegrationRequest: + """ + A request for integration from a single remote unit. + """ + def __init__(self, unit): + self._unit = unit + + @property + def _to_publish(self): + return self._unit.relation.to_publish + + @property + def _completed(self): + return self._to_publish.get('completed', {}) + + @property + def _requested(self): + return self._unit.received['requested'] + + @property + def is_changed(self): + """ + Whether this request has changed since the last time it was + marked completed (if ever). + """ + if not all([self.charm, self.vm_id, self.vm_name, + self.resource_group, self._requested]): + return False + return self._completed.get(self.vm_id) != self._requested + + def mark_completed(self): + """ + Mark this request as having been completed. 
+ """ + completed = self._completed + completed[self.vm_id] = self._requested + self._to_publish['completed'] = completed # have to explicitly update + + def send_additional_metadata(self, resource_group_location, + vnet_name, vnet_resource_group, + subnet_name, security_group_name, + security_group_resource_group, + use_managed_identity=True, aad_client=None, + aad_secret=None, tenant_id=None): + self._to_publish.update({ + 'resource-group-location': resource_group_location, + 'vnet-name': vnet_name, + 'vnet-resource-group': vnet_resource_group, + 'subnet-name': subnet_name, + 'security-group-name': security_group_name, + 'security-group-resource-group': security_group_resource_group, + 'use-managed-identity': use_managed_identity, + 'aad-client': aad_client, + 'aad-client-secret': aad_secret, + 'tenant-id': tenant_id + }) + + @property + def relation_id(self): + """ + The ID of the relation for the unit making the request. + """ + return self._unit.relation.relation_id + + @property + def unit_name(self): + """ + The name of the unit making the request. + """ + return self._unit.unit_name + + @property + def application_name(self): + """ + The name of the application making the request. + """ + return self._unit.application_name + + @property + def charm(self): + """ + The charm name reported for this request. + """ + return self._unit.received['charm'] + + @property + def vm_id(self): + """ + The instance ID reported for this request. + """ + return self._unit.received['vm-id'] + + @property + def vm_name(self): + """ + The instance name reported for this request. + """ + return self._unit.received['vm-name'] + + @property + def resource_group(self): + """ + The resource group reported for this request. + """ + return self._unit.received['res-group'] + + @property + def model_uuid(self): + """ + The UUID of the model containing the application making this request. 
+ """ + return self._unit.received['model-uuid'] + + @property + def instance_tags(self): + """ + Mapping of tag names to values to apply to this instance. + """ + # uses dict() here to make a copy, just to be safe + return dict(self._unit.received.get('instance-tags', {})) + + @property + def requested_instance_inspection(self): + """ + Flag indicating whether the ability to inspect instances was requested. + """ + return bool(self._unit.received['enable-instance-inspection']) + + @property + def requested_network_management(self): + """ + Flag indicating whether the ability to manage networking was requested. + """ + return bool(self._unit.received['enable-network-management']) + + @property + def requested_loadbalancer_management(self): + """ + Flag indicating whether the ability to manage load balancers was requested. + """ + return bool(self._unit.received['enable-loadbalancer-management']) + + + @property + def requested_security_management(self): + """ + Flag indicating whether security management was requested. + """ + return bool(self._unit.received['enable-security-management']) + + @property + def requested_block_storage_management(self): + """ + Flag indicating whether block storage management was requested. + """ + return bool(self._unit.received['enable-block-storage-management']) + + @property + def requested_dns_management(self): + """ + Flag indicating whether DNS management was requested. + """ + return bool(self._unit.received['enable-dns-management']) + + @property + def requested_object_storage_access(self): + """ + Flag indicating whether object storage access was requested. + """ + return bool(self._unit.received['enable-object-storage-access']) + + @property + def requested_object_storage_management(self): + """ + Flag indicating whether object storage management was requested. 
+ """ + return bool(self._unit.received['enable-object-storage-management']) diff --git a/kubernetes-control-plane/hooks/relations/azure-integration/pydocmd.yml b/kubernetes-control-plane/hooks/relations/azure-integration/pydocmd.yml new file mode 100644 index 0000000..6414c29 --- /dev/null +++ b/kubernetes-control-plane/hooks/relations/azure-integration/pydocmd.yml @@ -0,0 +1,16 @@ +site_name: 'Azure Integration Interface' + +generate: + - requires.md: + - requires + - requires.AzureIntegrationRequires+ + - provides.md: + - provides + - provides.AzureIntegrationProvides+ + - provides.IntegrationRequest+ + +pages: + - Requires: requires.md + - Provides: provides.md + +gens_dir: docs diff --git a/kubernetes-control-plane/hooks/relations/azure-integration/requires.py b/kubernetes-control-plane/hooks/relations/azure-integration/requires.py new file mode 100644 index 0000000..600d69e --- /dev/null +++ b/kubernetes-control-plane/hooks/relations/azure-integration/requires.py @@ -0,0 +1,298 @@ +""" +This is the requires side of the interface layer, for use in charms that +wish to request integration with Azure native features. The integration will +be provided by the Azure integrator charm, which allows the requiring charm +to not require cloud credentials itself and not have a lot of Azure specific +API code. + +The flags that are set by the requires side of this interface are: + +* **`endpoint.{endpoint_name}.joined`** This flag is set when the relation + has been joined, and the charm should then use the methods documented below + to request specific Azure features. This flag is automatically removed if + the relation is broken. It should not be removed by the charm. + +* **`endpoint.{endpoint_name}.ready`** This flag is set once the requested + features have been enabled for the Azure instance on which the charm is + running. This flag is automatically removed if new integration features + are requested. It should not be removed by the charm. 
+""" + + +import json +import os +import random +import string +from urllib.request import urlopen, Request + +from charmhelpers.core import hookenv +from charmhelpers.core import unitdata + +from charms.reactive import Endpoint +from charms.reactive import when, when_not +from charms.reactive import clear_flag, toggle_flag + + +# block size to read data from Azure metadata service +# (realistically, just needs to be bigger than ~20 chars) +READ_BLOCK_SIZE = 2048 + + +class AzureIntegrationRequires(Endpoint): + """ + Interface to request integration access. + + Note that due to resource limits and permissions granularity, policies are + limited to being applied at the charm level. That means that, if any + permissions are requested (i.e., any of the enable methods are called), + what is granted will be the sum of those ever requested by any instance of + the charm on this cloud. + + Labels, on the other hand, will be instance specific. + + Example usage: + + ```python + from charms.reactive import when, endpoint_from_flag + + @when('endpoint.azure.joined') + def request_azure_integration(): + azure = endpoint_from_flag('endpoint.azure.joined') + azure.tag_instance({ + 'tag1': 'value1', + 'tag2': None, + }) + azure.request_load_balancer_management() + # ... + + @when('endpoint.azure.ready') + def azure_integration_ready(): + update_config_enable_azure() + ``` + """ + # https://docs.microsoft.com/en-us/azure/virtual-machines/windows/instance-metadata-service + _metadata_url = 'http://169.254.169.254/metadata/instance?api-version=2017-12-01' # noqa + _metadata_headers = {'Metadata': 'true'} + + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + self._vm_metadata = None + + @property + def _received(self): + """ + Helper to streamline access to received data since we expect to only + ever be connected to a single Azure integration application with a + single unit. 
+ """ + return self.relations[0].joined_units.received + + @property + def _to_publish(self): + """ + Helper to streamline access to received data since we expect to only + ever be connected to a single Azure integration application with a + single unit. + """ + return self.relations[0].to_publish + + @when('endpoint.{endpoint_name}.joined') + def send_instance_info(self): + self._to_publish['charm'] = hookenv.charm_name() + self._to_publish['vm-id'] = self.vm_id + self._to_publish['vm-name'] = self.vm_name + self._to_publish['res-group'] = self.resource_group + self._to_publish['model-uuid'] = os.environ['JUJU_MODEL_UUID'] + + @when('endpoint.{endpoint_name}.changed') + def check_ready(self): + # My middle name is ready. No, that doesn't sound right. + # I eat ready for breakfast. + toggle_flag(self.expand_name('ready'), self.is_ready) + clear_flag(self.expand_name('changed')) + + @when_not('endpoint.{endpoint_name}.joined') + def remove_ready(self): + clear_flag(self.expand_name('ready')) + + @property + def vm_metadata(self): + if self._vm_metadata is None: + cache_key = self.expand_name('vm-metadata') + cached = unitdata.kv().get(cache_key) + if cached: + self._vm_metadata = cached + else: + req = Request(self._metadata_url, + headers=self._metadata_headers) + with urlopen(req) as fd: + metadata = fd.read(READ_BLOCK_SIZE).decode('utf8').strip() + self._vm_metadata = json.loads(metadata) + unitdata.kv().set(cache_key, self._vm_metadata) + return self._vm_metadata + + @property + def vm_id(self): + """ + This unit's instance ID. + """ + return self.vm_metadata['compute']['vmId'] + + @property + def vm_name(self): + """ + This unit's instance name. + """ + return self.vm_metadata['compute']['name'] + + @property + def vm_location(self): + """ + The location (region) the instance is running in. + """ + return self.vm_metadata['compute']['location'] + + @property + def resource_group(self): + """ + The resource group this unit is in. 
+ """ + return self.vm_metadata['compute']['resourceGroupName'] + + @property + def resource_group_location(self): + """ + The location (region) the resource group is in. + """ + return self._received['resource-group-location'] + + @property + def subscription_id(self): + """ + The ID of the Azure Subscription this unit is in. + """ + return self.vm_metadata['compute']['subscriptionId'] + + @property + def vnet_name(self): + """ + The name of the virtual network the instance is in. + """ + return self._received['vnet-name'] + + @property + def vnet_resource_group(self): + """ + The name of the resource group the virtual network is in. + """ + return self._received['vnet-resource-group'] + + @property + def subnet_name(self): + """ + The name of the subnet the instance is in. + """ + return self._received['subnet-name'] + + @property + def security_group_name(self): + """ + The name of the security group attached to the cluster's subnet. + """ + return self._received['security-group-name'] + + @property + def is_ready(self): + """ + Whether or not the request for this instance has been completed. 
+ """ + requested = self._to_publish['requested'] + completed = self._received.get('completed', {}).get(self.vm_id) + return requested and requested == completed + + @property + def security_group_resource_group(self): + return self._received['security-group-resource-group'] + + @property + def managed_identity(self): + return self._received['use-managed-identity'] + + @property + def aad_client_id(self): + return self._received['aad-client'] + + @property + def aad_client_secret(self): + return self._received['aad-client-secret'] + + @property + def tenant_id(self): + return self._received['tenant-id'] + + def _request(self, keyvals): + alphabet = string.ascii_letters + string.digits + nonce = ''.join(random.choice(alphabet) for _ in range(8)) + self._to_publish.update(keyvals) + self._to_publish['requested'] = nonce + clear_flag(self.expand_name('ready')) + + def tag_instance(self, tags): + """ + Request that the given tags be applied to this instance. + + # Parameters + `tags` (dict): Mapping of tags names to values. + """ + self._request({'instance-tags': dict(tags)}) + + def enable_instance_inspection(self): + """ + Request the ability to inspect instances. + """ + self._request({'enable-instance-inspection': True}) + + def enable_network_management(self): + """ + Request the ability to manage networking. + """ + self._request({'enable-network-management': True}) + + def enable_loadbalancer_management(self): + """ + Request the ability to manage load balancers. + """ + self._request({'enable-loadbalancer-management': True}) + + + def enable_security_management(self): + """ + Request the ability to manage security (e.g., firewalls). + """ + self._request({'enable-security-management': True}) + + def enable_block_storage_management(self): + """ + Request the ability to manage block storage. + """ + self._request({'enable-block-storage-management': True}) + + def enable_dns_management(self): + """ + Request the ability to manage DNS. 
+ """ + self._request({'enable-dns-management': True}) + + def enable_object_storage_access(self): + """ + Request the ability to access object storage. + """ + self._request({'enable-object-storage-access': True}) + + def enable_object_storage_management(self): + """ + Request the ability to manage object storage. + """ + self._request({'enable-object-storage-management': True}) + + diff --git a/kubernetes-control-plane/hooks/relations/ceph-admin/.gitignore b/kubernetes-control-plane/hooks/relations/ceph-admin/.gitignore new file mode 100644 index 0000000..ca3c9ea --- /dev/null +++ b/kubernetes-control-plane/hooks/relations/ceph-admin/.gitignore @@ -0,0 +1,2 @@ +.idea +*.swp diff --git a/kubernetes-control-plane/hooks/relations/ceph-admin/README.md b/kubernetes-control-plane/hooks/relations/ceph-admin/README.md new file mode 100644 index 0000000..b0c89db --- /dev/null +++ b/kubernetes-control-plane/hooks/relations/ceph-admin/README.md @@ -0,0 +1,41 @@ +# Overview + +**WARNING**: This is an unofficial, untested, and experimental layer from +the community. + +This interface layer handles the communication between the Ceph Monitor +and a client that requires an admin key. + +# Usage + +## Requires + +This interface layer will set the following states, as appropriate: + + * `{relation_name}.available` The ceph client has been related to a provider. 
+ The following accessors will be available: + - key - The admin cephx key + - auth - Whether or not strict auth is supported + - mon_hosts - The public addresses list of the monitor cluster + + +Client example: + +```python +@when('ceph-admin.available') +def ceph_connected(ceph_client): + charm_ceph_conf = os.path.join(os.sep, 'etc', 'ceph', 'ceph.conf') + cephx_key = os.path.join(os.sep, 'etc', 'ceph', 'ceph.client.admin.keyring') + + ceph_context = { + 'auth_supported': ceph_client.auth, + 'mon_hosts': ceph_client.mon_hosts, + } + + with open(charm_ceph_conf, 'w') as cephconf: + cephconf.write(render_template('ceph.conf', ceph_context)) + + # Write out the cephx_key also + with open(cephx_key, 'w') as cephconf: + cephconf.write(ceph_client.key) +``` diff --git a/kubernetes-control-plane/hooks/relations/ceph-admin/__init__.py b/kubernetes-control-plane/hooks/relations/ceph-admin/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/kubernetes-control-plane/hooks/relations/ceph-admin/interface.yaml b/kubernetes-control-plane/hooks/relations/ceph-admin/interface.yaml new file mode 100644 index 0000000..45dd6f4 --- /dev/null +++ b/kubernetes-control-plane/hooks/relations/ceph-admin/interface.yaml @@ -0,0 +1,3 @@ +name: ceph-admin +summary: Ceph Admin Client Interface +version: 1 diff --git a/kubernetes-control-plane/hooks/relations/ceph-admin/requires.py b/kubernetes-control-plane/hooks/relations/ceph-admin/requires.py new file mode 100644 index 0000000..ba07b51 --- /dev/null +++ b/kubernetes-control-plane/hooks/relations/ceph-admin/requires.py @@ -0,0 +1,23 @@ +from charms.reactive import Endpoint +from charms.reactive import toggle_flag + + +class CephClient(Endpoint): + def manage_flags(self): + toggle_flag(self.expand_name('{endpoint_name}.available'), + all([self.key(), + self.fsid(), + self.auth(), + self.mon_hosts()])) + + def key(self): + return self.all_joined_units.received_raw['key'] + + def fsid(self): + return 
self.all_joined_units.received_raw['fsid'] + + def auth(self): + return self.all_joined_units.received_raw['auth'] + + def mon_hosts(self): + return self.all_joined_units.received_raw['mon_hosts'] diff --git a/kubernetes-control-plane/hooks/relations/ceph-client/README.md b/kubernetes-control-plane/hooks/relations/ceph-client/README.md new file mode 100644 index 0000000..18076bd --- /dev/null +++ b/kubernetes-control-plane/hooks/relations/ceph-client/README.md @@ -0,0 +1,43 @@ +# Overview + +This interface layer handles the communication between the Ceph Monitor +cluster and a client that requires an access key and a pool to use. + +# Usage + +## Requires + +This interface layer will set the following states, as appropriate: + + * `{relation_name}.available` The ceph client has been related to a provider. + +The following accessors will be available: + + - key - The cephx access key + - auth - Whether or not strict auth is supported + - mon_hosts - The public addresses list of the monitor cluster + +Client example: + +```python +@when('ceph-client.connected') +def ceph_connected(ceph_client): + ceph_client.create_pool('newpool') + +@when('ceph-client.available') +def ceph_ready(ceph_client): + charm_ceph_conf= os.path.join(os.sep, 'etc', 'ceph', 'ceph.conf') + cephx_key = os.path.join(os.sep, 'etc', 'ceph', 'ceph.client.charm.keyring') + + ceph_context = { + 'auth_supported': ceph_client.auth, + 'mon_hosts': ceph_client.mon_hosts, + } + + with open(charm_ceph_conf, 'w') as cephconf: + cephconf.write(render_template('ceph.conf', ceph_context)) + + # Write out the cephx_key also + with open(cephx_key, 'w') as cephconf: + cephconf.write(ceph_client.key) +``` diff --git a/kubernetes-control-plane/hooks/relations/ceph-client/__init__.py b/kubernetes-control-plane/hooks/relations/ceph-client/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/kubernetes-control-plane/hooks/relations/ceph-client/interface.yaml 
b/kubernetes-control-plane/hooks/relations/ceph-client/interface.yaml new file mode 100644 index 0000000..8578859 --- /dev/null +++ b/kubernetes-control-plane/hooks/relations/ceph-client/interface.yaml @@ -0,0 +1,13 @@ +name: ceph-client +summary: Ceph Client Interface +version: 1 +maintainer: OpenStack Charmers +ignore: + - 'unit_tests' + - 'Makefile' + - '.testr.conf' + - 'test-requirements.txt' + - 'tox.ini' + - '.gitignore' + - '.gitreview' + - '.unit-state.db' \ No newline at end of file diff --git a/kubernetes-control-plane/hooks/relations/ceph-client/lib/base_provides.py b/kubernetes-control-plane/hooks/relations/ceph-client/lib/base_provides.py new file mode 100644 index 0000000..32ebfd8 --- /dev/null +++ b/kubernetes-control-plane/hooks/relations/ceph-client/lib/base_provides.py @@ -0,0 +1,97 @@ +# from charmhelpers.core import hookenv +from charmhelpers.core.hookenv import ( + relation_set, +) +from charms.reactive import RelationBase +from charms.reactive import hook +from charms.reactive import scopes +# from charms.reactive import is_state +# from charms.reactive import not_unless + + +class CephProvides(RelationBase): + scope = scopes.UNIT + + @hook('{provides:ceph-client}-relation-{joined,changed}') + def changed(self): + self.set_state('{relation_name}.connected') + # service = hookenv.remote_service_name() + conversation = self.conversation() + if conversation.get_remote('broker_req'): + self.set_state('{relation_name}.broker_requested') + + def provide_auth(self, service, key, auth_supported, public_address): + """ + Provide a token to a requesting service. + :param str service: The service which requested the key + :param str key: The key to access Ceph + :param str auth_supported: Supported auth methods + :param str public_address: Ceph's public address + """ + conversation = self.conversation(scope=service) + # print("Conversation is ", conversation) + # key is a keyword argument to the set_remote function so we have to + # set it separately. 
+ relation_set( + relation_id=conversation.namespace, + relation_settings={'key': key}) + opts = { + 'auth': auth_supported, + 'ceph-public-address': public_address, + } + conversation.set_remote(**opts) + + def requested_keys(self): + """ + Return a list of tuples mapping a service name to the key name + requested by that service. + Example usage:: + for service, key in ceph.requested_keys(): + ceph.provide_auth(service, key, auth, public_address) + """ + for conversation in self.conversations(): + service = conversation.scope + key = self.requested_key(service) + if key is None: + yield service + + def requested_key(self, service): + """ + Return the key provided to the requesting service. + """ + return self.conversation(scope=service).get_remote('key') + + def provide_broker_token(self, service, unit_response_key, token): + """ + Provide a token to a requesting service. + :param str service: The service which requested the key + :param str unit_response_key: The unique key for the unit + :param str token: Broker token top provide + """ + conversation = self.conversation(scope=service) + + # broker_rsp is being left for backward compatibility, + # unit_response_key superscedes it + conversation.set_remote(**{ + 'broker_rsp': token, + unit_response_key: token, + }) + + def requested_tokens(self): + """ + Return a list of tuples mapping a service name to the token name + requested by that service. + Example usage:: + for service, token in ceph.requested_tokens(): + ceph.provide_auth(service, token, auth, public_address) + """ + for conversation in self.conversations(): + service = conversation.scope + token = self.requested_token(service) + yield service, token + + def requested_token(self, service): + """ + Return the token provided to the requesting service. 
+ """ + return self.conversation(scope=service).get_remote('broker_req') diff --git a/kubernetes-control-plane/hooks/relations/ceph-client/lib/base_requires.py b/kubernetes-control-plane/hooks/relations/ceph-client/lib/base_requires.py new file mode 100644 index 0000000..6c8bb70 --- /dev/null +++ b/kubernetes-control-plane/hooks/relations/ceph-client/lib/base_requires.py @@ -0,0 +1,331 @@ +# Copyright 2017 Canonical Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import json + +import charms.reactive as reactive + +from charmhelpers.core.hookenv import ( + application_name, + local_unit, + log, +) +from charmhelpers.contrib.network.ip import format_ipv6_addr + +from charmhelpers.contrib.storage.linux.ceph import ( + CephBrokerRq, + is_request_complete, + is_request_sent, +) + + +class CephRequires(reactive.Endpoint): + + def joined(self): + reactive.set_flag(self.expand_name('{endpoint_name}.connected')) + + @property + def key(self): + return self._key() + + def _key(self): + return self.all_joined_units.received.get('key') + + @property + def auth(self): + return self._auth() + + def _auth(self): + return self.all_joined_units.received.get('auth') + + @property + def relation_name(self): + return self.expand_name('{endpoint_name}') + + def initial_ceph_response(self): + raise NotImplementedError + + def changed(self): + data = self.initial_ceph_response() + if all(data.values()): + reactive.set_flag(self.expand_name('{endpoint_name}.available')) + + rq = 
self.get_current_request() + if rq: + log("changed broker_req: {}".format(rq.ops)) + + if rq and is_request_complete(rq, relation=self.relation_name): + log("Setting ceph-client.pools.available") + reactive.set_flag( + self.expand_name('{endpoint_name}.pools.available')) + else: + log("incomplete request. broker_req not found") + + def broken(self): + reactive.clear_flag( + self.expand_name('{endpoint_name}.available')) + reactive.clear_flag( + self.expand_name('{endpoint_name}.connected')) + reactive.clear_flag( + self.expand_name('{endpoint_name}.pools.available')) + + def create_replicated_pool(self, name, replicas=3, weight=None, + pg_num=None, group=None, namespace=None, + app_name=None, **kwargs): + """ + Request pool setup + + :param name: Name of pool to create + :type name: str + :param replicas: Number of replicas for supporting pools + :type replicas: int + :param weight: The percentage of data the pool makes up + :type weight: Optional[float] + :param pg_num: If not provided, this value will be calculated by the + broker based on how many OSDs are in the cluster at the + time of creation. Note that, if provided, this value + will be capped at the current available maximum. + :type pg_num: Optional[int] + :param group: Group to add pool to. + :type group: Optional[str] + :param namespace: A group can optionally have a namespace defined that + will be used to further restrict pool access. + :type namespace: Optional[str] + :param app_name: (Optional) Tag pool with application name. Note that + there is certain protocols emerging upstream with + regard to meaningful application names to use. + Examples are ``rbd`` and ``rgw``. + :type app_name: Optional[str] + :param kwargs: Additional keyword arguments subject to validation. + Refer to CephBrokerRq.add_op_create_replicated_pool + method for documentation. 
+ :type kwargs: Dict[str,any] + """ + rq = self.get_current_request() or CephBrokerRq() + kwargs.update({ + 'name': name, + 'replica_count': replicas, + 'pg_num': pg_num, + 'weight': weight, + 'group': group, + 'namespace': namespace, + 'app_name': app_name, + }) + rq.add_op_create_replicated_pool(**kwargs) + self.send_request_if_needed(rq) + reactive.clear_flag( + self.expand_name('{endpoint_name}.pools.available')) + + def create_pool(self, name, replicas=3, weight=None, pg_num=None, + group=None, namespace=None): + """ + Request pool setup -- deprecated. Please use create_replicated_pool + or create_erasure_pool(which doesn't exist yet) + + @param name: Name of pool to create + @param replicas: Number of replicas for supporting pools + @param weight: The percentage of data the pool makes up + @param pg_num: If not provided, this value will be calculated by the + broker based on how many OSDs are in the cluster at the + time of creation. Note that, if provided, this value + will be capped at the current available maximum. + @param group: Group to add pool to. + @param namespace: A group can optionally have a namespace defined that + will be used to further restrict pool access. + """ + self.create_replicated_pool(name, replicas, weight, pg_num, group, + namespace) + + def create_erasure_pool(self, name, erasure_profile=None, + weight=None, group=None, app_name=None, + max_bytes=None, max_objects=None, + allow_ec_overwrites=False, + **kwargs): + """ + Request erasure coded pool setup + + :param name: Name of pool to create + :type name: str + :param erasure_profile: Name of erasure profile for pool + :type erasure_profile: str + :param weight: The percentage of data the pool makes up + :type weight: Optional[float] + :param group: Group to add pool to. 
+ :type group: Optional[str] + :param app_name: Name of application using pool + :type app_name: Optional[str] + :param max_bytes: Maximum bytes of quota to apply + :type max_bytes: Optional[int] + :param max_objects: Maximum object quota to apply + :type max_objects: Optional[int] + :param allow_ec_overwrites: Allow EC pools to be overwritten + :type allow_ec_overwrites: bool + :param kwargs: Additional keyword arguments subject to validation. + Refer to CephBrokerRq.add_op_create_replicated_pool + method for documentation. + :type kwargs: Dict[str,any] + """ + rq = self.get_current_request() or CephBrokerRq() + kwargs.update({ + 'name': name, + 'erasure_profile': erasure_profile, + 'weight': weight, + 'group': group, + 'app_name': app_name, + 'max_bytes': max_bytes, + 'max_objects': max_objects, + 'allow_ec_overwrites': allow_ec_overwrites, + }) + rq.add_op_create_erasure_pool(**kwargs) + self.send_request_if_needed(rq) + reactive.clear_flag( + self.expand_name('{endpoint_name}.pools.available')) + + def create_erasure_profile(self, name, + erasure_type='jerasure', + erasure_technique=None, + k=None, m=None, + failure_domain=None, + lrc_locality=None, + shec_durability_estimator=None, + clay_helper_chunks=None, + device_class=None, + clay_scalar_mds=None, + lrc_crush_locality=None): + """ + Create erasure coding profile + + @param name: Name of erasure coding profile + @param erasure_type: Erasure coding plugin to use + @param erasure_technique: Erasure coding technique to use + @param k: Number of data chunks + @param m: Number of coding chunks + @param failure_domain: Failure domain to use for PG placement + @param lrc_locality: + Group the coding and data chunks into sets + of size locality (lrc plugin) + @param shec_durability_estimator: + The number of parity chuncks each of which includes + a data chunk in its calculation range (shec plugin) + @param clay_helper_chunks: + The number of helper chunks to use for recovery operations + (clay plugin) + @param 
device_class: + Device class to use for profile (ssd, hdd, nvme) + @param clay_scalar_mds: + Plugin to use for CLAY layered construction + (jerasure|isa|shec) + @param lrc_crush_locality: + Type of crush bucket in which set of chunks + defined by lrc_locality will be stored. + """ + rq = self.get_current_request() or CephBrokerRq() + rq.add_op_create_erasure_profile( + name=name, + erasure_type=erasure_type, + erasure_technique=erasure_technique, + k=k, m=m, + failure_domain=failure_domain, + lrc_locality=lrc_locality, + shec_durability_estimator=shec_durability_estimator, + clay_helper_chunks=clay_helper_chunks, + device_class=device_class, + clay_scalar_mds=clay_scalar_mds, + lrc_crush_locality=lrc_crush_locality + ) + self.send_request_if_needed(rq) + reactive.clear_flag( + self.expand_name('{endpoint_name}.pools.available')) + + def request_access_to_group(self, name, namespace=None, permission=None, + key_name=None, + object_prefix_permissions=None): + """ + Adds the requested permissions to service's Ceph key + + Adds the requested permissions to the current service's Ceph key, + allowing the key to access only the specified pools or + object prefixes. object_prefix_permissions should be a dictionary + keyed on the permission with the corresponding value being a list + of prefixes to apply that permission to. + { + 'rwx': ['prefix1', 'prefix2'], + 'class-read': ['prefix3']} + @param name: Target group name for permissions request. + @param namespace: namespace to further restrict pool access. + @param permission: Permission to be requested against pool + @param key_name: userid to grant permission to + @param object_prefix_permissions: Add object_prefix permissions. 
+ """ + current_request = self.get_current_request() or CephBrokerRq() + current_request.add_op_request_access_to_group( + name, + namespace=namespace, + permission=permission, + key_name=key_name, + object_prefix_permissions=object_prefix_permissions) + self.send_request_if_needed(current_request) + + def send_request_if_needed(self, request): + """Send broker request if an equivalent request has not been sent + + @param request: A CephBrokerRq object + """ + if is_request_sent(request, relation=self.relation_name): + log('Request already sent but not complete, ' + 'not sending new request') + else: + for relation in self.relations: + relation.to_publish['broker_req'] = json.loads( + request.request) + relation.to_publish_raw[ + 'application-name'] = application_name() + relation.to_publish_raw['unit-name'] = local_unit() + + def get_current_request(self): + broker_reqs = [] + for relation in self.relations: + broker_req = relation.to_publish.get('broker_req', {}) + if broker_req: + rq = CephBrokerRq() + rq.set_ops(broker_req['ops']) + broker_reqs.append(rq) + # Check that if there are multiple requests then they are the same. + assert all(x == broker_reqs[0] for x in broker_reqs) + if broker_reqs: + return broker_reqs[0] + + def get_remote_all(self, key, default=None): + """Return a list of all values presented by remote units for key""" + values = [] + for relation in self.relations: + for unit in relation.units: + value = unit.received.get(key, default) + if value: + values.append(value) + return list(set(values)) + + def mon_hosts(self): + """List of all monitor host public addresses""" + hosts = [] + addrs = self.get_remote_all('ceph-public-address') + for ceph_addrs in addrs: + # NOTE(jamespage): This looks odd but deals with + # use with ceph-proxy which + # presents all monitors in + # a single space delimited field. 
+ for addr in ceph_addrs.split(' '): + hosts.append(format_ipv6_addr(addr) or addr) + hosts.sort() + return hosts diff --git a/kubernetes-control-plane/hooks/relations/ceph-client/provides.py b/kubernetes-control-plane/hooks/relations/ceph-client/provides.py new file mode 100644 index 0000000..a8fbd05 --- /dev/null +++ b/kubernetes-control-plane/hooks/relations/ceph-client/provides.py @@ -0,0 +1,20 @@ +# Copyright 2020 Canonical Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from .lib import base_provides + + +class CephClientProvider(base_provides.CephProvides): + + pass diff --git a/kubernetes-control-plane/hooks/relations/ceph-client/requires.py b/kubernetes-control-plane/hooks/relations/ceph-client/requires.py new file mode 100644 index 0000000..f542246 --- /dev/null +++ b/kubernetes-control-plane/hooks/relations/ceph-client/requires.py @@ -0,0 +1,46 @@ +# Copyright 2020 Canonical Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from .lib import base_requires + +from charms.reactive import ( + when, +) + + +class CephClientRequires(base_requires.CephRequires): + + @when('endpoint.{endpoint_name}.joined') + def joined(self): + super().joined() + + @when('endpoint.{endpoint_name}.changed') + def changed(self): + super().changed() + + @when('endpoint.{endpoint_name}.departed') + def departed(self): + super().changed() + + @when('endpoint.{endpoint_name}.broken') + def broken(self): + super().broken() + + def initial_ceph_response(self): + data = { + 'key': self.key, + 'auth': self.auth, + 'mon_hosts': self.mon_hosts() + } + return data diff --git a/kubernetes-control-plane/hooks/relations/container-runtime/.gitignore b/kubernetes-control-plane/hooks/relations/container-runtime/.gitignore new file mode 100644 index 0000000..894a44c --- /dev/null +++ b/kubernetes-control-plane/hooks/relations/container-runtime/.gitignore @@ -0,0 +1,104 @@ +# Byte-compiled / optimized / DLL files +__pycache__/ +*.py[cod] +*$py.class + +# C extensions +*.so + +# Distribution / packaging +.Python +build/ +develop-eggs/ +dist/ +downloads/ +eggs/ +.eggs/ +lib/ +lib64/ +parts/ +sdist/ +var/ +wheels/ +*.egg-info/ +.installed.cfg +*.egg +MANIFEST + +# PyInstaller +# Usually these files are written by a python script from a template +# before PyInstaller builds the exe, so as to inject date/other infos into it. 
+*.manifest +*.spec + +# Installer logs +pip-log.txt +pip-delete-this-directory.txt + +# Unit test / coverage reports +htmlcov/ +.tox/ +.coverage +.coverage.* +.cache +nosetests.xml +coverage.xml +*.cover +.hypothesis/ +.pytest_cache/ + +# Translations +*.mo +*.pot + +# Django stuff: +*.log +local_settings.py +db.sqlite3 + +# Flask stuff: +instance/ +.webassets-cache + +# Scrapy stuff: +.scrapy + +# Sphinx documentation +docs/_build/ + +# PyBuilder +target/ + +# Jupyter Notebook +.ipynb_checkpoints + +# pyenv +.python-version + +# celery beat schedule file +celerybeat-schedule + +# SageMath parsed files +*.sage.py + +# Environments +.env +.venv +env/ +venv/ +ENV/ +env.bak/ +venv.bak/ + +# Spyder project settings +.spyderproject +.spyproject + +# Rope project settings +.ropeproject + +# mkdocs documentation +/site + +# mypy +.mypy_cache/ diff --git a/kubernetes-control-plane/hooks/relations/container-runtime/LICENSE b/kubernetes-control-plane/hooks/relations/container-runtime/LICENSE new file mode 100644 index 0000000..261eeb9 --- /dev/null +++ b/kubernetes-control-plane/hooks/relations/container-runtime/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. 
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. 
You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. 
Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/kubernetes-control-plane/hooks/relations/container-runtime/README.md b/kubernetes-control-plane/hooks/relations/container-runtime/README.md new file mode 100644 index 0000000..4620013 --- /dev/null +++ b/kubernetes-control-plane/hooks/relations/container-runtime/README.md @@ -0,0 +1,45 @@ +# interface-container-runtime + +## Overview + +This interface handles communication between subordinate charms, that provide a container runtime and charms requiring a container runtime. + +## Usage + +### Provides + +The providing side of the container interface provides a place for a container runtime to connect to. + +Your charm should respond to the `endpoint.{endpoint_name}.available` state, +which indicates that there is a container runtime connected. + +A trivial example of handling this interface would be: + +```python +@when('endpoint.containerd.joined') +def update_kubelet_config(containerd): + endpoint = endpoint_from_flag('endpoint.containerd.joined') + config = endpoint.get_config() + kubelet.config['container-runtime'] = \ + config['runtime'] +``` + +### Requires + +The requiring side of the container interface requires a place for a container runtime to connect to. + +Your charm should set `{endpoint_name}.available` state, +which indicates that the container is runtime connected. 
+ +A trivial example of handling this interface would be: + +```python +@when('endpoint.containerd.joined') +def pubish_config(): + endpoint = endpoint_from_flag('endpoint.containerd.joined') + endpoint.set_config( + socket='unix:///var/run/containerd/containerd.sock', + runtime='remote', + nvidia_enabled=False + ) +``` diff --git a/kubernetes-control-plane/hooks/relations/container-runtime/__init__.py b/kubernetes-control-plane/hooks/relations/container-runtime/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/kubernetes-control-plane/hooks/relations/container-runtime/interface.yaml b/kubernetes-control-plane/hooks/relations/container-runtime/interface.yaml new file mode 100644 index 0000000..294be1e --- /dev/null +++ b/kubernetes-control-plane/hooks/relations/container-runtime/interface.yaml @@ -0,0 +1,4 @@ +name: container-runtime +summary: Interface for relating to container runtimes +version: 1 +maintainer: "Joe Borg " diff --git a/kubernetes-control-plane/hooks/relations/container-runtime/provides.py b/kubernetes-control-plane/hooks/relations/container-runtime/provides.py new file mode 100644 index 0000000..a9768a8 --- /dev/null +++ b/kubernetes-control-plane/hooks/relations/container-runtime/provides.py @@ -0,0 +1,55 @@ +from charms.reactive import ( + Endpoint, + toggle_flag +) + + +class ContainerRuntimeProvides(Endpoint): + def manage_flags(self): + toggle_flag(self.expand_name('endpoint.{endpoint_name}.available'), + self.is_joined) + + def _get_config(self, key): + """ + Get the published configuration for a given key. + + :param key: String dict key + :return: String value for given key + """ + return self.all_joined_units.received.get(key) + + def get_nvidia_enabled(self): + """ + Get the published nvidia config. + + :return: String + """ + return self._get_config(key='nvidia_enabled') + + def get_runtime(self): + """ + Get the published runtime config. 
+ + :return: String + """ + return self._get_config(key='runtime') + + def get_socket(self): + """ + Get the published socket config. + + :return: String + """ + return self._get_config(key='socket') + + def set_config(self, sandbox_image=None): + """ + Set the configuration to be published. + + :param sandbox_image: String to optionally override the sandbox image + :return: None + """ + for relation in self.relations: + relation.to_publish.update({ + 'sandbox_image': sandbox_image + }) diff --git a/kubernetes-control-plane/hooks/relations/container-runtime/requires.py b/kubernetes-control-plane/hooks/relations/container-runtime/requires.py new file mode 100644 index 0000000..c461b68 --- /dev/null +++ b/kubernetes-control-plane/hooks/relations/container-runtime/requires.py @@ -0,0 +1,61 @@ +from charms.reactive import ( + Endpoint, + clear_flag, + data_changed, + is_data_changed, + toggle_flag +) + + +class ContainerRuntimeRequires(Endpoint): + def manage_flags(self): + toggle_flag(self.expand_name('endpoint.{endpoint_name}.available'), + self.is_joined) + toggle_flag(self.expand_name('endpoint.{endpoint_name}.reconfigure'), + self.is_joined and self._config_changed()) + + def _config_changed(self): + """ + Determine if our received data has changed. + + :return: Boolean + """ + # NB: this call should match whatever we're tracking in handle_remote_config + return is_data_changed('containerd.remote_config', + [self.get_sandbox_image()]) + + def handle_remote_config(self): + """ + Keep track of received data so we can know if it changes. + + :return: None + """ + clear_flag(self.expand_name('endpoint.{endpoint_name}.reconfigure')) + # Presently, we only care about one piece of remote config. Expand + # the list as needed. + data_changed('containerd.remote_config', + [self.get_sandbox_image()]) + + def get_sandbox_image(self): + """ + Get the sandbox image URI if a remote has published one. 
+ + :return: String: remotely configured sandbox image + """ + return self.all_joined_units.received.get('sandbox_image') + + def set_config(self, socket, runtime, nvidia_enabled): + """ + Set the configuration to be published. + + :param socket: String uri to runtime socket + :param runtime: String runtime executable + :param nvidia_enabled: Boolean nvidia runtime enabled + :return: None + """ + for relation in self.relations: + relation.to_publish.update({ + 'socket': socket, + 'runtime': runtime, + 'nvidia_enabled': nvidia_enabled + }) diff --git a/kubernetes-control-plane/hooks/relations/coordinator/peers.py b/kubernetes-control-plane/hooks/relations/coordinator/peers.py new file mode 100644 index 0000000..f443bf6 --- /dev/null +++ b/kubernetes-control-plane/hooks/relations/coordinator/peers.py @@ -0,0 +1,21 @@ +# Copyright 2016-2018 Canonical Ltd. +# +# This file is part of the Coordinator Layer for Juju charms. +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License version 3, as +# published by the Free Software Foundation. +# +# This program is distributed in the hope that it will be useful, but +# WITHOUT ANY WARRANTY; without even the implied warranties of +# MERCHANTABILITY, SATISFACTORY QUALITY, or FITNESS FOR A PARTICULAR +# PURPOSE. See the GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . 
+ +from charms import reactive + + +class CoordinatorPeer(reactive.Endpoint): + pass diff --git a/kubernetes-control-plane/hooks/relations/etcd/.gitignore b/kubernetes-control-plane/hooks/relations/etcd/.gitignore new file mode 100644 index 0000000..e43b0f9 --- /dev/null +++ b/kubernetes-control-plane/hooks/relations/etcd/.gitignore @@ -0,0 +1 @@ +.DS_Store diff --git a/kubernetes-control-plane/hooks/relations/etcd/README.md b/kubernetes-control-plane/hooks/relations/etcd/README.md new file mode 100644 index 0000000..9ed51dd --- /dev/null +++ b/kubernetes-control-plane/hooks/relations/etcd/README.md @@ -0,0 +1,89 @@ +# Overview + +This interface layer handles the communication with Etcd via the `etcd` +interface. + +# Usage + +## Requires + +This interface layer will set the following states, as appropriate: + + * `{relation_name}.connected` The relation is established, but Etcd may not + yet have provided any connection or service information. + + * `{relation_name}.available` Etcd has provided its connection string + information, and is ready to serve as a KV store. + The provided information can be accessed via the following methods: + * `etcd.get_connection_string()` + * `etcd.get_version()` + * `{relation_name}.tls.available` Etcd has provided the connection string + information, and the tls client credentials to communicate with it. + The client credentials can be accessed via: + * `{relation_name}.get_client_credentials()` returning a dictionary of + the clinet certificate, key and CA. + * `{relation_name}.save_client_credentials(key, cert, ca)` is a convenience + method to save the client certificate, key and CA to files of your + choosing. + + +For example, a common application for this is configuring an applications +backend key/value storage, like Docker. 
+ +```python +@when('etcd.available', 'docker.available') +def swarm_etcd_cluster_setup(etcd): + con_string = etcd.connection_string().replace('http', 'etcd') + opts = {} + opts['connection_string'] = con_string + render('docker-compose.yml', 'files/swarm/docker-compose.yml', opts) + +``` + + +## Provides + +A charm providing this interface is providing the Etcd rest api service. + +This interface layer will set the following states, as appropriate: + + * `{relation_name}.connected` One or more clients of any type have + been related. The charm should call the following methods to provide the + appropriate information to the clients: + + * `{relation_name}.set_connection_string(string, version)` + * `{relation_name}.set_client_credentials(key, cert, ca)` + +Example: + +```python +@when('db.connected') +def send_connection_details(db): + cert = leader_get('client_certificate') + key = leader_get('client_key') + ca = leader_get('certificate_authority') + # Set the key, cert, and ca on the db relation + db.set_client_credentials(key, cert, ca) + + port = hookenv.config().get('port') + # Get all the peers participating in the cluster relation. + addresses = cluster.get_peer_addresses() + connections = [] + for address in addresses: + connections.append('http://{0}:{1}'.format(address, port)) + # Set the connection string on the db relation. 
+ db.set_connection_string(','.join(conections)) +``` + + +# Contact Information + +### Maintainer +- Charles Butler + + +# Etcd + +- [Etcd](https://coreos.com/etcd/) home page +- [Etcd bug trackers](https://github.com/coreos/etcd/issues) +- [Etcd Juju Charm](http://jujucharms.com/?text=etcd) diff --git a/kubernetes-control-plane/hooks/relations/etcd/__init__.py b/kubernetes-control-plane/hooks/relations/etcd/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/kubernetes-control-plane/hooks/relations/etcd/interface.yaml b/kubernetes-control-plane/hooks/relations/etcd/interface.yaml new file mode 100644 index 0000000..929b1d5 --- /dev/null +++ b/kubernetes-control-plane/hooks/relations/etcd/interface.yaml @@ -0,0 +1,4 @@ +name: etcd +summary: Interface for relating to ETCD +version: 2 +maintainer: "Charles Butler " diff --git a/kubernetes-control-plane/hooks/relations/etcd/peers.py b/kubernetes-control-plane/hooks/relations/etcd/peers.py new file mode 100644 index 0000000..90980d1 --- /dev/null +++ b/kubernetes-control-plane/hooks/relations/etcd/peers.py @@ -0,0 +1,70 @@ +#!/usr/bin/python +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from charms.reactive import RelationBase +from charms.reactive import hook +from charms.reactive import scopes + + +class EtcdPeer(RelationBase): + '''This class handles peer relation communication by setting states that + the reactive code can respond to. 
''' + + scope = scopes.UNIT + + @hook('{peers:etcd}-relation-joined') + def peer_joined(self): + '''A new peer has joined, set the state on the unit so we can track + when they are departed. ''' + conv = self.conversation() + conv.set_state('{relation_name}.joined') + + @hook('{peers:etcd}-relation-departed') + def peers_going_away(self): + '''Trigger a state on the unit that it is leaving. We can use this + state in conjunction with the joined state to determine which unit to + unregister from the etcd cluster. ''' + conv = self.conversation() + conv.remove_state('{relation_name}.joined') + conv.set_state('{relation_name}.departing') + + def dismiss(self): + '''Remove the departing state from all other units in the conversation, + and we can resume normal operation. + ''' + for conv in self.conversations(): + conv.remove_state('{relation_name}.departing') + + def get_peers(self): + '''Return a list of names for the peers participating in this + conversation scope. ''' + peers = [] + # Iterate over all the conversations of this type. + for conversation in self.conversations(): + peers.append(conversation.scope) + return peers + + def set_db_ingress_address(self, address): + '''Set the ingress address belonging to the db relation.''' + for conversation in self.conversations(): + conversation.set_remote('db-ingress-address', address) + + def get_db_ingress_addresses(self): + '''Return a list of db ingress addresses''' + addresses = [] + # Iterate over all the conversations of this type. 
+ for conversation in self.conversations(): + address = conversation.get_remote('db-ingress-address') + if address: + addresses.append(address) + return addresses diff --git a/kubernetes-control-plane/hooks/relations/etcd/provides.py b/kubernetes-control-plane/hooks/relations/etcd/provides.py new file mode 100644 index 0000000..3cfc174 --- /dev/null +++ b/kubernetes-control-plane/hooks/relations/etcd/provides.py @@ -0,0 +1,47 @@ +#!/usr/bin/python +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from charms.reactive import RelationBase +from charms.reactive import hook +from charms.reactive import scopes + + +class EtcdProvider(RelationBase): + scope = scopes.GLOBAL + + @hook('{provides:etcd}-relation-{joined,changed}') + def joined_or_changed(self): + ''' Set the connected state from the provides side of the relation. ''' + self.set_state('{relation_name}.connected') + + @hook('{provides:etcd}-relation-{broken,departed}') + def broken_or_departed(self): + '''Remove connected state from the provides side of the relation. ''' + conv = self.conversation() + if len(conv.units) == 1: + conv.remove_state('{relation_name}.connected') + + def set_client_credentials(self, key, cert, ca): + ''' Set the client credentials on the global conversation for this + relation. 
''' + self.set_remote('client_key', key) + self.set_remote('client_ca', ca) + self.set_remote('client_cert', cert) + + def set_connection_string(self, connection_string, version=''): + ''' Set the connection string on the global conversation for this + relation. ''' + # Note: Version added as a late-dependency for 2 => 3 migration + # If no version is specified, consumers should presume etcd 2.x + self.set_remote('connection_string', connection_string) + self.set_remote('version', version) diff --git a/kubernetes-control-plane/hooks/relations/etcd/requires.py b/kubernetes-control-plane/hooks/relations/etcd/requires.py new file mode 100644 index 0000000..435532f --- /dev/null +++ b/kubernetes-control-plane/hooks/relations/etcd/requires.py @@ -0,0 +1,80 @@ +#!/usr/bin/python +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import os + +from charms.reactive import RelationBase +from charms.reactive import hook +from charms.reactive import scopes + + +class EtcdClient(RelationBase): + scope = scopes.GLOBAL + + @hook('{requires:etcd}-relation-{joined,changed}') + def changed(self): + ''' Indicate the relation is connected, and if the relation data is + set it is also available. ''' + self.set_state('{relation_name}.connected') + + if self.get_connection_string(): + self.set_state('{relation_name}.available') + # Get the ca, key, cert from the relation data. + cert = self.get_client_credentials() + # The tls state depends on the existance of the ca, key and cert. 
+ if cert['client_cert'] and cert['client_key'] and cert['client_ca']: # noqa + self.set_state('{relation_name}.tls.available') + + @hook('{requires:etcd}-relation-{broken, departed}') + def broken(self): + ''' Indicate the relation is no longer available and not connected. ''' + self.remove_state('{relation_name}.available') + self.remove_state('{relation_name}.connected') + self.remove_state('{relation_name}.tls.available') + + def connection_string(self): + ''' This method is depreciated but ensures backward compatibility + @see get_connection_string(self). ''' + return self.get_connection_string() + + def get_connection_string(self): + ''' Return the connection string, if available, or None. ''' + return self.get_remote('connection_string') + + def get_version(self): + ''' Return the version of the etd protocol being used, or None. ''' + return self.get_remote('version') + + def get_client_credentials(self): + ''' Return a dict with the client certificate, ca and key to + communicate with etcd using tls. ''' + return {'client_cert': self.get_remote('client_cert'), + 'client_key': self.get_remote('client_key'), + 'client_ca': self.get_remote('client_ca')} + + def save_client_credentials(self, key, cert, ca): + ''' Save all the client certificates for etcd to local files. 
''' + self._save_remote_data('client_cert', cert) + self._save_remote_data('client_key', key) + self._save_remote_data('client_ca', ca) + + def _save_remote_data(self, key, path): + ''' Save the remote data to a file indicated by path creating the + parent directory if needed.''' + value = self.get_remote(key) + if value: + parent = os.path.dirname(path) + if not os.path.isdir(parent): + os.makedirs(parent) + with open(path, 'w') as stream: + stream.write(value) diff --git a/kubernetes-control-plane/hooks/relations/gcp-integration/.gitignore b/kubernetes-control-plane/hooks/relations/gcp-integration/.gitignore new file mode 100644 index 0000000..5f9f2c5 --- /dev/null +++ b/kubernetes-control-plane/hooks/relations/gcp-integration/.gitignore @@ -0,0 +1,3 @@ +.tox +__pycache__ +*.pyc diff --git a/kubernetes-control-plane/hooks/relations/gcp-integration/LICENSE b/kubernetes-control-plane/hooks/relations/gcp-integration/LICENSE new file mode 100644 index 0000000..d645695 --- /dev/null +++ b/kubernetes-control-plane/hooks/relations/gcp-integration/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. 
+ + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. 
You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. 
Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/kubernetes-control-plane/hooks/relations/gcp-integration/README.md b/kubernetes-control-plane/hooks/relations/gcp-integration/README.md new file mode 100644 index 0000000..42861fb --- /dev/null +++ b/kubernetes-control-plane/hooks/relations/gcp-integration/README.md @@ -0,0 +1,28 @@ +# Overview + +This layer encapsulates the `gcp-integration` interface communication protocol +and provides an API for charms on either side of relations using this +interface. 
+ +## Usage + +In your charm's `layer.yaml`, ensure that `interface:gcp-integration` is +included in the `includes` section: + +```yaml +includes: ['layer:basic', 'interface:gcp-integration'] +``` + +And in your charm's `metadata.yaml`, ensure that a relation endpoint is defined +using the `gcp-integration` interface protocol: + +```yaml +requires: + gcp: + interface: gcp-integration +``` + +For documentation on how to use the API for this interface, see: + +* [Requires API documentation](docs/requires.md) +* [Provides API documentation](docs/provides.md) (this will only be used by the gcp-integrator charm) diff --git a/kubernetes-control-plane/hooks/relations/gcp-integration/__init__.py b/kubernetes-control-plane/hooks/relations/gcp-integration/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/kubernetes-control-plane/hooks/relations/gcp-integration/copyright b/kubernetes-control-plane/hooks/relations/gcp-integration/copyright new file mode 100644 index 0000000..a91bdf1 --- /dev/null +++ b/kubernetes-control-plane/hooks/relations/gcp-integration/copyright @@ -0,0 +1,16 @@ +Format: http://dep.debian.net/deps/dep5/ + +Files: * +Copyright: Copyright 2018, Canonical Ltd., All Rights Reserved. +License: Apache License 2.0 + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + . + http://www.apache.org/licenses/LICENSE-2.0 + . + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/kubernetes-control-plane/hooks/relations/gcp-integration/docs/provides.md b/kubernetes-control-plane/hooks/relations/gcp-integration/docs/provides.md new file mode 100644 index 0000000..6f29a39 --- /dev/null +++ b/kubernetes-control-plane/hooks/relations/gcp-integration/docs/provides.md @@ -0,0 +1,183 @@ +

provides

+ + +This is the provides side of the interface layer, for use only by the GCP +integration charm itself. + +The flags that are set by the provides side of this interface are: + +* **`endpoint.{endpoint_name}.requested`** This flag is set when there is + a new or updated request by a remote unit for GCP integration features. + The GCP integration charm should then iterate over each request, perform + whatever actions are necessary to satisfy those requests, and then mark + them as complete. + +

GCPIntegrationProvides

+ +```python +GCPIntegrationProvides(self, endpoint_name, relation_ids=None) +``` + +Example usage: + +```python +from charms.reactive import when, endpoint_from_flag +from charms import layer + +@when('endpoint.gcp.requests-pending') +def handle_requests(): + gcp = endpoint_from_flag('endpoint.gcp.requests-pending') + for request in gcp.requests: + if request.instance_labels: + layer.gcp.label_instance( + request.instance, + request.zone, + request.instance_labels) + if request.requested_load_balancer_management: + layer.gcp.enable_load_balancer_management( + request.charm, + request.instance, + request.zone, + ) + # ... + gcp.mark_completed() +``` + +

relation_ids

+ + +A list of the IDs of all established relations. + +

requests

+ + +A list of the new or updated `IntegrationRequests` that +have been made. + +

get_departed_charms

+ +```python +GCPIntegrationProvides.get_departed_charms(self) +``` + +Get a list of all charms that have had all units depart since the +last time this was called. + +

mark_completed

+ +```python +GCPIntegrationProvides.mark_completed(self) +``` + +Mark all requests as completed and remove the `requests-pending` flag. + +

IntegrationRequest

+ +```python +IntegrationRequest(self, unit) +``` + +A request for integration from a single remote unit. + +

application_name

+ + +The name of the application making the request. + +

charm

+ + +The charm name reported for this request. + +

has_credentials

+ + +Whether or not credentials have been set via `set_credentials`. + +

instance

+ + +The instance name reported for this request. + +

instance_labels

+ + +Mapping of label names to values to apply to this instance. + +

is_changed

+ + +Whether this request has changed since the last time it was +marked completed (if ever). + +

model_uuid

+ + +The UUID of the model containing the application making this request. + +

relation_id

+ + +The ID of the relation for the unit making the request. + +

requested_block_storage_management

+ + +Flag indicating whether block storage management was requested. + +

requested_dns_management

+ + +Flag indicating whether DNS management was requested. + +

requested_instance_inspection

+ + +Flag indicating whether the ability to inspect instances was requested. + +

requested_network_management

+ + +Flag indicating whether the ability to manage networking was requested. + +

requested_object_storage_access

+ + +Flag indicating whether object storage access was requested. + +

requested_object_storage_management

+ + +Flag indicating whether object storage management was requested. + +

requested_security_management

+ + +Flag indicating whether security management was requested. + +

unit_name

+ + +The name of the unit making the request. + +

zone

+ + +The zone reported for this request. + +

mark_completed

+ +```python +IntegrationRequest.mark_completed(self) +``` + +Mark this request as having been completed. + +

set_credentials

+ +```python +IntegrationRequest.set_credentials(self, credentials) +``` + +Set the credentials for this request. + diff --git a/kubernetes-control-plane/hooks/relations/gcp-integration/docs/requires.md b/kubernetes-control-plane/hooks/relations/gcp-integration/docs/requires.md new file mode 100644 index 0000000..36e23c2 --- /dev/null +++ b/kubernetes-control-plane/hooks/relations/gcp-integration/docs/requires.md @@ -0,0 +1,140 @@ +

requires

+ + +This is the requires side of the interface layer, for use in charms that +wish to request integration with GCP native features. The integration will +be provided by the GCP integration charm, which allows the requiring charm +to not require cloud credentials itself and not have a lot of GCP specific +API code. + +The flags that are set by the requires side of this interface are: + +* **`endpoint.{endpoint_name}.joined`** This flag is set when the relation + has been joined, and the charm should then use the methods documented below + to request specific GCP features. This flag is automatically removed if + the relation is broken. It should not be removed by the charm. + +* **`endpoint.{endpoint_name}.ready`** This flag is set once the requested + features have been enabled for the GCP instance on which the charm is + running. This flag is automatically removed if new integration features + are requested. It should not be removed by the charm. + +

GCPIntegrationRequires

+ +```python +GCPIntegrationRequires(self, *args, **kwargs) +``` + +Interface to request integration access. + +Note that due to resource limits and permissions granularity, policies are +limited to being applied at the charm level. That means that, if any +permissions are requested (i.e., any of the enable methods are called), +what is granted will be the sum of those ever requested by any instance of +the charm on this cloud. + +Labels, on the other hand, will be instance specific. + +Example usage: + +```python +from charms.reactive import when, endpoint_from_flag + +@when('endpoint.gcp.joined') +def request_gcp_integration(): + gcp = endpoint_from_flag('endpoint.gcp.joined') + gcp.label_instance({ + 'tag1': 'value1', + 'tag2': None, + }) + gcp.request_load_balancer_management() + # ... + +@when('endpoint.gcp.ready') +def gcp_integration_ready(): + update_config_enable_gcp() +``` + +

instance

+ + +This unit's instance name. + +

is_ready

+ + +Whether or not the request for this instance has been completed. + +

zone

+ + +The zone this unit is in. + +

label_instance

+ +```python +GCPIntegrationRequires.label_instance(self, labels) +``` + +Request that the given labels be applied to this instance. + +__Parameters__ + +- __`labels` (dict)__: Mapping of labels names to values. + +

enable_instance_inspection

+ +```python +GCPIntegrationRequires.enable_instance_inspection(self) +``` + +Request the ability to inspect instances. + +

enable_network_management

+ +```python +GCPIntegrationRequires.enable_network_management(self) +``` + +Request the ability to manage networking. + +

enable_security_management

+ +```python +GCPIntegrationRequires.enable_security_management(self) +``` + +Request the ability to manage security (e.g., firewalls). + +

enable_block_storage_management

+ +```python +GCPIntegrationRequires.enable_block_storage_management(self) +``` + +Request the ability to manage block storage. + +

enable_dns_management

+ +```python +GCPIntegrationRequires.enable_dns_management(self) +``` + +Request the ability to manage DNS. + +

enable_object_storage_access

+ +```python +GCPIntegrationRequires.enable_object_storage_access(self) +``` + +Request the ability to access object storage. + +

enable_object_storage_management

+ +```python +GCPIntegrationRequires.enable_object_storage_management(self) +``` + +Request the ability to manage object storage. + diff --git a/kubernetes-control-plane/hooks/relations/gcp-integration/interface.yaml b/kubernetes-control-plane/hooks/relations/gcp-integration/interface.yaml new file mode 100644 index 0000000..9966e3f --- /dev/null +++ b/kubernetes-control-plane/hooks/relations/gcp-integration/interface.yaml @@ -0,0 +1,4 @@ +name: gcp-integration +summary: Interface for connecting to the GCP integrator charm. +version: 1 +maintainer: Cory Johns diff --git a/kubernetes-control-plane/hooks/relations/gcp-integration/make_docs b/kubernetes-control-plane/hooks/relations/gcp-integration/make_docs new file mode 100644 index 0000000..bd4e54e --- /dev/null +++ b/kubernetes-control-plane/hooks/relations/gcp-integration/make_docs @@ -0,0 +1,20 @@ +#!.tox/py3/bin/python + +import sys +from shutil import rmtree +from unittest.mock import patch + +import pydocmd.__main__ + + +with patch('charmhelpers.core.hookenv.metadata') as metadata: + metadata.return_value = { + 'requires': {'gcp': {'interface': 'gcp-integration'}}, + 'provides': {'gcp': {'interface': 'gcp-integration'}}, + } + sys.path.insert(0, '.') + print(sys.argv) + if len(sys.argv) == 1: + sys.argv.extend(['build']) + pydocmd.__main__.main() + rmtree('_build') diff --git a/kubernetes-control-plane/hooks/relations/gcp-integration/provides.py b/kubernetes-control-plane/hooks/relations/gcp-integration/provides.py new file mode 100644 index 0000000..ba34b0d --- /dev/null +++ b/kubernetes-control-plane/hooks/relations/gcp-integration/provides.py @@ -0,0 +1,253 @@ +""" +This is the provides side of the interface layer, for use only by the GCP +integration charm itself. + +The flags that are set by the provides side of this interface are: + +* **`endpoint.{endpoint_name}.requested`** This flag is set when there is + a new or updated request by a remote unit for GCP integration features. 
+ The GCP integration charm should then iterate over each request, perform + whatever actions are necessary to satisfy those requests, and then mark + them as complete. +""" + +from operator import attrgetter + +from charms.reactive import Endpoint +from charms.reactive import when +from charms.reactive import toggle_flag, clear_flag + + +class GCPIntegrationProvides(Endpoint): + """ + Example usage: + + ```python + from charms.reactive import when, endpoint_from_flag + from charms import layer + + @when('endpoint.gcp.requests-pending') + def handle_requests(): + gcp = endpoint_from_flag('endpoint.gcp.requests-pending') + for request in gcp.requests: + if request.instance_labels: + layer.gcp.label_instance( + request.instance, + request.zone, + request.instance_labels) + if request.requested_load_balancer_management: + layer.gcp.enable_load_balancer_management( + request.charm, + request.instance, + request.zone, + ) + # ... + gcp.mark_completed() + ``` + """ + + @when('endpoint.{endpoint_name}.changed') + def check_requests(self): + toggle_flag(self.expand_name('requests-pending'), + len(self.requests) > 0) + clear_flag(self.expand_name('changed')) + + @property + def requests(self): + """ + A list of the new or updated #IntegrationRequests that + have been made. + """ + if not hasattr(self, '_requests'): + all_requests = [IntegrationRequest(unit) + for unit in self.all_joined_units] + is_changed = attrgetter('is_changed') + self._requests = list(filter(is_changed, all_requests)) + return self._requests + + @property + def relation_ids(self): + """ + A list of the IDs of all established relations. + """ + return [relation.relation_id for relation in self.relations] + + def get_departed_charms(self): + """ + Get a list of all charms that have had all units depart since the + last time this was called. 
+ """ + joined_charms = {unit.received['charm'] + for unit in self.all_joined_units + if unit.received['charm']} + departed_charms = [unit.received['charm'] + for unit in self.all_departed_units + if unit.received['charm'] not in joined_charms] + self.all_departed_units.clear() + return departed_charms + + def mark_completed(self): + """ + Mark all requests as completed and remove the `requests-pending` flag. + """ + for request in self.requests: + request.mark_completed() + clear_flag(self.expand_name('requests-pending')) + self._requests = [] + + +class IntegrationRequest: + """ + A request for integration from a single remote unit. + """ + def __init__(self, unit): + self._unit = unit + + @property + def _to_publish(self): + return self._unit.relation.to_publish + + @property + def _completed(self): + return self._to_publish.get('completed', {}) + + @property + def _requested(self): + return self._unit.received['requested'] + + @property + def is_changed(self): + """ + Whether this request has changed since the last time it was + marked completed (if ever). + """ + if not all([self.charm, self.instance, self.zone, self._requested]): + return False + return self._completed.get(self.instance) != self._requested + + def mark_completed(self): + """ + Mark this request as having been completed. + """ + completed = self._completed + completed[self.instance] = self._requested + self._to_publish['completed'] = completed # have to explicitly update + + def set_credentials(self, credentials): + """ + Set the credentials for this request. + """ + self._unit.relation.to_publish['credentials'] = credentials + + @property + def has_credentials(self): + """ + Whether or not credentials have been set via `set_credentials`. + """ + return 'credentials' in self._unit.relation.to_publish + + @property + def relation_id(self): + """ + The ID of the relation for the unit making the request. 
+ """ + return self._unit.relation.relation_id + + @property + def unit_name(self): + """ + The name of the unit making the request. + """ + return self._unit.unit_name + + @property + def application_name(self): + """ + The name of the application making the request. + """ + return self._unit.application_name + + @property + def charm(self): + """ + The charm name reported for this request. + """ + return self._unit.received['charm'] + + @property + def instance(self): + """ + The instance name reported for this request. + """ + return self._unit.received['instance'] + + @property + def zone(self): + """ + The zone reported for this request. + """ + return self._unit.received['zone'] + + @property + def model_uuid(self): + """ + The UUID of the model containing the application making this request. + """ + return self._unit.received['model-uuid'] + + @property + def instance_labels(self): + """ + Mapping of label names to values to apply to this instance. + """ + # uses dict() here to make a copy, just to be safe + return dict(self._unit.received.get('instance-labels', {})) + + @property + def requested_instance_inspection(self): + """ + Flag indicating whether the ability to inspect instances was requested. + """ + return bool(self._unit.received['enable-instance-inspection']) + + @property + def requested_network_management(self): + """ + Flag indicating whether the ability to manage networking was requested. + """ + return bool(self._unit.received['enable-network-management']) + + @property + def requested_security_management(self): + """ + Flag indicating whether security management was requested. + """ + return bool(self._unit.received['enable-security-management']) + + @property + def requested_block_storage_management(self): + """ + Flag indicating whether block storage management was requested. 
+ """ + return bool(self._unit.received['enable-block-storage-management']) + + @property + def requested_dns_management(self): + """ + Flag indicating whether DNS management was requested. + """ + return bool(self._unit.received['enable-dns-management']) + + @property + def requested_object_storage_access(self): + """ + Flag indicating whether object storage access was requested. + """ + return bool(self._unit.received['enable-object-storage-access']) + + @property + def requested_object_storage_management(self): + """ + Flag indicating whether object storage management was requested. + """ + return bool(self._unit.received['enable-object-storage-management']) diff --git a/kubernetes-control-plane/hooks/relations/gcp-integration/pydocmd.yml b/kubernetes-control-plane/hooks/relations/gcp-integration/pydocmd.yml new file mode 100644 index 0000000..9ef5e78 --- /dev/null +++ b/kubernetes-control-plane/hooks/relations/gcp-integration/pydocmd.yml @@ -0,0 +1,16 @@ +site_name: 'GCP Integration Interface' + +generate: + - requires.md: + - requires + - requires.GCPIntegrationRequires+ + - provides.md: + - provides + - provides.GCPIntegrationProvides+ + - provides.IntegrationRequest+ + +pages: + - Requires: requires.md + - Provides: provides.md + +gens_dir: docs diff --git a/kubernetes-control-plane/hooks/relations/gcp-integration/requires.py b/kubernetes-control-plane/hooks/relations/gcp-integration/requires.py new file mode 100644 index 0000000..bbd191f --- /dev/null +++ b/kubernetes-control-plane/hooks/relations/gcp-integration/requires.py @@ -0,0 +1,227 @@ +""" +This is the requires side of the interface layer, for use in charms that +wish to request integration with GCP native features. The integration will +be provided by the GCP integration charm, which allows the requiring charm +to not require cloud credentials itself and not have a lot of GCP specific +API code. 
+ +The flags that are set by the requires side of this interface are: + +* **`endpoint.{endpoint_name}.joined`** This flag is set when the relation + has been joined, and the charm should then use the methods documented below + to request specific GCP features. This flag is automatically removed if + the relation is broken. It should not be removed by the charm. + +* **`endpoint.{endpoint_name}.ready`** This flag is set once the requested + features have been enabled for the GCP instance on which the charm is + running. This flag is automatically removed if new integration features + are requested. It should not be removed by the charm. +""" + + +import os +import random +import string +from urllib.parse import urljoin +from urllib.request import urlopen, Request + +from charmhelpers.core import hookenv +from charmhelpers.core import unitdata + +from charms.reactive import Endpoint +from charms.reactive import when, when_not +from charms.reactive import clear_flag, toggle_flag + + +# block size to read data from GCP metadata service +# (realistically, just needs to be bigger than ~20 chars) +READ_BLOCK_SIZE = 2048 + + +class GCPIntegrationRequires(Endpoint): + """ + Interface to request integration access. + + Note that due to resource limits and permissions granularity, policies are + limited to being applied at the charm level. That means that, if any + permissions are requested (i.e., any of the enable methods are called), + what is granted will be the sum of those ever requested by any instance of + the charm on this cloud. + + Labels, on the other hand, will be instance specific. + + Example usage: + + ```python + from charms.reactive import when, endpoint_from_flag + + @when('endpoint.gcp.joined') + def request_gcp_integration(): + gcp = endpoint_from_flag('endpoint.gcp.joined') + gcp.label_instance({ + 'tag1': 'value1', + 'tag2': None, + }) + gcp.request_load_balancer_management() + # ... 
+ + @when('endpoint.gcp.ready') + def gcp_integration_ready(): + update_config_enable_gcp() + ``` + """ + # https://cloud.google.com/compute/docs/storing-retrieving-metadata + _metadata_url = 'http://metadata.google.internal/computeMetadata/v1/' + _instance_url = urljoin(_metadata_url, 'instance/name') + _zone_url = urljoin(_metadata_url, 'instance/zone') + _metadata_headers = {'Metadata-Flavor': 'Google'} + + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + self._instance = None + self._zone = None + + @property + def _received(self): + """ + Helper to streamline access to received data since we expect to only + ever be connected to a single GCP integration application with a + single unit. + """ + return self.relations[0].joined_units.received + + @property + def _to_publish(self): + """ + Helper to streamline access to received data since we expect to only + ever be connected to a single GCP integration application with a + single unit. + """ + return self.relations[0].to_publish + + @when('endpoint.{endpoint_name}.joined') + def send_instance_info(self): + self._to_publish['charm'] = hookenv.charm_name() + self._to_publish['instance'] = self.instance + self._to_publish['zone'] = self.zone + self._to_publish['model-uuid'] = os.environ['JUJU_MODEL_UUID'] + + @when('endpoint.{endpoint_name}.changed') + def check_ready(self): + # My middle name is ready. No, that doesn't sound right. + # I eat ready for breakfast. + toggle_flag(self.expand_name('ready'), self.is_ready) + clear_flag(self.expand_name('changed')) + + @when_not('endpoint.{endpoint_name}.joined') + def remove_ready(self): + clear_flag(self.expand_name('ready')) + + @property + def instance(self): + """ + This unit's instance name. 
+ """ + if self._instance is None: + cache_key = self.expand_name('instance') + cached = unitdata.kv().get(cache_key) + if cached: + self._instance = cached + else: + req = Request(self._instance_url, + headers=self._metadata_headers) + with urlopen(req) as fd: + instance = fd.read(READ_BLOCK_SIZE).decode('utf8').strip() + self._instance = instance + unitdata.kv().set(cache_key, self._instance) + return self._instance + + @property + def zone(self): + """ + The zone this unit is in. + """ + if self._zone is None: + cache_key = self.expand_name('zone') + cached = unitdata.kv().get(cache_key) + if cached: + self._zone = cached + else: + req = Request(self._zone_url, + headers=self._metadata_headers) + with urlopen(req) as fd: + zone = fd.read(READ_BLOCK_SIZE).decode('utf8').strip() + self._zone = zone.split('/')[-1] + unitdata.kv().set(cache_key, self._zone) + return self._zone + + @property + def is_ready(self): + """ + Whether or not the request for this instance has been completed. + """ + requested = self._to_publish['requested'] + completed = self._received.get('completed', {}).get(self.instance) + return requested and requested == completed + + @property + def credentials(self): + return self._received['credentials'] + + def _request(self, keyvals): + alphabet = string.ascii_letters + string.digits + nonce = ''.join(random.choice(alphabet) for _ in range(8)) + self._to_publish.update(keyvals) + self._to_publish['requested'] = nonce + clear_flag(self.expand_name('ready')) + + def label_instance(self, labels): + """ + Request that the given labels be applied to this instance. + + # Parameters + `labels` (dict): Mapping of labels names to values. + """ + self._request({'instance-labels': dict(labels)}) + + def enable_instance_inspection(self): + """ + Request the ability to inspect instances. + """ + self._request({'enable-instance-inspection': True}) + + def enable_network_management(self): + """ + Request the ability to manage networking. 
+ """ + self._request({'enable-network-management': True}) + + def enable_security_management(self): + """ + Request the ability to manage security (e.g., firewalls). + """ + self._request({'enable-security-management': True}) + + def enable_block_storage_management(self): + """ + Request the ability to manage block storage. + """ + self._request({'enable-block-storage-management': True}) + + def enable_dns_management(self): + """ + Request the ability to manage DNS. + """ + self._request({'enable-dns': True}) + + def enable_object_storage_access(self): + """ + Request the ability to access object storage. + """ + self._request({'enable-object-storage-access': True}) + + def enable_object_storage_management(self): + """ + Request the ability to manage object storage. + """ + self._request({'enable-object-storage-management': True}) diff --git a/kubernetes-control-plane/hooks/relations/grafana-dashboard/.gitignore b/kubernetes-control-plane/hooks/relations/grafana-dashboard/.gitignore new file mode 100644 index 0000000..01a6a44 --- /dev/null +++ b/kubernetes-control-plane/hooks/relations/grafana-dashboard/.gitignore @@ -0,0 +1,3 @@ +.docs +__pycache__ +*.pyc diff --git a/kubernetes-control-plane/hooks/relations/grafana-dashboard/LICENSE b/kubernetes-control-plane/hooks/relations/grafana-dashboard/LICENSE new file mode 100644 index 0000000..d645695 --- /dev/null +++ b/kubernetes-control-plane/hooks/relations/grafana-dashboard/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. 
+ + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of 
the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/kubernetes-control-plane/hooks/relations/grafana-dashboard/README.md b/kubernetes-control-plane/hooks/relations/grafana-dashboard/README.md new file mode 100644 index 0000000..b111350 --- /dev/null +++ b/kubernetes-control-plane/hooks/relations/grafana-dashboard/README.md @@ -0,0 +1,92 @@ +# Interface grafana-dashboard + +This is a [Juju][] interface layer that enables a charm which provides +dashboards to be imported into Grafana. + +You can download existing [Grafana Dashboards][] or use the [Grafana Dashboard +Reference][] to create your own. 
+ +# Example Usage + +First, you must define the relation endpoint in your charm's `metadata.yaml`: + +```yaml +provides: + grafana: + interface: grafana-dashboard +``` + +Next, you must ensure the interface layer is included in your `layer.yaml`: + +```yaml +includes: + - interface:grafana-dashboard +``` + +Then, in your reactive code, add the following, modifying the dashboard data as +your charm needs: + +```python +import json +from charms.reactive import endpoint_from_flag + + +@when('endpoint.grafana.joined') +def register_grafana_dashboards(): + grafana = endpoint_from_flag('endpoint.grafana.joined') + for dashboard_file in Path('files/grafana').glob('*.json'): + dashboard = json.loads(dashboard_file.read_text()) + grafana.register_dashboard(name=dashboard_file.stem, + dashboard=dashboard) +``` + + + +# Reference + +* [common.md](common.md) + * [ImportRequest](docs/common.md#importrequest) + * [egress_subnets](docs/common.md#importrequest-egress_subnets) + * [ingress_address](docs/common.md#importrequest-ingress_address) + * [is_created](docs/common.md#importrequest-is_created) + * [is_received](docs/common.md#importrequest-is_received) + * [respond](docs/common.md#importrequest-respond) + * [ImportResponse](docs/common.md#importresponse) + * [name](docs/common.md#importresponse-name) +* [provides.md](provides.md) + * [GrafanaDashboardProvides](docs/provides.md#grafanadashboardprovides) + * [all_departed_units](docs/provides.md#grafanadashboardprovides-all_departed_units) + * [all_joined_units](docs/provides.md#grafanadashboardprovides-all_joined_units) + * [all_units](docs/provides.md#grafanadashboardprovides-all_units) + * [endpoint_name](docs/provides.md#grafanadashboardprovides-endpoint_name) + * [failed_imports](docs/provides.md#grafanadashboardprovides-failed_imports) + * [is_joined](docs/provides.md#grafanadashboardprovides-is_joined) + * [joined](docs/provides.md#grafanadashboardprovides-joined) + * 
[manage_flags](docs/provides.md#grafanadashboardprovides-manage_flags) + * [register_dashboard](docs/provides.md#grafanadashboardprovides-register_dashboard) + * [relations](docs/provides.md#grafanadashboardprovides-relations) + * [requests](docs/provides.md#grafanadashboardprovides-requests) + * [responses](docs/provides.md#grafanadashboardprovides-responses) +* [requires.md](requires.md) + * [GrafanaDashboardRequires](docs/requires.md#grafanadashboardrequires) + * [all_departed_units](docs/requires.md#grafanadashboardrequires-all_departed_units) + * [all_joined_units](docs/requires.md#grafanadashboardrequires-all_joined_units) + * [all_requests](docs/requires.md#grafanadashboardrequires-all_requests) + * [all_units](docs/requires.md#grafanadashboardrequires-all_units) + * [endpoint_name](docs/requires.md#grafanadashboardrequires-endpoint_name) + * [is_joined](docs/requires.md#grafanadashboardrequires-is_joined) + * [joined](docs/requires.md#grafanadashboardrequires-joined) + * [manage_flags](docs/requires.md#grafanadashboardrequires-manage_flags) + * [new_requests](docs/requires.md#grafanadashboardrequires-new_requests) + * [relations](docs/requires.md#grafanadashboardrequires-relations) + + + +# Contact Information + +Maintainer: Cory Johns <Cory.Johns@canonical.com> + + +[Juju]: https://jujucharms.com +[Grafana Dashboards]: https://grafana.com/grafana/dashboards +[Grafana Dashboard Reference]: https://grafana.com/docs/reference/dashboard/ diff --git a/kubernetes-control-plane/hooks/relations/grafana-dashboard/__init__.py b/kubernetes-control-plane/hooks/relations/grafana-dashboard/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/kubernetes-control-plane/hooks/relations/grafana-dashboard/common.py b/kubernetes-control-plane/hooks/relations/grafana-dashboard/common.py new file mode 100644 index 0000000..99db2d8 --- /dev/null +++ b/kubernetes-control-plane/hooks/relations/grafana-dashboard/common.py @@ -0,0 +1,38 @@ +from charms.reactive import 
BaseRequest, BaseResponse, Field + + +class ImportResponse(BaseResponse): + success = Field(description='Whether or not the import succeeded') + reason = Field(description='If failed, a description of why') + + @property + def name(self): + """ + The name given when the import was requested. + """ + return self.request.name + + +class ImportRequest(BaseRequest): + RESPONSE_CLASS = ImportResponse + + name = Field(description=""" + Name of the dashboard to import. Informational only, so that + you can tell which dashboard request this was, e.g. to check + for success or failure. + """) + + dashboard = Field(description=""" + Data structure defining the dashboard. Must be JSON + serializable. (Note: This should *not* be pre-serialized + JSON.) + """) + + def respond(self, success, reason=None): + """ + Acknowledge this request, and indicate success or failure with an + optional explanation. + """ + # wrap the base respond method to make the success field required and + # positional, as well as to provide a better doc string + super().respond(success=success, reason=reason) diff --git a/kubernetes-control-plane/hooks/relations/grafana-dashboard/copyright b/kubernetes-control-plane/hooks/relations/grafana-dashboard/copyright new file mode 100644 index 0000000..69768db --- /dev/null +++ b/kubernetes-control-plane/hooks/relations/grafana-dashboard/copyright @@ -0,0 +1,16 @@ +Format: http://dep.debian.net/deps/dep5/ + +Files: * +Copyright: Copyright 2019, Canonical Ltd., All Rights Reserved. +License: Apache License 2.0 + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + . + http://www.apache.org/licenses/LICENSE-2.0 + . + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ See the License for the specific language governing permissions and + limitations under the License. diff --git a/kubernetes-control-plane/hooks/relations/grafana-dashboard/docs/common.md b/kubernetes-control-plane/hooks/relations/grafana-dashboard/docs/common.md new file mode 100644 index 0000000..ab7de1d --- /dev/null +++ b/kubernetes-control-plane/hooks/relations/grafana-dashboard/docs/common.md @@ -0,0 +1,50 @@ +# `class ImportRequest(BaseRequest)` + +Base class for requests using the request / response pattern. + +Subclasses **must** set the ``RESPONSE_CLASS`` attribute to a subclass of +the :class:`BaseResponse` which defines the fields that the response will +use. They must also define additional attributes as :class:`Field`s. + +For example:: + + class TLSResponse(BaseResponse): + key = Field('Private key for the cert') + cert = Field('Public cert info') + + + class TLSRequest(BaseRequest): + RESPONSE_CLASS = TLSResponse + + common_name = Field('Common Name (CN) for the cert to be created') + sans = Field('List of Subject Alternative Names (SANs)') + +## `egress_subnets` + +Subnets over which network traffic to the requester will flow. + +## `ingress_address` + +Address to use if a connection to the requester is required. + +## `is_created` + +Whether this request was created by this side of the relation. + +## `is_received` + +Whether this request was received by the other side of the relation. + +## `def respond(self, success, reason=None)` + +Acknowledge this request, and indicate success or failure with an +optional explanation. + +# `class ImportResponse(BaseResponse)` + +Base class for responses using the request / response pattern. + +## `name` + +The name given when the import was requested. 
+ diff --git a/kubernetes-control-plane/hooks/relations/grafana-dashboard/docs/provides.md b/kubernetes-control-plane/hooks/relations/grafana-dashboard/docs/provides.md new file mode 100644 index 0000000..cc1f3dc --- /dev/null +++ b/kubernetes-control-plane/hooks/relations/grafana-dashboard/docs/provides.md @@ -0,0 +1,120 @@ +# `class GrafanaDashboardProvides(RequesterEndpoint)` + +Base class for Endpoints that create requests in the request / response +pattern. + +Subclasses **must** set the ``REQUEST_CLASS`` attribute to a subclass +of :class:`BaseRequest` which defines the fields the request will use. + +## `all_departed_units` + +Collection of all units that were previously part of any relation on +this endpoint but which have since departed. + +This collection is persistent and mutable. The departed units will +be kept until they are explicitly removed, to allow for reasonable +cleanup of units that have left. + +Example: You need to run a command each time a unit departs the relation. + +.. code-block:: python + + @when('endpoint.{endpoint_name}.departed') + def handle_departed_unit(self): + for name, unit in self.all_departed_units.items(): + # run the command to remove `unit` from the cluster + # .. + self.all_departed_units.clear() + clear_flag(self.expand_name('departed')) + +Once a unit is departed, it will no longer show up in +:attr:`all_joined_units`. Note that units are considered departed as +soon as the departed hook is entered, which differs slightly from how +the Juju primitives behave (departing units are still returned from +``related-units`` until after the departed hook is complete). + +This collection is a :class:`KeyList`, so can be used as a mapping to +look up units by their unit name, or iterated or accessed by index. + +## `all_joined_units` + +A list view of all the units of all relations attached to this +:class:`~charms.reactive.endpoints.Endpoint`. 
+ +This is actually a +:class:`~charms.reactive.endpoints.CombinedUnitsView`, so the units +will be in order by relation ID and then unit name, and you can access a +merged view of all the units' data as a single mapping. You should be +very careful when using the merged data collections, however, and +consider carefully what will happen when the endpoint has multiple +relations and multiple remote units on each. It is probably better to +iterate over each unit and handle its data individually. See +:class:`~charms.reactive.endpoints.CombinedUnitsView` for an +explanation of how the merged data collections work. + +Note that, because a given application might be related multiple times +on a given endpoint, units may show up in this collection more than +once. + +## `all_units` + +.. deprecated:: 0.6.1 + Use :attr:`all_joined_units` instead + +## `endpoint_name` + +Relation name of this endpoint. + +## `failed_imports` + +A list of requests that failed to import. + +## `is_joined` + +Whether this endpoint has remote applications attached to it. + +## `joined` + +.. deprecated:: 0.6.3 + Use :attr:`is_joined` instead + +## `def manage_flags(self)` + +Method that subclasses can override to perform any flag management +needed during startup. + +This will be called automatically after the framework-managed automatic +flags have been updated. + +## `def register_dashboard(self, name, dashboard)` + +Request a dashboard to be imported. + +:param name: Name of dashboard. Informational only, so that you can + tell which dashboard request this was, e.g. to check for success or + failure. +:param dashboard: Data structure defining the dashboard. Must be JSON + serializable. (Note: This should *not* be pre-serialized JSON.) + +## `relations` + +Collection of :class:`Relation` instances that are established for +this :class:`Endpoint`. + +This is a :class:`KeyList`, so it can be iterated and indexed as a list, +or you can look up relations by their ID. 
For example:: + + rel0 = endpoint.relations[0] + assert rel0 is endpoint.relations[rel0.relation_id] + assert all(rel is endpoint.relations[rel.relation_id] + for rel in endpoint.relations) + print(', '.join(endpoint.relations.keys())) + +## `requests` + +A list of all requests which have been submitted. + +## `responses` + +A list of all responses which have been received. + diff --git a/kubernetes-control-plane/hooks/relations/grafana-dashboard/docs/requires.md b/kubernetes-control-plane/hooks/relations/grafana-dashboard/docs/requires.md new file mode 100644 index 0000000..c84f1bc --- /dev/null +++ b/kubernetes-control-plane/hooks/relations/grafana-dashboard/docs/requires.md @@ -0,0 +1,109 @@ +# `class GrafanaDashboardRequires(ResponderEndpoint)` + +Base class for Endpoints that respond to requests in the request / response +pattern. + +Subclasses **must** set the ``REQUEST_CLASS`` attribute to a subclass +of :class:`BaseRequest` which defines the fields the request will use. + +## `all_departed_units` + +Collection of all units that were previously part of any relation on +this endpoint but which have since departed. + +This collection is persistent and mutable. The departed units will +be kept until they are explicitly removed, to allow for reasonable +cleanup of units that have left. + +Example: You need to run a command each time a unit departs the relation. + +.. code-block:: python + + @when('endpoint.{endpoint_name}.departed') + def handle_departed_unit(self): + for name, unit in self.all_departed_units.items(): + # run the command to remove `unit` from the cluster + # .. + self.all_departed_units.clear() + clear_flag(self.expand_name('departed')) + +Once a unit is departed, it will no longer show up in +:attr:`all_joined_units`. 
Note that units are considered departed as +soon as the departed hook is entered, which differs slightly from how +the Juju primitives behave (departing units are still returned from +``related-units`` until after the departed hook is complete). + +This collection is a :class:`KeyList`, so can be used as a mapping to +look up units by their unit name, or iterated or accessed by index. + +## `all_joined_units` + +A list view of all the units of all relations attached to this +:class:`~charms.reactive.endpoints.Endpoint`. + +This is actually a +:class:`~charms.reactive.endpoints.CombinedUnitsView`, so the units +will be in order by relation ID and then unit name, and you can access a +merged view of all the units' data as a single mapping. You should be +very careful when using the merged data collections, however, and +consider carefully what will happen when the endpoint has multiple +relations and multiple remote units on each. It is probably better to +iterate over each unit and handle its data individually. See +:class:`~charms.reactive.endpoints.CombinedUnitsView` for an +explanation of how the merged data collections work. + +Note that, because a given application might be related multiple times +on a given endpoint, units may show up in this collection more than +once. + +## `all_requests` + +A list of all requests, including ones which have been responded to. + +## `all_units` + +.. deprecated:: 0.6.1 + Use :attr:`all_joined_units` instead + +## `endpoint_name` + +Relation name of this endpoint. + +## `is_joined` + +Whether this endpoint has remote applications attached to it. + +## `joined` + +.. deprecated:: 0.6.3 + Use :attr:`is_joined` instead + +## `def manage_flags(self)` + +Method that subclasses can override to perform any flag management +needed during startup. + +This will be called automatically after the framework-managed automatic +flags have been updated. + +## `new_requests` + +A list of requests which have not been responded. 
+ +Requests should be handled by the charm and then responded to by +calling ``request.respond(...)``. + +## `relations` + +Collection of :class:`Relation` instances that are established for +this :class:`Endpoint`. + +This is a :class:`KeyList`, so it can be iterated and indexed as a list, +or you can look up relations by their ID. For example:: + + rel0 = endpoint.relations[0] + assert rel0 is endpoint.relations[rel0.relation_id] + assert all(rel is endpoint.relations[rel.relation_id] + for rel in endpoint.relations) + print(', '.join(endpoint.relations.keys())) + diff --git a/kubernetes-control-plane/hooks/relations/grafana-dashboard/interface.yaml b/kubernetes-control-plane/hooks/relations/grafana-dashboard/interface.yaml new file mode 100644 index 0000000..0ee9ef8 --- /dev/null +++ b/kubernetes-control-plane/hooks/relations/grafana-dashboard/interface.yaml @@ -0,0 +1,6 @@ +name: grafana-dashboard +summary: Interface for importing dashboards into Grafana +version: 1 +maintainer: "Cory Johns " +exclude: + - .docs diff --git a/kubernetes-control-plane/hooks/relations/grafana-dashboard/provides.py b/kubernetes-control-plane/hooks/relations/grafana-dashboard/provides.py new file mode 100644 index 0000000..670ded9 --- /dev/null +++ b/kubernetes-control-plane/hooks/relations/grafana-dashboard/provides.py @@ -0,0 +1,42 @@ +from charms.reactive import ( + toggle_flag, + RequesterEndpoint, +) + +from .common import ImportRequest + + +class GrafanaDashboardProvides(RequesterEndpoint): + REQUEST_CLASS = ImportRequest + + def manage_flags(self): + super().manage_flags() + toggle_flag(self.expand_name('endpoint.{endpoint_name}.failed'), + self.is_joined and self.failed_imports) + + @property + def failed_imports(self): + """ + A list of requests that failed to import. + """ + return [response + for response in self.responses + if not response.success] + + def register_dashboard(self, name, dashboard): + """ + Request a dashboard to be imported. 
+ + :param name: Name of dashboard. Informational only, so that you can + tell which dashboard request this was, e.g. to check for success or + failure. + :param dashboard: Data structure defining the dashboard. Must be JSON + serializable. (Note: This should *not* be pre-serialized JSON.) + """ + # we might be connected to multiple grafanas for some strange + # reason, so just send the dashboard to all of them + for relation in self.relations: + ImportRequest.create_or_update(match_fields=['name'], + relation=relation, + name=name, + dashboard=dashboard) diff --git a/kubernetes-control-plane/hooks/relations/grafana-dashboard/requires.py b/kubernetes-control-plane/hooks/relations/grafana-dashboard/requires.py new file mode 100644 index 0000000..de696c1 --- /dev/null +++ b/kubernetes-control-plane/hooks/relations/grafana-dashboard/requires.py @@ -0,0 +1,15 @@ +from charms.reactive import ( + toggle_flag, + ResponderEndpoint, +) + +from .common import ImportRequest + + +class GrafanaDashboardRequires(ResponderEndpoint): + REQUEST_CLASS = ImportRequest + + def manage_flags(self): + super().manage_flags() + toggle_flag(self.expand_name('endpoint.{endpoint_name}.requests'), + self.is_joined and self.new_requests) diff --git a/kubernetes-control-plane/hooks/relations/hacluster/.stestr.conf b/kubernetes-control-plane/hooks/relations/hacluster/.stestr.conf new file mode 100644 index 0000000..5fcccac --- /dev/null +++ b/kubernetes-control-plane/hooks/relations/hacluster/.stestr.conf @@ -0,0 +1,3 @@ +[DEFAULT] +test_path=./unit_tests +top_dir=./ diff --git a/kubernetes-control-plane/hooks/relations/hacluster/README.md b/kubernetes-control-plane/hooks/relations/hacluster/README.md new file mode 100644 index 0000000..e8147ac --- /dev/null +++ b/kubernetes-control-plane/hooks/relations/hacluster/README.md @@ -0,0 +1,90 @@ +# Overview + +This interface handles the communication with the hacluster subordinate +charm using the `ha` interface protocol. 
+ +# Usage + +## Requires + +The interface layer will set the following reactive states, as appropriate: + + * `{relation_name}.connected` The relation is established and ready for + the local charm to configure the hacluster subordinate charm. The + configuration of the resources to manage for the hacluster charm + can be managed via one of the following methods: + + * `manage_resources` method + * `bind_on` method + + Configuration of the managed resources within the hacluster can be + managed by passing `common.CRM` object definitions to the + `manage_resources` method. + + * `{relation_name}.available` The hacluster is up and ready. + +For example: +```python +from charms.reactive import when, when_not +from charms.reactive import set_state, remove_state + +from relations.hacluster.common import CRM + + +@when('ha.connected') +def cluster_connected(hacluster): + + resources = CRM() + resources.primitive('res_vip', 'ocf:IPAddr2', + params='ip=10.0.3.100 nic=eth0', + op='monitor interval="10s"') + resources.clone('cl_res_vip', 'res_vip') + + hacluster.bind_on(iface='eth0', mcastport=4430) + hacluster.manage_resources(resources) +``` + +Additionally, for more code clarity a custom object implements the interface +defined in common.ResourceDescriptor can be used to simplify the code for +reuse. 
+ +For example: +```python +import ipaddress + +from relation.hacluster.common import CRM +from relation.hacluster.common import ResourceDescriptor + +class VirtualIP(ResourceDescriptor): + def __init__(self, vip, nic='eth0'): + self.vip = vip + self.nic = 'eth0' + + def configure_resource(self, crm): + ipaddr = ipaddress.ip_address(self.vip) + if isinstance(ipaddr, ipaddress.IPv4Address): + res_type = 'ocf:heartbeat:IPAddr2' + res_parms = 'ip={ip} nic={nic}'.format(ip=self.vip, + nic=self.nic) + else: + res_type = 'ocf:heartbeat:IPv6addr' + res_params = 'ipv6addr={ip} nic={nic}'.format(ip=self.vip, + nic=self.nic) + + crm.primitive('res_vip', res_type, params=res_params, + op='monitor interval="10s"') + crm.clone('cl_res_vip', 'res_vip') +``` + +Once the VirtualIP class above has been defined in charm code, it can make +the code a bit cleaner. The example above can thusly be written as: + +```python +@when('ha.connected') +def cluster_connected(hacluster): + resources = CRM() + resources.add(VirtualIP('10.0.3.100')) + + hacluster.bind_on(iface='eth0', mcastport=4430) + hacluster.manage_resources(resources) +``` diff --git a/kubernetes-control-plane/hooks/relations/hacluster/__init__.py b/kubernetes-control-plane/hooks/relations/hacluster/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/kubernetes-control-plane/hooks/relations/hacluster/copyright b/kubernetes-control-plane/hooks/relations/hacluster/copyright new file mode 100644 index 0000000..5a49dcb --- /dev/null +++ b/kubernetes-control-plane/hooks/relations/hacluster/copyright @@ -0,0 +1,21 @@ +Format: http://www.debian.org/doc/packaging-manuals/copyright-format/1.0 + +Files: * +Copyright: 2015, Canonical Ltd. +License: Apache-2.0 + +License: Apache-2.0 + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + . + http://www.apache.org/licenses/LICENSE-2.0 + . 
+ Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + . + On Debian-based systems the full text of the Apache version 2.0 license + can be found in `/usr/share/common-licenses/Apache-2.0'. diff --git a/kubernetes-control-plane/hooks/relations/hacluster/interface.yaml b/kubernetes-control-plane/hooks/relations/hacluster/interface.yaml new file mode 100644 index 0000000..f03f3d7 --- /dev/null +++ b/kubernetes-control-plane/hooks/relations/hacluster/interface.yaml @@ -0,0 +1,16 @@ +name: hacluster +summary: | + Provides the hacluster interface used for configuring Corosync + and Pacemaker services. +maintainer: OpenStack Charmers +ignore: + - '.gitignore' + - '.gitreview' + - '.testr.conf' + - 'test-requirements' + - 'tox.ini' + - 'unit_tests' + - '.zuul.yaml' + - 'setup.cfg' + - 'setup.py' + - '**/ops_ha_interface.py' diff --git a/kubernetes-control-plane/hooks/relations/hacluster/interface_hacluster/__init__.py b/kubernetes-control-plane/hooks/relations/hacluster/interface_hacluster/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/kubernetes-control-plane/hooks/relations/hacluster/interface_hacluster/common.py b/kubernetes-control-plane/hooks/relations/hacluster/interface_hacluster/common.py new file mode 100644 index 0000000..6e23d34 --- /dev/null +++ b/kubernetes-control-plane/hooks/relations/hacluster/interface_hacluster/common.py @@ -0,0 +1,1008 @@ +#!/usr/bin/python +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import hashlib +import ipaddress +import json + + +class ResourceManagement(): + + def data_changed(self, data_id, data, hash_type='md5'): + raise NotImplementedError + + def get_local(self, key, default=None, scope=None): + raise NotImplementedError + + def set_local(self, key=None, value=None, data=None, scope=None, **kwdata): + raise NotImplementedError + + def set_remote(self, key=None, value=None, data=None, scope=None, + **kwdata): + raise NotImplementedError + + def is_clustered(self): + """Has the hacluster charm set clustered? + + The hacluster charm sets cluster=True when it determines it is ready. + Check the relation data for clustered and force a boolean return. 
+ + :returns: boolean + """ + clustered_values = self.get_remote_all('clustered') + if clustered_values: + # There is only ever one subordinate hacluster unit + clustered = clustered_values[0] + # Future versions of hacluster will return a bool + # Current versions return a string + if type(clustered) is bool: + return clustered + elif (clustered is not None and + (clustered.lower() == 'true' or + clustered.lower() == 'yes')): + return True + return False + + def bind_on(self, iface=None, mcastport=None): + relation_data = {} + if iface: + relation_data['corosync_bindiface'] = iface + if mcastport: + relation_data['corosync_mcastport'] = mcastport + + if relation_data and self.data_changed('hacluster-bind_on', + relation_data): + self.set_local(**relation_data) + self.set_remote(**relation_data) + + def manage_resources(self, crm): + """ + Request for the hacluster to manage the resources defined in the + crm object. + + res = CRM() + res.primitive('res_neutron_haproxy', 'lsb:haproxy', + op='monitor interval="5s"') + res.init_services('haproxy') + res.clone('cl_nova_haproxy', 'res_neutron_haproxy') + + hacluster.manage_resources(crm) + + :param crm: CRM() instance - Config object for Pacemaker resources + :returns: None + """ + relation_data = { + 'json_{}'.format(k): json.dumps(v, sort_keys=True) + for k, v in crm.items() + } + if self.data_changed('hacluster-manage_resources', relation_data): + self.set_local(**relation_data) + self.set_remote(**relation_data) + + def bind_resources(self, iface=None, mcastport=None): + """Inform the ha subordinate about each service it should manage. 
The + child class specifies the services via self.ha_resources + + :param iface: string - Network interface to bind to + :param mcastport: int - Multicast port corosync should use for cluster + management traffic + """ + if mcastport is None: + mcastport = 4440 + resources_dict = self.get_local('resources') + self.bind_on(iface=iface, mcastport=mcastport) + if resources_dict: + resources = CRM(**resources_dict) + self.manage_resources(resources) + + def delete_resource(self, resource_name): + resource_dict = self.get_local('resources') + if resource_dict: + resources = CRM(**resource_dict) + else: + resources = CRM() + resources.add_delete_resource(resource_name) + self.set_local(resources=resources) + + def add_vip(self, name, vip, iface=None, netmask=None): + """Add a VirtualIP object for each user specified vip to self.resources + + :param name: string - Name of service + :param vip: string - Virtual IP to be managed + :param iface: string - Network interface to bind vip to + :param netmask: string - Netmask for vip + :returns: None + """ + resource_dict = self.get_local('resources') + if resource_dict: + resources = CRM(**resource_dict) + else: + resources = CRM() + resources.add( + VirtualIP( + name, + vip, + nic=iface, + cidr=netmask,)) + + # Vip Group + group = 'grp_{}_vips'.format(name) + vip_res_group_members = [] + if resource_dict: + vip_resources = resource_dict.get('resources') + if vip_resources: + for vip_res in vip_resources: + if 'vip' in vip_res: + vip_res_group_members.append(vip_res) + resources.group(group, + *sorted(vip_res_group_members)) + + self.set_local(resources=resources) + + def remove_vip(self, name, vip, iface=None): + """Remove a virtual IP + + :param name: string - Name of service + :param vip: string - Virtual IP + :param iface: string - Network interface vip bound to + """ + if iface: + nic_name = iface + else: + nic_name = hashlib.sha1(vip.encode('UTF-8')).hexdigest()[:7] + self.delete_resource('res_{}_{}_vip'.format(name, 
nic_name)) + + def add_init_service(self, name, service, clone=True): + """Add a InitService object for haproxy to self.resources + + :param name: string - Name of service + :param service: string - Name service uses in init system + :returns: None + """ + resource_dict = self.get_local('resources') + if resource_dict: + resources = CRM(**resource_dict) + else: + resources = CRM() + resources.add( + InitService(name, service, clone)) + self.set_local(resources=resources) + + def remove_init_service(self, name, service): + """Remove an init service + + :param name: string - Name of service + :param service: string - Name of service used in init system + """ + res_key = 'res_{}_{}'.format( + name.replace('-', '_'), + service.replace('-', '_')) + self.delete_resource(res_key) + + def add_systemd_service(self, name, service, clone=True): + """Add a SystemdService object to self.resources + + :param name: string - Name of service + :param service: string - Name service uses in systemd + :returns: None + """ + resource_dict = self.get_local('resources') + if resource_dict: + resources = CRM(**resource_dict) + else: + resources = CRM() + resources.add( + SystemdService(name, service, clone)) + self.set_local(resources=resources) + + def remove_systemd_service(self, name, service): + """Remove a systemd service + + :param name: string - Name of service + :param service: string - Name of service used in systemd + """ + res_key = 'res_{}_{}'.format( + name.replace('-', '_'), + service.replace('-', '_')) + self.delete_resource(res_key) + + def add_dnsha(self, name, ip, fqdn, endpoint_type): + """Add a DNS entry to self.resources + + :param name: string - Name of service + :param ip: string - IP address dns entry should resolve to + :param fqdn: string - The DNS entry name + :param endpoint_type: string - Public, private, internal etc + :returns: None + """ + resource_dict = self.get_local('resources') + if resource_dict: + resources = CRM(**resource_dict) + else: + resources 
= CRM() + resources.add( + DNSEntry(name, ip, fqdn, endpoint_type)) + + # DNS Group + group = 'grp_{}_hostnames'.format(name) + dns_res_group_members = [] + if resource_dict: + dns_resources = resource_dict.get('resources') + if dns_resources: + for dns_res in dns_resources: + if 'hostname' in dns_res: + dns_res_group_members.append(dns_res) + resources.group(group, + *sorted(dns_res_group_members)) + + self.set_local(resources=resources) + + def remove_dnsha(self, name, endpoint_type): + """Remove a DNS entry + + :param name: string - Name of service + :param endpoint_type: string - Public, private, internal etc + :returns: None + """ + res_key = 'res_{}_{}_hostname'.format( + self.service_name.replace('-', '_'), + self.endpoint_type) + self.delete_resource(res_key) + + def add_colocation(self, name, score, colo_resources, node_attribute=None): + """Add a colocation directive + + :param name: string - Name of colocation directive + :param score: string - ALWAYS, INFINITY, NEVER, NEGATIVE_INFINITY}. See + CRM.colocation for more details + :param colo_resources: List[string] - List of resource names to + colocate + :param node_attribute: Colocate resources on a set of nodes with this + attribute and not necessarily on the same node. 
+ """ + node_config = {} + if node_attribute: + node_config = { + 'node_attribute': node_attribute} + resource_dict = self.get_local('resources') + if resource_dict: + resources = CRM(**resource_dict) + else: + resources = CRM() + resources.colocation( + name, + score, + *colo_resources, + **node_config) + self.set_local(resources=resources) + + def remove_colocation(self, name): + """Remove a colocation directive + + :param name: string - Name of colocation directive + """ + self.delete_resource(name) + + def get_remote_all(self, key, default=None): + """Return a list of all values presented by remote units for key""" + raise NotImplementedError + + +class CRM(dict): + """ + Configuration object for Pacemaker resources for the HACluster + interface. This class provides access to the supported resources + available in the 'crm configure' within the HACluster. + + See Also + -------- + More documentation is available regarding the definitions of + primitives, clones, and other pacemaker resources at the crmsh + site at http://crmsh.github.io/man + """ + + # Constants provided for ordering constraints (e.g. the kind value) + MANDATORY = "Mandatory" + OPTIONAL = "Optional" + SERIALIZE = "Serialize" + + # Constants defining weights of constraints + INFINITY = "inf" + NEG_INFINITY = "-inf" + + # Constaints aliased to their interpretations for constraints + ALWAYS = INFINITY + NEVER = NEG_INFINITY + + def __init__(self, *args, **kwargs): + self['resources'] = {} + self['delete_resources'] = [] + self['resource_params'] = {} + self['groups'] = {} + self['ms'] = {} + self['orders'] = {} + self['colocations'] = {} + self['clones'] = {} + self['locations'] = {} + self['init_services'] = [] + self['systemd_services'] = [] + super(CRM, self).__init__(*args, **kwargs) + + def primitive(self, name, agent, description=None, **kwargs): + """Configures a primitive resource within Pacemaker. + + A primitive is used to describe a resource which should be managed + by the cluster. 
Primitives consist of a name, the agent type, and + various configuration options to the primitive. For example: + + crm.primitive('www8', 'apache', + params='configfile=/etc/apache/www8.conf', + operations='$id-ref=apache_ops') + + will create the an apache primitive (resource) for the www8 service + hosted by the Apache HTTP server. The parameters specified can either + be provided individually (e.g. a string) or as an iterable. + + The following example shows how to specify multiple ops for a drbd + volume in a Master/Slave configuration:: + + ops = ['monitor role=Master interval=60s', + 'monitor role=Slave interval=300s'] + + crm.primitive('r0', 'ocf:linbit:drbd', + params='drbd_resource=r0', + op=ops) + + Additional arguments may be passed in as kwargs in which the key of + the kwarg is prepended to the value. + + Parameters + ---------- + name: str + the name of the primitive. + agent: str + the type of agent to use to monitor the primitive resource + (e.g. ocf:linbit:drbd). + description: str, optional, kwarg + a description about the resource + params: str or iterable, optional, kwarg + parameters which are provided to the resource agent + meta: str or iterable, optional, kwarg + metadata information for the primitive resource + utilization: str or iterable, optional, kwarg + utilization information for the primitive resource + operations: str or iterable, optional, kwarg + operations information for the primitive resource in id_spec + format (e.g. $id= or $id-ref=) + op: str or iterable, optional, kwarg + op information regarding the primitive resource. 
This takes the + form of ' [= = ...]' + + Returns + ------- + None + + See Also + -------- + http://crmsh.github.io/man/#cmdhelp_configure_primitive + """ + resources = self['resources'] + resources[name] = agent + + specs = '' + if description: + specs = specs + 'description="%s"' % description + + # Use the ordering specified in the crm manual + for key in 'params', 'meta', 'utilization', 'operations', 'op': + if key not in kwargs: + continue + specs = specs + (' %s' % self._parse(key, kwargs[key])) + + if specs: + self['resource_params'][name] = specs + + def _parse(self, prefix, data): + results = '' + if isinstance(data, str): + data = [data] + + first = True + for d in data: + if first: + results = results + ' ' + first = False + results = results + ('%s %s ' % (prefix, d)) + results = results.rstrip() + return results + + def clone(self, name, resource, description=None, **kwargs): + """Creates a resource which should run on all nodes. + + Parameters + ---------- + name: str + the name of the clone + resource: str + the name or id of the resource to clone + description: str, optional + text containing a description for the clone + meta: str or list of str, optional, kwarg + metadata attributes to assign to the clone + params: str or list of str, optional, kwarg + parameters to assign to the clone + + Returns + ------- + None + + See Also + -------- + http://crmsh.github.io/man/#cmdhelp_configure_clone + """ + clone_specs = resource + if description: + clone_specs = clone_specs + (' description="%s"' % description) + + for key in 'meta', 'params': + if key not in kwargs: + continue + value = kwargs[key] + if not value: + continue + clone_specs = clone_specs + (' %s' % self._parse(key, value)) + + self['clones'][name] = clone_specs + + def colocation(self, name, score=ALWAYS, *resources, **kwargs): + """Configures the colocation constraints of resources. + + Provides placement constraints regarding resources defined within + the cluster. 
Using the colocate function, resource affinity or + anti-affinity can be defined. + + For example, the following code ensures that the nova-console service + always runs where the cluster vip is running: + + crm.colocation('console_with_vip', ALWAYS, + 'nova-console', 'vip') + + The affinity or anti-affinity of resources relationships is be + expressed in the `score` parameter. A positive score indicates that + the resources should run on the same node.A score of INFINITY (or + ALWAYS) will ensure the resources are always run on the same node(s) + and a score of NEG_INFINITY (or NEVER) ensures that the resources are + never run on the same node(s). + + crm.colocation('never_apache_with_dummy', NEVER, + 'apache', 'dummy') + + Any *resources values which are provided are treated as resources which + the colocation constraint applies to. At least two resources must be + defined as part of the ordering constraint. + + The resources take the form of [:role]. If the + colocation constraint applies specifically to a role, this information + should be included int he resource supplied. + + Parameters + ---------- + id: str + id or name of the colocation constraint + score: str {ALWAYS, INFINITY, NEVER, NEGATIVE_INFINITY} or int + the score or weight of the colocation constraint. A positive value + will indicate that the resources should run on the same node. A + negative value indicates that the resources should run on separate + nodes. + resources: str or list + the list of resources which the colocation constraint applies to. + node_attribute: str, optional, kwarg + can be used to run the resources on a set of nodes, not just a + single node. 
+ + Returns + ------- + None + + See Also + -------- + http://crmsh.github.io/man/#cmdhelp_configure_colocation + """ + specs = '%s: %s' % (score, ' '.join(resources)) + if 'node_attribute' in kwargs: + specs = specs + (' node-attribute=%s' % kwargs['node_attribute']) + self['colocations'][name] = specs + + def group(self, name, *resources, **kwargs): + """Creates a group of resources within Pacemaker. + + The created group includes the list of resources provided in the list + of resources supplied. For example:: + + crm.group('grp_mysql', 'res_mysql_rbd', 'res_mysql_fs', + 'res_mysql_vip', 'res_mysqld') + + will create the 'grp_mysql' resource group consisting of the + res_mysql_rbd, res_mysql_fs, res_mysql_vip, and res_mysqld resources. + + Parameters + ---------- + name: str + the name of the group resource + resources: list of str + the names or ids of resources to include within the group. + description: str, optional, kwarg + text to describe the resource + meta: str or list of str, optional, kwarg + metadata attributes to assign to the group + params: str or list of str, optional, kwarg + parameters to assign to the group + + Returns + ------- + None + + See Also + -------- + http://crmsh.github.io/man/#cmdhelp_configure_group + """ + specs = ' '.join(resources) + if 'description' in kwargs: + specs = specs + (' description=%s"' % kwargs['description']) + + for key in 'meta', 'params': + if key not in kwargs: + continue + value = kwargs[key] + specs = specs + (' %s' % self._parse(key, value)) + + self['groups'][name] = specs + + def remove_deleted_resources(self): + """Work through the existing resources and remove any mention of ones + which have been marked for deletion.""" + for res in self['delete_resources']: + for key in self.keys(): + if key == 'delete_resources': + continue + if isinstance(self[key], dict) and res in self[key].keys(): + del self[key][res] + elif isinstance(self[key], list) and res in self[key]: + self[key].remove(res) + elif 
isinstance(self[key], tuple) and res in self[key]: + self[key] = tuple(x for x in self[key] if x != res) + + def delete_resource(self, *resources): + """Specify objects/resources to be deleted from within Pacemaker. This + is not additive, the list of resources is set to exaclty what was + passed in. + + Parameters + ---------- + resources: str or list + the name or id of the specific resource to delete. + + Returns + ------- + None + + See Also + -------- + http://crmsh.github.io/man/#cmdhelp_configure_delete + """ + self['delete_resources'] = resources + self.remove_deleted_resources() + + def add_delete_resource(self, resource): + """Specify an object/resource to delete from within Pacemaker. It can + be called multiple times to add additional resources to the deletion + list. + + Parameters + ---------- + resources: str + the name or id of the specific resource to delete. + + Returns + ------- + None + + See Also + -------- + http://crmsh.github.io/man/#cmdhelp_configure_delete + """ + if resource not in self['delete_resources']: + # NOTE(fnordahl): this unpleasant piece of code is regrettably + # necessary for Python3.4 (and trusty) compability see LP: #1814218 + # and LP: #1813982 + self['delete_resources'] = tuple( + self['delete_resources'] or ()) + (resource,) + self.remove_deleted_resources() + + def init_services(self, *resources): + """Specifies that the service(s) is an init or upstart service. + + Services (resources) which are noted as upstart services are + disabled, stopped, and left to pacemaker to manage the resource. + + Parameters + ---------- + resources: str or list of str, varargs + The resources which should be noted as init services. + + Returns + ------- + None + """ + self['init_services'] = resources + + def systemd_services(self, *resources): + """Specifies that the service(s) is a systemd service. + + Services (resources) which are noted as systemd services are + disabled, stopped, and left to pacemaker to manage the resource. 
+ + Parameters + ---------- + resources: str or list of str, varargs + The resources which should be noted as systemd services. + + Returns + ------- + None + """ + self['systemd_services'] = resources + + def ms(self, name, resource, description=None, **kwargs): + """Create a master/slave resource type. + + The following code provides an example of creating a master/slave + resource on drbd disk1:: + + crm.ms('disk1', 'drbd1', meta='notify=true globally-unique=false') + + Parameters + ---------- + name: str + the name or id of the master resource + resource: str + the name or id of the resource which now ha a master/slave + assocation tied to it. + description: str, optional + a textual description of the master resource + meta: str or list of strs, optional, kwargs + strings defining the metadata for the master/slave resource type + params: str or list of strs, optional, kwargs + parameter strings which should be passed to the master/slave + resource creation + + Returns + ------- + None + + See Also + -------- + http://crmsh.github.io/man/#cmdhelp_configure_ms + """ + specs = resource + if description: + specs = specs + (' description="%s"' % description) + + for key in 'meta', 'params': + if key not in kwargs: + continue + value = kwargs[key] + specs = specs + (' %s' % self._parse(key, value)) + + self['ms'][name] = specs + + def location(self, name, resource, **kwargs): + """Defines the preference of nodes for the given resource. + + The location constraitns consist of one or more rules which specify + a score to be awarded if the rules match. + + Parameters + ---------- + name: str + the name or id of the location constraint + resource: str + the name, id, resource, set, tag, or resoruce pattern defining the + set of resources which match the location placement constraint. 
+ attributes: str or list str, optional, kwarg + attributes which should be assigned to the location constraint + rule: str or list of str, optional, kwarg + the rule(s) which define the location constraint rules when + selecting a location to run the resource. + + Returns + ------- + None + + See Also + -------- + http://crmsh.github.io/man/#cmdhelp_configure_location + """ + specs = resource + + # Check if there are attributes assigned to the location and if so, + # format the spec string with the attributes + if 'attributes' in kwargs: + attrs = kwargs['attributes'] + if isinstance(attrs, str): + attrs = [attrs] + specs = specs + (' %s' % ' '.join(attrs)) + + if 'rule' in kwargs: + rules = kwargs['rule'] + specs = specs + (' %s' % self._parse('rule', rules)) + + self['locations'][name] = specs + + def order(self, name, score=None, *resources, **kwargs): + """Configures the ordering constraints of resources. + + Provides ordering constraints to resources defined in a Pacemaker + cluster which affect the way that resources are started, stopped, + promoted, etc. Basic ordering is provided by simply specifying the + ordering name and an ordered list of the resources which the ordering + constraint applies to. + + For example, the following code ensures that the apache resource is + started after the ClusterIP is started:: + + hacluster.order('apache-after-ip', 'ClusterIP', 'apache') + + By default, the ordering constraint will specify that the ordering + constraint is mandatory. The constraint behavior can be specified + using the 'score' keyword argument, e.g.:: + + hacluster.order('apache-after-ip', score=hacluster.OPTIONAL, + 'ClusterIP', 'apache') + + Any *resources values which are provided are treated as resources which + the ordering constraint applies to. At least two resources must be + defined as part of the ordering constraint. + + The resources take the form of [:]. 
If the + ordering constraint applies to a specific action for the resource, + this information should be included in the resource supplied. + + Parameters + ---------- + name: str + the id or name of the order constraint + resoures: str or list of strs in varargs format + the resources the ordering constraint applies to. The ordering + of the list of resources is used to provide the ordering. + score: {MANDATORY, OPTIONAL, SERIALIZED}, optional + the score of the ordering constraint. + symmetrical: boolean, optional, kwarg + when True, then the services for the resources will be stopped in + the reverse order. The default value for this is True. + + Returns + ------- + None + + See Also + -------- + http://crmsh.github.io/man/#cmdhelp_configure_order + """ + specs = '' + if score: + specs = '%s:' % score + + specs = specs + (' %s' % ' '.join(resources)) + if 'symmetrical' in kwargs: + specs = specs + (' symmetrical=' % kwargs['symmetrical']) + + self['orders'][name] = specs + + def add(self, resource_desc): + """Adds a resource descriptor object to the CRM configuration. + + Adds a `ResourceDescriptor` object to the CRM configuration which + understands how to configure the resource itself. The + `ResourceDescriptor` object needs to know how to interact with this + CRM class in order to properly configure the pacemaker resources. + + The minimum viable resource descriptor object will implement a method + which takes a reference parameter to this CRM in order to configure + itself. + + Parameters + ---------- + resource_desC: ResourceDescriptor + an object which provides an abstraction of a monitored resource + within pacemaker. + + Returns + ------- + None + """ + method = getattr(resource_desc, 'configure_resource', None) + if not callable(method): + raise ValueError('Invalid resource_desc. 
The "configure_resource"' + ' function has not been defined.') + + method(self) + + +class ResourceDescriptor(object): + """ + A ResourceDescriptor provides a logical resource or concept and knows + how to configure pacemaker. + """ + + def configure_resource(self, crm): + """Configures the logical resource(s) within the CRM. + + This is the callback method which is invoked by the CRM in order + to allow this ResourceDescriptor to fully configure the logical + resource. + + For example, a Virtual IP may provide a standard abstraction and + configure the specific details under the covers. + """ + pass + + +class InitService(ResourceDescriptor): + def __init__(self, service_name, init_service_name, clone=True): + """Class for managing init resource + + :param service_name: string - Name of service + :param init_service_name: string - Name service uses in init system + :param clone: bool - clone service across all units + :returns: None + """ + self.service_name = service_name + self.init_service_name = init_service_name + self.clone = clone + + def configure_resource(self, crm): + """"Configure new init system service resource in crm + + :param crm: CRM() instance - Config object for Pacemaker resources + :returns: None + """ + res_key = 'res_{}_{}'.format( + self.service_name.replace('-', '_'), + self.init_service_name.replace('-', '_')) + res_type = 'lsb:{}'.format(self.init_service_name) + _meta = 'migration-threshold="INFINITY" failure-timeout="5s"' + crm.primitive( + res_key, res_type, op='monitor interval="5s"', meta=_meta) + crm.init_services(self.init_service_name) + if self.clone: + clone_key = 'cl_{}'.format(res_key) + crm.clone(clone_key, res_key) + + +class VirtualIP(ResourceDescriptor): + def __init__(self, service_name, vip, nic=None, cidr=None): + """Class for managing VIP resource + + :param service_name: string - Name of service + :param vip: string - Virtual IP to be managed + :param nic: string - Network interface to bind vip to + :param cidr: 
string - Netmask for vip + :returns: None + """ + self.service_name = service_name + self.vip = vip + self.nic = nic + self.cidr = cidr + + def configure_resource(self, crm): + """Configure new vip resource in crm + + :param crm: CRM() instance - Config object for Pacemaker resources + :returns: None + """ + if self.nic: + vip_key = 'res_{}_{}_vip'.format(self.service_name, self.nic) + else: + vip_key = 'res_{}_{}_vip'.format( + self.service_name, + hashlib.sha1(self.vip.encode('UTF-8')).hexdigest()[:7]) + ipaddr = ipaddress.ip_address(self.vip) + if isinstance(ipaddr, ipaddress.IPv4Address): + res_type = 'ocf:heartbeat:IPaddr2' + res_params = 'ip="{}"'.format(self.vip) + else: + res_type = 'ocf:heartbeat:IPv6addr' + res_params = 'ipv6addr="{}"'.format(self.vip) + vip_params = 'ipv6addr' + vip_key = 'res_{}_{}_{}_vip'.format(self.service_name, self.nic, + vip_params) + + if self.nic: + res_params = '{} nic="{}"'.format(res_params, self.nic) + if self.cidr: + res_params = '{} cidr_netmask="{}"'.format(res_params, self.cidr) + # Monitor the VIP + _op_monitor = 'monitor timeout="20s" interval="10s" depth="0"' + _meta = 'migration-threshold="INFINITY" failure-timeout="5s"' + crm.primitive( + vip_key, res_type, params=res_params, op=_op_monitor, meta=_meta) + + +class DNSEntry(ResourceDescriptor): + + def __init__(self, service_name, ip, fqdn, endpoint_type): + """Class for managing DNS entries + + :param service_name: string - Name of service + :param ip: string - IP to point DNS entry at + :param fqdn: string - DNS Entry + :param endpoint_type: string - The type of the endpoint represented by + the DNS record eg public, admin etc + :returns: None + """ + self.service_name = service_name + self.ip = ip + self.fqdn = fqdn + self.endpoint_type = endpoint_type + + def configure_resource(self, crm, res_type='ocf:maas:dns'): + """Configure new DNS resource in crm + + :param crm: CRM() instance - Config object for Pacemaker resources + :param res_type: string - Corosync Open 
Cluster Framework resource + agent to use for DNS HA + :returns: None + """ + res_key = 'res_{}_{}_hostname'.format( + self.service_name.replace('-', '_'), + self.endpoint_type) + res_params = '' + if self.fqdn: + res_params = '{} fqdn="{}"'.format(res_params, self.fqdn) + if self.ip: + res_params = '{} ip_address="{}"'.format(res_params, self.ip) + crm.primitive(res_key, res_type, params=res_params) + + +class SystemdService(ResourceDescriptor): + def __init__(self, service_name, systemd_service_name, clone=True): + """Class for managing systemd resource + + :param service_name: string - Name of service + :param systemd_service_name: string - Name service uses in + systemd system + :param clone: bool - clone service across all units + :returns: None + """ + self.service_name = service_name + self.systemd_service_name = systemd_service_name + self.clone = clone + + def configure_resource(self, crm): + """"Configure new systemd system service resource in crm + + :param crm: CRM() instance - Config object for Pacemaker resources + :returns: None + """ + res_key = 'res_{}_{}'.format( + self.service_name.replace('-', '_'), + self.systemd_service_name.replace('-', '_')) + res_type = 'systemd:{}'.format(self.systemd_service_name) + _meta = 'migration-threshold="INFINITY" failure-timeout="5s"' + crm.primitive( + res_key, res_type, op='monitor interval="5s"', meta=_meta) + crm.systemd_services(self.systemd_service_name) + if self.clone: + clone_key = 'cl_{}'.format(res_key) + crm.clone(clone_key, res_key) diff --git a/kubernetes-control-plane/hooks/relations/hacluster/requires.py b/kubernetes-control-plane/hooks/relations/hacluster/requires.py new file mode 100644 index 0000000..395a658 --- /dev/null +++ b/kubernetes-control-plane/hooks/relations/hacluster/requires.py @@ -0,0 +1,58 @@ +#!/usr/bin/python +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import relations.hacluster.interface_hacluster.common as common +from charms.reactive import hook +from charms.reactive import RelationBase +from charms.reactive import scopes +from charms.reactive.helpers import data_changed as rh_data_changed +from charmhelpers.core import hookenv + + +class HAClusterRequires(RelationBase, common.ResourceManagement): + # The hacluster charm is a subordinate charm and really only works + # for a single service to the HA Cluster relation, therefore set the + # expected scope to be GLOBAL. + scope = scopes.GLOBAL + + @hook('{requires:hacluster}-relation-joined') + def joined(self): + self.set_state('{relation_name}.connected') + + @hook('{requires:hacluster}-relation-changed') + def changed(self): + if self.is_clustered(): + self.set_state('{relation_name}.available') + else: + self.remove_state('{relation_name}.available') + + @hook('{requires:hacluster}-relation-{broken,departed}') + def departed(self): + self.remove_state('{relation_name}.available') + self.remove_state('{relation_name}.connected') + + def data_changed(self, data_id, data, hash_type='md5'): + return rh_data_changed(data_id, data, hash_type) + + def get_remote_all(self, key, default=None): + """Return a list of all values presented by remote units for key""" + values = [] + for conversation in self.conversations(): + for relation_id in conversation.relation_ids: + for unit in hookenv.related_units(relation_id): + value = hookenv.relation_get(key, + unit, + relation_id) or default + if value: + values.append(value) + return list(set(values)) diff --git 
a/kubernetes-control-plane/hooks/relations/hacluster/test-requirements.txt b/kubernetes-control-plane/hooks/relations/hacluster/test-requirements.txt new file mode 100644 index 0000000..12452e5 --- /dev/null +++ b/kubernetes-control-plane/hooks/relations/hacluster/test-requirements.txt @@ -0,0 +1,7 @@ +# Lint and unit test requirements +flake8 +stestr>=2.2.0 +charms.reactive +coverage>=3.6 +netifaces +git+https://github.com/canonical/operator.git#egg=ops diff --git a/kubernetes-control-plane/hooks/relations/http/.gitignore b/kubernetes-control-plane/hooks/relations/http/.gitignore new file mode 100644 index 0000000..3374ec2 --- /dev/null +++ b/kubernetes-control-plane/hooks/relations/http/.gitignore @@ -0,0 +1,5 @@ +# Emacs save files +*~ +\#*\# +.\#* + diff --git a/kubernetes-control-plane/hooks/relations/http/README.md b/kubernetes-control-plane/hooks/relations/http/README.md new file mode 100644 index 0000000..3d7822a --- /dev/null +++ b/kubernetes-control-plane/hooks/relations/http/README.md @@ -0,0 +1,68 @@ +# Overview + +This interface layer implements the basic form of the `http` interface protocol, +which is used for things such as reverse-proxies, load-balanced servers, REST +service discovery, et cetera. + +# Usage + +## Provides + +By providing the `http` interface, your charm is providing an HTTP server that +can be load-balanced, reverse-proxied, used as a REST endpoint, etc. + +Your charm need only provide the port on which it is serving its content, as +soon as the `{relation_name}.available` state is set: + +```python +@when('website.available') +def configure_website(website): + website.configure(port=hookenv.config('port')) +``` + +## Requires + +By requiring the `http` interface, your charm is consuming one or more HTTP +servers, as a REST endpoint, to load-balance a set of servers, etc. + +Your charm should respond to the `{relation_name}.available` state, which +indicates that there is at least one HTTP server connected. 
+ +The `services()` method returns a list of available HTTP services and their +associated hosts and ports. + +The return value is a list of dicts of the following form: + +```python +[ + { + 'service_name': name_of_service, + 'hosts': [ + { + 'hostname': address_of_host, + 'port': port_for_host, + }, + # ... + ], + }, + # ... +] +``` + +A trivial example of handling this interface would be: + +```python +from charms.reactive.helpers import data_changed + +@when('reverseproxy.available') +def update_reverse_proxy_config(reverseproxy): + services = reverseproxy.services() + if not data_changed('reverseproxy.services', services): + return + for service in services: + for host in service['hosts']: + hookenv.log('{} has a unit {}:{}'.format( + services['service_name'], + host['hostname'], + host['port'])) +``` diff --git a/kubernetes-control-plane/hooks/relations/http/__init__.py b/kubernetes-control-plane/hooks/relations/http/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/kubernetes-control-plane/hooks/relations/http/interface.yaml b/kubernetes-control-plane/hooks/relations/http/interface.yaml new file mode 100644 index 0000000..54e7748 --- /dev/null +++ b/kubernetes-control-plane/hooks/relations/http/interface.yaml @@ -0,0 +1,4 @@ +name: http +summary: Basic HTTP interface +version: 1 +repo: https://git.launchpad.net/~bcsaller/charms/+source/http diff --git a/kubernetes-control-plane/hooks/relations/http/provides.py b/kubernetes-control-plane/hooks/relations/http/provides.py new file mode 100644 index 0000000..86fa9b3 --- /dev/null +++ b/kubernetes-control-plane/hooks/relations/http/provides.py @@ -0,0 +1,67 @@ +import json + +from charmhelpers.core import hookenv +from charms.reactive import when, when_not +from charms.reactive import set_flag, clear_flag +from charms.reactive import Endpoint + + +class HttpProvides(Endpoint): + + @when('endpoint.{endpoint_name}.joined') + def joined(self): + 
set_flag(self.expand_name('{endpoint_name}.available')) + + @when_not('endpoint.{endpoint_name}.joined') + def broken(self): + clear_flag(self.expand_name('{endpoint_name}.available')) + + def get_ingress_address(self, rel_id=None): + # If no rel_id is provided, we fallback to the first one + if rel_id is None: + rel_id = self.relations[0].relation_id + return hookenv.ingress_address(rel_id, hookenv.local_unit()) + + def configure(self, port, private_address=None, hostname=None): + ''' configure the address(es). private_address and hostname can + be None, a single string address/hostname, or a list of addresses + and hostnames. Note that if a list is passed, it is assumed both + private_address and hostname are either lists or None ''' + for relation in self.relations: + ingress_address = self.get_ingress_address(relation.relation_id) + if type(private_address) is list or type(hostname) is list: + # build 3 lists to zip together that are the same length + length = max(len(private_address), len(hostname)) + p = [port] * length + a = private_address + [ingress_address] *\ + (length - len(private_address)) + h = hostname + [ingress_address] * (length - len(hostname)) + zipped_list = zip(p, a, h) + # now build an array of dictionaries from that in the desired + # format for the interface + data_list = [{'hostname': h, 'port': p, 'private-address': a} + for p, a, h in zipped_list] + # for backwards compatibility, we just send a single entry + # and have an array of dictionaries in a field of that + # entry for the other entries. + data = data_list.pop(0) + data['extended_data'] = json.dumps(data_list) + + relation.to_publish_raw.update(data) + else: + relation.to_publish_raw.update({ + 'hostname': hostname or ingress_address, + 'private-address': private_address or ingress_address, + 'port': port, + }) + + def set_remote(self, **kwargs): + # NB: This method provides backwards compatibility for charms that + # called RelationBase.set_remote. 
Most commonly, this was done by + # charms that needed to pass reverse proxy stanzas to http proxies. + # This type of interaction with base relation classes is discouraged, + # and should be handled with logic encapsulated in appropriate + # interfaces. Eventually, this method will be deprecated in favor of + # that behavior. + for relation in self.relations: + relation.to_publish_raw.update(kwargs) diff --git a/kubernetes-control-plane/hooks/relations/http/requires.py b/kubernetes-control-plane/hooks/relations/http/requires.py new file mode 100644 index 0000000..17ea6b7 --- /dev/null +++ b/kubernetes-control-plane/hooks/relations/http/requires.py @@ -0,0 +1,76 @@ +import json + +from charms.reactive import when, when_not +from charms.reactive import set_flag, clear_flag +from charms.reactive import Endpoint + + +class HttpRequires(Endpoint): + + @when('endpoint.{endpoint_name}.changed') + def changed(self): + if any(unit.received_raw['port'] for unit in self.all_joined_units): + set_flag(self.expand_name('{endpoint_name}.available')) + + @when_not('endpoint.{endpoint_name}.joined') + def broken(self): + clear_flag(self.expand_name('{endpoint_name}.available')) + + def services(self): + """ + Returns a list of available HTTP services and their associated hosts + and ports. + + The return value is a list of dicts of the following form:: + + [ + { + 'service_name': name_of_service, + 'hosts': [ + { + 'hostname': address_of_host, + 'private-address': private_address_of_host, + 'port': port_for_host, + }, + # ... + ], + }, + # ... 
+ ] + """ + def build_service_host(data): + private_address = data['private-address'] + host = data['hostname'] or private_address + if host and data['port']: + return (host, private_address, data['port']) + else: + return None + + services = {} + for relation in self.relations: + service_name = relation.application_name + service = services.setdefault(service_name, { + 'service_name': service_name, + 'hosts': [], + }) + host_set = set() + for unit in relation.joined_units: + data = unit.received_raw + host = build_service_host(data) + if host: + host_set.add(host) + + # if we have extended data, add it + if 'extended_data' in data: + for ed in json.loads(data['extended_data']): + host = build_service_host(ed) + if host: + host_set.add(host) + + service['hosts'] = [ + {'hostname': h, 'private-address': pa, 'port': p} + for h, pa, p in sorted(host_set) + ] + + ret = [s for s in services.values() if s['hosts']] + return ret diff --git a/kubernetes-control-plane/hooks/relations/keystone-credentials/.gitignore b/kubernetes-control-plane/hooks/relations/keystone-credentials/.gitignore new file mode 100644 index 0000000..172bf57 --- /dev/null +++ b/kubernetes-control-plane/hooks/relations/keystone-credentials/.gitignore @@ -0,0 +1 @@ +.tox diff --git a/kubernetes-control-plane/hooks/relations/keystone-credentials/.gitreview b/kubernetes-control-plane/hooks/relations/keystone-credentials/.gitreview new file mode 100644 index 0000000..b9fc7e4 --- /dev/null +++ b/kubernetes-control-plane/hooks/relations/keystone-credentials/.gitreview @@ -0,0 +1,4 @@ +[gerrit] +host=review.opendev.org +port=29418 +project=openstack/charm-interface-keystone-credentials diff --git a/kubernetes-control-plane/hooks/relations/keystone-credentials/.stestr.conf b/kubernetes-control-plane/hooks/relations/keystone-credentials/.stestr.conf new file mode 100644 index 0000000..5fcccac --- /dev/null +++ b/kubernetes-control-plane/hooks/relations/keystone-credentials/.stestr.conf @@ -0,0 +1,3 @@ 
+[DEFAULT] +test_path=./unit_tests +top_dir=./ diff --git a/kubernetes-control-plane/hooks/relations/keystone-credentials/.zuul.yaml b/kubernetes-control-plane/hooks/relations/keystone-credentials/.zuul.yaml new file mode 100644 index 0000000..23d3066 --- /dev/null +++ b/kubernetes-control-plane/hooks/relations/keystone-credentials/.zuul.yaml @@ -0,0 +1,3 @@ +- project: + templates: + - python-charm-interface-jobs diff --git a/kubernetes-control-plane/hooks/relations/keystone-credentials/__init__.py b/kubernetes-control-plane/hooks/relations/keystone-credentials/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/kubernetes-control-plane/hooks/relations/keystone-credentials/copyright b/kubernetes-control-plane/hooks/relations/keystone-credentials/copyright new file mode 100644 index 0000000..5a49dcb --- /dev/null +++ b/kubernetes-control-plane/hooks/relations/keystone-credentials/copyright @@ -0,0 +1,21 @@ +Format: http://www.debian.org/doc/packaging-manuals/copyright-format/1.0 + +Files: * +Copyright: 2015, Canonical Ltd. +License: Apache-2.0 + +License: Apache-2.0 + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + . + http://www.apache.org/licenses/LICENSE-2.0 + . + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + . + On Debian-based systems the full text of the Apache version 2.0 license + can be found in `/usr/share/common-licenses/Apache-2.0'. 
diff --git a/kubernetes-control-plane/hooks/relations/keystone-credentials/interface.yaml b/kubernetes-control-plane/hooks/relations/keystone-credentials/interface.yaml new file mode 100644 index 0000000..5d99a86 --- /dev/null +++ b/kubernetes-control-plane/hooks/relations/keystone-credentials/interface.yaml @@ -0,0 +1,16 @@ +name: keystone-credentials +summary: > + Interface for integrating with Keystone identity credentials + Charms use this relation to obtain keystone credentials + without creating a service catalog entry. Set 'username' + only on the relation and keystone will set defaults and + return authentication details. Possible relation settings: + username: Username to be created. + project: Project (tenant) name to be created. Defaults to services + project. + requested_roles: Comma delimited list of roles to be created + requested_grants: Comma delimited list of roles to be granted. + Defaults to Admin role. + domain: Keystone v3 domain the user will be created in. Defaults + to the Default domain. +maintainer: OpenStack Charmers diff --git a/kubernetes-control-plane/hooks/relations/keystone-credentials/provides.py b/kubernetes-control-plane/hooks/relations/keystone-credentials/provides.py new file mode 100644 index 0000000..e5a9dec --- /dev/null +++ b/kubernetes-control-plane/hooks/relations/keystone-credentials/provides.py @@ -0,0 +1,35 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from charms.reactive import RelationBase +from charms.reactive import hook +from charms.reactive import scopes + + +class KeystoneProvides(RelationBase): + scope = scopes.GLOBAL + + @hook('{provides:keystone-credentials}-relation-joined') + def joined(self): + self.set_flag('{relation_name}.connected') + + @hook('{provides:keystone-credentials}-relation-{broken,departed}') + def departed(self): + self.clear_flag('{relation_name}.connected') + + def expose_credentials(self, credentials): + """Expose Keystone credentials to related units. + + :param credentials: The Keystone credentials to be exposed. + :type credentials: dict + """ + self.set_remote(**credentials) diff --git a/kubernetes-control-plane/hooks/relations/keystone-credentials/requires.py b/kubernetes-control-plane/hooks/relations/keystone-credentials/requires.py new file mode 100644 index 0000000..93c7a53 --- /dev/null +++ b/kubernetes-control-plane/hooks/relations/keystone-credentials/requires.py @@ -0,0 +1,141 @@ +#!/usr/bin/python +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from charmhelpers.core import hookenv +from charms.reactive import RelationBase +from charms.reactive import hook +from charms.reactive import scopes + + +class KeystoneRequires(RelationBase): + scope = scopes.GLOBAL + + # These remote data fields will be automatically mapped to accessors + # with a basic documentation string provided. 
+ + auto_accessors = ['private-address', 'credentials_host', + 'credentials_protocol', 'credentials_port', + 'credentials_project', 'credentials_username', + 'credentials_password', 'credentials_project_id', + 'credentials_project_domain_id', + 'credentials_user_domain_id', + 'credentials_project_domain_name', + 'credentials_user_domain_name', + 'api_version', 'auth_host', 'auth_protocol', 'auth_port', + 'region', 'ca_cert', 'https_keystone'] + + @hook('{requires:keystone-credentials}-relation-joined') + def joined(self): + self.set_state('{relation_name}.connected') + self.update_state() + + def update_state(self): + """Update the states of the relations based on the data that the + relation has. + + If the :meth:`base_data_complete` is False then all of the states + are removed. Otherwise, the individual states are set according to + their own data methods. + """ + base_complete = self.base_data_complete() + states = { + '{relation_name}.available': True, + '{relation_name}.available.ssl': self.ssl_data_complete(), + '{relation_name}.available.auth': self.auth_data_complete() + } + for k, v in states.items(): + if base_complete and v: + self.set_state(k) + else: + self.remove_state(k) + + @hook('{requires:keystone-credentials}-relation-changed') + def changed(self): + self.update_state() + self.set_state('{relation_name}.available.updated') + hookenv.atexit(self._clear_updated) + + @hook('{requires:keystone-credentials}-relation-{broken,departed}') + def departed(self): + self.update_state() + + def base_data_complete(self): + data = { + 'private-address': self.private_address(), + 'credentials_host': self.credentials_host(), + 'credentials_protocol': self.credentials_protocol(), + 'credentials_port': self.credentials_port(), + 'api_version': self.api_version(), + 'auth_host': self.auth_host(), + 'auth_protocol': self.auth_protocol(), + 'auth_port': self.auth_port(), + } + if all(data.values()): + return True + return False + + def auth_data_complete(self): + 
data = { + 'credentials_project': self.credentials_project(), + 'credentials_username': self.credentials_username(), + 'credentials_password': self.credentials_password(), + 'credentials_project_id': self.credentials_project_id(), + } + if all(data.values()): + return True + return False + + def ssl_data_complete(self): + data = { + 'https_keystone': self.https_keystone(), + 'ca_cert': self.ca_cert(), + } + for value in data.values(): + if not value or value == '__null__': + return False + return True + + def request_credentials(self, username, project=None, region=None, + requested_roles=None, requested_grants=None, + domain=None): + """ + Request credentials from Keystone + + :side effect: set requested paramaters on the identity-credentials + relation + + Required parameter + :param username: Username to be created. + + Optional parametrs + :param project: Project (tenant) name to be created. Defaults to + services project. + :param requested_roles: Comma delimited list of roles to be created + :param requested_grants: Comma delimited list of roles to be granted. + Defaults to Admin role. + :param domain: Keystone v3 domain the user will be created in. Defaults + to the Default domain. 
+ """ + relation_info = { + 'username': username, + 'project': project, + 'requested_roles': requested_roles, + 'requested_grants': requested_grants, + 'domain': domain, + } + + self.set_local(**relation_info) + self.set_remote(**relation_info) + + def _clear_updated(self): + self.remove_state('{relation_name}.available.updated') diff --git a/kubernetes-control-plane/hooks/relations/keystone-credentials/test-requirements.txt b/kubernetes-control-plane/hooks/relations/keystone-credentials/test-requirements.txt new file mode 100644 index 0000000..9ea2415 --- /dev/null +++ b/kubernetes-control-plane/hooks/relations/keystone-credentials/test-requirements.txt @@ -0,0 +1,2 @@ +flake8>=2.2.4 +stestr>=2.2.0 diff --git a/kubernetes-control-plane/hooks/relations/kube-control/.travis.yml b/kubernetes-control-plane/hooks/relations/kube-control/.travis.yml new file mode 100644 index 0000000..d2be8be --- /dev/null +++ b/kubernetes-control-plane/hooks/relations/kube-control/.travis.yml @@ -0,0 +1,9 @@ +language: python +python: + - "3.5" + - "3.6" + - "3.7" +install: + - pip install tox-travis +script: + - tox diff --git a/kubernetes-control-plane/hooks/relations/kube-control/README.md b/kubernetes-control-plane/hooks/relations/kube-control/README.md new file mode 100644 index 0000000..6f9ecb7 --- /dev/null +++ b/kubernetes-control-plane/hooks/relations/kube-control/README.md @@ -0,0 +1,171 @@ +# kube-control interface + +This interface provides communication between master and workers in a +Kubernetes cluster. + + +## Provides (kubernetes-master side) + + +### States + +* `kube-control.connected` + + Enabled when a worker has joined the relation. + +* `kube-control.gpu.available` + + Enabled when any worker has indicated that it is running in gpu mode. + +* `kube-control.departed` + + Enabled when any worker has indicated that it is leaving the cluster. + + +* `kube-control.auth.requested` + + Enabled when an authentication credential is requested. 
This state is + temporary and will be removed once the units authentication request has + been fulfilled. + +### Methods + +* `kube_control.set_dns(port, domain, sdn_ip)` + + Sends DNS info to the connected worker(s). + + +* `kube_control.auth_user()` + + Returns a list of the requested username and group requested for + authentication. + +* `kube_control.sign_auth_request(scope, user, kubelet_token, proxy_token, client_token)` + + Sends authentication tokens to the unit scope for the requested user + and kube-proxy services. + +* `kube_control.set_cluster_tag(cluster_tag)` + + Sends a tag used to identify resources that are part of the cluster to the + connected worker(s). + +* `kube_control.flush_departed()` + + Returns the unit departing the kube_control relationship so you can do any + post removal cleanup. Such as removing authentication tokens for the unit. + Invoking this method will also remove the `kube-control.departed` state + +* `kube_control.set_registry_location(registry_location)` + Sends the container image registry location to the connected worker(s). + +### Examples + +```python + +@when('kube-control.connected') +def send_dns(kube_control): + # send port, domain, sdn_ip to the remote side + kube_control.set_dns(53, "cluster.local", "10.1.0.10") + +@when('kube-control.gpu.available') +def on_gpu_available(kube_control): + # The remote side is gpu-enable, handle it somehow + assert kube_control.get_gpu() == True + + +@when('kube-control.departed') +@when('leadership.is_leader') +def flush_auth_for_departed(kube_control): + ''' Unit has left the cluster and needs to have its authentication + tokens removed from the token registry ''' + departing_unit = kube_control.flush_departed() + +``` + +## Requires (kubernetes-worker side) + + +### States + +* `kube-control.connected` + + Enabled when a master has joined the relation. + +* `kube-control.dns.available` + + Enabled when DNS info is available from the master. 
+ +* `kube-control.auth.available` + + Enabled when authentication credentials are present from the master. + +* `kube-control.cluster_tag.available` + + Enabled when cluster tag is present from the master. + +* `kube-control.registry_location.available` + + Enabled when registry location is present from the master. + +### Methods + +* `kube_control.get_dns()` + + Returns a dictionary of DNS info sent by the master. The keys in the + dict are: domain, private-address, sdn-ip, port. + +* `kube_control.set_gpu(enabled=True)` + + Tell the master that we are gpu-enabled. + +* `kube_control.get_auth_credentials(user)` + + Returns a dict with the users authentication credentials. + +* `set_auth_request(kubelet, group='system:nodes')` + + Issue an authentication request against the master to receive token based + auth credentials in return. + +* `kube_control.get_cluster_tag()` + + Returns the cluster tag provided by the master. + +* `kube_control.get_registry_location()` + + Returns the container image registry location provided by the master. 
+ +### Examples + +```python + +@when('kube-control.dns.available') +def on_dns_available(kube_control): + # Remote side has sent DNS info + dns = kube_control.get_dns() + print(context['domain']) + print(context['private-address']) + print(context['sdn-ip']) + print(context['port']) + +@when('kube-control.connected') +def send_gpu(kube_control): + # Tell the master that we're gpu-enabled + kube_control.set_gpu(True) + +@when('kube-control.auth.available') +def display_auth_tokens(kube_control): + # Remote side has sent auth info + auth = kube_control.get_auth_credentials('root') + print(auth['kubelet_token']) + print(auth['proxy_token']) + print(auth['client_token']) + +@when('kube-control.connected') +@when_not('kube-control.auth.available') +def request_auth_credentials(kube_control): + # Request an admin user with sudo level access named 'root' + kube_control.set_auth_request('root', group='system:masters') + +``` diff --git a/kubernetes-control-plane/hooks/relations/kube-control/__init__.py b/kubernetes-control-plane/hooks/relations/kube-control/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/kubernetes-control-plane/hooks/relations/kube-control/interface.yaml b/kubernetes-control-plane/hooks/relations/kube-control/interface.yaml new file mode 100644 index 0000000..2f0b187 --- /dev/null +++ b/kubernetes-control-plane/hooks/relations/kube-control/interface.yaml @@ -0,0 +1,6 @@ +name: kube-control +summary: Provides master-worker communication. 
+version: 1 +maintainer: "Tim Van Steenburgh " +ignore: +- tests diff --git a/kubernetes-control-plane/hooks/relations/kube-control/provides.py b/kubernetes-control-plane/hooks/relations/kube-control/provides.py new file mode 100644 index 0000000..050a175 --- /dev/null +++ b/kubernetes-control-plane/hooks/relations/kube-control/provides.py @@ -0,0 +1,167 @@ +#!/usr/local/sbin/charm-env python3 +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from charms.reactive import ( + Endpoint, + toggle_flag, + set_flag, + data_changed +) + +from charmhelpers.core import ( + hookenv, + unitdata +) + + +DB = unitdata.kv() + + +class KubeControlProvider(Endpoint): + """ + Implements the kubernetes-master side of the kube-control interface. + """ + def manage_flags(self): + toggle_flag(self.expand_name('{endpoint_name}.connected'), + self.is_joined) + toggle_flag(self.expand_name('{endpoint_name}.gpu.available'), + self.is_joined and self._get_gpu()) + requests_data_id = self.expand_name('{endpoint_name}.requests') + requests = self.auth_user() + if data_changed(requests_data_id, requests): + set_flag(self.expand_name('{endpoint_name}.requests.changed')) + + def set_dns(self, port, domain, sdn_ip, enable_kube_dns): + """ + Send DNS info to the remote units. + + We'll need the port, domain, and sdn_ip of the dns service. If + sdn_ip is not required in your deployment, the units private-ip + is available implicitly. 
+ """ + for relation in self.relations: + relation.to_publish_raw.update({ + 'port': port, + 'domain': domain, + 'sdn-ip': sdn_ip, + 'enable-kube-dns': enable_kube_dns, + }) + + def auth_user(self): + """ + Return the kubelet_user value on the wire from the requestors. + """ + requests = [] + + for unit in self.all_joined_units: + requests.append( + (unit.unit_name, + {'user': unit.received_raw.get('kubelet_user'), + 'group': unit.received_raw.get('auth_group')}) + ) + + requests.sort() + return requests + + def sign_auth_request(self, scope, user, kubelet_token, proxy_token, + client_token): + """ + Send authorization tokens to the requesting unit. + """ + cred = { + 'scope': scope, + 'kubelet_token': kubelet_token, + 'proxy_token': proxy_token, + 'client_token': client_token + } + + if not DB.get('creds'): + DB.set('creds', {}) + + all_creds = DB.get('creds') + all_creds[user] = cred + DB.set('creds', all_creds) + + for relation in self.relations: + relation.to_publish.update({ + 'creds': all_creds + }) + + def clear_creds(self): + """ + Clear creds from the relation. This is used by non-leader units to stop + advertising creds so that the leader can assume full control of them. + """ + DB.unset('creds') + for relation in self.relations: + relation.to_publish_raw['creds'] = '' + + def _get_gpu(self): + """ + Return True if any remote worker is gpu-enabled. + """ + for unit in self.all_joined_units: + if unit.received_raw.get('gpu') == 'True': + hookenv.log('Unit {} has gpu enabled'.format(unit)) + return True + + return False + + def set_cluster_tag(self, cluster_tag): + """ + Send the cluster tag to the remote units. + """ + for relation in self.relations: + relation.to_publish_raw.update({ + 'cluster-tag': cluster_tag + }) + + def set_registry_location(self, registry_location): + """ + Send the registry location to the remote units. 
+ """ + for relation in self.relations: + relation.to_publish_raw.update({ + 'registry-location': registry_location + }) + + def set_cohort_keys(self, cohort_keys): + """ + Send the cohort snapshot keys. + """ + for relation in self.relations: + relation.to_publish['cohort-keys'] = cohort_keys + + def set_default_cni(self, default_cni): + """ + Send the default CNI. The default_cni value should be a string + containing the name of a related CNI application to use as the + default CNI. For example: "flannel" or "calico". If no default has + been chosen then "" can be sent instead. + """ + for relation in self.relations: + relation.to_publish['default-cni'] = default_cni + + def set_api_endpoints(self, endpoints): + """ + Send the list of API endpoint URLs to which workers should connect. + """ + endpoints = sorted(endpoints) + for relation in self.relations: + relation.to_publish['api-endpoints'] = endpoints + + def set_has_xcp(self, has_xcp): + """ + Set the flag indicating that an external cloud provider is in use. + """ + for relation in self.relations: + relation.to_publish['has-xcp'] = bool(has_xcp) diff --git a/kubernetes-control-plane/hooks/relations/kube-control/requires.py b/kubernetes-control-plane/hooks/relations/kube-control/requires.py new file mode 100644 index 0000000..b72922d --- /dev/null +++ b/kubernetes-control-plane/hooks/relations/kube-control/requires.py @@ -0,0 +1,168 @@ +#!/usr/local/sbin/charm-env python3 +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + +from charms.reactive import ( + Endpoint, + toggle_flag, +) + +from charmhelpers.core.hookenv import log + + +class KubeControlRequirer(Endpoint): + """ + Implements the kubernetes-worker side of the kube-control interface. + """ + def manage_flags(self): + """ + Set states corresponding to the data we have. + """ + toggle_flag( + self.expand_name('{endpoint_name}.connected'), + self.is_joined) + toggle_flag( + self.expand_name('{endpoint_name}.dns.available'), + self.is_joined and self.dns_ready()) + toggle_flag( + self.expand_name('{endpoint_name}.auth.available'), + self.is_joined and self._has_auth_credentials()) + toggle_flag( + self.expand_name('{endpoint_name}.cluster_tag.available'), + self.is_joined and self.get_cluster_tag()) + toggle_flag( + self.expand_name('{endpoint_name}.registry_location.available'), + self.is_joined and self.get_registry_location()) + toggle_flag( + self.expand_name('{endpoint_name}.cohort_keys.available'), + self.is_joined and self.cohort_keys) + toggle_flag( + self.expand_name('{endpoint_name}.default_cni.available'), + self.is_joined and self.get_default_cni() is not None) + toggle_flag( + self.expand_name('{endpoint_name}.api_endpoints.available'), + self.is_joined and self.get_api_endpoints()) + + def get_auth_credentials(self, user): + """ + Return the authentication credentials. + """ + rx = {} + for unit in self.all_joined_units: + rx.update(unit.received.get('creds', {})) + if not rx: + return None + + if user in rx: + return { + 'user': user, + 'kubelet_token': rx[user]['kubelet_token'], + 'proxy_token': rx[user]['proxy_token'], + 'client_token': rx[user]['client_token'] + } + else: + return None + + def get_dns(self): + """ + Return DNS info provided by the master. 
+ """ + rx = self.all_joined_units.received_raw + + return { + 'port': rx.get('port'), + 'domain': rx.get('domain'), + 'sdn-ip': rx.get('sdn-ip'), + 'enable-kube-dns': rx.get('enable-kube-dns'), + } + + def dns_ready(self): + """ + Return True if we have all DNS info from the master. + """ + keys = ['port', 'domain', 'sdn-ip', 'enable-kube-dns'] + dns_info = self.get_dns() + return (set(dns_info.keys()) == set(keys) and + dns_info['enable-kube-dns'] is not None) + + def set_auth_request(self, kubelet, group='system:nodes'): + """ + Tell the master that we are requesting auth, and to use this + hostname for the kubelet system account. + + Param groups - Determines the level of eleveted privleges of the + requested user. Can be overridden to request sudo level access on the + cluster via changing to system:masters. + """ + for relation in self.relations: + relation.to_publish_raw.update({ + 'kubelet_user': kubelet, + 'auth_group': group + }) + + def set_gpu(self, enabled=True): + """ + Tell the master that we're gpu-enabled (or not). + """ + log('Setting gpu={} on kube-control relation'.format(enabled)) + for relation in self.relations: + relation.to_publish_raw.update({ + 'gpu': enabled + }) + + def _has_auth_credentials(self): + """ + Predicate method to signal we have authentication credentials. + """ + if self.all_joined_units.received_raw.get('creds'): + return True + + def get_cluster_tag(self): + """ + Tag for identifying resources that are part of the cluster. + """ + return self.all_joined_units.received_raw.get('cluster-tag') + + def get_registry_location(self): + """ + URL for container image registry. + """ + return self.all_joined_units.received_raw.get('registry-location') + + @property + def cohort_keys(self): + """ + The cohort snapshot keys sent by the masters. + """ + return self.all_joined_units.received['cohort-keys'] + + def get_default_cni(self): + """ + Default CNI network to use. 
+ """ + return self.all_joined_units.received['default-cni'] + + def get_api_endpoints(self): + """ + Returns a list of API endpoint URLs. + """ + endpoints = set() + for unit in self.all_joined_units: + endpoints.update(unit.received['api-endpoints'] or []) + return sorted(endpoints) + + @property + def has_xcp(self): + """ + The flag indicating whether an external cloud provider is in use. + """ + return self.all_joined_units.received.get("has-xcp", False) diff --git a/kubernetes-control-plane/hooks/relations/kube-dns/README.md b/kubernetes-control-plane/hooks/relations/kube-dns/README.md new file mode 100644 index 0000000..15ce8bb --- /dev/null +++ b/kubernetes-control-plane/hooks/relations/kube-dns/README.md @@ -0,0 +1,21 @@ +# Kube-DNS + +This interface allows a DNS provider, such as CoreDNS, to provide name +resolution for a Kubernetes cluster. + +(Note: this interface was previously used by the Kubernetes Master charm to +communicate the DNS provider info to the Kubernetes Worker charm, but that +usage was folded into the `kube-control` interface.) + + +# Provides + +The provider should look for the `{endpoint_name}.connected` flag and call +the `set_dns_info` method with the `domain`, `sdn_ip`, and `port` info (note: +these must be provided as keyword arguments). + +# Requires + +The requirer should look for the `{endpoint_name}.available` flag and call the +`details` method, which will return a dictionary with the `domain`, `sdn-ip`, +and `port` keys. 
diff --git a/kubernetes-control-plane/hooks/relations/kube-dns/__init__.py b/kubernetes-control-plane/hooks/relations/kube-dns/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/kubernetes-control-plane/hooks/relations/kube-dns/interface.yaml b/kubernetes-control-plane/hooks/relations/kube-dns/interface.yaml new file mode 100644 index 0000000..2de32b0 --- /dev/null +++ b/kubernetes-control-plane/hooks/relations/kube-dns/interface.yaml @@ -0,0 +1,4 @@ +name: kube-dns +summary: provides the kubernetes dns settings +version: 1 +maintainer: "Charles Butler " diff --git a/kubernetes-control-plane/hooks/relations/kube-dns/provides.py b/kubernetes-control-plane/hooks/relations/kube-dns/provides.py new file mode 100644 index 0000000..a7199c3 --- /dev/null +++ b/kubernetes-control-plane/hooks/relations/kube-dns/provides.py @@ -0,0 +1,29 @@ +#!/usr/bin/python +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from charms.reactive import Endpoint, toggle_flag + + +class KubeDNSProvider(Endpoint): + def manage_flags(self): + toggle_flag(self.expand_name('{endpoint_name}.connected'), + self.is_joined) + + def set_dns_info(self, *, domain, sdn_ip, port): + '''Set the domain, sdn_ip, and port of the DNS provider.''' + for relation in self.relations: + relation.to_publish_raw.update({ + 'domain': domain, + 'sdn-ip': sdn_ip, + 'port': port, + }) diff --git a/kubernetes-control-plane/hooks/relations/kube-dns/requires.py b/kubernetes-control-plane/hooks/relations/kube-dns/requires.py new file mode 100644 index 0000000..9595c4a --- /dev/null +++ b/kubernetes-control-plane/hooks/relations/kube-dns/requires.py @@ -0,0 +1,36 @@ +#!/usr/bin/python +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from charms.reactive import Endpoint, toggle_flag + + +class KubeDNSRequireer(Endpoint): + def manage_flags(self): + '''Set flags according to whether we have DNS provider details.''' + toggle_flag(self.expand_name('{endpoint_name}.available'), + self.has_info()) + + def details(self): + '''Return the DNS provider details.''' + return { + 'domain': self._get_value('domain'), + 'sdn-ip': self._get_value('sdn-ip'), + 'port': self._get_value('port'), + } + + def has_info(self): + ''' Determine if we have all needed info''' + return all(self.details().values()) + + def _get_value(self, key): + return self.all_joined_units.received_raw.get(key) diff --git a/kubernetes-control-plane/hooks/relations/kube-masters/README.md b/kubernetes-control-plane/hooks/relations/kube-masters/README.md new file mode 100644 index 0000000..600e0fb --- /dev/null +++ b/kubernetes-control-plane/hooks/relations/kube-masters/README.md @@ -0,0 +1,45 @@ +# kube-masters interface + +This interface provides communication amongst kubernetes-control-planes in a cluster. + +## States + +* `kube-masters.connected` + + Enabled when any kubernetes-master unit has joined the relation. + +* `kube-masters.cohorts.ready` + + Enabled when all peers have snap cohort data. + +### Methods and Properties + +* `kube-masters.set_cohort_keys(cohort_keys)` + + Set a dictionary of cohort keys created by the snap layer. + +* `kube-masters.cohort_keys` + + Dictionary of all cohort keys sent by peers. 
+ +### Examples + +```python + +@when('kube-masters.connected') +def agree_on_cohorts(): + kube_control_planes = endpoint_from_flag('kube-masters.connected') + cohort_keys = create_cohorts_for_my_snaps() + kube_control_planes.set_cohort_keys(cohort_keys) + +@when('kube-masters.cohorts.ready', + 'kube-control.connected') +def send_cohorts_to_workers(): + kube_control_planes = endpoint_from_flag('kube-masters.cohorts.ready') + cohort_keys = kube_control_planes.cohort_keys + + kube_control = endpoint_from_flag('kube-control.connected') + # The following set method is defined in interface-kube-control + kube_control.set_cohort_keys(cohort_keys) + +``` diff --git a/kubernetes-control-plane/hooks/relations/kube-masters/__init__.py b/kubernetes-control-plane/hooks/relations/kube-masters/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/kubernetes-control-plane/hooks/relations/kube-masters/interface.yaml b/kubernetes-control-plane/hooks/relations/kube-masters/interface.yaml new file mode 100644 index 0000000..9c01895 --- /dev/null +++ b/kubernetes-control-plane/hooks/relations/kube-masters/interface.yaml @@ -0,0 +1,4 @@ +name: kube-masters +summary: Provides control-plane peer communication. +version: 1 +maintainer: "Kevin W. Monroe " diff --git a/kubernetes-control-plane/hooks/relations/kube-masters/peers.py b/kubernetes-control-plane/hooks/relations/kube-masters/peers.py new file mode 100644 index 0000000..bb4bc43 --- /dev/null +++ b/kubernetes-control-plane/hooks/relations/kube-masters/peers.py @@ -0,0 +1,54 @@ +#!/usr/local/sbin/charm-env python3 +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from charms.reactive import ( + Endpoint, + toggle_flag, +) + +from charmhelpers.core.hookenv import log + + +class KubeControlPlanePeer(Endpoint): + """ + Implements peering for kubernetes-control-plane units. + """ + def manage_flags(self): + """ + Set states corresponding to the data we have. + """ + toggle_flag( + self.expand_name('{endpoint_name}.connected'), + self.is_joined) + toggle_flag( + self.expand_name('{endpoint_name}.cohorts.ready'), + self.is_joined and self._peers_have_cohorts()) + + def _peers_have_cohorts(self): + """ + Return True if all peers have cohort keys. + """ + for unit in self.all_joined_units: + if not unit.received.get('cohort-keys'): + log('Unit {} does not yet have cohort-keys'.format(unit)) + return False + + log('All units have cohort-keys') + return True + + def set_cohort_keys(self, cohort_keys): + """ + Send the cohort snapshot keys. 
+ """ + for relation in self.relations: + relation.to_publish['cohort-keys'] = cohort_keys diff --git a/kubernetes-control-plane/hooks/relations/kubernetes-cni/.github/workflows/tests.yaml b/kubernetes-control-plane/hooks/relations/kubernetes-cni/.github/workflows/tests.yaml new file mode 100644 index 0000000..9801450 --- /dev/null +++ b/kubernetes-control-plane/hooks/relations/kubernetes-cni/.github/workflows/tests.yaml @@ -0,0 +1,24 @@ +name: Test Suite for K8s Service Interface + +on: + - pull_request + +jobs: + lint-and-unit-tests: + name: Lint & Unit tests + runs-on: ubuntu-latest + strategy: + matrix: + python: [3.6, 3.7, 3.8, 3.9] + steps: + - name: Check out code + uses: actions/checkout@v2 + - name: Setup Python + uses: actions/setup-python@v2 + with: + python-version: ${{ matrix.python }} + - name: Install Tox + run: pip install tox + - name: Run lint & unit tests + run: tox + diff --git a/kubernetes-control-plane/hooks/relations/kubernetes-cni/.gitignore b/kubernetes-control-plane/hooks/relations/kubernetes-cni/.gitignore new file mode 100644 index 0000000..8d150f3 --- /dev/null +++ b/kubernetes-control-plane/hooks/relations/kubernetes-cni/.gitignore @@ -0,0 +1,4 @@ +.DS_Store +.tox +__pycache__ +*.pyc diff --git a/kubernetes-control-plane/hooks/relations/kubernetes-cni/README.md b/kubernetes-control-plane/hooks/relations/kubernetes-cni/README.md new file mode 100644 index 0000000..e69de29 diff --git a/kubernetes-control-plane/hooks/relations/kubernetes-cni/__init__.py b/kubernetes-control-plane/hooks/relations/kubernetes-cni/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/kubernetes-control-plane/hooks/relations/kubernetes-cni/interface.yaml b/kubernetes-control-plane/hooks/relations/kubernetes-cni/interface.yaml new file mode 100644 index 0000000..7e3c123 --- /dev/null +++ b/kubernetes-control-plane/hooks/relations/kubernetes-cni/interface.yaml @@ -0,0 +1,6 @@ +name: kubernetes-cni +summary: Interface for relating various CNI 
implementations +version: 0 +maintainer: "George Kraft " +ignore: +- tests diff --git a/kubernetes-control-plane/hooks/relations/kubernetes-cni/provides.py b/kubernetes-control-plane/hooks/relations/kubernetes-cni/provides.py new file mode 100644 index 0000000..dae1361 --- /dev/null +++ b/kubernetes-control-plane/hooks/relations/kubernetes-cni/provides.py @@ -0,0 +1,81 @@ +#!/usr/bin/python + +from charmhelpers.core import hookenv +from charmhelpers.core.host import file_hash +from charms.layer.kubernetes_common import kubeclientconfig_path +from charms.reactive import Endpoint +from charms.reactive import toggle_flag, clear_flag + + +class CNIPluginProvider(Endpoint): + def manage_flags(self): + toggle_flag(self.expand_name("{endpoint_name}.connected"), self.is_joined) + toggle_flag( + self.expand_name("{endpoint_name}.available"), self.config_available() + ) + clear_flag(self.expand_name("endpoint.{endpoint_name}.changed")) + + def config_available(self): + """Ensures all config from the CNI plugin is available.""" + goal_state = hookenv.goal_state() + related_apps = [ + app + for app in goal_state.get("relations", {}).get(self.endpoint_name, "") + if "/" not in app + ] + if not related_apps: + return False + configs = self.get_configs() + return all( + "cidr" in config and "cni-conf-file" in config + for config in [configs.get(related_app, {}) for related_app in related_apps] + ) + + def get_config(self, default=None): + """Get CNI config for one related application. + + If default is specified, and there is a related application with a + matching name, then that application is chosen. Otherwise, the + application is chosen alphabetically. + + Whichever application is chosen, that application's CNI config is + returned. 
+ """ + configs = self.get_configs() + if not configs: + return {} + elif default and default not in configs: + msg = "relation not found for default CNI %s, ignoring" % default + hookenv.log(msg, level="WARN") + return self.get_config() + elif default: + return configs.get(default, {}) + else: + return configs.get(sorted(configs)[0], {}) + + def get_configs(self): + """Get CNI configs for all related applications. + + This returns a mapping of application names to CNI configs. Here's an + example return value: + { + 'flannel': { + 'cidr': '10.1.0.0/16', + 'cni-conf-file': '10-flannel.conflist' + }, + 'calico': { + 'cidr': '192.168.0.0/16', + 'cni-conf-file': '10-calico.conflist' + } + } + """ + return { + relation.application_name: relation.joined_units.received_raw + for relation in self.relations + if relation.application_name + } + + def notify_kubeconfig_changed(self): + kubeconfig_hash = file_hash(kubeclientconfig_path) + for relation in self.relations: + relation.to_publish_raw.update({"kubeconfig-hash": kubeconfig_hash}) diff --git a/kubernetes-control-plane/hooks/relations/kubernetes-cni/requires.py b/kubernetes-control-plane/hooks/relations/kubernetes-cni/requires.py new file mode 100644 index 0000000..349aa07 --- /dev/null +++ b/kubernetes-control-plane/hooks/relations/kubernetes-cni/requires.py @@ -0,0 +1,42 @@ +#!/usr/bin/python + +from charmhelpers.core import unitdata +from charms.reactive import Endpoint +from charms.reactive import when_any, when_not +from charms.reactive import set_state, remove_state + +db = unitdata.kv() + + +class CNIPluginClient(Endpoint): + def manage_flags(self): + kubeconfig_hash = self.get_config().get("kubeconfig-hash") + kubeconfig_hash_key = self.expand_name("{endpoint_name}.kubeconfig-hash") + if kubeconfig_hash: + set_state(self.expand_name("{endpoint_name}.kubeconfig.available")) + if kubeconfig_hash != db.get(kubeconfig_hash_key): + set_state(self.expand_name("{endpoint_name}.kubeconfig.changed")) + 
db.set(kubeconfig_hash_key, kubeconfig_hash) + + @when_any("endpoint.{endpoint_name}.joined", "endpoint.{endpoint_name}.changed") + def changed(self): + """Indicate the relation is connected, and if the relation data is + set it is also available.""" + set_state(self.expand_name("{endpoint_name}.connected")) + remove_state(self.expand_name("endpoint.{endpoint_name}.changed")) + + @when_not("endpoint.{endpoint_name}.joined") + def broken(self): + """Indicate the relation is no longer available and not connected.""" + remove_state(self.expand_name("{endpoint_name}.connected")) + + def get_config(self): + """Get the kubernetes configuration information.""" + return self.all_joined_units.received_raw + + def set_config(self, cidr, cni_conf_file): + """Sets the CNI configuration information.""" + for relation in self.relations: + relation.to_publish_raw.update( + {"cidr": cidr, "cni-conf-file": cni_conf_file} + ) diff --git a/kubernetes-control-plane/hooks/relations/nrpe-external-master/README.md b/kubernetes-control-plane/hooks/relations/nrpe-external-master/README.md new file mode 100644 index 0000000..e33deb8 --- /dev/null +++ b/kubernetes-control-plane/hooks/relations/nrpe-external-master/README.md @@ -0,0 +1,66 @@ +# nrpe-external-master interface + +Use this interface to register nagios checks in your charm layers. + +## Purpose + +This interface is designed to interoperate with the +[nrpe-external-master](https://jujucharms.com/nrpe-external-master) subordinate charm. + +## How to use in your layers + +The event handler for `nrpe-external-master.available` is called with an object +through which you can register your own custom nagios checks, when a relation +is established with `nrpe-external-master:nrpe-external-master`. + +This object provides a method, + +_add_check_(args, name=_check_name_, description=_description_, context=_context_, unit=_unit_) + +which is called to register a nagios plugin check for your service. + +All arguments are required. 
+ +*args* is a list of nagios plugin command line arguments, starting with the path to the plugin executable. + +*name* is the name of the check registered in nagios + +*description* is some text that describes what the check is for and what it does + +*context* is the nagios context name, something that identifies your application + +*unit* is `hookenv.local_unit()` + +The nrpe subordinate installs `check_http`, so you can use it like this: + +``` +@when('nrpe-external-master.available') +def setup_nagios(nagios): + config = hookenv.config() + unit_name = hookenv.local_unit() + nagios.add_check(['/usr/lib/nagios/plugins/check_http', + '-I', '127.0.0.1', '-p', str(config['port']), + '-e', " 200 OK", '-u', '/publickey'], + name="check_http", + description="Verify my awesome service is responding", + context=config["nagios_context"], + unit=unit_name, + ) +``` +If your `nagios.add_check` defines a custom plugin, you will also need to restart the `nagios-nrpe-server` service. + +Consult the nagios documentation for more information on [how to write your own +plugins](https://assets.nagios.com/downloads/nagioscore/docs/nagioscore/4/en/pluginapi.html) +or [find one](https://www.nagios.org/projects/nagios-plugins/) that does what you need. + +## Example deployment + +``` +$ juju deploy your-awesome-charm +$ juju deploy nrpe-external-master --config site-nagios.yaml +$ juju add-relation your-awesome-charm nrpe-external-master +``` + +where `site-nagios.yaml` has the necessary configuration settings for the +subordinate to connect to nagios. 
+ diff --git a/kubernetes-control-plane/hooks/relations/nrpe-external-master/__init__.py b/kubernetes-control-plane/hooks/relations/nrpe-external-master/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/kubernetes-control-plane/hooks/relations/nrpe-external-master/interface.yaml b/kubernetes-control-plane/hooks/relations/nrpe-external-master/interface.yaml new file mode 100644 index 0000000..859a423 --- /dev/null +++ b/kubernetes-control-plane/hooks/relations/nrpe-external-master/interface.yaml @@ -0,0 +1,3 @@ +name: nrpe-external-master +summary: Nagios interface +version: 1 diff --git a/kubernetes-control-plane/hooks/relations/nrpe-external-master/provides.py b/kubernetes-control-plane/hooks/relations/nrpe-external-master/provides.py new file mode 100644 index 0000000..b10f501 --- /dev/null +++ b/kubernetes-control-plane/hooks/relations/nrpe-external-master/provides.py @@ -0,0 +1,62 @@ +import datetime + +from charms.reactive import hook +from charms.reactive import RelationBase +from charms.reactive import scopes + + +class NrpeExternalMasterProvides(RelationBase): + scope = scopes.GLOBAL + + @hook('{provides:nrpe-external-master}-relation-{joined,changed}') + def changed_nrpe(self): + self.set_state('{relation_name}.available') + + @hook('{provides:nrpe-external-master}-relation-{broken,departed}') + def broken_nrpe(self): + self.remove_state('{relation_name}.available') + + def add_check(self, args, name=None, description=None, context=None, + servicegroups=None, unit=None): + unit = unit.replace('/', '-') + check_tmpl = """ +#--------------------------------------------------- +# This file is Juju managed +#--------------------------------------------------- +command[%(check_name)s]=%(check_args)s +""" + service_tmpl = """ +#--------------------------------------------------- +# This file is Juju managed +#--------------------------------------------------- +define service { + use active-service + host_name %(context)s-%(unit_name)s + 
service_description %(description)s + check_command check_nrpe!%(check_name)s + servicegroups %(servicegroups)s +} +""" + check_filename = "/etc/nagios/nrpe.d/check_%s.cfg" % (name) + with open(check_filename, "w") as fh: + fh.write(check_tmpl % { + 'check_args': ' '.join(args), + 'check_name': name, + }) + service_filename = "/var/lib/nagios/export/service__%s_%s.cfg" % ( + unit, name) + with open(service_filename, "w") as fh: + fh.write(service_tmpl % { + 'servicegroups': servicegroups or context, + 'context': context, + 'description': description, + 'check_name': name, + 'unit_name': unit, + }) + + def updated(self): + relation_info = { + 'timestamp': datetime.datetime.now().isoformat(), + } + self.set_remote(**relation_info) + self.remove_state('{relation_name}.available') diff --git a/kubernetes-control-plane/hooks/relations/openstack-integration/.gitignore b/kubernetes-control-plane/hooks/relations/openstack-integration/.gitignore new file mode 100644 index 0000000..5f9f2c5 --- /dev/null +++ b/kubernetes-control-plane/hooks/relations/openstack-integration/.gitignore @@ -0,0 +1,3 @@ +.tox +__pycache__ +*.pyc diff --git a/kubernetes-control-plane/hooks/relations/openstack-integration/LICENSE b/kubernetes-control-plane/hooks/relations/openstack-integration/LICENSE new file mode 100644 index 0000000..d645695 --- /dev/null +++ b/kubernetes-control-plane/hooks/relations/openstack-integration/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. 
+ + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of 
the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/kubernetes-control-plane/hooks/relations/openstack-integration/README.md b/kubernetes-control-plane/hooks/relations/openstack-integration/README.md new file mode 100644 index 0000000..ae021c2 --- /dev/null +++ b/kubernetes-control-plane/hooks/relations/openstack-integration/README.md @@ -0,0 +1,28 @@ +# Overview + +This layer encapsulates the `openstack-integration` interface communciation +protocol and provides an API for charms on either side of relations using this +interface. 
+ +## Usage + +In your charm's `layer.yaml`, ensure that `interface:openstack-integration` is +included in the `includes` section: + +```yaml +includes: ['layer:basic', 'interface:openstack-integration'] +``` + +And in your charm's `metadata.yaml`, ensure that a relation endpoint is defined +using the `openstack-integration` interface protocol: + +```yaml +requires: + openstack: + interface: openstack-integration +``` + +For documentation on how to use the API for this interface, see: + +* [Requires API documentation](docs/requires.md) +* [Provides API documentation](docs/provides.md) (this will only be used by the openstack-integrator charm) diff --git a/kubernetes-control-plane/hooks/relations/openstack-integration/__init__.py b/kubernetes-control-plane/hooks/relations/openstack-integration/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/kubernetes-control-plane/hooks/relations/openstack-integration/copyright b/kubernetes-control-plane/hooks/relations/openstack-integration/copyright new file mode 100644 index 0000000..a91bdf1 --- /dev/null +++ b/kubernetes-control-plane/hooks/relations/openstack-integration/copyright @@ -0,0 +1,16 @@ +Format: http://dep.debian.net/deps/dep5/ + +Files: * +Copyright: Copyright 2018, Canonical Ltd., All Rights Reserved. +License: Apache License 2.0 + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + . + http://www.apache.org/licenses/LICENSE-2.0 + . + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/kubernetes-control-plane/hooks/relations/openstack-integration/docs/provides.md b/kubernetes-control-plane/hooks/relations/openstack-integration/docs/provides.md new file mode 100644 index 0000000..ee17ac6 --- /dev/null +++ b/kubernetes-control-plane/hooks/relations/openstack-integration/docs/provides.md @@ -0,0 +1,108 @@ +

provides

+ + +This is the provides side of the interface layer, for use only by the +OpenStack integration charm itself. + +The flags that are set by the provides side of this interface are: + +* **`endpoint.{endpoint_name}.requested`** This flag is set when there is + a new or updated request by a remote unit for OpenStack integration + features. The OpenStack integration charm should then iterate over each + request, perform whatever actions are necessary to satisfy those requests, + and then mark them as complete. + +

OpenStackIntegrationProvides

+ +```python +OpenStackIntegrationProvides(endpoint_name, relation_ids=None) +``` + +Example usage: + +```python +from charms.reactive import when, endpoint_from_flag +from charms import layer + +@when('endpoint.openstack.requests-pending') +def handle_requests(): + openstack = endpoint_from_flag('endpoint.openstack.requests-pending') + for request in openstack.requests: + request.set_credentials(layer.openstack.get_user_credentials()) + openstack.mark_completed() +``` + +

all_requests

+ + +A list of all of the [`IntegrationRequests`](#provides.OpenStackIntegrationProvides.all_requests.IntegrationRequests) that have been made. + +

new_requests

+ + +A list of the new or updated [`IntegrationRequests`](#provides.OpenStackIntegrationProvides.new_requests.IntegrationRequests) that have been made. + +

mark_completed

+ +```python +OpenStackIntegrationProvides.mark_completed() +``` + +Mark all requests as completed and remove the `requests-pending` flag. + +

IntegrationRequest

+ +```python +IntegrationRequest(unit) +``` + +A request for integration from a single remote unit. + +

has_credentials

+ + +Whether or not credentials have been set via `set_credentials`. + +

is_changed

+ + +Whether this request has changed since the last time it was +marked completed (if ever). + +

set_credentials

+ +```python +IntegrationRequest.set_credentials(auth_url, + region, + username, + password, + user_domain_name, + project_domain_name, + project_name, + endpoint_tls_ca, + version=None) +``` + +Set the credentials for this request. + +

set_lbaas_config

+ +```python +IntegrationRequest.set_lbaas_config(subnet_id, + floating_network_id, + lb_method, + manage_security_groups, + has_octavia=None) +``` + +Set the load-balancer-as-a-service config for this request. + +

set_block_storage_config

+ +```python +IntegrationRequest.set_block_storage_config(bs_version, trust_device_path, + ignore_volume_az) +``` + +Set the block storage config for this request. + diff --git a/kubernetes-control-plane/hooks/relations/openstack-integration/docs/requires.md b/kubernetes-control-plane/hooks/relations/openstack-integration/docs/requires.md new file mode 100644 index 0000000..510e292 --- /dev/null +++ b/kubernetes-control-plane/hooks/relations/openstack-integration/docs/requires.md @@ -0,0 +1,160 @@ +

requires

+ + +This is the requires side of the interface layer, for use in charms that wish +to request integration with OpenStack native features. The integration will be +provided by the OpenStack integration charm, which allows the requiring charm +to not require cloud credentials itself and not have a lot of OpenStack +specific API code. + +The flags that are set by the requires side of this interface are: + +* **`endpoint.{endpoint_name}.joined`** This flag is set when the relation + has been joined, and the charm should then use the methods documented below + to request specific OpenStack features. This flag is automatically removed + if the relation is broken. It should not be removed by the charm. + +* **`endpoint.{endpoint_name}.ready`** This flag is set once the requested + features have been enabled for the OpenStack instance on which the charm is + running. This flag is automatically removed if new integration features are + requested. It should not be removed by the charm. + +* **`endpoint.{endpoint_name}.ready.changed`** This flag is set if the data + changes after the ready flag was set. This flag should be removed by the + charm once handled. + +

OpenStackIntegrationRequires

+ +```python +OpenStackIntegrationRequires(endpoint_name, relation_ids=None) +``` + +Interface to request integration access. + +Note that due to resource limits and permissions granularity, policies are +limited to being applied at the charm level. That means that, if any +permissions are requested (i.e., any of the enable methods are called), +what is granted will be the sum of those ever requested by any instance of +the charm on this cloud. + +Labels, on the other hand, will be instance specific. + +Example usage: + +```python +from charms.reactive import when, endpoint_from_flag + +@when('endpoint.openstack.ready') +def openstack_integration_ready(): + openstack = endpoint_from_flag('endpoint.openstack.ready') + update_config_enable_openstack(openstack) +``` + +

auth_url

+ + +The authentication endpoint URL. + +

bs_version

+ + +What block storage API version to use, `auto` if autodetection is +desired, or `None` to use the default. + +

endpoint_tls_ca

+ + +Optional base64-encoded CA certificate for the authentication endpoint, +or None. + +

floating_network_id

+ + +Optional floating network ID, or None. + +

has_octavia

+ + +Whether the underlying OpenStack supports Octavia instead of +Neutron-based LBaaS. + +Will either be True, False, or None if it could not be determined for +some reason (typically due to connecting to an older integrator charm). + +

ignore_volume_az

+ + +Whether to ignore availability zones when attaching Cinder volumes. + +Will be `True`, `False`, or `None`. + +

is_changed

+ + +Whether or not the request for this instance has changed. + +

is_ready

+ + +Whether or not the request for this instance has been completed. + +

lb_method

+ + +Optional load-balancer method, or None. + +

manage_security_groups

+ + +Whether or not the Load Balancer should automatically manage security +group rules. + +Will be `True` or `False`. + +

password

+ + +The password. + +

project_domain_name

+ + +The project domain name. + +

project_name

+ + +The project name, also known as the tenant ID. + +

region

+ + +The region name. + +

subnet_id

+ + +Optional subnet ID to work in, or None. + +

trust_device_path

+ + +Whether to trust the block device name provided by Ceph. + +Will be `True`, `False`, or `None`. + +

user_domain_name

+ + +The user domain name. + +

username

+ + +The username. + +

version

+ + +Optional version number for the APIs or None. + diff --git a/kubernetes-control-plane/hooks/relations/openstack-integration/interface.yaml b/kubernetes-control-plane/hooks/relations/openstack-integration/interface.yaml new file mode 100644 index 0000000..a94fed4 --- /dev/null +++ b/kubernetes-control-plane/hooks/relations/openstack-integration/interface.yaml @@ -0,0 +1,4 @@ +name: openstack-integration +summary: Interface for connecting to the OpenStack integrator charm. +version: 1 +maintainer: Cory Johns diff --git a/kubernetes-control-plane/hooks/relations/openstack-integration/make_docs b/kubernetes-control-plane/hooks/relations/openstack-integration/make_docs new file mode 100644 index 0000000..a09c66f --- /dev/null +++ b/kubernetes-control-plane/hooks/relations/openstack-integration/make_docs @@ -0,0 +1,20 @@ +#!.tox/py3/bin/python + +import sys +from shutil import rmtree +from unittest.mock import patch + +import pydocmd.__main__ + + +with patch('charmhelpers.core.hookenv.metadata') as metadata: + metadata.return_value = { + 'requires': {'openstack': {'interface': 'openstack'}}, + 'provides': {'openstack': {'interface': 'openstack'}}, + } + sys.path.insert(0, '.') + print(sys.argv) + if len(sys.argv) == 1: + sys.argv.extend(['build']) + pydocmd.__main__.main() + rmtree('_build') diff --git a/kubernetes-control-plane/hooks/relations/openstack-integration/provides.py b/kubernetes-control-plane/hooks/relations/openstack-integration/provides.py new file mode 100644 index 0000000..2c788d6 --- /dev/null +++ b/kubernetes-control-plane/hooks/relations/openstack-integration/provides.py @@ -0,0 +1,154 @@ +""" +This is the provides side of the interface layer, for use only by the +OpenStack integration charm itself. + +The flags that are set by the provides side of this interface are: + +* **`endpoint.{endpoint_name}.requested`** This flag is set when there is + a new or updated request by a remote unit for OpenStack integration + features. 
The OpenStack integration charm should then iterate over each + request, perform whatever actions are necessary to satisfy those requests, + and then mark them as complete. +""" + +from operator import attrgetter + +from charms.reactive import Endpoint +from charms.reactive import when +from charms.reactive import toggle_flag, clear_flag + + +class OpenStackIntegrationProvides(Endpoint): + """ + Example usage: + + ```python + from charms.reactive import when, endpoint_from_flag + from charms import layer + + @when('endpoint.openstack.requests-pending') + def handle_requests(): + openstack = endpoint_from_flag('endpoint.openstack.requests-pending') + for request in openstack.requests: + request.set_credentials(layer.openstack.get_user_credentials()) + openstack.mark_completed() + ``` + """ + + @when('endpoint.{endpoint_name}.changed') + def check_requests(self): + toggle_flag(self.expand_name('requests-pending'), + len(self.all_requests) > 0) + clear_flag(self.expand_name('changed')) + + @property + def all_requests(self): + """ + A list of all of the #IntegrationRequests that have been made. + """ + if not hasattr(self, '_all_requests'): + self._all_requests = [IntegrationRequest(unit) + for unit in self.all_joined_units] + return self._all_requests + + @property + def new_requests(self): + """ + A list of the new or updated #IntegrationRequests that have been made. + """ + is_changed = attrgetter('is_changed') + return list(filter(is_changed, self.all_requests)) + + def mark_completed(self): + """ + Mark all requests as completed and remove the `requests-pending` flag. + """ + clear_flag(self.expand_name('requests-pending')) + + +class IntegrationRequest: + """ + A request for integration from a single remote unit. 
+ """ + def __init__(self, unit): + self._unit = unit + + @property + def _to_publish(self): + return self._unit.relation.to_publish + + @property + def is_changed(self): + """ + Whether this request has changed since the last time it was + marked completed (if ever). + """ + return not self.has_credentials + + @property + def unit_name(self): + return self._unit.unit_name + + def set_credentials(self, + auth_url, + region, + username, + password, + user_domain_name, + project_domain_name, + project_name, + endpoint_tls_ca, + version=None): + """ + Set the credentials for this request. + """ + self._unit.relation.to_publish.update({ + 'auth_url': auth_url, + 'region': region, + 'username': username, + 'password': password, + 'user_domain_name': user_domain_name, + 'project_domain_name': project_domain_name, + 'project_name': project_name, + 'endpoint_tls_ca': endpoint_tls_ca, + 'version': version, + }) + + def set_lbaas_config(self, + subnet_id, + floating_network_id, + lb_method, + manage_security_groups, + has_octavia=None, + internal_lb=False): + """ + Set the load-balancer-as-a-service config for this request. + """ + self._unit.relation.to_publish.update({ + 'subnet_id': subnet_id, + 'floating_network_id': floating_network_id, + 'lb_method': lb_method, + 'internal_lb': internal_lb, + 'manage_security_groups': manage_security_groups, + 'has_octavia': has_octavia, + }) + + def set_block_storage_config(self, + bs_version, + trust_device_path, + ignore_volume_az): + """ + Set the block storage config for this request. + """ + self._unit.relation.to_publish.update({ + 'bs_version': bs_version, + 'trust_device_path': trust_device_path, + 'ignore_volume_az': ignore_volume_az, + }) + + @property + def has_credentials(self): + """ + Whether or not credentials have been set via `set_credentials`. 
+ """ + return 'credentials' in self._unit.relation.to_publish diff --git a/kubernetes-control-plane/hooks/relations/openstack-integration/pydocmd.yml b/kubernetes-control-plane/hooks/relations/openstack-integration/pydocmd.yml new file mode 100644 index 0000000..aa0a286 --- /dev/null +++ b/kubernetes-control-plane/hooks/relations/openstack-integration/pydocmd.yml @@ -0,0 +1,16 @@ +site_name: 'OpenStack Integration Interface' + +generate: + - requires.md: + - requires + - requires.OpenStackIntegrationRequires+ + - provides.md: + - provides + - provides.OpenStackIntegrationProvides+ + - provides.IntegrationRequest+ + +pages: + - Requires: requires.md + - Provides: provides.md + +gens_dir: docs diff --git a/kubernetes-control-plane/hooks/relations/openstack-integration/requires.py b/kubernetes-control-plane/hooks/relations/openstack-integration/requires.py new file mode 100644 index 0000000..3566b45 --- /dev/null +++ b/kubernetes-control-plane/hooks/relations/openstack-integration/requires.py @@ -0,0 +1,263 @@ +""" +This is the requires side of the interface layer, for use in charms that wish +to request integration with OpenStack native features. The integration will be +provided by the OpenStack integration charm, which allows the requiring charm +to not require cloud credentials itself and not have a lot of OpenStack +specific API code. + +The flags that are set by the requires side of this interface are: + +* **`endpoint.{endpoint_name}.joined`** This flag is set when the relation + has been joined, and the charm should then use the methods documented below + to request specific OpenStack features. This flag is automatically removed + if the relation is broken. It should not be removed by the charm. + +* **`endpoint.{endpoint_name}.ready`** This flag is set once the requested + features have been enabled for the OpenStack instance on which the charm is + running. This flag is automatically removed if new integration features are + requested. 
It should not be removed by the charm. + +* **`endpoint.{endpoint_name}.ready.changed`** This flag is set if the data + changes after the ready flag was set. This flag should be removed by the + charm once handled. +""" + + +from charms.reactive import Endpoint +from charms.reactive import when, when_not +from charms.reactive import set_flag, clear_flag, toggle_flag, is_flag_set +from charms.reactive import data_changed + + +class OpenStackIntegrationRequires(Endpoint): + """ + Interface to request integration access. + + Note that due to resource limits and permissions granularity, policies are + limited to being applied at the charm level. That means that, if any + permissions are requested (i.e., any of the enable methods are called), + what is granted will be the sum of those ever requested by any instance of + the charm on this cloud. + + Labels, on the other hand, will be instance specific. + + Example usage: + + ```python + from charms.reactive import when, endpoint_from_flag + + @when('endpoint.openstack.ready') + def openstack_integration_ready(): + openstack = endpoint_from_flag('endpoint.openstack.ready') + update_config_enable_openstack(openstack) + ``` + """ + + @property + def _received(self): + """ + Helper to streamline access to received data since we expect to only + ever be connected to a single OpenStack integration application with a + single unit. + """ + return self.relations[0].joined_units.received + + @property + def _to_publish(self): + """ + Helper to streamline access to received data since we expect to only + ever be connected to a single OpenStack integration application with a + single unit. + """ + return self.relations[0].to_publish + + @when('endpoint.{endpoint_name}.changed') + def check_ready(self): + # My middle name is ready. No, that doesn't sound right. + # I eat ready for breakfast. 
+ was_ready = is_flag_set(self.expand_name('ready')) + toggle_flag(self.expand_name('ready'), self.is_ready) + if self.is_ready and was_ready and self.is_changed: + set_flag(self.expand_name('ready.changed')) + clear_flag(self.expand_name('changed')) + + @when_not('endpoint.{endpoint_name}.joined') + def remove_ready(self): + clear_flag(self.expand_name('ready')) + + @property + def is_ready(self): + """ + Whether or not the request for this instance has been completed. + """ + # Although more information can be passed, such as LBaaS access + # the minimum needed to be considered ready is defined here + return all(field is not None for field in [ + self.auth_url, + self.username, + self.password, + self.user_domain_name, + self.project_domain_name, + self.project_name, + ]) + + @property + def is_changed(self): + """ + Whether or not the request for this instance has changed. + """ + return data_changed(self.expand_name('all-data'), [ + self.auth_url, + self.region, + self.username, + self.password, + self.user_domain_name, + self.project_domain_name, + self.project_name, + self.endpoint_tls_ca, + self.subnet_id, + self.floating_network_id, + self.lb_method, + self.internal_lb, + self.manage_security_groups, + ]) + + @property + def auth_url(self): + """ + The authentication endpoint URL. + """ + return self._received['auth_url'] + + @property + def region(self): + """ + The region name. + """ + return self._received['region'] + + @property + def username(self): + """ + The username. + """ + return self._received['username'] + + @property + def password(self): + """ + The password. + """ + return self._received['password'] + + @property + def user_domain_name(self): + """ + The user domain name. + """ + return self._received['user_domain_name'] + + @property + def project_domain_name(self): + """ + The project domain name. 
+ """ + return self._received['project_domain_name'] + + @property + def project_name(self): + """ + The project name, also known as the tenant ID. + """ + return self._received['project_name'] + + @property + def endpoint_tls_ca(self): + """ + Optional base64-encoded CA certificate for the authentication endpoint, + or None. + """ + return self._received['endpoint_tls_ca'] or None + + @property + def version(self): + """ + Optional version number for the APIs or None. + """ + return self._received['version'] or None + + @property + def subnet_id(self): + """ + Optional subnet ID to work in, or None. + """ + return self._received['subnet_id'] + + @property + def floating_network_id(self): + """ + Optional floating network ID, or None. + """ + return self._received['floating_network_id'] + + @property + def lb_method(self): + """ + Optional load-balancer method, or None. + """ + return self._received['lb_method'] + + @property + def internal_lb(self) -> bool: + """ + If should force internal loadbalancer use. + Defaults to false. + """ + return bool(self._received.get('internal_lb', False)) + + @property + def manage_security_groups(self): + """ + Whether or not the Load Balancer should automatically manage security + group rules. + + Will be `True` or `False`. + """ + return self._received['manage_security_groups'] or False + + @property + def bs_version(self): + """ + What block storage API version to use, `auto` if autodetection is + desired, or `None` to use the default. + """ + return self._received['bs_version'] + + @property + def trust_device_path(self): + """ + Whether to trust the block device name provided by Ceph. + + Will be `True`, `False`, or `None`. + """ + return self._received['trust_device_path'] + + @property + def ignore_volume_az(self): + """ + Whether to ignore availability zones when attaching Cinder volumes. + + Will be `True`, `False`, or `None`. 
+ """ + return self._received['ignore_volume_az'] + + @property + def has_octavia(self): + """ + Whether the underlying OpenStack supports Octavia instead of + Neutron-based LBaaS. + + Will either be True, False, or None if it could not be determined for + some reason (typically due to connecting to an older integrator charm). + """ + return self._received['has_octavia'] diff --git a/kubernetes-control-plane/hooks/relations/prometheus-manual/.gitignore b/kubernetes-control-plane/hooks/relations/prometheus-manual/.gitignore new file mode 100644 index 0000000..01a6a44 --- /dev/null +++ b/kubernetes-control-plane/hooks/relations/prometheus-manual/.gitignore @@ -0,0 +1,3 @@ +.docs +__pycache__ +*.pyc diff --git a/kubernetes-control-plane/hooks/relations/prometheus-manual/LICENSE b/kubernetes-control-plane/hooks/relations/prometheus-manual/LICENSE new file mode 100644 index 0000000..d645695 --- /dev/null +++ b/kubernetes-control-plane/hooks/relations/prometheus-manual/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. 
+ + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. 
You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. 
Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/kubernetes-control-plane/hooks/relations/prometheus-manual/README.md b/kubernetes-control-plane/hooks/relations/prometheus-manual/README.md new file mode 100644 index 0000000..4ff5c83 --- /dev/null +++ b/kubernetes-control-plane/hooks/relations/prometheus-manual/README.md @@ -0,0 +1,113 @@ +# Interface prometheus-manual + +This is a [Juju][] interface layer that enables a charm which provides manual +or raw metric scraper job configuration stanzas for Prometheus 2. + +The format for the job configuration data can be found in the [Prometheus +Configuration Docs][]. The job configuration will be included as an item +under `scrape_configs` largely unchanged, except for two things: + +* To ensure uniqueness, the provided job name will have a UUID appended to it. +* Because the CA cert must be written to disk separately from the config, any + `tls_config` sections will have their `ca_file` field values replaced with + the path to the file where the provided `ca_cert` data is written. 
+ +# Example Usage + +First, you must define the relation endpoint in your charm's `metadata.yaml`: + +```yaml +provides: + prometheus: + interface: prometheus-manual +``` + +Next, you must ensure the interface layer is included in your `layer.yaml`: + +```yaml +includes: + - interface:prometheus-manual +``` + +Then, in your reactive code, add the following, modifying the job data as +your charm needs: + +```python +from charms.reactive import endpoint_from_flag + + +@when('endpoint.prometheus.joined', + 'tls.ca.available') +def register_prometheus_jobs(): + prometheus = endpoint_from_flag('endpoint.prometheus.joined') + tls = endpoint_from_flag('tls.ca.available') + prometheus.register_job(job_name='kubernetes-apiservers', + ca_cert=tls.root_ca_cert, + job_data={ + 'kubernetes_sd_configs': [{'role': 'endpoints'}], + 'scheme': 'https', + 'tls_config': {'ca_file': '__ca_file__'}, # placeholder for saved filename + 'bearer_token': get_token('system:prometheus'), + }) + prometheus.register_job(job_name='kubernetes-nodes', + ca_cert=tls.root_ca_cert, + job_data={ + 'kubernetes_sd_configs': [{'role': 'node'}], + 'scheme': 'https', + 'tls_config': {'ca_file': '__ca_file__'}, # placeholder for saved filename + 'bearer_token': get_token('system:prometheus'), + }) +``` + + + +# Reference + +* [common.md](common.md) + * [JobRequest](docs/common.md#jobrequest) + * [egress_subnets](docs/common.md#jobrequest-egress_subnets) + * [fromkeys](docs/common.md#jobrequest-fromkeys) + * [ingress_address](docs/common.md#jobrequest-ingress_address) + * [is_created](docs/common.md#jobrequest-is_created) + * [is_received](docs/common.md#jobrequest-is_received) + * [respond](docs/common.md#jobrequest-respond) + * [to_json](docs/common.md#jobrequest-to_json) + * [JobResponse](docs/common.md#jobresponse) + * [fromkeys](docs/common.md#jobresponse-fromkeys) +* [provides.md](provides.md) + * [PrometheusManualProvides](docs/provides.md#prometheusmanualprovides) + * 
[all_departed_units](docs/provides.md#prometheusmanualprovides-all_departed_units) + * [all_joined_units](docs/provides.md#prometheusmanualprovides-all_joined_units) + * [all_units](docs/provides.md#prometheusmanualprovides-all_units) + * [endpoint_name](docs/provides.md#prometheusmanualprovides-endpoint_name) + * [is_joined](docs/provides.md#prometheusmanualprovides-is_joined) + * [joined](docs/provides.md#prometheusmanualprovides-joined) + * [manage_flags](docs/provides.md#prometheusmanualprovides-manage_flags) + * [register_job](docs/provides.md#prometheusmanualprovides-register_job) + * [relations](docs/provides.md#prometheusmanualprovides-relations) + * [requests](docs/provides.md#prometheusmanualprovides-requests) + * [responses](docs/provides.md#prometheusmanualprovides-responses) +* [requires.md](requires.md) + * [PrometheusManualRequires](docs/requires.md#prometheusmanualrequires) + * [all_departed_units](docs/requires.md#prometheusmanualrequires-all_departed_units) + * [all_joined_units](docs/requires.md#prometheusmanualrequires-all_joined_units) + * [all_requests](docs/requires.md#prometheusmanualrequires-all_requests) + * [all_units](docs/requires.md#prometheusmanualrequires-all_units) + * [endpoint_name](docs/requires.md#prometheusmanualrequires-endpoint_name) + * [is_joined](docs/requires.md#prometheusmanualrequires-is_joined) + * [jobs](docs/requires.md#prometheusmanualrequires-jobs) + * [joined](docs/requires.md#prometheusmanualrequires-joined) + * [manage_flags](docs/requires.md#prometheusmanualrequires-manage_flags) + * [new_jobs](docs/requires.md#prometheusmanualrequires-new_jobs) + * [new_requests](docs/requires.md#prometheusmanualrequires-new_requests) + * [relations](docs/requires.md#prometheusmanualrequires-relations) + + + +# Contact Information + +Maintainer: Cory Johns <Cory.Johns@canonical.com> + + +[Juju]: https://jujucharms.com +[Prometheus Configuration Docs]: https://prometheus.io/docs/prometheus/latest/configuration/configuration/ 
diff --git a/kubernetes-control-plane/hooks/relations/prometheus-manual/__init__.py b/kubernetes-control-plane/hooks/relations/prometheus-manual/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/kubernetes-control-plane/hooks/relations/prometheus-manual/common.py b/kubernetes-control-plane/hooks/relations/prometheus-manual/common.py new file mode 100644 index 0000000..530f965 --- /dev/null +++ b/kubernetes-control-plane/hooks/relations/prometheus-manual/common.py @@ -0,0 +1,57 @@ +import json +from copy import deepcopy + +from charms.reactive import BaseRequest, BaseResponse, Field + + +class JobResponse(BaseResponse): + success = Field('Whether or not the registration succeeded') + reason = Field('If failed, a description of why') + + +class JobRequest(BaseRequest): + RESPONSE_CLASS = JobResponse + + job_name = Field('Desired name for the job. To ensure uniqueness, the ' + 'request ID will be appended to the final job name.') + + job_data = Field('Config data for the job.') + + ca_cert = Field('Cert data for the CA used to validate connections.') + + def to_json(self, ca_file=None): + """ + Render the job request to JSON string which can be included directly + into Prometheus config. + + Keys will be sorted in the rendering to ensure a stable ordering for + comparisons to detect changes. + + If `ca_file` is given, it will be used to replace the value of any + `ca_file` fields in the job. The charm should ensure that the + request's `ca_cert` data is written to that path prior to calling this + method. 
+ """ + job_data = deepcopy(self.job_data) # make a copy we can modify + job_data['job_name'] = '{}-{}'.format(self.job_name, self.request_id) + + if ca_file: + for key, value in job_data.items(): + # update the cert path at the job level + if key == 'tls_config': + value['ca_file'] = str(ca_file) + + # update the cert path at the SD config level + if key.endswith('_sd_configs'): + for sd_config in value: + if 'ca_file' in sd_config.get('tls_config', {}): + sd_config['tls_config']['ca_file'] = str(ca_file) + + return json.dumps(job_data, sort_keys=True) + + def respond(self, success, reason=None): + """ + Acknowledge this request, and indicate success or failure with an + optional explanation. + """ + super().respond(success=success, reason=reason) diff --git a/kubernetes-control-plane/hooks/relations/prometheus-manual/copyright b/kubernetes-control-plane/hooks/relations/prometheus-manual/copyright new file mode 100644 index 0000000..69768db --- /dev/null +++ b/kubernetes-control-plane/hooks/relations/prometheus-manual/copyright @@ -0,0 +1,16 @@ +Format: http://dep.debian.net/deps/dep5/ + +Files: * +Copyright: Copyright 2019, Canonical Ltd., All Rights Reserved. +License: Apache License 2.0 + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + . + http://www.apache.org/licenses/LICENSE-2.0 + . + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/kubernetes-control-plane/hooks/relations/prometheus-manual/docs/common.md b/kubernetes-control-plane/hooks/relations/prometheus-manual/docs/common.md new file mode 100644 index 0000000..a97d54b --- /dev/null +++ b/kubernetes-control-plane/hooks/relations/prometheus-manual/docs/common.md @@ -0,0 +1,62 @@ +# `class JobRequest(BaseRequest)` + +Base class for requests using the request / response pattern. + +Subclasses **must** set the ``RESPONSE_CLASS`` attribute to a subclass of +the :class:`BaseResponse` which defines the fields that the response will +use. They must also define additional attributes as :class:`Field`s. + +For example:: + + class TLSResponse(BaseResponse): + key = Field('Private key for the cert') + cert = Field('Public cert info') + + + class TLSRequest(BaseRequest): + RESPONSE_CLASS = TLSResponse + + common_name = Field('Common Name (CN) for the cert to be created') + sans = Field('List of Subject Alternative Names (SANs)') + +## `egress_subnets` + +Subnets over which network traffic to the requester will flow. + +## `None` + +Returns a new dict with keys from iterable and values equal to value. + +## `ingress_address` + +Address to use if a connection to the requester is required. + +## `is_created` + +Whether this request was created by this side of the relation. + +## `is_received` + +Whether this request was received by the other side of the relation. + +## `def respond(self, success, reason=None)` + +Acknowledge this request, and indicate success or failure with an +optional explanation. + +## `def to_json(self)` + +Render the job request to JSON string which can be included directly +into Prometheus config. + +Keys will be sorted in the rendering to ensure a stable ordering for +comparisons to detect changes. + +# `class JobResponse(BaseResponse)` + +Base class for responses using the request / response pattern. + +## `None` + +Returns a new dict with keys from iterable and values equal to value. 
+ diff --git a/kubernetes-control-plane/hooks/relations/prometheus-manual/docs/provides.md b/kubernetes-control-plane/hooks/relations/prometheus-manual/docs/provides.md new file mode 100644 index 0000000..439b4e5 --- /dev/null +++ b/kubernetes-control-plane/hooks/relations/prometheus-manual/docs/provides.md @@ -0,0 +1,119 @@ +# `class PrometheusManualProvides(RequesterEndpoint)` + +Base class for Endpoints that create requests in the request / response +pattern. + +Subclasses **must** set the ``REQUEST_CLASS`` attribute to a subclass +of :class:`BaseRequest` which defines the fields the request will use. + +## `all_departed_units` + +Collection of all units that were previously part of any relation on +this endpoint but which have since departed. + +This collection is persistent and mutable. The departed units will +be kept until they are explicitly removed, to allow for reasonable +cleanup of units that have left. + +Example: You need to run a command each time a unit departs the relation. + +.. code-block:: python + + @when('endpoint.{endpoint_name}.departed') + def handle_departed_unit(self): + for name, unit in self.all_departed_units.items(): + # run the command to remove `unit` from the cluster + # .. + self.all_departed_units.clear() + clear_flag(self.expand_name('departed')) + +Once a unit is departed, it will no longer show up in +:attr:`all_joined_units`. Note that units are considered departed as +soon as the departed hook is entered, which differs slightly from how +the Juju primitives behave (departing units are still returned from +``related-units`` until after the departed hook is complete). + +This collection is a :class:`KeyList`, so can be used as a mapping to +look up units by their unit name, or iterated or accessed by index. + +## `all_joined_units` + +A list view of all the units of all relations attached to this +:class:`~charms.reactive.endpoints.Endpoint`. 
+ +This is actually a +:class:`~charms.reactive.endpoints.CombinedUnitsView`, so the units +will be in order by relation ID and then unit name, and you can access a +merged view of all the units' data as a single mapping. You should be +very careful when using the merged data collections, however, and +consider carefully what will happen when the endpoint has multiple +relations and multiple remote units on each. It is probably better to +iterate over each unit and handle its data individually. See +:class:`~charms.reactive.endpoints.CombinedUnitsView` for an +explanation of how the merged data collections work. + +Note that, because a given application might be related multiple times +on a given endpoint, units may show up in this collection more than +once. + +## `all_units` + +.. deprecated:: 0.6.1 + Use :attr:`all_joined_units` instead + +## `endpoint_name` + +Relation name of this endpoint. + +## `is_joined` + +Whether this endpoint has remote applications attached to it. + +## `joined` + +.. deprecated:: 0.6.3 + Use :attr:`is_joined` instead + +## `def manage_flags(self)` + +Method that subclasses can override to perform any flag management +needed during startup. + +This will be called automatically after the framework-managed automatic +flags have been updated. + +## `def register_job(self, job_name, job_data, ca_cert=None)` + +Register a manual job. + +The job data should be the (unserialized) data defining the job. + +To ensure uniqueness, a UUID will be added to the job name, and it will +be injected into the job data. + +If a CA cert is given, the value of any ca_file field in the job data +will be replaced with a filename after the CA cert data is written, so +a placeholder value should be used. + +## `relations` + +Collection of :class:`Relation` instances that are established for +this :class:`Endpoint`. + +This is a :class:`KeyList`, so it can be iterated and indexed as a list, +or you can look up relations by their ID. 
For example:: + + rel0 = endpoint.relations[0] + assert rel0 is endpoint.relations[rel0.relation_id] + assert all(rel is endpoint.relations[rel.relation_id] + for rel in endpoint.relations) + print(', '.join(endpoint.relations.keys())) + +## `requests` + +A list of all requests which have been submitted. + +## `responses` + +A list of all responses which have been received. + diff --git a/kubernetes-control-plane/hooks/relations/prometheus-manual/docs/requires.md b/kubernetes-control-plane/hooks/relations/prometheus-manual/docs/requires.md new file mode 100644 index 0000000..31a7e8f --- /dev/null +++ b/kubernetes-control-plane/hooks/relations/prometheus-manual/docs/requires.md @@ -0,0 +1,117 @@ +# `class PrometheusManualRequires(ResponderEndpoint)` + +Base class for Endpoints that respond to requests in the request / response +pattern. + +Subclasses **must** set the ``REQUEST_CLASS`` attribute to a subclass +of :class:`BaseRequest` which defines the fields the request will use. + +## `all_departed_units` + +Collection of all units that were previously part of any relation on +this endpoint but which have since departed. + +This collection is persistent and mutable. The departed units will +be kept until they are explicitly removed, to allow for reasonable +cleanup of units that have left. + +Example: You need to run a command each time a unit departs the relation. + +.. code-block:: python + + @when('endpoint.{endpoint_name}.departed') + def handle_departed_unit(self): + for name, unit in self.all_departed_units.items(): + # run the command to remove `unit` from the cluster + # .. + self.all_departed_units.clear() + clear_flag(self.expand_name('departed')) + +Once a unit is departed, it will no longer show up in +:attr:`all_joined_units`. 
Note that units are considered departed as +soon as the departed hook is entered, which differs slightly from how +the Juju primitives behave (departing units are still returned from +``related-units`` until after the departed hook is complete). + +This collection is a :class:`KeyList`, so can be used as a mapping to +look up units by their unit name, or iterated or accessed by index. + +## `all_joined_units` + +A list view of all the units of all relations attached to this +:class:`~charms.reactive.endpoints.Endpoint`. + +This is actually a +:class:`~charms.reactive.endpoints.CombinedUnitsView`, so the units +will be in order by relation ID and then unit name, and you can access a +merged view of all the units' data as a single mapping. You should be +very careful when using the merged data collections, however, and +consider carefully what will happen when the endpoint has multiple +relations and multiple remote units on each. It is probably better to +iterate over each unit and handle its data individually. See +:class:`~charms.reactive.endpoints.CombinedUnitsView` for an +explanation of how the merged data collections work. + +Note that, because a given application might be related multiple times +on a given endpoint, units may show up in this collection more than +once. + +## `all_requests` + +A list of all requests, including ones which have been responded to. + +## `all_units` + +.. deprecated:: 0.6.1 + Use :attr:`all_joined_units` instead + +## `endpoint_name` + +Relation name of this endpoint. + +## `is_joined` + +Whether this endpoint has remote applications attached to it. + +## `jobs` + +Return a list of all jobs to be registered. + +## `joined` + +.. deprecated:: 0.6.3 + Use :attr:`is_joined` instead + +## `def manage_flags(self)` + +Method that subclasses can override to perform any flag management +needed during startup. + +This will be called automatically after the framework-managed automatic +flags have been updated. 
+ +## `new_jobs` + +Return a list of new jobs to be registered. + +## `new_requests` + +A list of requests which have not been responded. + +Requests should be handled by the charm and then responded to by +calling ``request.respond(...)``. + +## `relations` + +Collection of :class:`Relation` instances that are established for +this :class:`Endpoint`. + +This is a :class:`KeyList`, so it can be iterated and indexed as a list, +or you can look up relations by their ID. For example:: + + rel0 = endpoint.relations[0] + assert rel0 is endpoint.relations[rel0.relation_id] + assert all(rel is endpoint.relations[rel.relation_id] + for rel in endpoint.relations) + print(', '.join(endpoint.relations.keys())) + diff --git a/kubernetes-control-plane/hooks/relations/prometheus-manual/interface.yaml b/kubernetes-control-plane/hooks/relations/prometheus-manual/interface.yaml new file mode 100644 index 0000000..5c324c6 --- /dev/null +++ b/kubernetes-control-plane/hooks/relations/prometheus-manual/interface.yaml @@ -0,0 +1,6 @@ +name: prometheus-manual +summary: Interface for registering manual job definitions with Prometheus +version: 1 +maintainer: "Cory Johns " +exclude: + - .docs diff --git a/kubernetes-control-plane/hooks/relations/prometheus-manual/provides.py b/kubernetes-control-plane/hooks/relations/prometheus-manual/provides.py new file mode 100644 index 0000000..884629c --- /dev/null +++ b/kubernetes-control-plane/hooks/relations/prometheus-manual/provides.py @@ -0,0 +1,41 @@ +from charms.reactive import ( + toggle_flag, + RequesterEndpoint, +) + +from .common import JobRequest + + +class PrometheusManualProvides(RequesterEndpoint): + REQUEST_CLASS = JobRequest + + def manage_flags(self): + super().manage_flags() + toggle_flag(self.expand_name('endpoint.{endpoint_name}.available'), + self.is_joined and self.requests) + + def register_job(self, job_name, job_data, ca_cert=None, relation=None): + """ + Register a manual job. 
+ + The job data should be the (unserialized) data defining the job. + + To ensure uniqueness, a UUID will be added to the job name, and it will + be injected into the job data. + + If a CA cert is given, the value of any ca_file field in the job data + will be replaced with a filename after the CA cert data is written, so + a placeholder value should be used. + + If a specific relation is not given, the job will be registered with + every related Prometheus. + """ + # we might be connected to multiple prometheuses for some strange + # reason, so just send the job to all of them + relations = [relation] if relation is not None else self.relations + for relation in relations: + JobRequest.create_or_update(match_fields=['job_name'], + relation=relation, + job_name=job_name, + job_data=job_data, + ca_cert=ca_cert) diff --git a/kubernetes-control-plane/hooks/relations/prometheus-manual/requires.py b/kubernetes-control-plane/hooks/relations/prometheus-manual/requires.py new file mode 100644 index 0000000..a8d1acb --- /dev/null +++ b/kubernetes-control-plane/hooks/relations/prometheus-manual/requires.py @@ -0,0 +1,31 @@ +from charms.reactive import ( + toggle_flag, + ResponderEndpoint, +) + +from .common import JobRequest + + +class PrometheusManualRequires(ResponderEndpoint): + REQUEST_CLASS = JobRequest + + def manage_flags(self): + super().manage_flags() + toggle_flag(self.expand_name('endpoint.{endpoint_name}.has_jobs'), + self.is_joined and self.jobs) + toggle_flag(self.expand_name('endpoint.{endpoint_name}.new_jobs'), + self.is_joined and self.new_jobs) + + @property + def jobs(self): + """ + Return a list of all jobs to be registered. + """ + return self.all_requests + + @property + def new_jobs(self): + """ + Return a list of new jobs to be registered. 
+ """ + return self.new_requests diff --git a/kubernetes-control-plane/hooks/relations/public-address/README.md b/kubernetes-control-plane/hooks/relations/public-address/README.md new file mode 100644 index 0000000..06be3ae --- /dev/null +++ b/kubernetes-control-plane/hooks/relations/public-address/README.md @@ -0,0 +1,59 @@ +# Overview + +This interface layer implements a public address protocol useful for load +balancers and their subordinates. The load balancers (providers) set their +own public address and port, which is then available to the subordinates +(requirers). + +# Usage + +## Provides + +By providing the `public-address` interface, your charm is providing an HTTP +server that can load-balance for another HTTP based service. + +Your charm need only provide the address and port on which it is serving its +content, as soon as the `{relation_name}.available` state is set: + +```python +from charmhelpers.core import hookenv +@when('website.available') +def configure_website(website): + website.set_address_port(hookenv.unit_get('public-address'), hookenv.config('port')) +``` + +## Requires + +By requiring the `public-address` interface, your charm is consuming one or +more HTTP servers, to load-balance a set of servers, etc. + +Your charm should respond to the `{relation_name}.available` state, which +indicates that there is at least one HTTP server connected. + +The `get_addresses_ports()` method returns a list of available addresses and +ports. + +The return value is a list of dicts of the following form: + +```python +[ + { + 'public-address': address_of_host, + 'port': port_for_host, + }, + # ... 
+] +``` + +A trivial example of handling this interface would be: + +```python +from charmhelpers.core import hookenv +@when('loadbalancer.available') +def update_reverse_proxy_config(loadbalancer): + hosts = loadbalancer.get_addresses_ports() + for host in hosts: + hookenv.log('The loadbalancer for this unit is {}:{}'.format( + host['public-address'], + host['port'])) +``` diff --git a/kubernetes-control-plane/hooks/relations/public-address/__init__.py b/kubernetes-control-plane/hooks/relations/public-address/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/kubernetes-control-plane/hooks/relations/public-address/interface.yaml b/kubernetes-control-plane/hooks/relations/public-address/interface.yaml new file mode 100644 index 0000000..c9849e4 --- /dev/null +++ b/kubernetes-control-plane/hooks/relations/public-address/interface.yaml @@ -0,0 +1,4 @@ +name: public-address +summary: A basic interface to provide the public address for load balancers. +version: 1 +repo: https://githb.com/juju-solutions/interface-public-address.git diff --git a/kubernetes-control-plane/hooks/relations/public-address/provides.py b/kubernetes-control-plane/hooks/relations/public-address/provides.py new file mode 100644 index 0000000..09b9915 --- /dev/null +++ b/kubernetes-control-plane/hooks/relations/public-address/provides.py @@ -0,0 +1,60 @@ +import json + +from charms.reactive import toggle_flag +from charms.reactive import Endpoint + + +class PublicAdddressProvides(Endpoint): + + def manage_flags(self): + toggle_flag(self.expand_name('{endpoint_name}.available'), + self.is_joined) + + def set_address_port(self, address, port, relation=None): + if relation is None: + # no relation specified, so send the same data to everyone + relations = self.relations + else: + # specific relation given, so only send the data to that one + relations = [relation] + if type(address) is list: + # build 2 lists to zip together that are the same length + length = len(address) + p = 
[port] * length + combined = zip(address, p) + clients = [{'public-address': a, 'port': p} + for a, p in combined] + # for backwards compatibility, we just send a single entry + # and have an array of dictionaries in a field of that + # entry for the other entries. + first = clients.pop(0) + first['extended_data'] = json.dumps(clients) + for relation in relations: + relation.to_publish_raw.update(first) + else: + for relation in relations: + relation.to_publish_raw.update({'public-address': address, + 'port': port}) + + @property + def requests(self): + return [Request(rel) for rel in self.relations] + + +class Request: + def __init__(self, rel): + self.rel = rel + + @property + def application_name(self): + return self.rel.application_name + + @property + def members(self): + return [(u.received_raw.get('ingress-address', + u.received_raw['private-address']), + u.received_raw.get('port', '6443')) + for u in self.rel.joined_units] + + def set_address_port(self, address, port): + self.rel.endpoint.set_address_port(address, port, self.rel) diff --git a/kubernetes-control-plane/hooks/relations/public-address/requires.py b/kubernetes-control-plane/hooks/relations/public-address/requires.py new file mode 100644 index 0000000..467d129 --- /dev/null +++ b/kubernetes-control-plane/hooks/relations/public-address/requires.py @@ -0,0 +1,44 @@ +import json + +from charms.reactive import toggle_flag, Endpoint + + +class PublicAddressRequires(Endpoint): + def manage_flags(self): + toggle_flag(self.expand_name('{endpoint_name}.available'), + len(self.get_addresses_ports()) > 0) + + def set_backend_port(self, port): + """ + Set the port that the backend service is listening on. + + Defaults to 6443 if not set. + """ + for rel in self.relations: + rel.to_publish_raw['port'] = str(port) + + def get_addresses_ports(self): + '''Returns a list of available HTTP providers and their associated + public addresses and ports. 
+ + The return value is a list of dicts of the following form:: + [ + { + 'public-address': address_for_frontend, + 'port': port_for_frontend, + }, + # ... + ] + ''' + hosts = set() + for relation in self.relations: + for unit in relation.joined_units: + data = unit.received_raw + hosts.add((data['public-address'], data['port'])) + if 'extended_data' in data: + for ed in json.loads(data['extended_data']): + hosts.add((ed['public-address'], ed['port'])) + + return [{'public-address': pa, 'port': p} + for pa, p in sorted(host for host in hosts + if None not in host)] diff --git a/kubernetes-control-plane/hooks/relations/tls-certificates/.gitignore b/kubernetes-control-plane/hooks/relations/tls-certificates/.gitignore new file mode 100644 index 0000000..93813bc --- /dev/null +++ b/kubernetes-control-plane/hooks/relations/tls-certificates/.gitignore @@ -0,0 +1,4 @@ +.tox +__pycache__ +*.pyc +_build diff --git a/kubernetes-control-plane/hooks/relations/tls-certificates/README.md b/kubernetes-control-plane/hooks/relations/tls-certificates/README.md new file mode 100644 index 0000000..733da6d --- /dev/null +++ b/kubernetes-control-plane/hooks/relations/tls-certificates/README.md @@ -0,0 +1,90 @@ +# Interface tls-certificates + +This is a [Juju][] interface layer that enables a charm which requires TLS +certificates to relate to a charm which can provide them, such as [Vault][] or +[EasyRSA][] + +To get started please read the [Introduction to PKI][] which defines some PKI +terms, concepts and processes used in this document. + +# Example Usage + +Let's say you have a charm which needs a server certificate for a service it +provides to other charms and a client certificate for a database it consumes +from another charm. The charm provides its own service on the `clients` +relation endpoint, and it consumes the database on the `db` relation endpoint. 
+ +First, you must define the relation endpoint in your charm's `metadata.yaml`: + +```yaml +requires: + cert-provider: + interface: tls-certificates +``` + +Next, you must ensure the interface layer is included in your `layer.yaml`: + +```yaml +includes: + - interface:tls-certificates +``` + +Then, in your reactive code, add the following, changing `update_certs` to +handle the certificates however your charm needs: + +```python +from charmhelpers.core import hookenv, host +from charms.reactive import endpoint_from_flag + + +@when('cert-provider.ca.changed') +def install_root_ca_cert(): + cert_provider = endpoint_from_flag('cert-provider.ca.available') + host.install_ca_cert(cert_provider.root_ca_cert) + clear_flag('cert-provider.ca.changed') + + +@when('cert-provider.available') +def request_certificates(): + cert_provider = endpoint_from_flag('cert-provider.available') + + # get ingress info + ingress_for_clients = hookenv.network_get('clients')['ingress-addresses'] + ingress_for_db = hookenv.network_get('db')['ingress-addresses'] + + # use first ingress address as primary and any additional as SANs + server_cn, server_sans = ingress_for_clients[0], ingress_for_clients[:1] + client_cn, client_sans = ingress_for_db[0], ingress_for_db[:1] + + # request a single server and single client cert; note that multiple certs + # of either type can be requested as long as they have unique common names + cert_provider.request_server_cert(server_cn, server_sans) + cert_provider.request_client_cert(client_cn, client_sans) + + +@when('cert-provider.certs.changed') +def update_certs(): + cert_provider = endpoint_from_flag('cert-provider.available') + server_cert = cert_provider.server_certs[0] # only requested one + myserver.update_server_cert(server_cert.cert, server_cert.key) + + client_cert = cert_provider.client_certs[0] # only requested one + myclient.update_client_cert(client_cert.cert, client_cert.key) + clear_flag('cert-provider.certs.changed') +``` + + +# Reference + + 
* [Requires](docs/requires.md) + * [Provides](docs/provides.md) + +# Contact Information + +Maintainer: Cory Johns <Cory.Johns@canonical.com> + + +[Juju]: https://jujucharms.com +[Vault]: https://jujucharms.com/u/openstack-charmers/vault +[EasyRSA]: https://jujucharms.com/u/containers/easyrsa +[Introduction to PKI]: https://github.com/OpenVPN/easy-rsa/blob/master/doc/Intro-To-PKI.md diff --git a/kubernetes-control-plane/hooks/relations/tls-certificates/__init__.py b/kubernetes-control-plane/hooks/relations/tls-certificates/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/kubernetes-control-plane/hooks/relations/tls-certificates/docs/common.md b/kubernetes-control-plane/hooks/relations/tls-certificates/docs/common.md new file mode 100644 index 0000000..25d0e08 --- /dev/null +++ b/kubernetes-control-plane/hooks/relations/tls-certificates/docs/common.md @@ -0,0 +1,51 @@ +

CertificateRequest

+ +```python +CertificateRequest(self, unit, cert_type, cert_name, common_name, sans) +``` + +

application_name

+ +Name of the application which the request came from. + +:returns: Name of application +:rtype: str + +

cert

+ + +The cert published for this request, if any. + +

cert_type

+ + +Type of certificate, 'server' or 'client', being requested. + +

resolve_unit_name

+ +```python +CertificateRequest.resolve_unit_name(unit) +``` +Return name of unit associated with this request. + +unit_name should be provided in the relation data to ensure +compatibility with cross-model relations. If the unit name +is absent then fall back to unit_name attribute of the +unit associated with this request. + +:param unit: Unit to extract name from +:type unit: charms.reactive.endpoints.RelatedUnit +:returns: Name of unit +:rtype: str + +

Certificate

+ +```python +Certificate(self, cert_type, common_name, cert, key) +``` + +Represents a created certificate and key. + +The ``cert_type``, ``common_name``, ``cert``, and ``key`` values can +be accessed either as properties or as the contents of the dict. + diff --git a/kubernetes-control-plane/hooks/relations/tls-certificates/docs/provides.md b/kubernetes-control-plane/hooks/relations/tls-certificates/docs/provides.md new file mode 100644 index 0000000..c213546 --- /dev/null +++ b/kubernetes-control-plane/hooks/relations/tls-certificates/docs/provides.md @@ -0,0 +1,212 @@ +

provides

+ + +

TlsProvides

+ +```python +TlsProvides(self, endpoint_name, relation_ids=None) +``` + +The provider's side of the interface protocol. + +The following flags may be set: + + * `{endpoint_name}.available` + Whenever any clients are joined. + + * `{endpoint_name}.certs.requested` + When there are new certificate requests of any kind to be processed. + The requests can be accessed via [new_requests][]. + + * `{endpoint_name}.server.certs.requested` + When there are new server certificate requests to be processed. + The requests can be accessed via [new_server_requests][]. + + * `{endpoint_name}.client.certs.requested` + When there are new client certificate requests to be processed. + The requests can be accessed via [new_client_requests][]. + +[Certificate]: common.md#tls_certificates_common.Certificate +[CertificateRequest]: common.md#tls_certificates_common.CertificateRequest +[all_requests]: provides.md#provides.TlsProvides.all_requests +[new_requests]: provides.md#provides.TlsProvides.new_requests +[new_server_requests]: provides.md#provides.TlsProvides.new_server_requests +[new_client_requests]: provides.md#provides.TlsProvides.new_client_requests + +

all_published_certs

+ + +List of all [Certificate][] instances that this provider has published +for all related applications. + +

all_requests

+ + +List of all requests that have been made. + +Each will be an instance of [CertificateRequest][]. + +Example usage: + +```python +@when('certs.regen', + 'tls.certs.available') +def regen_all_certs(): + tls = endpoint_from_flag('tls.certs.available') + for request in tls.all_requests: + cert, key = generate_cert(request.cert_type, + request.common_name, + request.sans) + request.set_cert(cert, key) +``` + +

new_application_requests

+ + +Filtered view of [new_requests][] that only includes application cert +requests. + +Each will be an instance of [ApplicationCertificateRequest][]. + +Example usage: + +```python +@when('tls.application.certs.requested') +def gen_application_certs(): + tls = endpoint_from_flag('tls.application.certs.requested') + for request in tls.new_application_requests: + cert, key = generate_application_cert(request.common_name, + request.sans) + request.set_cert(cert, key) +``` + +

new_client_requests

+ + +Filtered view of [new_requests][] that only includes client cert +requests. + +Each will be an instance of [CertificateRequest][]. + +Example usage: + +```python +@when('tls.client.certs.requested') +def gen_client_certs(): + tls = endpoint_from_flag('tls.client.certs.requested') + for request in tls.new_client_requests: + cert, key = generate_client_cert(request.common_name, + request.sans) + request.set_cert(cert, key) +``` + +

new_requests

+ + +Filtered view of [all_requests][] that only includes requests that +haven't been handled. + +Each will be an instance of [CertificateRequest][]. + +This collection can also be further filtered by request type using +[new_server_requests][] or [new_client_requests][]. + +Example usage: + +```python +@when('tls.certs.requested') +def gen_certs(): + tls = endpoint_from_flag('tls.certs.requested') + for request in tls.new_requests: + cert, key = generate_cert(request.cert_type, + request.common_name, + request.sans) + request.set_cert(cert, key) +``` + +

new_server_requests

+ + +Filtered view of [new_requests][] that only includes server cert +requests. + +Each will be an instance of [CertificateRequest][]. + +Example usage: + +```python +@when('tls.server.certs.requested') +def gen_server_certs(): + tls = endpoint_from_flag('tls.server.certs.requested') + for request in tls.new_server_requests: + cert, key = generate_server_cert(request.common_name, + request.sans) + request.set_cert(cert, key) +``` + +

set_ca

+ +```python +TlsProvides.set_ca(certificate_authority) +``` + +Publish the CA to all related applications. + +

set_chain

+ +```python +TlsProvides.set_chain(chain) +``` + +Publish the chain of trust to all related applications. + +

set_client_cert

+ +```python +TlsProvides.set_client_cert(cert, key) +``` + +Deprecated. This is only for backwards compatibility. + +Publish a globally shared client cert and key. + +

set_server_cert

+ +```python +TlsProvides.set_server_cert(scope, cert, key) +``` + +Deprecated. Use one of the [new_requests][] collections and +`request.set_cert()` instead. + +Set the server cert and key for the request identified by `scope`. + +

set_server_multicerts

+ +```python +TlsProvides.set_server_multicerts(scope) +``` + +Deprecated. Done automatically. + +

add_server_cert

+ +```python +TlsProvides.add_server_cert(scope, cn, cert, key) +``` + +Deprecated. Use `request.set_cert()` instead. + +

get_server_requests

+ +```python +TlsProvides.get_server_requests() +``` + +Deprecated. Use the [new_requests][] or [server_requests][] +collections instead. + +One provider can have many requests to generate server certificates. +Return a map of all server request objects indexed by a unique +identifier. + diff --git a/kubernetes-control-plane/hooks/relations/tls-certificates/docs/requires.md b/kubernetes-control-plane/hooks/relations/tls-certificates/docs/requires.md new file mode 100644 index 0000000..fdec902 --- /dev/null +++ b/kubernetes-control-plane/hooks/relations/tls-certificates/docs/requires.md @@ -0,0 +1,207 @@ +

requires

+ + +

TlsRequires

+ +```python +TlsRequires(self, endpoint_name, relation_ids=None) +``` + +The client's side of the interface protocol. + +The following flags may be set: + + * `{endpoint_name}.available` + Whenever the relation is joined. + + * `{endpoint_name}.ca.available` + When the root CA information is available via the [root_ca_cert][] and + [root_ca_chain][] properties. + + * `{endpoint_name}.ca.changed` + When the root CA information has changed, whether because + they have just become available or if they were regenerated by the CA. + Once processed this flag should be removed by the charm. + + * `{endpoint_name}.certs.available` + When the requested server or client certs are available. + + * `{endpoint_name}.certs.changed` + When the requested server or client certs have changed, whether because + they have just become available or if they were regenerated by the CA. + Once processed this flag should be removed by the charm. + + * `{endpoint_name}.server.certs.available` + When the server certificates requested by [request_server_cert][] are + available via the [server_certs][] collection. + + * `{endpoint_name}.server.certs.changed` + When the requested server certificates have changed, whether because + they have just become available or if they were regenerated by the CA. + Once processed this flag should be removed by the charm. + + * `{endpoint_name}.client.certs.available` + When the client certificates requested by [request_client_cert][] are + available via the [client_certs][] collection. + + * `{endpoint_name}.client.certs.changed` + When the requested client certificates have changed, whether because + they have just become available or if they were regenerated by the CA. + Once processed this flag should be removed by the charm. 
+ +The following flags have been deprecated: + + * `{endpoint_name}.server.cert.available` + * `{endpoint_name}.client.cert.available` + * `{endpoint_name}.batch.cert.available` + +[Certificate]: common.md#tls_certificates_common.Certificate +[CertificateRequest]: common.md#tls_certificates_common.CertificateRequest +[root_ca_cert]: requires.md#requires.TlsRequires.root_ca_cert +[root_ca_chain]: requires.md#requires.TlsRequires.root_ca_chain +[request_server_cert]: requires.md#requires.TlsRequires.request_server_cert +[request_client_cert]: requires.md#requires.TlsRequires.request_client_cert +[server_certs]: requires.md#requires.TlsRequires.server_certs +[server_certs_map]: requires.md#requires.TlsRequires.server_certs_map +[client_certs]: requires.md#requires.TlsRequires.client_certs + +

application_certs

+ + +List of [Certificate][] instances for all available application certs. + +

client_certs

+ + +List of [Certificate][] instances for all available client certs. + +

client_certs_map

+ + +Mapping of client [Certificate][] instances by their `common_name`. + +

root_ca_cert

+ + +Root CA certificate. + +

root_ca_chain

+ + +The chain of trust for the root CA. + +

server_certs

+ + +List of [Certificate][] instances for all available server certs. + +

server_certs_map

+ + +Mapping of server [Certificate][] instances by their `common_name`. + +

get_ca

+ +```python +TlsRequires.get_ca() +``` + +Return the root CA certificate. + +Same as [root_ca_cert][]. + +

get_chain

+ +```python +TlsRequires.get_chain() +``` + +Return the chain of trust for the root CA. + +Same as [root_ca_chain][]. + +

get_client_cert

+ +```python +TlsRequires.get_client_cert() +``` + +Deprecated. Use [request_client_cert][] and the [client_certs][] +collection instead. + +Return a globally shared client certificate and key. + +

get_server_cert

+ +```python +TlsRequires.get_server_cert() +``` + +Deprecated. Use the [server_certs][] collection instead. + +Return the cert and key of the first server certificate requested. + +

get_batch_requests

+ +```python +TlsRequires.get_batch_requests() +``` + +Deprecated. Use [server_certs_map][] instead. + +Mapping of server [Certificate][] instances by their `common_name`. + +

request_server_cert

+ +```python +TlsRequires.request_server_cert(cn, sans=None, cert_name=None) +``` + +Request a server certificate and key be generated for the given +common name (`cn`) and optional list of alternative names (`sans`). + +The `cert_name` is deprecated and not needed. + +This can be called multiple times to request more than one server +certificate, although the common names must be unique. If called +again with the same common name, it will be ignored. + +

add_request_server_cert

+ +```python +TlsRequires.add_request_server_cert(cn, sans) +``` + +Deprecated. Use [request_server_cert][] instead. + +

request_server_certs

+ +```python +TlsRequires.request_server_certs() +``` + +Deprecated. Just use [request_server_cert][]; this does nothing. + +

request_client_cert

+ +```python +TlsRequires.request_client_cert(cn, sans) +``` + +Request a client certificate and key be generated for the given +common name (`cn`) and list of alternative names (`sans`). + +This can be called multiple times to request more than one client +certificate, although the common names must be unique. If called +again with the same common name, it will be ignored. + +

request_application_cert

+ +```python +TlsRequires.request_application_cert(cn, sans) +``` + +Request an application certificate and key be generated for the given +common name (`cn`) and list of alternative names (`sans` ) of this +unit and all peer units. All units will share a single certificates. + diff --git a/kubernetes-control-plane/hooks/relations/tls-certificates/interface.yaml b/kubernetes-control-plane/hooks/relations/tls-certificates/interface.yaml new file mode 100644 index 0000000..beec53b --- /dev/null +++ b/kubernetes-control-plane/hooks/relations/tls-certificates/interface.yaml @@ -0,0 +1,6 @@ +name: tls-certificates +summary: | + A Transport Layer Security (TLS) charm layer that uses requires and provides + to exchange certifcates. +version: 1 +repo: https://github.com/juju-solutions/interface-tls-certificates diff --git a/kubernetes-control-plane/hooks/relations/tls-certificates/make_docs b/kubernetes-control-plane/hooks/relations/tls-certificates/make_docs new file mode 100644 index 0000000..2f2274a --- /dev/null +++ b/kubernetes-control-plane/hooks/relations/tls-certificates/make_docs @@ -0,0 +1,23 @@ +#!.tox/py3/bin/python + +import sys +import importlib +from pathlib import Path +from shutil import rmtree +from unittest.mock import patch + +import pydocmd.__main__ + + +with patch('charmhelpers.core.hookenv.metadata') as metadata: + metadata.return_value = { + 'requires': {'cert': {'interface': 'tls-certificates'}}, + 'provides': {'cert': {'interface': 'tls-certificates'}}, + } + sys.path.append('..') + sys.modules[''] = importlib.import_module(Path.cwd().name) + print(sys.argv) + if len(sys.argv) == 1: + sys.argv.extend(['build']) + pydocmd.__main__.main() + rmtree('_build') diff --git a/kubernetes-control-plane/hooks/relations/tls-certificates/provides.py b/kubernetes-control-plane/hooks/relations/tls-certificates/provides.py new file mode 100644 index 0000000..0262baa --- /dev/null +++ b/kubernetes-control-plane/hooks/relations/tls-certificates/provides.py @@ -0,0 
+1,301 @@ +if not __package__: + # fix relative imports when building docs + import sys + __package__ = sys.modules[''].__name__ + +from charms.reactive import Endpoint +from charms.reactive import when, when_not +from charms.reactive import set_flag, clear_flag, toggle_flag + +from .tls_certificates_common import ( + ApplicationCertificateRequest, + CertificateRequest +) + + +class TlsProvides(Endpoint): + """ + The provider's side of the interface protocol. + + The following flags may be set: + + * `{endpoint_name}.available` + Whenever any clients are joined. + + * `{endpoint_name}.certs.requested` + When there are new certificate requests of any kind to be processed. + The requests can be accessed via [new_requests][]. + + * `{endpoint_name}.server.certs.requested` + When there are new server certificate requests to be processed. + The requests can be accessed via [new_server_requests][]. + + * `{endpoint_name}.client.certs.requested` + When there are new client certificate requests to be processed. + The requests can be accessed via [new_client_requests][]. 
+ + [Certificate]: common.md#tls_certificates_common.Certificate + [CertificateRequest]: common.md#tls_certificates_common.CertificateRequest + [all_requests]: provides.md#provides.TlsProvides.all_requests + [new_requests]: provides.md#provides.TlsProvides.new_requests + [new_server_requests]: provides.md#provides.TlsProvides.new_server_requests + [new_client_requests]: provides.md#provides.TlsProvides.new_client_requests + """ + + @when('endpoint.{endpoint_name}.joined') + def joined(self): + set_flag(self.expand_name('{endpoint_name}.available')) + toggle_flag(self.expand_name('{endpoint_name}.certs.requested'), + self.new_requests) + toggle_flag(self.expand_name('{endpoint_name}.server.certs.requested'), + self.new_server_requests) + toggle_flag(self.expand_name('{endpoint_name}.client.certs.requested'), + self.new_client_requests) + toggle_flag( + self.expand_name('{endpoint_name}.application.certs.requested'), + self.new_application_requests) + # For backwards compatibility, set the old "cert" flags as well + toggle_flag(self.expand_name('{endpoint_name}.server.cert.requested'), + self.new_server_requests) + toggle_flag(self.expand_name('{endpoint_name}.client.cert.requested'), + self.new_client_requests) + + @when_not('endpoint.{endpoint_name}.joined') + def broken(self): + clear_flag(self.expand_name('{endpoint_name}.available')) + clear_flag(self.expand_name('{endpoint_name}.certs.requested')) + clear_flag(self.expand_name('{endpoint_name}.server.certs.requested')) + clear_flag(self.expand_name('{endpoint_name}.client.certs.requested')) + clear_flag( + self.expand_name('{endpoint_name}.application.certs.requested')) + + def set_ca(self, certificate_authority): + """ + Publish the CA to all related applications. + """ + for relation in self.relations: + # All the clients get the same CA, so send it to them. + relation.to_publish_raw['ca'] = certificate_authority + + def set_chain(self, chain): + """ + Publish the chain of trust to all related applications. 
+ """ + for relation in self.relations: + # All the clients get the same chain, so send it to them. + relation.to_publish_raw['chain'] = chain + + def set_client_cert(self, cert, key): + """ + Deprecated. This is only for backwards compatibility. + + Publish a globally shared client cert and key. + """ + for relation in self.relations: + relation.to_publish_raw.update({ + 'client.cert': cert, + 'client.key': key, + }) + + def set_server_cert(self, scope, cert, key): + """ + Deprecated. Use one of the [new_requests][] collections and + `request.set_cert()` instead. + + Set the server cert and key for the request identified by `scope`. + """ + request = self.get_server_requests()[scope] + request.set_cert(cert, key) + + def set_server_multicerts(self, scope): + """ + Deprecated. Done automatically. + """ + pass + + def add_server_cert(self, scope, cn, cert, key): + ''' + Deprecated. Use `request.set_cert()` instead. + ''' + self.set_server_cert(scope, cert, key) + + def get_server_requests(self): + """ + Deprecated. Use the [new_requests][] or [server_requests][] + collections instead. + + One provider can have many requests to generate server certificates. + Return a map of all server request objects indexed by a unique + identifier. + """ + return {req._key: req for req in self.new_server_requests} + + @property + def all_requests(self): + """ + List of all requests that have been made. + + Each will be an instance of [CertificateRequest][]. 
+ + Example usage: + + ```python + @when('certs.regen', + 'tls.certs.available') + def regen_all_certs(): + tls = endpoint_from_flag('tls.certs.available') + for request in tls.all_requests: + cert, key = generate_cert(request.cert_type, + request.common_name, + request.sans) + request.set_cert(cert, key) + ``` + """ + requests = [] + for unit in self.all_joined_units: + # handle older single server cert request + if unit.received_raw['common_name']: + requests.append(CertificateRequest( + unit, + 'server', + unit.received_raw['certificate_name'], + unit.received_raw['common_name'], + unit.received['sans'], + )) + + # handle mutli server cert requests + reqs = unit.received['cert_requests'] or {} + for common_name, req in reqs.items(): + requests.append(CertificateRequest( + unit, + 'server', + common_name, + common_name, + req['sans'], + )) + + # handle client cert requests + reqs = unit.received['client_cert_requests'] or {} + for common_name, req in reqs.items(): + requests.append(CertificateRequest( + unit, + 'client', + common_name, + common_name, + req['sans'], + )) + # handle application cert requests + reqs = unit.received['application_cert_requests'] or {} + for common_name, req in reqs.items(): + requests.append(ApplicationCertificateRequest( + unit, + 'application', + common_name, + common_name, + req['sans'] + )) + return requests + + @property + def new_requests(self): + """ + Filtered view of [all_requests][] that only includes requests that + haven't been handled. + + Each will be an instance of [CertificateRequest][]. + + This collection can also be further filtered by request type using + [new_server_requests][] or [new_client_requests][]. 
+ + Example usage: + + ```python + @when('tls.certs.requested') + def gen_certs(): + tls = endpoint_from_flag('tls.certs.requested') + for request in tls.new_requests: + cert, key = generate_cert(request.cert_type, + request.common_name, + request.sans) + request.set_cert(cert, key) + ``` + """ + return [req for req in self.all_requests if not req.is_handled] + + @property + def new_server_requests(self): + """ + Filtered view of [new_requests][] that only includes server cert + requests. + + Each will be an instance of [CertificateRequest][]. + + Example usage: + + ```python + @when('tls.server.certs.requested') + def gen_server_certs(): + tls = endpoint_from_flag('tls.server.certs.requested') + for request in tls.new_server_requests: + cert, key = generate_server_cert(request.common_name, + request.sans) + request.set_cert(cert, key) + ``` + """ + return [req for req in self.new_requests if req.cert_type == 'server'] + + @property + def new_client_requests(self): + """ + Filtered view of [new_requests][] that only includes client cert + requests. + + Each will be an instance of [CertificateRequest][]. + + Example usage: + + ```python + @when('tls.client.certs.requested') + def gen_client_certs(): + tls = endpoint_from_flag('tls.client.certs.requested') + for request in tls.new_client_requests: + cert, key = generate_client_cert(request.common_name, + request.sans) + request.set_cert(cert, key) + ``` + """ + return [req for req in self.new_requests if req.cert_type == 'client'] + + @property + def new_application_requests(self): + """ + Filtered view of [new_requests][] that only includes application cert + requests. + + Each will be an instance of [ApplicationCertificateRequest][]. 
+ + Example usage: + + ```python + @when('tls.application.certs.requested') + def gen_application_certs(): + tls = endpoint_from_flag('tls.application.certs.requested') + for request in tls.new_application_requests: + cert, key = generate_application_cert(request.common_name, + request.sans) + request.set_cert(cert, key) + ``` + + :returns: List of certificate requests. + :rtype: [CertificateRequest, ] + """ + return [req for req in self.new_requests + if req.cert_type == 'application'] + + @property + def all_published_certs(self): + """ + List of all [Certificate][] instances that this provider has published + for all related applications. + """ + return [req.cert for req in self.all_requests if req.cert] diff --git a/kubernetes-control-plane/hooks/relations/tls-certificates/pydocmd.yml b/kubernetes-control-plane/hooks/relations/tls-certificates/pydocmd.yml new file mode 100644 index 0000000..c568913 --- /dev/null +++ b/kubernetes-control-plane/hooks/relations/tls-certificates/pydocmd.yml @@ -0,0 +1,19 @@ +site_name: 'TLS Certificates Interface' + +generate: + - requires.md: + - requires + - requires.TlsRequires+ + - provides.md: + - provides + - provides.TlsProvides+ + - common.md: + - tls_certificates_common.CertificateRequest+ + - tls_certificates_common.Certificate+ + +pages: + - Requires: requires.md + - Provides: provides.md + - Common: common.md + +gens_dir: docs diff --git a/kubernetes-control-plane/hooks/relations/tls-certificates/requires.py b/kubernetes-control-plane/hooks/relations/tls-certificates/requires.py new file mode 100644 index 0000000..951f953 --- /dev/null +++ b/kubernetes-control-plane/hooks/relations/tls-certificates/requires.py @@ -0,0 +1,342 @@ +if not __package__: + # fix relative imports when building docs + import sys + __package__ = sys.modules[''].__name__ + +import uuid + +from charmhelpers.core import hookenv + +from charms.reactive import when, when_not +from charms.reactive import set_flag, clear_flag, toggle_flag +from 
charms.reactive import Endpoint +from charms.reactive import data_changed + +from .tls_certificates_common import Certificate + + +class TlsRequires(Endpoint): + """ + The client's side of the interface protocol. + + The following flags may be set: + + * `{endpoint_name}.available` + Whenever the relation is joined. + + * `{endpoint_name}.ca.available` + When the root CA information is available via the [root_ca_cert][] and + [root_ca_chain][] properties. + + * `{endpoint_name}.ca.changed` + When the root CA information has changed, whether because + they have just become available or if they were regenerated by the CA. + Once processed this flag should be removed by the charm. + + * `{endpoint_name}.certs.available` + When the requested server or client certs are available. + + * `{endpoint_name}.certs.changed` + When the requested server or client certs have changed, whether because + they have just become available or if they were regenerated by the CA. + Once processed this flag should be removed by the charm. + + * `{endpoint_name}.server.certs.available` + When the server certificates requested by [request_server_cert][] are + available via the [server_certs][] collection. + + * `{endpoint_name}.server.certs.changed` + When the requested server certificates have changed, whether because + they have just become available or if they were regenerated by the CA. + Once processed this flag should be removed by the charm. + + * `{endpoint_name}.client.certs.available` + When the client certificates requested by [request_client_cert][] are + available via the [client_certs][] collection. + + * `{endpoint_name}.client.certs.changed` + When the requested client certificates have changed, whether because + they have just become available or if they were regenerated by the CA. + Once processed this flag should be removed by the charm. 
+ + The following flags have been deprecated: + + * `{endpoint_name}.server.cert.available` + * `{endpoint_name}.client.cert.available` + * `{endpoint_name}.batch.cert.available` + + [Certificate]: common.md#tls_certificates_common.Certificate + [CertificateRequest]: common.md#tls_certificates_common.CertificateRequest + [root_ca_cert]: requires.md#requires.TlsRequires.root_ca_cert + [root_ca_chain]: requires.md#requires.TlsRequires.root_ca_chain + [request_server_cert]: requires.md#requires.TlsRequires.request_server_cert + [request_client_cert]: requires.md#requires.TlsRequires.request_client_cert + [server_certs]: requires.md#requires.TlsRequires.server_certs + [server_certs_map]: requires.md#requires.TlsRequires.server_certs_map + [client_certs]: requires.md#requires.TlsRequires.server_certs + """ + + @when('endpoint.{endpoint_name}.joined') + def joined(self): + self.relations[0].to_publish_raw['unit_name'] = self._unit_name + prefix = self.expand_name('{endpoint_name}.') + ca_available = self.root_ca_cert + ca_changed = ca_available and data_changed(prefix + 'ca', + self.root_ca_cert) + server_available = self.server_certs + server_changed = server_available and data_changed(prefix + 'servers', + self.server_certs) + client_available = self.client_certs + client_changed = client_available and data_changed(prefix + 'clients', + self.client_certs) + certs_available = server_available or client_available + certs_changed = server_changed or client_changed + + set_flag(prefix + 'available') + toggle_flag(prefix + 'ca.available', ca_available) + toggle_flag(prefix + 'ca.changed', ca_changed) + toggle_flag(prefix + 'server.certs.available', server_available) + toggle_flag(prefix + 'server.certs.changed', server_changed) + toggle_flag(prefix + 'client.certs.available', client_available) + toggle_flag(prefix + 'client.certs.changed', client_changed) + toggle_flag(prefix + 'certs.available', certs_available) + toggle_flag(prefix + 'certs.changed', certs_changed) + # 
deprecated + toggle_flag(prefix + 'server.cert.available', self.server_certs) + toggle_flag(prefix + 'client.cert.available', self.get_client_cert()) + toggle_flag(prefix + 'batch.cert.available', self.server_certs) + + @when_not('endpoint.{endpoint_name}.joined') + def broken(self): + prefix = self.expand_name('{endpoint_name}.') + clear_flag(prefix + 'available') + clear_flag(prefix + 'ca.available') + clear_flag(prefix + 'ca.changed') + clear_flag(prefix + 'server.certs.available') + clear_flag(prefix + 'server.certs.changed') + clear_flag(prefix + 'client.certs.available') + clear_flag(prefix + 'client.certs.changed') + clear_flag(prefix + 'certs.available') + clear_flag(prefix + 'certs.changed') + # deprecated + clear_flag(prefix + 'server.cert.available') + clear_flag(prefix + 'client.cert.available') + clear_flag(prefix + 'batch.cert.available') + + @property + def _unit_name(self): + return hookenv.local_unit().replace('/', '_') + + @property + def root_ca_cert(self): + """ + Root CA certificate. + """ + # only the leader of the provider should set the CA, or all units + # had better agree + return self.all_joined_units.received_raw['ca'] + + def get_ca(self): + """ + Return the root CA certificate. + + Same as [root_ca_cert][]. + """ + return self.root_ca_cert + + @property + def root_ca_chain(self): + """ + The chain of trust for the root CA. + """ + # only the leader of the provider should set the CA, or all units + # had better agree + return self.all_joined_units.received_raw['chain'] + + def get_chain(self): + """ + Return the chain of trust for the root CA. + + Same as [root_ca_chain][]. + """ + return self.root_ca_chain + + def get_client_cert(self): + """ + Deprecated. Use [request_client_cert][] and the [client_certs][] + collection instead. + + Return a globally shared client certificate and key. + """ + data = self.all_joined_units.received_raw + return (data['client.cert'], data['client.key']) + + def get_server_cert(self): + """ + Deprecated. 
Use the [server_certs][] collection instead. + + Return the cert and key of the first server certificate requested. + """ + if not self.server_certs: + return (None, None) + cert = self.server_certs[0] + return (cert.cert, cert.key) + + @property + def server_certs(self): + """ + List of [Certificate][] instances for all available server certs. + """ + certs = [] + raw_data = self.all_joined_units.received_raw + json_data = self.all_joined_units.received + + # for backwards compatibility, the first cert goes in its own fields + if self.relations: + common_name = self.relations[0].to_publish_raw['common_name'] + cert = raw_data['{}.server.cert'.format(self._unit_name)] + key = raw_data['{}.server.key'.format(self._unit_name)] + if cert and key: + certs.append(Certificate('server', + common_name, + cert, + key)) + + # subsequent requests go in the collection + field = '{}.processed_requests'.format(self._unit_name) + certs_data = json_data[field] or {} + certs.extend(Certificate('server', + common_name, + cert['cert'], + cert['key']) + for common_name, cert in certs_data.items()) + return certs + + @property + def application_certs(self): + """ + List containg the application Certificate cert. + + :returns: A list containing one certificate + :rtype: [Certificate()] + """ + certs = [] + json_data = self.all_joined_units.received + field = '{}.processed_application_requests'.format(self._unit_name) + certs_data = json_data[field] or {} + app_cert_data = certs_data.get('app_data') + if app_cert_data: + certs = [Certificate( + 'server', + 'app_data', + app_cert_data['cert'], + app_cert_data['key'])] + return certs + + @property + def server_certs_map(self): + """ + Mapping of server [Certificate][] instances by their `common_name`. + """ + return {cert.common_name: cert for cert in self.server_certs} + + def get_batch_requests(self): + """ + Deprecated. Use [server_certs_map][] instead. + + Mapping of server [Certificate][] instances by their `common_name`. 
+ """ + return self.server_certs_map + + @property + def client_certs(self): + """ + List of [Certificate][] instances for all available client certs. + """ + field = '{}.processed_client_requests'.format(self._unit_name) + certs_data = self.all_joined_units.received[field] or {} + return [Certificate('client', + common_name, + cert['cert'], + cert['key']) + for common_name, cert in certs_data.items()] + + @property + def client_certs_map(self): + """ + Mapping of client [Certificate][] instances by their `common_name`. + """ + return {cert.common_name: cert for cert in self.client_certs} + + def request_server_cert(self, cn, sans=None, cert_name=None): + """ + Request a server certificate and key be generated for the given + common name (`cn`) and optional list of alternative names (`sans`). + + The `cert_name` is deprecated and not needed. + + This can be called multiple times to request more than one server + certificate, although the common names must be unique. If called + again with the same common name, it will be ignored. + """ + if not self.relations: + return + # assume we'll only be connected to one provider + to_publish_json = self.relations[0].to_publish + to_publish_raw = self.relations[0].to_publish_raw + if to_publish_raw['common_name'] in (None, '', cn): + # for backwards compatibility, first request goes in its own fields + to_publish_raw['common_name'] = cn + to_publish_json['sans'] = sans or [] + cert_name = to_publish_raw.get('certificate_name') or cert_name + if cert_name is None: + cert_name = str(uuid.uuid4()) + to_publish_raw['certificate_name'] = cert_name + else: + # subsequent requests go in the collection + requests = to_publish_json.get('cert_requests', {}) + requests[cn] = {'sans': sans or []} + to_publish_json['cert_requests'] = requests + + def add_request_server_cert(self, cn, sans): + """ + Deprecated. Use [request_server_cert][] instead. 
+ """ + self.request_server_cert(cn, sans) + + def request_server_certs(self): + """ + Deprecated. Just use [request_server_cert][]; this does nothing. + """ + pass + + def request_client_cert(self, cn, sans): + """ + Request a client certificate and key be generated for the given + common name (`cn`) and list of alternative names (`sans`). + + This can be called multiple times to request more than one client + certificate, although the common names must be unique. If called + again with the same common name, it will be ignored. + """ + if not self.relations: + return + # assume we'll only be connected to one provider + to_publish_json = self.relations[0].to_publish + requests = to_publish_json.get('client_cert_requests', {}) + requests[cn] = {'sans': sans} + to_publish_json['client_cert_requests'] = requests + + def request_application_cert(self, cn, sans): + """ + Request an application certificate and key be generated for the given + common name (`cn`) and list of alternative names (`sans` ) of this + unit and all peer units. All units will share a single certificates. 
+ """ + if not self.relations: + return + # assume we'll only be connected to one provider + to_publish_json = self.relations[0].to_publish + requests = to_publish_json.get('application_cert_requests', {}) + requests[cn] = {'sans': sans} + to_publish_json['application_cert_requests'] = requests diff --git a/kubernetes-control-plane/hooks/relations/tls-certificates/tls_certificates_common.py b/kubernetes-control-plane/hooks/relations/tls-certificates/tls_certificates_common.py new file mode 100644 index 0000000..99a2f8c --- /dev/null +++ b/kubernetes-control-plane/hooks/relations/tls-certificates/tls_certificates_common.py @@ -0,0 +1,302 @@ +from charms.reactive import clear_flag, is_data_changed, data_changed + + +class CertificateRequest(dict): + def __init__(self, unit, cert_type, cert_name, common_name, sans): + self._unit = unit + self._cert_type = cert_type + super().__init__({ + 'certificate_name': cert_name, + 'common_name': common_name, + 'sans': sans, + }) + + @property + def _key(self): + return '.'.join((self._unit.relation.relation_id, + self.unit_name, + self.common_name)) + + def resolve_unit_name(self, unit): + """Return name of unit associated with this request. + + unit_name should be provided in the relation data to ensure + compatability with cross-model relations. If the unit name + is absent then fall back to unit_name attribute of the + unit associated with this request. + + :param unit: Unit to extract name from + :type unit: charms.reactive.endpoints.RelatedUnit + :returns: Name of unit + :rtype: str + """ + unit_name = unit.received_raw['unit_name'] + if not unit_name: + unit_name = unit.unit_name + return unit_name + + @property + def unit_name(self): + """Name of this unit. + + :returns: Name of unit + :rtype: str + """ + return self.resolve_unit_name(unit=self._unit).replace('/', '_') + + @property + def application_name(self): + """Name of the application which the request came from. 
+ + :returns: Name of application + :rtype: str + """ + return self.resolve_unit_name(unit=self._unit).split('/')[0] + + @property + def cert_type(self): + """ + Type of certificate, 'server' or 'client', being requested. + """ + return self._cert_type + + @property + def cert_name(self): + return self['certificate_name'] + + @property + def common_name(self): + return self['common_name'] + + @property + def sans(self): + return self['sans'] + + @property + def _publish_key(self): + if self.cert_type == 'server': + return '{}.processed_requests'.format(self.unit_name) + elif self.cert_type == 'client': + return '{}.processed_client_requests'.format(self.unit_name) + raise ValueError('Unknown cert_type: {}'.format(self.cert_type)) + + @property + def _server_cert_key(self): + return '{}.server.cert'.format(self.unit_name) + + @property + def _server_key_key(self): + return '{}.server.key'.format(self.unit_name) + + @property + def _is_top_level_server_cert(self): + return (self.cert_type == 'server' and + self.common_name == self._unit.received_raw['common_name']) + + @property + def cert(self): + """ + The cert published for this request, if any. 
+ """ + cert, key = None, None + if self._is_top_level_server_cert: + tpr = self._unit.relation.to_publish_raw + cert = tpr[self._server_cert_key] + key = tpr[self._server_key_key] + else: + tp = self._unit.relation.to_publish + certs_data = tp.get(self._publish_key, {}) + cert_data = certs_data.get(self.common_name, {}) + cert = cert_data.get('cert') + key = cert_data.get('key') + if cert and key: + return Certificate(self.cert_type, self.common_name, cert, key) + return None + + @property + def is_handled(self): + has_cert = self.cert is not None + same_sans = not is_data_changed(self._key, + sorted(set(self.sans or []))) + return has_cert and same_sans + + def set_cert(self, cert, key): + rel = self._unit.relation + if self._is_top_level_server_cert: + # backwards compatibility; if this is the cert that was requested + # as a single server cert, set it in the response as the single + # server cert + rel.to_publish_raw.update({ + self._server_cert_key: cert, + self._server_key_key: key, + }) + else: + data = rel.to_publish.get(self._publish_key, {}) + data[self.common_name] = { + 'cert': cert, + 'key': key, + } + rel.to_publish[self._publish_key] = data + if not rel.endpoint.new_server_requests: + clear_flag(rel.endpoint.expand_name('{endpoint_name}.server' + '.cert.requested')) + if not rel.endpoint.new_requests: + clear_flag(rel.endpoint.expand_name('{endpoint_name}.' + 'certs.requested')) + data_changed(self._key, sorted(set(self.sans or []))) + + +class ApplicationCertificateRequest(CertificateRequest): + """ + A request for an application consistent certificate. + + This is a request for a certificate that works for all units of an + application. All sans and cns are added together to produce one + certificate and the same certificate and key are sent to all the + units of an application. Only one ApplicationCertificateRequest + is needed per application. + """ + + @property + def _key(self): + """Key to identify this cert. 
+ + :returns: cert key + :rtype: str + """ + return '{}.{}'.format(self._unit.relation.relation_id, 'app_cert') + + @property + def cert(self): + """ + The cert published for this request, if any. + + :returns: Certificate + :rtype: Certificate or None + """ + cert, key = None, None + tp = self._unit.relation.to_publish + certs_data = tp.get(self._publish_key, {}) + cert_data = certs_data.get('app_data', {}) + cert = cert_data.get('cert') + key = cert_data.get('key') + if cert and key: + return Certificate(self.cert_type, self.common_name, cert, key) + return None + + @property + def is_handled(self): + """Whether the certificate has been handled. + + :returns: If the cert has been handled + :rtype: bool + """ + has_cert = self.cert is not None + same_sans = not is_data_changed(self._key, + sorted(set(self.sans or []))) + return has_cert and same_sans + + @property + def sans(self): + """Generate a list of all sans from all units of application + + Examine all units of the application and compile a list of + all sans. CNs are treated as addition san entries. + + :returns: List of sans + :rtype: List[str] + """ + _sans = [] + for unit in self._unit.relation.units: + reqs = unit.received['application_cert_requests'] or {} + for cn, req in reqs.items(): + _sans.append(cn) + _sans.extend(req['sans']) + return sorted(list(set(_sans))) + + @property + def _request_key(self): + """Key used to request cert + + :returns: Key used to request cert + :rtype: str + """ + return 'application_cert_requests' + + def derive_publish_key(self, unit=None): + """Derive the application cert publish key for a unit. 
+ + :param unit: Unit to extract name from + :type unit: charms.reactive.endpoints.RelatedUnit + :returns: publish key + :rtype: str + """ + if not unit: + unit = self._unit + unit_name = self.resolve_unit_name(unit).replace('/', '_') + return '{}.processed_application_requests'.format(unit_name) + + @property + def _publish_key(self): + """Key used to publish cert + + :returns: Key used to publish cert + :rtype: str + """ + return self.derive_publish_key(unit=self._unit) + + def set_cert(self, cert, key): + """Send the cert and key to all units of the application + + :param cert: TLS Certificate + :type cert: str + :param key: TLS Private Key + :type cert: str + """ + rel = self._unit.relation + for unit in self._unit.relation.units: + pub_key = self.derive_publish_key(unit=unit) + data = rel.to_publish.get( + pub_key, + {}) + data['app_data'] = { + 'cert': cert, + 'key': key, + } + rel.to_publish[pub_key] = data + if not rel.endpoint.new_application_requests: + clear_flag(rel.endpoint.expand_name( + '{endpoint_name}.application.certs.requested')) + data_changed(self._key, sorted(set(self.sans or []))) + + +class Certificate(dict): + """ + Represents a created certificate and key. + + The ``cert_type``, ``common_name``, ``cert``, and ``key`` values can + be accessed either as properties or as the contents of the dict. 
+ """ + def __init__(self, cert_type, common_name, cert, key): + super().__init__({ + 'cert_type': cert_type, + 'common_name': common_name, + 'cert': cert, + 'key': key, + }) + + @property + def cert_type(self): + return self['cert_type'] + + @property + def common_name(self): + return self['common_name'] + + @property + def cert(self): + return self['cert'] + + @property + def key(self): + return self['key'] diff --git a/kubernetes-control-plane/hooks/relations/vault-kv/.gitignore b/kubernetes-control-plane/hooks/relations/vault-kv/.gitignore new file mode 100644 index 0000000..9dd3eb8 --- /dev/null +++ b/kubernetes-control-plane/hooks/relations/vault-kv/.gitignore @@ -0,0 +1,2 @@ +.tox +.testrepository diff --git a/kubernetes-control-plane/hooks/relations/vault-kv/README.md b/kubernetes-control-plane/hooks/relations/vault-kv/README.md new file mode 100644 index 0000000..f09c312 --- /dev/null +++ b/kubernetes-control-plane/hooks/relations/vault-kv/README.md @@ -0,0 +1,52 @@ +# Overview + +This interface handles the communication with the vault charm using the +vault-kv interface type. + +Vault will enable simple KV based secrets backends with AppRole based +authentication and policies to allow consuming charms to store and retrieve +secrets in Vault. + +Access to the backend will be limited to the network address binding of +of the relation endpoint name and ownership of a secret\_id which the +consuming application must retrieve using a one-shot token out-of-band +from Juju. + +# Usage + +## Requires + +The interface layer will set the following reactive states, as appropriate: + + * `{relation_name}.connected` The relation is established and ready for + the local charm to make a request for access to a secrets backend using + the `request_secret_backend` method. 
+ + * `{relation_name}.available` When vault has created the backend and an + associated AppRole to allow the local charm to store and retrieve secrets + in vault - the `vault_url` and `unit_role_id` properties will be set. + + For example: + +```python +from charms.reactive.flags import endpoint_from_flag + + @when('secrets-storage.connected') + def ss_connected(): + secrets = endpoint_from_flag('secrets-storage.connected') + secrets.request_secret_backend('charm-vaultlocker', isolated=True) + + + @when('secrets-storage.available') + def ss_ready_for_use(): + secrets = endpoint_from_flag('secrets-storage.connected') + configure_my_local_service( + vault_url=secrets.vault_url, + role_id=secrets.unit_role_id, + secret_id=vault.get_response(secrets.unit_token), + backend='charm-vaultlocker', + ) + ``` + + Note that the backend name must be prefixed with 'charm-' otherwise the vault + charm will skip creation of the secrets backend and associated access. diff --git a/kubernetes-control-plane/hooks/relations/vault-kv/__init__.py b/kubernetes-control-plane/hooks/relations/vault-kv/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/kubernetes-control-plane/hooks/relations/vault-kv/copyright b/kubernetes-control-plane/hooks/relations/vault-kv/copyright new file mode 100644 index 0000000..32a8f52 --- /dev/null +++ b/kubernetes-control-plane/hooks/relations/vault-kv/copyright @@ -0,0 +1,21 @@ +Format: http://www.debian.org/doc/packaging-manuals/copyright-format/1.0 + +Files: * +Copyright: 2018, Canonical Ltd. +License: Apache-2.0 + +License: Apache-2.0 + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + . + http://www.apache.org/licenses/LICENSE-2.0 + . 
+ Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + . + On Debian-based systems the full text of the Apache version 2.0 license + can be found in `/usr/share/common-licenses/Apache-2.0'. diff --git a/kubernetes-control-plane/hooks/relations/vault-kv/interface.yaml b/kubernetes-control-plane/hooks/relations/vault-kv/interface.yaml new file mode 100644 index 0000000..b03cb19 --- /dev/null +++ b/kubernetes-control-plane/hooks/relations/vault-kv/interface.yaml @@ -0,0 +1,4 @@ +name: vault-kv +summary: Vault simple Key/Value secret storage interface +version: 1 +maintainer: "James Page " diff --git a/kubernetes-control-plane/hooks/relations/vault-kv/provides.py b/kubernetes-control-plane/hooks/relations/vault-kv/provides.py new file mode 100644 index 0000000..8039448 --- /dev/null +++ b/kubernetes-control-plane/hooks/relations/vault-kv/provides.py @@ -0,0 +1,94 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from charms.reactive import is_flag_set, toggle_flag, clear_flag +from charms.reactive import Endpoint +from charmhelpers import core as ch_core + + +class VaultKVProvides(Endpoint): + def manage_flags(self): + any_fields_changed = False + for field in ('access_address', + 'secret_backend', + 'hostname', + 'isolated'): + flag = self.expand_name('endpoint.{endpoint_name}.' + 'changed.{}').format(field) + any_fields_changed = any_fields_changed or is_flag_set(flag) + clear_flag(flag) + toggle_flag(self.expand_name('{endpoint_name}.connected'), + self.is_joined) + toggle_flag(self.expand_name('endpoint.{endpoint_name}.new-request'), + any_fields_changed) + + def publish_url(self, vault_url, remote_binding=None): + """ Publish URL for Vault to all Relations + + :param vault_url: api url used by remote client to speak to vault. + :param remote_binding: Deprecated + """ + if remote_binding: + ch_core.hookenv.log( + "Use of remote_binding in publish_url is deprecated. " + "See LP Bug #1895185", "WARNING") + for relation in self.relations: + relation.to_publish['vault_url'] = vault_url + + def publish_ca(self, vault_ca): + """ Publish SSL CA for Vault to all Relations """ + for relation in self.relations: + relation.to_publish['vault_ca'] = vault_ca + + def get_remote_unit_name(self, unit): + """Get the remote units name. + + :param unit: Unit to get name for. + :type name: Unit + :returns: Unit name + :rtype: str + """ + return unit.received.get('unit_name') or unit.unit_name + + def set_role_id(self, unit, role_id, token): + """ Set the AppRole ID and token for out-of-band Secret ID retrieval + for a specific remote unit """ + # for cmr we will need to the other end to provide their unit name + # expicitly. 
+ unit_name = self.get_remote_unit_name(unit) + unit.relation.to_publish['{}_role_id'.format(unit_name)] = role_id + unit.relation.to_publish['{}_token'.format(unit_name)] = token + + def requests(self): + """ Retrieve full set of setup requests from all remote units """ + requests = [] + for relation in self.relations: + for unit in relation.units: + access_address = unit.received['access_address'] + ingress_address = unit.received['ingress-address'] + secret_backend = unit.received['secret_backend'] + hostname = unit.received['hostname'] + isolated = unit.received['isolated'] + unit_name = self.get_remote_unit_name(unit) + if not (secret_backend and access_address and + hostname and isolated is not None): + continue + requests.append({ + 'unit': unit, + 'unit_name': unit_name, + 'access_address': access_address, + 'ingress_address': ingress_address, + 'secret_backend': secret_backend, + 'hostname': hostname, + 'isolated': isolated, + }) + return requests diff --git a/kubernetes-control-plane/hooks/relations/vault-kv/requires.py b/kubernetes-control-plane/hooks/relations/vault-kv/requires.py new file mode 100644 index 0000000..550d8ef --- /dev/null +++ b/kubernetes-control-plane/hooks/relations/vault-kv/requires.py @@ -0,0 +1,108 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import base64 +import socket + +from charmhelpers.core import hookenv + +from charms.reactive import toggle_flag +from charms.reactive import Endpoint + + +class VaultKVRequires(Endpoint): + def manage_flags(self): + toggle_flag(self.expand_name('{endpoint_name}.connected'), + self.is_joined) + toggle_flag(self.expand_name('{endpoint_name}.available'), + all([self.is_joined, + self.unit_role_id, + self.unit_token, + self.vault_url])) + + @property + def endpoint_address(self): + """ Determine the local endpoint network address """ + try: + return hookenv.network_get_primary_address( + self.expand_name('{endpoint_name}') + ) + except NotImplementedError: + return hookenv.unit_private_ip() + + def request_secret_backend(self, name, isolated=True): + """Request creation and access to a secret backend + + :param name: name of secret backend to create/access + :type name: str + :param isolated: enforce isolation in backend between units + :type isolated: bool""" + for relation in self.relations: + relation.to_publish['secret_backend'] = name + relation.to_publish['access_address'] = self.endpoint_address + relation.to_publish['hostname'] = socket.gethostname() + relation.to_publish['isolated'] = isolated + relation.to_publish['unit_name'] = hookenv.local_unit() + + @property + def unit_role_id(self): + """Retrieve the AppRole ID for this application unit or None + + :returns role_id: AppRole ID for unit + :rtype role_id: str""" + role_key = '{}_role_id'.format(hookenv.local_unit()) + return self.all_joined_units.received.get(role_key) + + @property + def unit_token(self): + """Retrieve the one-shot token for secret_id retrieval for + this application unit or None + + :returns token: Vault one-shot toekn for secret_id response + :rtype token: str""" + token_key = '{}_token'.format(hookenv.local_unit()) + return self.all_joined_units.received.get(token_key) + + @property + def all_unit_tokens(self): + """Retrieve the one-shot token(s) for secret_id retrieval for + all 
application units or empty list. + + :returns token: Vault one-shot token for secret_id response + :rtype token: str""" + token_key = '{}_token'.format(hookenv.local_unit()) + tokens = set() + for relation in self.relations: + for unit in relation.units: + token = unit.received.get(token_key) + if token: + tokens.add(token) + + return list(tokens) + + @property + def vault_url(self): + """Retrieve the URL to access Vault + + :returns vault_url: URL to access vault + :rtype vault_url: str""" + return self.all_joined_units.received.get('vault_url') + + @property + def vault_ca(self): + """Retrieve the CA published by Vault + + :returns vault_ca: Vault CA Certificate data + :rtype vault_ca: str""" + encoded_ca = self.all_joined_units.received.get('vault_ca') + if encoded_ca: + return base64.b64decode(encoded_ca) diff --git a/kubernetes-control-plane/hooks/relations/vault-kv/test-requirements.txt b/kubernetes-control-plane/hooks/relations/vault-kv/test-requirements.txt new file mode 100644 index 0000000..db5ef38 --- /dev/null +++ b/kubernetes-control-plane/hooks/relations/vault-kv/test-requirements.txt @@ -0,0 +1,2 @@ +flake8>=2.2.4 +os-testr>=0.4.1 diff --git a/kubernetes-control-plane/hooks/relations/vsphere-integration/.gitignore b/kubernetes-control-plane/hooks/relations/vsphere-integration/.gitignore new file mode 100644 index 0000000..5f9f2c5 --- /dev/null +++ b/kubernetes-control-plane/hooks/relations/vsphere-integration/.gitignore @@ -0,0 +1,3 @@ +.tox +__pycache__ +*.pyc diff --git a/kubernetes-control-plane/hooks/relations/vsphere-integration/LICENSE b/kubernetes-control-plane/hooks/relations/vsphere-integration/LICENSE new file mode 100644 index 0000000..d645695 --- /dev/null +++ b/kubernetes-control-plane/hooks/relations/vsphere-integration/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. 
+ + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of 
the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/kubernetes-control-plane/hooks/relations/vsphere-integration/README.md b/kubernetes-control-plane/hooks/relations/vsphere-integration/README.md new file mode 100644 index 0000000..28ff438 --- /dev/null +++ b/kubernetes-control-plane/hooks/relations/vsphere-integration/README.md @@ -0,0 +1,28 @@ +# Overview + +This layer encapsulates the `vsphere-integration` interface communication +protocol and provides an API for charms on either side of relations using this +interface. 
+ +## Usage + +In your charm's `layer.yaml`, ensure that `interface:vsphere-integration` is +included in the `includes` section: + +```yaml +includes: ['layer:basic', 'interface:vsphere-integration'] +``` + +And in your charm's `metadata.yaml`, ensure that a relation endpoint is defined +using the `vsphere-integration` interface protocol: + +```yaml +requires: + vsphere: + interface: vsphere-integration +``` + +For documentation on how to use the API for this interface, see: + +* [Requires API documentation](docs/requires.md) +* [Provides API documentation](docs/provides.md) (this will only be used by the vsphere-integrator charm) diff --git a/kubernetes-control-plane/hooks/relations/vsphere-integration/__init__.py b/kubernetes-control-plane/hooks/relations/vsphere-integration/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/kubernetes-control-plane/hooks/relations/vsphere-integration/copyright b/kubernetes-control-plane/hooks/relations/vsphere-integration/copyright new file mode 100644 index 0000000..a91bdf1 --- /dev/null +++ b/kubernetes-control-plane/hooks/relations/vsphere-integration/copyright @@ -0,0 +1,16 @@ +Format: http://dep.debian.net/deps/dep5/ + +Files: * +Copyright: Copyright 2018, Canonical Ltd., All Rights Reserved. +License: Apache License 2.0 + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + . + http://www.apache.org/licenses/LICENSE-2.0 + . + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/kubernetes-control-plane/hooks/relations/vsphere-integration/docs/provides.md b/kubernetes-control-plane/hooks/relations/vsphere-integration/docs/provides.md new file mode 100644 index 0000000..796b7e6 --- /dev/null +++ b/kubernetes-control-plane/hooks/relations/vsphere-integration/docs/provides.md @@ -0,0 +1,74 @@ +

provides

+ + +This is the provides side of the interface layer, for use only by the +vSphere integration charm itself. + +The flags that are set by the provides side of this interface are: + +* **`endpoint.{endpoint_name}.requested`** This flag is set when there is + a new or updated request by a remote unit for vSphere integration + features. The vSphere integration charm should then iterate over each + request, perform whatever actions are necessary to satisfy those requests, + and then mark them as complete. + +

VsphereIntegrationProvides

+ +```python +VsphereIntegrationProvides(self, endpoint_name, relation_ids=None) +``` + +Example usage: + +```python +from charms.reactive import when, endpoint_from_flag +from charms import layer + +@when('endpoint.vsphere.requests-pending') +def handle_requests(): + vsphere = endpoint_from_flag('endpoint.vsphere.requests-pending') + for request in vsphere.requests: + request.set_credentials(layer.vsphere.get_user_credentials()) + vsphere.mark_completed() +``` + +

requests

+ + +A list of the new or updated `IntegrationRequests` that +have been made. + +

mark_completed

+ +```python +VsphereIntegrationProvides.mark_completed(self) +``` + +Mark all requests as completed and remove the `requests-pending` flag. + +

IntegrationRequest

+ +```python +IntegrationRequest(self, unit) +``` + +A request for integration from a single remote unit. + +

has_credentials

+ + +Whether or not credentials have been set via `set_credentials`. + +

is_changed

+ + +Whether this request has changed since the last time it was +marked completed (if ever). + +

set_credentials

+ +```python +IntegrationRequest.set_credentials(self, vsphere_ip, user, password, datacenter, datastore) +``` + +Set the credentials for this request. diff --git a/kubernetes-control-plane/hooks/relations/vsphere-integration/docs/requires.md b/kubernetes-control-plane/hooks/relations/vsphere-integration/docs/requires.md new file mode 100644 index 0000000..0ce10a9 --- /dev/null +++ b/kubernetes-control-plane/hooks/relations/vsphere-integration/docs/requires.md @@ -0,0 +1,56 @@ +

requires

+ + +This is the requires side of the interface layer, for use in charms that wish +to request integration with vSphere native features. The integration will be +provided by the vSphere integration charm, which allows the requiring charm +to not require cloud credentials itself and not have a lot of vSphere +specific API code. + +The flags that are set by the requires side of this interface are: + +* **`endpoint.{endpoint_name}.joined`** This flag is set when the relation + has been joined, and the charm should then use the methods documented below + to request specific vSphere features. This flag is automatically removed + if the relation is broken. It should not be removed by the charm. + +* **`endpoint.{endpoint_name}.ready`** This flag is set once the requested + features have been enabled for the vSphere instance on which the charm is + running. This flag is automatically removed if new integration features are + requested. It should not be removed by the charm. + +

VsphereIntegrationRequires

+ +```python +VsphereIntegrationRequires(self, endpoint_name, relation_ids=None) +``` + +Interface to request integration access. + +Note that due to resource limits and permissions granularity, policies are +limited to being applied at the charm level. That means that, if any +permissions are requested (i.e., any of the enable methods are called), +what is granted will be the sum of those ever requested by any instance of +the charm on this cloud. + +Labels, on the other hand, will be instance specific. + +Example usage: + +```python +from charms.reactive import when, endpoint_from_flag + +@when('endpoint.vsphere.ready') +def vsphere_integration_ready(): + vsphere = endpoint_from_flag('endpoint.vsphere.joined') + update_config_enable_vsphere(vsphere.vsphere_ip, + vsphere.user, + vsphere.password, + vsphere.datacenter, + vsphere.datastore) +``` + +

is_ready

+ + +Whether or not the request for this instance has been completed. diff --git a/kubernetes-control-plane/hooks/relations/vsphere-integration/interface.yaml b/kubernetes-control-plane/hooks/relations/vsphere-integration/interface.yaml new file mode 100644 index 0000000..c4c0c07 --- /dev/null +++ b/kubernetes-control-plane/hooks/relations/vsphere-integration/interface.yaml @@ -0,0 +1,4 @@ +name: vsphere-integration +summary: Interface for connecting to the VMware vSphere integrator charm. +version: 1 +maintainer: Kevin Monroe diff --git a/kubernetes-control-plane/hooks/relations/vsphere-integration/make_docs b/kubernetes-control-plane/hooks/relations/vsphere-integration/make_docs new file mode 100644 index 0000000..04cf35b --- /dev/null +++ b/kubernetes-control-plane/hooks/relations/vsphere-integration/make_docs @@ -0,0 +1,20 @@ +#!.tox/py3/bin/python + +import sys +from shutil import rmtree +from unittest.mock import patch + +import pydocmd.__main__ + + +with patch('charmhelpers.core.hookenv.metadata') as metadata: + metadata.return_value = { + 'requires': {'vsphere': {'interface': 'vsphere'}}, + 'provides': {'vsphere': {'interface': 'vsphere'}}, + } + sys.path.insert(0, '.') + print(sys.argv) + if len(sys.argv) == 1: + sys.argv.extend(['build']) + pydocmd.__main__.main() + rmtree('_build') diff --git a/kubernetes-control-plane/hooks/relations/vsphere-integration/provides.py b/kubernetes-control-plane/hooks/relations/vsphere-integration/provides.py new file mode 100644 index 0000000..c3db1d8 --- /dev/null +++ b/kubernetes-control-plane/hooks/relations/vsphere-integration/provides.py @@ -0,0 +1,132 @@ +""" +This is the provides side of the interface layer, for use only by the +vSphere integration charm itself. + +The flags that are set by the provides side of this interface are: + +* **`endpoint.{endpoint_name}.requested`** This flag is set when there is + a new or updated request by a remote unit for vSphere integration + features. 
The vSphere integration charm should then iterate over each + request, perform whatever actions are necessary to satisfy those requests, + and then mark them as complete. +""" + +from operator import attrgetter + +from charms.reactive import Endpoint +from charms.reactive import when +from charms.reactive import toggle_flag, clear_flag + + +class VsphereIntegrationProvides(Endpoint): + """ + Example usage: + + ```python + from charms.reactive import when, endpoint_from_flag + from charms import layer + + @when('endpoint.vsphere.requests-pending') + def handle_requests(): + vsphere = endpoint_from_flag('endpoint.vsphere.requests-pending') + for request in vsphere.requests: + request.set_credentials(layer.vsphere.get_vsphere_credentials()) + request.set_config(layer.vsphere.get_vsphere_config()) + vsphere.mark_completed() + ``` + """ + + @when('endpoint.{endpoint_name}.changed') + def check_requests(self): + toggle_flag(self.expand_name('requests-pending'), + len(self.new_requests) > 0) + clear_flag(self.expand_name('changed')) + + @property + def all_requests(self): + """ + A list of all the #IntegrationRequests that have been made. + """ + return [IntegrationRequest(unit) for unit in self.all_joined_units] + + @property + def new_requests(self): + """ + A list of the new or updated #IntegrationRequests that have been made. + """ + is_changed = attrgetter('is_changed') + return list(filter(is_changed, self.all_requests)) + + def mark_completed(self): + """ + Remove the `requests-pending` flag. + """ + clear_flag(self.expand_name('requests-pending')) + + +class IntegrationRequest: + """ + A request for integration from a single remote unit. + """ + def __init__(self, unit): + self._unit = unit + + @property + def _to_publish(self): + return self._unit.relation.to_publish + + @property + def has_credentials(self): + """ + Whether or not `set_credentials` has been called. 
+ """ + return {'vsphere_ip', 'user', + 'password', 'datacenter'}.issubset(self._to_publish) + + @property + def has_config(self): + """ + Whether or not `set_config` has been called. + """ + return {'datastore', 'folder', + 'respool_path'}.issubset(self._to_publish) + + @property + def is_changed(self): + """ + Whether this request has changed since the last time it was + marked completed (if ever). + """ + return not (self.has_credentials and self.has_config) + + @property + def unit_name(self): + return self._unit.unit_name + + def set_credentials(self, + vsphere_ip, + user, + password, + datacenter): + """ + Set the vsphere credentials for this request. + """ + self._to_publish.update({ + 'vsphere_ip': vsphere_ip, + 'user': user, + 'password': password, + 'datacenter': datacenter, + }) + + def set_config(self, + datastore, + folder, + respool_path): + """ + Set the non-credential vsphere config for this request. + """ + self._to_publish.update({ + 'datastore': datastore, + 'folder': folder, + 'respool_path': respool_path, + }) diff --git a/kubernetes-control-plane/hooks/relations/vsphere-integration/pydocmd.yml b/kubernetes-control-plane/hooks/relations/vsphere-integration/pydocmd.yml new file mode 100644 index 0000000..e1d5d4a --- /dev/null +++ b/kubernetes-control-plane/hooks/relations/vsphere-integration/pydocmd.yml @@ -0,0 +1,16 @@ +site_name: 'VMware vSphere Integration Interface' + +generate: + - requires.md: + - requires + - requires.VsphereIntegrationRequires+ + - provides.md: + - provides + - provides.VsphereIntegrationProvides+ + - provides.IntegrationRequest+ + +pages: + - Requires: requires.md + - Provides: provides.md + +gens_dir: docs diff --git a/kubernetes-control-plane/hooks/relations/vsphere-integration/requires.py b/kubernetes-control-plane/hooks/relations/vsphere-integration/requires.py new file mode 100644 index 0000000..d8b9cdb --- /dev/null +++ b/kubernetes-control-plane/hooks/relations/vsphere-integration/requires.py @@ -0,0 +1,141 @@ 
+""" +This is the requires side of the interface layer, for use in charms that wish +to request integration with vSphere native features. The integration will be +provided by the vSphere integration charm, which allows the requiring charm +to not require cloud credentials itself and not have a lot of vSphere +specific API code. + +The flags that are set by the requires side of this interface are: + +* **`endpoint.{endpoint_name}.joined`** This flag is set when the relation + has been joined, and the charm should then use the methods documented below + to request specific vSphere features. This flag is automatically removed + if the relation is broken. It should not be removed by the charm. + +* **`endpoint.{endpoint_name}.ready`** This flag is set once the requested + features have been enabled for the vSphere instance on which the charm is + running. This flag is automatically removed if new integration features are + requested. It should not be removed by the charm. + +* **`endpoint.{endpoint_name}.ready.changed`** This flag is set if the data + changes after the ready flag was set. This flag should be removed by the + charm once handled. +""" + + +from charms.reactive import Endpoint +from charms.reactive import when, when_not +from charms.reactive import clear_flag, is_flag_set, set_flag, toggle_flag +from charms.reactive import data_changed + + +class VsphereIntegrationRequires(Endpoint): + """ + Interface to request integration access. + + Note that due to resource limits and permissions granularity, policies are + limited to being applied at the charm level. That means that, if any + permissions are requested (i.e., any of the enable methods are called), + what is granted will be the sum of those ever requested by any instance of + the charm on this cloud. + + Labels, on the other hand, will be instance specific. 
+ + Example usage: + + ```python + from charms.reactive import when, endpoint_from_flag + + @when('endpoint.vsphere.ready') + def vsphere_integration_ready(): + vsphere = endpoint_from_flag('endpoint.vsphere.joined') + update_config_enable_vsphere(vsphere.vsphere_ip, + vsphere.user, + vsphere.password, + vsphere.datacenter, + vsphere.datastore, + vsphere.folder, + vsphere.respool_path) + ``` + """ + + @property + def _received(self): + """ + Helper to streamline access to received data. + """ + return self.all_joined_units.received + + @when('endpoint.{endpoint_name}.changed') + def check_ready(self): + """ + Manage flags to signal when the endpoint is ready as well as noting + if changes have been made since it became ready. + """ + was_ready = is_flag_set(self.expand_name('ready')) + toggle_flag(self.expand_name('ready'), self.is_ready) + if self.is_ready and was_ready and self.is_changed: + set_flag(self.expand_name('ready.changed')) + clear_flag(self.expand_name('changed')) + + @when_not('endpoint.{endpoint_name}.joined') + def remove_ready(self): + clear_flag(self.expand_name('ready')) + + @property + def is_ready(self): + """ + Whether or not the request for this instance has been completed. + """ + return all(field is not None for field in [ + self.vsphere_ip, + self.user, + self.password, + self.datacenter, + self.datastore, + self.folder, + self.respool_path, + ]) + + @property + def is_changed(self): + """ + Whether or not the request for this instance has changed. 
+ """ + return data_changed(self.expand_name('all-data'), [ + self.vsphere_ip, + self.user, + self.password, + self.datacenter, + self.datastore, + self.folder, + self.respool_path, + ]) + + @property + def vsphere_ip(self): + return self._received['vsphere_ip'] + + @property + def user(self): + return self._received['user'] + + @property + def password(self): + return self._received['password'] + + @property + def datacenter(self): + return self._received['datacenter'] + + @property + def datastore(self): + return self._received['datastore'] + + @property + def folder(self): + return self._received['folder'] + + @property + def respool_path(self): + return self._received['respool_path'] diff --git a/kubernetes-control-plane/hooks/start b/kubernetes-control-plane/hooks/start new file mode 100755 index 0000000..9858c6b --- /dev/null +++ b/kubernetes-control-plane/hooks/start @@ -0,0 +1,22 @@ +#!/usr/bin/env python3 + +# Load modules from $JUJU_CHARM_DIR/lib +import sys +sys.path.append('lib') + +from charms.layer import basic # noqa +basic.bootstrap_charm_deps() + +from charmhelpers.core import hookenv # noqa +hookenv.atstart(basic.init_config_states) +hookenv.atexit(basic.clear_config_states) + + +# This will load and run the appropriate @hook and other decorated +# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive, +# and $JUJU_CHARM_DIR/hooks/relations. +# +# See https://jujucharms.com/docs/stable/authors-charm-building +# for more information on this pattern. 
+from charms.reactive import main # noqa +main() diff --git a/kubernetes-control-plane/hooks/stop b/kubernetes-control-plane/hooks/stop new file mode 100755 index 0000000..9858c6b --- /dev/null +++ b/kubernetes-control-plane/hooks/stop @@ -0,0 +1,22 @@ +#!/usr/bin/env python3 + +# Load modules from $JUJU_CHARM_DIR/lib +import sys +sys.path.append('lib') + +from charms.layer import basic # noqa +basic.bootstrap_charm_deps() + +from charmhelpers.core import hookenv # noqa +hookenv.atstart(basic.init_config_states) +hookenv.atexit(basic.clear_config_states) + + +# This will load and run the appropriate @hook and other decorated +# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive, +# and $JUJU_CHARM_DIR/hooks/relations. +# +# See https://jujucharms.com/docs/stable/authors-charm-building +# for more information on this pattern. +from charms.reactive import main # noqa +main() diff --git a/kubernetes-control-plane/hooks/update-status b/kubernetes-control-plane/hooks/update-status new file mode 100755 index 0000000..9858c6b --- /dev/null +++ b/kubernetes-control-plane/hooks/update-status @@ -0,0 +1,22 @@ +#!/usr/bin/env python3 + +# Load modules from $JUJU_CHARM_DIR/lib +import sys +sys.path.append('lib') + +from charms.layer import basic # noqa +basic.bootstrap_charm_deps() + +from charmhelpers.core import hookenv # noqa +hookenv.atstart(basic.init_config_states) +hookenv.atexit(basic.clear_config_states) + + +# This will load and run the appropriate @hook and other decorated +# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive, +# and $JUJU_CHARM_DIR/hooks/relations. +# +# See https://jujucharms.com/docs/stable/authors-charm-building +# for more information on this pattern. 
+from charms.reactive import main # noqa +main() diff --git a/kubernetes-control-plane/hooks/upgrade-charm b/kubernetes-control-plane/hooks/upgrade-charm new file mode 100755 index 0000000..9858c6b --- /dev/null +++ b/kubernetes-control-plane/hooks/upgrade-charm @@ -0,0 +1,22 @@ +#!/usr/bin/env python3 + +# Load modules from $JUJU_CHARM_DIR/lib +import sys +sys.path.append('lib') + +from charms.layer import basic # noqa +basic.bootstrap_charm_deps() + +from charmhelpers.core import hookenv # noqa +hookenv.atstart(basic.init_config_states) +hookenv.atexit(basic.clear_config_states) + + +# This will load and run the appropriate @hook and other decorated +# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive, +# and $JUJU_CHARM_DIR/hooks/relations. +# +# See https://jujucharms.com/docs/stable/authors-charm-building +# for more information on this pattern. +from charms.reactive import main # noqa +main() diff --git a/kubernetes-control-plane/hooks/vault-kv-relation-broken b/kubernetes-control-plane/hooks/vault-kv-relation-broken new file mode 100755 index 0000000..9858c6b --- /dev/null +++ b/kubernetes-control-plane/hooks/vault-kv-relation-broken @@ -0,0 +1,22 @@ +#!/usr/bin/env python3 + +# Load modules from $JUJU_CHARM_DIR/lib +import sys +sys.path.append('lib') + +from charms.layer import basic # noqa +basic.bootstrap_charm_deps() + +from charmhelpers.core import hookenv # noqa +hookenv.atstart(basic.init_config_states) +hookenv.atexit(basic.clear_config_states) + + +# This will load and run the appropriate @hook and other decorated +# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive, +# and $JUJU_CHARM_DIR/hooks/relations. +# +# See https://jujucharms.com/docs/stable/authors-charm-building +# for more information on this pattern. 
+from charms.reactive import main # noqa +main() diff --git a/kubernetes-control-plane/hooks/vault-kv-relation-changed b/kubernetes-control-plane/hooks/vault-kv-relation-changed new file mode 100755 index 0000000..9858c6b --- /dev/null +++ b/kubernetes-control-plane/hooks/vault-kv-relation-changed @@ -0,0 +1,22 @@ +#!/usr/bin/env python3 + +# Load modules from $JUJU_CHARM_DIR/lib +import sys +sys.path.append('lib') + +from charms.layer import basic # noqa +basic.bootstrap_charm_deps() + +from charmhelpers.core import hookenv # noqa +hookenv.atstart(basic.init_config_states) +hookenv.atexit(basic.clear_config_states) + + +# This will load and run the appropriate @hook and other decorated +# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive, +# and $JUJU_CHARM_DIR/hooks/relations. +# +# See https://jujucharms.com/docs/stable/authors-charm-building +# for more information on this pattern. +from charms.reactive import main # noqa +main() diff --git a/kubernetes-control-plane/hooks/vault-kv-relation-created b/kubernetes-control-plane/hooks/vault-kv-relation-created new file mode 100755 index 0000000..9858c6b --- /dev/null +++ b/kubernetes-control-plane/hooks/vault-kv-relation-created @@ -0,0 +1,22 @@ +#!/usr/bin/env python3 + +# Load modules from $JUJU_CHARM_DIR/lib +import sys +sys.path.append('lib') + +from charms.layer import basic # noqa +basic.bootstrap_charm_deps() + +from charmhelpers.core import hookenv # noqa +hookenv.atstart(basic.init_config_states) +hookenv.atexit(basic.clear_config_states) + + +# This will load and run the appropriate @hook and other decorated +# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive, +# and $JUJU_CHARM_DIR/hooks/relations. +# +# See https://jujucharms.com/docs/stable/authors-charm-building +# for more information on this pattern. 
+from charms.reactive import main # noqa +main() diff --git a/kubernetes-control-plane/hooks/vault-kv-relation-departed b/kubernetes-control-plane/hooks/vault-kv-relation-departed new file mode 100755 index 0000000..9858c6b --- /dev/null +++ b/kubernetes-control-plane/hooks/vault-kv-relation-departed @@ -0,0 +1,22 @@ +#!/usr/bin/env python3 + +# Load modules from $JUJU_CHARM_DIR/lib +import sys +sys.path.append('lib') + +from charms.layer import basic # noqa +basic.bootstrap_charm_deps() + +from charmhelpers.core import hookenv # noqa +hookenv.atstart(basic.init_config_states) +hookenv.atexit(basic.clear_config_states) + + +# This will load and run the appropriate @hook and other decorated +# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive, +# and $JUJU_CHARM_DIR/hooks/relations. +# +# See https://jujucharms.com/docs/stable/authors-charm-building +# for more information on this pattern. +from charms.reactive import main # noqa +main() diff --git a/kubernetes-control-plane/hooks/vault-kv-relation-joined b/kubernetes-control-plane/hooks/vault-kv-relation-joined new file mode 100755 index 0000000..9858c6b --- /dev/null +++ b/kubernetes-control-plane/hooks/vault-kv-relation-joined @@ -0,0 +1,22 @@ +#!/usr/bin/env python3 + +# Load modules from $JUJU_CHARM_DIR/lib +import sys +sys.path.append('lib') + +from charms.layer import basic # noqa +basic.bootstrap_charm_deps() + +from charmhelpers.core import hookenv # noqa +hookenv.atstart(basic.init_config_states) +hookenv.atexit(basic.clear_config_states) + + +# This will load and run the appropriate @hook and other decorated +# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive, +# and $JUJU_CHARM_DIR/hooks/relations. +# +# See https://jujucharms.com/docs/stable/authors-charm-building +# for more information on this pattern. 
+from charms.reactive import main # noqa +main() diff --git a/kubernetes-control-plane/hooks/vsphere-relation-broken b/kubernetes-control-plane/hooks/vsphere-relation-broken new file mode 100755 index 0000000..9858c6b --- /dev/null +++ b/kubernetes-control-plane/hooks/vsphere-relation-broken @@ -0,0 +1,22 @@ +#!/usr/bin/env python3 + +# Load modules from $JUJU_CHARM_DIR/lib +import sys +sys.path.append('lib') + +from charms.layer import basic # noqa +basic.bootstrap_charm_deps() + +from charmhelpers.core import hookenv # noqa +hookenv.atstart(basic.init_config_states) +hookenv.atexit(basic.clear_config_states) + + +# This will load and run the appropriate @hook and other decorated +# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive, +# and $JUJU_CHARM_DIR/hooks/relations. +# +# See https://jujucharms.com/docs/stable/authors-charm-building +# for more information on this pattern. +from charms.reactive import main # noqa +main() diff --git a/kubernetes-control-plane/hooks/vsphere-relation-changed b/kubernetes-control-plane/hooks/vsphere-relation-changed new file mode 100755 index 0000000..9858c6b --- /dev/null +++ b/kubernetes-control-plane/hooks/vsphere-relation-changed @@ -0,0 +1,22 @@ +#!/usr/bin/env python3 + +# Load modules from $JUJU_CHARM_DIR/lib +import sys +sys.path.append('lib') + +from charms.layer import basic # noqa +basic.bootstrap_charm_deps() + +from charmhelpers.core import hookenv # noqa +hookenv.atstart(basic.init_config_states) +hookenv.atexit(basic.clear_config_states) + + +# This will load and run the appropriate @hook and other decorated +# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive, +# and $JUJU_CHARM_DIR/hooks/relations. +# +# See https://jujucharms.com/docs/stable/authors-charm-building +# for more information on this pattern. 
+from charms.reactive import main # noqa +main() diff --git a/kubernetes-control-plane/hooks/vsphere-relation-created b/kubernetes-control-plane/hooks/vsphere-relation-created new file mode 100755 index 0000000..9858c6b --- /dev/null +++ b/kubernetes-control-plane/hooks/vsphere-relation-created @@ -0,0 +1,22 @@ +#!/usr/bin/env python3 + +# Load modules from $JUJU_CHARM_DIR/lib +import sys +sys.path.append('lib') + +from charms.layer import basic # noqa +basic.bootstrap_charm_deps() + +from charmhelpers.core import hookenv # noqa +hookenv.atstart(basic.init_config_states) +hookenv.atexit(basic.clear_config_states) + + +# This will load and run the appropriate @hook and other decorated +# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive, +# and $JUJU_CHARM_DIR/hooks/relations. +# +# See https://jujucharms.com/docs/stable/authors-charm-building +# for more information on this pattern. +from charms.reactive import main # noqa +main() diff --git a/kubernetes-control-plane/hooks/vsphere-relation-departed b/kubernetes-control-plane/hooks/vsphere-relation-departed new file mode 100755 index 0000000..9858c6b --- /dev/null +++ b/kubernetes-control-plane/hooks/vsphere-relation-departed @@ -0,0 +1,22 @@ +#!/usr/bin/env python3 + +# Load modules from $JUJU_CHARM_DIR/lib +import sys +sys.path.append('lib') + +from charms.layer import basic # noqa +basic.bootstrap_charm_deps() + +from charmhelpers.core import hookenv # noqa +hookenv.atstart(basic.init_config_states) +hookenv.atexit(basic.clear_config_states) + + +# This will load and run the appropriate @hook and other decorated +# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive, +# and $JUJU_CHARM_DIR/hooks/relations. +# +# See https://jujucharms.com/docs/stable/authors-charm-building +# for more information on this pattern. 
+from charms.reactive import main # noqa +main() diff --git a/kubernetes-control-plane/hooks/vsphere-relation-joined b/kubernetes-control-plane/hooks/vsphere-relation-joined new file mode 100755 index 0000000..9858c6b --- /dev/null +++ b/kubernetes-control-plane/hooks/vsphere-relation-joined @@ -0,0 +1,22 @@ +#!/usr/bin/env python3 + +# Load modules from $JUJU_CHARM_DIR/lib +import sys +sys.path.append('lib') + +from charms.layer import basic # noqa +basic.bootstrap_charm_deps() + +from charmhelpers.core import hookenv # noqa +hookenv.atstart(basic.init_config_states) +hookenv.atexit(basic.clear_config_states) + + +# This will load and run the appropriate @hook and other decorated +# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive, +# and $JUJU_CHARM_DIR/hooks/relations. +# +# See https://jujucharms.com/docs/stable/authors-charm-building +# for more information on this pattern. +from charms.reactive import main # noqa +main() diff --git a/kubernetes-control-plane/icon.svg b/kubernetes-control-plane/icon.svg new file mode 100644 index 0000000..d4e9f39 --- /dev/null +++ b/kubernetes-control-plane/icon.svg @@ -0,0 +1,106 @@ + + + + + + + + + + image/svg+xml + + + + + + + + + + + control + plane + + + diff --git a/kubernetes-control-plane/layer.yaml b/kubernetes-control-plane/layer.yaml new file mode 100644 index 0000000..8d90517 --- /dev/null +++ b/kubernetes-control-plane/layer.yaml @@ -0,0 +1,97 @@ +"includes": +- "layer:options" +- "layer:basic" +- "layer:debug" +- "interface:tls-certificates" +- "interface:nrpe-external-master" +- "layer:cis-benchmark" +- "layer:coordinator" +- "layer:kubernetes-common" +- "interface:container-runtime" +- "interface:vault-kv" +- "layer:status" +- "layer:apt" +- "layer:vault-kv" +- "interface:hacluster" +- "layer:snap" +- "layer:tls-client" +- "layer:leadership" +- "layer:metrics" +- "layer:nagios" +- "layer:cdk-service-kicker" +- "layer:kubernetes-node-base" +- "layer:vaultlocker" +- "layer:hacluster" +- 
"interface:ceph-admin" +- "interface:ceph-client" +- "interface:etcd" +- "interface:http" +- "interface:kubernetes-cni" +- "interface:kube-dns" +- "interface:kube-control" +- "interface:kube-masters" +- "interface:public-address" +- "interface:aws-integration" +- "interface:gcp-integration" +- "interface:openstack-integration" +- "interface:vsphere-integration" +- "interface:azure-integration" +- "interface:keystone-credentials" +- "interface:prometheus-manual" +- "interface:grafana-dashboard" +- "interface:aws-iam" +"exclude": [".travis.yml", "tests", "tox.ini", "test-requirements.txt", "unit_tests", + ".tox", "__pycache__", "Makefile", "conftest.py"] +"options": + "coordinator": + # Absolute path to the charmhelpers.coordinator.BaseCoordinator to use. + "class": "charms.coordinator.SimpleCoordinator" + # Layer log level (debug, info, warning, error, critical) + "log_level": "info" + "basic": + "packages": + - "socat" + "python_packages": [] + "use_venv": !!bool "true" + "include_system_packages": !!bool "false" + "tls-client": + "ca_certificate_path": "/root/cdk/ca.crt" + "server_certificate_path": "" + "server_key_path": "" + "client_certificate_path": "" + "client_key_path": "" + "cdk-service-kicker": + "services": + - "snap.kube-apiserver.daemon" + - "snap.kube-controller-manager.daemon" + - "snap.kube-scheduler.daemon" + - "snap.kube-proxy.daemon" + "hacluster": + "binding_address": "kube-api-endpoint" + "snap": {} + "debug": {} + "leadership": {} + "nagios": {} + "cis-benchmark": {} + "kubernetes-common": {} + "kubernetes-node-base": {} + "vault-kv": {} + "status": + "patch-hookenv": !!bool "true" + "apt": + "packages": [] + "version_package": "" + "full_version": !!bool "false" + "keys": [] + "vaultlocker": {} + "kubernetes-control-plane": {} +"repo": "https://github.com/kubernetes/kubernetes.git" +"proof": + "storage": + - "name": "vaultlocker-encrypt" + "type": "Boolean" + "missing": !!bool "false" + - "name": "vaultlocker-mountbase" + "type": "String" + 
"missing": "" +"is": "kubernetes-control-plane" diff --git a/kubernetes-control-plane/lib/charms/apt.py b/kubernetes-control-plane/lib/charms/apt.py new file mode 100644 index 0000000..14508c4 --- /dev/null +++ b/kubernetes-control-plane/lib/charms/apt.py @@ -0,0 +1,209 @@ +# Copyright 2015-2020 Canonical Ltd. +# +# This file is part of the Apt layer for Juju. +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License version 3, as +# published by the Free Software Foundation. +# +# This program is distributed in the hope that it will be useful, but +# WITHOUT ANY WARRANTY; without even the implied warranties of +# MERCHANTABILITY, SATISFACTORY QUALITY, or FITNESS FOR A PARTICULAR +# PURPOSE. See the GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . + +''' +charms.reactive helpers for dealing with deb packages. + +Add apt package sources using add_source(). Queue deb packages for +installation with install(). Configure and work with your software +once the apt.installed.{packagename} flag is set. +''' +import itertools +import re +import subprocess + +from charmhelpers import fetch +from charmhelpers.core import hookenv, unitdata +from charms import layer, reactive +from charms.layer import status +from charms.reactive import flags + + +__all__ = ['add_source', 'update', 'queue_install', 'install_queued', 'installed', 'purge', 'ensure_package_status'] + + +def add_source(source, key=None): + '''Add an apt source. + + Sets the apt.needs_update flag. + + A source may be either a line that can be added directly to + sources.list(5), or in the form ppa:/ for adding + Personal Package Archives, or a distribution component to enable. + + The package signing key should be an ASCII armoured GPG key. While + GPG key ids are also supported, the retrieval mechanism is insecure. 
+ There is no need to specify the package signing key for PPAs or for + the main Ubuntu archives. + ''' + # Maybe we should remember which sources have been added already + # so we don't waste time re-adding them. Is this time significant? + fetch.add_source(source, key) + reactive.set_flag('apt.needs_update') + + +def queue_install(packages, options=None): + """Queue one or more deb packages for install. + + The `apt.installed.{name}` flag is set once the package is installed. + + If a package has already been installed it will not be reinstalled. + + If a package has already been queued it will not be requeued, and + the install options will not be changed. + + Sets the apt.queued_installs flag. + """ + if isinstance(packages, str): + packages = [packages] + # Filter installed packages. + store = unitdata.kv() + queued_packages = store.getrange('apt.install_queue.', strip=True) + packages = { + package: options + for package in packages + if not (package in queued_packages or reactive.is_flag_set('apt.installed.' + package)) + } + if packages: + unitdata.kv().update(packages, prefix='apt.install_queue.') + reactive.set_flag('apt.queued_installs') + + +def installed(): + '''Return the set of deb packages completed install''' + return set(flag.split('.', 2)[2] for flag in flags.get_flags() if flag.startswith('apt.installed.')) + + +def purge(packages): + """Purge one or more deb packages from the system""" + fetch.apt_purge(packages, fatal=True) + store = unitdata.kv() + store.unsetrange(packages, prefix='apt.install_queue.') + for package in packages: + reactive.clear_flag('apt.installed.{}'.format(package)) + + +def update(): + """Update the apt cache. + + Removes the apt.needs_update flag. + """ + status.maintenance('Updating apt cache') + fetch.apt_update(fatal=True) # Friends don't let friends set fatal=False + reactive.clear_flag('apt.needs_update') + + +def install_queued(): + '''Installs queued deb packages. 
+ + Removes the apt.queued_installs flag and sets the apt.installed flag. + + On failure, sets the unit's workload status to 'blocked' and returns + False. Package installs remain queued. + + On success, sets the apt.installed.{packagename} flag for each + installed package and returns True. + ''' + store = unitdata.kv() + queue = sorted((options, package) for package, options in store.getrange('apt.install_queue.', strip=True).items()) + + installed = set() + for options, batch in itertools.groupby(queue, lambda x: x[0]): + packages = [b[1] for b in batch] + try: + status.maintenance('Installing {}'.format(','.join(packages))) + fetch.apt_install(packages, options, fatal=True) + store.unsetrange(packages, prefix='apt.install_queue.') + installed.update(packages) + except subprocess.CalledProcessError: + status.blocked('Unable to install packages {}'.format(','.join(packages))) + return False # Without setting reactive flag. + + for package in installed: + reactive.set_flag('apt.installed.{}'.format(package)) + reactive.clear_flag('apt.queued_installs') + + reset_application_version() + + return True + + +def get_package_version(package, full_version=False): + '''Return the version of an installed package. + + If `full_version` is True, returns the full Debian package version. + Otherwise, returns the shorter 'upstream' version number. + ''' + # Don't use fetch.get_upstream_version, as it depends on python-apt + # and not available if the basic layer's use_site_packages option is off. + cmd = ['dpkg-query', '--show', r'--showformat=${Version}\n', package] + full = subprocess.check_output(cmd, universal_newlines=True).strip() + if not full_version: + # Attempt to strip off Debian style metadata from the end of the + # version number. 
+ m = re.search(r'^([\d.a-z]+)', full, re.I) + if m is not None: + return m.group(1) + return full + + +def reset_application_version(): + '''Set the Juju application version, per settings in layer.yaml''' + # Reset the application version. We call this after installing + # packages to initialize the version. We also call this every + # hook, incase the version has changed (eg. Landscape upgraded + # the package). + opts = layer.options().get('apt', {}) + pkg = opts.get('version_package') + if pkg and pkg in installed(): + ver = get_package_version(pkg, opts.get('full_version', False)) + hookenv.application_version_set(ver) + + +def ensure_package_status(): + '''Hold or unhold packages per the package_status configuration option. + + All packages installed using this module and handlers are affected. + + An mechanism may be added in the future to override this for a + subset of installed packages. + ''' + packages = installed() + if not packages: + return + config = hookenv.config() + package_status = config.get('package_status') or '' + changed = reactive.data_changed('apt.package_status', (package_status, sorted(packages))) + if changed: + if package_status == 'hold': + hookenv.log('Holding packages {}'.format(','.join(packages))) + fetch.apt_hold(packages) + else: + hookenv.log('Unholding packages {}'.format(','.join(packages))) + fetch.apt_unhold(packages) + reactive.clear_flag('apt.needs_hold') + + +def status_set(state, message): + '''DEPRECATED, set the unit's workload status. + + Set state == None to keep the same state and just change the message. 
+ ''' + if state is None: + state = hookenv.status_get()[0] + if state not in ('active', 'waiting', 'blocked'): + state = 'maintenance' # Guess + status.status_set(state, message) diff --git a/kubernetes-control-plane/lib/charms/coordinator.py b/kubernetes-control-plane/lib/charms/coordinator.py new file mode 100644 index 0000000..b954b92 --- /dev/null +++ b/kubernetes-control-plane/lib/charms/coordinator.py @@ -0,0 +1,144 @@ +# Copyright 2015-2016 Canonical Ltd. +# +# This file is part of the Coordinator Layer for Juju. +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License version 3, as +# published by the Free Software Foundation. +# +# This program is distributed in the hope that it will be useful, but +# WITHOUT ANY WARRANTY; without even the implied warranties of +# MERCHANTABILITY, SATISFACTORY QUALITY, or FITNESS FOR A PARTICULAR +# PURPOSE. See the GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . + +import importlib + +from charmhelpers.coordinator import BaseCoordinator +from charmhelpers.core import hookenv +from charms import reactive +import charms.layer + + +__all__ = ['coordinator', 'acquire'] + + +def acquire(lock): + """ + Sets either the coordinator.granted.{lockname} or + coordinator.requested.{lockname} state. + + Returns True if the lock could be immediately granted. + + If locks cannot be granted immediately, they will be granted + in a future hook and the coordinator.granted.{lockname} state set. 
+ """ + global coordinator + if coordinator.acquire(lock): + s = 'coordinator.granted.{}'.format(lock) + if not reactive.is_state(s): + log('Granted {} lock'.format(lock), hookenv.DEBUG) + reactive.set_state('coordinator.granted.{}'.format(lock)) + return True + else: + log('Requested {} lock'.format(lock), hookenv.DEBUG) + reactive.set_state('coordinator.requested.{}'.format(lock)) + return False + + +options = charms.layer.options('coordinator') + + +def log(msg, level=hookenv.INFO): + lmap = {hookenv.DEBUG: 1, + hookenv.INFO: 2, + hookenv.WARNING: 3, + hookenv.ERROR: 4, + hookenv.CRITICAL: 5} + if lmap[level] >= lmap[options.get('log_level', 'DEBUG').upper()]: + hookenv.log('Coordinator: {}'.format(msg), level) + + +class SimpleCoordinator(BaseCoordinator): + '''A simple BaseCoordinator that is suitable for almost all cases. + + Only one unit at a time will be granted locks. All requests by that + unit will be granted. So only one unit may run tasks guarded by a lock, + and the lock name is irrelevant. + ''' + def default_grant(self, lock, unit, granted, queue): + '''Grant locks to only one unit at a time, regardless of the lock name. + + This lets us keep separate locks like join and restart, + while ensuring the operations do not occur on different nodes + at the same time. + ''' + existing_grants = {k: v for k, v in self.grants.items() if v} + + # Return True if this unit has already been granted any lock. + if existing_grants.get(unit): + self.msg('Granting {} to {} (existing grants)'.format(lock, unit), + hookenv.INFO) + return True + + # Return False if another unit has been granted any lock. + if existing_grants: + self.msg('Not granting {} to {} (locks held by {})' + ''.format(lock, unit, ','.join(existing_grants.keys())), + hookenv.INFO) + return False + + # Otherwise, return True if the unit is first in the queue for + # this named lock. 
+ if queue[0] == unit: + self.msg('Granting {} to {} (first in queue)' + ''.format(lock, unit), hookenv.INFO) + return True + else: + self.msg('Not granting {} to {} (not first in queue)' + ''.format(lock, unit), hookenv.INFO) + return False + + def msg(self, msg, level=hookenv.DEBUG): + '''Emit a message.''' + log(msg, level) + + def _save_state(self): + # If the leader aquired a lock, and now released it, + # there may be outstanding requests in the queue from other + # units. We need to grant them now, as we have no guarantee + # of another hook running on the leader for some time (until + # update-status). + self.handle() + super(SimpleCoordinator, self)._save_state() + + +def _instantiate(): + default_name = 'charms.coordinator.SimpleCoordinator' + full_name = options.get('class', default_name) + components = full_name.split('.') + module = '.'.join(components[:-1]) + name = components[-1] + + if not module: + module = 'charms.coordinator' + + class_ = getattr(importlib.import_module(module), name) + + assert issubclass(class_, BaseCoordinator), \ + '{} is not a BaseCoordinator subclass'.format(full_name) + + try: + # The Coordinator layer defines its own peer relation, as it + # can't piggy back on an existing peer relation that may not + # exist. + return class_(peer_relation_name='coordinator') + finally: + log('Using {} coordinator'.format(full_name), hookenv.DEBUG) + + +# Instantiate the BaseCoordinator singleton, which installs +# its charmhelpers.core.atstart() hooks. +coordinator = _instantiate() diff --git a/kubernetes-control-plane/lib/charms/layer/__init__.py b/kubernetes-control-plane/lib/charms/layer/__init__.py new file mode 100644 index 0000000..a8e0c64 --- /dev/null +++ b/kubernetes-control-plane/lib/charms/layer/__init__.py @@ -0,0 +1,60 @@ +import sys +from importlib import import_module +from pathlib import Path + + +def import_layer_libs(): + """ + Ensure that all layer libraries are imported. 
+ + This makes it possible to do the following: + + from charms import layer + + layer.foo.do_foo_thing() + + Note: This function must be called after bootstrap. + """ + for module_file in Path('lib/charms/layer').glob('*'): + module_name = module_file.stem + if module_name in ('__init__', 'basic', 'execd') or not ( + module_file.suffix == '.py' or module_file.is_dir() + ): + continue + import_module('charms.layer.{}'.format(module_name)) + + +# Terrible hack to support the old terrible interface. +# Try to get people to call layer.options.get() instead so +# that we can remove this garbage. +# Cribbed from https://stackoverfLow.com/a/48100440/4941864 +class OptionsBackwardsCompatibilityHack(sys.modules[__name__].__class__): + def __call__(self, section=None, layer_file=None): + if layer_file is None: + return self.get(section=section) + else: + return self.get(section=section, + layer_file=Path(layer_file)) + + +def patch_options_interface(): + from charms.layer import options + if sys.version_info.minor >= 5: + options.__class__ = OptionsBackwardsCompatibilityHack + else: + # Py 3.4 doesn't support changing the __class__, so we have to do it + # another way. The last line is needed because we already have a + # reference that doesn't get updated with sys.modules. + name = options.__name__ + hack = OptionsBackwardsCompatibilityHack(name) + hack.get = options.get + sys.modules[name] = hack + sys.modules[__name__].options = hack + + +try: + patch_options_interface() +except ImportError: + # This may fail if pyyaml hasn't been installed yet. But in that + # case, the bootstrap logic will try it again once it has. 
+ pass diff --git a/kubernetes-control-plane/lib/charms/layer/basic.py b/kubernetes-control-plane/lib/charms/layer/basic.py new file mode 100644 index 0000000..9122f7c --- /dev/null +++ b/kubernetes-control-plane/lib/charms/layer/basic.py @@ -0,0 +1,508 @@ +import os +import sys +import re +import shutil +from distutils.version import LooseVersion +from pkg_resources import Requirement +from glob import glob +from subprocess import check_call, check_output, CalledProcessError +from time import sleep + +from charms import layer +from charms.layer.execd import execd_preinstall + + +def _get_subprocess_env(): + env = os.environ.copy() + env['LANG'] = env.get('LANG', 'C.UTF-8') + return env + + +def get_series(): + """ + Return series for a few known OS:es. + Tested as of 2019 november: + * centos6, centos7, rhel6. + * bionic + """ + series = "" + + # Looking for content in /etc/os-release + # works for ubuntu + some centos + if os.path.isfile('/etc/os-release'): + d = {} + with open('/etc/os-release', 'r') as rel: + for l in rel: + if not re.match(r'^\s*$', l): + k, v = l.split('=') + d[k.strip()] = v.strip().replace('"', '') + series = "{ID}{VERSION_ID}".format(**d) + + # Looking for content in /etc/redhat-release + # works for redhat enterprise systems + elif os.path.isfile('/etc/redhat-release'): + with open('/etc/redhat-release', 'r') as redhatlsb: + # CentOS Linux release 7.7.1908 (Core) + line = redhatlsb.readline() + release = int(line.split("release")[1].split()[0][0]) + series = "centos" + str(release) + + # Looking for content in /etc/lsb-release + # works for ubuntu + elif os.path.isfile('/etc/lsb-release'): + d = {} + with open('/etc/lsb-release', 'r') as lsb: + for l in lsb: + k, v = l.split('=') + d[k.strip()] = v.strip() + series = d['DISTRIB_CODENAME'] + + # This is what happens if we cant figure out the OS. 
+ else: + series = "unknown" + return series + + +def bootstrap_charm_deps(): + """ + Set up the base charm dependencies so that the reactive system can run. + """ + # execd must happen first, before any attempt to install packages or + # access the network, because sites use this hook to do bespoke + # configuration and install secrets so the rest of this bootstrap + # and the charm itself can actually succeed. This call does nothing + # unless the operator has created and populated $JUJU_CHARM_DIR/exec.d. + execd_preinstall() + # ensure that $JUJU_CHARM_DIR/bin is on the path, for helper scripts + + series = get_series() + + # OMG?! is build-essentials needed? + ubuntu_packages = ['python3-pip', + 'python3-setuptools', + 'python3-yaml', + 'python3-dev', + 'python3-wheel', + 'build-essential'] + + # I'm not going to "yum group info "Development Tools" + # omitting above madness + centos_packages = ['python3-pip', + 'python3-setuptools', + 'python3-devel', + 'python3-wheel'] + + packages_needed = [] + if 'centos' in series: + packages_needed = centos_packages + else: + packages_needed = ubuntu_packages + + charm_dir = os.environ['JUJU_CHARM_DIR'] + os.environ['PATH'] += ':%s' % os.path.join(charm_dir, 'bin') + venv = os.path.abspath('../.venv') + vbin = os.path.join(venv, 'bin') + vpip = os.path.join(vbin, 'pip') + vpy = os.path.join(vbin, 'python') + hook_name = os.path.basename(sys.argv[0]) + is_bootstrapped = os.path.exists('wheelhouse/.bootstrapped') + is_charm_upgrade = hook_name == 'upgrade-charm' + is_series_upgrade = hook_name == 'post-series-upgrade' + is_post_upgrade = os.path.exists('wheelhouse/.upgraded') + is_upgrade = (not is_post_upgrade and + (is_charm_upgrade or is_series_upgrade)) + if is_bootstrapped and not is_upgrade: + # older subordinates might have downgraded charm-env, so we should + # restore it if necessary + install_or_update_charm_env() + activate_venv() + # the .upgrade file prevents us from getting stuck in a loop + # when re-execing 
to activate the venv; at this point, we've + # activated the venv, so it's safe to clear it + if is_post_upgrade: + os.unlink('wheelhouse/.upgraded') + return + if os.path.exists(venv): + try: + # focal installs or upgrades prior to PR 160 could leave the venv + # in a broken state which would prevent subsequent charm upgrades + _load_installed_versions(vpip) + except CalledProcessError: + is_broken_venv = True + else: + is_broken_venv = False + if is_upgrade or is_broken_venv: + # All upgrades should do a full clear of the venv, rather than + # just updating it, to bring in updates to Python itself + shutil.rmtree(venv) + if is_upgrade: + if os.path.exists('wheelhouse/.bootstrapped'): + os.unlink('wheelhouse/.bootstrapped') + # bootstrap wheelhouse + if os.path.exists('wheelhouse'): + pre_eoan = series in ('ubuntu12.04', 'precise', + 'ubuntu14.04', 'trusty', + 'ubuntu16.04', 'xenial', + 'ubuntu18.04', 'bionic') + pydistutils_lines = [ + "[easy_install]\n", + "find_links = file://{}/wheelhouse/\n".format(charm_dir), + "no_index=True\n", + "index_url=\n", # deliberately nothing here; disables it. 
+ ] + if pre_eoan: + pydistutils_lines.append("allow_hosts = ''\n") + with open('/root/.pydistutils.cfg', 'w') as fp: + # make sure that easy_install also only uses the wheelhouse + # (see https://github.com/pypa/pip/issues/410) + fp.writelines(pydistutils_lines) + if 'centos' in series: + yum_install(packages_needed) + else: + apt_install(packages_needed) + from charms.layer import options + cfg = options.get('basic') + # include packages defined in layer.yaml + if 'centos' in series: + yum_install(cfg.get('packages', [])) + else: + apt_install(cfg.get('packages', [])) + # if we're using a venv, set it up + if cfg.get('use_venv'): + if not os.path.exists(venv): + series = get_series() + if series in ('ubuntu12.04', 'precise', + 'ubuntu14.04', 'trusty'): + apt_install(['python-virtualenv']) + elif 'centos' in series: + yum_install(['python-virtualenv']) + else: + apt_install(['virtualenv']) + cmd = ['virtualenv', '-ppython3', '--never-download', venv] + if cfg.get('include_system_packages'): + cmd.append('--system-site-packages') + check_call(cmd, env=_get_subprocess_env()) + os.environ['PATH'] = ':'.join([vbin, os.environ['PATH']]) + pip = vpip + else: + pip = 'pip3' + # save a copy of system pip to prevent `pip3 install -U pip` + # from changing it + if os.path.exists('/usr/bin/pip'): + shutil.copy2('/usr/bin/pip', '/usr/bin/pip.save') + pre_install_pkgs = ['pip', 'setuptools', 'setuptools-scm'] + # we bundle these packages to work around bugs in older versions (such + # as https://github.com/pypa/pip/issues/56), but if the system already + # provided a newer version, downgrading it can cause other problems + _update_if_newer(pip, pre_install_pkgs) + # install the rest of the wheelhouse deps (extract the pkg names into + # a set so that we can ignore the pre-install packages and let pip + # choose the best version in case there are multiple from layer + # conflicts) + _versions = _load_wheelhouse_versions() + _pkgs = _versions.keys() - set(pre_install_pkgs) + # 
Jinja2 3+ relies on MarkupSafe actually being installed prior to + # attempting to be installed from the wheelhouse. Thus, if MarkupSafe + # and/or wheel are in _pkgs, then install them first. + _pre_packages = [p for p in _pkgs if p in ('wheel', 'MarkupSafe')] + _pkgs = [p for p in _pkgs if p not in _pre_packages] + for _pkgs_set in (_pre_packages, _pkgs): + # add back the versions such that each package in pkgs is + # ==. + # This ensures that pip 20.3.4+ will install the packages from the + # wheelhouse without (erroneously) flagging an error. + pkgs = _add_back_versions(_pkgs_set, _versions) + reinstall_flag = '--force-reinstall' + # if not cfg.get('use_venv', True) and pre_eoan: + if not cfg.get('use_venv', True): + reinstall_flag = '--ignore-installed' + check_call([pip, 'install', '-U', reinstall_flag, '--no-index', + '--no-cache-dir', '-f', 'wheelhouse'] + list(pkgs), + env=_get_subprocess_env()) + # re-enable installation from pypi + os.remove('/root/.pydistutils.cfg') + + # install pyyaml for centos7, since, unlike the ubuntu image, the + # default image for centos doesn't include pyyaml; see the discussion: + # https://discourse.jujucharms.com/t/charms-for-centos-lets-begin + if 'centos' in series: + check_call([pip, 'install', '-U', 'pyyaml'], + env=_get_subprocess_env()) + + # install python packages from layer options + if cfg.get('python_packages'): + check_call([pip, 'install', '-U'] + cfg.get('python_packages'), + env=_get_subprocess_env()) + if not cfg.get('use_venv'): + # restore system pip to prevent `pip3 install -U pip` + # from changing it + if os.path.exists('/usr/bin/pip.save'): + shutil.copy2('/usr/bin/pip.save', '/usr/bin/pip') + os.remove('/usr/bin/pip.save') + # setup wrappers to ensure envs are used for scripts + install_or_update_charm_env() + for wrapper in ('charms.reactive', 'charms.reactive.sh', + 'chlp', 'layer_option'): + src = os.path.join('/usr/local/sbin', 'charm-env') + dst = os.path.join('/usr/local/sbin', wrapper) + if not 
os.path.exists(dst): + os.symlink(src, dst) + if cfg.get('use_venv'): + shutil.copy2('bin/layer_option', vbin) + else: + shutil.copy2('bin/layer_option', '/usr/local/bin/') + # re-link the charm copy to the wrapper in case charms + # call bin/layer_option directly (as was the old pattern) + os.remove('bin/layer_option') + os.symlink('/usr/local/sbin/layer_option', 'bin/layer_option') + # flag us as having already bootstrapped so we don't do it again + open('wheelhouse/.bootstrapped', 'w').close() + if is_upgrade: + # flag us as having already upgraded so we don't do it again + open('wheelhouse/.upgraded', 'w').close() + # Ensure that the newly bootstrapped libs are available. + # Note: this only seems to be an issue with namespace packages. + # Non-namespace-package libs (e.g., charmhelpers) are available + # without having to reload the interpreter. :/ + reload_interpreter(vpy if cfg.get('use_venv') else sys.argv[0]) + + +def _load_installed_versions(pip): + pip_freeze = check_output([pip, 'freeze']).decode('utf8') + versions = {} + for pkg_ver in pip_freeze.splitlines(): + try: + req = Requirement.parse(pkg_ver) + except ValueError: + continue + versions.update({ + req.project_name: LooseVersion(ver) + for op, ver in req.specs if op == '==' + }) + return versions + + +def _load_wheelhouse_versions(): + versions = {} + for wheel in glob('wheelhouse/*'): + pkg, ver = os.path.basename(wheel).rsplit('-', 1) + # nb: LooseVersion ignores the file extension + versions[pkg.replace('_', '-')] = LooseVersion(ver) + return versions + + +def _add_back_versions(pkgs, versions): + """Add back the version strings to each of the packages. + + The versions are LooseVersion() from _load_wheelhouse_versions(). This + function strips the ".zip" or ".tar.gz" from the end of the version string + and adds it back to the package in the form of == + + If a package name is not a key in the versions dictionary, then it is + returned in the list unchanged. 
+ + :param pkgs: A list of package names + :type pkgs: List[str] + :param versions: A map of package to LooseVersion + :type versions: Dict[str, LooseVersion] + :returns: A list of (maybe) versioned packages + :rtype: List[str] + """ + def _strip_ext(s): + """Strip an extension (if it exists) from the string + + :param s: the string to strip an extension off if it exists + :type s: str + :returns: string without an extension of .zip or .tar.gz + :rtype: str + """ + for ending in [".zip", ".tar.gz"]: + if s.endswith(ending): + return s[:-len(ending)] + return s + + def _maybe_add_version(pkg): + """Maybe add back the version number to a package if it exists. + + Adds the version number, if the package exists in the lexically + captured `versions` dictionary, in the form ==. Strips + the extension if it exists. + + :param pkg: the package name to (maybe) add the version number to. + :type pkg: str + """ + try: + return "{}=={}".format(pkg, _strip_ext(str(versions[pkg]))) + except KeyError: + pass + return pkg + + return [_maybe_add_version(pkg) for pkg in pkgs] + + +def _update_if_newer(pip, pkgs): + installed = _load_installed_versions(pip) + wheelhouse = _load_wheelhouse_versions() + for pkg in pkgs: + if pkg not in installed or wheelhouse[pkg] > installed[pkg]: + check_call([pip, 'install', '-U', '--no-index', '-f', 'wheelhouse', + pkg], env=_get_subprocess_env()) + + +def install_or_update_charm_env(): + # On Trusty python3-pkg-resources is not installed + try: + from pkg_resources import parse_version + except ImportError: + apt_install(['python3-pkg-resources']) + from pkg_resources import parse_version + + try: + installed_version = parse_version( + check_output(['/usr/local/sbin/charm-env', + '--version']).decode('utf8')) + except (CalledProcessError, FileNotFoundError): + installed_version = parse_version('0.0.0') + try: + bundled_version = parse_version( + check_output(['bin/charm-env', + '--version']).decode('utf8')) + except (CalledProcessError, 
FileNotFoundError): + bundled_version = parse_version('0.0.0') + if installed_version < bundled_version: + shutil.copy2('bin/charm-env', '/usr/local/sbin/') + + +def activate_venv(): + """ + Activate the venv if enabled in ``layer.yaml``. + + This is handled automatically for normal hooks, but actions might + need to invoke this manually, using something like: + + # Load modules from $JUJU_CHARM_DIR/lib + import sys + sys.path.append('lib') + + from charms.layer.basic import activate_venv + activate_venv() + + This will ensure that modules installed in the charm's + virtual environment are available to the action. + """ + from charms.layer import options + venv = os.path.abspath('../.venv') + vbin = os.path.join(venv, 'bin') + vpy = os.path.join(vbin, 'python') + use_venv = options.get('basic', 'use_venv') + if use_venv and '.venv' not in sys.executable: + # activate the venv + os.environ['PATH'] = ':'.join([vbin, os.environ['PATH']]) + reload_interpreter(vpy) + layer.patch_options_interface() + layer.import_layer_libs() + + +def reload_interpreter(python): + """ + Reload the python interpreter to ensure that all deps are available. + + Newly installed modules in namespace packages sometimes seemt to + not be picked up by Python 3. + """ + os.execve(python, [python] + list(sys.argv), os.environ) + + +def apt_install(packages): + """ + Install apt packages. + + This ensures a consistent set of options that are often missed but + should really be set. 
+ """ + if isinstance(packages, (str, bytes)): + packages = [packages] + + env = _get_subprocess_env() + + if 'DEBIAN_FRONTEND' not in env: + env['DEBIAN_FRONTEND'] = 'noninteractive' + + cmd = ['apt-get', + '--option=Dpkg::Options::=--force-confold', + '--assume-yes', + 'install'] + for attempt in range(3): + try: + check_call(cmd + packages, env=env) + except CalledProcessError: + if attempt == 2: # third attempt + raise + try: + # sometimes apt-get update needs to be run + check_call(['apt-get', 'update'], env=env) + except CalledProcessError: + # sometimes it's a dpkg lock issue + pass + sleep(5) + else: + break + + +def yum_install(packages): + """ Installs packages with yum. + This function largely mimics the apt_install function for consistency. + """ + if packages: + env = os.environ.copy() + cmd = ['yum', '-y', 'install'] + for attempt in range(3): + try: + check_call(cmd + packages, env=env) + except CalledProcessError: + if attempt == 2: + raise + try: + check_call(['yum', 'update'], env=env) + except CalledProcessError: + pass + sleep(5) + else: + break + else: + pass + + +def init_config_states(): + import yaml + from charmhelpers.core import hookenv + from charms.reactive import set_state + from charms.reactive import toggle_state + config = hookenv.config() + config_defaults = {} + config_defs = {} + config_yaml = os.path.join(hookenv.charm_dir(), 'config.yaml') + if os.path.exists(config_yaml): + with open(config_yaml) as fp: + config_defs = yaml.safe_load(fp).get('options', {}) + config_defaults = {key: value.get('default') + for key, value in config_defs.items()} + for opt in config_defs.keys(): + if config.changed(opt): + set_state('config.changed') + set_state('config.changed.{}'.format(opt)) + toggle_state('config.set.{}'.format(opt), config.get(opt)) + toggle_state('config.default.{}'.format(opt), + config.get(opt) == config_defaults[opt]) + + +def clear_config_states(): + from charmhelpers.core import hookenv, unitdata + from charms.reactive 
import remove_state + config = hookenv.config() + remove_state('config.changed') + for opt in config.keys(): + remove_state('config.changed.{}'.format(opt)) + remove_state('config.set.{}'.format(opt)) + remove_state('config.default.{}'.format(opt)) + unitdata.kv().flush() diff --git a/kubernetes-control-plane/lib/charms/layer/execd.py b/kubernetes-control-plane/lib/charms/layer/execd.py new file mode 100644 index 0000000..438d9a1 --- /dev/null +++ b/kubernetes-control-plane/lib/charms/layer/execd.py @@ -0,0 +1,114 @@ +# Copyright 2014-2016 Canonical Limited. +# +# This file is part of layer-basic, the reactive base layer for Juju. +# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see . + +# This module may only import from the Python standard library. +import os +import sys +import subprocess +import time + +''' +execd/preinstall + +Read the layer-basic docs for more info on how to use this feature. 
+https://charmsreactive.readthedocs.io/en/latest/layer-basic.html#exec-d-support +''' + + +def default_execd_dir(): + return os.path.join(os.environ['JUJU_CHARM_DIR'], 'exec.d') + + +def execd_module_paths(execd_dir=None): + """Generate a list of full paths to modules within execd_dir.""" + if not execd_dir: + execd_dir = default_execd_dir() + + if not os.path.exists(execd_dir): + return + + for subpath in os.listdir(execd_dir): + module = os.path.join(execd_dir, subpath) + if os.path.isdir(module): + yield module + + +def execd_submodule_paths(command, execd_dir=None): + """Generate a list of full paths to the specified command within exec_dir. + """ + for module_path in execd_module_paths(execd_dir): + path = os.path.join(module_path, command) + if os.access(path, os.X_OK) and os.path.isfile(path): + yield path + + +def execd_sentinel_path(submodule_path): + module_path = os.path.dirname(submodule_path) + execd_path = os.path.dirname(module_path) + module_name = os.path.basename(module_path) + submodule_name = os.path.basename(submodule_path) + return os.path.join(execd_path, + '.{}_{}.done'.format(module_name, submodule_name)) + + +def execd_run(command, execd_dir=None, stop_on_error=True, stderr=None): + """Run command for each module within execd_dir which defines it.""" + if stderr is None: + stderr = sys.stdout + for submodule_path in execd_submodule_paths(command, execd_dir): + # Only run each execd once. We cannot simply run them in the + # install hook, as potentially storage hooks are run before that. + # We cannot rely on them being idempotent. 
+ sentinel = execd_sentinel_path(submodule_path) + if os.path.exists(sentinel): + continue + + try: + subprocess.check_call([submodule_path], stderr=stderr, + universal_newlines=True) + with open(sentinel, 'w') as f: + f.write('{} ran successfully {}\n'.format(submodule_path, + time.ctime())) + f.write('Removing this file will cause it to be run again\n') + except subprocess.CalledProcessError as e: + # Logs get the details. We can't use juju-log, as the + # output may be substantial and exceed command line + # length limits. + print("ERROR ({}) running {}".format(e.returncode, e.cmd), + file=stderr) + print("STDOUT<>> `get_version('kubelet') + (1, 6, 0) + + """ + cmd = "{} --version".format(bin_name).split() + version_string = subprocess.check_output(cmd).decode("utf-8") + return tuple(int(q) for q in re.findall("[0-9]+", version_string)[:3]) + + +def retry(times, delay_secs): + """Decorator for retrying a method call. + + Args: + times: How many times should we retry before giving up + delay_secs: Delay in secs + + Returns: A callable that would return the last call outcome + """ + + def retry_decorator(func): + """Decorator to wrap the function provided. 
+ + Args: + func: Provided function should return either True od False + + Returns: A callable that would return the last call outcome + + """ + + def _wrapped(*args, **kwargs): + res = func(*args, **kwargs) + attempt = 0 + while not res and attempt < times: + sleep(delay_secs) + res = func(*args, **kwargs) + if res: + break + attempt += 1 + return res + + return _wrapped + + return retry_decorator + + +def calculate_resource_checksum(resource): + """Calculate a checksum for a resource""" + md5 = hashlib.md5() + path = hookenv.resource_get(resource) + if path: + with open(path, "rb") as f: + data = f.read() + md5.update(data) + return md5.hexdigest() + + +def get_resource_checksum_db_key(checksum_prefix, resource): + """Convert a resource name to a resource checksum database key.""" + return checksum_prefix + resource + + +def migrate_resource_checksums(checksum_prefix, snap_resources): + """Migrate resource checksums from the old schema to the new one""" + for resource in snap_resources: + new_key = get_resource_checksum_db_key(checksum_prefix, resource) + if not db.get(new_key): + path = hookenv.resource_get(resource) + if path: + # old key from charms.reactive.helpers.any_file_changed + old_key = "reactive.files_changed." + path + old_checksum = db.get(old_key) + db.set(new_key, old_checksum) + else: + # No resource is attached. Previously, this meant no checksum + # would be calculated and stored. But now we calculate it as if + # it is a 0-byte resource, so let's go ahead and do that. 
+ zero_checksum = hashlib.md5().hexdigest() + db.set(new_key, zero_checksum) + + +def check_resources_for_upgrade_needed(checksum_prefix, snap_resources): + hookenv.status_set("maintenance", "Checking resources") + for resource in snap_resources: + key = get_resource_checksum_db_key(checksum_prefix, resource) + old_checksum = db.get(key) + new_checksum = calculate_resource_checksum(resource) + if new_checksum != old_checksum: + return True + return False + + +def calculate_and_store_resource_checksums(checksum_prefix, snap_resources): + for resource in snap_resources: + key = get_resource_checksum_db_key(checksum_prefix, resource) + checksum = calculate_resource_checksum(resource) + db.set(key, checksum) + + +def get_ingress_address(endpoint_name, ignore_addresses=None): + try: + network_info = hookenv.network_get(endpoint_name) + except NotImplementedError: + network_info = {} + + if not network_info or "ingress-addresses" not in network_info: + # if they don't have ingress-addresses they are running a juju that + # doesn't support spaces, so just return the private address + return hookenv.unit_get("private-address") + + excluded_ips = [] + excluded_interfaces = ["vxlan", "kube", "wg", "docker", "cali", "virbr", "cni", "flannel"] + for addr in network_info["bind-addresses"]: + for prefix in excluded_interfaces: + if addr["interface-name"].startswith(prefix): + for ip in addr["addresses"]: + excluded_ips.append(ip["value"]) + + ingress_addresses = network_info["ingress-addresses"] + network_info["ingress-addresses"] = [ip for ip in ingress_addresses if ip not in excluded_ips] + + addresses = network_info["ingress-addresses"] + + if ignore_addresses: + hookenv.log("ingress-addresses before filtering: {}".format(addresses)) + iter_filter = filter(lambda item: item not in ignore_addresses, addresses) + addresses = list(iter_filter) + hookenv.log("ingress-addresses after filtering: {}".format(addresses)) + + # Need to prefer non-fan IP addresses due to various issues, 
e.g. + # https://bugs.launchpad.net/charm-gcp-integrator/+bug/1822997 + # Fan typically likes to use IPs in the 240.0.0.0/4 block, so we'll + # prioritize those last. Not technically correct, but good enough. + try: + sort_key = lambda a: int(a.partition(".")[0]) >= 240 # noqa: E731 + addresses = sorted(addresses, key=sort_key) + except Exception: + hookenv.log(traceback.format_exc()) + + return addresses[0] + + +def get_ingress_address6(endpoint_name): + try: + network_info = hookenv.network_get(endpoint_name) + except NotImplementedError: + network_info = {} + + if not network_info or "ingress-addresses" not in network_info: + return None + + addresses = network_info["ingress-addresses"] + + for addr in addresses: + ip_addr = ipaddress.ip_interface(addr).ip + if ip_addr.version == 6: + return str(ip_addr) + else: + return None + + +def service_restart(service_name): + hookenv.status_set("maintenance", "Restarting {0} service".format(service_name)) + host.service_restart(service_name) + + +def service_start(service_name): + hookenv.log("Starting {0} service.".format(service_name)) + host.service_stop(service_name) + + +def service_stop(service_name): + hookenv.log("Stopping {0} service.".format(service_name)) + host.service_stop(service_name) + + +def arch(): + """Return the package architecture as a string. Raise an exception if the + architecture is not supported by kubernetes.""" + # Get the package architecture for this system. + architecture = check_output(["dpkg", "--print-architecture"]).rstrip() + # Convert the binary result into a string. 
+ architecture = architecture.decode("utf-8") + return architecture + + +def get_service_ip(service, namespace="kube-system", errors_fatal=True): + try: + output = kubectl( + "get", "service", "--namespace", namespace, service, "--output", "json" + ) + except CalledProcessError: + if errors_fatal: + raise + else: + return None + else: + svc = json.loads(output.decode()) + return svc["spec"]["clusterIP"] + + +def kubectl(*args): + """Run a kubectl cli command with a config file. Returns stdout and throws + an error if the command fails.""" + command = ["kubectl", "--kubeconfig=" + kubeclientconfig_path] + list(args) + hookenv.log("Executing {}".format(command)) + return check_output(command) + + +def kubectl_success(*args): + """Runs kubectl with the given args. Returns True if successful, False if + not.""" + try: + kubectl(*args) + return True + except CalledProcessError: + return False + + +def kubectl_manifest(operation, manifest): + """Wrap the kubectl creation command when using filepath resources + :param operation - one of get, create, delete, replace + :param manifest - filepath to the manifest + """ + # Deletions are a special case + if operation == "delete": + # Ensure we immediately remove requested resources with --now + return kubectl_success(operation, "-f", manifest, "--now") + else: + # Guard against an error re-creating the same manifest multiple times + if operation == "create": + # If we already have the definition, its probably safe to assume + # creation was true. 
+ if kubectl_success("get", "-f", manifest): + hookenv.log("Skipping definition for {}".format(manifest)) + return True + # Execute the requested command that did not match any of the special + # cases above + return kubectl_success(operation, "-f", manifest) + + +def get_node_name(): + kubelet_extra_args = parse_extra_args("kubelet-extra-args") + cloud_provider = kubelet_extra_args.get("cloud-provider", "") + if is_state("endpoint.aws.ready"): + cloud_provider = "aws" + elif is_state("endpoint.gcp.ready"): + cloud_provider = "gce" + elif is_state("endpoint.openstack.ready"): + cloud_provider = "openstack" + elif is_state("endpoint.vsphere.ready"): + cloud_provider = "vsphere" + elif is_state("endpoint.azure.ready"): + cloud_provider = "azure" + if cloud_provider == "aws": + return getfqdn().lower() + else: + return gethostname().lower() + + +def create_kubeconfig( + kubeconfig, + server, + ca, + key=None, + certificate=None, + user="ubuntu", + context="juju-context", + cluster="juju-cluster", + password=None, + token=None, + keystone=False, + aws_iam_cluster_id=None, +): + """Create a configuration for Kubernetes based on path using the supplied + arguments for values of the Kubernetes server, CA, key, certificate, user + context and cluster.""" + if not key and not certificate and not password and not token: + raise ValueError("Missing authentication mechanism.") + elif key and not certificate: + raise ValueError("Missing certificate.") + elif not key and certificate: + raise ValueError("Missing key.") + elif token and password: + # token and password are mutually exclusive. Error early if both are + # present. The developer has requested an impossible situation. + # see: kubectl config set-credentials --help + raise ValueError("Token and Password are mutually exclusive.") + + old_kubeconfig = Path(kubeconfig) + new_kubeconfig = Path(str(kubeconfig) + ".new") + + # Create the config file with the address of the master server. 
+ cmd = ( + "kubectl config --kubeconfig={0} set-cluster {1} " + "--server={2} --certificate-authority={3} --embed-certs=true" + ) + check_call(split(cmd.format(new_kubeconfig, cluster, server, ca))) + # Delete old users + cmd = "kubectl config --kubeconfig={0} unset users" + check_call(split(cmd.format(new_kubeconfig))) + # Create the credentials using the client flags. + cmd = "kubectl config --kubeconfig={0} " "set-credentials {1} ".format( + new_kubeconfig, user + ) + + if key and certificate: + cmd = ( + "{0} --client-key={1} --client-certificate={2} " + "--embed-certs=true".format(cmd, key, certificate) + ) + if password: + cmd = "{0} --username={1} --password={2}".format(cmd, user, password) + # This is mutually exclusive from password. They will not work together. + if token: + cmd = "{0} --token={1}".format(cmd, token) + check_call(split(cmd)) + # Create a default context with the cluster. + cmd = "kubectl config --kubeconfig={0} set-context {1} " "--cluster={2} --user={3}" + check_call(split(cmd.format(new_kubeconfig, context, cluster, user))) + # Make the config use this new context. 
+ cmd = "kubectl config --kubeconfig={0} use-context {1}" + check_call(split(cmd.format(new_kubeconfig, context))) + if keystone: + # create keystone user + cmd = "kubectl config --kubeconfig={0} " "set-credentials keystone-user".format( + new_kubeconfig + ) + check_call(split(cmd)) + # create keystone context + cmd = ( + "kubectl config --kubeconfig={0} " + "set-context --cluster={1} " + "--user=keystone-user keystone".format(new_kubeconfig, cluster) + ) + check_call(split(cmd)) + # use keystone context + cmd = "kubectl config --kubeconfig={0} " "use-context keystone".format( + new_kubeconfig + ) + check_call(split(cmd)) + # manually add exec command until kubectl can do it for us + with open(new_kubeconfig, "r") as f: + content = f.read() + content = content.replace( + """- name: keystone-user + user: {}""", + """- name: keystone-user + user: + exec: + command: "/snap/bin/client-keystone-auth" + apiVersion: "client.authentication.k8s.io/v1beta1" +""", + ) + with open(new_kubeconfig, "w") as f: + f.write(content) + if aws_iam_cluster_id: + # create aws-iam context + cmd = ( + "kubectl config --kubeconfig={0} " + "set-context --cluster={1} " + "--user=aws-iam-user aws-iam-authenticator" + ) + check_call(split(cmd.format(new_kubeconfig, cluster))) + + # append a user for aws-iam + cmd = ( + "kubectl --kubeconfig={0} config set-credentials " + "aws-iam-user --exec-command=aws-iam-authenticator " + '--exec-arg="token" --exec-arg="-i" --exec-arg="{1}" ' + '--exec-arg="-r" --exec-arg="<>" ' + "--exec-api-version=client.authentication.k8s.io/v1alpha1" + ) + check_call(split(cmd.format(new_kubeconfig, aws_iam_cluster_id))) + + # not going to use aws-iam context by default since we don't have + # the desired arn. This will make the config not usable if copied. 
+ + # cmd = 'kubectl config --kubeconfig={0} ' \ + # 'use-context aws-iam-authenticator'.format(new_kubeconfig) + # check_call(split(cmd)) + if old_kubeconfig.exists(): + changed = new_kubeconfig.read_text() != old_kubeconfig.read_text() + else: + changed = True + if changed: + new_kubeconfig.rename(old_kubeconfig) + + +def parse_extra_args(config_key): + elements = hookenv.config().get(config_key, "").split() + args = {} + + for element in elements: + if "=" in element: + key, _, value = element.partition("=") + args[key] = value + else: + args[element] = "true" + + return args + + +def configure_kubernetes_service(key, service, base_args, extra_args_key): + db = unitdata.kv() + + prev_args_key = key + service + prev_snap_args = db.get(prev_args_key) or {} + + extra_args = parse_extra_args(extra_args_key) + + args = {} + args.update(base_args) + args.update(extra_args) + + # CIS benchmark action may inject kv config to pass failing tests. Merge + # these after the func args as they should take precedence. + cis_args_key = "cis-" + service + cis_args = db.get(cis_args_key) or {} + args.update(cis_args) + + # Remove any args with 'None' values (all k8s args are 'k=v') and + # construct an arg string for use by 'snap set'. + args = {k: v for k, v in args.items() if v is not None} + args = ['--%s="%s"' % arg for arg in args.items()] + args = " ".join(args) + + snap_opts = {} + for arg in prev_snap_args: + # remove previous args by setting to null + snap_opts[arg] = "null" + snap_opts["args"] = args + snap_opts = ["%s=%s" % opt for opt in snap_opts.items()] + + cmd = ["snap", "set", service] + snap_opts + check_call(cmd) + + # Now that we've started doing snap configuration through the "args" + # option, we should never need to clear previous args again. 
+ db.set(prev_args_key, {}) + + +def _snap_common_path(component): + return Path("/var/snap/{}/common".format(component)) + + +def cloud_config_path(component): + return _snap_common_path(component) / "cloud-config.conf" + + +def _gcp_creds_path(component): + return _snap_common_path(component) / "gcp-creds.json" + + +def _daemon_env_path(component): + return _snap_common_path(component) / "environment" + + +def _cloud_endpoint_ca_path(component): + return _snap_common_path(component) / "cloud-endpoint-ca.crt" + + +def encryption_config_path(): + apiserver_snap_common_path = _snap_common_path("kube-apiserver") + encryption_conf_dir = apiserver_snap_common_path / "encryption" + return encryption_conf_dir / "encryption_config.yaml" + + +def write_gcp_snap_config(component): + # gcp requires additional credentials setup + gcp = endpoint_from_flag("endpoint.gcp.ready") + creds_path = _gcp_creds_path(component) + with creds_path.open("w") as fp: + os.fchmod(fp.fileno(), 0o600) + fp.write(gcp.credentials) + + # create a cloud-config file that sets token-url to nil to make the + # services use the creds env var instead of the metadata server, as + # well as making the cluster multizone + comp_cloud_config_path = cloud_config_path(component) + comp_cloud_config_path.write_text( + "[Global]\n" "token-url = nil\n" "multizone = true\n" + ) + + daemon_env_path = _daemon_env_path(component) + if daemon_env_path.exists(): + daemon_env = daemon_env_path.read_text() + if not daemon_env.endswith("\n"): + daemon_env += "\n" + else: + daemon_env = "" + if gcp_creds_env_key not in daemon_env: + daemon_env += "{}={}\n".format(gcp_creds_env_key, creds_path) + daemon_env_path.parent.mkdir(parents=True, exist_ok=True) + daemon_env_path.write_text(daemon_env) + + +def generate_openstack_cloud_config(): + # openstack requires additional credentials setup + openstack = endpoint_from_flag("endpoint.openstack.ready") + + lines = [ + "[Global]", + "auth-url = {}".format(openstack.auth_url), + 
"region = {}".format(openstack.region), + "username = {}".format(openstack.username), + "password = {}".format(openstack.password), + "tenant-name = {}".format(openstack.project_name), + "domain-name = {}".format(openstack.user_domain_name), + "tenant-domain-name = {}".format(openstack.project_domain_name), + ] + if openstack.endpoint_tls_ca: + lines.append("ca-file = /etc/config/endpoint-ca.cert") + + lines.extend( + [ + "", + "[LoadBalancer]", + ] + ) + + if openstack.has_octavia in (True, None): + # Newer integrator charm will detect whether underlying OpenStack has + # Octavia enabled so we can set this intelligently. If we're still + # related to an older integrator, though, default to assuming Octavia + # is available. + lines.append("use-octavia = true") + else: + lines.append("use-octavia = false") + lines.append("lb-provider = haproxy") + if openstack.subnet_id: + lines.append("subnet-id = {}".format(openstack.subnet_id)) + if openstack.floating_network_id: + lines.append("floating-network-id = {}".format(openstack.floating_network_id)) + if openstack.lb_method: + lines.append("lb-method = {}".format(openstack.lb_method)) + if openstack.internal_lb: + lines.append("internal-lb = true") + if openstack.manage_security_groups: + lines.append( + "manage-security-groups = {}".format(openstack.manage_security_groups) + ) + if any( + [openstack.bs_version, openstack.trust_device_path, openstack.ignore_volume_az] + ): + lines.append("") + lines.append("[BlockStorage]") + if openstack.bs_version is not None: + lines.append("bs-version = {}".format(openstack.bs_version)) + if openstack.trust_device_path is not None: + lines.append("trust-device-path = {}".format(openstack.trust_device_path)) + if openstack.ignore_volume_az is not None: + lines.append("ignore-volume-az = {}".format(openstack.ignore_volume_az)) + return "\n".join(lines) + "\n" + + +def write_azure_snap_config(component): + azure = endpoint_from_flag("endpoint.azure.ready") + comp_cloud_config_path = 
cloud_config_path(component) + comp_cloud_config_path.write_text( + json.dumps( + { + "useInstanceMetadata": True, + "useManagedIdentityExtension": azure.managed_identity, + "subscriptionId": azure.subscription_id, + "resourceGroup": azure.resource_group, + "location": azure.resource_group_location, + "vnetName": azure.vnet_name, + "vnetResourceGroup": azure.vnet_resource_group, + "subnetName": azure.subnet_name, + "securityGroupName": azure.security_group_name, + "loadBalancerSku": "standard", + "securityGroupResourceGroup": azure.security_group_resource_group, + "aadClientId": azure.aad_client_id, + "aadClientSecret": azure.aad_client_secret, + "tenantId": azure.tenant_id, + } + ) + ) + + +def configure_kube_proxy( + configure_prefix, api_servers, cluster_cidr, bind_address=None +): + kube_proxy_opts = {} + kube_proxy_opts["cluster-cidr"] = cluster_cidr + kube_proxy_opts["kubeconfig"] = kubeproxyconfig_path + kube_proxy_opts["logtostderr"] = "true" + kube_proxy_opts["v"] = "0" + num_apis = len(api_servers) + kube_proxy_opts["master"] = api_servers[get_unit_number() % num_apis] + kube_proxy_opts["hostname-override"] = get_node_name() + if bind_address: + kube_proxy_opts["bind-address"] = bind_address + elif is_ipv6(cluster_cidr): + kube_proxy_opts["bind-address"] = "::" + + if host.is_container(): + kube_proxy_opts["conntrack-max-per-core"] = "0" + + feature_gates = [] + + if is_dual_stack(cluster_cidr): + feature_gates.append("IPv6DualStack=true") + + if is_state("endpoint.aws.ready"): + feature_gates.append("CSIMigrationAWS=false") + elif is_state("endpoint.gcp.ready"): + feature_gates.append("CSIMigrationGCE=false") + elif is_state("endpoint.azure.ready"): + feature_gates.append("CSIMigrationAzureDisk=false") + + kube_proxy_opts["feature-gates"] = ",".join(feature_gates) + + configure_kubernetes_service( + configure_prefix, "kube-proxy", kube_proxy_opts, "proxy-extra-args" + ) + + +def get_unit_number(): + return int(hookenv.local_unit().split("/")[1]) + + +def 
cluster_cidr(): + """Return the cluster CIDR provided by the CNI""" + cni = endpoint_from_flag("cni.available") + if not cni: + return None + config = hookenv.config() + if "default-cni" in config: + # master + default_cni = config["default-cni"] + else: + # worker + kube_control = endpoint_from_flag("kube-control.dns.available") + if not kube_control: + return None + default_cni = kube_control.get_default_cni() + return cni.get_config(default=default_cni)["cidr"] + + +def is_dual_stack(cidrs): + """Detect IPv4/IPv6 dual stack from CIDRs""" + return {net.version for net in get_networks(cidrs)} == {4, 6} + + +def is_ipv4(cidrs): + """Detect IPv6 from CIDRs""" + return get_ipv4_network(cidrs) is not None + + +def is_ipv6(cidrs): + """Detect IPv6 from CIDRs""" + return get_ipv6_network(cidrs) is not None + + +def is_ipv6_preferred(cidrs): + """Detect if IPv6 is preffered from CIDRs""" + return get_networks(cidrs)[0].version == 6 + + +def get_networks(cidrs): + """Convert a comma-separated list of CIDRs to a list of networks.""" + if not cidrs: + return [] + return [ipaddress.ip_interface(cidr).network for cidr in cidrs.split(",")] + + +def get_ipv4_network(cidrs): + """Get the IPv4 network from the given CIDRs or None""" + return {net.version: net for net in get_networks(cidrs)}.get(4) + + +def get_ipv6_network(cidrs): + """Get the IPv6 network from the given CIDRs or None""" + return {net.version: net for net in get_networks(cidrs)}.get(6) + + +def enable_ipv6_forwarding(): + """Enable net.ipv6.conf.all.forwarding in sysctl if it is not already.""" + check_call(["sysctl", "net.ipv6.conf.all.forwarding=1"]) + + +def _as_address(addr_str): + try: + return ipaddress.ip_address(addr_str) + except ValueError: + return None + + +def get_bind_addrs(ipv4=True, ipv6=True): + try: + output = check_output(["ip", "-j", "-br", "addr", "show", "scope", "global"]) + except CalledProcessError: + # stderr will have any details, and go to the log + hookenv.log("Unable to determine 
global addresses", hookenv.ERROR) + return [] + + ignore_interfaces = ("lxdbr", "flannel", "cni", "virbr", "docker") + accept_versions = set() + if ipv4: + accept_versions.add(4) + if ipv6: + accept_versions.add(6) + + addrs = [] + for addr in json.loads(output.decode("utf8")): + if addr["operstate"].upper() != "UP" or any( + addr["ifname"].startswith(prefix) for prefix in ignore_interfaces + ): + continue + + for ifc in addr["addr_info"]: + local_addr = _as_address(ifc.get("local")) + if local_addr and local_addr.version in accept_versions: + addrs.append(str(local_addr)) + + return addrs + + +class InvalidVMwareHost(Exception): + pass + + +def _get_vmware_uuid(): + serial_id_file = "/sys/class/dmi/id/product_serial" + # The serial id from VMWare VMs comes in following format: + # VMware-42 28 13 f5 d4 20 71 61-5d b0 7b 96 44 0c cf 54 + try: + with open(serial_id_file, "r") as f: + serial_string = f.read().strip() + if "VMware-" not in serial_string: + hookenv.log( + "Unable to find VMware ID in " + "product_serial: {}".format(serial_string) + ) + raise InvalidVMwareHost + serial_string = ( + serial_string.split("VMware-")[1].replace(" ", "").replace("-", "") + ) + uuid = "%s-%s-%s-%s-%s" % ( + serial_string[0:8], + serial_string[8:12], + serial_string[12:16], + serial_string[16:20], + serial_string[20:32], + ) + except IOError as err: + hookenv.log("Unable to read UUID from sysfs: {}".format(err)) + uuid = "UNKNOWN" + + return uuid + + +def token_generator(length=32): + """Generate a random token for use in account tokens. 
+ + param: length - the length of the token to generate + """ + alpha = string.ascii_letters + string.digits + token = "".join(random.SystemRandom().choice(alpha) for _ in range(length)) + return token + + +def get_secret_names(): + """Return a dict of 'username: secret_id' for Charmed Kubernetes users.""" + try: + output = kubectl( + "get", + "secrets", + "-n", + AUTH_SECRET_NS, + "--field-selector", + "type={}".format(AUTH_SECRET_TYPE), + "-o", + "json", + ).decode("UTF-8") + except (CalledProcessError, FileNotFoundError): + # The api server may not be up, or we may be trying to run kubelet before + # the snap is installed. Send back an empty dict. + hookenv.log("Unable to get existing secrets", level=hookenv.WARNING) + return {} + + secrets = json.loads(output) + secret_names = {} + if "items" in secrets: + for secret in secrets["items"]: + try: + secret_id = secret["metadata"]["name"] + username_b64 = secret["data"]["username"].encode("UTF-8") + except (KeyError, TypeError): + # CK secrets will have populated 'data', but not all secrets do + continue + secret_names[b64decode(username_b64).decode("UTF-8")] = secret_id + return secret_names + + +def generate_rfc1123(length=10): + """Generate a random string compliant with RFC 1123. 
+ + https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#dns-subdomain-names + + param: length - the length of the string to generate + """ + length = 253 if length > 253 else length + valid_chars = string.ascii_lowercase + string.digits + rand_str = "".join(random.SystemRandom().choice(valid_chars) for _ in range(length)) + return rand_str + + +def create_secret(token, username, user, groups=None): + secrets = get_secret_names() + if username in secrets: + # Use existing secret ID if one exists for our username + secret_id = secrets[username] + else: + # secret IDs must be unique and rfc1123 compliant + sani_name = re.sub("[^0-9a-z.-]+", "-", user.lower()) + secret_id = "auth-{}-{}".format(sani_name, generate_rfc1123(10)) + + # The authenticator expects tokens to be in the form user::token + token_delim = "::" + if token_delim not in token: + token = "{}::{}".format(user, token) + + context = { + "type": AUTH_SECRET_TYPE, + "secret_name": secret_id, + "secret_namespace": AUTH_SECRET_NS, + "user": b64encode(user.encode("UTF-8")).decode("utf-8"), + "username": b64encode(username.encode("UTF-8")).decode("utf-8"), + "password": b64encode(token.encode("UTF-8")).decode("utf-8"), + "groups": b64encode(groups.encode("UTF-8")).decode("utf-8") if groups else "", + } + with tempfile.NamedTemporaryFile() as tmp_manifest: + render("cdk.auth-webhook-secret.yaml", tmp_manifest.name, context=context) + + if kubectl_manifest("apply", tmp_manifest.name): + hookenv.log("Created secret for {}".format(username)) + return True + else: + hookenv.log("WARN: Unable to create secret for {}".format(username)) + return False + + +def get_secret_password(username): + """Get the password for the given user from the secret that CK created.""" + try: + output = kubectl( + "get", + "secrets", + "-n", + AUTH_SECRET_NS, + "--field-selector", + "type={}".format(AUTH_SECRET_TYPE), + "-o", + "json", + ).decode("UTF-8") + except CalledProcessError: + # NB: apiserver probably isn't 
up. This can happen on boostrap or upgrade + # while trying to build kubeconfig files. If we need the 'admin' token during + # this time, pull it directly out of the kubeconfig file if possible. + token = None + if username == "admin": + admin_kubeconfig = Path("/root/.kube/config") + if admin_kubeconfig.exists(): + data = yaml.safe_load(admin_kubeconfig.read_text()) + try: + token = data["users"][0]["user"]["token"] + except (KeyError, IndexError, TypeError): + pass + return token + except FileNotFoundError: + # New deployments may ask for a token before the kubectl snap is installed. + # Give them nothing! + return None + + secrets = json.loads(output) + if "items" in secrets: + for secret in secrets["items"]: + try: + data_b64 = secret["data"] + password_b64 = data_b64["password"].encode("UTF-8") + username_b64 = data_b64["username"].encode("UTF-8") + except (KeyError, TypeError): + # CK authn secrets will have populated 'data', but not all secrets do + continue + + password = b64decode(password_b64).decode("UTF-8") + secret_user = b64decode(username_b64).decode("UTF-8") + if username == secret_user: + return password + return None + + +def get_node_ip(): + """Determines the preferred NodeIP value for this node.""" + cidr = cluster_cidr() + if not cidr: + return None + if is_ipv6_preferred(cidr): + return get_ingress_address6("kube-control") + else: + return get_ingress_address("kube-control") + + +def merge_kubelet_extra_config(config, extra_config): + """Updates config to include the contents of extra_config. This is done + recursively to allow deeply nested dictionaries to be merged. + + This is destructive: it modifies the config dict that is passed in. 
+ """ + for k, extra_config_value in extra_config.items(): + if isinstance(extra_config_value, dict): + config_value = config.setdefault(k, {}) + merge_kubelet_extra_config(config_value, extra_config_value) + else: + config[k] = extra_config_value + + +def workaround_lxd_kernel_params(): + """ + Workaround for kubelet not starting in LXD when kernel params are not set + to the desired values. + """ + if host.is_container(): + hookenv.log("LXD detected, faking kernel params via bind mounts") + root_dir = "/root/cdk/lxd-kernel-params" + os.makedirs(root_dir, exist_ok=True) + # Kernel params taken from: + # https://github.com/kubernetes/kubernetes/blob/v1.22.0/pkg/kubelet/cm/container_manager_linux.go#L421-L426 + # https://github.com/kubernetes/kubernetes/blob/v1.22.0/pkg/util/sysctl/sysctl.go#L30-L64 + params = { + "vm.overcommit_memory": 1, + "vm.panic_on_oom": 0, + "kernel.panic": 10, + "kernel.panic_on_oops": 1, + "kernel.keys.root_maxkeys": 1000000, + "kernel.keys.root_maxbytes": 1000000 * 25, + } + for param, param_value in params.items(): + fake_param_path = root_dir + "/" + param + with open(fake_param_path, "w") as f: + f.write(str(param_value)) + real_param_path = "/proc/sys/" + param.replace(".", "/") + host.fstab_add(fake_param_path, real_param_path, "none", "bind") + subprocess.check_call(["mount", "-a"]) + else: + hookenv.log("LXD not detected, not faking kernel params") + + +def get_sandbox_image_uri(registry): + return "{}/pause:3.6".format(registry) + + +def configure_kubelet(dns_domain, dns_ip, registry, taints=None, has_xcp=False): + kubelet_opts = {} + kubelet_opts["kubeconfig"] = kubelet_kubeconfig_path + kubelet_opts["v"] = "0" + kubelet_opts["logtostderr"] = "true" + kubelet_opts["node-ip"] = get_node_ip() + + container_runtime = endpoint_from_flag("endpoint.container-runtime.available") + + kubelet_opts["container-runtime"] = container_runtime.get_runtime() + if kubelet_opts["container-runtime"] == "remote": + 
kubelet_opts["container-runtime-endpoint"] = container_runtime.get_socket() + + feature_gates = {} + + kubelet_cloud_config_path = cloud_config_path("kubelet") + if has_xcp: + kubelet_opts["cloud-provider"] = "external" + elif is_state("endpoint.aws.ready"): + kubelet_opts["cloud-provider"] = "aws" + feature_gates["CSIMigrationAWS"] = False + elif is_state("endpoint.gcp.ready"): + kubelet_opts["cloud-provider"] = "gce" + kubelet_opts["cloud-config"] = str(kubelet_cloud_config_path) + feature_gates["CSIMigrationGCE"] = False + elif is_state("endpoint.openstack.ready"): + kubelet_opts["cloud-provider"] = "external" + elif is_state("endpoint.vsphere.joined"): + # vsphere just needs to be joined on the worker (vs 'ready') + kubelet_opts["cloud-provider"] = "vsphere" + # NB: vsphere maps node product-id to its uuid (no config file needed). + uuid = _get_vmware_uuid() + kubelet_opts["provider-id"] = "vsphere://{}".format(uuid) + elif is_state("endpoint.azure.ready"): + azure = endpoint_from_flag("endpoint.azure.ready") + kubelet_opts["cloud-provider"] = "azure" + kubelet_opts["cloud-config"] = str(kubelet_cloud_config_path) + kubelet_opts["provider-id"] = azure.vm_id + feature_gates["CSIMigrationAzureDisk"] = False + + # Put together the KubeletConfiguration data + kubelet_config = { + "apiVersion": "kubelet.config.k8s.io/v1beta1", + "kind": "KubeletConfiguration", + "address": "0.0.0.0", + "authentication": { + "anonymous": {"enabled": False}, + "x509": {"clientCAFile": str(ca_crt_path)}, + }, + # NB: authz webhook config tells the kubelet to ask the api server + # if a request is authorized; it is not related to the authn + # webhook config of the k8s master services. 
+ "authorization": {"mode": "Webhook"}, + "clusterDomain": dns_domain, + "failSwapOn": False, + "port": 10250, + "protectKernelDefaults": True, + "readOnlyPort": 0, + "tlsCertFile": str(server_crt_path), + "tlsPrivateKeyFile": str(server_key_path), + } + if dns_ip: + kubelet_config["clusterDNS"] = [dns_ip] + + # Handle feature gates + if get_version("kubelet") >= (1, 19): + # NB: required for CIS compliance + feature_gates["RotateKubeletServerCertificate"] = True + if is_state("kubernetes-worker.gpu.enabled"): + feature_gates["DevicePlugins"] = True + if feature_gates: + kubelet_config["featureGates"] = feature_gates + if is_dual_stack(cluster_cidr()): + feature_gates = kubelet_config.setdefault("featureGates", {}) + feature_gates["IPv6DualStack"] = True + + # Workaround for DNS on bionic + # https://github.com/juju-solutions/bundle-canonical-kubernetes/issues/655 + resolv_path = os.path.realpath("/etc/resolv.conf") + if resolv_path == "/run/systemd/resolve/stub-resolv.conf": + kubelet_config["resolvConf"] = "/run/systemd/resolve/resolv.conf" + + # Add kubelet-extra-config. This needs to happen last so that it + # overrides any config provided by the charm. + kubelet_extra_config = hookenv.config("kubelet-extra-config") + kubelet_extra_config = yaml.safe_load(kubelet_extra_config) + merge_kubelet_extra_config(kubelet_config, kubelet_extra_config) + + # Render the file and configure Kubelet to use it + os.makedirs("/root/cdk/kubelet", exist_ok=True) + with open("/root/cdk/kubelet/config.yaml", "w") as f: + f.write("# Generated by kubernetes-worker charm, do not edit\n") + yaml.dump(kubelet_config, f) + kubelet_opts["config"] = "/root/cdk/kubelet/config.yaml" + + # If present, ensure kubelet gets the pause container from the configured + # registry. When not present, kubelet uses a default image location + # (currently k8s.gcr.io/pause:3.4.1). 
+ if registry: + kubelet_opts["pod-infra-container-image"] = get_sandbox_image_uri(registry) + + if taints: + kubelet_opts["register-with-taints"] = ",".join(taints) + + workaround_lxd_kernel_params() + + configure_kubernetes_service( + "kubernetes-common.prev-args.", "kubelet", kubelet_opts, "kubelet-extra-args" + ) + + +def configure_default_cni(default_cni): + """Set the default CNI configuration to be used by CNI clients + (kubelet, containerd). + + CNI clients choose whichever CNI config in /etc/cni/net.d/ is + alphabetically first, so we accomplish this by creating a file named + /etc/cni/net.d/05-default.conflist, which is alphabetically earlier than + typical CNI config names, e.g. 10-flannel.conflist and 10-calico.conflist + + The created 05-default.conflist file is a symlink to whichever CNI config + is actually going to be used. + """ + # Clean up current default + cni_conf_dir = "/etc/cni/net.d" + for filename in os.listdir(cni_conf_dir): + if filename.startswith("05-default."): + os.remove(cni_conf_dir + "/" + filename) + + # Set new default + cni = endpoint_from_flag("cni.available") + cni_conf = cni.get_config(default=default_cni) + source = cni_conf["cni-conf-file"] + dest = cni_conf_dir + "/" + "05-default." 
+ source.split(".")[-1] + os.symlink(source, dest) diff --git a/kubernetes-control-plane/lib/charms/layer/kubernetes_control_plane.py b/kubernetes-control-plane/lib/charms/layer/kubernetes_control_plane.py new file mode 100644 index 0000000..6b885a4 --- /dev/null +++ b/kubernetes-control-plane/lib/charms/layer/kubernetes_control_plane.py @@ -0,0 +1,496 @@ +import csv +import json +import random +import socket +import string +from pathlib import Path +import ipaddress +from subprocess import check_output, CalledProcessError, TimeoutExpired +from time import sleep +from yaml import safe_load + +from charmhelpers.core import host +from charmhelpers.core import hookenv +from charmhelpers.core.templating import render +from charmhelpers.core import unitdata +from charmhelpers.fetch import apt_install +from charms.reactive import endpoint_from_flag, endpoint_from_name, is_flag_set +from charms.layer import kubernetes_common +from charms.layer.kubernetes_common import AUTH_SECRET_NS, create_secret + + +AUTH_BACKUP_EXT = "pre-secrets" +AUTH_BASIC_FILE = "/root/cdk/basic_auth.csv" +AUTH_TOKENS_FILE = "/root/cdk/known_tokens.csv" +EXTERNAL_API_PORT = 443 +STANDARD_API_PORT = 6443 +CEPH_CONF_DIR = Path("/etc/ceph") +CEPH_CONF = CEPH_CONF_DIR / "ceph.conf" +CEPH_KEYRING = CEPH_CONF_DIR / "ceph.client.{}.keyring".format( + hookenv.application_name() +) + +db = unitdata.kv() + + +def get_endpoints_from_config(): + """ + Return a list of any manually configured API endpoints. + """ + ha_connected = is_flag_set("ha.connected") + forced_lb_ips = hookenv.config("loadbalancer-ips").split() + vips = hookenv.config("ha-cluster-vip").split() + dns_record = hookenv.config("ha-cluster-dns") + if forced_lb_ips: + # if the user gave us IPs for the load balancer, assume + # they know what they are talking about and use that + # instead of our information. 
+ return [(address, STANDARD_API_PORT) for address in forced_lb_ips] + elif ha_connected and vips: + return [(vip, STANDARD_API_PORT) for vip in vips] + elif ha_connected and dns_record: + return [(dns_record, STANDARD_API_PORT)] + else: + return [] + + +def get_local_api_endpoint(): + """ + Return the local address & port for self-access. + + Returns a list with a single tuple to match the other functions below. + """ + return [("127.0.0.1", STANDARD_API_PORT)] + + +def get_internal_api_endpoints(relation=None): + """ + Determine the best API endpoints for an internal client to connect to. + + If a relation is given, it will try to take that into account. + + May return an empty list if an endpoint is expected but not yet available. + """ + try: + goal_state = hookenv.goal_state() + except NotImplementedError: + goal_state = {} + goal_state.setdefault("relations", {}) + + # Config takes precedence. + endpoints_from_config = get_endpoints_from_config() + if endpoints_from_config: + return endpoints_from_config + + # If the internal LB relation is attached, use that or nothing. If it's + # not attached but the external LB relation is, use that or nothing. + for lb_type in ("internal", "external"): + lb_endpoint = "loadbalancer-" + lb_type + request_name = "api-server-" + lb_type + api_port = EXTERNAL_API_PORT if lb_type == "external" else STANDARD_API_PORT + if lb_endpoint in goal_state["relations"]: + lb_provider = endpoint_from_name(lb_endpoint) + lb_response = lb_provider.get_response(request_name) + if not lb_response or lb_response.error: + return [] + return [(lb_response.address, api_port)] + + # Support the older loadbalancer relation (public-address interface). + if "loadbalancer" in goal_state["relations"]: + loadbalancer = endpoint_from_name("loadbalancer") + lb_addresses = loadbalancer.get_addresses_ports() + return [(host.get("public-address"), host.get("port")) for host in lb_addresses] + + # No LBs of any kind, so fall back to ingress-address. 
+ if not relation: + kube_control = endpoint_from_name("kube-control") + if not kube_control.relations: + return [] + relation = kube_control.relations[0] + ingress_address = hookenv.ingress_address( + relation.relation_id, hookenv.local_unit() + ) + return [(ingress_address, STANDARD_API_PORT)] + + +def get_external_api_endpoints(): + """ + Determine the best API endpoints for an external client to connect to. + + May return an empty list if an endpoint is expected but not yet available. + """ + try: + goal_state = hookenv.goal_state() + except NotImplementedError: + goal_state = {} + goal_state.setdefault("relations", {}) + + # Config takes precedence. + endpoints_from_config = get_endpoints_from_config() + if endpoints_from_config: + return endpoints_from_config + + # If the external LB relation is attached, use that or nothing. If it's + # not attached but the internal LB relation is, use that or nothing. + for lb_type in ("external", "internal"): + lb_endpoint = "loadbalancer-" + lb_type + lb_name = "api-server-" + lb_type + api_port = EXTERNAL_API_PORT if lb_type == "external" else STANDARD_API_PORT + if lb_endpoint in goal_state["relations"]: + lb_provider = endpoint_from_name(lb_endpoint) + lb_response = lb_provider.get_response(lb_name) + if not lb_response or lb_response.error: + return [] + return [(lb_response.address, api_port)] + + # Support the older loadbalancer relation (public-address interface). + if "loadbalancer" in goal_state["relations"]: + loadbalancer = endpoint_from_name("loadbalancer") + lb_addresses = loadbalancer.get_addresses_ports() + return [(host.get("public-address"), host.get("port")) for host in lb_addresses] + + # No LBs of any kind, so fall back to public-address. + return [(hookenv.unit_public_ip(), STANDARD_API_PORT)] + + +def get_api_urls(endpoints): + """ + Convert a list of API server endpoints to URLs. 
+ """ + return ["https://{0}:{1}".format(*endpoint) for endpoint in endpoints] + + +def get_api_url(endpoints): + """ + Choose an API endpoint from the list and build a URL from it. + """ + if not endpoints: + return None + urls = get_api_urls(endpoints) + return urls[kubernetes_common.get_unit_number() % len(urls)] + + +def install_ceph_common(): + """Install ceph-common tools. + + :return: None + """ + ceph_client = endpoint_from_flag("ceph-client.available") + + ceph_context = { + "mon_hosts": " ".join(ceph_client.mon_hosts()), + "auth_supported": ceph_client.auth, + "use_syslog": "true", + "ceph_public_network": "", + "ceph_cluster_network": "", + "loglevel": 1, + "hostname": socket.gethostname(), + } + # Install the ceph common utilities. + apt_install(["ceph-common"], fatal=True) + + CEPH_CONF_DIR.mkdir(exist_ok=True, parents=True) + # Render the ceph configuration from the ceph conf template. + render("ceph.conf", str(CEPH_CONF), ceph_context) + + # The key can rotate independently of other ceph config, so validate it. 
+ try: + with open(str(CEPH_KEYRING), "w") as key_file: + key_file.write( + "[client.{}]\n\tkey = {}\n".format( + hookenv.application_name(), ceph_client.key + ) + ) + except IOError as err: + hookenv.log("IOError writing Ceph keyring: {}".format(err)) + + +def ceph_cli(*args, timeout=60): + cmd = ["ceph", "--user", hookenv.application_name()] + list(args) + return check_output(cmd, timeout=timeout).decode("UTF-8") + + +def query_cephfs_enabled(): + try: + out = ceph_cli("mds", "versions", "-c", str(CEPH_CONF)) + return bool(json.loads(out)) + except CalledProcessError: + hookenv.log("Unable to determine if CephFS is enabled", "ERROR") + return False + except TimeoutExpired: + hookenv.log("Timeout attempting to determine if CephFS is enabled", "ERROR") + return False + + +def get_ceph_fsid(): + try: + return ceph_cli("fsid").strip() + except CalledProcessError: + hookenv.log("Unable to get Ceph FSID", "ERROR") + return None + except TimeoutExpired: + hookenv.log("Timeout attempting to get Ceph FSID", "ERROR") + return None + + +def get_cephfs_fsname(): + try: + data = json.loads(ceph_cli("fs", "ls", "-f", "json")) + except TimeoutExpired: + hookenv.log("Timeout attempting to determine fsname", "ERROR") + return None + for fs in data: + if "ceph-fs_data" in fs["data_pools"]: + return fs["name"] + + +def deprecate_auth_file(auth_file): + """ + In 1.19+, file-based authentication was deprecated in favor of webhook + auth. Write out generic files that inform the user of this. 
+ """ + csv_file = Path(auth_file) + csv_file.parent.mkdir(exist_ok=True) + + csv_backup = Path("{}.{}".format(csv_file, AUTH_BACKUP_EXT)) + if csv_file.exists() and not csv_backup.exists(): + csv_file.rename(csv_backup) + with csv_file.open("w") as f: + f.write("# File-based authentication was removed in Charmed Kubernetes 1.19\n") + + +def migrate_auth_file(filename): + """Create secrets or known tokens depending on what file is being migrated.""" + with open(str(filename), "r") as f: + rows = list(csv.reader(f)) + + for row in rows: + try: + if row[0].startswith("#"): + continue + else: + if filename == AUTH_BASIC_FILE: + create_known_token(*row) + elif filename == AUTH_TOKENS_FILE: + create_secret(*row) + else: + # log and return if we don't recognize the auth file + hookenv.log("Unknown auth file: {}".format(filename)) + return False + except IndexError: + pass + deprecate_auth_file(filename) + return True + + +def token_generator(length=32): + """Generate a random token for use in account tokens. 
+ + param: length - the length of the token to generate + """ + alpha = string.ascii_letters + string.digits + token = "".join(random.SystemRandom().choice(alpha) for _ in range(length)) + return token + + +def create_known_token(token, username, user, groups=None): + known_tokens = Path(AUTH_TOKENS_FILE) + known_tokens.parent.mkdir(exist_ok=True) + csv_fields = ["token", "username", "user", "groups"] + + try: + with known_tokens.open("r") as f: + tokens_by_user = {r["user"]: r for r in csv.DictReader(f, csv_fields)} + except FileNotFoundError: + tokens_by_user = {} + tokens_by_username = {r["username"]: r for r in tokens_by_user.values()} + + if user in tokens_by_user: + record = tokens_by_user[user] + elif username in tokens_by_username: + record = tokens_by_username[username] + else: + record = tokens_by_user[user] = {} + record.update( + { + "token": token, + "username": username, + "user": user, + "groups": groups, + } + ) + + if not record["groups"]: + del record["groups"] + + with known_tokens.open("w") as f: + csv.DictWriter(f, csv_fields, lineterminator="\n").writerows( + tokens_by_user.values() + ) + + +def delete_secret(secret_id): + """Delete a given secret id.""" + # If this fails, it's most likely because we're trying to delete a secret + # that doesn't exist. Let the caller decide if failure is a problem. 
+ return kubernetes_common.kubectl_success( + "delete", "secret", "-n", AUTH_SECRET_NS, secret_id + ) + + +def get_csv_password(csv_fname, user): + """Get the password for the given user within the csv file provided.""" + root_cdk = "/root/cdk" + tokens_fname = Path(root_cdk) / csv_fname + if not tokens_fname.is_file(): + return None + with tokens_fname.open("r") as stream: + for line in stream: + record = line.split(",") + try: + if record[1] == user: + return record[0] + except IndexError: + # probably a blank line or comment; move on + continue + return None + + +try: + ipaddress.IPv4Network.subnet_of +except AttributeError: + # Returns True if a is subnet of b + # This method is copied from cpython as it is available only from + # python 3.7 + # https://github.com/python/cpython/blob/3.7/Lib/ipaddress.py#L1000 + def _is_subnet_of(a, b): + try: + # Always false if one is v4 and the other is v6. + if a._version != b._version: + raise TypeError("{} and {} are not of the same version".format(a, b)) + return ( + b.network_address <= a.network_address + and b.broadcast_address >= a.broadcast_address + ) + except AttributeError: + raise TypeError( + "Unable to test subnet containment " "between {} and {}".format(a, b) + ) + + ipaddress.IPv4Network.subnet_of = _is_subnet_of + ipaddress.IPv6Network.subnet_of = _is_subnet_of + + +def is_service_cidr_expansion(): + service_cidr_from_db = db.get("kubernetes-master.service-cidr") + service_cidr_from_config = hookenv.config("service-cidr") + if not service_cidr_from_db: + return False + + # Do not consider as expansion if both old and new service cidr are same + if service_cidr_from_db == service_cidr_from_config: + return False + + current_networks = kubernetes_common.get_networks(service_cidr_from_db) + new_networks = kubernetes_common.get_networks(service_cidr_from_config) + if len(current_networks) != len(new_networks) or not all( + cur.subnet_of(new) for cur, new in zip(current_networks, new_networks) + ): + 
hookenv.log("WARN: New k8s service cidr not superset of old one") + return False + + return True + + +def service_cidr(): + """Return the charm's service-cidr config""" + frozen_cidr = db.get("kubernetes-master.service-cidr") + return frozen_cidr or hookenv.config("service-cidr") + + +def freeze_service_cidr(): + """Freeze the service CIDR. Once the apiserver has started, we can no + longer safely change this value.""" + frozen_service_cidr = db.get("kubernetes-master.service-cidr") + if not frozen_service_cidr or is_service_cidr_expansion(): + db.set("kubernetes-master.service-cidr", hookenv.config("service-cidr")) + + +def get_preferred_service_network(service_cidrs): + """Get the network preferred for cluster service, preferring IPv4""" + net_ipv4 = kubernetes_common.get_ipv4_network(service_cidrs) + net_ipv6 = kubernetes_common.get_ipv6_network(service_cidrs) + return net_ipv4 or net_ipv6 + + +def get_dns_ip(): + return kubernetes_common.get_service_ip("kube-dns", namespace="kube-system") + + +def get_kubernetes_service_ips(): + """Get the IP address(es) for the kubernetes service based on the cidr.""" + return [ + next(network.hosts()).exploded + for network in kubernetes_common.get_networks(service_cidr()) + ] + + +def get_snap_revs(snaps): + """Get a dict of snap revisions for a given list of snaps.""" + channel = hookenv.config("channel") + rev_info = {} + for s in sorted(snaps): + try: + # valid info should looke like: + # ... + # channels: + # latest/stable: 1.18.8 2020-08-27 (1595) 22MB classic + # latest/candidate: 1.18.8 2020-08-27 (1595) 22MB classic + # ... 
+ info = check_output(["snap", "info", s]).decode("utf8", errors="ignore") + except CalledProcessError: + # If 'snap info' fails for whatever reason, just empty the info + info = "" + snap_rev = None + yaml_data = safe_load(info) + if yaml_data and "channels" in yaml_data: + try: + # valid data should look like: + # ['1.18.8', '2020-08-27', '(1604)', '21MB', 'classic'] + d = yaml_data["channels"][channel].split() + snap_rev = d[2].strip("()") + except (KeyError, IndexError): + hookenv.log( + "Could not determine revision for snap: {}".format(s), + level=hookenv.WARNING, + ) + rev_info[s] = snap_rev + return rev_info + + +def check_service(service, attempts=6, delay=10): + """Check if a given service is up, giving it a bit of time to come up if needed. + + Returns True if the service is running, False if not, or raises a ValueError if + the service is unknown. Will automatically handle translating control-plane component + names (e.g., kube-apiserver) to service names (snap.kube-apiserver.daemon). + """ + for pattern in ("{}", "snap.{}", "snap.{}.daemon", "snap.kube-{}.daemon"): + if host.service("is-enabled", pattern.format(service)): + service = pattern.format(service) + break + else: + raise ValueError("Unknown service: {}".format(service)) + # Give each service up to a minute to become active; this is especially + # needed now that controller-mgr/scheduler/proxy need the apiserver + # to validate their token against a k8s secret. 
+ attempt = 0 + while attempt < attempts: + hookenv.log( + "Checking if {} is active ({} / {})".format(service, attempt, attempts) + ) + if host.service_running(service): + return True + sleep(delay) + attempt += 1 + return False diff --git a/kubernetes-control-plane/lib/charms/layer/kubernetes_node_base.py b/kubernetes-control-plane/lib/charms/layer/kubernetes_node_base.py new file mode 100644 index 0000000..ba49416 --- /dev/null +++ b/kubernetes-control-plane/lib/charms/layer/kubernetes_node_base.py @@ -0,0 +1,121 @@ +"""Library shared between kubernetes control plane and kubernetes worker charms.""" + +from subprocess import call +from os import PathLike +import time +from typing import Union, List + +from charms.layer.kubernetes_common import get_node_name +from charms.reactive import is_state +from charmhelpers.core import hookenv, unitdata + +db = unitdata.kv() + + +class LabelMaker: + """Use to apply labels to a kubernetes node.""" + + class NodeLabelError(Exception): + """Raised when there's an error labeling a node.""" + + pass + + def __init__(self, kubeconfig_path: Union[PathLike, str]): + self.kubeconfig_path = kubeconfig_path + self.node = get_node_name() + + @staticmethod + def _retried_call(cmd: List[str], retry_msg: str, timeout: int = 180) -> bool: + deadline = time.time() + timeout + while time.time() < deadline: + code = call(cmd) + if code == 0: + return True + hookenv.log(retry_msg) + time.sleep(1) + else: + return False + + def set_label(self, label: str, value: str) -> None: + """ + Add a label to this node. + + @param str label: Label name to apply + @param str value: Value to associate with the label + @raises LabelMaker.NodeLabelError: if the label cannot be added + """ + cmd = "kubectl --kubeconfig={0} label node {1} {2}={3} --overwrite" + cmd = cmd.format(self.kubeconfig_path, self.node, label, value) + retry_msg = "Failed to apply label {0}={1}. 
Will retry.".format(label, value) + if not LabelMaker._retried_call(cmd.split(), retry_msg): + raise LabelMaker.NodeLabelError(retry_msg) + + def remove_label(self, label: str) -> None: + """ + Remove a label to this node. + + @param str label: Label name to remove + @raises LabelMaker.NodeLabelError: if the label cannot be removed + """ + cmd = "kubectl --kubeconfig={0} label node {1} {2}-" + cmd = cmd.format(self.kubeconfig_path, self.node, label) + retry_msg = "Failed to remove label {0}. Will retry.".format(label) + if not LabelMaker._retried_call(cmd.split(), retry_msg): + raise LabelMaker.NodeLabelError(retry_msg) + + def apply_node_labels(self) -> None: + """ + Parse the `labels` configuration option and apply the labels to the + node. + + @raises LabelMaker.NodeLabelError: if the label cannot be added or removed + """ + # Get the user's configured labels. + config = hookenv.config() + user_labels = {} + for item in config.get("labels").split(" "): + try: + key, val = item.split("=") + except ValueError: + hookenv.log("Skipping malformed option: {}.".format(item)) + else: + user_labels[key] = val + # Collect the current label state. + current_labels = db.get("current_labels") or {} + + try: + # Remove any labels that the user has removed from the config. + for key in list(current_labels.keys()): + if key not in user_labels: + self.remove_label(key) + del current_labels[key] + db.set("current_labels", current_labels) + + # Add any new labels. + for key, val in user_labels.items(): + self.set_label(key, val) + current_labels[key] = val + db.set("current_labels", current_labels) + + # Set the juju-application label. + self.set_label("juju-application", hookenv.service_name()) + + # Set the juju.io/cloud label. 
+ juju_io_cloud_labels = [ + ("aws", "ec2"), + ("gcp", "gce"), + ("openstack", "openstack"), + ("vsphere", "vsphere"), + ("azure", "azure"), + ] + for endpoint, label in juju_io_cloud_labels: + if is_state("endpoint.{0}.ready".format(endpoint)): + self.set_label("juju.io/cloud", label) + break + else: + # none of the endpoints matched, remove the label + self.remove_label("juju.io/cloud") + + except self.NodeLabelError as ex: + hookenv.log(str(ex)) + raise diff --git a/kubernetes-control-plane/lib/charms/layer/nagios.py b/kubernetes-control-plane/lib/charms/layer/nagios.py new file mode 100644 index 0000000..f6ad998 --- /dev/null +++ b/kubernetes-control-plane/lib/charms/layer/nagios.py @@ -0,0 +1,60 @@ +from pathlib import Path + +NAGIOS_PLUGINS_DIR = '/usr/lib/nagios/plugins' + + +def install_nagios_plugin_from_text(text, plugin_name): + """ Install a nagios plugin. + + Args: + text: Plugin source code (str) + plugin_name: Name of the plugin in nagios + + Returns: Full path to installed plugin + """ + dest_path = Path(NAGIOS_PLUGINS_DIR) / plugin_name + if dest_path.exists(): + # we could complain here, test the files are the same contents, or + # just bail. Idempotency is a big deal in Juju, so I'd like to be + # ok with being called with the same file multiple times, but we + # certainly want to catch the case where multiple layers are using + # the same filename for their nagios checks. + dest = dest_path.read_text() + if dest == text: + # same file + return dest_path + # different file contents! + # maybe someone changed options or something so we need to write + # it again + + dest_path.write_text(text) + dest_path.chmod(0o755) + + return dest_path + + +def install_nagios_plugin_from_file(source_file_path, plugin_name): + """ Install a nagios plugin. 
+ + Args: + source_file_path: Path to plugin source file + plugin_name: Name of the plugin in nagios + + Returns: Full path to installed plugin + """ + + return install_nagios_plugin_from_text(Path(source_file_path).read_text(), + plugin_name) + + +def remove_nagios_plugin(plugin_name): + """ Remove a nagios plugin. + + Args: + plugin_name: Name of the plugin in nagios + + Returns: None + """ + dest_path = Path(NAGIOS_PLUGINS_DIR) / plugin_name + if dest_path.exists(): + dest_path.unlink() diff --git a/kubernetes-control-plane/lib/charms/layer/options.py b/kubernetes-control-plane/lib/charms/layer/options.py new file mode 100644 index 0000000..d3f273f --- /dev/null +++ b/kubernetes-control-plane/lib/charms/layer/options.py @@ -0,0 +1,26 @@ +import os +from pathlib import Path + +import yaml + + +_CHARM_PATH = Path(os.environ.get('JUJU_CHARM_DIR', '.')) +_DEFAULT_FILE = _CHARM_PATH / 'layer.yaml' +_CACHE = {} + + +def get(section=None, option=None, layer_file=_DEFAULT_FILE): + if option and not section: + raise ValueError('Cannot specify option without section') + + layer_file = (_CHARM_PATH / layer_file).resolve() + if layer_file not in _CACHE: + with layer_file.open() as fp: + _CACHE[layer_file] = yaml.safe_load(fp.read()) + + data = _CACHE[layer_file].get('options', {}) + if section: + data = data.get(section, {}) + if option: + data = data.get(option) + return data diff --git a/kubernetes-control-plane/lib/charms/layer/snap.py b/kubernetes-control-plane/lib/charms/layer/snap.py new file mode 100644 index 0000000..ae9be45 --- /dev/null +++ b/kubernetes-control-plane/lib/charms/layer/snap.py @@ -0,0 +1,492 @@ +# Copyright 2016-2019 Canonical Ltd. +# +# This file is part of the Snap layer for Juju. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import os +import subprocess + +import tenacity +import yaml + +from charmhelpers.core import hookenv +from charms import layer +from charms import reactive +from charms.reactive.helpers import any_file_changed, data_changed +from datetime import datetime, timedelta + + +def get_installed_flag(snapname): + return "snap.installed.{}".format(snapname) + + +def get_refresh_available_flag(snapname): + return "snap.refresh-available.{}".format(snapname) + + +def get_local_flag(snapname): + return "snap.local.{}".format(snapname) + + +def get_disabled_flag(snapname): + return "snap.disabled.{}".format(snapname) + + +def install(snapname, **kw): + """Install a snap. + + Snap will be installed from the coresponding resource if available, + otherwise from the Snap Store. + + Sets the snap.installed.{snapname} flag. + + If the snap.installed.{snapname} flag is already set then the refresh() + function is called. + """ + installed_flag = get_installed_flag(snapname) + local_flag = get_local_flag(snapname) + if reactive.is_flag_set(installed_flag): + refresh(snapname, **kw) + else: + if hookenv.has_juju_version("2.0"): + res_path = _resource_get(snapname) + if res_path is False: + _install_store(snapname, **kw) + else: + _install_local(res_path, **kw) + reactive.set_flag(local_flag) + else: + _install_store(snapname, **kw) + reactive.set_flag(installed_flag) + + # Installing any snap will first ensure that 'core' is installed. Set an + # appropriate flag for consumers that want to get/set core options. 
+ core_installed = get_installed_flag("core") + if not reactive.is_flag_set(core_installed): + reactive.set_flag(core_installed) + + +def is_installed(snapname): + return reactive.is_flag_set(get_installed_flag(snapname)) + + +def is_local(snapname): + return reactive.is_flag_set(get_local_flag(snapname)) + + +def get_installed_snaps(): + """Return a list of snaps which are installed by this layer.""" + flag_prefix = "snap.installed." + return [flag[len(flag_prefix) :] for flag in reactive.get_flags() if flag.startswith(flag_prefix)] + + +def refresh(snapname, **kw): + """Update a snap. + + Snap will be pulled from the coresponding resource if available + and reinstalled if it has changed. Otherwise a 'snap refresh' is + run updating the snap from the Snap Store, potentially switching + channel and changing confinement options. + """ + # Note that once you upload a resource, you can't remove it. + # This means we don't need to cope with an operator switching + # from a resource provided to a store provided snap, because there + # is no way for them to do that. Well, actually the operator could + # upload a zero byte resource, but then we would need to uninstall + # the snap before reinstalling from the store and that has the + # potential for data loss. + local_flag = get_local_flag(snapname) + if hookenv.has_juju_version("2.0"): + res_path = _resource_get(snapname) + if res_path is False: + _refresh_store(snapname, **kw) + reactive.clear_flag(local_flag) + else: + _install_local(res_path, **kw) + reactive.set_flag(local_flag) + else: + _refresh_store(snapname, **kw) + reactive.clear_flag(local_flag) + + +def remove(snapname): + hookenv.log("Removing snap {}".format(snapname)) + subprocess.check_call(["snap", "remove", snapname]) + reactive.clear_flag(get_installed_flag(snapname)) + + +def connect(plug, slot): + """Connect or reconnect a snap plug with a slot. 
+ + Each argument must be a two element tuple, corresponding to + the two arguments to the 'snap connect' command. + """ + hookenv.log("Connecting {} to {}".format(plug, slot), hookenv.DEBUG) + subprocess.check_call(["snap", "connect", plug, slot]) + + +def connect_all(): + """Connect or reconnect all interface connections defined in layer.yaml. + + This method will fail if called before all referenced snaps have been + installed. + """ + opts = layer.options("snap") + for snapname, snap_opts in opts.items(): + for plug, slot in snap_opts.get("connect", []): + connect(plug, slot) + + +def disable(snapname): + """Disables a snap in the system + + Sets the snap.disabled.{snapname} flag + + This method doesn't affect any snap flag if requested snap does not + exist + """ + hookenv.log("Disabling {} snap".format(snapname)) + if not reactive.is_flag_set(get_installed_flag(snapname)): + hookenv.log( + "Cannot disable {} snap because it is not installed".format(snapname), + hookenv.WARNING, + ) + return + + subprocess.check_call(["snap", "disable", snapname]) + reactive.set_flag(get_disabled_flag(snapname)) + + +def enable(snapname): + """Enables a snap in the system + + Clears the snap.disabled.{snapname} flag + + This method doesn't affect any snap flag if requeted snap does not + exist + """ + hookenv.log("Enabling {} snap".format(snapname)) + if not reactive.is_flag_set(get_installed_flag(snapname)): + hookenv.log( + "Cannot enable {} snap because it is not installed".format(snapname), + hookenv.WARNING, + ) + return + + subprocess.check_call(["snap", "enable", snapname]) + reactive.clear_flag(get_disabled_flag(snapname)) + + +def restart(snapname): + """Restarts a snap in the system + + This method doesn't affect any snap flag if requested snap does not + exist + """ + hookenv.log("Restarting {} snap".format(snapname)) + if not reactive.is_flag_set(get_installed_flag(snapname)): + hookenv.log( + "Cannot restart {} snap because it is not installed".format(snapname), + 
hookenv.WARNING, + ) + return + + subprocess.check_call(["snap", "restart", snapname]) + + +def set(snapname, key, value): + """Changes configuration options in a snap + + This method will fail if snapname is not an installed snap + """ + hookenv.log("Set config {}={} for snap {}".format(key, value, snapname)) + if not reactive.is_flag_set(get_installed_flag(snapname)): + hookenv.log( + "Cannot set {} snap config because it is not installed".format(snapname), + hookenv.WARNING, + ) + return + + subprocess.check_call(["snap", "set", snapname, "{}={}".format(key, value)]) + + +def set_refresh_timer(timer=""): + """Set the system refresh.timer option (snapd 2.31+) + + This method sets how often snapd will refresh installed snaps. Call with + an empty timer string to use the system default (currently 4x per day). + Use 'max' to schedule refreshes as far into the future as possible + (currently 1 month). Also accepts custom timer strings as defined in the + refresh.timer section here: + https://forum.snapcraft.io/t/system-options/87 + + This method does not validate custom strings and will lead to a + CalledProcessError if an invalid string is given. + + :param: timer: empty string (default), 'max', or custom string + """ + if timer == "max": + # A month from yesterday is the farthest we should delay to safely stay + # under the 1 month max. Translate that to a valid refresh.timer value. + # Examples: + # - Today is Friday the 13th, set the refresh timer to + # 'thu2' (Thursday the 12th is the 2nd thursday of the month). + # - Today is Tuesday the 1st, set the refresh timer to + # 'mon5' (Monday the [28..31] is the 5th monday of the month). + yesterday = datetime.now() - timedelta(1) + dow = yesterday.strftime("%a").lower() + # increment after int division because we want occurrence 1-5, not 0-4. 
+ occurrence = yesterday.day // 7 + 1 + timer = "{}{}".format(dow, occurrence) + + # NB: 'system' became synonymous with 'core' in 2.32.5, but we use 'core' + # here to ensure max compatibility. + set(snapname="core", key="refresh.timer", value=timer) + subprocess.check_call(["systemctl", "restart", "snapd.service"]) + + +def get(snapname, key): + """Gets configuration options for a snap + + This method returns the stripped output from the snap get command. + This method will fail if snapname is not an installed snap. + """ + hookenv.log("Get config {} for snap {}".format(key, snapname)) + if not reactive.is_flag_set(get_installed_flag(snapname)): + hookenv.log( + "Cannot get {} snap config because it is not installed".format(snapname), + hookenv.WARNING, + ) + return + + return subprocess.check_output(["snap", "get", snapname, key]).strip() + + +def _snap_list(): + """Constructs a dict with all installed snaps. + + Queries all the snaps installed and returns a dict containing their + versions and tracking channels, indexed by the snap name. + """ + cmd = ["snap", "list"] + out = subprocess.check_output(cmd).decode("utf-8", errors="replace").split() + snaps = {} + for i in range(6, len(out) - 5, 6): # Skip first six, which are the titles + # Snap list has 6 columns: + # name, version, revision, tracking channel, publisher and notes + # We only care about name (0), version (1) and tracking channel (3) + snaps[out[i]] = { + 'version': out[i + 1], + 'channel': out[i + 3], + } + return snaps + + +def get_installed_version(snapname): + """Gets the installed version of a snapname. + This function will return nothing if snapname is not an installed snap. 
+ """ + hookenv.log("Get installed key for snap {}".format(snapname)) + if not reactive.is_flag_set(get_installed_flag(snapname)): + hookenv.log( + "Cannot get {} snap installed version because it is not installed".format(snapname), + hookenv.WARNING, + ) + return + try: + return _snap_list()[snapname]['version'] + except Exception as e: + # If it fails to get the version information(ex. installed via resource), return nothing. + hookenv.log( + "Cannot get snap version: {}".format(e), + hookenv.WARNING, + ) + return + + +def get_installed_channel(snapname): + """Gets the tracking (channel) of a snapname. + This function will return nothing if snapname is not an installed snap. + """ + hookenv.log("Get channel for snap {}".format(snapname)) + if not reactive.is_flag_set(get_installed_flag(snapname)): + hookenv.log( + "Cannot get snap tracking (channel) because it is not installed", + hookenv.WARNING, + ) + return + try: + return _snap_list()[snapname]['channel'] + except Exception as e: + # If it fails to get the channel information(ex. installed via resource), return nothing. 
+ hookenv.log( + "Cannot get snap tracking (channel): {}".format(e), + hookenv.WARNING, + ) + return + + +def _snap_args( + channel="stable", + devmode=False, + jailmode=False, + dangerous=False, + force_dangerous=False, + connect=None, + classic=False, + revision=None, +): + yield "--channel={}".format(channel) + if devmode is True: + yield "--devmode" + if jailmode is True: + yield "--jailmode" + if force_dangerous is True or dangerous is True: + yield "--dangerous" + if classic is True: + yield "--classic" + if revision is not None: + yield "--revision={}".format(revision) + + +def _install_local(path, **kw): + key = "snap.local.{}".format(path) + if data_changed(key, kw) or any_file_changed([path]): + cmd = ["snap", "install"] + cmd.extend(_snap_args(**kw)) + cmd.append("--dangerous") + cmd.append(path) + hookenv.log("Installing {} from local resource".format(path)) + subprocess.check_call(cmd) + + +def _install_store(snapname, **kw): + """Install snap from store + + :param snapname: Name of snap to install + :type snapname: str + :param kw: Keyword arguments to pass on to ``snap install`` + :type kw: Dict[str, str] + :raises: subprocess.CalledProcessError + """ + cmd = ["snap", "install"] + cmd.extend(_snap_args(**kw)) + cmd.append(snapname) + hookenv.log("Installing {} from store".format(snapname)) + + # Use tenacity decorator for Trusty support (See LP Bug #1934163) + @tenacity.retry( + wait=tenacity.wait_fixed(10), # seconds + stop=tenacity.stop_after_attempt(3), + reraise=True, + ) + def _run_install(): + try: + out = subprocess.check_output(cmd, stderr=subprocess.STDOUT) + hookenv.log( + 'Installation successful cmd="{}" output="{}"'.format(cmd, out), + level=hookenv.DEBUG, + ) + reactive.clear_flag(get_local_flag(snapname)) + except subprocess.CalledProcessError as cp: + hookenv.log( + 'Installation failed cmd="{}" returncode={} output="{}"'.format(cmd, cp.returncode, cp.output), + level=hookenv.ERROR, + ) + raise + + _run_install() + + +def 
_refresh_store(snapname, **kw): + if not data_changed("snap.opts.{}".format(snapname), kw): + return + + # --amend allows us to refresh from a local resource + cmd = ["snap", "refresh", "--amend"] + cmd.extend(_snap_args(**kw)) + cmd.append(snapname) + hookenv.log("Refreshing {} from store".format(snapname)) + out = subprocess.check_output(cmd, stderr=subprocess.STDOUT) + print(out) + + +def _resource_get(snapname): + """Used to fetch the resource path of the given name. + + This wrapper obtains a resource path and adds an additional + check to return False if the resource is zero length. + """ + res_path = hookenv.resource_get(snapname) + if res_path and os.stat(res_path).st_size != 0: + return res_path + return False + + +def get_available_refreshes(): + """Return a list of snaps which have refreshes available.""" + try: + out = subprocess.check_output(["snap", "refresh", "--list"]).decode("utf8") + except subprocess.CalledProcessError: + # If snap refresh fails for whatever reason, we should just return no + # refreshes available - LP:1869630. + return [] + + if out == "All snaps up to date.": + return [] + else: + return [line.split()[0] for line in out.splitlines()[1:]] + + +def is_refresh_available(snapname): + """Check whether a new revision is available for the given snap.""" + return reactive.is_flag_set(get_refresh_available_flag(snapname)) + + +def _check_refresh_available(snapname): + return snapname in get_available_refreshes() + + +def create_cohort_snapshot(snapname): + """Create a new cohort key for the given snap. + + Cohort keys represent a snapshot of the revision of a snap at the time + the key was created. These keys can then be used on any machine to lock + the revision of the snap until a new cohort is joined (or the key expires, + after 90 days). This is used to maintain consistency of the revision of + the snap across units or applications, and to manage the refresh of the + snap in a controlled manner. + + Returns a cohort key. 
+ """ + out = subprocess.check_output(["snap", "create-cohort", snapname]) + data = yaml.safe_load(out.decode("utf8")) + return data["cohorts"][snapname]["cohort-key"] + + +def join_cohort_snapshot(snapname, cohort_key): + """Refresh the snap into the given cohort. + + If the snap was previously in a cohort, this will update the revision + to that of the new cohort snapshot. Note that this does not change the + channel that the snap is in, only the revision within that channel. + """ + if is_local(snapname): + # joining a cohort can override a locally installed snap + hookenv.log("Skipping joining cohort for local snap: " "{}".format(snapname)) + return + subprocess.check_output(["snap", "refresh", snapname, "--cohort", cohort_key]) + # even though we just refreshed to the latest in the cohort, it's + # slightly possible that there's a newer rev available beyond the cohort + reactive.toggle_flag(get_refresh_available_flag(snapname), _check_refresh_available(snapname)) diff --git a/kubernetes-control-plane/lib/charms/layer/status.py b/kubernetes-control-plane/lib/charms/layer/status.py new file mode 100644 index 0000000..95b2997 --- /dev/null +++ b/kubernetes-control-plane/lib/charms/layer/status.py @@ -0,0 +1,189 @@ +import inspect +import errno +import subprocess +import yaml +from enum import Enum +from functools import wraps +from pathlib import Path + +from charmhelpers.core import hookenv +from charms import layer + + +_orig_call = subprocess.call +_statuses = {'_initialized': False, + '_finalized': False} + + +class WorkloadState(Enum): + """ + Enum of the valid workload states. 
+ + Valid options are: + + * `WorkloadState.MAINTENANCE` + * `WorkloadState.BLOCKED` + * `WorkloadState.WAITING` + * `WorkloadState.ACTIVE` + """ + # note: order here determines precedence of state + MAINTENANCE = 'maintenance' + BLOCKED = 'blocked' + WAITING = 'waiting' + ACTIVE = 'active' + + +def maintenance(message): + """ + Set the status to the `MAINTENANCE` state with the given operator message. + + # Parameters + `message` (str): Message to convey to the operator. + """ + status_set(WorkloadState.MAINTENANCE, message) + + +def maint(message): + """ + Shorthand alias for + [maintenance](status.md#charms.layer.status.maintenance). + + # Parameters + `message` (str): Message to convey to the operator. + """ + maintenance(message) + + +def blocked(message): + """ + Set the status to the `BLOCKED` state with the given operator message. + + # Parameters + `message` (str): Message to convey to the operator. + """ + status_set(WorkloadState.BLOCKED, message) + + +def waiting(message): + """ + Set the status to the `WAITING` state with the given operator message. + + # Parameters + `message` (str): Message to convey to the operator. + """ + status_set(WorkloadState.WAITING, message) + + +def active(message): + """ + Set the status to the `ACTIVE` state with the given operator message. + + # Parameters + `message` (str): Message to convey to the operator. + """ + status_set(WorkloadState.ACTIVE, message) + + +def status_set(workload_state, message): + """ + Set the status to the given workload state with a message. + + # Parameters + `workload_state` (WorkloadState or str): State of the workload. Should be + a [WorkloadState](status.md#charms.layer.status.WorkloadState) enum + member, or the string value of one of those members. + `message` (str): Message to convey to the operator. 
+ """ + if not isinstance(workload_state, WorkloadState): + workload_state = WorkloadState(workload_state) + if workload_state is WorkloadState.MAINTENANCE: + _status_set_immediate(workload_state, message) + return + layer = _find_calling_layer() + _statuses.setdefault(workload_state, []).append((layer, message)) + if not _statuses['_initialized'] or _statuses['_finalized']: + # We either aren't initialized, so the finalizer may never be run, + # or the finalizer has already run, so it won't run again. In either + # case, we need to manually invoke it to ensure the status gets set. + _finalize() + + +def _find_calling_layer(): + for frame in inspect.stack(): + # switch to .filename when trusty (Python 3.4) is EOL + fn = Path(frame[1]) + if fn.parent.stem not in ('reactive', 'layer', 'charms'): + continue + layer_name = fn.stem + if layer_name == 'status': + continue # skip our own frames + return layer_name + return None + + +def _initialize(): + if not _statuses['_initialized']: + if layer.options.get('status', 'patch-hookenv'): + _patch_hookenv() + hookenv.atexit(_finalize) + _statuses['_initialized'] = True + + +def _finalize(): + if _statuses['_initialized']: + # If we haven't been initialized, we can't truly be finalized. + # This makes things more efficient if an action sets a status + # but subsequently starts the reactive bus. 
+ _statuses['_finalized'] = True + charm_name = hookenv.charm_name() + charm_dir = Path(hookenv.charm_dir()) + with charm_dir.joinpath('layer.yaml').open() as fp: + includes = yaml.safe_load(fp.read()).get('includes', []) + layer_order = includes + [charm_name] + + for workload_state in WorkloadState: + if workload_state not in _statuses: + continue + if not _statuses[workload_state]: + continue + + def _get_key(record): + layer_name, message = record + if layer_name in layer_order: + return layer_order.index(layer_name) + else: + return 0 + + sorted_statuses = sorted(_statuses[workload_state], key=_get_key) + layer_name, message = sorted_statuses[-1] + _status_set_immediate(workload_state, message) + break + + +def _status_set_immediate(workload_state, message): + workload_state = workload_state.value + try: + hookenv.log('status-set: {}: {}'.format(workload_state, message), + hookenv.INFO) + ret = _orig_call(['status-set', workload_state, message]) + if ret == 0: + return + except OSError as e: + # ignore status-set not available on older controllers + if e.errno != errno.ENOENT: + raise + + +def _patch_hookenv(): + # we can't patch hookenv.status_set directly because other layers may have + # already imported it into their namespace, so we have to patch sp.call + subprocess.call = _patched_call + + +@wraps(_orig_call) +def _patched_call(cmd, *args, **kwargs): + if not isinstance(cmd, list) or cmd[0] != 'status-set': + return _orig_call(cmd, *args, **kwargs) + _, workload_state, message = cmd + status_set(workload_state, message) + return 0 # make hookenv.status_set not emit spurious failure logs diff --git a/kubernetes-control-plane/lib/charms/layer/tls_client.py b/kubernetes-control-plane/lib/charms/layer/tls_client.py new file mode 100644 index 0000000..b2980dc --- /dev/null +++ b/kubernetes-control-plane/lib/charms/layer/tls_client.py @@ -0,0 +1,61 @@ +# Copyright 2016-2017 Canonical Ltd. +# +# This file is part of the tls-client layer for Juju. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from charmhelpers.core.hookenv import log +from charmhelpers.core import unitdata + +from charms.reactive import remove_state +from charms.reactive import endpoint_from_flag + + +def reset_certificate_write_flag(cert_type): + """ + Reset the certificate written flag so notification will work on the next + write cert_type must be 'server', 'client', or 'ca' to indicate type of + certificate + """ + if cert_type not in ['server', 'client', 'ca']: + log('Unknown certificate type!') + else: + remove_state('tls_client.{0}.certificate.written'.format(cert_type)) + + +def request_server_cert(common_name, sans=None, crt_path=None, key_path=None): + tls = endpoint_from_flag('certificates.available') + tls.request_server_cert(common_name, sans) + if not crt_path and not key_path: + return + kv = unitdata.kv() + cert_paths = kv.get('layer.tls-client.cert-paths', {}) + cert_paths.setdefault('server', {})[common_name] = { + 'crt': str(crt_path), + 'key': str(key_path), + } + kv.set('layer.tls-client.cert-paths', cert_paths) + + +def request_client_cert(common_name, sans=None, crt_path=None, key_path=None): + tls = endpoint_from_flag('certificates.available') + tls.request_client_cert(common_name, sans) + if not crt_path and not key_path: + return + kv = unitdata.kv() + cert_paths = kv.get('layer.tls-client.cert-paths', {}) + cert_paths.setdefault('client', {})[common_name] = { + 'crt': str(crt_path), + 'key': str(key_path), + } + 
kv.set('layer.tls-client.cert-paths', cert_paths) diff --git a/kubernetes-control-plane/lib/charms/layer/vault_kv.py b/kubernetes-control-plane/lib/charms/layer/vault_kv.py new file mode 100644 index 0000000..c2de0b6 --- /dev/null +++ b/kubernetes-control-plane/lib/charms/layer/vault_kv.py @@ -0,0 +1,273 @@ +import json +from hashlib import md5 + +from charmhelpers.core import hookenv +from charmhelpers.core import unitdata +from charmhelpers.contrib.openstack.vaultlocker import retrieve_secret_id +from charms.reactive import data_changed +from charms.reactive import endpoint_from_flag +from charms.reactive import set_flag, clear_flag, get_flags + +import requests +import hvac + + +def log(msg, *args, **kwargs): + hookenv.log( + "vault-kv.log: {}".format(msg.format(*args, **kwargs)), level=hookenv.DEBUG + ) + + +class VaultNotReady(Exception): + """ + Exception indicating that Vault was accessed before it was ready. + """ + + pass + + +class _Singleton(type): + # metaclass to make a class a singleton + def __call__(cls, *args, **kwargs): + if not isinstance(getattr(cls, "_singleton_instance", None), cls): + cls._singleton_instance = super().__call__(*args, **kwargs) + return cls._singleton_instance + + +class _VaultBaseKV(dict, metaclass=_Singleton): + _path = None # set by subclasses + + def __init__(self): + response = self._client.read(self._path) + data = response["data"] if response else {} + super().__init__(data) + + @property + def _client(self): + """ + Get an authenticated hvac.Client. + + The authentication token for the client is only valid for 60 seconds, + after which a new client will need to be authenticated. 
+ """ + try: + log( + "Logging {cls} in to {vault_url}", + cls=type(self).__name__, + vault_url=self._config["vault_url"], + ) + client = hvac.Client(url=self._config["vault_url"]) + client.auth_approle(self._config["role_id"], self._config["secret_id"]) + return client + except ( + requests.exceptions.ConnectionError, + hvac.exceptions.VaultDown, + hvac.exceptions.VaultNotInitialized, + hvac.exceptions.BadGateway, + ) as e: + raise VaultNotReady() from e + + @property + def _config(self): + _VaultBaseKV._config = get_vault_config() + return _VaultBaseKV._config + + def __setitem__(self, key, value): + log("Writing data to vault") + self._client.write(self._path, **{key: value}) + super().__setitem__(key, value) + + def set(self, key, value): + # alias in case a KV-like interface is preferred + self[key] = value + + +class VaultUnitKV(_VaultBaseKV): + """ + A simplified interface for storing data in Vault, with the data scoped to + the current unit. + + Keys must be strings, but data can be structured as long as it is + JSON-serializable. + + This class can be used as a dict, or you can use `self.get` and `self.set` + for a more KV-like interface. When values are set, via either style, they + are immediately persisted to Vault. Values are also cached in memory. + + Note: This class is a singleton. + """ + + def __init__(self): + unit_num = hookenv.local_unit().split("/")[1] + self._path = "{}/kv/unit/{}".format(self._config["secret_backend"], unit_num) + super().__init__() + + +class VaultAppKV(_VaultBaseKV): + """ + A simplified interface for storing data in Vault, with data shared by every + unit of the application. + + Keys must be strings, but data can be structured as long as it is + JSON-serializable. + + This class can be used as a dict, or you can use `self.get` and `self.set` + for a more KV-like interface. When values are set, via either style, they + are immediately persisted to Vault. Values are also cached in memory. 
+ + Note: This is intended to be used as a secure replacement for leadership + data. Therefore, only the leader should set data here. This is not + enforced, but data changed by non-leaders will not trigger hooks on other + units, so they may not be notified of changes in a timely fashion. + + Note: This class is a singleton. + """ + + def __init__(self): + self._path = "{}/kv/app".format(self._config["secret_backend"]) + self._hash_path = "{}/kv/app-hashes/{}".format( + self._config["secret_backend"], hookenv.local_unit().split("/")[1] + ) + super().__init__() + self._load_hashes() + + def _load_hashes(self): + log("Reading hashes from {}", self._hash_path) + response = self._client.read(self._hash_path) + self._old_hashes = response["data"] if response else {} + self._new_hashes = {} + for key in self.keys(): + self._rehash(key) + + def _rehash(self, key): + serialized = json.dumps(self[key], sort_keys=True).encode("utf8") + self._new_hashes[key] = md5(serialized).hexdigest() + + def __setitem__(self, key, value): + super().__setitem__(key, value) + self._rehash(key) + self._manage_flags(key) + + def _manage_flags(self, key): + flag_any_changed = "layer.vault-kv.app-kv.changed" + flag_key_changed = "layer.vault-kv.app-kv.changed.{}".format(key) + flag_key_set = "layer.vault-kv.app-kv.set.{}".format(key) + if self.is_changed(key): + # clear then set flag to ensure triggers are run even if the main + # flag was never cleared + clear_flag(flag_any_changed) + set_flag(flag_any_changed) + clear_flag(flag_key_changed) + set_flag(flag_key_changed) + if self.get(key) is not None: + set_flag(flag_key_set) + else: + clear_flag(flag_key_set) + + @classmethod + def _clear_all_flags(cls): + for flag in get_flags(): + if flag.startswith("layer.vault-kv.app-kv."): + clear_flag(flag) + + def is_changed(self, key): + """ + Determine if the value for the given key has changed. + + In order to detect changes, hashes of the values are also stored + in Vault. 
These hashes are updated automatically at exit via + `self.update_hashes()`. + """ + return self._new_hashes.get(key) != self._old_hashes.get(key) + + def any_changed(self): + """ + Determine if any data has changed. + + In order to detect changes, hashes of the values are also stored + in Vault. These hashes are updated automatically at exit via + `self.update_hashes()`. + """ + all_keys = self._new_hashes.keys() | self._old_hashes.keys() + return any(self.is_changed(key) for key in all_keys) + + def update_hashes(self): + """ + Update the hashes in Vault, thus marking all fields as unchanged. + + This is done automatically at exit. + """ + log("Writing hashes to {}", self._hash_path) + self._client.write(self._hash_path, **self._new_hashes) + self._old_hashes.clear() + self._old_hashes.update(self._new_hashes) + + +def get_vault_config(): + """ + Get the config data needed for this application to access Vault. + + This is only needed if you're using another application, such as + VaultLocker, using the secrets backend provided by this layer. + + Returns a dictionary containing the following keys: + + * vault_url + * secret_backend + * role_id + * secret_id + + Note: This data is cached in [UnitData][] so anything with access to that + could access Vault as this application. + + If any of this data changes (such as the secret_id being rotated), this + layer will set the `layer.vault-kv.config.changed` flag. + + If this is called before the Vault relation is available, it will raise + `VaultNotReady`. 
+ + [UnitData]: https://charm-helpers.readthedocs.io/en/latest/api/charmhelpers.core.unitdata.html + """ # noqa + vault = endpoint_from_flag("vault-kv.available") + if not (vault and vault.vault_url and vault.unit_role_id and vault.unit_token): + raise VaultNotReady() + vault_config = { + "vault_url": vault.vault_url, + "secret_backend": _get_secret_backend(), + "role_id": vault.unit_role_id, + "secret_id": _get_secret_id(vault), + } + return vault_config + + +def _get_secret_backend(): + app_name = hookenv.application_name() + return "charm-{}".format(app_name) + + +def _get_secret_id(vault): + token = vault.unit_token + if data_changed("layer.vault-kv.token", token): + log("Changed unit_token, getting new secret_id") + # token is one-shot, but if it changes it might mean that we're + # being told to rotate the secret ID, or we might not have fetched + # one yet + vault_url = vault.vault_url + try: + secret_id = retrieve_secret_id(vault_url, token) + except ( + requests.exceptions.ConnectionError, + hvac.exceptions.VaultDown, + hvac.exceptions.VaultNotInitialized, + hvac.exceptions.BadGateway, + ) as e: + raise VaultNotReady() from e + unitdata.kv().set("layer.vault-kv.secret_id", secret_id) + # have to flush immediately because if we don't and hit some error + # elsewhere, it could get us into a state where we have forgotten the + # secret ID and can't retrieve it again because we've already used the + # token + unitdata.kv().flush() + else: + secret_id = unitdata.kv().get("layer.vault-kv.secret_id") + return secret_id diff --git a/kubernetes-control-plane/lib/charms/layer/vaultlocker.py b/kubernetes-control-plane/lib/charms/layer/vaultlocker.py new file mode 100644 index 0000000..235224c --- /dev/null +++ b/kubernetes-control-plane/lib/charms/layer/vaultlocker.py @@ -0,0 +1,170 @@ +import json +from pathlib import Path +from subprocess import check_call, check_output, CalledProcessError +from uuid import uuid4 + +from charms.reactive import set_flag +from 
charmhelpers.core import hookenv +from charmhelpers.core import host +from charmhelpers.core import unitdata +from charmhelpers.contrib.openstack.vaultlocker import ( # noqa + retrieve_secret_id, + write_vaultlocker_conf, +) +from charmhelpers.contrib.storage.linux.utils import ( + is_block_device, + is_device_mounted, + mkfs_xfs, +) + + +LOOP_ENVS = Path('/etc/vaultlocker/loop-envs') + + +class VaultLockerError(Exception): + """ + Wrapper for exceptions raised when configuring VaultLocker. + """ + def __init__(self, msg, *args, **kwargs): + super().__init__(msg.format(*args, **kwargs)) + + +def encrypt_storage(storage_name, mountbase=None): + """ + Set up encryption for the given Juju storage entry, and optionally create + and mount XFS filesystems on the encrypted storage entry location(s). + + Note that the storage entry **must** be defined with ``type: block``. + + If ``mountbase`` is not given, the location(s) will not be formatted or + mounted. When interacting with or mounting the location(s) manually, the + name returned by :func:`decrypted_device` called on the storage entry's + location should be used in place of the raw location. + + If the storage is defined as ``multiple``, the individual locations + will be mounted at ``{mountbase}/{storage_name}/{num}`` where ``{num}`` + is based on the storage ID. Otherwise, the storage will mounted at + ``{mountbase}/{storage_name}``. 
+ """ + metadata = hookenv.metadata() + storage_metadata = metadata['storage'][storage_name] + if storage_metadata['type'] != 'block': + raise VaultLockerError('Cannot encrypt non-block storage: {}', + storage_name) + multiple = 'multiple' in storage_metadata + for storage_id in hookenv.storage_list(): + if not storage_id.startswith(storage_name + '/'): + continue + storage_location = hookenv.storage_get('location', storage_id) + if mountbase and multiple: + mountpoint = Path(mountbase) / storage_id + elif mountbase: + mountpoint = Path(mountbase) / storage_name + else: + mountpoint = None + encrypt_device(storage_location, mountpoint) + set_flag('layer.vaultlocker.{}.ready'.format(storage_id)) + set_flag('layer.vaultlocker.{}.ready'.format(storage_name)) + + +def encrypt_device(device, mountpoint=None, uuid=None): + """ + Set up encryption for the given block device, and optionally create and + mount an XFS filesystem on the encrypted device. + + If ``mountpoint`` is not given, the device will not be formatted or + mounted. When interacting with or mounting the device manually, the + name returned by :func:`decrypted_device` called on the device name + should be used in place of the raw device name. + """ + if not is_block_device(device): + raise VaultLockerError('Cannot encrypt non-block device: {}', device) + if is_device_mounted(device): + raise VaultLockerError('Cannot encrypt mounted device: {}', device) + hookenv.log('Encrypting device: {}'.format(device)) + if uuid is None: + uuid = str(uuid4()) + try: + check_call(['vaultlocker', 'encrypt', '--uuid', uuid, device]) + unitdata.kv().set('layer.vaultlocker.uuids.{}'.format(device), uuid) + if mountpoint: + mapped_device = decrypted_device(device) + hookenv.log('Creating filesystem on {} ({})'.format(mapped_device, + device)) + # If this fails, it's probalby due to the size of the loopback + # backing file that is defined by the `dd`. 
+ mkfs_xfs(mapped_device) + Path(mountpoint).mkdir(mode=0o755, parents=True, exist_ok=True) + hookenv.log('Mounting filesystem for {} ({}) at {}' + ''.format(mapped_device, device, mountpoint)) + host.mount(mapped_device, mountpoint, filesystem='xfs') + host.fstab_add(mapped_device, mountpoint, 'xfs', ','.join([ + "defaults", + "nofail", + "x-systemd.requires=vaultlocker-decrypt@{uuid}.service".format( + uuid=uuid, + ), + "comment=vaultlocker", + ])) + except (CalledProcessError, OSError) as e: + raise VaultLockerError('Error configuring VaultLocker') from e + + +def decrypted_device(device): + """ + Returns the mapped device name for the decrypted version of the encrypted + device. + + This mapped device name is what should be used for mounting the device. + """ + uuid = unitdata.kv().get('layer.vaultlocker.uuids.{}'.format(device)) + if not uuid: + return None + return '/dev/mapper/crypt-{uuid}'.format(uuid=uuid) + + +def create_encrypted_loop_mount(mount_path, block_size='1M', block_count=20, + backing_file=None): + """ + Creates a persistent loop device, encrypts it, formats it as XFS, and + mounts it at the given `mount_path`. + + A backing file will be created under `/var/lib/vaultlocker/backing_files`, + in a UUID named file, according to `block_size` and `block_count` + parameters, which map to `bs` and `count` of the `dd` command. Note that + the backing file must be a bit over 16M to allow for the XFS file system + plus some additional metadata needed for the encryption. It is not + recommended to go below the default of 20M (20 blocks, 1M each). + + The `backing_file` parameter can be used to change the location where the + backing file is created. 
+ """ + uuid = str(uuid4()) + if backing_file is None: + backing_file = Path('/var/lib/vaultlocker/backing_files') / uuid + backing_file.parent.mkdir(parents=True, exist_ok=True) + else: + backing_file = Path(backing_file) + if backing_file.exists(): + raise VaultLockerError('Backing file already exists: {}', + backing_file) + + try: + # ensure loop devices are enabled + check_call(['modprobe', 'loop']) + # create the backing file filled with random data + check_call(['dd', 'if=/dev/urandom', 'of={}'.format(backing_file), + 'bs=8M', 'count=4']) + # claim an unused loop device + output = check_output(['losetup', '--show', '-f', str(backing_file)]) + device_name = output.decode('utf8').strip() + # encrypt the new loop device + encrypt_device(device_name, str(mount_path), uuid) + # setup the service to ensure loop device is restored after reboot + (LOOP_ENVS / uuid).write_text(''.join([ + 'BACK_FILE={}\n'.format(backing_file), + ])) + check_call(['systemctl', 'enable', + 'vaultlocker-loop@{}.service'.format(uuid)]) + except (CalledProcessError, OSError) as e: + raise VaultLockerError('Error configuring VaultLocker') from e diff --git a/kubernetes-control-plane/lib/charms/leadership.py b/kubernetes-control-plane/lib/charms/leadership.py new file mode 100644 index 0000000..d2a95fa --- /dev/null +++ b/kubernetes-control-plane/lib/charms/leadership.py @@ -0,0 +1,68 @@ +# Copyright 2015-2016 Canonical Ltd. +# +# This file is part of the Leadership Layer for Juju. +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License version 3, as +# published by the Free Software Foundation. +# +# This program is distributed in the hope that it will be useful, but +# WITHOUT ANY WARRANTY; without even the implied warranties of +# MERCHANTABILITY, SATISFACTORY QUALITY, or FITNESS FOR A PARTICULAR +# PURPOSE. See the GNU General Public License for more details. 
+# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . + +from charmhelpers.core import hookenv +from charmhelpers.core import unitdata + +from charms import reactive +from charms.reactive import not_unless + + +__all__ = ['leader_get', 'leader_set'] + + +@not_unless('leadership.is_leader') +def leader_set(*args, **kw): + '''Change leadership settings, per charmhelpers.core.hookenv.leader_set. + + Settings may either be passed in as a single dictionary, or using + keyword arguments. All values must be strings. + + The leadership.set.{key} reactive state will be set while the + leadership hook environment setting remains set. + + Changed leadership settings will set the leadership.changed.{key} + and leadership.changed states. These states will remain set until + the following hook. + + These state changes take effect immediately on the leader, and + in future hooks run on non-leaders. In this way both leaders and + non-leaders can share handlers, waiting on these states. 
+ ''' + if args: + if len(args) > 1: + raise TypeError('leader_set() takes 1 positional argument but ' + '{} were given'.format(len(args))) + else: + settings = dict(args[0]) + else: + settings = {} + settings.update(kw) + previous = unitdata.kv().getrange('leadership.settings.', strip=True) + + for key, value in settings.items(): + if value != previous.get(key): + reactive.set_state('leadership.changed.{}'.format(key)) + reactive.set_state('leadership.changed') + reactive.helpers.toggle_state('leadership.set.{}'.format(key), + value is not None) + hookenv.leader_set(settings) + unitdata.kv().update(settings, prefix='leadership.settings.') + + +def leader_get(attribute=None): + '''Return leadership settings, per charmhelpers.core.hookenv.leader_get.''' + return hookenv.leader_get(attribute) diff --git a/kubernetes-control-plane/lib/debug_script.py b/kubernetes-control-plane/lib/debug_script.py new file mode 100644 index 0000000..e156924 --- /dev/null +++ b/kubernetes-control-plane/lib/debug_script.py @@ -0,0 +1,8 @@ +import os + +dir = os.environ["DEBUG_SCRIPT_DIR"] + + +def open_file(path, *args, **kwargs): + """ Open a file within the debug script dir """ + return open(os.path.join(dir, path), *args, **kwargs) diff --git a/kubernetes-control-plane/lxd-profile.yaml b/kubernetes-control-plane/lxd-profile.yaml new file mode 100644 index 0000000..6b4babc --- /dev/null +++ b/kubernetes-control-plane/lxd-profile.yaml @@ -0,0 +1,16 @@ +name: juju-default-k8s-deployment-0 +config: + linux.kernel_modules: ip_tables,ip6_tables,netlink_diag,nf_nat,overlay + raw.lxc: | + lxc.apparmor.profile=unconfined + lxc.mount.auto=proc:rw sys:rw + lxc.cgroup.devices.allow=a + lxc.cap.drop= + security.nesting: true + security.privileged: true +description: "" +devices: + aadisable: + path: /dev/kmsg + source: /dev/kmsg + type: unix-char diff --git a/kubernetes-control-plane/make_docs b/kubernetes-control-plane/make_docs new file mode 100644 index 0000000..dcd4c1f --- /dev/null +++ 
b/kubernetes-control-plane/make_docs @@ -0,0 +1,20 @@ +#!.tox/py3/bin/python + +import os +import sys +from shutil import rmtree +from unittest.mock import patch + +import pydocmd.__main__ + + +with patch('charmhelpers.core.hookenv.metadata') as metadata: + sys.path.insert(0, 'lib') + sys.path.insert(1, 'reactive') + print(sys.argv) + if len(sys.argv) == 1: + sys.argv.extend(['build']) + pydocmd.__main__.main() + rmtree('_build') + if os.path.exists('.unit-state.db'): + os.remove('.unit-state.db') diff --git a/kubernetes-control-plane/manifest.yaml b/kubernetes-control-plane/manifest.yaml new file mode 100644 index 0000000..9f3ccbf --- /dev/null +++ b/kubernetes-control-plane/manifest.yaml @@ -0,0 +1,27 @@ +analysis: + attributes: + - name: language + result: python + - name: framework + result: reactive +bases: +- architectures: + - amd64 + - s390x + - arm64 + channel: '20.04' + name: ubuntu +- architectures: + - amd64 + - s390x + - arm64 + channel: '22.04' + name: ubuntu +- architectures: + - amd64 + - s390x + - arm64 + channel: '18.04' + name: ubuntu +charmcraft-started-at: '2022-07-14T00:00:00.000000Z' +charmcraft-version: 1.7.1 diff --git a/kubernetes-control-plane/metadata.yaml b/kubernetes-control-plane/metadata.yaml new file mode 100644 index 0000000..3c4f92e --- /dev/null +++ b/kubernetes-control-plane/metadata.yaml @@ -0,0 +1,166 @@ +"name": "kubernetes-control-plane" +"summary": "The Kubernetes control plane." +"maintainers": +- "Tim Van Steenburgh " +- "George Kraft " +- "Rye Terrell " +- "Konstantinos Tsakalozos " +- "Charles Butler " +- "Matthew Bruzek " +- "Mike Wilson " +- "Joe Borg " +"description": | + Kubernetes is an open-source platform for deploying, scaling, and operations + of application containers across a cluster of hosts. Kubernetes is portable + in that it works with public, private, and hybrid clouds. Extensible through + a pluggable infrastructure. 
Self healing in that it will automatically + restart and place containers on healthy nodes if a node ever goes away. +"tags": +- "misc" +- "infrastructure" +- "kubernetes" +- "master" +- "control-plane" +"series": +- "focal" +- "jammy" +- "bionic" +"requires": + "certificates": + "interface": "tls-certificates" + "vault-kv": + "interface": "vault-kv" + "ha": + "interface": "hacluster" + "etcd": + "interface": "etcd" + "loadbalancer": + # Use of this relation is strongly discouraged in favor of the more + # explicit loadbalancer-internal / loadbalancer-external relations. + "interface": "public-address" + "ceph-storage": + # Deprecated: Use the ceph-client relation instead. + "interface": "ceph-admin" + "ceph-client": + "interface": "ceph-client" + "aws": + "interface": "aws-integration" + "gcp": + "interface": "gcp-integration" + "openstack": + "interface": "openstack-integration" + "vsphere": + "interface": "vsphere-integration" + "azure": + "interface": "azure-integration" + "keystone-credentials": + "interface": "keystone-credentials" + "dns-provider": + "interface": "kube-dns" + "loadbalancer-internal": + # Indicates that the LB should not be public and should use internal + # networks if available. Intended for control plane and other internal use. + "interface": "loadbalancer" + "loadbalancer-external": + # Indicates that the LB should be public facing. Intended for clients which + # must reach the API server via external networks. + "interface": "loadbalancer" + "external-cloud-provider": + "interface": "external_cloud_provider" +"provides": + "nrpe-external-master": + "interface": "nrpe-external-master" + "scope": "container" + "container-runtime": + "interface": "container-runtime" + "scope": "container" + "kube-api-endpoint": + # Use of this relation is strongly discouraged as the API endpoints will be + # provided via the kube-control relation. 
However, it can be used to + # override those endpoints if you need to inject a reverse proxy between + # the control-plane and workers using a charm which only supports the old MITM + # style relations. Note, though, that since this reverse proxy will not be + # visible to the control-plane, it will not be used in any of the client or + # component kube config files. + "interface": "http" + "kube-control": + "interface": "kube-control" + "cni": + "interface": "kubernetes-cni" + "scope": "container" + "prometheus": + "interface": "prometheus-manual" + "grafana": + "interface": "grafana-dashboard" + "aws-iam": + "interface": "aws-iam" + "scope": "container" +"peers": + "coordinator": + "interface": "coordinator" + "kube-masters": + "interface": "kube-masters" +"docs": "https://discourse.charmhub.io/t/kubernetes-control-plane-docs-index/6214" +"resources": + "cni-amd64": + "type": "file" + "filename": "cni.tgz" + "description": "CNI plugins for amd64" + "cni-arm64": + "type": "file" + "filename": "cni.tgz" + "description": "CNI plugins for arm64" + "cni-s390x": + "type": "file" + "filename": "cni.tgz" + "description": "CNI plugins for s390x" + "core": + "type": "file" + "filename": "core.snap" + "description": | + core snap + [DEPRECATED] in favor of using a Snap Store Proxy. + See https://ubuntu.com/kubernetes/docs/proxies for more details. + "kubectl": + "type": "file" + "filename": "kubectl.snap" + "description": | + kubectl snap + [DEPRECATED] in favor of using a Snap Store Proxy. + See https://ubuntu.com/kubernetes/docs/proxies for more details. + "kube-apiserver": + "type": "file" + "filename": "kube-apiserver.snap" + "description": | + kube-apiserver snap + [DEPRECATED] in favor of using a Snap Store Proxy. + See https://ubuntu.com/kubernetes/docs/proxies for more details. 
+ "kube-controller-manager": + "type": "file" + "filename": "kube-controller-manager.snap" + "description": | + kube-controller-manager snap + [DEPRECATED] in favor of using a Snap Store Proxy. + See https://ubuntu.com/kubernetes/docs/proxies for more details. + "kube-scheduler": + "type": "file" + "filename": "kube-scheduler.snap" + "description": | + kube-scheduler snap + [DEPRECATED] in favor of using a Snap Store Proxy. + See https://ubuntu.com/kubernetes/docs/proxies for more details. + "cdk-addons": + "type": "file" + "filename": "cdk-addons.snap" + "description": | + CDK addons snap + [DEPRECATED] in favor of using a Snap Store Proxy. + See https://ubuntu.com/kubernetes/docs/proxies for more details. + "kube-proxy": + "type": "file" + "filename": "kube-proxy.snap" + "description": | + kube-proxy snap + [DEPRECATED] in favor of using a Snap Store Proxy. + See https://ubuntu.com/kubernetes/docs/proxies for more details. +"subordinate": !!bool "false" diff --git a/kubernetes-control-plane/metrics.yaml b/kubernetes-control-plane/metrics.yaml new file mode 100644 index 0000000..0d422ff --- /dev/null +++ b/kubernetes-control-plane/metrics.yaml @@ -0,0 +1,38 @@ +metrics: + juju-units: {} + pods: + type: gauge + description: number of pods + command: /snap/bin/kubectl --kubeconfig /root/.kube/config get po --all-namespaces | tail -n+2 | wc -l + services: + type: gauge + description: number of services + command: /snap/bin/kubectl --kubeconfig /root/.kube/config get svc --all-namespaces | tail -n+2 | wc -l + replicasets: + type: gauge + description: number of replicasets + command: /snap/bin/kubectl --kubeconfig /root/.kube/config get rs --all-namespaces | tail -n+2 | wc -l + replicationcontrollers: + type: gauge + description: number of replicationcontrollers + command: /snap/bin/kubectl --kubeconfig /root/.kube/config get rc --all-namespaces | tail -n+2 | wc -l + nodes: + type: gauge + description: number of kubernetes nodes + command: /snap/bin/kubectl 
--kubeconfig /root/.kube/config get nodes | tail -n+2 | wc -l + nodes-gpu: + type: gauge + description: number of gpu-enabled kubernetes nodes + command: /snap/bin/kubectl --kubeconfig /root/.kube/config get nodes -l gpu=true -o name | wc -l + persistentvolume: + type: gauge + description: number of pv + command: /snap/bin/kubectl --kubeconfig /root/.kube/config get pv | tail -n+2 | wc -l + persistentvolumeclaims: + type: gauge + description: number of claims + command: /snap/bin/kubectl --kubeconfig /root/.kube/config get pvc --all-namespaces | tail -n+2 | wc -l + serviceaccounts: + type: gauge + description: number of sa + command: /snap/bin/kubectl --kubeconfig /root/.kube/config get sa --all-namespaces | tail -n+2 | wc -l diff --git a/kubernetes-control-plane/pydocmd.yml b/kubernetes-control-plane/pydocmd.yml new file mode 100644 index 0000000..7b3a610 --- /dev/null +++ b/kubernetes-control-plane/pydocmd.yml @@ -0,0 +1,10 @@ +site_name: 'VaultLocker Block Device Encryption Layer' + +generate: + - vaultlocker.md: + - charms.layer.vaultlocker+ + +pages: + - VaultLocker Block Device Encryption Layer: vaultlocker.md + +gens_dir: docs diff --git a/kubernetes-control-plane/pyproject.toml b/kubernetes-control-plane/pyproject.toml new file mode 100644 index 0000000..db0dcd0 --- /dev/null +++ b/kubernetes-control-plane/pyproject.toml @@ -0,0 +1,3 @@ +[tool.black] +line-length=120 +target-version=['py35'] diff --git a/kubernetes-control-plane/reactive/__init__.py b/kubernetes-control-plane/reactive/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/kubernetes-control-plane/reactive/apt.py b/kubernetes-control-plane/reactive/apt.py new file mode 100644 index 0000000..8832296 --- /dev/null +++ b/kubernetes-control-plane/reactive/apt.py @@ -0,0 +1,158 @@ +# Copyright 2015-2020 Canonical Ltd. +# +# This file is part of the Apt layer for Juju. 
+# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License version 3, as +# published by the Free Software Foundation. +# +# This program is distributed in the hope that it will be useful, but +# WITHOUT ANY WARRANTY; without even the implied warranties of +# MERCHANTABILITY, SATISFACTORY QUALITY, or FITNESS FOR A PARTICULAR +# PURPOSE. See the GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . + +''' +charms.reactive helpers for dealing with deb packages. + +Add apt package sources using add_source(). Queue deb packages for +installation with install(). Configure and work with your software +once the apt.installed.{packagename} flag is set. +''' +import os.path +import subprocess +import re + +from charmhelpers import fetch +from charmhelpers.core import hookenv +from charmhelpers.core.hookenv import DEBUG, ERROR, WARNING +from charms import layer +from charms.layer import status +from charms import reactive +from charms.reactive import when, when_not + +import charms.apt + + +@when('apt.needs_update') +def update(): + charms.apt.update() + + +@when('apt.queued_installs') +@when_not('apt.needs_update') +def install_queued(): + charms.apt.install_queued() + + +@when_not('apt.queued_installs') +def ensure_package_status(): + charms.apt.ensure_package_status() + + +def filter_installed_packages(packages): + # Don't use fetch.filter_installed_packages, as it depends on python-apt + # and not available if the basic layer's use_site_packages option is off + cmd = ['dpkg-query', '--show', r'--showformat=${Package}\n'] + installed = set(subprocess.check_output(cmd, universal_newlines=True).split()) + + # list of packages that are not installed + not_installed = set(packages) - installed + + # now we want to check for any regex in the installation of the packages + not_installed_iterable = 
not_installed.copy() + for pkg in not_installed_iterable: + # grab the pattern that we want to match against the packages + p = re.compile(pkg) + for pkg2 in installed: + matched = p.search(pkg2) + if matched: + not_installed.remove(pkg) + break + + return not_installed + + +def clear_removed_package_flags(): + """On hook startup, clear install flags for removed packages.""" + removed = filter_installed_packages(charms.apt.installed()) + if removed: + hookenv.log('{} missing packages ({})'.format(len(removed), ','.join(removed)), WARNING) + for package in removed: + reactive.clear_flag('apt.installed.{}'.format(package)) + + +def add_implicit_signing_keys(): + """Add keys specified in layer.yaml + + The charm can ship trusted keys, avoiding the need to specify + them in config.yaml. We need to add them before we attempt + to add any custom sources, or apt will block under Bionic + if we attempt to add a source before the key becomes trusted. + """ + opts = layer.options() + if 'apt' not in opts or 'keys' not in opts['apt']: + return + keys = opts['apt']['keys'] + for p in keys: + full_p = os.path.join(hookenv.charm_dir(), p) + if os.path.exists(full_p): + hookenv.log("Adding key {}".format(p), DEBUG) + subprocess.check_call( + ['apt-key', 'add', full_p], + stdin=subprocess.DEVNULL, + stdout=subprocess.DEVNULL, + stderr=subprocess.DEVNULL, + ) + else: + hookenv.log('Key {!r} does not exist'.format(full_p), ERROR) + + +def configure_sources(): + """Add user specified package sources from the service configuration. + + See charmhelpers.fetch.configure_sources for details. + """ + config = hookenv.config() + + # We don't have enums, so we need to validate this ourselves. + package_status = config.get('package_status') or '' + if package_status not in ('hold', 'install'): + status.blocked('Unknown package_status {}'.format(package_status)) + # Die before further hooks are run. 
This isn't very nice, but + # there is no other way to inform the operator that they have + # invalid configuration. + raise SystemExit(0) + + sources = config.get('install_sources') or '' + keys = config.get('install_keys') or '' + if reactive.helpers.data_changed('apt.configure_sources', (sources, keys)): + fetch.configure_sources(update=False, sources_var='install_sources', keys_var='install_keys') + reactive.set_flag('apt.needs_update') + + # Clumsy 'config.get() or' per Bug #1641362 + extra_packages = sorted((config.get('extra_packages') or '').split()) + if extra_packages: + charms.apt.queue_install(extra_packages) + + +def queue_layer_packages(): + """Add packages listed in build-time layer options.""" + # Both basic and apt layer. basic layer will have already installed + # its defined packages, but rescheduling it here gets the apt layer + # flag set and they will pinned as any other apt layer installed + # package. + opts = layer.options() + for section in ['basic', 'apt']: + if section in opts and 'packages' in opts[section]: + charms.apt.queue_install(opts[section]['packages']) + + +hookenv.atstart(hookenv.log, 'Initializing Apt Layer') +hookenv.atstart(clear_removed_package_flags) +hookenv.atstart(add_implicit_signing_keys) +hookenv.atstart(configure_sources) +hookenv.atstart(queue_layer_packages) +hookenv.atstart(charms.apt.reset_application_version) diff --git a/kubernetes-control-plane/reactive/cdk_service_kicker.py b/kubernetes-control-plane/reactive/cdk_service_kicker.py new file mode 100644 index 0000000..f7fd33a --- /dev/null +++ b/kubernetes-control-plane/reactive/cdk_service_kicker.py @@ -0,0 +1,32 @@ +import os +import subprocess +from charms import layer +from charms.reactive import hook, when_not, remove_state, set_state +from charmhelpers.core.templating import render + + +@hook('upgrade-charm') +def upgrade_charm(): + remove_state('cdk-service-kicker.installed') + + +@when_not('cdk-service-kicker.installed') +def 
install_cdk_service_kicker(): + ''' Installs the cdk-service-kicker service. Workaround for + https://github.com/juju-solutions/bundle-canonical-kubernetes/issues/357 + ''' + source = 'cdk-service-kicker' + dest = '/usr/bin/cdk-service-kicker' + services = layer.options('cdk-service-kicker').get('services') + context = {'services': ' '.join(services)} + render(source, dest, context) + os.chmod('/usr/bin/cdk-service-kicker', 0o775) + + source = 'cdk-service-kicker.service' + dest = '/etc/systemd/system/cdk-service-kicker.service' + context = {} + render(source, dest, context) + command = ['systemctl', 'enable', 'cdk-service-kicker'] + subprocess.check_call(command) + + set_state('cdk-service-kicker.installed') diff --git a/kubernetes-control-plane/reactive/coordinator.py b/kubernetes-control-plane/reactive/coordinator.py new file mode 100644 index 0000000..474a95d --- /dev/null +++ b/kubernetes-control-plane/reactive/coordinator.py @@ -0,0 +1,71 @@ +# Copyright 2015-2016 Canonical Ltd. +# +# This file is part of the Coordinator Layer for Juju. +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License version 3, as +# published by the Free Software Foundation. +# +# This program is distributed in the hope that it will be useful, but +# WITHOUT ANY WARRANTY; without even the implied warranties of +# MERCHANTABILITY, SATISFACTORY QUALITY, or FITNESS FOR A PARTICULAR +# PURPOSE. See the GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . + +from charmhelpers.core import hookenv +from charms.coordinator import coordinator, log +import charms.reactive + + +def initialize_coordinator_state(): + ''' + The coordinator.granted.{lockname} state will be set and the + coordinator.requested.{lockname} state removed for every lock + granted to the currently running hook. 
+ + The coordinator.requested.{lockname} state will remain set for locks + not yet granted + ''' + log('Initializing coordinator layer') + + requested = set(coordinator.requests.get(hookenv.local_unit(), {}).keys()) + previously_requested = set(state.split('.', 2)[2] + for state in charms.reactive.bus.get_states() + if state.startswith('coordinator.requested.')) + + granted = set(coordinator.grants.get(hookenv.local_unit(), {}).keys()) + previously_granted = set(state.split('.', 2)[2] + for state in charms.reactive.bus.get_states() + if state.startswith('coordinator.granted.')) + + # Set reactive state for requested locks. + for lock in requested: + log('Requested {} lock'.format(lock), hookenv.DEBUG) + charms.reactive.set_state('coordinator.requested.{}'.format(lock)) + + # Set reactive state for locks that have been granted. + for lock in granted: + log('Granted {} lock'.format(lock), hookenv.DEBUG) + charms.reactive.set_state('coordinator.granted.{}'.format(lock)) + + # Remove reactive state for locks that have been released. + for lock in (previously_granted - granted): + log('Dropped {} lock'.format(lock), hookenv.DEBUG) + charms.reactive.remove_state('coordinator.granted.{}'.format(lock)) + + # Remove requested state for locks no longer requested and not granted. + for lock in (previously_requested - requested - granted): + log('Request for {} lock was dropped'.format(lock), hookenv.DEBUG) + charms.reactive.remove_state('coordinator.requested.{}'.format(lock)) + + +# Per https://github.com/juju-solutions/charms.reactive/issues/33, +# this module may be imported multiple times so ensure the +# initialization hook is only registered once. I have to piggy back +# onto the namespace of a module imported before reactive discovery +# to do this. 
+if not hasattr(charms.reactive, '_coordinator_registered'): + hookenv.atstart(initialize_coordinator_state) + charms.reactive._coordinator_registered = True diff --git a/kubernetes-control-plane/reactive/hacluster.py b/kubernetes-control-plane/reactive/hacluster.py new file mode 100644 index 0000000..4560270 --- /dev/null +++ b/kubernetes-control-plane/reactive/hacluster.py @@ -0,0 +1,110 @@ +from charms import layer + +from charms.reactive import hook +from charms.reactive import when, when_not, clear_flag, set_flag, is_flag_set +from charms.reactive import endpoint_from_flag + +from charms.layer.kubernetes_common import get_ingress_address + +from charmhelpers.core import hookenv +from charmhelpers.core import unitdata + +db = unitdata.kv() + + +@hook('upgrade-charm') +def do_upgrade(): + # bump the services from upstart to systemd. :-/ + hacluster = endpoint_from_flag('ha.connected') + if not hacluster: + return + + if not is_flag_set('layer-hacluster.upgraded-systemd'): + services = db.get('layer-hacluster.services', {'current_services': {}, + 'desired_services': {}, + 'deleted_services': {}}) + for name, service in services['current_services'].items(): + hookenv.log("changing service {} to systemd service".format(name)) + hacluster.remove_init_service(name, service) + hacluster.add_systemd_service(name, service) + + # change any pending lsb entries to systemd + for name, service in services['desired_services'].items(): + msg = "changing pending service {} to systemd service" + hookenv.log(msg.format(name)) + hacluster.remove_init_service(name, service) + hacluster.add_systemd_service(name, service) + + clear_flag('layer-hacluster.configured') + set_flag('layer-hacluster.upgraded-systemd') + + +@when('ha.connected') +@when_not('layer-hacluster.configured') +def configure_hacluster(): + """Configure HA resources in corosync""" + hacluster = endpoint_from_flag('ha.connected') + vips = hookenv.config('ha-cluster-vip').split() + dns_record = 
hookenv.config('ha-cluster-dns') + if vips and dns_record: + set_flag('layer-hacluster.dns_vip.invalid') + msg = "Unsupported configuration. " \ + "ha-cluster-vip and ha-cluster-dns cannot both be set", + hookenv.log(msg) + return + else: + clear_flag('layer-hacluster.dns_vip.invalid') + if vips: + for vip in vips: + hacluster.add_vip(hookenv.application_name(), vip) + elif dns_record: + layer_options = layer.options('hacluster') + binding_address = layer_options.get('binding_address') + ip = get_ingress_address(binding_address) + hacluster.add_dnsha(hookenv.application_name(), ip, dns_record, + 'public') + + services = db.get('layer-hacluster.services', {'current_services': {}, + 'desired_services': {}, + 'deleted_services': {}}) + for name, service in services['deleted_services'].items(): + hacluster.remove_systemd_service(name, service) + for name, service in services['desired_services'].items(): + hacluster.add_systemd_service(name, service) + services['current_services'][name] = service + + services['deleted_services'] = {} + services['desired_services'] = {} + + hacluster.bind_resources() + set_flag('layer-hacluster.configured') + + +@when('config.changed.ha-cluster-vip', + 'ha.connected') +def update_vips(): + hacluster = endpoint_from_flag('ha.connected') + config = hookenv.config() + original_vips = set(config.previous('ha-cluster-vip').split()) + new_vips = set(config['ha-cluster-vip'].split()) + old_vips = original_vips - new_vips + + for vip in old_vips: + hacluster.remove_vip(hookenv.application_name(), vip) + + clear_flag('layer-hacluster.configured') + + +@when('config.changed.ha-cluster-dns', + 'ha.connected') +def update_dns(): + hacluster = endpoint_from_flag('ha.connected') + config = hookenv.config() + original_dns = set(config.previous('ha-cluster-dns').split()) + new_dns = set(config['ha-cluster-dns'].split()) + old_dns = original_dns - new_dns + + for dns in old_dns: + hacluster.remove_dnsha(hookenv.application_name, 'public') + + 
clear_flag('layer-hacluster.configured') diff --git a/kubernetes-control-plane/reactive/kubernetes_control_plane.py b/kubernetes-control-plane/reactive/kubernetes_control_plane.py new file mode 100644 index 0000000..98e9d33 --- /dev/null +++ b/kubernetes-control-plane/reactive/kubernetes_control_plane.py @@ -0,0 +1,3719 @@ +#!/usr/local/sbin/charm-env python3 + +# Copyright 2015 The Kubernetes Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import base64 +import json +import os +import re +import socket +import traceback +import yaml + +from itertools import filterfalse +from shutil import move, copyfile +from pathlib import Path +from subprocess import check_call, call +from subprocess import check_output +from subprocess import CalledProcessError +from urllib.request import Request, urlopen + +import charms.coordinator +from charms.layer import snap +from charms.leadership import leader_get, leader_set +from charms.reactive import hook +from charms.reactive import remove_state, clear_flag +from charms.reactive import get_flags, set_state, set_flag +from charms.reactive import is_state, is_flag_set, get_unset_flags +from charms.reactive import endpoint_from_flag, endpoint_from_name +from charms.reactive import when, when_any, when_not, when_none +from charms.reactive import register_trigger +from charms.reactive import data_changed, any_file_changed + +from charms.layer import tls_client +from charms.layer import vaultlocker +from charms.layer import 
vault_kv + +from charmhelpers.core import hookenv +from charmhelpers.core import host +from charmhelpers.core import unitdata +from charmhelpers.core.host import restart_on_change +from charmhelpers.core.host import ( + service_pause, + service_resume, + service_running, + service_stop, +) +from charmhelpers.core.templating import render +from charmhelpers.contrib.charmsupport import nrpe +from charmhelpers.contrib.storage.linux.ceph import CephBrokerRq + +from charms.layer import kubernetes_control_plane +from charms.layer import kubernetes_common + +from charms.layer.kubernetes_common import kubeclientconfig_path +from charms.layer.kubernetes_common import migrate_resource_checksums +from charms.layer.kubernetes_common import check_resources_for_upgrade_needed +from charms.layer.kubernetes_common import ( + calculate_and_store_resource_checksums, +) # noqa +from charms.layer.kubernetes_common import arch +from charms.layer.kubernetes_common import service_restart +from charms.layer.kubernetes_common import get_ingress_address +from charms.layer.kubernetes_common import get_ingress_address6 +from charms.layer.kubernetes_common import create_kubeconfig +from charms.layer.kubernetes_common import get_service_ip +from charms.layer.kubernetes_common import configure_kubernetes_service +from charms.layer.kubernetes_common import cloud_config_path +from charms.layer.kubernetes_common import encryption_config_path +from charms.layer.kubernetes_common import write_gcp_snap_config +from charms.layer.kubernetes_common import generate_openstack_cloud_config +from charms.layer.kubernetes_common import write_azure_snap_config +from charms.layer.kubernetes_common import configure_kube_proxy +from charms.layer.kubernetes_common import kubeproxyconfig_path +from charms.layer.kubernetes_common import get_version +from charms.layer.kubernetes_common import retry +from charms.layer.kubernetes_common import ca_crt_path +from charms.layer.kubernetes_common import server_crt_path +from 
charms.layer.kubernetes_common import server_key_path +from charms.layer.kubernetes_common import client_crt_path +from charms.layer.kubernetes_common import client_key_path +from charms.layer.kubernetes_common import kubectl, kubectl_manifest, kubectl_success +from charms.layer.kubernetes_common import _get_vmware_uuid +from charms.layer.kubernetes_common import get_node_name +from charms.layer.kubernetes_common import get_sandbox_image_uri +from charms.layer.kubernetes_common import kubelet_kubeconfig_path + +from charms.layer.kubernetes_node_base import LabelMaker + +from charms.layer.nagios import install_nagios_plugin_from_file +from charms.layer.nagios import remove_nagios_plugin + + +# Override the default nagios shortname regex to allow periods, which we +# need because our bin names contain them (e.g. 'snap.foo.daemon'). The +# default regex in charmhelpers doesn't allow periods, but nagios itself does. +nrpe.Check.shortname_re = r"[\.A-Za-z0-9-_]+$" + +snap_resources = [ + "kubectl", + "kube-apiserver", + "kube-controller-manager", + "kube-scheduler", + "cdk-addons", + "kube-proxy", + "kubelet", +] + +control_plane_services = [ + "kube-apiserver", + "kube-controller-manager", + "kube-scheduler", + "kube-proxy", + "kubelet", +] + +cohort_snaps = snap_resources + + +os.environ["PATH"] += os.pathsep + os.path.join(os.sep, "snap", "bin") +db = unitdata.kv() +checksum_prefix = "kubernetes-master.resource-checksums." +configure_prefix = "kubernetes-master.prev_args." 
+keystone_root = "/root/cdk/keystone" +keystone_policy_path = os.path.join(keystone_root, "keystone-policy.yaml") +kubecontrollermanagerconfig_path = "/root/cdk/kubecontrollermanagerconfig" +kubeschedulerconfig_path = "/root/cdk/kubeschedulerconfig" +cdk_addons_kubectl_config_path = "/root/cdk/cdk_addons_kubectl_config" +kubernetes_logs = "/var/log/kubernetes/" +aws_iam_webhook = "/root/cdk/aws-iam-webhook.yaml" +auth_webhook_root = "/root/cdk/auth-webhook" +auth_webhook_conf = os.path.join(auth_webhook_root, "auth-webhook-conf.yaml") +auth_webhook_exe = os.path.join(auth_webhook_root, "auth-webhook.py") +auth_webhook_svc_name = "cdk.master.auth-webhook" +auth_webhook_svc = "/etc/systemd/system/{}.service".format(auth_webhook_svc_name) +tls_ciphers_intermediate = [ + # https://wiki.mozilla.org/Security/Server_Side_TLS + # https://ssl-config.mozilla.org/#server=go&config=intermediate + "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256", + "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256", + "TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384", + "TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384", + "TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305", + "TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305", +] + + +register_trigger( + when="endpoint.aws.ready", + set_flag="kubernetes-control-plane.aws.changed", # when set +) +register_trigger( + when_not="endpoint.aws.ready", # when cleared + set_flag="kubernetes-control-plane.aws.changed", +) +register_trigger( + when="endpoint.azure.ready", + set_flag="kubernetes-control-plane.azure.changed", # when set +) +register_trigger( + when_not="endpoint.azure.ready", # when cleared + set_flag="kubernetes-control-plane.azure.changed", +) +register_trigger( + when="endpoint.gcp.ready", + set_flag="kubernetes-control-plane.gcp.changed", # when set +) +register_trigger( + when_not="endpoint.gcp.ready", # when cleared + set_flag="kubernetes-control-plane.gcp.changed", +) +register_trigger( + when="keystone-credentials.available", set_flag="cdk-addons.reconfigure" +) +register_trigger( + 
when_not="keystone-credentials.available", set_flag="cdk-addons.reconfigure" +) +register_trigger( + when="kubernetes-control-plane.aws.changed", set_flag="cdk-addons.reconfigure" +) +register_trigger( + when="kubernetes-control-plane.azure.changed", set_flag="cdk-addons.reconfigure" +) +register_trigger( + when="kubernetes-control-plane.gcp.changed", set_flag="cdk-addons.reconfigure" +) +register_trigger( + when="kubernetes-control-plane.openstack.changed", set_flag="cdk-addons.reconfigure" +) +register_trigger( + when_not="cni.available", clear_flag="kubernetes-control-plane.components.started" +) +register_trigger( + when="kube-control.requests.changed", clear_flag="authentication.setup" +) +register_trigger( + when_not="kubernetes-control-plane.apiserver.configured", + clear_flag="kubernetes-control-plane.apiserver.running", +) +register_trigger( + when="config.changed.image-registry", + clear_flag="kubernetes-control-plane.kubelet.configured", +) +register_trigger( + when="config.changed.image-registry", + clear_flag="kubernetes-control-plane.sent-registry", +) +register_trigger( + when="config.changed.default-cni", + clear_flag="kubernetes-control-plane.default-cni.configured", +) +register_trigger( + when_not="ceph-client.connected", + clear_flag="kubernetes-control-plane.ceph.pools.created", +) +register_trigger( + when_not="ceph-client.connected", + clear_flag="kubernetes-control-plane.ceph.permissions.requested", +) +register_trigger( + when="ceph-client.available", + clear_flag="kubernetes-control-plane.apiserver.configured", +) +register_trigger( + when_not="ceph-client.available", + clear_flag="kubernetes-control-plane.apiserver.configured", +) + + +def set_upgrade_needed(forced=False): + set_state("kubernetes-control-plane.upgrade-needed") + config = hookenv.config() + previous_channel = config.previous("channel") + require_manual = config.get("require-manual-upgrade") + hookenv.log("set upgrade needed") + if previous_channel is None or not 
require_manual or forced: + hookenv.log("forcing upgrade") + set_state("kubernetes-control-plane.upgrade-specified") + + +@when("config.changed.channel") +def channel_changed(): + set_upgrade_needed() + + +def maybe_install_kubelet(): + if not snap.is_installed("kubelet"): + channel = hookenv.config("channel") + hookenv.status_set("maintenance", "Installing kubelet snap") + snap.install("kubelet", channel=channel, classic=True) + calculate_and_store_resource_checksums(checksum_prefix, snap_resources) + + +def maybe_install_kube_proxy(): + if not snap.is_installed("kube-proxy"): + channel = hookenv.config("channel") + hookenv.status_set("maintenance", "Installing kube-proxy snap") + snap.install("kube-proxy", channel=channel, classic=True) + calculate_and_store_resource_checksums(checksum_prefix, snap_resources) + + +@hook("install") +def fresh_install(): + # fresh installs should always send the unique cluster tag to cdk-addons + set_state("kubernetes-control-plane.cdk-addons.unique-cluster-tag") + + +@hook("upgrade-charm") +def check_for_upgrade_needed(): + """An upgrade charm event was triggered by Juju, react to that here.""" + hookenv.status_set("maintenance", "Checking resources") + is_leader = is_state("leadership.is_leader") + + # migrate to inclusive flags + old, new = "kubernetes-master", "kubernetes-control-plane" # wokeignore:rule=master + for flag in get_flags(): + if flag.startswith(old): + new_flag = flag.replace(old, new, 1) + clear_flag(flag) + set_flag(new_flag) + + # migrate to new flags + if is_state("kubernetes-control-plane.restarted-for-cloud"): + remove_state("kubernetes-control-plane.restarted-for-cloud") + set_state("kubernetes-control-plane.cloud.ready") + if is_state("kubernetes-control-plane.cloud-request-sent"): + # minor change, just for consistency + remove_state("kubernetes-control-plane.cloud-request-sent") + set_state("kubernetes-control-plane.cloud.request-sent") + if is_flag_set("kubernetes-control-plane.snaps.installed"): + # 
consistent with layer-kubernetes-node-base + remove_state("kubernetes-control-plane.snaps.installed") + set_state("kubernetes-node.snaps.installed") + + # ceph-storage.configured flag no longer exists + remove_state("ceph-storage.configured") + + # kubernetes-control-plane.ceph.configured flag no longer exists + remove_state("kubernetes-control-plane.ceph.configured") + + maybe_install_kubelet() + maybe_install_kube_proxy() + update_certificates() + switch_auth_mode(forced=True) + + # File-based auth is gone in 1.19; ensure any entries in basic_auth.csv are + # added to known_tokens.csv, and any known_tokens entries are created as secrets. + if not is_flag_set("kubernetes-control-plane.basic-auth.migrated"): + if kubernetes_control_plane.migrate_auth_file( + kubernetes_control_plane.AUTH_BASIC_FILE + ): + set_flag("kubernetes-control-plane.basic-auth.migrated") + else: + hookenv.log( + "Unable to migrate {} to {}".format( + kubernetes_control_plane.AUTH_BASIC_FILE, + kubernetes_control_plane.AUTH_TOKENS_FILE, + ) + ) + if not is_flag_set("kubernetes-control-plane.token-auth.migrated"): + register_auth_webhook() + add_rbac_roles() + if kubernetes_control_plane.migrate_auth_file( + kubernetes_control_plane.AUTH_TOKENS_FILE + ): + set_flag("kubernetes-control-plane.token-auth.migrated") + else: + hookenv.log( + "Unable to migrate {} to Kubernetes secrets".format( + kubernetes_control_plane.AUTH_TOKENS_FILE + ) + ) + set_state("reconfigure.authentication.setup") + remove_state("authentication.setup") + + if not db.get("snap.resources.fingerprint.initialised"): + # We are here on an upgrade from non-rolling control plane + # Since this upgrade might also include resource updates eg + # juju upgrade-charm kubernetes-control-plane --resource kube-any=my.snap + # we take no risk and forcibly upgrade the snaps. + # Forcibly means we do not prompt the user to call the upgrade action. 
+ set_upgrade_needed(forced=True) + + migrate_resource_checksums(checksum_prefix, snap_resources) + if check_resources_for_upgrade_needed(checksum_prefix, snap_resources): + set_upgrade_needed() + + # Set the auto storage backend to etcd2. + auto_storage_backend = leader_get("auto_storage_backend") + if not auto_storage_backend and is_leader: + leader_set(auto_storage_backend="etcd2") + + if is_leader and not leader_get("auto_dns_provider"): + was_kube_dns = hookenv.config().previous("enable-kube-dns") + if was_kube_dns is True: + leader_set(auto_dns_provider="kube-dns") + elif was_kube_dns is False: + leader_set(auto_dns_provider="none") + + if is_flag_set("nrpe-external-master.available"): + update_nrpe_config() + + remove_state("kubernetes-control-plane.system-monitoring-rbac-role.applied") + remove_state("kubernetes-control-plane.kubelet.configured") + remove_state("kubernetes-control-plane.default-cni.configured") + remove_state("kubernetes-control-plane.sent-registry") + remove_state("kubernetes-control-plane.ceph.permissions.requested") + + # Remove services from hacluster and leave to systemd while + # hacluster is not ready to accept order and colocation constraints + if is_flag_set("ha.connected"): + hacluster = endpoint_from_flag("ha.connected") + for service in control_plane_services: + daemon = "snap.{}.daemon".format(service) + hacluster.remove_systemd_service(service, daemon) + + +@hook("pre-series-upgrade") +def pre_series_upgrade(): + """Stop the kubernetes control plane services""" + for service in control_plane_services: + service_pause("snap.%s.daemon" % service) + + +@hook("post-series-upgrade") +def post_series_upgrade(): + for service in control_plane_services: + service_resume("snap.%s.daemon" % service) + # set ourselves up to restart + remove_state("kubernetes-control-plane.components.started") + + +@hook("leader-elected") +def leader_elected(): + clear_flag("authentication.setup") + + +def add_rbac_roles(): + """Update the known_tokens 
file with proper groups. + + DEPRECATED: Once known_tokens are migrated, group data will be stored in K8s + secrets. Do not use this function after migrating to authn with secrets. + """ + if is_flag_set("kubernetes-control-plane.token-auth.migrated"): + hookenv.log("Known tokens have migrated to secrets. Skipping group changes") + return + tokens_fname = "/root/cdk/known_tokens.csv" + tokens_backup_fname = "/root/cdk/known_tokens.csv.backup" + move(tokens_fname, tokens_backup_fname) + with open(tokens_fname, "w") as ftokens: + with open(tokens_backup_fname, "r") as stream: + for line in stream: + if line.startswith("#"): + continue + record = line.strip().split(",") + try: + # valid line looks like: token,username,user,groups + if record[2] == "admin" and len(record) == 3: + towrite = '{0},{1},{2},"{3}"\n'.format( + record[0], record[1], record[2], "system:masters" + ) + ftokens.write(towrite) + continue + if record[2] == "kube_proxy": + towrite = "{0},{1},{2}\n".format( + record[0], "system:kube-proxy", "kube-proxy" + ) + ftokens.write(towrite) + continue + if record[2] == "kube_controller_manager": + towrite = "{0},{1},{2}\n".format( + record[0], + "system:kube-controller-manager", + "kube-controller-manager", + ) + ftokens.write(towrite) + continue + if record[2] == "kubelet" and record[1] == "kubelet": + continue + except IndexError: + msg = "Skipping invalid line from {}: {}".format( + tokens_backup_fname, line + ) + hookenv.log(msg, level=hookenv.DEBUG) + continue + else: + ftokens.write("{}".format(line)) + + +@when("kubernetes-control-plane.upgrade-specified") +def do_upgrade(): + install_snaps() + remove_state("kubernetes-control-plane.upgrade-needed") + remove_state("kubernetes-control-plane.upgrade-specified") + + +def install_snaps(): + channel = hookenv.config("channel") + hookenv.status_set("maintenance", "Installing core snap") + snap.install("core") + hookenv.status_set("maintenance", "Installing kubectl snap") + snap.install("kubectl", 
channel=channel, classic=True) + hookenv.status_set("maintenance", "Installing kube-apiserver snap") + snap.install("kube-apiserver", channel=channel) + hookenv.status_set("maintenance", "Installing kube-controller-manager snap") + snap.install("kube-controller-manager", channel=channel) + hookenv.status_set("maintenance", "Installing kube-scheduler snap") + snap.install("kube-scheduler", channel=channel) + hookenv.status_set("maintenance", "Installing cdk-addons snap") + snap.install("cdk-addons", channel=channel) + hookenv.status_set("maintenance", "Installing kubelet snap") + snap.install("kubelet", channel=channel, classic=True) + hookenv.status_set("maintenance", "Installing kube-proxy snap") + snap.install("kube-proxy", channel=channel, classic=True) + calculate_and_store_resource_checksums(checksum_prefix, snap_resources) + db.set("snap.resources.fingerprint.initialised", True) + set_state("kubernetes-node.snaps.installed") + remove_state("kubernetes-control-plane.components.started") + + +@when("kubernetes-node.snaps.installed", "leadership.is_leader") +@when_not("leadership.set.cohort_keys") +def create_or_update_cohort_keys(): + cohort_keys = {} + for snapname in cohort_snaps: + try: + cohort_key = snap.create_cohort_snapshot(snapname) + except CalledProcessError: + # Snap store outages prevent keys from being created; log it + # and retry later. 
LP:1956608 + hookenv.log( + "Failed to create cohort for {}; will retry".format(snapname), + level=hookenv.INFO, + ) + return + cohort_keys[snapname] = cohort_key + leader_set(cohort_keys=json.dumps(cohort_keys)) + hookenv.log("Snap cohort keys have been created.", level=hookenv.INFO) + + # Prime revision info so we can detect changes later + cohort_revs = kubernetes_control_plane.get_snap_revs(cohort_snaps) + data_changed("leader-cohort-revs", cohort_revs) + hookenv.log( + "Tracking cohort revisions: {}".format(cohort_revs), level=hookenv.DEBUG + ) + + +@when( + "kubernetes-node.snaps.installed", + "leadership.is_leader", + "leadership.set.cohort_keys", +) +def check_cohort_updates(): + cohort_revs = kubernetes_control_plane.get_snap_revs(cohort_snaps) + if cohort_revs and data_changed("leader-cohort-revs", cohort_revs): + leader_set(cohort_keys=None) + hookenv.log("Snap cohort revisions have changed.", level=hookenv.INFO) + + +@when("kubernetes-node.snaps.installed", "leadership.set.cohort_keys") +@when_none("coordinator.granted.cohort", "coordinator.requested.cohort") +def safely_join_cohort(): + """Coordinate the rollout of snap refreshes. + + When cohort keys change, grab a lock so that only 1 unit in the + application joins the new cohort at a time. This allows us to roll out + snap refreshes without risking all units going down at once. + """ + cohort_keys = leader_get("cohort_keys") + # NB: initial data-changed is always true + if data_changed("leader-cohorts", cohort_keys): + clear_flag("kubernetes-control-plane.cohorts.joined") + clear_flag("kubernetes-control-plane.cohorts.sent") + charms.coordinator.acquire("cohort") + + +@when( + "kubernetes-node.snaps.installed", + "leadership.set.cohort_keys", + "coordinator.granted.cohort", +) +@when_not("kubernetes-control-plane.cohorts.joined") +def join_or_update_cohorts(): + """Join or update a cohort snapshot. 
+ + All units of this application (leader and followers) need to refresh their + installed snaps to the current cohort snapshot. + """ + cohort_keys = json.loads(leader_get("cohort_keys")) + for snapname in cohort_snaps: + cohort_key = cohort_keys[snapname] + if snap.is_installed(snapname): # we also manage workers' cohorts + hookenv.status_set("maintenance", "Joining snap cohort.") + snap.join_cohort_snapshot(snapname, cohort_key) + set_flag("kubernetes-control-plane.cohorts.joined") + hookenv.log("{} has joined the snap cohort".format(hookenv.local_unit())) + + +@when( + "kubernetes-node.snaps.installed", + "leadership.set.cohort_keys", + "kubernetes-control-plane.cohorts.joined", + "kube-control.connected", +) +@when_not("kubernetes-control-plane.cohorts.sent") +def send_cohorts(): + """Send cohort information to workers. + + If we have peers, wait until all peers are updated before sending. + Otherwise, we're a single unit k8s-cp and can fire when connected. + """ + cohort_keys = json.loads(leader_get("cohort_keys")) + kube_control = endpoint_from_flag("kube-control.connected") + kube_cps = endpoint_from_flag("kube-masters.connected") # wokeignore:rule=master + + # If we have peers, tell them we've joined the cohort. This is needed so + # we don't tell workers about cohorts until all control planes are in-sync. + goal_peers = len(list(hookenv.expected_peer_units())) + if goal_peers > 0: + if kube_cps: + # tell peers about the cohort keys + kube_cps.set_cohort_keys(cohort_keys) + else: + msg = "Waiting for {} peers before setting the cohort.".format(goal_peers) + hookenv.log(msg, level=hookenv.DEBUG) + return + + if is_flag_set("kube-masters.cohorts.ready"): + # tell workers about the cohort keys + kube_control.set_cohort_keys(cohort_keys) + hookenv.log( + "{} (peer) sent cohort keys to workers".format(hookenv.local_unit()) + ) + else: + msg = "Waiting for k8s-cps to agree on cohorts." 
+ hookenv.log(msg, level=hookenv.DEBUG) + return + else: + # tell workers about the cohort keys + kube_control.set_cohort_keys(cohort_keys) + hookenv.log( + "{} (single) sent cohort keys to workers".format(hookenv.local_unit()) + ) + + set_flag("kubernetes-control-plane.cohorts.sent") + + +@when("etcd.available") +@when("config.changed.enable-metrics") +def enable_metric_changed(): + """ + Trigger an api server update. + + :return: None + """ + clear_flag("kubernetes-control-plane.apiserver.configured") + + if is_state("leadership.is_leader"): + configure_cdk_addons() + + +@when("config.changed.client_password", "leadership.is_leader") +def password_changed(): + """Handle password change by reconfiguring authentication.""" + remove_state("authentication.setup") + + +@when("config.changed.storage-backend") +def storage_backend_changed(): + remove_state("kubernetes-control-plane.components.started") + + +@when("leadership.is_leader") +@when_not("authentication.setup") +def setup_leader_authentication(): + """ + Setup service accounts and tokens for the cluster. + + As of 1.19 charms, this will also propogate a generic basic_auth.csv, which is + merged into known_tokens.csv, which are migrated to secrets during upgrade-charm. + """ + basic_auth = "/root/cdk/basic_auth.csv" + known_tokens = "/root/cdk/known_tokens.csv" + service_key = "/root/cdk/serviceaccount.key" + os.makedirs("/root/cdk", exist_ok=True) + + hookenv.status_set("maintenance", "Rendering authentication templates.") + + keys = [basic_auth, known_tokens, service_key] + # Try first to fetch data from an old leadership broadcast. 
+ if not get_keys_from_leader(keys) or is_state("reconfigure.authentication.setup"): + kubernetes_control_plane.deprecate_auth_file(basic_auth) + set_flag("kubernetes-control-plane.basic-auth.migrated") + + kubernetes_control_plane.deprecate_auth_file(known_tokens) + set_flag("kubernetes-control-plane.token-auth.migrated") + + # Generate the default service account token key + if not os.path.isfile(service_key): + cmd = ["openssl", "genrsa", "-out", service_key, "2048"] + check_call(cmd) + remove_state("reconfigure.authentication.setup") + + # Write the admin token every time we setup authn to ensure we honor a + # configured password. + client_pass = hookenv.config("client_password") or get_token("admin") + setup_tokens(client_pass, "admin", "admin", "system:masters") + + create_tokens_and_sign_auth_requests() + + # send auth files to followers via leadership data + leader_data = {} + for f in [basic_auth, known_tokens, service_key]: + try: + with open(f, "r") as fp: + leader_data[f] = fp.read() + except FileNotFoundError: + pass + + # this is slightly opaque, but we are sending file contents under its file + # path as a key. + # eg: + # {'/root/cdk/serviceaccount.key': 'RSA:2471731...'} + leader_set(leader_data) + + remove_state("kubernetes-control-plane.components.started") + remove_state("kube-control.requests.changed") + set_state("authentication.setup") + + +@when_not("leadership.is_leader") +def setup_non_leader_authentication(): + basic_auth = "/root/cdk/basic_auth.csv" + known_tokens = "/root/cdk/known_tokens.csv" + service_key = "/root/cdk/serviceaccount.key" + + # Starting with 1.19, we don't use csv auth files; handle changing secrets. 
+ secrets = { + "admin": get_token("admin"), + "kube-controller-manager": get_token("system:kube-controller-manager"), + "kube-proxy": get_token("system:kube-proxy"), + "kube-scheduler": get_token("system:kube-scheduler"), + } + if data_changed("secrets-data", secrets): + set_flag("kubernetes-control-plane.token-auth.migrated") + build_kubeconfig() + remove_state("kubernetes-control-plane.components.started") + + keys = [basic_auth, known_tokens, service_key] + # Pre-secrets, the source of truth for non-leaders is the leader. + # Therefore we overwrite_local with whatever the leader has. + if not get_keys_from_leader(keys, overwrite_local=True): + # the keys were not retrieved. Non-leaders have to retry. + return + + if any_file_changed(keys): + remove_state("kubernetes-control-plane.components.started") + + # Clear stale creds from the kube-control relation so that the leader can + # assume full control of them. + kube_control = endpoint_from_flag("kube-control.connected") + if kube_control: + kube_control.clear_creds() + + remove_state("kube-control.requests.changed") + set_state("authentication.setup") + + +def get_keys_from_leader(keys, overwrite_local=False): + """ + Gets the broadcasted keys from the leader and stores them in + the corresponding files. + + Args: + keys: list of keys. Keys are actually files on the FS. + + Returns: True if all key were fetched, False if not. 
+ + """ + # This races with other codepaths, and seems to require being created first + # This block may be extracted later, but for now seems to work as intended + os.makedirs("/root/cdk", exist_ok=True) + + for k in keys: + # If the path does not exist, assume we need it + if not os.path.exists(k) or overwrite_local: + # Fetch data from leadership broadcast + contents = leader_get(k) + # Default to logging the warning and wait for leader data to be set + if contents is None: + hookenv.log("Missing content for file {}".format(k)) + return False + # Write out the file and move on to the next item + with open(k, "w+") as fp: + fp.write(contents) + fp.write("\n") + + return True + + +@when("kubernetes-node.snaps.installed") +def set_app_version(): + """Declare the application version to juju""" + version = check_output(["kube-apiserver", "--version"]) + hookenv.application_version_set(version.split(b" v")[-1].rstrip()) + + +@hookenv.atstart +def check_vault_pending(): + try: + goal_state = hookenv.goal_state() + except NotImplementedError: + goal_state = {} + vault_kv_goal = "vault-kv" in goal_state.get("relations", {}) + vault_kv_connected = is_state("vault-kv.connected") + vault_kv_related = vault_kv_goal or vault_kv_connected + vault_kv_ready = is_state("layer.vault-kv.ready") + if vault_kv_related and not vault_kv_ready: + set_flag("kubernetes-control-plane.vault-kv.pending") + else: + clear_flag("kubernetes-control-plane.vault-kv.pending") + + +@hookenv.atexit +def set_final_status(): + """Set the final status of the charm as we leave hook execution""" + try: + goal_state = hookenv.goal_state() + except NotImplementedError: + goal_state = {} + + if is_flag_set("upgrade.series.in-progress"): + hookenv.status_set("blocked", "Series upgrade in progress") + return + + if not is_flag_set("certificates.available"): + if "certificates" in goal_state.get("relations", {}): + hookenv.status_set("waiting", "Waiting for certificates authority.") + else: + 
hookenv.status_set("blocked", "Missing relation to certificate authority.") + return + + if is_flag_set("kubernetes-control-plane.secure-storage.failed"): + hookenv.status_set( + "blocked", + "Failed to configure encryption; " + "secrets are unencrypted or inaccessible", + ) + return + elif is_flag_set("kubernetes-control-plane.secure-storage.created"): + if not encryption_config_path().exists(): + hookenv.status_set( + "blocked", "VaultLocker containing encryption config unavailable" + ) + return + + vsphere_joined = is_state("endpoint.vsphere.joined") + azure_joined = is_state("endpoint.azure.joined") + cloud_blocked = is_state("kubernetes-control-plane.cloud.blocked") + if vsphere_joined and cloud_blocked: + hookenv.status_set( + "blocked", "vSphere integration requires K8s 1.12 or greater" + ) + return + if azure_joined and cloud_blocked: + hookenv.status_set("blocked", "Azure integration requires K8s 1.11 or greater") + return + if not is_flag_set("kubernetes.cni-plugins.installed"): + hookenv.status_set("blocked", "Missing CNI resource") + return + if is_state("kubernetes-control-plane.cloud.pending"): + hookenv.status_set("waiting", "Waiting for cloud integration") + return + + if "kube-api-endpoint" in goal_state.get("relations", {}): + if not is_state("kube-api-endpoint.available"): + hookenv.status_set("waiting", "Waiting for kube-api-endpoint relation") + return + + for lb_endpoint in ("loadbalancer-internal", "loadbalancer-external"): + if lb_endpoint in goal_state.get("relations", {}): + lb_provider = endpoint_from_name(lb_endpoint) + if not lb_provider.has_response: + hookenv.status_set("waiting", "Waiting for " + lb_endpoint) + return + + if not is_state("kube-control.connected"): + if "kube-control" in goal_state.get("relations", {}): + status = "waiting" + else: + status = "blocked" + hookenv.status_set(status, "Waiting for workers.") + return + + ks = endpoint_from_flag("keystone-credentials.available") + if ks and ks.api_version() == "2": + msg = 
"Keystone auth v2 detected. v3 is required." + hookenv.status_set("blocked", msg) + return + + upgrade_needed = is_state("kubernetes-control-plane.upgrade-needed") + upgrade_specified = is_state("kubernetes-control-plane.upgrade-specified") + if upgrade_needed and not upgrade_specified: + msg = "Needs manual upgrade, run the upgrade action" + hookenv.status_set("blocked", msg) + return + + try: + get_dns_provider() + except InvalidDnsProvider as e: + if e.value == "core-dns": + msg = "dns-provider=core-dns requires k8s 1.14+" + else: + msg = "dns-provider=%s is invalid" % e.value + hookenv.status_set("blocked", msg) + return + + if is_state("kubernetes-control-plane.vault-kv.pending"): + hookenv.status_set( + "waiting", "Waiting for encryption info from Vault to secure secrets" + ) + return + + if is_state("kubernetes-control-plane.had-service-cidr-expanded"): + hookenv.status_set( + "waiting", "Waiting to retry updates for service-cidr expansion" + ) + return + + if not is_state("etcd.available"): + if "etcd" in goal_state.get("relations", {}): + status = "waiting" + else: + status = "blocked" + hookenv.status_set(status, "Waiting for etcd") + return + + if not is_state("cni.available"): + if "cni" in goal_state.get("relations", {}): + status = "waiting" + else: + status = "blocked" + hookenv.status_set(status, "Waiting for CNI plugins to become available") + return + + if not is_state("tls_client.certs.saved"): + hookenv.status_set("waiting", "Waiting for certificates") + return + + if not is_flag_set("kubernetes-control-plane.auth-webhook-service.started"): + hookenv.status_set("waiting", "Waiting for auth-webhook service to start") + return + + if not is_flag_set("kubernetes-control-plane.apiserver.configured"): + hookenv.status_set("waiting", "Waiting for API server to be configured") + return + + if not is_flag_set("kubernetes-control-plane.apiserver.running"): + hookenv.status_set("waiting", "Waiting for API server to start") + return + + 
authentication_setup = is_state("authentication.setup") + if not authentication_setup: + hookenv.status_set("waiting", "Waiting for crypto keys.") + return + + if not is_flag_set("kubernetes-control-plane.auth-webhook-tokens.setup"): + hookenv.status_set("waiting", "Waiting for auth-webhook tokens") + return + + if is_state("kubernetes-control-plane.components.started"): + # All services should be up and running at this point. Double-check... + failing_services = control_plane_services_down() + if len(failing_services) != 0: + msg = "Stopped services: {}".format(",".join(failing_services)) + hookenv.status_set("blocked", msg) + if is_flag_set("ha.connected"): + hookenv.log("Disabling node to pass resources to other nodes") + cmd = "crm -w -F node standby" + call(cmd.split()) + for service in failing_services: + heal_handler = HEAL_HANDLER[service] + for flag in heal_handler["clear_flags"]: + clear_flag(flag) + heal_handler["run"]() + set_flag("kubernetes-control-plane.components.failed") + return + else: + if is_flag_set("kubernetes-control-plane.components.failed"): + if is_flag_set("ha.connected"): + hookenv.log("Enabling node again to receive resources") + cmd = "crm -w -F node online" + call(cmd.split()) + clear_flag("kubernetes-control-plane.components.failed") + + else: + # if we don't have components starting, we're waiting for that and + # shouldn't fall through to Kubernetes control plane running. + hookenv.status_set( + "maintenance", "Waiting for control plane components to start" + ) + return + + # Note that after this point, kubernetes-control-plane.components.started is + # always True. 
+ + is_leader = is_state("leadership.is_leader") + addons_configured = is_state("cdk-addons.configured") + if is_leader and not addons_configured: + hookenv.status_set("waiting", "Waiting to retry addon deployment") + return + + if is_leader and not is_state( + "kubernetes-control-plane.system-monitoring-rbac-role.applied" + ): + msg = "Waiting to retry applying system:monitoring RBAC role" + hookenv.status_set("waiting", msg) + return + + try: + unready = get_kube_system_pods_not_running() + except FailedToGetPodStatus: + hookenv.status_set("waiting", "Waiting for kube-system pods to start") + return + + if unready: + plural = "s" if len(unready) > 1 else "" + msg = "Waiting for {} kube-system pod{} to start" + msg = msg.format(len(unready), plural) + hookenv.status_set("waiting", msg) + return + + service_cidr = kubernetes_control_plane.service_cidr() + if hookenv.config("service-cidr") != service_cidr: + msg = "WARN: cannot change service-cidr, still using " + service_cidr + hookenv.status_set("active", msg) + return + + gpu_available = is_state("kube-control.gpu.available") + gpu_enabled = is_state("kubernetes-control-plane.gpu.enabled") + if gpu_available and not gpu_enabled: + msg = 'GPUs available. Set allow-privileged="auto" to enable.' 
+ hookenv.status_set("active", msg) + return + + if is_flag_set("ceph-storage.available"): + hookenv.status_set( + "blocked", "ceph-storage relation deprecated, use ceph-client instead" + ) + return + + if is_flag_set("ceph-client.connected") and not is_flag_set( + "ceph-client.available" + ): + hookenv.status_set("waiting", "Waiting for Ceph to provide a key.") + return + + if ( + is_leader + and ks + and is_flag_set("kubernetes-control-plane.keystone-policy-error") + ): + hookenv.status_set("blocked", "Invalid keystone policy file.") + return + + if ( + is_leader + and ks + and not is_flag_set("kubernetes-control-plane.keystone-policy-handled") + ): + hookenv.status_set("waiting", "Waiting to apply keystone policy file.") + return + + if hookenv.config("enable-metrics") and not hookenv.config( + "api-aggregation-extension" + ): + hookenv.status_set( + "blocked", + "metrics service will be unreachable without api-aggregation-extension.", + ) + return + + hookenv.status_set("active", "Kubernetes control-plane running.") + + +def control_plane_services_down(): + """Ensure control plane services are up and running. 
+ + Return: list of failing services""" + return list( + filterfalse(kubernetes_control_plane.check_service, control_plane_services) + ) + + +def add_systemd_file_limit(): + directory = "/etc/systemd/system/snap.kube-apiserver.daemon.service.d" + if not os.path.isdir(directory): + os.makedirs(directory) + + file_name = "file-limit.conf" + path = os.path.join(directory, file_name) + if not os.path.isfile(path): + with open(path, "w") as f: + f.write("[Service]\n") + f.write("LimitNOFILE=65535") + + +def add_systemd_restart_always(): + template = "templates/service-always-restart.systemd-latest.conf" + + try: + # Get the systemd version + cmd = ["systemd", "--version"] + output = check_output(cmd).decode("UTF-8") + line = output.splitlines()[0] + words = line.split() + assert words[0] == "systemd" + systemd_version = int(words[1]) + + # Check for old version (for xenial support) + if systemd_version < 230: + template = "templates/service-always-restart.systemd-229.conf" + except Exception: + traceback.print_exc() + hookenv.log( + "Failed to detect systemd version, using latest template", level="ERROR" + ) + + for service in control_plane_services: + dest_dir = "/etc/systemd/system/snap.{}.daemon.service.d".format(service) + os.makedirs(dest_dir, exist_ok=True) + copyfile(template, "{}/always-restart.conf".format(dest_dir)) + + +def add_systemd_file_watcher(): + """Setup systemd file-watcher service. + + This service watches these files for changes: + + /root/cdk/known_tokens.csv + /root/cdk/serviceaccount.key + + If a file is changed, the service uses juju-run to invoke a script in a + hook context on this unit. If this unit is the leader, the script will + call leader-set to distribute the contents of these files to the + non-leaders so they can sync their local copies to match. 
+ + """ + render( + "cdk.master.leader.file-watcher.sh", + "/usr/local/sbin/cdk.master.leader.file-watcher.sh", + {}, + perms=0o777, + ) + render( + "cdk.master.leader.file-watcher.service", + "/etc/systemd/system/cdk.master.leader.file-watcher.service", + {"unit": hookenv.local_unit()}, + perms=0o644, + ) + render( + "cdk.master.leader.file-watcher.path", + "/etc/systemd/system/cdk.master.leader.file-watcher.path", + {}, + perms=0o644, + ) + service_resume("cdk.master.leader.file-watcher.path") + + +@when("etcd.available", "tls_client.certs.saved") +@restart_on_change( + { + auth_webhook_conf: [auth_webhook_svc_name], + auth_webhook_exe: [auth_webhook_svc_name], + auth_webhook_svc: [auth_webhook_svc_name], + } +) +def register_auth_webhook(): + """Render auth webhook templates and start the related service.""" + Path(auth_webhook_root).mkdir(exist_ok=True) + + # For 'api_ver', match the api version of the authentication.k8s.io TokenReview + # that k8s-apiserver will be sending: + # https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.18 + context = { + "api_ver": "v1beta1", + "charm_dir": hookenv.charm_dir(), + "host": get_ingress_address( + "kube-api-endpoint", ignore_addresses=[hookenv.config("ha-cluster-vip")] + ), + "pidfile": "{}.pid".format(auth_webhook_svc_name), + "logfile": "{}.log".format(auth_webhook_svc_name), + "port": 5000, + "root_dir": auth_webhook_root, + } + + context["aws_iam_endpoint"] = None + if endpoint_from_flag("endpoint.aws-iam.ready"): + aws_webhook = Path(aws_iam_webhook) + if aws_webhook.exists(): + aws_yaml = yaml.safe_load(aws_webhook.read_text()) + try: + context["aws_iam_endpoint"] = aws_yaml["clusters"][0]["cluster"][ + "server" + ] + except (KeyError, TypeError): + hookenv.log( + "Unable to find server in AWS IAM webhook: {}".format(aws_yaml) + ) + pass + + context["keystone_endpoint"] = None + if endpoint_from_flag("keystone-credentials.available"): + ks_webhook = Path(keystone_root) / "webhook.yaml" + if 
ks_webhook.exists(): + ks_yaml = yaml.safe_load(ks_webhook.read_text()) + try: + context["keystone_endpoint"] = ks_yaml["clusters"][0]["cluster"][ + "server" + ] + except (KeyError, TypeError): + hookenv.log( + "Unable to find server in Keystone webhook: {}".format(ks_yaml) + ) + pass + + context["custom_authn_endpoint"] = None + custom_authn = hookenv.config("authn-webhook-endpoint") + if custom_authn: + context["custom_authn_endpoint"] = custom_authn + + k8s_log_path = Path(kubernetes_logs) + k8s_log_path.mkdir(parents=True, exist_ok=True) # ensure log path exists + render("cdk.master.auth-webhook-conf.yaml", auth_webhook_conf, context) + render("cdk.master.auth-webhook.py", auth_webhook_exe, context) + render( + "cdk.master.auth-webhook.logrotate", "/etc/logrotate.d/auth-webhook", context + ) + + # Move existing log files from ${auth_webhook_root} to /var/log/kubernetes/ + for log_file in Path(auth_webhook_root).glob("auth-webhook.log*"): + # all historical log files (.log, .log.1 and .log.3.tgz) + new_log_file = k8s_log_path / ("cdk.master." + log_file.name) + if not new_log_file.exists(): + move(str(log_file), str(new_log_file)) + + # Set the number of gunicorn workers based on our core count. 
(2*cores)+1 is + # recommended: https://docs.gunicorn.org/en/stable/design.html#how-many-workers + try: + cores = int(check_output(["nproc"]).decode("utf-8").strip()) + except CalledProcessError: + # Our default architecture is 2-cores for k8s-cp units + cores = 2 + else: + # Put an upper bound on cores; more than 12ish workers is overkill + cores = 6 if cores > 6 else cores + context["num_workers"] = cores * 2 + 1 + render("cdk.master.auth-webhook.service", auth_webhook_svc, context) + if any_file_changed([auth_webhook_svc]): + # if the service file has changed (or is new), + # we have to inform systemd about it + check_call(["systemctl", "daemon-reload"]) + if not is_flag_set("kubernetes-control-plane.auth-webhook-service.started"): + if service_resume(auth_webhook_svc_name): + set_flag("kubernetes-control-plane.auth-webhook-service.started") + clear_flag("kubernetes-control-plane.apiserver.configured") + else: + hookenv.status_set( + "maintenance", "Waiting for {} to start.".format(auth_webhook_svc_name) + ) + hookenv.log("{} failed to start; will retry".format(auth_webhook_svc_name)) + + +@when( + "kubernetes-control-plane.apiserver.running", + "kubernetes-control-plane.auth-webhook-service.started", + "authentication.setup", +) +@when_not("kubernetes-control-plane.auth-webhook-tokens.setup") +def setup_auth_webhook_tokens(): + """Reconfigure authentication to setup auth-webhook tokens. + + If authentication has been setup with a non-auth-webhook configuration, + convert it to use auth-webhook tokens instead. Alternatively, if the + auth-webhook setup failed, this will also ensure that it is retried. + """ + # Even if the apiserver is configured, it may not be fully started. Only + # proceed if we can get secrets. + if not kubectl_success("get", "secrets"): + hookenv.log("Secrets are not yet available; will retry") + return + if create_tokens_and_sign_auth_requests(): + # Force setup_leader_authentication to be re-run. 
+ remove_state("authentication.setup") + + +@when( + "etcd.available", + "tls_client.certs.saved", + "authentication.setup", + "leadership.set.auto_storage_backend", + "leadership.set.cluster_tag", + "cni.available", +) +@when_not( + "kubernetes-control-plane.components.started", + "kubernetes-control-plane.cloud.pending", + "kubernetes-control-plane.cloud.blocked", + "kubernetes-control-plane.vault-kv.pending", + "tls_client.certs.changed", + "tls_client.ca.written", + "upgrade.series.in-progress", +) +def start_control_plane(): + """Run the Kubernetes control-plane components.""" + hookenv.status_set( + "maintenance", "Configuring the Kubernetes control plane services." + ) + + if not is_state("kubernetes-control-plane.vault-kv.pending") and not is_state( + "kubernetes-control-plane.secure-storage.created" + ): + encryption_config_path().parent.mkdir(parents=True, exist_ok=True) + host.write_file( + path=str(encryption_config_path()), + perms=0o600, + content=yaml.safe_dump( + { + "kind": "EncryptionConfig", + "apiVersion": "v1", + "resources": [ + {"resources": ["secrets"], "providers": [{"identity": {}}]} + ], + } + ), + ) + + kubernetes_control_plane.freeze_service_cidr() + + etcd = endpoint_from_flag("etcd.available") + if not etcd.get_connection_string(): + # etcd is not returning a connection string. This happens when + # the control-plane unit disconnects from etcd and is ready to terminate. + # No point in trying to start control-plane services and fail. Just return. 
+ return + + # TODO: Make sure below relation is handled on change + # https://github.com/kubernetes/kubernetes/issues/43461 + handle_etcd_relation(etcd) + + # Set up additional systemd services + add_systemd_restart_always() + add_systemd_file_limit() + add_systemd_file_watcher() + add_systemd_iptables_patch() + check_call(["systemctl", "daemon-reload"]) + + # Add CLI options to all components + clear_flag("kubernetes-control-plane.apiserver.configured") + configure_controller_manager() + configure_scheduler() + + # kube-proxy + cluster_cidr = kubernetes_common.cluster_cidr() + if kubernetes_common.is_ipv6(cluster_cidr): + kubernetes_common.enable_ipv6_forwarding() + + local_address = get_ingress_address("kube-api-endpoint") + local_server = "https://{0}:{1}".format(local_address, 6443) + + configure_kube_proxy(configure_prefix, [local_server], cluster_cidr) + service_restart("snap.kube-proxy.daemon") + + set_state("kubernetes-control-plane.components.started") + hookenv.open_port(6443) + + +@when("config.changed.proxy-extra-args") +def proxy_args_changed(): + clear_flag("kubernetes-control-plane.components.started") + clear_flag("config.changed.proxy-extra-args") + + +@when("tls_client.certs.changed") +def certs_changed(): + if service_running(auth_webhook_svc_name): + service_restart(auth_webhook_svc_name) + clear_flag("kubernetes-control-plane.components.started") + clear_flag("tls_client.certs.changed") + + +@when("tls_client.ca.written") +def ca_written(): + clear_flag("kubernetes-control-plane.components.started") + if is_state("leadership.is_leader"): + if leader_get("kubernetes-master-addons-ca-in-use"): + leader_set({"kubernetes-master-addons-restart-for-ca": True}) + clear_flag("tls_client.ca.written") + clear_flag("kubernetes-control-plane.kubelet.configured") + + +@when("etcd.available") +def etcd_data_change(etcd): + """Etcd scale events block control-plane reconfiguration due to the + kubernetes-control-plane.components.started state. 
We need a way to + handle these events consistently only when the number of etcd + units has actually changed""" + + # key off of the connection string + connection_string = etcd.get_connection_string() + + # If the connection string changes, remove the started state to trigger + # handling of the control-plane components + if data_changed("etcd-connect", connection_string): + remove_state("kubernetes-control-plane.components.started") + + # If the cert info changes, remove the started state to trigger + # handling of the control-plane components + if data_changed("etcd-certs", etcd.get_client_credentials()): + clear_flag("kubernetes-control-plane.components.started") + + # We are the leader and the auto_storage_backend is not set meaning + # this is the first time we connect to etcd. + auto_storage_backend = leader_get("auto_storage_backend") + is_leader = is_state("leadership.is_leader") + if is_leader and not auto_storage_backend: + if etcd.get_version().startswith("3."): + leader_set(auto_storage_backend="etcd3") + else: + leader_set(auto_storage_backend="etcd2") + + +def get_dns_info(): + dns_provider = endpoint_from_flag("dns-provider.available") + try: + goal_state_rels = hookenv.goal_state().get("relations", {}) + except NotImplementedError: + goal_state_rels = {} + dns_provider_missing = not dns_provider and "dns-provider" not in goal_state_rels + dns_provider_pending = not dns_provider and "dns-provider" in goal_state_rels + try: + dns_disabled_cfg = get_dns_provider() == "none" + except InvalidDnsProvider: + dns_disabled_cfg = False + if dns_provider_missing and dns_disabled_cfg: + return True, None, None, None + elif dns_provider_pending: + return False, None, None, None + elif dns_provider: + details = dns_provider.details() + return True, details["sdn-ip"], details["port"], details["domain"] + else: + try: + dns_provider = get_dns_provider() + except InvalidDnsProvider: + hookenv.log(traceback.format_exc()) + return False, None, None, None + 
dns_domain = hookenv.config("dns_domain") + dns_ip = None + try: + dns_ip = kubernetes_control_plane.get_dns_ip() + except CalledProcessError: + hookenv.log("DNS addon service not ready yet") + return False, None, None, None + return True, dns_ip, 53, dns_domain + + +@when("kube-control.connected") +@when("cdk-addons.configured") +def send_cluster_dns_detail(kube_control): + """Send cluster DNS info""" + dns_ready, dns_ip, dns_port, dns_domain = get_dns_info() + if dns_ready: + kube_control.set_dns(dns_port, dns_domain, dns_ip, dns_ip is not None) + + +def create_tokens_and_sign_auth_requests(): + """Create tokens for CK users and services.""" + clear_flag("kubernetes-control-plane.auth-webhook-tokens.setup") + # NB: This may be called before kube-apiserver is up when bootstrapping new + # clusters with auth-webhook. In this case, setup_tokens will be a no-op. + # We will re-enter this function once control plane services are available to + # create proper secrets. + controller_manager_token = get_token("system:kube-controller-manager") + if not controller_manager_token: + setup_tokens(None, "system:kube-controller-manager", "kube-controller-manager") + + proxy_token = get_token("system:kube-proxy") + if not proxy_token: + setup_tokens(None, "system:kube-proxy", "kube-proxy") + proxy_token = get_token("system:kube-proxy") + + scheduler_token = get_token("system:kube-scheduler") + if not scheduler_token: + setup_tokens(None, "system:kube-scheduler", "system:kube-scheduler") + + client_token = get_token("admin") + if not client_token: + setup_tokens(None, "admin", "admin", "system:masters") + client_token = get_token("admin") + + monitoring_token = get_token("system:monitoring") + if not monitoring_token: + setup_tokens(None, "system:monitoring", "system:monitoring") + + if not (proxy_token and client_token): + # When bootstrapping a new cluster, we may not have all our secrets yet. + # Do not let the kubelets start without all the needed tokens. 
+ hookenv.log( + "Missing required tokens for kubelet startup; will retry", hookenv.WARNING + ) + return False + + kube_control = endpoint_from_flag("kube-control.connected") + requests = kube_control.auth_user() if kube_control else [] + any_failed = False + for request in requests: + username = request[1]["user"] + group = request[1]["group"] + if not username or not group: + continue + kubelet_token = get_token(username) + if not kubelet_token: + # Username will be in the form of system:node:. + # User ID will be a worker , and while not used today, we store + # this in case it becomes useful to map a secret to a unit in the future. + userid = request[0] + setup_tokens(None, username, userid, group) + kubelet_token = get_token(username) + if not kubelet_token: + hookenv.log( + "Failed to create token for {}; will retry".format(username), + hookenv.WARNING, + ) + any_failed = True + continue + kube_control.sign_auth_request( + request[0], username, kubelet_token, proxy_token, client_token + ) + if not any_failed: + set_flag("kubernetes-control-plane.auth-webhook-tokens.setup") + return True + else: + return False + + +@when("kube-api-endpoint.available") +def push_service_data(): + """Send configuration to the load balancer, and close access to the + public interface. 
+ """ + kube_api = endpoint_from_flag("kube-api-endpoint.available") + + endpoints = kubernetes_control_plane.get_endpoints_from_config() + if endpoints: + addresses = [e[0] for e in endpoints] + kube_api.configure( + kubernetes_control_plane.STANDARD_API_PORT, addresses, addresses + ) + else: + # no manually configured LBs, so rely on the interface layer + # to use the ingress address for each relation + kube_api.configure(kubernetes_control_plane.STANDARD_API_PORT) + + +@when("leadership.is_leader") +@when_any( + "endpoint.loadbalancer-internal.available", + "endpoint.loadbalancer-external.available", +) +def request_load_balancers(): + """Request LBs from the related provider(s).""" + for lb_type in ("internal", "external"): + lb_provider = endpoint_from_name("loadbalancer-" + lb_type) + if not lb_provider.is_available: + continue + req = lb_provider.get_request("api-server-" + lb_type) + req.protocol = req.protocols.tcp + ext_api_port = kubernetes_control_plane.EXTERNAL_API_PORT + int_api_port = kubernetes_control_plane.STANDARD_API_PORT + api_port = ext_api_port if lb_type == "external" else int_api_port + req.port_mapping = {api_port: int_api_port} + req.public = lb_type == "external" + if not req.health_checks: + req.add_health_check( + protocol=req.protocols.http, + port=int_api_port, + path="/livez", + ) + lb_provider.send_request(req) + + +@when("kube-control.connected") +def send_api_urls(): + kube_control = endpoint_from_name("kube-control") + if not hasattr(kube_control, "set_api_endpoints"): + # built with an old version of the kube-control interface + # the old kube-api-endpoint relation must be used instead + return + endpoints = kubernetes_control_plane.get_internal_api_endpoints() + if not endpoints: + return + kube_control.set_api_endpoints(kubernetes_control_plane.get_api_urls(endpoints)) + + +def has_external_cloud_provider(): + return bool(hookenv.relations().get("external-cloud-provider")) + + +@when("kube-control.connected") +def 
send_xcp_flag(): + has_xcp = has_external_cloud_provider() + kube_control = endpoint_from_name("kube-control") + kube_control.set_has_xcp(has_xcp) + + +@when("certificates.available", "cni.available") +def send_data(): + """Send the data that is required to create a server certificate for + this server.""" + # Use the public ip of this unit as the Common Name for the certificate. + common_name = hookenv.unit_public_ip() + + # Get the SDN gateways based on the service CIDRs. + k8s_service_ips = kubernetes_control_plane.get_kubernetes_service_ips() + + cluster_cidr = kubernetes_common.cluster_cidr() + bind_ips = kubernetes_common.get_bind_addrs( + ipv4=kubernetes_common.is_ipv4(cluster_cidr), + ipv6=kubernetes_common.is_ipv6(cluster_cidr), + ) + + # Get ingress address (this is probably already covered by bind_ips, + # but list it explicitly as well just in case it's not). + old_ingress_ip = get_ingress_address("kube-api-endpoint") + new_ingress_ip = get_ingress_address("kube-control") + + local_endpoint = kubernetes_control_plane.get_local_api_endpoint()[0][0] + + domain = hookenv.config("dns_domain") + # Create SANs that the tls layer will add to the server cert. + sans = ( + [ + # The CN field is checked as a hostname, so if it's an IP, it + # won't match unless also included in the SANs as an IP field. 
+ common_name, + local_endpoint, + old_ingress_ip, + new_ingress_ip, + socket.gethostname(), + socket.getfqdn(), + "kubernetes", + "kubernetes.{0}".format(domain), + "kubernetes.default", + "kubernetes.default.svc", + "kubernetes.default.svc.{0}".format(domain), + ] + + k8s_service_ips + + bind_ips + ) + + sans.extend(e[0] for e in kubernetes_control_plane.get_internal_api_endpoints()) + sans.extend(e[0] for e in kubernetes_control_plane.get_external_api_endpoints()) + + # maybe they have extra names they want as SANs + extra_sans = hookenv.config("extra_sans") + if extra_sans and not extra_sans == "": + sans.extend(extra_sans.split()) + + # Request a server cert with this information. + tls_client.request_server_cert( + common_name, + sorted(set(sans)), + crt_path=server_crt_path, + key_path=server_key_path, + ) + + # Request a client cert for kubelet. + tls_client.request_client_cert( + "system:kube-apiserver", crt_path=client_crt_path, key_path=client_key_path + ) + + +@when( + "config.changed.extra_sans", "certificates.available", "kube-api-endpoint.available" +) +def update_certificates(): + # NOTE: This handler may be called by another function. Two relationships + # are required, otherwise the send_data function fails. + # (until the relations are available) + missing_relations = get_unset_flags( + "certificates.available", "kube-api-endpoint.available" + ) + if missing_relations: + hookenv.log( + "Missing relations: '{}'".format(", ".join(missing_relations)), + hookenv.ERROR, + ) + return + + # Using the config.changed.extra_sans flag to catch changes. + # IP changes will take ~5 minutes or so to propagate, but + # it will update. 
+ send_data() + clear_flag("config.changed.extra_sans") + + +@when( + "kubernetes-control-plane.components.started", + "leadership.is_leader", + "cdk-addons.reconfigure", +) +def reconfigure_cdk_addons(): + configure_cdk_addons() + + +@when( + "kubernetes-control-plane.components.started", + "leadership.is_leader", + "leadership.set.cluster_tag", +) +@when_not("upgrade.series.in-progress") +def configure_cdk_addons(): + """Configure CDK addons""" + remove_state("cdk-addons.reconfigure") + remove_state("cdk-addons.configured") + remove_state("kubernetes-control-plane.aws.changed") + remove_state("kubernetes-control-plane.azure.changed") + remove_state("kubernetes-control-plane.gcp.changed") + remove_state("kubernetes-control-plane.openstack.changed") + load_gpu_plugin = hookenv.config("enable-nvidia-plugin").lower() + gpuEnable = ( + get_version("kube-apiserver") >= (1, 9) + and load_gpu_plugin == "auto" + and is_state("kubernetes-control-plane.gpu.enabled") + ) + registry = hookenv.config("image-registry") + dbEnabled = str(hookenv.config("enable-dashboard-addons")).lower() + try: + dnsProvider = get_dns_provider() + except InvalidDnsProvider: + hookenv.log(traceback.format_exc()) + return + metricsEnabled = str(hookenv.config("enable-metrics")).lower() + default_storage = "" + ceph = {} + ceph_ep = endpoint_from_flag("ceph-client.available") + cephfs_mounter = hookenv.config("cephfs-mounter") + cephEnabled = "false" + cephFsEnabled = "false" + if ceph_ep and ceph_ep.key and ceph_ep.mon_hosts(): + kubernetes_control_plane.install_ceph_common() + ceph_fsid = kubernetes_control_plane.get_ceph_fsid() + if ceph_fsid: + cephEnabled = "true" + b64_ceph_key = base64.b64encode(ceph_ep.key.encode("utf-8")) + ceph["admin_key"] = b64_ceph_key.decode("ascii") + ceph["fsid"] = ceph_fsid + ceph["kubernetes_key"] = b64_ceph_key.decode("ascii") + ceph["mon_hosts"] = " ".join(ceph_ep.mon_hosts()) + default_storage = hookenv.config("default-storage") + + if 
kubernetes_control_plane.query_cephfs_enabled(): + cephFsEnabled = "true" + ceph["fsname"] = kubernetes_control_plane.get_cephfs_fsname() or "" + + keystone = {} + ks = endpoint_from_flag("keystone-credentials.available") + if ks: + keystoneEnabled = "true" + keystone["cert"] = "/root/cdk/server.crt" + keystone["key"] = "/root/cdk/server.key" + keystone["url"] = "{}://{}:{}/v{}".format( + ks.credentials_protocol(), + ks.credentials_host(), + ks.credentials_port(), + ks.api_version(), + ) + keystone["keystone-ca"] = hookenv.config("keystone-ssl-ca") + else: + keystoneEnabled = "false" + + enable_aws = str(is_flag_set("endpoint.aws.ready")).lower() + enable_azure = str(is_flag_set("endpoint.azure.ready")).lower() + enable_gcp = str(is_flag_set("endpoint.gcp.ready")).lower() + enable_openstack = str(is_flag_set("endpoint.openstack.ready")).lower() + openstack = endpoint_from_flag("endpoint.openstack.ready") + + if is_state("kubernetes-control-plane.cdk-addons.unique-cluster-tag"): + cluster_tag = leader_get("cluster_tag") + else: + # allow for older upgraded charms to control when they start sending + # the unique cluster tag to cdk-addons + cluster_tag = "kubernetes" + + args = [ + "kubeconfig=" + cdk_addons_kubectl_config_path, + "arch=" + arch(), + "dns-domain=" + hookenv.config("dns_domain"), + "registry=" + registry, + "enable-dashboard=" + dbEnabled, + "enable-metrics=" + metricsEnabled, + "enable-gpu=" + str(gpuEnable).lower(), + "enable-ceph=" + cephEnabled, + "enable-cephfs=" + cephFsEnabled, + "cephfs-mounter=" + cephfs_mounter, + "ceph-admin-key=" + (ceph.get("admin_key", "")), + "ceph-fsid=" + (ceph.get("fsid", "")), + "ceph-fsname=" + (ceph.get("fsname", "")), + "ceph-kubernetes-key=" + (ceph.get("admin_key", "")), + 'ceph-mon-hosts="' + (ceph.get("mon_hosts", "")) + '"', + "ceph-user=" + hookenv.application_name(), + "default-storage=" + default_storage, + "enable-keystone=" + keystoneEnabled, + "keystone-cert-file=" + keystone.get("cert", ""), + 
"keystone-key-file=" + keystone.get("key", ""), + "keystone-server-url=" + keystone.get("url", ""), + "keystone-server-ca=" + keystone.get("keystone-ca", ""), + "dashboard-auth=token", + "enable-aws=" + enable_aws, + "enable-azure=" + enable_azure, + "enable-gcp=" + enable_gcp, + "enable-openstack=" + enable_openstack, + "cluster-tag=" + cluster_tag, + ] + if openstack: + args.extend( + [ + "openstack-cloud-conf=" + + base64.b64encode( + generate_openstack_cloud_config().encode("utf-8") + ).decode("utf-8"), + "openstack-endpoint-ca=" + (openstack.endpoint_tls_ca or ""), + ] + ) + if get_version("kube-apiserver") >= (1, 14): + args.append("dns-provider=" + dnsProvider) + else: + enableKubeDNS = dnsProvider == "kube-dns" + args.append("enable-kube-dns=" + str(enableKubeDNS).lower()) + check_call(["snap", "set", "cdk-addons"] + args) + if not addons_ready(): + remove_state("cdk-addons.configured") + return + + set_state("cdk-addons.configured") + leader_set({"kubernetes-master-addons-ca-in-use": True}) + if ks: + leader_set({"keystone-cdk-addons-configured": True}) + else: + leader_set({"keystone-cdk-addons-configured": None}) + + +@retry(times=3, delay_secs=20) +def addons_ready(): + """ + Test if the add ons got installed + + Returns: True is the addons got applied + + """ + try: + check_call(["cdk-addons.apply"]) + return True + except CalledProcessError: + hookenv.log("Addons are not ready yet.") + return False + + +@when("ceph-client.connected") +@when_not("kubernetes-control-plane.ceph.pool.created") +def ceph_storage_pool(): + """Once Ceph relation is ready, + we need to add storage pools. 
+ + :return: None + """ + hookenv.log("Creating Ceph pools.") + ceph_client = endpoint_from_flag("ceph-client.connected") + + pools = ["xfs-pool", "ext4-pool"] + + for pool in pools: + hookenv.status_set("maintenance", "Creating {} pool.".format(pool)) + try: + ceph_client.create_pool(name=pool, replicas=3) + except Exception as e: + hookenv.status_set("blocked", "Error creating {} pool: {}.".format(pool, e)) + + set_state("kubernetes-control-plane.ceph.pool.created") + + +@when("nrpe-external-master.available") +@when_not("nrpe-external-master.initial-config") +def initial_nrpe_config(): + set_state("nrpe-external-master.initial-config") + update_nrpe_config() + + +@when("config.changed.authorization-mode") +def switch_auth_mode(forced=False): + config = hookenv.config() + mode = config.get("authorization-mode") + + if data_changed("auth-mode", mode) or forced: + # manage flags to handle rbac related resources + if mode and "rbac" in mode.lower(): + remove_state("kubernetes-control-plane.remove.rbac") + set_state("kubernetes-control-plane.create.rbac") + else: + remove_state("kubernetes-control-plane.create.rbac") + set_state("kubernetes-control-plane.remove.rbac") + + # set ourselves up to restart since auth mode has changed + remove_state("kubernetes-control-plane.components.started") + + +@when("leadership.is_leader", "kubernetes-control-plane.components.started") +@when_not("kubernetes-control-plane.pod-security-policy.applied") +def create_pod_security_policy_resources(): + pod_security_policy_path = "/root/cdk/pod-security-policy.yaml" + pod_security_policy = hookenv.config("pod-security-policy") + if pod_security_policy: + hookenv.log("Using configuration defined on pod-security-policy option") + write_file_with_autogenerated_header( + pod_security_policy_path, pod_security_policy + ) + else: + hookenv.log("Using the default rbac-pod-security-policy template") + render("rbac-pod-security-policy.yaml", pod_security_policy_path, {}) + + hookenv.log("Creating 
pod security policy resources.") + if kubectl_manifest("apply", pod_security_policy_path): + set_state("kubernetes-control-plane.pod-security-policy.applied") + else: + msg = "Failed to apply {}, will retry.".format(pod_security_policy_path) + hookenv.log(msg) + + +@when( + "leadership.is_leader", + "kubernetes-control-plane.components.started", + "kubernetes-control-plane.create.rbac", +) +def create_rbac_resources(): + rbac_proxy_path = "/root/cdk/rbac-proxy.yaml" + + # NB: when metrics and logs are retrieved by proxy, the 'user' is the + # common name of the cert used to authenticate the proxied request. + # The CN for /root/cdk/client.crt is 'system:kube-apiserver' + # (see the send_data handler, above). + proxy_users = ["client", "system:kube-apiserver"] + + context = {"juju_application": hookenv.service_name(), "proxy_users": proxy_users} + render("rbac-proxy.yaml", rbac_proxy_path, context) + + hookenv.log("Creating proxy-related RBAC resources.") + if kubectl_manifest("apply", rbac_proxy_path): + remove_state("kubernetes-control-plane.create.rbac") + else: + msg = "Failed to apply {}, will retry.".format(rbac_proxy_path) + hookenv.log(msg) + + +@when("leadership.is_leader", "kubernetes-control-plane.components.started") +@when_not("kubernetes-control-plane.system-monitoring-rbac-role.applied") +def apply_system_monitoring_rbac_role(): + try: + hookenv.status_set("maintenance", "Applying system:monitoring RBAC role") + path = "/root/cdk/system-monitoring-rbac-role.yaml" + render("system-monitoring-rbac-role.yaml", path, {}) + kubectl("apply", "-f", path) + set_state("kubernetes-control-plane.system-monitoring-rbac-role.applied") + except Exception: + hookenv.log(traceback.format_exc()) + hookenv.log("Waiting to retry applying system:monitoring RBAC role") + return + + +@when( + "leadership.is_leader", + "kubernetes-control-plane.components.started", + "kubernetes-control-plane.remove.rbac", +) +def remove_rbac_resources(): + rbac_proxy_path = 
"/root/cdk/rbac-proxy.yaml" + if os.path.isfile(rbac_proxy_path): + hookenv.log("Removing proxy-related RBAC resources.") + if kubectl_manifest("delete", rbac_proxy_path): + os.remove(rbac_proxy_path) + remove_state("kubernetes-control-plane.remove.rbac") + else: + msg = "Failed to delete {}, will retry.".format(rbac_proxy_path) + hookenv.log(msg) + else: + # if we dont have the yaml, there's nothing for us to do + remove_state("kubernetes-control-plane.remove.rbac") + + +@when("kubernetes-control-plane.components.started") +@when("nrpe-external-master.available") +@when_any("config.changed.nagios_context", "config.changed.nagios_servicegroups") +def update_nrpe_config(): + services = ["snap.{}.daemon".format(s) for s in control_plane_services] + services += [auth_webhook_svc_name] + + plugin = install_nagios_plugin_from_file( + "templates/nagios_plugin.py", "check_k8s_master.py" + ) + hostname = nrpe.get_nagios_hostname() + current_unit = nrpe.get_nagios_unit_name() + nrpe_setup = nrpe.NRPE(hostname=hostname) + nrpe.add_init_service_checks(nrpe_setup, services, current_unit) + nrpe_setup.add_check( + "k8s-api-server", + "Verify that the Kubernetes API server is accessible", + str(plugin), + ) + nrpe_setup.write() + + +@when_not("nrpe-external-master.available") +@when("nrpe-external-master.initial-config") +def remove_nrpe_config(): + # List of systemd services for which the checks will be removed + services = ["snap.{}.daemon".format(s) for s in control_plane_services] + + remove_nagios_plugin("check_k8s_master.py") + + # The current nrpe-external interface doesn't handle a lot of logic, + # use the charm-helpers code for now. 
+ hostname = nrpe.get_nagios_hostname() + nrpe_setup = nrpe.NRPE(hostname=hostname) + + for service in services: + nrpe_setup.remove_check(shortname=service) + nrpe_setup.remove_check(shortname="k8s-api-server") + remove_state("nrpe-external-master.initial-config") + + +def is_privileged(): + """Return boolean indicating whether or not to set allow-privileged=true.""" + privileged = hookenv.config("allow-privileged").lower() + if privileged == "auto": + return ( + is_state("kubernetes-control-plane.gpu.enabled") + or is_state("ceph-client.available") + or is_state("endpoint.openstack.joined") + ) + else: + return privileged == "true" + + +@when("config.changed.allow-privileged") +@when("kubernetes-control-plane.components.started") +def on_config_allow_privileged_change(): + """React to changed 'allow-privileged' config value.""" + remove_state("kubernetes-control-plane.components.started") + remove_state("config.changed.allow-privileged") + + +@when_any( + "config.changed.api-extra-args", + "config.changed.audit-policy", + "config.changed.audit-webhook-config", + "config.changed.enable-keystone-authorization", + "config.changed.service-cidr", +) +@when("kubernetes-control-plane.components.started") +@when("leadership.set.auto_storage_backend") +@when("etcd.available") +def reconfigure_apiserver(): + clear_flag("kubernetes-control-plane.apiserver.configured") + + +@when("config.changed.controller-manager-extra-args") +@when("kubernetes-control-plane.components.started") +def on_config_controller_manager_extra_args_change(): + configure_controller_manager() + + +@when("config.changed.scheduler-extra-args") +@when("kubernetes-control-plane.components.started") +def on_config_scheduler_extra_args_change(): + configure_scheduler() + + +@when("kube-control.gpu.available") +@when("kubernetes-control-plane.components.started") +@when_not("kubernetes-control-plane.gpu.enabled") +def on_gpu_available(kube_control): + """The remote side (kubernetes-worker) is gpu-enabled. 
+ + We need to run in privileged mode. + + """ + kube_version = get_version("kube-apiserver") + config = hookenv.config() + if config["allow-privileged"].lower() == "false" and kube_version < (1, 9): + return + + remove_state("kubernetes-control-plane.components.started") + set_state("kubernetes-control-plane.gpu.enabled") + + +@when("kubernetes-control-plane.gpu.enabled") +@when("kubernetes-control-plane.components.started") +@when_not("kubernetes-control-plane.privileged") +def gpu_with_no_privileged(): + """We were in gpu mode, but the operator has set allow-privileged="false", + so we can't run in gpu mode anymore. + + """ + if get_version("kube-apiserver") < (1, 9): + remove_state("kubernetes-control-plane.gpu.enabled") + + +@when("kube-control.connected") +@when_not("kube-control.gpu.available") +@when("kubernetes-control-plane.gpu.enabled") +@when("kubernetes-control-plane.components.started") +def gpu_departed(kube_control): + """We were in gpu mode, but the workers informed us there is + no gpu support anymore. + + """ + remove_state("kubernetes-control-plane.gpu.enabled") + + +@hook("stop") +def shutdown(): + """Stop the kubernetes control-plane services""" + for service in control_plane_services: + service_stop("snap.%s.daemon" % service) + + +@when( + "certificates.ca.available", + "certificates.client.cert.available", + "authentication.setup", +) +def build_kubeconfig(): + """Gather the relevant data for Kubernetes configuration objects and create + a config object with that information.""" + local_endpoint = kubernetes_control_plane.get_local_api_endpoint() + internal_endpoints = kubernetes_control_plane.get_internal_api_endpoints() + external_endpoints = kubernetes_control_plane.get_external_api_endpoints() + + # Do we have everything we need? 
+ if ca_crt_path.exists() and internal_endpoints and external_endpoints: + local_url = kubernetes_control_plane.get_api_url(local_endpoint) + internal_url = kubernetes_control_plane.get_api_url(internal_endpoints) + external_url = kubernetes_control_plane.get_api_url(external_endpoints) + client_pass = get_token("admin") + if not client_pass: + # If we made it this far without a password, we're bootstrapping a new + # cluster. Create a new token so we can build an admin kubeconfig. The + # auth-webhook service will ack this value from the kubeconfig file, + # allowing us to continue until the control-plane is started and a proper + # secret can be created. + client_pass = ( + hookenv.config("client_password") + or kubernetes_control_plane.token_generator() + ) + client_pass = "admin::{}".format(client_pass) + + # drop keystone helper script? + ks = endpoint_from_flag("keystone-credentials.available") + if ks: + script_filename = "kube-keystone.sh" + keystone_path = os.path.join(os.sep, "home", "ubuntu", script_filename) + context = { + "protocol": ks.credentials_protocol(), + "address": ks.credentials_host(), + "port": ks.credentials_port(), + "version": ks.api_version(), + } + render(script_filename, keystone_path, context) + elif is_state("leadership.set.keystone-cdk-addons-configured"): + # if addons are configured, we're going to do keystone + # just not yet because we don't have creds + hookenv.log("Keystone endpoint not found, will retry.") + + cluster_id = None + aws_iam = endpoint_from_flag("endpoint.aws-iam.available") + if aws_iam: + cluster_id = aws_iam.get_cluster_id() + + # Create an absolute path for the kubeconfig file. + kubeconfig_path = os.path.join(os.sep, "home", "ubuntu", "config") + + # Create the kubeconfig on this system so users can access the cluster. 
+ hookenv.log("Writing kubeconfig file.") + + if ks: + create_kubeconfig( + kubeconfig_path, + external_url, + ca_crt_path, + user="admin", + token=client_pass, + keystone=True, + aws_iam_cluster_id=cluster_id, + ) + else: + create_kubeconfig( + kubeconfig_path, + external_url, + ca_crt_path, + user="admin", + token=client_pass, + aws_iam_cluster_id=cluster_id, + ) + + # Make the config file readable by the ubuntu users so juju scp works. + cmd = ["chown", "ubuntu:ubuntu", kubeconfig_path] + check_call(cmd) + + # make a kubeconfig for root / the charm + create_kubeconfig( + kubeclientconfig_path, + local_url, + ca_crt_path, + user="admin", + token=client_pass, + ) + + # Create kubernetes configuration in the default location for ubuntu. + create_kubeconfig( + "/home/ubuntu/.kube/config", + internal_url, + ca_crt_path, + user="admin", + token=client_pass, + ) + # Make the config dir readable by the ubuntu user + check_call(["chown", "-R", "ubuntu:ubuntu", "/home/ubuntu/.kube"]) + + # make a kubeconfig for cdk-addons + create_kubeconfig( + cdk_addons_kubectl_config_path, + local_url, + ca_crt_path, + user="admin", + token=client_pass, + ) + + # make a kubeconfig for our services + proxy_token = get_token("system:kube-proxy") + if proxy_token: + create_kubeconfig( + kubeproxyconfig_path, + local_url, + ca_crt_path, + token=proxy_token, + user="kube-proxy", + ) + controller_manager_token = get_token("system:kube-controller-manager") + if controller_manager_token: + create_kubeconfig( + kubecontrollermanagerconfig_path, + local_url, + ca_crt_path, + token=controller_manager_token, + user="kube-controller-manager", + ) + scheduler_token = get_token("system:kube-scheduler") + if scheduler_token: + create_kubeconfig( + kubeschedulerconfig_path, + local_url, + ca_crt_path, + token=scheduler_token, + user="kube-scheduler", + ) + + cni = endpoint_from_name("cni") + if cni: + cni.notify_kubeconfig_changed() + + +def handle_etcd_relation(reldata): + """Save the client 
credentials and set appropriate daemon flags when + etcd declares itself as available""" + # Define where the etcd tls files will be kept. + etcd_dir = "/root/cdk/etcd" + + # Create paths to the etcd client ca, key, and cert file locations. + ca = os.path.join(etcd_dir, "client-ca.pem") + key = os.path.join(etcd_dir, "client-key.pem") + cert = os.path.join(etcd_dir, "client-cert.pem") + + # Save the client credentials (in relation data) to the paths provided. + reldata.save_client_credentials(key, cert, ca) + + +def remove_if_exists(path): + try: + os.remove(path) + except FileNotFoundError: + pass + + +def write_file_with_autogenerated_header(path, contents): + with open(path, "w") as f: + header = "# Autogenerated by kubernetes-control-plane charm" + f.write(header + "\n" + contents) + + +@when( + "etcd.available", + "cni.available", + "kubernetes-control-plane.auth-webhook-service.started", +) +@when_not("kubernetes-control-plane.apiserver.configured") +def configure_apiserver(): + etcd_connection_string = endpoint_from_flag( + "etcd.available" + ).get_connection_string() + if not etcd_connection_string: + # etcd is not returning a connection string. This happens when + # the control-plane unit disconnects from etcd and is ready to terminate. + # No point in trying to start control-plane services and fail. Just return. 
+ return + + # Update unit db service-cidr + was_service_cidr_expanded = kubernetes_control_plane.is_service_cidr_expansion() + kubernetes_control_plane.freeze_service_cidr() + + cluster_cidr = kubernetes_common.cluster_cidr() + service_cidr = kubernetes_control_plane.service_cidr() + + api_opts = {} + + if is_privileged(): + api_opts["allow-privileged"] = "true" + set_state("kubernetes-control-plane.privileged") + else: + api_opts["allow-privileged"] = "false" + remove_state("kubernetes-control-plane.privileged") + + # Handle static options for now + api_opts["service-cluster-ip-range"] = service_cidr + feature_gates = [] + if kubernetes_common.is_dual_stack(cluster_cidr): + feature_gates.append("IPv6DualStack=true") + api_opts["min-request-timeout"] = "300" + api_opts["v"] = "4" + api_opts["tls-cert-file"] = str(server_crt_path) + api_opts["tls-private-key-file"] = str(server_key_path) + api_opts["tls-cipher-suites"] = ",".join(tls_ciphers_intermediate) + api_opts["kubelet-certificate-authority"] = str(ca_crt_path) + api_opts["kubelet-client-certificate"] = str(client_crt_path) + api_opts["kubelet-client-key"] = str(client_key_path) + api_opts["logtostderr"] = "true" + api_opts["storage-backend"] = getStorageBackend() + api_opts["profiling"] = "false" + + api_opts["anonymous-auth"] = "false" + api_opts["authentication-token-webhook-cache-ttl"] = "1m0s" + api_opts["authentication-token-webhook-config-file"] = auth_webhook_conf + api_opts["service-account-issuer"] = "https://kubernetes.default.svc" + api_opts["service-account-signing-key-file"] = "/root/cdk/serviceaccount.key" + api_opts["service-account-key-file"] = "/root/cdk/serviceaccount.key" + api_opts[ + "kubelet-preferred-address-types" + ] = "InternalIP,Hostname,InternalDNS,ExternalDNS,ExternalIP" + api_opts["encryption-provider-config"] = str(encryption_config_path()) + if kubernetes_common.is_ipv6(cluster_cidr): + api_opts["bind-address"] = "::" + if kubernetes_common.is_ipv6_preferred(cluster_cidr): + 
api_opts["advertise-address"] = get_ingress_address6("kube-control") + else: + api_opts["advertise-address"] = get_ingress_address("kube-control") + + etcd_dir = "/root/cdk/etcd" + etcd_ca = os.path.join(etcd_dir, "client-ca.pem") + etcd_key = os.path.join(etcd_dir, "client-key.pem") + etcd_cert = os.path.join(etcd_dir, "client-cert.pem") + + api_opts["etcd-cafile"] = etcd_ca + api_opts["etcd-keyfile"] = etcd_key + api_opts["etcd-certfile"] = etcd_cert + api_opts["etcd-servers"] = etcd_connection_string + + # In Kubernetes 1.10 and later, some admission plugins are enabled by + # default. The current list of default plugins can be found at + # https://bit.ly/2meP9XT, listed under the '--enable-admission-plugins' + # option. + # + # The list below need only include the plugins we want to enable + # in addition to the defaults. + + admission_plugins = [ + "PersistentVolumeLabel", + "PodSecurityPolicy", + "NodeRestriction", + ] + + auth_mode = hookenv.config("authorization-mode") + + ks = endpoint_from_flag("keystone-credentials.available") + if ks: + ks_ip = get_service_ip("k8s-keystone-auth-service", errors_fatal=False) + if ks_ip: + os.makedirs(keystone_root, exist_ok=True) + + keystone_webhook = keystone_root + "/webhook.yaml" + context = {} + context["keystone_service_cluster_ip"] = ks_ip + render("keystone-api-server-webhook.yaml", keystone_webhook, context) + + if hookenv.config("enable-keystone-authorization"): + # if user wants authorization, enable it + if "Webhook" not in auth_mode: + auth_mode += ",Webhook" + api_opts["authorization-webhook-config-file"] = keystone_webhook # noqa + set_state("keystone.apiserver.configured") + else: + hookenv.log("Unable to find k8s-keystone-auth-service. Will retry") + # Note that we can get into a nasty state here + # if the user has specified webhook and they're relying on + # keystone auth to handle that, the api server will fail to + # start because we push it Webhook and no webhook config. 
+ # We can't generate the config because we can't talk to the + # apiserver to get the ip of the service to put into the + # webhook template. A chicken and egg problem. To fix this, + # remove Webhook if keystone is related and trying to come + # up until we can find the service IP. + if "Webhook" in auth_mode: + auth_mode = ",".join( + [i for i in auth_mode.split(",") if i != "Webhook"] + ) + remove_state("keystone.apiserver.configured") + elif is_state("leadership.set.keystone-cdk-addons-configured"): + hookenv.log("Keystone endpoint not found, will retry.") + + api_opts["authorization-mode"] = auth_mode + api_opts["enable-admission-plugins"] = ",".join(admission_plugins) + + kube_version = get_version("kube-apiserver") + + if kube_version > (1, 6) and hookenv.config("api-aggregation-extension"): + api_opts["requestheader-client-ca-file"] = str(ca_crt_path) + api_opts["requestheader-allowed-names"] = "system:kube-apiserver,client" + api_opts["requestheader-extra-headers-prefix"] = "X-Remote-Extra-" + api_opts["requestheader-group-headers"] = "X-Remote-Group" + api_opts["requestheader-username-headers"] = "X-Remote-User" + api_opts["proxy-client-cert-file"] = str(client_crt_path) + api_opts["proxy-client-key-file"] = str(client_key_path) + api_opts["enable-aggregator-routing"] = "true" + api_opts["client-ca-file"] = str(ca_crt_path) + + api_cloud_config_path = cloud_config_path("kube-apiserver") + if has_external_cloud_provider(): + api_opts["cloud-provider"] = "external" + elif is_state("endpoint.aws.ready"): + api_opts["cloud-provider"] = "aws" + feature_gates.append("CSIMigrationAWS=false") + elif is_state("endpoint.gcp.ready"): + api_opts["cloud-provider"] = "gce" + api_opts["cloud-config"] = str(api_cloud_config_path) + feature_gates.append("CSIMigrationGCE=false") + elif is_state("endpoint.vsphere.ready") and get_version("kube-apiserver") >= ( + 1, + 12, + ): + api_opts["cloud-provider"] = "vsphere" + api_opts["cloud-config"] = str(api_cloud_config_path) + 
elif is_state("endpoint.azure.ready"): + api_opts["cloud-provider"] = "azure" + api_opts["cloud-config"] = str(api_cloud_config_path) + feature_gates.append("CSIMigrationAzureDisk=false") + + api_opts["feature-gates"] = ",".join(feature_gates) + + audit_root = "/root/cdk/audit" + os.makedirs(audit_root, exist_ok=True) + + audit_log_path = audit_root + "/audit.log" + api_opts["audit-log-path"] = audit_log_path + api_opts["audit-log-maxage"] = "30" + api_opts["audit-log-maxsize"] = "100" + api_opts["audit-log-maxbackup"] = "10" + + audit_policy_path = audit_root + "/audit-policy.yaml" + audit_policy = hookenv.config("audit-policy") + if audit_policy: + write_file_with_autogenerated_header(audit_policy_path, audit_policy) + api_opts["audit-policy-file"] = audit_policy_path + else: + remove_if_exists(audit_policy_path) + + audit_webhook_config_path = audit_root + "/audit-webhook-config.yaml" + audit_webhook_config = hookenv.config("audit-webhook-config") + if audit_webhook_config: + write_file_with_autogenerated_header( + audit_webhook_config_path, audit_webhook_config + ) + api_opts["audit-webhook-config-file"] = audit_webhook_config_path + else: + remove_if_exists(audit_webhook_config_path) + + configure_kubernetes_service( + configure_prefix, "kube-apiserver", api_opts, "api-extra-args" + ) + service_restart("snap.kube-apiserver.daemon") + + if was_service_cidr_expanded and is_state("leadership.is_leader"): + set_flag("kubernetes-control-plane.had-service-cidr-expanded") + + set_flag("kubernetes-control-plane.apiserver.configured") + if kubernetes_control_plane.check_service("kube-apiserver"): + set_flag("kubernetes-control-plane.apiserver.running") + + +@when("kubernetes-control-plane.apiserver.configured") +@when_not("kubernetes-control-plane.apiserver.running") +def check_apiserver(): + if kubernetes_control_plane.check_service("kube-apiserver"): + set_flag("kubernetes-control-plane.apiserver.running") + + +@when( + 
"kubernetes-control-plane.had-service-cidr-expanded", + "kubernetes-control-plane.apiserver.configured", + "leadership.is_leader", +) +def update_for_service_cidr_expansion(): + # We just restarted the API server, so there's a decent chance it's + # not up yet. Keep trying to get the svcs list until we can; get_svcs + # has a built-in retry and delay, so this should try for around 30s. + def _wait_for_svc_ip(): + for attempt in range(10): + svcs = get_svcs() + if svcs: + svc_ip = { + svc["metadata"]["name"]: svc["spec"]["clusterIP"] + for svc in svcs["items"] + }.get("kubernetes") + if svc_ip: + return svc_ip + else: + return None + + hookenv.log("service-cidr expansion: Waiting for API service") + # First network is the default, which is used for the API service's address. + # This logic will likely need to change once dual-stack services are + # supported: https://bit.ly/2YlbxOx + expected_service_ip = kubernetes_control_plane.get_kubernetes_service_ips()[0] + actual_service_ip = _wait_for_svc_ip() + if not actual_service_ip: + hookenv.log("service-cidr expansion: Timed out waiting for API service") + return + try: + if actual_service_ip != expected_service_ip: + hookenv.log("service-cidr expansion: Deleting service kubernetes") + kubectl("delete", "service", "kubernetes") + actual_service_ip = _wait_for_svc_ip() + if not actual_service_ip: + # we might need another restart to get the service recreated + hookenv.log( + "service-cidr expansion: Timed out waiting for " + "the service to return; restarting API server" + ) + clear_flag("kubernetes-control-plane.apiserver.configured") + return + if actual_service_ip != expected_service_ip: + raise ValueError( + "Unexpected service IP: {} != {}".format( + actual_service_ip, expected_service_ip + ) + ) + + # Restart the cdk-addons + # Get deployments/daemonsets/statefulsets + hookenv.log("service-cidr expansion: Restart the cdk-addons") + output = kubectl( + "get", + "daemonset,deployment,statefulset", + "-o", + "json", 
+ "--all-namespaces", + "-l", + "cdk-restart-on-ca-change=true", + ).decode("UTF-8") + deployments = json.loads(output)["items"] + + # Now restart the addons + for deployment in deployments: + kind = deployment["kind"] + namespace = deployment["metadata"]["namespace"] + name = deployment["metadata"]["name"] + hookenv.log("Restarting addon: {0} {1} {2}".format(kind, namespace, name)) + kubectl("rollout", "restart", kind + "/" + name, "-n", namespace) + except CalledProcessError: + # the kubectl calls already log the command and don't capture stderr, + # so logging the exception is a bit superfluous + hookenv.log("service-cidr expansion: failed to restart components") + else: + clear_flag("kubernetes-control-plane.had-service-cidr-expanded") + + +def configure_controller_manager(): + controller_opts = {} + cluster_cidr = kubernetes_common.cluster_cidr() + service_cidr = kubernetes_control_plane.service_cidr() + + # Default to 3 minute resync. TODO: Make this configurable? + controller_opts["min-resync-period"] = "3m" + controller_opts["v"] = "2" + controller_opts["root-ca-file"] = str(ca_crt_path) + controller_opts["logtostderr"] = "true" + controller_opts["kubeconfig"] = kubecontrollermanagerconfig_path + controller_opts["authorization-kubeconfig"] = kubecontrollermanagerconfig_path + controller_opts["authentication-kubeconfig"] = kubecontrollermanagerconfig_path + controller_opts["use-service-account-credentials"] = "true" + controller_opts["service-account-private-key-file"] = "/root/cdk/serviceaccount.key" + controller_opts["tls-cert-file"] = str(server_crt_path) + controller_opts["tls-private-key-file"] = str(server_key_path) + controller_opts["cluster-name"] = leader_get("cluster_tag") + controller_opts["terminated-pod-gc-threshold"] = "12500" + controller_opts["profiling"] = "false" + controller_opts["service-cluster-ip-range"] = service_cidr + controller_opts["cluster-cidr"] = cluster_cidr + feature_gates = ["RotateKubeletServerCertificate=true"] + if 
kubernetes_common.is_dual_stack(cluster_cidr): + feature_gates.append("IPv6DualStack=true") + net_ipv6 = kubernetes_common.get_ipv6_network(cluster_cidr) + if net_ipv6: + controller_opts["node-cidr-mask-size-ipv6"] = net_ipv6.prefixlen + + cm_cloud_config_path = cloud_config_path("kube-controller-manager") + if has_external_cloud_provider(): + controller_opts["cloud-provider"] = "external" + elif is_state("endpoint.aws.ready"): + controller_opts["cloud-provider"] = "aws" + feature_gates.append("CSIMigrationAWS=false") + elif is_state("endpoint.gcp.ready"): + controller_opts["cloud-provider"] = "gce" + controller_opts["cloud-config"] = str(cm_cloud_config_path) + feature_gates.append("CSIMigrationGCE=false") + elif is_state("endpoint.vsphere.ready") and get_version("kube-apiserver") >= ( + 1, + 12, + ): + controller_opts["cloud-provider"] = "vsphere" + controller_opts["cloud-config"] = str(cm_cloud_config_path) + elif is_state("endpoint.azure.ready"): + controller_opts["cloud-provider"] = "azure" + controller_opts["cloud-config"] = str(cm_cloud_config_path) + feature_gates.append("CSIMigrationAzureDisk=false") + + controller_opts["feature-gates"] = ",".join(feature_gates) + + configure_kubernetes_service( + configure_prefix, + "kube-controller-manager", + controller_opts, + "controller-manager-extra-args", + ) + service_restart("snap.kube-controller-manager.daemon") + + +def configure_scheduler(): + kube_scheduler_config_path = "/root/cdk/kube-scheduler-config.yaml" + + scheduler_opts = {} + + scheduler_opts["v"] = "2" + scheduler_opts["logtostderr"] = "true" + scheduler_opts["profiling"] = "false" + scheduler_opts["config"] = kube_scheduler_config_path + + feature_gates = [] + + if is_state("endpoint.aws.ready"): + feature_gates.append("CSIMigrationAWS=false") + elif is_state("endpoint.gcp.ready"): + feature_gates.append("CSIMigrationGCE=false") + elif is_state("endpoint.azure.ready"): + feature_gates.append("CSIMigrationAzureDisk=false") + + 
scheduler_opts["feature-gates"] = ",".join(feature_gates) + + scheduler_ver = get_version("kube-scheduler") + if scheduler_ver >= (1, 23): + api_ver = "v1beta2" + elif scheduler_ver >= (1, 19): + api_ver = "v1beta1" + elif scheduler_ver >= (1, 18): + api_ver = "v1alpha2" + else: + api_ver = "v1alpha1" + + host.write_file( + path=kube_scheduler_config_path, + perms=0o600, + content=yaml.safe_dump( + { + "apiVersion": "kubescheduler.config.k8s.io/{}".format(api_ver), + "kind": "KubeSchedulerConfiguration", + "clientConnection": {"kubeconfig": kubeschedulerconfig_path}, + } + ), + ) + + configure_kubernetes_service( + configure_prefix, "kube-scheduler", scheduler_opts, "scheduler-extra-args" + ) + + service_restart("snap.kube-scheduler.daemon") + + +def setup_tokens(token, username, user, groups=None): + """Create a token for kubernetes authentication. + + Create a new secret if known_tokens have been migrated. Otherwise, + add an entry to the 'known_tokens.csv' file. + """ + if not token: + token = kubernetes_control_plane.token_generator() + if is_flag_set("kubernetes-control-plane.token-auth.migrated"): + # We need the apiserver before we can create secrets. + if is_flag_set("kubernetes-control-plane.apiserver.configured"): + kubernetes_control_plane.create_secret(token, username, user, groups) + else: + hookenv.log("Delaying secret creation until the apiserver is configured.") + else: + kubernetes_control_plane.create_known_token(token, username, user, groups) + + +def get_token(username): + """Fetch a token for the given username. + + Grab a token from the given user's secret if known_tokens have been + migrated. Otherwise, fetch it from the 'known_tokens.csv' file. 
+ """ + if is_flag_set("kubernetes-control-plane.token-auth.migrated"): + return kubernetes_common.get_secret_password(username) + else: + return kubernetes_control_plane.get_csv_password("known_tokens.csv", username) + + +def set_token(password, save_salt): + """Store a token so it can be recalled later by token_generator. + + param: password - the password to be stored + param: save_salt - the key to store the value of the token.""" + db.set(save_salt, password) + return db.get(save_salt) + + +@retry(times=3, delay_secs=1) +def get_pods(namespace="default"): + try: + output = kubectl( + "get", "po", "-n", namespace, "-o", "json", "--request-timeout", "10s" + ).decode("UTF-8") + result = json.loads(output) + except CalledProcessError: + hookenv.log("failed to get {} pod status".format(namespace)) + return None + return result + + +@retry(times=3, delay_secs=1) +def get_svcs(namespace="default"): + try: + output = kubectl( + "get", "svc", "-n", namespace, "-o", "json", "--request-timeout", "10s" + ).decode("UTF-8") + result = json.loads(output) + except CalledProcessError: + hookenv.log("failed to get {} service status".format(namespace)) + return None + return result + + +class FailedToGetPodStatus(Exception): + pass + + +def get_kube_system_pods_not_running(): + """Check pod status in the kube-system namespace. Throws + FailedToGetPodStatus if unable to determine pod status. This can + occur when the api server is not currently running. 
On success, + returns a list of pods that are not currently running + or an empty list if all are running.""" + + result = get_pods("kube-system") + if result is None: + raise FailedToGetPodStatus + + hookenv.log( + "Checking system pods status: {}".format( + ", ".join( + "=".join([pod["metadata"]["name"], pod["status"]["phase"]]) + for pod in result["items"] + ) + ) + ) + + # Pods that are Running or Evicted (which should re-spawn) are + # considered running + not_running = [ + pod + for pod in result["items"] + if pod["status"]["phase"] != "Running" + and pod["status"].get("reason", "") != "Evicted" + ] + + pending = [pod for pod in result["items"] if pod["status"]["phase"] == "Pending"] + any_pending = len(pending) > 0 + if is_state("endpoint.gcp.ready") and any_pending: + poke_network_unavailable() + return not_running + + return not_running + + +def poke_network_unavailable(): + """ + Work around https://github.com/kubernetes/kubernetes/issues/44254 by + manually poking the status into the API server to tell the nodes they have + a network route. + + This is needed because kubelet sets the NetworkUnavailable flag and expects + the network plugin to clear it, which only kubenet does. There is some + discussion about refactoring the affected code but nothing has happened + in a while. 
+ """ + internal_endpoints = kubernetes_control_plane.get_internal_api_endpoints() + internal_url = kubernetes_control_plane.get_api_url(internal_endpoints) + + client_token = get_token("admin") + http_header = ("Authorization", "Bearer {}".format(client_token)) + + try: + output = kubectl("get", "nodes", "-o", "json").decode("utf-8") + nodes = json.loads(output)["items"] + except CalledProcessError: + hookenv.log("failed to get kube-system nodes") + return + except (KeyError, json.JSONDecodeError) as e: + hookenv.log( + "failed to parse kube-system node status " "({}): {}".format(e, output), + hookenv.ERROR, + ) + return + + for node in nodes: + node_name = node["metadata"]["name"] + url = "{}/api/v1/nodes/{}/status".format(internal_url, node_name) + req = Request(url) + req.add_header(*http_header) + with urlopen(req) as response: + code = response.getcode() + body = response.read().decode("utf8") + if code != 200: + hookenv.log( + "failed to get node status from {} [{}]: {}".format(url, code, body), + hookenv.ERROR, + ) + return + try: + node_info = json.loads(body) + conditions = node_info["status"]["conditions"] + i = [c["type"] for c in conditions].index("NetworkUnavailable") + if conditions[i]["status"] == "True": + hookenv.log("Clearing NetworkUnavailable from {}".format(node_name)) + conditions[i] = { + "type": "NetworkUnavailable", + "status": "False", + "reason": "RouteCreated", + "message": "Manually set through k8s api", + } + req = Request( + url, + method="PUT", + data=json.dumps(node_info).encode("utf8"), + headers={"Content-Type": "application/json"}, + ) + req.add_header(*http_header) + with urlopen(req) as response: + code = response.getcode() + body = response.read().decode("utf8") + if code not in (200, 201, 202): + hookenv.log( + "failed to update node status [{}]: {}".format(code, body), + hookenv.ERROR, + ) + return + except (json.JSONDecodeError, KeyError): + hookenv.log("failed to parse node status: {}".format(body), hookenv.ERROR) + 
return + + +def apiserverVersion(): + cmd = "kube-apiserver --version".split() + version_string = check_output(cmd).decode("utf-8") + return tuple(int(q) for q in re.findall("[0-9]+", version_string)[:3]) + + +def touch(fname): + try: + os.utime(fname, None) + except OSError: + open(fname, "a").close() + + +def getStorageBackend(): + storage_backend = hookenv.config("storage-backend") + if storage_backend == "auto": + storage_backend = leader_get("auto_storage_backend") + return storage_backend + + +@when("leadership.is_leader") +@when_not("leadership.set.cluster_tag") +def create_cluster_tag(): + cluster_tag = "kubernetes-{}".format( + kubernetes_control_plane.token_generator().lower() + ) + leader_set(cluster_tag=cluster_tag) + + +@when("leadership.set.cluster_tag", "kube-control.connected") +def send_cluster_tag(): + cluster_tag = leader_get("cluster_tag") + kube_control = endpoint_from_flag("kube-control.connected") + kube_control.set_cluster_tag(cluster_tag) + + +@when_not("kube-control.connected") +def clear_cluster_tag_sent(): + remove_state("kubernetes-control-plane.cluster-tag-sent") + + +@when_any( + "endpoint.aws.joined", + "endpoint.gcp.joined", + "endpoint.openstack.joined", + "endpoint.vsphere.joined", + "endpoint.azure.joined", +) +@when_not("kubernetes-control-plane.cloud.ready") +def set_cloud_pending(): + k8s_version = get_version("kube-apiserver") + k8s_1_11 = k8s_version >= (1, 11) + k8s_1_12 = k8s_version >= (1, 12) + vsphere_joined = is_state("endpoint.vsphere.joined") + azure_joined = is_state("endpoint.azure.joined") + if (vsphere_joined and not k8s_1_12) or (azure_joined and not k8s_1_11): + set_state("kubernetes-control-plane.cloud.blocked") + else: + remove_state("kubernetes-control-plane.cloud.blocked") + set_state("kubernetes-control-plane.cloud.pending") + + +@when_any("endpoint.aws.joined", "endpoint.gcp.joined", "endpoint.azure.joined") +@when("leadership.set.cluster_tag") +@when_not("kubernetes-control-plane.cloud.request-sent") 
+def request_integration(): + hookenv.status_set("maintenance", "requesting cloud integration") + cluster_tag = leader_get("cluster_tag") + if is_state("endpoint.aws.joined"): + cloud = endpoint_from_flag("endpoint.aws.joined") + cloud.tag_instance( + { + "kubernetes.io/cluster/{}".format(cluster_tag): "owned", + "k8s.io/role/master": "true", # wokeignore:rule=master + } + ) + cloud.tag_instance_security_group( + { + "kubernetes.io/cluster/{}".format(cluster_tag): "owned", + } + ) + cloud.tag_instance_subnet( + { + "kubernetes.io/cluster/{}".format(cluster_tag): "owned", + } + ) + cloud.enable_object_storage_management(["kubernetes-*"]) + cloud.enable_load_balancer_management() + elif is_state("endpoint.gcp.joined"): + cloud = endpoint_from_flag("endpoint.gcp.joined") + cloud.label_instance( + { + "k8s-io-cluster-name": cluster_tag, + "k8s-io-role-master": "master", # wokeignore:rule=master + } + ) + cloud.enable_object_storage_management() + cloud.enable_security_management() + elif is_state("endpoint.azure.joined"): + cloud = endpoint_from_flag("endpoint.azure.joined") + cloud.tag_instance( + { + "k8s-io-cluster-name": cluster_tag, + "k8s-io-role-master": "master", # wokeignore:rule=master + } + ) + cloud.enable_object_storage_management() + cloud.enable_security_management() + cloud.enable_loadbalancer_management() + cloud.enable_instance_inspection() + cloud.enable_network_management() + cloud.enable_dns_management() + cloud.enable_block_storage_management() + set_state("kubernetes-control-plane.cloud.request-sent") + + +@when_none( + "endpoint.aws.joined", + "endpoint.gcp.joined", + "endpoint.openstack.joined", + "endpoint.vsphere.joined", + "endpoint.azure.joined", +) +@when_any( + "kubernetes-control-plane.cloud.pending", + "kubernetes-control-plane.cloud.request-sent", + "kubernetes-control-plane.cloud.blocked", + "kubernetes-control-plane.cloud.ready", +) +def clear_cloud_flags(): + remove_state("kubernetes-control-plane.cloud.pending") + 
remove_state("kubernetes-control-plane.cloud.request-sent") + remove_state("kubernetes-control-plane.cloud.blocked") + remove_state("kubernetes-control-plane.cloud.ready") + clear_flag("kubernetes-control-plane.apiserver.configured") + clear_flag("kubernetes-control-plane.kubelet.configured") + _kick_controller_manager() + + +@when_any( + "endpoint.aws.ready", + "endpoint.gcp.ready", + "endpoint.openstack.ready", + "endpoint.vsphere.ready", + "endpoint.azure.ready", +) +@when_not( + "kubernetes-control-plane.cloud.blocked", "kubernetes-control-plane.cloud.ready" +) +def cloud_ready(): + if is_state("endpoint.gcp.ready"): + write_gcp_snap_config("kube-apiserver") + write_gcp_snap_config("kube-controller-manager") + write_gcp_snap_config("kubelet") + elif is_state("endpoint.vsphere.ready"): + _write_vsphere_snap_config("kube-apiserver") + _write_vsphere_snap_config("kube-controller-manager") + elif is_state("endpoint.azure.ready"): + write_azure_snap_config("kube-apiserver") + write_azure_snap_config("kube-controller-manager") + write_azure_snap_config("kubelet") + remove_state("kubernetes-control-plane.cloud.pending") + set_state("kubernetes-control-plane.cloud.ready") + remove_state("kubernetes-control-plane.components.started") # force restart + + +@when("kubernetes-control-plane.cloud.ready") +@when_any( + "endpoint.openstack.ready.changed", + "endpoint.vsphere.ready.changed", + "endpoint.azure.ready.changed", +) +def update_cloud_config(): + """Signal that cloud config has changed. + + Some clouds (openstack, vsphere) support runtime config that needs to be + reflected in the k8s cloud config files when changed. Manage flags to + ensure this happens. 
+ """ + if is_state("endpoint.openstack.ready.changed"): + remove_state("endpoint.openstack.ready.changed") + set_state("kubernetes-control-plane.openstack.changed") + if is_state("endpoint.vsphere.ready.changed"): + remove_state("kubernetes-control-plane.cloud.ready") + remove_state("endpoint.vsphere.ready.changed") + if is_state("endpoint.azure.ready.changed"): + remove_state("kubernetes-control-plane.cloud.ready") + remove_state("endpoint.azure.ready.changed") + + +def _cdk_addons_template_path(): + return Path("/snap/cdk-addons/current/templates") + + +def _write_vsphere_snap_config(component): + # vsphere requires additional cloud config + vsphere = endpoint_from_flag("endpoint.vsphere.ready") + + # NB: vsphere provider will ask kube-apiserver and -controller-manager to + # find a uuid from sysfs unless a global config value is set. Our strict + # snaps cannot read sysfs, so let's do it in the charm. An invalid uuid is + # not fatal for storage, but it will muddy the logs; try to get it right. 
+ uuid = _get_vmware_uuid() + + comp_cloud_config_path = cloud_config_path(component) + comp_cloud_config_path.write_text( + "\n".join( + [ + "[Global]", + "insecure-flag = true", + 'datacenters = "{}"'.format(vsphere.datacenter), + 'vm-uuid = "VMware-{}"'.format(uuid), + '[VirtualCenter "{}"]'.format(vsphere.vsphere_ip), + 'user = "{}"'.format(vsphere.user), + 'password = "{}"'.format(vsphere.password), + "[Workspace]", + 'server = "{}"'.format(vsphere.vsphere_ip), + 'datacenter = "{}"'.format(vsphere.datacenter), + 'default-datastore = "{}"'.format(vsphere.datastore), + 'folder = "{}"'.format(vsphere.folder), + 'resourcepool-path = "{}"'.format(vsphere.respool_path), + "[Disk]", + 'scsicontrollertype = "pvscsi"', + ] + ) + ) + + +@when("config.changed.keystone-policy") +@when("kubernetes-control-plane.keystone-policy-handled") +def regen_keystone_policy(): + clear_flag("kubernetes-control-plane.keystone-policy-handled") + + +@when( + "keystone-credentials.available", + "leadership.is_leader", + "kubernetes-control-plane.apiserver.configured", +) +@when_not("kubernetes-control-plane.keystone-policy-handled") +def generate_keystone_configmap(): + keystone_policy = hookenv.config("keystone-policy") + if keystone_policy: + os.makedirs(keystone_root, exist_ok=True) + write_file_with_autogenerated_header(keystone_policy_path, keystone_policy) + if kubectl_manifest("apply", keystone_policy_path): + set_flag("kubernetes-control-plane.keystone-policy-handled") + clear_flag("kubernetes-control-plane.keystone-policy-error") + else: + set_flag("kubernetes-control-plane.keystone-policy-error") + else: + # a missing policy configmap will crashloop the pods, but... + # what do we do in this situation. We could just do nothing, + # but that isn't cool for the user so we surface an error + # and wait for them to fix it. + set_flag("kubernetes-control-plane.keystone-policy-error") + + # note that information is surfaced to the user in the code above where we + # write status. 
It will notify the user we are waiting on the policy file + # to apply if the keystone-credentials.available flag is set, but + # kubernetes-control-plane.keystone-policy-handled is not set. + + +@when("leadership.is_leader", "kubernetes-control-plane.keystone-policy-handled") +@when_not("keystone-credentials.available") +def remove_keystone(): + clear_flag("kubernetes-control-plane.apiserver.configured") + if not os.path.exists(keystone_policy_path): + clear_flag("kubernetes-control-plane.keystone-policy-handled") + elif kubectl_manifest("delete", keystone_policy_path): + os.remove(keystone_policy_path) + clear_flag("kubernetes-control-plane.keystone-policy-handled") + + +@when("keystone-credentials.connected") +def setup_keystone_user(): + # This seems silly, but until we request a user from keystone + # we don't get information about the keystone server... + ks = endpoint_from_flag("keystone-credentials.connected") + ks.request_credentials("k8s") + + +def _kick_controller_manager(): + if is_flag_set("kubernetes-control-plane.components.started"): + configure_controller_manager() + + +@when( + "keystone.credentials.configured", "leadership.set.keystone-cdk-addons-configured" +) +@when_not("keystone.apiserver.configured") +def keystone_kick_apiserver(): + clear_flag("kubernetes-control-plane.apiserver.configured") + + +@when( + "keystone-credentials.available", + "certificates.ca.available", + "certificates.client.cert.available", + "authentication.setup", + "etcd.available", + "leadership.set.keystone-cdk-addons-configured", +) +def keystone_config(): + # first, we have to have the service set up before we can render this stuff + ks = endpoint_from_flag("keystone-credentials.available") + data = { + "host": ks.credentials_host(), + "proto": ks.credentials_protocol(), + "port": ks.credentials_port(), + "version": ks.api_version(), + } + if data_changed("keystone", data): + remove_state("keystone.credentials.configured") + 
clear_flag("kubernetes-control-plane.apiserver.configured") + build_kubeconfig() + generate_keystone_configmap() + set_state("keystone.credentials.configured") + + +@when("layer.vault-kv.app-kv.set.encryption_key", "layer.vaultlocker.ready") +@when_not("kubernetes-control-plane.secure-storage.created") +def create_secure_storage(): + encryption_conf_dir = encryption_config_path().parent + encryption_conf_dir.mkdir(mode=0o700, parents=True, exist_ok=True) + try: + vaultlocker.create_encrypted_loop_mount(encryption_conf_dir) + except vaultlocker.VaultLockerError: + # One common cause of this would be deploying on lxd. + # Should this be more fatal? + hookenv.log( + "Unable to create encrypted mount for storing encryption config.\n" + "{}".format(traceback.format_exc()), + level=hookenv.ERROR, + ) + set_flag("kubernetes-control-plane.secure-storage.failed") + clear_flag("kubernetes-control-plane.secure-storage.created") + else: + # TODO: If Vault isn't available, it's probably still better to encrypt + # anyway and store the key in plaintext and leadership than to just + # give up on encryption entirely. 
+ _write_encryption_config() + # prevent an unnecessary service restart on this + # unit since we've already handled the change + clear_flag("layer.vault-kv.app-kv.changed.encryption_key") + # mark secure storage as ready + set_flag("kubernetes-control-plane.secure-storage.created") + clear_flag("kubernetes-control-plane.secure-storage.failed") + # restart to regen config + clear_flag("kubernetes-control-plane.apiserver.configured") + + +@when_not("layer.vaultlocker.ready") +@when("kubernetes-control-plane.secure-storage.created") +def revert_secure_storage(): + clear_flag("kubernetes-control-plane.secure-storage.created") + clear_flag("kubernetes-control-plane.secure-storage.failed") + clear_flag("kubernetes-control-plane.apiserver.configured") + + +@when("leadership.is_leader", "layer.vault-kv.ready") +@when_not("layer.vault-kv.app-kv.set.encryption_key") +def generate_encryption_key(): + app_kv = vault_kv.VaultAppKV() + app_kv["encryption_key"] = kubernetes_control_plane.token_generator(32) + + +@when( + "layer.vault-kv.app-kv.changed.encryption_key", + "kubernetes-control-plane.secure-storage.created", +) +def restart_apiserver_for_encryption_key(): + clear_flag("kubernetes-control-plane.apiserver.configured") + clear_flag("layer.vault-kv.app-kv.changed.encryption_key") + + +def _write_encryption_config(): + app_kv = vault_kv.VaultAppKV() + encryption_config_path().parent.mkdir(parents=True, exist_ok=True) + secret = app_kv["encryption_key"] + secret = base64.b64encode(secret.encode("utf8")).decode("utf8") + host.write_file( + path=str(encryption_config_path()), + perms=0o600, + content=yaml.safe_dump( + { + "kind": "EncryptionConfig", + "apiVersion": "v1", + "resources": [ + { + "resources": ["secrets"], + "providers": [ + { + "aescbc": { + "keys": [ + { + "name": "key1", + "secret": secret, + } + ], + } + }, + {"identity": {}}, + ], + } + ], + } + ), + ) + + +@when_any("config.changed.pod-security-policy") +def pod_security_policy_config_changed(): + 
clear_flag("kubernetes-control-plane.pod-security-policy.applied") + + +@when_any("config.changed.ha-cluster-vip", "config.changed.ha-cluster-dns") +def haconfig_changed(): + clear_flag("hacluster-configured") + + +@when("ha.connected", "kubernetes-control-plane.components.started") +@when_not("hacluster-configured") +def configure_hacluster(): + # get a new cert + if is_flag_set("certificates.available"): + send_data() + # update workers + if is_flag_set("kube-control.connected"): + send_api_urls() + if is_flag_set("kube-api-endpoint.available"): + push_service_data() + + set_flag("hacluster-configured") + + +@when_not("ha.connected") +@when("hacluster-configured") +def remove_hacluster(): + # get a new cert + if is_flag_set("certificates.available"): + send_data() + # update workers + if is_flag_set("kube-control.connected"): + send_api_urls() + if is_flag_set("kube-api-endpoint.available"): + push_service_data() + + clear_flag("hacluster-configured") + + +class InvalidDnsProvider(Exception): + def __init__(self, value): + self.value = value + + +def get_dns_provider(): + valid_dns_providers = ["auto", "core-dns", "kube-dns", "none"] + if get_version("kube-apiserver") < (1, 14): + valid_dns_providers.remove("core-dns") + + dns_provider = hookenv.config("dns-provider").lower() + if dns_provider not in valid_dns_providers: + raise InvalidDnsProvider(dns_provider) + + if dns_provider == "auto": + dns_provider = leader_get("auto_dns_provider") + # On new deployments, the first time this is called, auto_dns_provider + # hasn't been set yet. We need to make a choice now. + if not dns_provider: + if "core-dns" in valid_dns_providers: + dns_provider = "core-dns" + else: + dns_provider = "kube-dns" + + # LP: 1833089. Followers end up here when setting final status; ensure only + # leaders call leader_set. 
+ if is_state("leadership.is_leader"): + leader_set(auto_dns_provider=dns_provider) + return dns_provider + + +@when("kube-control.connected") +@when_not("kubernetes-control-plane.sent-registry") +def send_registry_location(): + registry_location = hookenv.config("image-registry") + kube_control = endpoint_from_flag("kube-control.connected") + + # Send registry to workers + kube_control.set_registry_location(registry_location) + + # Construct and send the sandbox image (pause container) to our runtime + runtime = endpoint_from_flag("endpoint.container-runtime.available") + if not runtime: + hookenv.log( + "Container runtime not yet available, will retry setting sandbox image" + ) + return + + uri = get_sandbox_image_uri(registry_location) + runtime.set_config(sandbox_image=uri) + + set_flag("kubernetes-control-plane.sent-registry") + + +@when( + "leadership.is_leader", + "leadership.set.kubernetes-master-addons-restart-for-ca", + "kubernetes-control-plane.components.started", +) +def restart_addons_for_ca(): + try: + # Get deployments/daemonsets/statefulsets + output = kubectl( + "get", + "daemonset,deployment,statefulset", + "-o", + "json", + "--all-namespaces", + "-l", + "cdk-restart-on-ca-change=true", + ).decode("UTF-8") + deployments = json.loads(output)["items"] + + # Get ServiceAccounts + service_account_names = set( + ( + deployment["metadata"]["namespace"], + deployment["spec"]["template"]["spec"].get( + "serviceAccountName", "default" + ), + ) + for deployment in deployments + ) + service_accounts = [] + for namespace, name in service_account_names: + output = kubectl( + "get", "ServiceAccount", name, "-o", "json", "-n", namespace + ).decode("UTF-8") + service_account = json.loads(output) + service_accounts.append(service_account) + + # Get ServiceAccount secrets + secret_names = set() + for service_account in service_accounts: + namespace = service_account["metadata"]["namespace"] + for secret in service_account["secrets"]: + secret_names.add((namespace, 
secret["name"])) + secrets = [] + for namespace, name in secret_names: + output = kubectl( + "get", "Secret", name, "-o", "json", "-n", namespace + ).decode("UTF-8") + secret = json.loads(output) + secrets.append(secret) + + # Check secrets have updated CA + with open(ca_crt_path, "rb") as f: + ca = f.read() + encoded_ca = base64.b64encode(ca).decode("UTF-8") + mismatched_secrets = [ + secret for secret in secrets if secret["data"]["ca.crt"] != encoded_ca + ] + if mismatched_secrets: + hookenv.log( + "ServiceAccount secrets do not have correct ca.crt: " + + ",".join(secret["metadata"]["name"] for secret in mismatched_secrets) + ) + hookenv.log("Waiting to retry restarting addons") + return + + # Now restart the addons + for deployment in deployments: + kind = deployment["kind"] + namespace = deployment["metadata"]["namespace"] + name = deployment["metadata"]["name"] + hookenv.log("Restarting addon: %s %s %s" % (kind, namespace, name)) + kubectl("rollout", "restart", kind + "/" + name, "-n", namespace) + + leader_set({"kubernetes-master-addons-restart-for-ca": None}) + except Exception: + hookenv.log(traceback.format_exc()) + hookenv.log("Waiting to retry restarting addons") + + +def add_systemd_iptables_patch(): + source = "templates/kube-proxy-iptables-fix.sh" + dest = "/usr/local/bin/kube-proxy-iptables-fix.sh" + copyfile(source, dest) + os.chmod(dest, 0o775) + + template = "templates/service-iptables-fix.service" + dest_dir = "/etc/systemd/system" + os.makedirs(dest_dir, exist_ok=True) + service_name = "kube-proxy-iptables-fix.service" + copyfile(template, "{}/{}".format(dest_dir, service_name)) + + check_call(["systemctl", "daemon-reload"]) + + # enable and run the service + service_resume(service_name) + + +@when( + "leadership.is_leader", + "kubernetes-control-plane.components.started", + "endpoint.prometheus.joined", + "certificates.ca.available", +) +def register_prometheus_jobs(): + prometheus = endpoint_from_flag("endpoint.prometheus.joined") + tls = 
endpoint_from_flag("certificates.ca.available") + monitoring_token = get_token("system:monitoring") + + for relation in prometheus.relations: + endpoints = kubernetes_control_plane.get_internal_api_endpoints(relation) + if not endpoints: + continue + address, port = endpoints[0] + + templates_dir = Path("templates") + for job_file in Path("templates/prometheus").glob("*.yaml.j2"): + prometheus.register_job( + relation=relation, + job_name=job_file.name.split(".")[0], + job_data=yaml.safe_load( + render( + source=str(job_file.relative_to(templates_dir)), + target=None, # don't write file, just return data + context={ + "k8s_api_address": address, + "k8s_api_port": port, + "k8s_token": monitoring_token, + }, + ) + ), + ca_cert=tls.root_ca_cert, + ) + + +def detect_telegraf(): + # Telegraf uses the implicit juju-info relation, which makes it difficult + # to tell if it's related. The "best" option is to look for the subordinate + # charm on disk. + for charm_dir in Path("/var/lib/juju/agents").glob("unit-*/charm"): + metadata = yaml.safe_load((charm_dir / "metadata.yaml").read_text()) + if "telegraf" in metadata["name"]: + return True + else: + return False + + +@when( + "leadership.is_leader", + "kubernetes-control-plane.components.started", + "endpoint.grafana.joined", +) +def register_grafana_dashboards(): + grafana = endpoint_from_flag("endpoint.grafana.joined") + + # load conditional dashboards + dash_dir = Path("templates/grafana/conditional") + if is_flag_set("endpoint.prometheus.joined"): + dashboard = (dash_dir / "prometheus.json").read_text() + grafana.register_dashboard("prometheus", json.loads(dashboard)) + if detect_telegraf(): + dashboard = (dash_dir / "telegraf.json").read_text() + grafana.register_dashboard("telegraf", json.loads(dashboard)) + + # load automatic dashboards + dash_dir = Path("templates/grafana/autoload") + for dash_file in dash_dir.glob("*.json"): + dashboard = dash_file.read_text() + grafana.register_dashboard(dash_file.stem, 
json.loads(dashboard)) + + +@when("endpoint.aws-iam.ready") +@when_not("kubernetes-control-plane.aws-iam.configured") +def enable_aws_iam_webhook(): + # if etcd isn't available yet, we'll set this up later + # when we start the api server. + if is_flag_set("etcd.available"): + # call the other things we need to update + clear_flag("kubernetes-control-plane.apiserver.configured") + build_kubeconfig() + set_flag("kubernetes-control-plane.aws-iam.configured") + + +@when("kubernetes-control-plane.components.started", "endpoint.aws-iam.available") +def api_server_started(): + aws_iam = endpoint_from_flag("endpoint.aws-iam.available") + if aws_iam: + aws_iam.set_api_server_status(True) + + +@when_not("kubernetes-control-plane.components.started") +@when("endpoint.aws-iam.available") +def api_server_stopped(): + aws_iam = endpoint_from_flag("endpoint.aws-iam.available") + if aws_iam: + aws_iam.set_api_server_status(False) + + +@when("kube-control.connected") +def send_default_cni(): + """Send the value of the default-cni config to the kube-control relation. + This allows kubernetes-worker to use the same config value as well. 
+ """ + default_cni = hookenv.config("default-cni") + kube_control = endpoint_from_flag("kube-control.connected") + kube_control.set_default_cni(default_cni) + + +@when("config.changed.default-cni") +def default_cni_changed(): + remove_state("kubernetes-control-plane.components.started") + + +@when( + "kubernetes-control-plane.components.started", + "kubernetes-control-plane.apiserver.configured", + "endpoint.container-runtime.available", +) +@when_not("kubernetes-control-plane.kubelet.configured") +def configure_kubelet(): + uid = hookenv.local_unit() + username = "system:node:{}".format(get_node_name().lower()) + group = "system:nodes" + token = get_token(username) + if not token: + setup_tokens(None, username, uid, group) + token = get_token(username) + if not token: + hookenv.log( + "Failed to create token for {}; will retry".format(username), + hookenv.WARNING, + ) + return + has_xcp = has_external_cloud_provider() + + local_endpoint = kubernetes_control_plane.get_local_api_endpoint() + local_url = kubernetes_control_plane.get_api_url(local_endpoint) + create_kubeconfig( + kubelet_kubeconfig_path, local_url, ca_crt_path, token=token, user="kubelet" + ) + + dns_ready, dns_ip, dns_port, dns_domain = get_dns_info() + if not dns_ready: + hookenv.log("DNS not ready, waiting to configure Kubelet") + return + dns_info = [dns_ip, dns_port, dns_domain] + db.set("kubernetes-master.kubelet.dns-used", dns_info) + + registry = hookenv.config("image-registry") + taints = hookenv.config("register-with-taints").split() + kubernetes_common.configure_kubelet( + dns_domain, dns_ip, registry, taints=taints, has_xcp=has_xcp + ) + service_restart("snap.kubelet.daemon") + set_state("node.label-config-required") + set_flag("kubernetes-control-plane.kubelet.configured") + + +@when( + "node.label-config-required", + "kubernetes-control-plane.kubelet.configured", + "kubernetes-control-plane.apiserver.configured", + "authentication.setup", +) +def apply_node_labels(): + # Label 
configuration complete. + label_maker = LabelMaker(kubeclientconfig_path) + try: + label_maker.apply_node_labels() + except LabelMaker.NodeLabelError: + return + remove_state("node.label-config-required") + + +@when_any("config.changed.kubelet-extra-args", "config.changed.kubelet-extra-config") +def reconfigure_kubelet(): + # LP bug #1826833, always delete the state file when extra config changes + # since CPU manager doesn’t support offlining and onlining of CPUs at runtime. + cpu_manager_state = "/var/lib/kubelet/cpu_manager_state" + if os.path.isfile(cpu_manager_state): + hookenv.log("Removing file: " + cpu_manager_state) + os.remove(cpu_manager_state) + clear_flag("kubernetes-control-plane.kubelet.configured") + + +@when("kubernetes-control-plane.kubelet.configured") +def watch_dns_for_changes(): + dns_ready, dns_ip, dns_port, dns_domain = get_dns_info() + dns_info = [dns_ip, dns_port, dns_domain] + previous_dns_info = db.get("kubernetes-master.kubelet.dns-used") + dns_changed = dns_info != previous_dns_info + if dns_ready and dns_changed: + hookenv.log("DNS info has changed, will reconfigure Kubelet") + clear_flag("kubernetes-control-plane.kubelet.configured") + + +@when("cni.available") +@when_not("kubernetes-control-plane.default-cni.configured") +def configure_default_cni(): + default_cni = hookenv.config("default-cni") + kubernetes_common.configure_default_cni(default_cni) + set_flag("kubernetes-control-plane.default-cni.configured") + + +@when("ceph-client.available") +@when_not("kubernetes-control-plane.ceph.permissions.requested") +def request_ceph_permissions(): + ceph_client = endpoint_from_flag("ceph-client.available") + request = ceph_client.get_current_request() or CephBrokerRq() + # Permissions needed for Ceph CSI + # https://github.com/ceph/ceph-csi/blob/v3.6.0/docs/capabilities.md + permissions = [ + "mon", + "profile rbd, allow r", + "mds", + "allow rw", + "mgr", + "allow rw", + "osd", + "profile rbd, allow rw tag cephfs metadata=*", + ] + 
client_name = hookenv.application_name() + request.add_op( + {"op": "set-key-permissions", "permissions": permissions, "client": client_name} + ) + ceph_client.send_request_if_needed(request) + set_flag("kubernetes-control-plane.ceph.permissions.requested") + + +HEAL_HANDLER = { + "kube-apiserver": { + "run": configure_apiserver, + "clear_flags": [ + "kubernetes-control-plane.apiserver.configured", + "kubernetes-control-plane.apiserver.running", + ], + }, + "kube-controller-manager": {"run": configure_controller_manager, "clear_flags": []}, + "kube-scheduler": {"run": configure_scheduler, "clear_flags": []}, + "kube-proxy": { + "run": start_control_plane, + "clear_flags": ["kubernetes-control-plane.components.started"], + }, + "kubelet": {"run": reconfigure_kubelet, "clear_flags": []}, +} diff --git a/kubernetes-control-plane/reactive/kubernetes_node_base.py b/kubernetes-control-plane/reactive/kubernetes_node_base.py new file mode 100644 index 0000000..1025fac --- /dev/null +++ b/kubernetes-control-plane/reactive/kubernetes_node_base.py @@ -0,0 +1,132 @@ +import os +from subprocess import check_call + +from charms.layer import snap +from charms.leadership import leader_get, leader_set +from charms.reactive import ( + clear_flag, + data_changed, + hook, + set_flag, + set_state, + when, + when_not, +) + +from charmhelpers.core import hookenv +from charmhelpers.core.host import is_container +from charmhelpers.core.sysctl import create as create_sysctl +from charms.layer.kubernetes_common import arch + + +@hook("upgrade-charm") +def upgrade_charm(): + clear_flag("kubernetes.cni-plugins.installed") + + +@when_not("kubernetes.cni-plugins.installed") +def install_cni_plugins(): + """Unpack the cni-plugins resource""" + hookenv.status_set("maintenance", "Installing CNI plugins") + + # Get the resource via resource_get + try: + resource_name = "cni-{}".format(arch()) + archive = hookenv.resource_get(resource_name) + except Exception: + message = "Error fetching the cni 
resource." + hookenv.log(message) + return + + if not archive: + hookenv.log("Missing cni resource.") + return + + # Handle null resource publication, we check if filesize < 1mb + filesize = os.stat(archive).st_size + if filesize < 1000000: + hookenv.log("Incomplete cni resource.") + return + + unpack_path = "/opt/cni/bin" + os.makedirs(unpack_path, exist_ok=True) + cmd = ["tar", "xfvz", archive, "-C", unpack_path] + hookenv.log(cmd) + check_call(cmd) + + set_flag("kubernetes.cni-plugins.installed") + + +@when("kubernetes-node.snaps.installed") +@when("snap.refresh.set") +@when("leadership.is_leader") +def process_snapd_timer(): + """ + Set the snapd refresh timer on the leader so all cluster members + (present and future) will refresh near the same time. + + :return: None + """ + # Get the current snapd refresh timer; we know layer-snap has set this + # when the 'snap.refresh.set' flag is present. + timer = snap.get(snapname="core", key="refresh.timer").decode("utf-8").strip() + if not timer: + # The core snap timer is empty. This likely means a subordinate timer + # reset ours. Try to set it back to a previously leader-set value, + # falling back to config if needed. Luckily, this should only happen + # during subordinate install, so this should remain stable afterward. + timer = leader_get("snapd_refresh") or hookenv.config("snapd_refresh") + snap.set_refresh_timer(timer) + + # Ensure we have the timer known by snapd (it may differ from config). + timer = snap.get(snapname="core", key="refresh.timer").decode("utf-8").strip() + + # The first time through, data_changed will be true. Subsequent calls + # should only update leader data if something changed. 
+ if data_changed("snapd_refresh", timer): + hookenv.log("setting leader snapd_refresh timer to: {}".format(timer)) + leader_set({"snapd_refresh": timer}) + + +@when("kubernetes-node.snaps.installed") +@when("snap.refresh.set") +@when("leadership.changed.snapd_refresh") +@when_not("leadership.is_leader") +def set_snapd_timer(): + """ + Set the snapd refresh.timer on non-leader cluster members. + + :return: None + """ + # NB: This method should only be run when 'snap.refresh.set' is present. + # Layer-snap will always set a core refresh.timer, which may not be the + # same as our leader. Gating with 'snap.refresh.set' ensures layer-snap + # has finished and we are free to set our config to the leader's timer. + timer = leader_get("snapd_refresh") or "" # None will error + hookenv.log("setting snapd_refresh timer to: {}".format(timer)) + snap.set_refresh_timer(timer) + + +@when("config.changed.sysctl") +def write_sysctl(): + """ + :return: None + """ + sysctl_settings = hookenv.config("sysctl") + if sysctl_settings and not is_container(): + create_sysctl( + sysctl_settings, + "/etc/sysctl.d/50-kubernetes-charm.conf", + # Some keys in the config may not exist in /proc/sys/net/. + # For example, the conntrack module may not be loaded when + # using lxd drivers insteam of kvm. In these cases, we + # simply ignore the missing keys, rather than making time + # consuming calls out to the filesystem to check for their + # existence. + ignore=True, + ) + + +@when("config.changed.labels") +def handle_labels_changed(): + set_state("node.label-config-required") diff --git a/kubernetes-control-plane/reactive/leadership.py b/kubernetes-control-plane/reactive/leadership.py new file mode 100644 index 0000000..29c6f3a --- /dev/null +++ b/kubernetes-control-plane/reactive/leadership.py @@ -0,0 +1,68 @@ +# Copyright 2015-2016 Canonical Ltd. +# +# This file is part of the Leadership Layer for Juju. 
+# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License version 3, as +# published by the Free Software Foundation. +# +# This program is distributed in the hope that it will be useful, but +# WITHOUT ANY WARRANTY; without even the implied warranties of +# MERCHANTABILITY, SATISFACTORY QUALITY, or FITNESS FOR A PARTICULAR +# PURPOSE. See the GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . + +from charmhelpers.core import hookenv +from charmhelpers.core import unitdata + +from charms import reactive +from charms.leadership import leader_get, leader_set + + +__all__ = ['leader_get', 'leader_set'] # Backwards compatibility + + +def initialize_leadership_state(): + '''Initialize leadership.* states from the hook environment. + + Invoked by hookenv.atstart() so states are available in + @hook decorated handlers. + ''' + is_leader = hookenv.is_leader() + if is_leader: + hookenv.log('Initializing Leadership Layer (is leader)') + else: + hookenv.log('Initializing Leadership Layer (is follower)') + + reactive.helpers.toggle_state('leadership.is_leader', is_leader) + + previous = unitdata.kv().getrange('leadership.settings.', strip=True) + current = hookenv.leader_get() + + # Handle deletions. 
+ for key in set(previous.keys()) - set(current.keys()): + current[key] = None + + any_changed = False + for key, value in current.items(): + reactive.helpers.toggle_state('leadership.changed.{}'.format(key), + value != previous.get(key)) + if value != previous.get(key): + any_changed = True + reactive.helpers.toggle_state('leadership.set.{}'.format(key), + value is not None) + reactive.helpers.toggle_state('leadership.changed', any_changed) + + unitdata.kv().update(current, prefix='leadership.settings.') + + +# Per https://github.com/juju-solutions/charms.reactive/issues/33, +# this module may be imported multiple times so ensure the +# initialization hook is only registered once. I have to piggy back +# onto the namespace of a module imported before reactive discovery +# to do this. +if not hasattr(reactive, '_leadership_registered'): + hookenv.atstart(initialize_leadership_state) + reactive._leadership_registered = True diff --git a/kubernetes-control-plane/reactive/snap.py b/kubernetes-control-plane/reactive/snap.py new file mode 100644 index 0000000..2220648 --- /dev/null +++ b/kubernetes-control-plane/reactive/snap.py @@ -0,0 +1,341 @@ +# Copyright 2016-2019 Canonical Ltd. +# +# This file is part of the Snap layer for Juju. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" +charms.reactive helpers for dealing with Snap packages. 
+""" +from collections import OrderedDict +from distutils.version import LooseVersion +import os.path +from os import uname +import shutil +import subprocess +from textwrap import dedent +import time +from urllib.request import urlretrieve + +from charmhelpers.core import hookenv, host +from charmhelpers.core.hookenv import ERROR +from charmhelpers.core.host import write_file +from charms import layer +from charms import reactive +from charms.layer import snap +from charms.reactive import register_trigger, when, when_not, toggle_flag +from charms.reactive.helpers import data_changed + + +class UnsatisfiedMinimumVersionError(Exception): + def __init__(self, desired, actual): + super().__init__() + self.desired = desired + self.actual = actual + + def __str__(self): + return "Could not install snapd >= {0.desired}, got {0.actual}".format(self) + + +class InvalidBundleError(Exception): + pass + + +def sorted_snap_opts(): + opts = layer.options("snap") + opts = sorted(opts.items(), key=lambda item: item[0] != "core") + opts = OrderedDict(opts) + return opts + + +def install(): + # Do nothing if we don't have kernel support yet + if not kernel_supported(): + return + + opts = sorted_snap_opts() + # supported-architectures is EXPERIMENTAL and undocumented. + # It probably should live in the base layer, blocking the charm + # during bootstrap if the arch is unsupported. + arch = uname().machine + for snapname, snap_opts in opts.items(): + supported_archs = snap_opts.pop("supported-architectures", None) + if supported_archs and arch not in supported_archs: + # Note that this does *not* error. The charm will need to + # cope with the snaps it requested never getting installed, + # likely by doing its own check on supported-architectures. 
+ hookenv.log( + "Snap {} not supported on {!r} architecture" "".format(snapname, arch), + ERROR, + ) + continue + installed_flag = "snap.installed.{}".format(snapname) + if not reactive.is_flag_set(installed_flag): + snap.install(snapname, **snap_opts) + if data_changed("snap.install.opts", opts): + snap.connect_all() + + +def check_refresh_available(): + # Do nothing if we don't have kernel support yet + if not kernel_supported(): + return + + available_refreshes = snap.get_available_refreshes() + for snapname in snap.get_installed_snaps(): + toggle_flag(snap.get_refresh_available_flag(snapname), snapname in available_refreshes) + + +def refresh(): + # Do nothing if we don't have kernel support yet + if not kernel_supported(): + return + + opts = sorted_snap_opts() + # supported-architectures is EXPERIMENTAL and undocumented. + # It probably should live in the base layer, blocking the charm + # during bootstrap if the arch is unsupported. + arch = uname()[4] + check_refresh_available() + for snapname, snap_opts in opts.items(): + supported_archs = snap_opts.pop("supported-architectures", None) + if supported_archs and arch not in supported_archs: + continue + snap.refresh(snapname, **snap_opts) + snap.connect_all() + + +@reactive.hook("upgrade-charm") +def upgrade_charm(): + refresh() + + +def get_series(): + return subprocess.check_output(["lsb_release", "-sc"], universal_newlines=True).strip() + + +def snapd_supported(): + # snaps are not supported in trusty lxc containers. + if get_series() == "trusty" and host.is_container(): + return False + return True # For all other cases, assume true. 
+ + +def kernel_supported(): + kernel_version = uname().release + + if LooseVersion(kernel_version) < LooseVersion("4.4"): + hookenv.log( + "Snaps do not work on kernel {}, a reboot " + "into a supported kernel (>4.4) is required" + "".format(kernel_version) + ) + return False + return True + + +def ensure_snapd(): + if not snapd_supported(): + hookenv.log("Snaps do not work in this environment", hookenv.ERROR) + raise Exception("Snaps do not work in this environment") + + # I don't use the apt layer, because that would tie this layer + # too closely to apt packaging. Perhaps this is a snap-only system. + if not shutil.which("snap"): + os.environ["DEBIAN_FRONTEND"] = "noninteractive" + cmd = ["apt-get", "install", "-y", "snapd"] + # LP:1699986: Force install of systemd on Trusty. + if get_series() == "trusty": + cmd.append("systemd") + subprocess.check_call(cmd, universal_newlines=True) + + +def proxy_settings(): + proxy_vars = ("http_proxy", "https_proxy") + proxy_env = {key: value for key, value in os.environ.items() if key in proxy_vars} + + snap_proxy = hookenv.config().get("snap_proxy") + if snap_proxy: + proxy_env["http_proxy"] = snap_proxy + proxy_env["https_proxy"] = snap_proxy + return proxy_env + + +def update_snap_proxy(): + # Do nothing if we don't have kernel support yet + if not kernel_supported(): + return + + # This is a hack based on + # https://bugs.launchpad.net/layer-snap/+bug/1533899/comments/1 + # Do it properly when Bug #1533899 is addressed. + # Note we can't do this in a standard reactive handler as we need + # to ensure proxies are configured before attempting installs or + # updates. + proxy = proxy_settings() + + override_dir = "/etc/systemd/system/snapd.service.d" + path = os.path.join(override_dir, "snap_layer_proxy.conf") + if not proxy and not os.path.exists(path): + return # No proxy asked for and proxy never configured. + + # It seems we cannot rely on this directory existing, so manually + # create it. 
+ if not os.path.exists(override_dir): + host.mkdir(override_dir, perms=0o755) + + if not data_changed("snap.proxy", proxy): + return # Short circuit avoids unnecessary restarts. + + if proxy: + create_snap_proxy_conf(path, proxy) + else: + remove_snap_proxy_conf(path) + subprocess.check_call(["systemctl", "daemon-reload"], universal_newlines=True) + time.sleep(2) + subprocess.check_call(["systemctl", "restart", "snapd.service"], universal_newlines=True) + + +def create_snap_proxy_conf(path, proxy): + host.mkdir(os.path.dirname(path)) + content = dedent( + """\ + # Managed by Juju + [Service] + """ + ) + for proxy_key, proxy_value in proxy.items(): + content += "Environment={}={}\n".format(proxy_key, proxy_value) + host.write_file(path, content.encode()) + + +def remove_snap_proxy_conf(path): + if os.path.exists(path): + os.remove(path) + + +def ensure_path(): + # Per Bug #1662856, /snap/bin may be missing from $PATH. Fix this. + if "/snap/bin" not in os.environ["PATH"].split(":"): + os.environ["PATH"] += ":/snap/bin" + + +def _get_snapd_version(): + stdout = subprocess.check_output(["snap", "version"], stdin=subprocess.DEVNULL, universal_newlines=True) + version_info = dict(line.split(None, 1) for line in stdout.splitlines()) + return LooseVersion(version_info["snapd"]) + + +PREFERENCES = """\ +Package: * +Pin: release a={}-proposed +Pin-Priority: 400 +""" + + +def ensure_snapd_min_version(min_version): + snapd_version = _get_snapd_version() + if snapd_version < LooseVersion(min_version): + from charmhelpers.fetch import add_source, apt_update, apt_install + + # Temporary until LP:1735344 lands + add_source("distro-proposed", fail_invalid=True) + distro = get_series() + # disable proposed by default, needs to explicit + write_file( + "/etc/apt/preferences.d/proposed", + PREFERENCES.format(distro), + ) + apt_update() + # explicitly install snapd from proposed + apt_install("snapd/{}-proposed".format(distro)) + snapd_version = _get_snapd_version() + if snapd_version 
< LooseVersion(min_version): + hookenv.log("Failed to install snapd >= {}".format(min_version), ERROR) + raise UnsatisfiedMinimumVersionError(min_version, snapd_version) + + +def download_assertion_bundle(proxy_url): + """Download proxy assertion bundle and store id""" + assertions_url = "{}/v2/auth/store/assertions".format(proxy_url) + local_bundle, headers = urlretrieve(assertions_url) + store_id = headers["X-Assertion-Store-Id"] + return local_bundle, store_id + + +def configure_snap_store_proxy(): + # Do nothing if we don't have kernel support yet + if not kernel_supported(): + return + + if not reactive.is_flag_set("config.changed.snap_proxy_url"): + return + config = hookenv.config() + if "snap_proxy_url" not in config: + # The deprecated snap_proxy_url config items have been removed + # from config.yaml. If the charm author hasn't added them back + # explicitly, there is nothing to do. Juju is maintaining these + # settings as model configuration. + return + snap_store_proxy_url = config.get("snap_proxy_url") + if not snap_store_proxy_url and not config.previous("snap_proxy_url"): + # Proxy url is not set, and was not set previous hook. Do nothing, + # to avoid overwriting the Juju maintained setting. 
+ return + ensure_snapd_min_version("2.30") + if snap_store_proxy_url: + bundle, store_id = download_assertion_bundle(snap_store_proxy_url) + try: + subprocess.check_output( + ["snap", "ack", bundle], + stdin=subprocess.DEVNULL, + universal_newlines=True, + ) + except subprocess.CalledProcessError as e: + raise InvalidBundleError("snapd could not ack the proxy assertion: " + e.output) + else: + store_id = "" + + try: + subprocess.check_output( + ["snap", "set", "core", "proxy.store={}".format(store_id)], + stdin=subprocess.DEVNULL, + universal_newlines=True, + ) + except subprocess.CalledProcessError as e: + raise InvalidBundleError("Proxy ID from header did not match store assertion: " + e.output) + + +register_trigger(when="config.changed.snapd_refresh", clear_flag="snap.refresh.set") + + +@when_not("snap.refresh.set") +@when("snap.installed.core") +def change_snapd_refresh(): + """Set the system refresh.timer option""" + ensure_snapd_min_version("2.31") + timer = hookenv.config()["snapd_refresh"] + was_set = reactive.is_flag_set("snap.refresh.was-set") + if timer or was_set: + snap.set_refresh_timer(timer) + reactive.toggle_flag("snap.refresh.was-set", timer) + reactive.set_flag("snap.refresh.set") + + +# Bootstrap. We don't use standard reactive handlers to ensure that +# everything is bootstrapped before any charm handlers are run. 
+hookenv.atstart(hookenv.log, "Initializing Snap Layer") +hookenv.atstart(ensure_snapd) +hookenv.atstart(ensure_path) +hookenv.atstart(update_snap_proxy) +hookenv.atstart(configure_snap_store_proxy) +hookenv.atstart(install) diff --git a/kubernetes-control-plane/reactive/status.py b/kubernetes-control-plane/reactive/status.py new file mode 100644 index 0000000..2f33f3f --- /dev/null +++ b/kubernetes-control-plane/reactive/status.py @@ -0,0 +1,4 @@ +from charms import layer + + +layer.status._initialize() diff --git a/kubernetes-control-plane/reactive/tls_client.py b/kubernetes-control-plane/reactive/tls_client.py new file mode 100644 index 0000000..afa2228 --- /dev/null +++ b/kubernetes-control-plane/reactive/tls_client.py @@ -0,0 +1,208 @@ +import os + +from pathlib import Path +from subprocess import check_call + +from charms import layer +from charms.reactive import hook +from charms.reactive import set_state, remove_state +from charms.reactive import when +from charms.reactive import set_flag, clear_flag +from charms.reactive import endpoint_from_flag +from charms.reactive.helpers import data_changed + +from charmhelpers.core import hookenv, unitdata +from charmhelpers.core.hookenv import log + + +@when('certificates.ca.available') +def store_ca(tls): + '''Read the certificate authority from the relation object and install + the ca on this system.''' + # Get the CA from the relationship object. + certificate_authority = tls.get_ca() + if certificate_authority: + layer_options = layer.options('tls-client') + ca_path = layer_options.get('ca_certificate_path') + changed = data_changed('certificate_authority', certificate_authority) + if ca_path: + if changed or not os.path.exists(ca_path): + log('Writing CA certificate to {0}'.format(ca_path)) + # ensure we have a newline at the end of the certificate. + # some things will blow up without one. 
+ # See https://bugs.launchpad.net/charm-kubernetes-master/+bug/1828034 + if not certificate_authority.endswith('\n'): + certificate_authority += '\n' + _write_file(ca_path, certificate_authority) + set_state('tls_client.ca.written') + set_state('tls_client.ca.saved') + if changed: + # Update /etc/ssl/certs and generate ca-certificates.crt + install_ca(certificate_authority) + + +@when('certificates.server.cert.available') +def store_server(tls): + '''Read the server certificate and server key from the relation object + and save them to the certificate directory..''' + server_cert, server_key = tls.get_server_cert() + chain = tls.get_chain() + if chain: + server_cert = server_cert + '\n' + chain + if server_cert and server_key: + layer_options = layer.options('tls-client') + cert_path = layer_options.get('server_certificate_path') + key_path = layer_options.get('server_key_path') + cert_changed = data_changed('server_certificate', server_cert) + key_changed = data_changed('server_key', server_key) + if cert_path: + if cert_changed or not os.path.exists(cert_path): + log('Writing server certificate to {0}'.format(cert_path)) + _write_file(cert_path, server_cert) + set_state('tls_client.server.certificate.written') + set_state('tls_client.server.certificate.saved') + if key_path: + if key_changed or not os.path.exists(key_path): + log('Writing server key to {0}'.format(key_path)) + _write_file(key_path, server_key) + set_state('tls_client.server.key.saved') + + +@when('certificates.client.cert.available') +def store_client(tls): + '''Read the client certificate and client key from the relation object + and copy them to the certificate directory.''' + client_cert, client_key = tls.get_client_cert() + chain = tls.get_chain() + if chain: + client_cert = client_cert + '\n' + chain + if client_cert and client_key: + layer_options = layer.options('tls-client') + cert_path = layer_options.get('client_certificate_path') + key_path = layer_options.get('client_key_path') + 
cert_changed = data_changed('client_certificate', client_cert) + key_changed = data_changed('client_key', client_key) + if cert_path: + if cert_changed or not os.path.exists(cert_path): + log('Writing client certificate to {0}'.format(cert_path)) + _write_file(cert_path, client_cert) + set_state('tls_client.client.certificate.written') + set_state('tls_client.client.certificate.saved') + if key_path: + if key_changed or not os.path.exists(key_path): + log('Writing client key to {0}'.format(key_path)) + _write_file(key_path, client_key) + set_state('tls_client.client.key.saved') + + +@when('certificates.certs.changed') +def update_certs(): + tls = endpoint_from_flag('certificates.certs.changed') + certs_paths = unitdata.kv().get('layer.tls-client.cert-paths', {}) + all_ready = True + any_changed = False + maps = { + 'server': tls.server_certs_map, + 'client': tls.client_certs_map, + } + + if maps.get('client') == {}: + log( + 'No client certs found using maps. Checking for global \ + client certificates.', + 'WARNING' + ) + # Check for global certs, + # Backwards compatibility https://bugs.launchpad.net/charm-kubernetes-master/+bug/1825819 + cert_pair = tls.get_client_cert() + if cert_pair is not None: + for client_name in certs_paths.get('client', {}).keys(): + maps.get('client').update({ + client_name: cert_pair + }) + + chain = tls.get_chain() + for cert_type in ('server', 'client'): + for common_name, paths in certs_paths.get(cert_type, {}).items(): + cert_pair = maps[cert_type].get(common_name) + if not cert_pair: + all_ready = False + continue + if not data_changed('layer.tls-client.' 
+ '{}.{}'.format(cert_type, common_name), cert_pair): + continue + + cert = None + key = None + if type(cert_pair) is not tuple: + if paths['crt']: + cert = cert_pair.cert + if paths['key']: + key = cert_pair.key + else: + cert, key = cert_pair + + if cert: + if chain: + cert = cert + '\n' + chain + _ensure_directory(paths['crt']) + Path(paths['crt']).write_text(cert) + + if key: + _ensure_directory(paths['key']) + Path(paths['key']).write_text(key) + + any_changed = True + # clear flags first to ensure they are re-triggered if left set + clear_flag('tls_client.{}.certs.changed'.format(cert_type)) + clear_flag('tls_client.{}.cert.{}.changed'.format(cert_type, + common_name)) + set_flag('tls_client.{}.certs.changed'.format(cert_type)) + set_flag('tls_client.{}.cert.{}.changed'.format(cert_type, + common_name)) + if all_ready: + set_flag('tls_client.certs.saved') + if any_changed: + clear_flag('tls_client.certs.changed') + set_flag('tls_client.certs.changed') + clear_flag('certificates.certs.changed') + + +def install_ca(certificate_authority): + '''Install a certificiate authority on the system by calling the + update-ca-certificates command.''' + if certificate_authority: + name = hookenv.service_name() + # Create a path to install CAs on Debian systems. + ca_path = '/usr/local/share/ca-certificates/{0}.crt'.format(name) + log('Writing CA certificate to {0}'.format(ca_path)) + _write_file(ca_path, certificate_authority) + # Update the trusted CAs on this system (a time expensive operation). 
+ check_call(['update-ca-certificates']) + log('Generated ca-certificates.crt for {0}'.format(name)) + set_state('tls_client.ca_installed') + + +@hook('upgrade-charm') +def remove_states(): + remove_state('tls_client.ca.saved') + remove_state('tls_client.server.certificate.saved') + remove_state('tls_client.server.key.saved') + remove_state('tls_client.client.certificate.saved') + remove_state('tls_client.client.key.saved') + + +def _ensure_directory(path): + '''Ensure the parent directory exists creating directories if necessary.''' + directory = os.path.dirname(path) + if not os.path.isdir(directory): + os.makedirs(directory) + os.chmod(directory, 0o770) + + +def _write_file(path, content): + '''Write the path to a file.''' + _ensure_directory(path) + with open(path, 'w') as stream: + stream.write(content) + os.chmod(path, 0o440) diff --git a/kubernetes-control-plane/reactive/vault_kv.py b/kubernetes-control-plane/reactive/vault_kv.py new file mode 100644 index 0000000..78ae3b6 --- /dev/null +++ b/kubernetes-control-plane/reactive/vault_kv.py @@ -0,0 +1,67 @@ +from charmhelpers.core import hookenv, host +from charms.reactive import when_all, when_not, set_flag, clear_flag +from charms.reactive import endpoint_from_flag, register_trigger +from charms.reactive import data_changed + +from charms.layer import vault_kv + + +register_trigger(when_not="vault-kv.connected", clear_flag="layer.vault-kv.ready") +register_trigger(when_not="vault-kv.connected", clear_flag="layer.vault-kv.requested") + + +@when_all("vault-kv.connected") +@when_not("layer.vault-kv.requested") +def request_vault_access(): + vault = endpoint_from_flag("vault-kv.connected") + backend_name = vault_kv._get_secret_backend() + # backend can't be isolated or VaultAppKV won't work; see issue #2 + vault.request_secret_backend(backend_name, isolated=False) + set_flag("layer.vault-kv.requested") + + +@when_all("vault-kv.available") +def set_ready(): + try: + vault_kv.get_vault_config() + except 
vault_kv.VaultNotReady: + clear_flag("layer.vault-kv.ready") + else: + set_flag("layer.vault-kv.ready") + + +@when_all("layer.vault-kv.ready") +def check_config_changed(): + try: + config = vault_kv.get_vault_config() + except vault_kv.VaultNotReady: + return + else: + if data_changed("layer.vault-kv.config", config): + set_flag("layer.vault-kv.config.changed") + + +def manage_app_kv_flags(): + try: + app_kv = vault_kv.VaultAppKV() + for key in app_kv.keys(): + app_kv._manage_flags(key) + except vault_kv.VaultNotReady: + vault_kv.VaultAppKV._clear_all_flags() + + +def update_app_kv_hashes(): + try: + app_kv = vault_kv.VaultAppKV() + if app_kv.any_changed(): + if hookenv.is_leader(): + # force hooks to run on non-leader units + hookenv.leader_set({"vault-kv-nonce": host.pwgen(8)}) + # Update the local unit hashes at successful exit + app_kv.update_hashes() + except vault_kv.VaultNotReady: + return + + +hookenv.atstart(manage_app_kv_flags) +hookenv.atexit(update_app_kv_hashes) diff --git a/kubernetes-control-plane/reactive/vaultlocker.py b/kubernetes-control-plane/reactive/vaultlocker.py new file mode 100644 index 0000000..d591c1e --- /dev/null +++ b/kubernetes-control-plane/reactive/vaultlocker.py @@ -0,0 +1,49 @@ +import shutil + +from charms.reactive import when_all, when_not, set_flag, clear_flag +from charmhelpers.core import hookenv, host + +from charms import apt +from charms import layer + + +@when_not('apt.installed.vaultlocker') +def install_vaultlocker(): + '''Install vaultlocker. + + On bionic and higher, vaultlocker is available in the default system + sources. For xenial, we need to add the queens cloud archive. 
+ ''' + dist = host.lsb_release() + dist_series = dist['DISTRIB_CODENAME'].lower() + if dist_series == 'xenial': + apt.add_source('cloud:queens') + apt.update() + apt.queue_install(['vaultlocker']) + + +@when_all('apt.installed.vaultlocker', + 'layer.vault-kv.ready', + 'layer.vault-kv.config.changed') +def configure_vaultlocker(): + # write VaultLocker config file + layer.vaultlocker.write_vaultlocker_conf(layer.vault_kv.get_vault_config()) + # create location for loop device service envs + layer.vaultlocker.LOOP_ENVS.mkdir(parents=True, exist_ok=True) + # create loop device service template + shutil.copyfile('templates/vaultlocker-loop@.service', + '/etc/systemd/system/vaultlocker-loop@.service') + # mark as complete + set_flag('layer.vaultlocker.configured') + clear_flag('layer.vault-kv.config.changed') + + +@when_all('layer.vaultlocker.configured') +@when_not('layer.vaultlocker.ready') +def auto_encrypt(): + metadata = hookenv.metadata() + for storage_name, storage_metadata in metadata.get('storage', {}).items(): + if storage_metadata.get('vaultlocker-encrypt', False): + mountbase = storage_metadata.get('vaultlocker-mountbase') + layer.vaultlocker.encrypt_storage(storage_name, mountbase) + set_flag('layer.vaultlocker.ready') diff --git a/kubernetes-control-plane/requirements.txt b/kubernetes-control-plane/requirements.txt new file mode 100644 index 0000000..55543d9 --- /dev/null +++ b/kubernetes-control-plane/requirements.txt @@ -0,0 +1,3 @@ +mock +flake8 +pytest diff --git a/kubernetes-control-plane/revision b/kubernetes-control-plane/revision new file mode 100644 index 0000000..c227083 --- /dev/null +++ b/kubernetes-control-plane/revision @@ -0,0 +1 @@ +0 \ No newline at end of file diff --git a/kubernetes-control-plane/setup.py b/kubernetes-control-plane/setup.py new file mode 100755 index 0000000..b30bff5 --- /dev/null +++ b/kubernetes-control-plane/setup.py @@ -0,0 +1,28 @@ +#!/usr/bin/env python + +import os +from setuptools import setup + +here = 
os.path.abspath(os.path.dirname(__file__)) + +with open(os.path.join(here, "README.md")) as f: + README = f.read() + +setup( + name="layer_snap", + version="1.0.0", + description="layer_snap", + long_description=README, + license="Apache License 2.0", + classifiers=[ + "Development Status :: 5 - Production/Stable", + "Intended Audience :: Developers", + "Programming Language :: Python :: 3", + ], + url="https://git.launchpad.net/layer-snap", + package_dir={"": "lib"}, + packages=["charms/layer"], + include_package_data=True, + zip_safe=False, + install_requires=["charmhelpers", "charms.reactive"], +) diff --git a/kubernetes-control-plane/templates/cdk-service-kicker b/kubernetes-control-plane/templates/cdk-service-kicker new file mode 100644 index 0000000..26d3740 --- /dev/null +++ b/kubernetes-control-plane/templates/cdk-service-kicker @@ -0,0 +1,34 @@ +#!/bin/sh +set -eu + +# This service runs on boot to work around issues relating to LXD and snapd. + +# Workaround for https://github.com/conjure-up/conjure-up/issues/1448 +if [ -f '/proc/1/environ' ] && grep -q '^container=lxc' /proc/1/environ; then + echo "lxc detected, applying snapd apparmor profiles" + (set +e + apparmor_parser /var/lib/snapd/apparmor/profiles/* + echo "apparmor_parser: exit status $?" + ) +else + echo "lxc not detected, skipping snapd apparmor profiles" +fi + +# Workaround for https://github.com/juju-solutions/bundle-canonical-kubernetes/issues/357 +services="{{services}}" + +deadline="$(expr "$(date +%s)" + 600)" + +while [ "$(date +%s)" -lt "$deadline" ]; do + for service in $services; do + echo "$service: checking" + if ! 
systemctl is-active "$service"; then + echo "$service: not active, restarting" + systemctl restart "$service" || true + fi + done + + sleep 10 +done + +echo "deadline has passed, exiting gracefully" diff --git a/kubernetes-control-plane/templates/cdk-service-kicker.service b/kubernetes-control-plane/templates/cdk-service-kicker.service new file mode 100644 index 0000000..5c2105e --- /dev/null +++ b/kubernetes-control-plane/templates/cdk-service-kicker.service @@ -0,0 +1,10 @@ +[Unit] +Description=cdk-service-kicker + +[Service] +ExecStart=/usr/bin/cdk-service-kicker +Restart=on-failure +Type=simple + +[Install] +WantedBy=multi-user.target diff --git a/kubernetes-control-plane/templates/cdk.auth-webhook-secret.yaml b/kubernetes-control-plane/templates/cdk.auth-webhook-secret.yaml new file mode 100644 index 0000000..a12c402 --- /dev/null +++ b/kubernetes-control-plane/templates/cdk.auth-webhook-secret.yaml @@ -0,0 +1,13 @@ +# Manifest for CK secrets that auth-webhook expects +--- +apiVersion: v1 +kind: Secret +metadata: + name: {{ secret_name }} + namespace: {{ secret_namespace }} +type: {{ type }} +data: + uid: {{ user }} + username: {{ username }} + password: {{ password }} + groups: '{{ groups }}' diff --git a/kubernetes-control-plane/templates/cdk.master.auth-webhook-conf.yaml b/kubernetes-control-plane/templates/cdk.master.auth-webhook-conf.yaml new file mode 100644 index 0000000..e2d3fa0 --- /dev/null +++ b/kubernetes-control-plane/templates/cdk.master.auth-webhook-conf.yaml @@ -0,0 +1,15 @@ +apiVersion: v1 +kind: Config +clusters: + - name: authn + cluster: + certificate-authority: /root/cdk/ca.crt + server: https://{{ host }}:{{ port }}/{{ api_ver }} +users: + - name: kube-apiserver +contexts: +- context: + cluster: authn + user: kube-apiserver + name: authn +current-context: authn diff --git a/kubernetes-control-plane/templates/cdk.master.auth-webhook.logrotate b/kubernetes-control-plane/templates/cdk.master.auth-webhook.logrotate new file mode 100644 index 
0000000..d099b58 --- /dev/null +++ b/kubernetes-control-plane/templates/cdk.master.auth-webhook.logrotate @@ -0,0 +1,11 @@ +/var/log/kubernetes/{{logfile}} { + daily + rotate 10 + missingok + notifempty + compress + sharedscripts + postrotate + kill -USR1 $(cat /run/{{ pidfile }}) + endscript +} diff --git a/kubernetes-control-plane/templates/cdk.master.auth-webhook.py b/kubernetes-control-plane/templates/cdk.master.auth-webhook.py new file mode 100644 index 0000000..efcb969 --- /dev/null +++ b/kubernetes-control-plane/templates/cdk.master.auth-webhook.py @@ -0,0 +1,424 @@ +#!/usr/bin/env python3 + +import csv +import json +import logging +import aiohttp +import asyncio +import signal +from base64 import b64decode +from copy import deepcopy +from pathlib import Path +from yaml import safe_load, YAMLError + + +AWS_IAM_ENDPOINT = '{{ aws_iam_endpoint if aws_iam_endpoint }}' +KEYSTONE_ENDPOINT = '{{ keystone_endpoint if keystone_endpoint }}' +CUSTOM_AUTHN_ENDPOINT = '{{ custom_authn_endpoint if custom_authn_endpoint }}' + +app = aiohttp.web.Application() +routes = aiohttp.web.RouteTableDef() + +# Disable the gunicorn arbiter's SIGCHLD handler in this worker. The handler +# gets inherited by worker processes where it appears to serve no useful +# function. It also makes it impossible for workers to make subprocess calls +# safely, so, disable it. +# https://bugs.launchpad.net/charm-kubernetes-control-plane/+bug/1938470 +signal.signal(signal.SIGCHLD, signal.SIG_DFL) + + +async def run(*args, timeout=10, **kwargs): + '''Run a CLI command. + + Returns retcode, stdout, and stderr (already decoded). + + If the process times out, the exit code will be 124 and stdout and stderr + will be empty. 
+ + NOTE: + In Python 3.8+, the default process child watcher, ThreadedChildWatcher, + appears to have a race condition where it frequently attempts to wait for + the child process PID before it's visible, leading to a spurious warning + in the log about "Unknown child process", and a 255 exit code regardless + of what the child process actually exits with. The stdout and stderr will + still be available, however. + ''' + args = [str(arg) for arg in args] + kwargs.update( + stdout=asyncio.subprocess.PIPE, + stderr=asyncio.subprocess.PIPE, + ) + + async def _run(): + proc = await asyncio.create_subprocess_exec(*args, **kwargs) + stdout, stderr = await proc.communicate() + return proc.returncode, stdout.decode('utf8'), stderr.decode('utf8') + + try: + return await asyncio.wait_for(_run(), timeout=timeout) + except asyncio.TimeoutError: + app.logger.exception('Command timed out: {}'.format(' '.join(args))) + return 124, '', '' + + +async def kubectl(*args): + '''Run a kubectl CLI command with a config file. + + Returns retcode, stdout, and stderr. + ''' + # Try to use our service account kubeconfig; fall back to root if needed + kubectl_cmd = Path('/snap/bin/kubectl') + if not kubectl_cmd.is_file(): + # Fall back to anywhere on the path if the snap isn't available + kubectl_cmd = 'kubectl' + return await run(kubectl_cmd, '--kubeconfig=/root/.kube/config', *args) + + +def log_secret(text, obj, hide=True): + '''Log information about a TokenReview object. + + The message will always be logged at the 'debug' level and will be in the + form "text: obj". By default, secrets will be hidden. Set 'hide=False' to + have the secret printed in the output unobfuscated. 
+ ''' + log_obj = obj + if obj and hide: + log_obj = deepcopy(obj) + try: + log_obj['spec']['token'] = '********' + except (KeyError, TypeError): + # No secret here, carry on + pass + app.logger.debug('{}: {}'.format(text, log_obj)) + + +async def check_token(token_review): + '''Populate user info if token is found in auth-related files.''' + app.logger.info('Checking token') + token_to_check = token_review['spec']['token'] + + # If we have an admin token, short-circuit all other checks. This prevents us + # from leaking our admin token to other authn services. + admin_kubeconfig = Path('/root/.kube/config') + data = None + try: + try: + data = safe_load(admin_kubeconfig.read_text()) + except Exception: + # Retry loading the file once, in case the charm was in the + # middle of rewriting it. See lp:1837930 for more info, but + # even without it being rewritten on every hook, there will + # always be a race condition to consider. + await asyncio.sleep(0.5) + data = safe_load(admin_kubeconfig.read_text()) + except YAMLError as e: + # we don't want to use logger.exception() or str(e) because it + # can leak tokens into the log + app.logger.error('Invalid kube config file: %s', type(e).__name__) + except Exception: + if not admin_kubeconfig.exists(): + app.logger.error('Missing kube config file') + elif data is None: + app.logger.error('Empty kube config file') + else: + app.logger.exception('Invalid kube config file') + else: + admin_token = data['users'][0]['user']['token'] + if token_to_check == admin_token: + # We have a valid admin + token_review['status'] = { + 'authenticated': True, + 'user': { + 'username': 'admin', + 'uid': 'admin', + 'groups': ['system:masters'] + } + } + return True + + # No admin? We're probably in an upgrade. Check an existing known_tokens.csv. 
+ csv_fields = ['token', 'username', 'user', 'groups'] + known_tokens = Path('/root/cdk/known_tokens.csv') + try: + with known_tokens.open('r') as f: + data_by_token = {r['token']: r for r in csv.DictReader(f, csv_fields)} + except FileNotFoundError: + data_by_token = {} + + if token_to_check in data_by_token: + record = data_by_token[token_to_check] + # groups are optional; default to an empty string if we don't have any + groups = record.get('groups', '').split(',') + token_review['status'] = { + 'authenticated': True, + 'user': { + 'username': record['username'], + 'uid': record['user'], + 'groups': groups, + } + } + return True + return False + + +async def check_secrets(token_review): + '''Populate user info if token is found in k8s secrets.''' + # Only check secrets if kube-apiserver is up + app.logger.info('Checking secret') + token = token_review['spec']['token'] + + if token in app['secrets']: + token_review['status'] = { + 'authenticated': True, + 'user': app['secrets'][token], + } + return True + else: + return False + + +async def check_aws_iam(token_review): + '''Check the request with an AWS IAM authn server.''' + app.logger.info('Checking AWS IAM') + + # URL comes from /root/cdk/aws-iam-webhook.yaml + app.logger.debug('Forwarding to: {}'.format(AWS_IAM_ENDPOINT)) + + return await forward_request(token_review, AWS_IAM_ENDPOINT) + + +async def check_keystone(token_review): + '''Check the request with a Keystone authn server.''' + app.logger.info('Checking Keystone') + + # URL comes from /root/cdk/keystone/webhook.yaml + app.logger.debug('Forwarding to: {}'.format(KEYSTONE_ENDPOINT)) + + return await forward_request(token_review, KEYSTONE_ENDPOINT) + + +async def check_custom(token_review): + '''Check the request with a user-specified authn server.''' + app.logger.info('Checking Custom Endpoint') + + # User will set the URL in k8s-cp config + app.logger.debug('Forwarding to: {}'.format(CUSTOM_AUTHN_ENDPOINT)) + + return await 
forward_request(token_review, CUSTOM_AUTHN_ENDPOINT) + + +async def forward_request(json_req, url): + '''Forward a JSON TokenReview request to a url. + + Returns True if the request is authenticated; False if the response is + either invalid or authn has been denied. + ''' + timeout = 10 + resp_text = '' + try: + async with aiohttp.ClientSession() as session: + try: + async with session.post(url, json=json_req, timeout=timeout) as resp: + resp_text = await resp.text() + except aiohttp.ClientSSLError: + app.logger.debug('SSLError with server; skipping cert validation') + async with session.post(url, + json=json_req, + verify_ssl=False, + timeout=timeout) as resp: + resp_text = await resp.text() + except asyncio.TimeoutError: + app.logger.error('Timed out contacting server') + return False + except Exception: + app.logger.exception('Failed to contact server') + return False + + # Check if the response is valid + try: + resp = json.loads(resp_text) + 'authenticated' in resp['status'] + except (KeyError, TypeError, ValueError): + log_secret(text='Invalid response from server', obj=resp_text) + return False + + # NB: When a forwarded request is authenticated, set the 'status' field to + # whatever the external server sends us. This ensures any status fields that + # the server wants to send makes it back to the kube apiserver. + if resp['status']['authenticated']: + json_req['status'] = resp['status'] + return True + return False + + +def ack(req, **kwargs): + # Successful checks will set auth and user data in the 'req' dict + log_secret(text='ACK', obj=req) + return aiohttp.web.json_response(req, **kwargs) + + +def nak(req, **kwargs): + # Force unauthenticated, just in case + req.setdefault('status', {})['authenticated'] = False + log_secret(text='NAK', obj=req) + return aiohttp.web.json_response(req, **kwargs) + + +@routes.post('/{{ api_ver }}') +async def webhook(request): + '''Listen on /$api_version for POST requests. 
+ + For a POSTed TokenReview object, check every known authentication mechanism + for a user with a matching token. + + The /$api_version is expected to be the api version of the authentication.k8s.io + TokenReview that the k8s-apiserver will be sending. + + Returns: + TokenReview object with 'authenticated: True' and user attributes if a + token is found; otherwise, a TokenReview object with 'authenticated: False' + ''' + try: + req = await request.json() + except json.JSONDecodeError: + app.logger.debug('Unable to parse request') + return nak({}, status=400) + + # Make the request unauthenticated by deafult + req['status'] = {'authenticated': False} + + try: + valid = True if (req['kind'] == 'TokenReview' and + req['spec']['token']) else False + except (KeyError, TypeError): + valid = False + + if valid: + log_secret(text='REQ', obj=req) + else: + log_secret(text='Invalid request', obj=req) + return nak({}, status=400) + + if await check_token(req): + return ack(req) + + if not app['secrets']: + # If secrets aren't yet available, none of the system accounts will be + # functional and thus neither will the cluster, so there's no point to + # going any further. Additionally, we don't want to accidentally leak + # system account tokens to external auth endpoints. 
+ app.logger.warning('Secrets not yet available; aborting') + return nak(req) + + if await check_secrets(req): + return ack(req) + + if AWS_IAM_ENDPOINT and await check_aws_iam(req): + return ack(req) + + if KEYSTONE_ENDPOINT and await check_keystone(req): + return ack(req) + + if CUSTOM_AUTHN_ENDPOINT and await check_custom(req): + return ack(req) + + return nak(req) + + +@routes.post('/slow-test') +async def slow_test(request): + app.logger.debug('Slow request started') + await asyncio.sleep(5) + app.logger.debug('Slow request finished') + return aiohttp.web.json_response({'status': {'authenticated': False}}) + + +async def refresh_secrets(app): + app.logger.info('Refreshing secrets') + retcode, stdout, stderr = await run( + 'systemctl', 'is-active', 'snap.kube-apiserver.daemon' + ) + # See note in run() docstring above about exit 255. + if retcode not in (0, 255) or stdout.strip() != 'active': + app.logger.info('Skipping secret refresh: kube-apiserver is not ready ' + '({}, {})'.format(retcode, stdout.strip())) + return + + retcode, stdout, stderr = await kubectl( + 'get', 'secrets', '-n', 'kube-system', '-o', 'json' + ) + # See note in run() docstring above about exit 255. 
+ if retcode not in (0, 255) or stderr: + app.logger.warning('Unable to load secrets ({}): {}'.format(retcode, stderr)) + return + + try: + secrets = json.loads(stdout) + except json.JSONDecodeError: + app.logger.exception('Unable to parse secrets') + return + + new_secrets = {} + for secret in secrets.get('items', []): + try: + data_b64 = secret['data'] + username_b64 = data_b64['username'].encode('UTF-8') + password_b64 = data_b64['password'].encode('UTF-8') + groups_b64 = data_b64.get('groups', '').encode('UTF-8') + except (KeyError, TypeError): + # CK secrets will have populated 'data', but not all secrets do + continue + + username = uid = b64decode(username_b64).decode('UTF-8') + password = b64decode(password_b64).decode('UTF-8') + groups = b64decode(groups_b64).decode('UTF-8').split(',') + + # NB: CK creates k8s secrets with the 'password' field set as + # uid::token. Split the decoded password so we can send a 'uid' back. + # If there is no delimiter, set uid == username. + # TODO: make the delimeter less magical so it doesn't get out of + # sync with the function that creates secrets in kubernetes_control_plane.py. 
+ pw_delim = '::' + if pw_delim in password: + uid = password.rsplit(pw_delim, 1)[0] + new_secrets[password] = { + 'username': username, + 'uid': uid, + 'groups': groups, + } + app['secrets'] = new_secrets + + +async def startup(app): + # Log to gunicorn + glogger = logging.getLogger('gunicorn.error') + app.logger.handlers = glogger.handlers + app.logger.setLevel(glogger.level) + + async def _task(): + while True: + try: + await refresh_secrets(app) + await asyncio.sleep(60) + except asyncio.CancelledError: + break + except Exception: + app.logger.exception('Failed to get secrets') + + app['secrets'] = {} + app['secrets_task'] = asyncio.ensure_future(_task()) + + +async def cleanup(app): + task = app.get('secrets_task') + task.cancel() + await task + + +app.add_routes(routes) +app.on_startup.append(startup) +app.on_cleanup.append(cleanup) + + +if __name__ == '__main__': + aiohttp.web.run_app(app) diff --git a/kubernetes-control-plane/templates/cdk.master.auth-webhook.service b/kubernetes-control-plane/templates/cdk.master.auth-webhook.service new file mode 100644 index 0000000..d13cd15 --- /dev/null +++ b/kubernetes-control-plane/templates/cdk.master.auth-webhook.service @@ -0,0 +1,25 @@ +[Unit] +Description=CDK control-plane auth webhook +After=network.target +StartLimitIntervalSec=0 + +[Service] +User=root +WorkingDirectory={{ root_dir }} +ExecStart={{ charm_dir }}/../.venv/bin/gunicorn \ + --bind {{ host }}:{{ port }} \ + --capture-output \ + --certfile /root/cdk/server.crt \ + --keyfile /root/cdk/server.key \ + --disable-redirect-access-to-syslog \ + --error-logfile /var/log/kubernetes/{{logfile}} \ + --log-level debug \ + --pid /run/{{ pidfile }} \ + --workers {{ num_workers }} \ + --worker-class aiohttp.worker.GunicornWebWorker \ + auth-webhook:app +Restart=always +RestartSec=10 + +[Install] +WantedBy=multi-user.target diff --git a/kubernetes-control-plane/templates/cdk.master.leader.file-watcher.path 
b/kubernetes-control-plane/templates/cdk.master.leader.file-watcher.path new file mode 100644 index 0000000..3855649 --- /dev/null +++ b/kubernetes-control-plane/templates/cdk.master.leader.file-watcher.path @@ -0,0 +1,7 @@ +[Path] +PathChanged=/root/cdk/basic_auth.csv +PathChanged=/root/cdk/known_tokens.csv +PathChanged=/root/cdk/serviceaccount.key + +[Install] +WantedBy=multi-user.target diff --git a/kubernetes-control-plane/templates/cdk.master.leader.file-watcher.service b/kubernetes-control-plane/templates/cdk.master.leader.file-watcher.service new file mode 100644 index 0000000..51b8edb --- /dev/null +++ b/kubernetes-control-plane/templates/cdk.master.leader.file-watcher.service @@ -0,0 +1,10 @@ +[Unit] +Description=CDK control-plane leader file-watcher +After=network.target + +[Service] +Type=oneshot +ExecStart=/usr/bin/juju-run {{ unit }} /usr/local/sbin/cdk.master.leader.file-watcher.sh + +[Install] +WantedBy=multi-user.target diff --git a/kubernetes-control-plane/templates/cdk.master.leader.file-watcher.sh b/kubernetes-control-plane/templates/cdk.master.leader.file-watcher.sh new file mode 100644 index 0000000..01a2e86 --- /dev/null +++ b/kubernetes-control-plane/templates/cdk.master.leader.file-watcher.sh @@ -0,0 +1,10 @@ +#!/bin/bash + +# This script is invoked by cdk.master.leader.file-watcher.service + +if [ is-leader ]; then + leader-set \ + "/root/cdk/basic_auth.csv=$(cat /root/cdk/basic_auth.csv)" \ + "/root/cdk/known_tokens.csv=$(cat /root/cdk/known_tokens.csv)" \ + "/root/cdk/serviceaccount.key=$(cat /root/cdk/serviceaccount.key)" +fi diff --git a/kubernetes-control-plane/templates/ceph.conf b/kubernetes-control-plane/templates/ceph.conf new file mode 100644 index 0000000..f4d8ce4 --- /dev/null +++ b/kubernetes-control-plane/templates/ceph.conf @@ -0,0 +1,17 @@ +[global] +auth cluster required = {{ auth_supported }} +auth service required = {{ auth_supported }} +auth client required = {{ auth_supported }} +keyring = 
/etc/ceph/$cluster.$name.keyring +mon host = {{ mon_hosts }} + +log to syslog = {{ use_syslog }} +err to syslog = {{ use_syslog }} +clog to syslog = {{ use_syslog }} +mon cluster log to syslog = {{ use_syslog }} +debug mon = {{ loglevel }}/5 +debug osd = {{ loglevel }}/5 + +[client] +log file = /var/log/ceph.log + diff --git a/kubernetes-control-plane/templates/create-namespace.yaml.j2 b/kubernetes-control-plane/templates/create-namespace.yaml.j2 new file mode 100644 index 0000000..a121ecc --- /dev/null +++ b/kubernetes-control-plane/templates/create-namespace.yaml.j2 @@ -0,0 +1,6 @@ +apiVersion: v1 +kind: Namespace +metadata: + name: {{ name }} + labels: + name: {{ name }} diff --git a/kubernetes-control-plane/templates/grafana/autoload/kubernetes.json b/kubernetes-control-plane/templates/grafana/autoload/kubernetes.json new file mode 100644 index 0000000..8f26875 --- /dev/null +++ b/kubernetes-control-plane/templates/grafana/autoload/kubernetes.json @@ -0,0 +1,4032 @@ +{ + "dashboard":{ + "annotations":{ + "list":[ + { + "builtIn":1, + "datasource":"-- Grafana --", + "enable":true, + "hide":true, + "iconColor":"rgba(0, 211, 255, 1)", + "name":"Annotations & Alerts", + "type":"dashboard" + } + ] + }, + "description":"Monitors Kubernetes cluster using Prometheus. 
Shows overall cluster CPU / Memory / Filesystem usage as well as individual pod, containers, systemd services statistics.", + "editable":true, + "gnetId":315, + "graphTooltip":0, + "id":null, + "iteration":1572969306389, + "links":[ + + ], + "panels":[ + { + "collapsed":false, + "gridPos":{ + "h":1, + "w":24, + "x":0, + "y":0 + }, + "id":49, + "panels":[ + + ], + "repeat":null, + "title":"Total usage", + "type":"row" + }, + { + "cacheTimeout":null, + "colorBackground":false, + "colorValue":true, + "colors":[ + "rgba(50, 172, 45, 0.97)", + "rgba(237, 129, 40, 0.89)", + "rgba(245, 54, 54, 0.9)" + ], + "datasource":"prometheus - Juju generated source", + "editable":true, + "error":false, + "format":"percent", + "gauge":{ + "maxValue":100, + "minValue":0, + "show":true, + "thresholdLabels":false, + "thresholdMarkers":true + }, + "gridPos":{ + "h":5, + "w":8, + "x":0, + "y":1 + }, + "height":"180px", + "id":4, + "interval":null, + "links":[ + + ], + "mappingType":1, + "mappingTypes":[ + { + "name":"value to text", + "value":1 + }, + { + "name":"range to text", + "value":2 + } + ], + "maxDataPoints":100, + "nullPointMode":"connected", + "nullText":null, + "postfix":"", + "postfixFontSize":"50%", + "prefix":"", + "prefixFontSize":"50%", + "rangeMaps":[ + { + "from":"null", + "text":"N/A", + "to":"null" + } + ], + "sparkline":{ + "fillColor":"rgba(31, 118, 189, 0.18)", + "full":false, + "lineColor":"rgb(31, 120, 193)", + "show":false + }, + "tableColumn":"", + "targets":[ + { + "expr":"sum (container_memory_working_set_bytes{id=\"/\"}) / sum (machine_memory_bytes{}) * 100", + "format":"time_series", + "interval":"10s", + "intervalFactor":1, + "legendFormat":"", + "refId":"A", + "step":300 + } + ], + "thresholds":"65, 90", + "title":"Cluster memory usage", + "transparent":false, + "type":"singlestat", + "valueFontSize":"80%", + "valueMaps":[ + { + "op":"=", + "text":"N/A", + "value":"null" + } + ], + "valueName":"current" + }, + { + "cacheTimeout":null, + 
"colorBackground":false, + "colorValue":true, + "colors":[ + "rgba(50, 172, 45, 0.97)", + "rgba(237, 129, 40, 0.89)", + "rgba(245, 54, 54, 0.9)" + ], + "datasource":"prometheus - Juju generated source", + "decimals":2, + "editable":true, + "error":false, + "format":"percent", + "gauge":{ + "maxValue":100, + "minValue":0, + "show":true, + "thresholdLabels":false, + "thresholdMarkers":true + }, + "gridPos":{ + "h":5, + "w":8, + "x":8, + "y":1 + }, + "height":"180px", + "id":6, + "interval":null, + "links":[ + + ], + "mappingType":1, + "mappingTypes":[ + { + "name":"value to text", + "value":1 + }, + { + "name":"range to text", + "value":2 + } + ], + "maxDataPoints":100, + "nullPointMode":"connected", + "nullText":null, + "postfix":"", + "postfixFontSize":"50%", + "prefix":"", + "prefixFontSize":"50%", + "rangeMaps":[ + { + "from":"null", + "text":"N/A", + "to":"null" + } + ], + "sparkline":{ + "fillColor":"rgba(31, 118, 189, 0.18)", + "full":false, + "lineColor":"rgb(31, 120, 193)", + "show":false + }, + "tableColumn":"", + "targets":[ + { + "expr":"sum (rate (container_cpu_usage_seconds_total{id=\"/\"}[1m])) / sum (machine_cpu_cores{}) * 100", + "format":"time_series", + "interval":"10s", + "intervalFactor":1, + "legendFormat":"", + "refId":"A", + "step":300 + } + ], + "thresholds":"65, 90", + "title":"Cluster CPU usage (1m avg)", + "type":"singlestat", + "valueFontSize":"80%", + "valueMaps":[ + { + "op":"=", + "text":"N/A", + "value":"null" + } + ], + "valueName":"current" + }, + { + "cacheTimeout":null, + "colorBackground":false, + "colorValue":true, + "colors":[ + "rgba(50, 172, 45, 0.97)", + "rgba(237, 129, 40, 0.89)", + "rgba(245, 54, 54, 0.9)" + ], + "datasource":"prometheus - Juju generated source", + "decimals":2, + "editable":true, + "error":false, + "format":"percent", + "gauge":{ + "maxValue":100, + "minValue":0, + "show":true, + "thresholdLabels":false, + "thresholdMarkers":true + }, + "gridPos":{ + "h":5, + "w":8, + "x":16, + "y":1 + }, + 
"height":"180px", + "id":7, + "interval":null, + "links":[ + + ], + "mappingType":1, + "mappingTypes":[ + { + "name":"value to text", + "value":1 + }, + { + "name":"range to text", + "value":2 + } + ], + "maxDataPoints":100, + "nullPointMode":"connected", + "nullText":null, + "postfix":"", + "postfixFontSize":"50%", + "prefix":"", + "prefixFontSize":"50%", + "rangeMaps":[ + { + "from":"null", + "text":"N/A", + "to":"null" + } + ], + "sparkline":{ + "fillColor":"rgba(31, 118, 189, 0.18)", + "full":false, + "lineColor":"rgb(31, 120, 193)", + "show":false + }, + "tableColumn":"", + "targets":[ + { + "expr":"sum (container_fs_usage_bytes{}) / sum (container_fs_limit_bytes{id=\"/\"}) * 100", + "format":"time_series", + "interval":"10s", + "intervalFactor":1, + "legendFormat":"", + "metric":"", + "refId":"A", + "step":300 + } + ], + "thresholds":"65, 90", + "title":"Cluster filesystem usage", + "type":"singlestat", + "valueFontSize":"80%", + "valueMaps":[ + { + "op":"=", + "text":"N/A", + "value":"null" + } + ], + "valueName":"current" + }, + { + "cacheTimeout":null, + "colorBackground":false, + "colorValue":false, + "colors":[ + "rgba(50, 172, 45, 0.97)", + "rgba(237, 129, 40, 0.89)", + "rgba(245, 54, 54, 0.9)" + ], + "datasource":"prometheus - Juju generated source", + "decimals":2, + "editable":true, + "error":false, + "format":"bytes", + "gauge":{ + "maxValue":100, + "minValue":0, + "show":false, + "thresholdLabels":false, + "thresholdMarkers":true + }, + "gridPos":{ + "h":3, + "w":4, + "x":0, + "y":6 + }, + "height":"1px", + "id":9, + "interval":null, + "links":[ + + ], + "mappingType":1, + "mappingTypes":[ + { + "name":"value to text", + "value":1 + }, + { + "name":"range to text", + "value":2 + } + ], + "maxDataPoints":100, + "nullPointMode":"connected", + "nullText":null, + "postfix":"", + "postfixFontSize":"20%", + "prefix":"", + "prefixFontSize":"20%", + "rangeMaps":[ + { + "from":"null", + "text":"N/A", + "to":"null" + } + ], + "sparkline":{ + 
"fillColor":"rgba(31, 118, 189, 0.18)", + "full":false, + "lineColor":"rgb(31, 120, 193)", + "show":false + }, + "tableColumn":"", + "targets":[ + { + "expr":"sum (container_memory_working_set_bytes{id=\"/\"})", + "format":"time_series", + "interval":"10s", + "intervalFactor":1, + "legendFormat":"", + "refId":"A", + "step":300 + } + ], + "thresholds":"", + "title":"Used", + "type":"singlestat", + "valueFontSize":"50%", + "valueMaps":[ + { + "op":"=", + "text":"N/A", + "value":"null" + } + ], + "valueName":"current" + }, + { + "cacheTimeout":null, + "colorBackground":false, + "colorValue":false, + "colors":[ + "rgba(50, 172, 45, 0.97)", + "rgba(237, 129, 40, 0.89)", + "rgba(245, 54, 54, 0.9)" + ], + "datasource":"prometheus - Juju generated source", + "decimals":2, + "editable":true, + "error":false, + "format":"bytes", + "gauge":{ + "maxValue":100, + "minValue":0, + "show":false, + "thresholdLabels":false, + "thresholdMarkers":true + }, + "gridPos":{ + "h":3, + "w":4, + "x":4, + "y":6 + }, + "height":"1px", + "id":10, + "interval":null, + "links":[ + + ], + "mappingType":1, + "mappingTypes":[ + { + "name":"value to text", + "value":1 + }, + { + "name":"range to text", + "value":2 + } + ], + "maxDataPoints":100, + "nullPointMode":"connected", + "nullText":null, + "postfix":"", + "postfixFontSize":"50%", + "prefix":"", + "prefixFontSize":"50%", + "rangeMaps":[ + { + "from":"null", + "text":"N/A", + "to":"null" + } + ], + "sparkline":{ + "fillColor":"rgba(31, 118, 189, 0.18)", + "full":false, + "lineColor":"rgb(31, 120, 193)", + "show":false + }, + "tableColumn":"", + "targets":[ + { + "expr":"sum (machine_memory_bytes{})", + "format":"time_series", + "interval":"10s", + "intervalFactor":1, + "refId":"A", + "step":300 + } + ], + "thresholds":"", + "title":"Total", + "type":"singlestat", + "valueFontSize":"50%", + "valueMaps":[ + { + "op":"=", + "text":"N/A", + "value":"null" + } + ], + "valueName":"current" + }, + { + "cacheTimeout":null, + "colorBackground":false, + 
"colorValue":false, + "colors":[ + "rgba(50, 172, 45, 0.97)", + "rgba(237, 129, 40, 0.89)", + "rgba(245, 54, 54, 0.9)" + ], + "datasource":"prometheus - Juju generated source", + "decimals":2, + "editable":true, + "error":false, + "format":"none", + "gauge":{ + "maxValue":100, + "minValue":0, + "show":false, + "thresholdLabels":false, + "thresholdMarkers":true + }, + "gridPos":{ + "h":3, + "w":4, + "x":8, + "y":6 + }, + "height":"1px", + "id":11, + "interval":null, + "links":[ + + ], + "mappingType":1, + "mappingTypes":[ + { + "name":"value to text", + "value":1 + }, + { + "name":"range to text", + "value":2 + } + ], + "maxDataPoints":100, + "nullPointMode":"connected", + "nullText":null, + "postfix":" cores", + "postfixFontSize":"30%", + "prefix":"", + "prefixFontSize":"50%", + "rangeMaps":[ + { + "from":"null", + "text":"N/A", + "to":"null" + } + ], + "sparkline":{ + "fillColor":"rgba(31, 118, 189, 0.18)", + "full":false, + "lineColor":"rgb(31, 120, 193)", + "show":false + }, + "tableColumn":"", + "targets":[ + { + "expr":"sum (rate (container_cpu_usage_seconds_total{id=\"/\"}[1m]))", + "format":"time_series", + "interval":"10s", + "intervalFactor":1, + "refId":"A", + "step":300 + } + ], + "thresholds":"", + "title":"Used", + "type":"singlestat", + "valueFontSize":"50%", + "valueMaps":[ + { + "op":"=", + "text":"N/A", + "value":"null" + } + ], + "valueName":"current" + }, + { + "cacheTimeout":null, + "colorBackground":false, + "colorValue":false, + "colors":[ + "rgba(50, 172, 45, 0.97)", + "rgba(237, 129, 40, 0.89)", + "rgba(245, 54, 54, 0.9)" + ], + "datasource":"prometheus - Juju generated source", + "decimals":2, + "editable":true, + "error":false, + "format":"none", + "gauge":{ + "maxValue":100, + "minValue":0, + "show":false, + "thresholdLabels":false, + "thresholdMarkers":true + }, + "gridPos":{ + "h":3, + "w":4, + "x":12, + "y":6 + }, + "height":"1px", + "id":12, + "interval":null, + "links":[ + + ], + "mappingType":1, + "mappingTypes":[ + { + 
"name":"value to text", + "value":1 + }, + { + "name":"range to text", + "value":2 + } + ], + "maxDataPoints":100, + "nullPointMode":"connected", + "nullText":null, + "postfix":" cores", + "postfixFontSize":"30%", + "prefix":"", + "prefixFontSize":"50%", + "rangeMaps":[ + { + "from":"null", + "text":"N/A", + "to":"null" + } + ], + "sparkline":{ + "fillColor":"rgba(31, 118, 189, 0.18)", + "full":false, + "lineColor":"rgb(31, 120, 193)", + "show":false + }, + "tableColumn":"", + "targets":[ + { + "expr":"sum (machine_cpu_cores{})", + "format":"time_series", + "interval":"10s", + "intervalFactor":1, + "refId":"A", + "step":300 + } + ], + "thresholds":"", + "title":"Total", + "type":"singlestat", + "valueFontSize":"50%", + "valueMaps":[ + { + "op":"=", + "text":"N/A", + "value":"null" + } + ], + "valueName":"current" + }, + { + "cacheTimeout":null, + "colorBackground":false, + "colorValue":false, + "colors":[ + "rgba(50, 172, 45, 0.97)", + "rgba(237, 129, 40, 0.89)", + "rgba(245, 54, 54, 0.9)" + ], + "datasource":"prometheus - Juju generated source", + "decimals":2, + "editable":true, + "error":false, + "format":"bytes", + "gauge":{ + "maxValue":100, + "minValue":0, + "show":false, + "thresholdLabels":false, + "thresholdMarkers":true + }, + "gridPos":{ + "h":3, + "w":4, + "x":16, + "y":6 + }, + "height":"1px", + "id":13, + "interval":null, + "links":[ + + ], + "mappingType":1, + "mappingTypes":[ + { + "name":"value to text", + "value":1 + }, + { + "name":"range to text", + "value":2 + } + ], + "maxDataPoints":100, + "nullPointMode":"connected", + "nullText":null, + "postfix":"", + "postfixFontSize":"50%", + "prefix":"", + "prefixFontSize":"50%", + "rangeMaps":[ + { + "from":"null", + "text":"N/A", + "to":"null" + } + ], + "sparkline":{ + "fillColor":"rgba(31, 118, 189, 0.18)", + "full":false, + "lineColor":"rgb(31, 120, 193)", + "show":false + }, + "tableColumn":"", + "targets":[ + { + "expr":"sum (container_fs_usage_bytes{id=\"/\"})", + "format":"time_series", + 
"interval":"10s", + "intervalFactor":1, + "legendFormat":"", + "refId":"A", + "step":300 + } + ], + "thresholds":"", + "title":"Used", + "type":"singlestat", + "valueFontSize":"50%", + "valueMaps":[ + { + "op":"=", + "text":"N/A", + "value":"null" + } + ], + "valueName":"current" + }, + { + "cacheTimeout":null, + "colorBackground":false, + "colorValue":false, + "colors":[ + "rgba(50, 172, 45, 0.97)", + "rgba(237, 129, 40, 0.89)", + "rgba(245, 54, 54, 0.9)" + ], + "datasource":"prometheus - Juju generated source", + "decimals":2, + "editable":true, + "error":false, + "format":"bytes", + "gauge":{ + "maxValue":100, + "minValue":0, + "show":false, + "thresholdLabels":false, + "thresholdMarkers":true + }, + "gridPos":{ + "h":3, + "w":4, + "x":20, + "y":6 + }, + "height":"1px", + "id":14, + "interval":null, + "links":[ + + ], + "mappingType":1, + "mappingTypes":[ + { + "name":"value to text", + "value":1 + }, + { + "name":"range to text", + "value":2 + } + ], + "maxDataPoints":100, + "nullPointMode":"connected", + "nullText":null, + "postfix":"", + "postfixFontSize":"50%", + "prefix":"", + "prefixFontSize":"50%", + "rangeMaps":[ + { + "from":"null", + "text":"N/A", + "to":"null" + } + ], + "sparkline":{ + "fillColor":"rgba(31, 118, 189, 0.18)", + "full":false, + "lineColor":"rgb(31, 120, 193)", + "show":false + }, + "tableColumn":"", + "targets":[ + { + "expr":"sum (container_fs_limit_bytes{id=\"/\"})", + "format":"time_series", + "interval":"10s", + "intervalFactor":1, + "legendFormat":"", + "refId":"A", + "step":300 + } + ], + "thresholds":"", + "title":"Total", + "type":"singlestat", + "valueFontSize":"50%", + "valueMaps":[ + { + "op":"=", + "text":"N/A", + "value":"null" + } + ], + "valueName":"current" + }, + { + "collapsed":true, + "gridPos":{ + "h":1, + "w":24, + "x":0, + "y":9 + }, + "id":50, + "panels":[ + { + "aliasColors":{ + + }, + "bars":false, + "dashLength":10, + "dashes":false, + "datasource":"prometheus - Juju generated source", + "description":"", + 
"fill":1, + "gridPos":{ + "h":7, + "w":24, + "x":0, + "y":10 + }, + "id":31, + "legend":{ + "avg":false, + "current":false, + "max":false, + "min":false, + "show":true, + "total":false, + "values":false + }, + "lines":true, + "linewidth":1, + "links":[ + + ], + "nullPointMode":"null", + "percentage":false, + "pointradius":5, + "points":false, + "renderer":"flot", + "seriesOverrides":[ + + ], + "spaceLength":10, + "stack":false, + "steppedLine":false, + "targets":[ + { + "expr":"cpu_usage_idle{cpu=\"cpu-total\",host=~\".*kubernetes-master.*\"}", + "format":"time_series", + "intervalFactor":2, + "legendFormat":"{{host}}", + "refId":"A" + } + ], + "thresholds":[ + + ], + "timeFrom":null, + "timeShift":null, + "title":"kubernetes-master CPU idle", + "tooltip":{ + "shared":true, + "sort":0, + "value_type":"individual" + }, + "type":"graph", + "xaxis":{ + "buckets":null, + "mode":"time", + "name":null, + "show":true, + "values":[ + + ] + }, + "yaxes":[ + { + "format":"percent", + "label":null, + "logBase":1, + "max":null, + "min":null, + "show":true + }, + { + "format":"short", + "label":null, + "logBase":1, + "max":null, + "min":null, + "show":true + } + ], + "yaxis":{ + "align":false, + "alignLevel":null + } + } + ], + "repeat":null, + "title":"kubernetes-master CPU idle", + "type":"row" + }, + { + "collapsed":true, + "gridPos":{ + "h":1, + "w":24, + "x":0, + "y":9 + }, + "id":51, + "panels":[ + { + "aliasColors":{ + + }, + "bars":false, + "dashLength":10, + "dashes":false, + "datasource":"prometheus - Juju generated source", + "description":"", + "fill":1, + "gridPos":{ + "h":7, + "w":24, + "x":0, + "y":10 + }, + "id":48, + "legend":{ + "avg":false, + "current":false, + "max":false, + "min":false, + "show":true, + "total":false, + "values":false + }, + "lines":true, + "linewidth":1, + "links":[ + + ], + "nullPointMode":"null", + "percentage":false, + "pointradius":5, + "points":false, + "renderer":"flot", + "seriesOverrides":[ + + ], + "spaceLength":10, + 
"stack":false, + "steppedLine":false, + "targets":[ + { + "expr":"cpu_usage_idle{cpu=\"cpu-total\",host=~\".*kubernetes-worker.*\"}", + "format":"time_series", + "intervalFactor":2, + "legendFormat":"{{host}}", + "refId":"A" + } + ], + "thresholds":[ + + ], + "timeFrom":null, + "timeShift":null, + "title":"kubernetes-worker CPU idle", + "tooltip":{ + "shared":true, + "sort":0, + "value_type":"individual" + }, + "type":"graph", + "xaxis":{ + "buckets":null, + "mode":"time", + "name":null, + "show":true, + "values":[ + + ] + }, + "yaxes":[ + { + "format":"percent", + "label":null, + "logBase":1, + "max":null, + "min":null, + "show":true + }, + { + "format":"short", + "label":null, + "logBase":1, + "max":null, + "min":null, + "show":true + } + ], + "yaxis":{ + "align":false, + "alignLevel":null + } + } + ], + "repeat":null, + "title":"kubernetes-worker CPU idle", + "type":"row" + }, + { + "collapsed":true, + "gridPos":{ + "h":1, + "w":24, + "x":0, + "y":10 + }, + "id":52, + "panels":[ + { + "aliasColors":{ + + }, + "bars":false, + "dashLength":10, + "dashes":false, + "datasource":"prometheus - Juju generated source", + "fill":1, + "gridPos":{ + "h":7, + "w":12, + "x":0, + "y":11 + }, + "id":41, + "legend":{ + "avg":false, + "current":false, + "max":false, + "min":false, + "show":false, + "total":false, + "values":false + }, + "lines":true, + "linewidth":1, + "links":[ + + ], + "nullPointMode":"null", + "percentage":false, + "pointradius":5, + "points":false, + "renderer":"flot", + "seriesOverrides":[ + + ], + "spaceLength":10, + "stack":true, + "steppedLine":false, + "targets":[ + { + "expr":"sum(rate(apiserver_request_latencies_sum{}[1m])) / sum(rate(apiserver_request_latencies_count{}[1m]))", + "format":"time_series", + "intervalFactor":2, + "legendFormat":"{{username}}", + "refId":"A", + "step":60 + } + ], + "thresholds":[ + + ], + "timeFrom":null, + "timeShift":null, + "title":"API request latency", + "tooltip":{ + "shared":true, + "sort":0, + 
"value_type":"individual" + }, + "type":"graph", + "xaxis":{ + "buckets":null, + "mode":"time", + "name":null, + "show":true, + "values":[ + + ] + }, + "yaxes":[ + { + "format":"ms", + "label":null, + "logBase":1, + "max":null, + "min":null, + "show":true + }, + { + "format":"short", + "label":null, + "logBase":1, + "max":null, + "min":null, + "show":true + } + ], + "yaxis":{ + "align":false, + "alignLevel":null + } + }, + { + "aliasColors":{ + + }, + "bars":false, + "dashLength":10, + "dashes":false, + "datasource":"prometheus - Juju generated source", + "fill":1, + "gridPos":{ + "h":7, + "w":12, + "x":12, + "y":11 + }, + "id":37, + "legend":{ + "avg":false, + "current":false, + "max":false, + "min":false, + "show":true, + "total":false, + "values":false + }, + "lines":true, + "linewidth":1, + "links":[ + + ], + "nullPointMode":"null", + "percentage":false, + "pointradius":5, + "points":false, + "renderer":"flot", + "seriesOverrides":[ + + ], + "spaceLength":10, + "stack":false, + "steppedLine":false, + "targets":[ + { + "expr":"sum(rate(apiserver_request_count{}[1m])) by (code)", + "format":"time_series", + "intervalFactor":2, + "legendFormat":"{{code}}", + "refId":"A", + "step":60 + } + ], + "thresholds":[ + + ], + "timeFrom":null, + "timeShift":null, + "title":"API server hits by code", + "tooltip":{ + "shared":true, + "sort":0, + "value_type":"individual" + }, + "type":"graph", + "xaxis":{ + "buckets":null, + "mode":"time", + "name":null, + "show":true, + "values":[ + + ] + }, + "yaxes":[ + { + "format":"short", + "label":null, + "logBase":1, + "max":null, + "min":null, + "show":true + }, + { + "format":"short", + "label":null, + "logBase":1, + "max":null, + "min":null, + "show":true + } + ], + "yaxis":{ + "align":false, + "alignLevel":null + } + }, + { + "aliasColors":{ + + }, + "bars":false, + "dashLength":10, + "dashes":false, + "datasource":"prometheus - Juju generated source", + "fill":1, + "gridPos":{ + "h":7, + "w":12, + "x":0, + "y":18 + }, + "id":38, 
+ "legend":{ + "avg":false, + "current":false, + "max":false, + "min":false, + "show":true, + "total":false, + "values":false + }, + "lines":true, + "linewidth":1, + "links":[ + + ], + "nullPointMode":"null", + "percentage":false, + "pointradius":5, + "points":false, + "renderer":"flot", + "seriesOverrides":[ + + ], + "spaceLength":10, + "stack":true, + "steppedLine":false, + "targets":[ + { + "expr":"rate(authenticated_user_requests{}[1m])", + "format":"time_series", + "intervalFactor":2, + "legendFormat":"{{username}}", + "refId":"A", + "step":60 + } + ], + "thresholds":[ + + ], + "timeFrom":null, + "timeShift":null, + "title":"Authenticated user requests", + "tooltip":{ + "shared":true, + "sort":0, + "value_type":"individual" + }, + "type":"graph", + "xaxis":{ + "buckets":null, + "mode":"time", + "name":null, + "show":true, + "values":[ + + ] + }, + "yaxes":[ + { + "format":"short", + "label":null, + "logBase":1, + "max":null, + "min":null, + "show":true + }, + { + "format":"short", + "label":null, + "logBase":1, + "max":null, + "min":null, + "show":true + } + ], + "yaxis":{ + "align":false, + "alignLevel":null + } + }, + { + "aliasColors":{ + + }, + "bars":false, + "dashLength":10, + "dashes":false, + "datasource":"prometheus - Juju generated source", + "fill":0, + "gridPos":{ + "h":7, + "w":12, + "x":12, + "y":18 + }, + "id":39, + "legend":{ + "avg":false, + "current":false, + "max":false, + "min":false, + "show":true, + "total":false, + "values":false + }, + "lines":true, + "linewidth":1, + "links":[ + + ], + "nullPointMode":"null", + "percentage":false, + "pointradius":5, + "points":false, + "renderer":"flot", + "seriesOverrides":[ + + ], + "spaceLength":10, + "stack":false, + "steppedLine":false, + "targets":[ + { + "expr":"histogram_quantile(0.95, sum(rate(apiserver_request_latencies_bucket{}[5m])) by (le,resource) )", + "format":"time_series", + "hide":false, + "intervalFactor":2, + "legendFormat":"{{resource}}", + "refId":"A", + "step":60 + } + ], + 
"thresholds":[ + + ], + "timeFrom":null, + "timeShift":null, + "title":"API request latency by resource 95th percentile", + "tooltip":{ + "shared":true, + "sort":0, + "value_type":"individual" + }, + "type":"graph", + "xaxis":{ + "buckets":null, + "mode":"time", + "name":null, + "show":true, + "values":[ + + ] + }, + "yaxes":[ + { + "format":"ms", + "label":null, + "logBase":1, + "max":null, + "min":null, + "show":true + }, + { + "format":"short", + "label":null, + "logBase":1, + "max":null, + "min":null, + "show":true + } + ], + "yaxis":{ + "align":false, + "alignLevel":null + } + }, + { + "aliasColors":{ + + }, + "bars":false, + "dashLength":10, + "dashes":false, + "datasource":"prometheus - Juju generated source", + "fill":1, + "gridPos":{ + "h":7, + "w":12, + "x":0, + "y":25 + }, + "id":40, + "legend":{ + "avg":false, + "current":false, + "max":false, + "min":false, + "show":true, + "total":false, + "values":false + }, + "lines":true, + "linewidth":1, + "links":[ + + ], + "nullPointMode":"null", + "percentage":false, + "pointradius":5, + "points":false, + "renderer":"flot", + "seriesOverrides":[ + + ], + "spaceLength":10, + "stack":false, + "steppedLine":false, + "targets":[ + { + "expr":"histogram_quantile(0.95, sum(rate(apiserver_request_latencies_bucket{}[5m])) by (le,verb) )", + "format":"time_series", + "hide":false, + "intervalFactor":2, + "legendFormat":"{{verb}}", + "refId":"A", + "step":30 + } + ], + "thresholds":[ + + ], + "timeFrom":null, + "timeShift":null, + "title":"API request latency by resource 95th percentile", + "tooltip":{ + "shared":true, + "sort":0, + "value_type":"individual" + }, + "type":"graph", + "xaxis":{ + "buckets":null, + "mode":"time", + "name":null, + "show":true, + "values":[ + + ] + }, + "yaxes":[ + { + "format":"ms", + "label":null, + "logBase":1, + "max":null, + "min":null, + "show":true + }, + { + "format":"short", + "label":null, + "logBase":1, + "max":null, + "min":null, + "show":true + } + ], + "yaxis":{ + "align":false, 
+ "alignLevel":null + } + } + ], + "repeat":null, + "title":"API server", + "type":"row" + }, + { + "collapsed":true, + "gridPos":{ + "h":1, + "w":24, + "x":0, + "y":11 + }, + "id":53, + "panels":[ + { + "aliasColors":{ + + }, + "bars":false, + "dashLength":10, + "dashes":false, + "datasource":"prometheus - Juju generated source", + "fill":1, + "gridPos":{ + "h":7, + "w":12, + "x":0, + "y":32 + }, + "id":42, + "legend":{ + "avg":false, + "current":false, + "max":false, + "min":false, + "show":true, + "total":false, + "values":false + }, + "lines":true, + "linewidth":1, + "links":[ + + ], + "nullPointMode":"null", + "percentage":false, + "pointradius":5, + "points":false, + "renderer":"flot", + "seriesOverrides":[ + + ], + "spaceLength":10, + "stack":false, + "steppedLine":false, + "targets":[ + { + "expr":"rate(admission_quota_controller_adds{}[10m])", + "format":"time_series", + "intervalFactor":2, + "legendFormat":"", + "refId":"A", + "step":60 + } + ], + "thresholds":[ + + ], + "timeFrom":null, + "timeShift":null, + "title":"Admission requests (10min avg)", + "tooltip":{ + "shared":true, + "sort":0, + "value_type":"individual" + }, + "type":"graph", + "xaxis":{ + "buckets":null, + "mode":"time", + "name":null, + "show":true, + "values":[ + + ] + }, + "yaxes":[ + { + "format":"none", + "label":null, + "logBase":1, + "max":null, + "min":null, + "show":true + }, + { + "format":"short", + "label":null, + "logBase":1, + "max":null, + "min":null, + "show":true + } + ] + }, + { + "aliasColors":{ + + }, + "bars":false, + "dashLength":10, + "dashes":false, + "datasource":"prometheus - Juju generated source", + "fill":1, + "gridPos":{ + "h":7, + "w":12, + "x":12, + "y":32 + }, + "id":44, + "legend":{ + "avg":false, + "current":false, + "max":false, + "min":false, + "show":true, + "total":false, + "values":false + }, + "lines":true, + "linewidth":1, + "links":[ + + ], + "nullPointMode":"null", + "percentage":false, + "pointradius":5, + "points":false, + "renderer":"flot", 
+ "seriesOverrides":[ + + ], + "spaceLength":10, + "stack":false, + "steppedLine":false, + "targets":[ + { + "expr":"rate(admission_quota_controller_queue_latency_sum{}[10m]) / rate(admission_quota_controller_queue_latency_count{}[10m])", + "format":"time_series", + "intervalFactor":2, + "legendFormat":"", + "refId":"A", + "step":60 + } + ], + "thresholds":[ + + ], + "timeFrom":null, + "timeShift":null, + "title":"Admission controller queue latency (10min avg)", + "tooltip":{ + "shared":true, + "sort":0, + "value_type":"individual" + }, + "type":"graph", + "xaxis":{ + "buckets":null, + "mode":"time", + "name":null, + "show":true, + "values":[ + + ] + }, + "yaxes":[ + { + "format":"ms", + "label":null, + "logBase":1, + "max":null, + "min":null, + "show":true + }, + { + "format":"short", + "label":null, + "logBase":1, + "max":null, + "min":null, + "show":true + } + ] + }, + { + "aliasColors":{ + + }, + "bars":false, + "dashLength":10, + "dashes":false, + "datasource":"prometheus - Juju generated source", + "fill":1, + "gridPos":{ + "h":7, + "w":12, + "x":0, + "y":39 + }, + "id":43, + "legend":{ + "avg":false, + "current":false, + "max":false, + "min":false, + "show":true, + "total":false, + "values":false + }, + "lines":true, + "linewidth":1, + "links":[ + + ], + "nullPointMode":"null", + "percentage":false, + "pointradius":5, + "points":false, + "renderer":"flot", + "seriesOverrides":[ + + ], + "spaceLength":10, + "stack":false, + "steppedLine":false, + "targets":[ + { + "expr":"rate(admission_quota_controller_work_duration_sum{}[10m]) / rate(admission_quota_controller_work_duration_count{}[10m])", + "format":"time_series", + "intervalFactor":2, + "legendFormat":"", + "refId":"A", + "step":60 + } + ], + "thresholds":[ + + ], + "timeFrom":null, + "timeShift":null, + "title":"Admission controller work (10min avg)", + "tooltip":{ + "shared":true, + "sort":0, + "value_type":"individual" + }, + "type":"graph", + "xaxis":{ + "buckets":null, + "mode":"time", + "name":null, 
+ "show":true, + "values":[ + + ] + }, + "yaxes":[ + { + "format":"ms", + "label":null, + "logBase":1, + "max":null, + "min":null, + "show":true + }, + { + "format":"short", + "label":null, + "logBase":1, + "max":null, + "min":null, + "show":true + } + ] + } + ], + "repeat":null, + "title":"Admission controller", + "type":"row" + }, + { + "collapsed":true, + "gridPos":{ + "h":1, + "w":24, + "x":0, + "y":12 + }, + "id":54, + "panels":[ + { + "aliasColors":{ + + }, + "bars":false, + "dashLength":10, + "dashes":false, + "datasource":"prometheus - Juju generated source", + "fill":1, + "gridPos":{ + "h":7, + "w":12, + "x":0, + "y":13 + }, + "id":36, + "legend":{ + "alignAsTable":true, + "avg":true, + "current":true, + "max":true, + "min":true, + "show":true, + "total":false, + "values":true + }, + "lines":true, + "linewidth":1, + "links":[ + + ], + "nullPointMode":"null", + "percentage":false, + "pointradius":5, + "points":false, + "renderer":"flot", + "seriesOverrides":[ + + ], + "spaceLength":10, + "stack":false, + "steppedLine":false, + "targets":[ + { + "expr":"etcd_request_cache_get_latencies_summary{}", + "format":"time_series", + "intervalFactor":2, + "legendFormat":"Quantile {{quantile}}", + "refId":"A", + "step":60 + } + ], + "thresholds":[ + + ], + "timeFrom":null, + "timeShift":null, + "title":"Cache request latencies (get)", + "tooltip":{ + "shared":true, + "sort":0, + "value_type":"individual" + }, + "type":"graph", + "xaxis":{ + "buckets":null, + "mode":"time", + "name":null, + "show":true, + "values":[ + + ] + }, + "yaxes":[ + { + "format":"ms", + "label":null, + "logBase":1, + "max":null, + "min":"0", + "show":true + }, + { + "format":"short", + "label":null, + "logBase":1, + "max":null, + "min":null, + "show":false + } + ], + "yaxis":{ + "align":false, + "alignLevel":null + } + }, + { + "aliasColors":{ + + }, + "bars":false, + "dashLength":10, + "dashes":false, + "datasource":"prometheus - Juju generated source", + "fill":1, + "gridPos":{ + "h":7, + 
"w":12, + "x":12, + "y":13 + }, + "id":35, + "legend":{ + "alignAsTable":true, + "avg":true, + "current":true, + "max":true, + "min":true, + "show":true, + "total":false, + "values":true + }, + "lines":true, + "linewidth":1, + "links":[ + + ], + "nullPointMode":"null", + "percentage":false, + "pointradius":5, + "points":false, + "renderer":"flot", + "seriesOverrides":[ + + ], + "spaceLength":10, + "stack":false, + "steppedLine":false, + "targets":[ + { + "expr":"etcd_request_cache_add_latencies_summary{}", + "format":"time_series", + "intervalFactor":2, + "legendFormat":"Quantile {{quantile}}", + "refId":"A", + "step":60 + } + ], + "thresholds":[ + + ], + "timeFrom":null, + "timeShift":null, + "title":"Cache request latencies (add)", + "tooltip":{ + "shared":true, + "sort":0, + "value_type":"individual" + }, + "type":"graph", + "xaxis":{ + "buckets":null, + "mode":"time", + "name":null, + "show":true, + "values":[ + + ] + }, + "yaxes":[ + { + "format":"ms", + "label":null, + "logBase":1, + "max":null, + "min":"0", + "show":true + }, + { + "format":"short", + "label":null, + "logBase":1, + "max":null, + "min":null, + "show":false + } + ], + "yaxis":{ + "align":false, + "alignLevel":null + } + }, + { + "aliasColors":{ + + }, + "bars":false, + "dashLength":10, + "dashes":false, + "datasource":"prometheus - Juju generated source", + "fill":1, + "gridPos":{ + "h":7, + "w":12, + "x":0, + "y":20 + }, + "id":33, + "legend":{ + "alignAsTable":true, + "avg":true, + "current":true, + "max":true, + "min":true, + "show":true, + "total":false, + "values":true + }, + "lines":true, + "linewidth":1, + "links":[ + + ], + "nullPointMode":"null", + "percentage":false, + "pointradius":5, + "points":false, + "renderer":"flot", + "seriesOverrides":[ + + ], + "spaceLength":10, + "stack":false, + "steppedLine":false, + "targets":[ + { + "expr":"etcd_helper_cache_hit_count{} / (etcd_helper_cache_miss_count{} + etcd_helper_cache_hit_count{}) * 100", + "format":"time_series", + 
"intervalFactor":2, + "legendFormat":"Hit ratio", + "refId":"A", + "step":60 + } + ], + "thresholds":[ + + ], + "timeFrom":null, + "timeShift":null, + "title":"Cache hit ratio", + "tooltip":{ + "shared":true, + "sort":0, + "value_type":"individual" + }, + "type":"graph", + "xaxis":{ + "buckets":null, + "mode":"time", + "name":null, + "show":true, + "values":[ + + ] + }, + "yaxes":[ + { + "format":"percent", + "label":null, + "logBase":1, + "max":"100", + "min":"0", + "show":true + }, + { + "format":"short", + "label":null, + "logBase":1, + "max":null, + "min":null, + "show":false + } + ], + "yaxis":{ + "align":false, + "alignLevel":null + } + }, + { + "aliasColors":{ + + }, + "bars":false, + "dashLength":10, + "dashes":false, + "datasource":"prometheus - Juju generated source", + "fill":1, + "gridPos":{ + "h":7, + "w":12, + "x":12, + "y":20 + }, + "id":34, + "legend":{ + "alignAsTable":true, + "avg":true, + "current":true, + "max":true, + "min":true, + "show":true, + "total":false, + "values":true + }, + "lines":true, + "linewidth":1, + "links":[ + + ], + "nullPointMode":"null", + "percentage":false, + "pointradius":5, + "points":false, + "renderer":"flot", + "seriesOverrides":[ + + ], + "spaceLength":10, + "stack":false, + "steppedLine":false, + "targets":[ + { + "expr":"sum(rate(etcd_request_latencies_summary_sum{}[1m])) by (operation) / sum(rate(etcd_request_latencies_summary_count{}[1m])) by (operation)", + "format":"time_series", + "intervalFactor":2, + "legendFormat":"{{operation}}", + "refId":"A", + "step":60 + } + ], + "thresholds":[ + + ], + "timeFrom":null, + "timeShift":null, + "title":"Average cache request latencies", + "tooltip":{ + "shared":true, + "sort":0, + "value_type":"individual" + }, + "type":"graph", + "xaxis":{ + "buckets":null, + "mode":"time", + "name":null, + "show":true, + "values":[ + + ] + }, + "yaxes":[ + { + "format":"ms", + "label":null, + "logBase":1, + "max":null, + "min":"0", + "show":true + }, + { + "format":"short", + 
"label":null, + "logBase":1, + "max":null, + "min":null, + "show":false + } + ], + "yaxis":{ + "align":false, + "alignLevel":null + } + } + ], + "repeat":null, + "title":"Etcd", + "type":"row" + }, + { + "collapsed":true, + "gridPos":{ + "h":1, + "w":24, + "x":0, + "y":13 + }, + "id":55, + "panels":[ + { + "aliasColors":{ + + }, + "bars":false, + "dashLength":10, + "dashes":false, + "datasource":"prometheus - Juju generated source", + "decimals":3, + "editable":true, + "error":false, + "fill":0, + "grid":{ + + }, + "gridPos":{ + "h":7, + "w":24, + "x":0, + "y":48 + }, + "height":"", + "id":17, + "legend":{ + "alignAsTable":true, + "avg":true, + "current":true, + "max":false, + "min":false, + "rightSide":true, + "show":true, + "sort":"current", + "sortDesc":true, + "total":false, + "values":true + }, + "lines":true, + "linewidth":2, + "links":[ + + ], + "nullPointMode":"connected", + "percentage":false, + "pointradius":5, + "points":false, + "renderer":"flot", + "seriesOverrides":[ + + ], + "spaceLength":10, + "stack":false, + "steppedLine":true, + "targets":[ + { + "expr":"sum(rate (container_cpu_usage_seconds_total{image!=\"\",container=~\"$container\",namespace=~\"$namespace\"}[1m])) by (pod)", + "format":"time_series", + "interval":"10s", + "intervalFactor":1, + "legendFormat":"{{pod}}", + "metric":"container_cpu", + "refId":"A", + "step":15 + } + ], + "thresholds":[ + + ], + "timeFrom":null, + "timeShift":null, + "title":"Pods CPU usage (1m avg)", + "tooltip":{ + "msResolution":true, + "shared":true, + "sort":2, + "value_type":"cumulative" + }, + "transparent":false, + "type":"graph", + "xaxis":{ + "buckets":null, + "mode":"time", + "name":null, + "show":true, + "values":[ + + ] + }, + "yaxes":[ + { + "format":"none", + "label":"cores", + "logBase":1, + "max":null, + "min":null, + "show":true + }, + { + "format":"short", + "label":null, + "logBase":1, + "max":null, + "min":null, + "show":false + } + ] + } + ], + "repeat":null, + "title":"Pods CPU usage", + 
"type":"row" + }, + { + "collapsed":true, + "gridPos":{ + "h":1, + "w":24, + "x":0, + "y":14 + }, + "id":56, + "panels":[ + { + "aliasColors":{ + + }, + "bars":false, + "dashLength":10, + "dashes":false, + "datasource":"prometheus - Juju generated source", + "decimals":3, + "description":"", + "editable":true, + "error":false, + "fill":0, + "grid":{ + + }, + "gridPos":{ + "h":7, + "w":24, + "x":0, + "y":49 + }, + "height":"", + "id":24, + "legend":{ + "alignAsTable":true, + "avg":true, + "current":true, + "hideEmpty":false, + "hideZero":false, + "max":false, + "min":false, + "rightSide":true, + "show":true, + "sideWidth":null, + "sort":"current", + "sortDesc":true, + "total":false, + "values":true + }, + "lines":true, + "linewidth":2, + "links":[ + + ], + "nullPointMode":"connected", + "percentage":false, + "pointradius":5, + "points":false, + "renderer":"flot", + "seriesOverrides":[ + + ], + "spaceLength":10, + "stack":false, + "steppedLine":true, + "targets":[ + { + "expr":"sum(rate (container_cpu_usage_seconds_total{image!=\"\",container!=\"POD\",container=~\"$container\",namespace=~\"$namespace\"}[1m])) without (cpu)", + "format":"time_series", + "hide":false, + "interval":"10s", + "intervalFactor":1, + "legendFormat":"{{container}} (pod: {{pod}}", + "metric":"container_cpu", + "refId":"A", + "step":15 + } + ], + "thresholds":[ + + ], + "timeFrom":null, + "timeShift":null, + "title":"Container CPU usage (1m avg)", + "tooltip":{ + "msResolution":true, + "shared":true, + "sort":2, + "value_type":"cumulative" + }, + "type":"graph", + "xaxis":{ + "buckets":null, + "mode":"time", + "name":null, + "show":true, + "values":[ + + ] + }, + "yaxes":[ + { + "format":"none", + "label":"cores", + "logBase":1, + "max":null, + "min":null, + "show":true + }, + { + "format":"short", + "label":null, + "logBase":1, + "max":null, + "min":null, + "show":false + } + ] + } + ], + "repeat":null, + "title":"Containers CPU usage", + "type":"row" + }, + { + "collapsed":true, + "gridPos":{ 
+ "h":1, + "w":24, + "x":0, + "y":15 + }, + "id":57, + "panels":[ + { + "aliasColors":{ + + }, + "bars":false, + "dashLength":10, + "dashes":false, + "datasource":"prometheus - Juju generated source", + "decimals":3, + "editable":true, + "error":false, + "fill":0, + "grid":{ + + }, + "gridPos":{ + "h":7, + "w":24, + "x":0, + "y":50 + }, + "height":"", + "id":23, + "legend":{ + "alignAsTable":true, + "avg":true, + "current":true, + "max":false, + "min":false, + "rightSide":true, + "show":true, + "sort":"current", + "sortDesc":true, + "total":false, + "values":true + }, + "lines":true, + "linewidth":2, + "links":[ + + ], + "nullPointMode":"connected", + "percentage":false, + "pointradius":5, + "points":false, + "renderer":"flot", + "seriesOverrides":[ + + ], + "spaceLength":10, + "stack":false, + "steppedLine":true, + "targets":[ + { + "expr":"sum (rate (container_cpu_usage_seconds_total{id=~\".*systemd.*service$\"}[1m])) by (id)", + "format":"time_series", + "hide":false, + "interval":"10s", + "intervalFactor":1, + "legendFormat":"{{id}}", + "metric":"container_cpu", + "refId":"A", + "step":15 + } + ], + "thresholds":[ + + ], + "timeFrom":null, + "timeShift":null, + "title":"System services CPU usage (1m avg)", + "tooltip":{ + "msResolution":true, + "shared":true, + "sort":2, + "value_type":"cumulative" + }, + "type":"graph", + "xaxis":{ + "buckets":null, + "mode":"time", + "name":null, + "show":true, + "values":[ + + ] + }, + "yaxes":[ + { + "format":"none", + "label":"cores", + "logBase":1, + "max":null, + "min":null, + "show":true + }, + { + "format":"short", + "label":null, + "logBase":1, + "max":null, + "min":null, + "show":false + } + ] + } + ], + "repeat":null, + "title":"System services CPU usage", + "type":"row" + }, + { + "collapsed":true, + "gridPos":{ + "h":1, + "w":24, + "x":0, + "y":16 + }, + "id":58, + "panels":[ + { + "aliasColors":{ + + }, + "bars":false, + "dashLength":10, + "dashes":false, + "datasource":"prometheus - Juju generated source", + 
"decimals":3, + "editable":true, + "error":false, + "fill":0, + "grid":{ + + }, + "gridPos":{ + "h":13, + "w":24, + "x":0, + "y":51 + }, + "id":20, + "legend":{ + "alignAsTable":true, + "avg":true, + "current":true, + "max":false, + "min":false, + "rightSide":false, + "show":true, + "sort":"current", + "sortDesc":true, + "total":false, + "values":true + }, + "lines":true, + "linewidth":2, + "links":[ + + ], + "nullPointMode":"connected", + "percentage":false, + "pointradius":5, + "points":false, + "renderer":"flot", + "seriesOverrides":[ + + ], + "spaceLength":10, + "stack":false, + "steppedLine":true, + "targets":[ + { + "expr":"sum (rate (container_cpu_usage_seconds_total{id!=\"/\"}[1m])) by (id)", + "format":"time_series", + "hide":false, + "interval":"10s", + "intervalFactor":1, + "legendFormat":"{{id}}", + "metric":"container_cpu", + "refId":"A", + "step":15 + } + ], + "thresholds":[ + + ], + "timeFrom":null, + "timeShift":null, + "title":"All processes CPU usage (1m avg)", + "tooltip":{ + "msResolution":true, + "shared":true, + "sort":2, + "value_type":"cumulative" + }, + "type":"graph", + "xaxis":{ + "buckets":null, + "mode":"time", + "name":null, + "show":true, + "values":[ + + ] + }, + "yaxes":[ + { + "format":"none", + "label":"cores", + "logBase":1, + "max":null, + "min":null, + "show":true + }, + { + "format":"short", + "label":null, + "logBase":1, + "max":null, + "min":null, + "show":false + } + ] + } + ], + "repeat":null, + "title":"All processes CPU usage", + "type":"row" + }, + { + "collapsed":true, + "gridPos":{ + "h":1, + "w":24, + "x":0, + "y":17 + }, + "id":59, + "panels":[ + { + "aliasColors":{ + + }, + "bars":false, + "dashLength":10, + "dashes":false, + "datasource":"prometheus - Juju generated source", + "decimals":2, + "editable":true, + "error":false, + "fill":0, + "grid":{ + + }, + "gridPos":{ + "h":7, + "w":24, + "x":0, + "y":52 + }, + "id":25, + "legend":{ + "alignAsTable":true, + "avg":true, + "current":true, + "max":false, + 
"min":false, + "rightSide":true, + "show":true, + "sideWidth":200, + "sort":"current", + "sortDesc":true, + "total":false, + "values":true + }, + "lines":true, + "linewidth":2, + "links":[ + + ], + "nullPointMode":"connected", + "percentage":false, + "pointradius":5, + "points":false, + "renderer":"flot", + "seriesOverrides":[ + + ], + "spaceLength":10, + "stack":false, + "steppedLine":true, + "targets":[ + { + "expr":"sum (container_memory_working_set_bytes{image!=\"\",container=~\"$container\",namespace=~\"$namespace\"}) by (pod)", + "format":"time_series", + "interval":"10s", + "intervalFactor":1, + "legendFormat":"{{pod}}", + "metric":"container_memory_usage:sort_desc", + "refId":"A", + "step":10 + } + ], + "thresholds":[ + + ], + "timeFrom":null, + "timeShift":null, + "title":"Pods memory usage", + "tooltip":{ + "msResolution":false, + "shared":true, + "sort":2, + "value_type":"cumulative" + }, + "type":"graph", + "xaxis":{ + "buckets":null, + "mode":"time", + "name":null, + "show":true, + "values":[ + + ] + }, + "yaxes":[ + { + "format":"bytes", + "label":null, + "logBase":1, + "max":null, + "min":null, + "show":true + }, + { + "format":"short", + "label":null, + "logBase":1, + "max":null, + "min":null, + "show":false + } + ] + } + ], + "repeat":null, + "title":"Pods memory usage", + "type":"row" + }, + { + "collapsed":true, + "gridPos":{ + "h":1, + "w":24, + "x":0, + "y":18 + }, + "id":60, + "panels":[ + { + "aliasColors":{ + + }, + "bars":false, + "dashLength":10, + "dashes":false, + "datasource":"prometheus - Juju generated source", + "decimals":2, + "editable":true, + "error":false, + "fill":0, + "grid":{ + + }, + "gridPos":{ + "h":7, + "w":24, + "x":0, + "y":53 + }, + "id":27, + "legend":{ + "alignAsTable":true, + "avg":true, + "current":true, + "max":false, + "min":false, + "rightSide":true, + "show":true, + "sideWidth":200, + "sort":"current", + "sortDesc":true, + "total":false, + "values":true + }, + "lines":true, + "linewidth":2, + "links":[ + + ], + 
"nullPointMode":"connected", + "percentage":false, + "pointradius":5, + "points":false, + "renderer":"flot", + "seriesOverrides":[ + + ], + "spaceLength":10, + "stack":false, + "steppedLine":true, + "targets":[ + { + "expr":"sum (container_memory_working_set_bytes{image!=\"\",container!=\"POD\",container=~\"$container\",namespace=~\"$namespace\"}) by (container, pod)", + "format":"time_series", + "interval":"10s", + "intervalFactor":1, + "legendFormat":"{{container}} (pod: {{pod}})", + "metric":"container_memory_usage:sort_desc", + "refId":"A", + "step":10 + } + ], + "thresholds":[ + + ], + "timeFrom":null, + "timeShift":null, + "title":"Containers memory usage", + "tooltip":{ + "msResolution":false, + "shared":true, + "sort":2, + "value_type":"cumulative" + }, + "type":"graph", + "xaxis":{ + "buckets":null, + "mode":"time", + "name":null, + "show":true, + "values":[ + + ] + }, + "yaxes":[ + { + "format":"bytes", + "label":null, + "logBase":1, + "max":null, + "min":null, + "show":true + }, + { + "format":"short", + "label":null, + "logBase":1, + "max":null, + "min":null, + "show":false + } + ] + } + ], + "repeat":null, + "title":"Containers memory usage", + "type":"row" + }, + { + "collapsed":true, + "gridPos":{ + "h":1, + "w":24, + "x":0, + "y":19 + }, + "id":61, + "panels":[ + { + "aliasColors":{ + + }, + "bars":false, + "dashLength":10, + "dashes":false, + "datasource":"prometheus - Juju generated source", + "decimals":2, + "editable":true, + "error":false, + "fill":0, + "grid":{ + + }, + "gridPos":{ + "h":7, + "w":24, + "x":0, + "y":54 + }, + "id":26, + "legend":{ + "alignAsTable":true, + "avg":true, + "current":true, + "max":false, + "min":false, + "rightSide":true, + "show":true, + "sideWidth":200, + "sort":"current", + "sortDesc":true, + "total":false, + "values":true + }, + "lines":true, + "linewidth":2, + "links":[ + + ], + "nullPointMode":"connected", + "percentage":false, + "pointradius":5, + "points":false, + "renderer":"flot", + "seriesOverrides":[ + + 
], + "spaceLength":10, + "stack":false, + "steppedLine":true, + "targets":[ + { + "expr":"sum (container_memory_working_set_bytes{id=~\".*systemd.*service$\"}) by (id)", + "format":"time_series", + "interval":"10s", + "intervalFactor":1, + "legendFormat":"{{id}}", + "metric":"container_memory_usage:sort_desc", + "refId":"A", + "step":10 + } + ], + "thresholds":[ + + ], + "timeFrom":null, + "timeShift":null, + "title":"System services memory usage", + "tooltip":{ + "msResolution":false, + "shared":true, + "sort":2, + "value_type":"cumulative" + }, + "type":"graph", + "xaxis":{ + "buckets":null, + "mode":"time", + "name":null, + "show":true, + "values":[ + + ] + }, + "yaxes":[ + { + "format":"bytes", + "label":null, + "logBase":1, + "max":null, + "min":null, + "show":true + }, + { + "format":"short", + "label":null, + "logBase":1, + "max":null, + "min":null, + "show":false + } + ] + } + ], + "repeat":null, + "title":"System services memory usage", + "type":"row" + }, + { + "collapsed":true, + "gridPos":{ + "h":1, + "w":24, + "x":0, + "y":20 + }, + "id":62, + "panels":[ + { + "aliasColors":{ + + }, + "bars":false, + "dashLength":10, + "dashes":false, + "datasource":"prometheus - Juju generated source", + "decimals":2, + "editable":true, + "error":false, + "fill":0, + "grid":{ + + }, + "gridPos":{ + "h":13, + "w":24, + "x":0, + "y":55 + }, + "id":28, + "legend":{ + "alignAsTable":true, + "avg":true, + "current":true, + "max":false, + "min":false, + "rightSide":false, + "show":true, + "sideWidth":200, + "sort":"current", + "sortDesc":true, + "total":false, + "values":true + }, + "lines":true, + "linewidth":2, + "links":[ + + ], + "nullPointMode":"connected", + "percentage":false, + "pointradius":5, + "points":false, + "renderer":"flot", + "seriesOverrides":[ + + ], + "spaceLength":10, + "stack":false, + "steppedLine":true, + "targets":[ + { + "expr":"sum (container_memory_working_set_bytes{id!=\"/\"}) by (id)", + "format":"time_series", + "interval":"10s", + 
"intervalFactor":1, + "legendFormat":"{{id}}", + "metric":"container_memory_usage:sort_desc", + "refId":"A", + "step":10 + } + ], + "thresholds":[ + + ], + "timeFrom":null, + "timeShift":null, + "title":"All processes memory usage", + "tooltip":{ + "msResolution":false, + "shared":true, + "sort":2, + "value_type":"cumulative" + }, + "type":"graph", + "xaxis":{ + "buckets":null, + "mode":"time", + "name":null, + "show":true, + "values":[ + + ] + }, + "yaxes":[ + { + "format":"bytes", + "label":null, + "logBase":1, + "max":null, + "min":null, + "show":true + }, + { + "format":"short", + "label":null, + "logBase":1, + "max":null, + "min":null, + "show":false + } + ] + } + ], + "repeat":null, + "title":"All processes memory usage", + "type":"row" + }, + { + "collapsed":true, + "gridPos":{ + "h":1, + "w":24, + "x":0, + "y":21 + }, + "id":63, + "panels":[ + { + "aliasColors":{ + + }, + "bars":false, + "dashLength":10, + "dashes":false, + "datasource":"prometheus - Juju generated source", + "decimals":2, + "editable":true, + "error":false, + "fill":1, + "grid":{ + + }, + "gridPos":{ + "h":10, + "w":24, + "x":0, + "y":22 + }, + "height":"400px", + "id":32, + "legend":{ + "alignAsTable":true, + "avg":false, + "current":true, + "max":true, + "min":true, + "rightSide":false, + "show":true, + "sideWidth":200, + "sort":"current", + "sortDesc":true, + "total":false, + "values":true + }, + "lines":true, + "linewidth":2, + "links":[ + + ], + "nullPointMode":"connected", + "percentage":false, + "pointradius":5, + "points":false, + "renderer":"flot", + "seriesOverrides":[ + + ], + "spaceLength":10, + "stack":false, + "steppedLine":false, + "targets":[ + { + "expr":"sum (rate (container_network_receive_bytes_total{}[1m]))", + "format":"time_series", + "interval":"10s", + "intervalFactor":1, + "legendFormat":"Received", + "metric":"network", + "refId":"A", + "step":15 + }, + { + "expr":"- sum (rate (container_network_transmit_bytes_total{}[1m]))", + "format":"time_series", + 
"interval":"10s", + "intervalFactor":1, + "legendFormat":"Sent", + "metric":"network", + "refId":"B", + "step":15 + } + ], + "thresholds":[ + + ], + "timeFrom":null, + "timeShift":null, + "title":"Network I/O pressure", + "tooltip":{ + "msResolution":false, + "shared":true, + "sort":0, + "value_type":"cumulative" + }, + "transparent":false, + "type":"graph", + "xaxis":{ + "buckets":null, + "mode":"time", + "name":null, + "show":true, + "values":[ + + ] + }, + "yaxes":[ + { + "format":"Bps", + "label":null, + "logBase":1, + "max":null, + "min":null, + "show":true + }, + { + "format":"Bps", + "label":null, + "logBase":1, + "max":null, + "min":null, + "show":false + } + ], + "yaxis":{ + "align":false, + "alignLevel":null + } + } + ], + "repeat":null, + "title":"Network I/O", + "type":"row" + }, + { + "collapsed":true, + "gridPos":{ + "h":1, + "w":24, + "x":0, + "y":22 + }, + "id":64, + "panels":[ + { + "aliasColors":{ + + }, + "bars":false, + "dashLength":10, + "dashes":false, + "datasource":"prometheus - Juju generated source", + "decimals":2, + "editable":true, + "error":false, + "fill":1, + "grid":{ + + }, + "gridPos":{ + "h":7, + "w":24, + "x":0, + "y":23 + }, + "id":16, + "legend":{ + "alignAsTable":true, + "avg":true, + "current":true, + "max":false, + "min":false, + "rightSide":true, + "show":true, + "sideWidth":200, + "sort":"current", + "sortDesc":true, + "total":false, + "values":true + }, + "lines":true, + "linewidth":2, + "links":[ + + ], + "nullPointMode":"connected", + "percentage":false, + "pointradius":5, + "points":false, + "renderer":"flot", + "seriesOverrides":[ + + ], + "spaceLength":10, + "stack":false, + "steppedLine":false, + "targets":[ + { + "expr":"sum (rate (container_network_receive_bytes_total{image!=\"\",pod=~\"$container.*\",namespace=~\"$namespace\"}[1m])) by (pod)", + "format":"time_series", + "interval":"10s", + "intervalFactor":1, + "legendFormat":"-> {{pod}}", + "metric":"network", + "refId":"A", + "step":15 + }, + { + "expr":"- sum 
(rate (container_network_transmit_bytes_total{image!=\"\",pod=~\"$container.*\",namespace=~\"$namespace\"}[1m])) by (pod)", + "format":"time_series", + "hide":false, + "interval":"10s", + "intervalFactor":1, + "legendFormat":"<- {{pod}}", + "metric":"network", + "refId":"B", + "step":15 + } + ], + "thresholds":[ + + ], + "timeFrom":null, + "timeShift":null, + "title":"Pods network I/O (1m avg)", + "tooltip":{ + "msResolution":false, + "shared":true, + "sort":2, + "value_type":"cumulative" + }, + "type":"graph", + "xaxis":{ + "buckets":null, + "mode":"time", + "name":null, + "show":true, + "values":[ + + ] + }, + "yaxes":[ + { + "format":"Bps", + "label":null, + "logBase":1, + "max":null, + "min":null, + "show":true + }, + { + "format":"short", + "label":null, + "logBase":1, + "max":null, + "min":null, + "show":false + } + ], + "yaxis":{ + "align":false, + "alignLevel":null + } + } + ], + "repeat":null, + "title":"Pods network I/O", + "type":"row" + }, + { + "collapsed":true, + "gridPos":{ + "h":1, + "w":24, + "x":0, + "y":23 + }, + "id":65, + "panels":[ + { + "aliasColors":{ + + }, + "bars":false, + "dashLength":10, + "dashes":false, + "datasource":"prometheus - Juju generated source", + "decimals":2, + "editable":true, + "error":false, + "fill":1, + "grid":{ + + }, + "gridPos":{ + "h":13, + "w":24, + "x":0, + "y":24 + }, + "id":29, + "legend":{ + "alignAsTable":true, + "avg":true, + "current":true, + "max":false, + "min":false, + "rightSide":false, + "show":true, + "sideWidth":200, + "sort":"current", + "sortDesc":true, + "total":false, + "values":true + }, + "lines":true, + "linewidth":2, + "links":[ + + ], + "nullPointMode":"connected", + "percentage":false, + "pointradius":5, + "points":false, + "renderer":"flot", + "seriesOverrides":[ + + ], + "spaceLength":10, + "stack":false, + "steppedLine":false, + "targets":[ + { + "expr":"sum (rate (container_network_receive_bytes_total{id!=\"/\"}[1m])) by (id)", + "format":"time_series", + "hide":false, + 
"interval":"10s", + "intervalFactor":1, + "legendFormat":"-> {{id}}", + "metric":"network", + "refId":"A", + "step":15 + }, + { + "expr":"- sum (rate (container_network_transmit_bytes_total{id!=\"/\"}[1m])) by (id)", + "format":"time_series", + "hide":false, + "interval":"10s", + "intervalFactor":1, + "legendFormat":"<- {{id}}", + "metric":"network", + "refId":"B", + "step":15 + } + ], + "thresholds":[ + + ], + "timeFrom":null, + "timeShift":null, + "title":"All processes network I/O (1m avg)", + "tooltip":{ + "msResolution":false, + "shared":true, + "sort":2, + "value_type":"cumulative" + }, + "type":"graph", + "xaxis":{ + "buckets":null, + "mode":"time", + "name":null, + "show":true, + "values":[ + + ] + }, + "yaxes":[ + { + "format":"Bps", + "label":null, + "logBase":1, + "max":null, + "min":null, + "show":true + }, + { + "format":"short", + "label":null, + "logBase":1, + "max":null, + "min":null, + "show":false + } + ], + "yaxis":{ + "align":false, + "alignLevel":null + } + } + ], + "repeat":null, + "title":"All processes network I/O", + "type":"row" + }, + { + "collapsed":true, + "gridPos":{ + "h":1, + "w":24, + "x":0, + "y":24 + }, + "id":66, + "panels":[ + { + "aliasColors":{ + + }, + "bars":false, + "dashLength":10, + "dashes":false, + "datasource":"prometheus - Juju generated source", + "decimals":3, + "description":"", + "editable":true, + "error":false, + "fill":0, + "grid":{ + + }, + "gridPos":{ + "h":7, + "w":8, + "x":0, + "y":59 + }, + "height":"", + "id":45, + "legend":{ + "alignAsTable":true, + "avg":false, + "current":true, + "hideEmpty":false, + "hideZero":false, + "max":false, + "min":false, + "rightSide":true, + "show":true, + "sideWidth":null, + "sort":"current", + "sortDesc":true, + "total":false, + "values":true + }, + "lines":true, + "linewidth":2, + "links":[ + + ], + "nullPointMode":"connected", + "percentage":false, + "pointradius":5, + "points":false, + "renderer":"flot", + "seriesOverrides":[ + + ], + "spaceLength":10, + "stack":false, 
+ "steppedLine":true, + "targets":[ + { + "expr":"sum by (container) (container_fs_usage_bytes{id!=\"/\"})", + "format":"time_series", + "hide":false, + "interval":"10s", + "intervalFactor":1, + "legendFormat":"{{container}}", + "metric":"container_cpu", + "refId":"A", + "step":15 + } + ], + "thresholds":[ + + ], + "timeFrom":null, + "timeShift":null, + "title":"Total Bytes Used By Containers", + "tooltip":{ + "msResolution":true, + "shared":true, + "sort":2, + "value_type":"cumulative" + }, + "type":"graph", + "xaxis":{ + "buckets":null, + "mode":"time", + "name":null, + "show":true, + "values":[ + + ] + }, + "yaxes":[ + { + "decimals":null, + "format":"bytes", + "label":"Bytes", + "logBase":1, + "max":null, + "min":null, + "show":true + }, + { + "format":"short", + "label":null, + "logBase":1, + "max":null, + "min":null, + "show":false + } + ] + }, + { + "aliasColors":{ + + }, + "bars":false, + "dashLength":10, + "dashes":false, + "datasource":"prometheus - Juju generated source", + "decimals":3, + "description":"", + "editable":true, + "error":false, + "fill":0, + "grid":{ + + }, + "gridPos":{ + "h":7, + "w":8, + "x":8, + "y":59 + }, + "height":"", + "id":46, + "legend":{ + "alignAsTable":true, + "avg":false, + "current":true, + "hideEmpty":false, + "hideZero":false, + "max":false, + "min":false, + "rightSide":true, + "show":true, + "sideWidth":null, + "sort":"current", + "sortDesc":true, + "total":false, + "values":true + }, + "lines":true, + "linewidth":2, + "links":[ + + ], + "nullPointMode":"connected", + "percentage":false, + "pointradius":5, + "points":false, + "renderer":"flot", + "seriesOverrides":[ + + ], + "spaceLength":10, + "stack":false, + "steppedLine":true, + "targets":[ + { + "expr":"sum by (container) (container_fs_usage_bytes{id!=\"/\"}) / on (environment) group_left(id) sum (container_fs_usage_bytes{id=\"/\"}) * 100", + "format":"time_series", + "hide":false, + "interval":"10s", + "intervalFactor":1, + "legendFormat":"{{container}}", + 
"metric":"container_cpu", + "refId":"A", + "step":15 + } + ], + "thresholds":[ + + ], + "timeFrom":null, + "timeShift":null, + "title":"Current Usage Breakdown By Container", + "tooltip":{ + "msResolution":true, + "shared":true, + "sort":2, + "value_type":"cumulative" + }, + "type":"graph", + "xaxis":{ + "buckets":null, + "mode":"time", + "name":null, + "show":true, + "values":[ + + ] + }, + "yaxes":[ + { + "decimals":null, + "format":"percent", + "label":"Percent", + "logBase":1, + "max":null, + "min":null, + "show":true + }, + { + "format":"short", + "label":null, + "logBase":1, + "max":null, + "min":null, + "show":false + } + ] + }, + { + "cacheTimeout":null, + "colorBackground":false, + "colorValue":false, + "colors":[ + "#299c46", + "rgba(237, 129, 40, 0.89)", + "#d44a3a" + ], + "datasource":"prometheus - Juju generated source", + "decimals":2, + "format":"bytes", + "gauge":{ + "maxValue":100, + "minValue":0, + "show":false, + "thresholdLabels":false, + "thresholdMarkers":true + }, + "gridPos":{ + "h":7, + "w":8, + "x":16, + "y":59 + }, + "id":47, + "interval":null, + "links":[ + + ], + "mappingType":1, + "mappingTypes":[ + { + "name":"value to text", + "value":1 + }, + { + "name":"range to text", + "value":2 + } + ], + "maxDataPoints":100, + "nullPointMode":"connected", + "nullText":null, + "postfix":"", + "postfixFontSize":"50%", + "prefix":"", + "prefixFontSize":"50%", + "rangeMaps":[ + { + "from":"null", + "text":"N/A", + "to":"null" + } + ], + "sparkline":{ + "fillColor":"rgba(31, 118, 189, 0.18)", + "full":false, + "lineColor":"rgb(31, 120, 193)", + "show":false + }, + "tableColumn":"", + "targets":[ + { + "expr":"sum(container_fs_usage_bytes{id=\"/\"}) - sum(container_fs_usage_bytes{id!=\"/\"})", + "format":"time_series", + "instant":false, + "intervalFactor":2, + "refId":"A" + } + ], + "thresholds":"", + "title":"Total Bytes Not Used By Containers", + "transparent":false, + "type":"singlestat", + "valueFontSize":"80%", + "valueMaps":[ + { + "op":"=", + 
"text":"N/A", + "value":"null" + } + ], + "valueName":"avg" + } + ], + "repeat":null, + "title":"Container Disk Utilisation", + "type":"row" + } + ], + "schemaVersion":16, + "style":"dark", + "tags":[ + "Juju", + "Kubernetes" + ], + "templating":{ + "list":[ + { + "allValue":".*", + "current":{ + + }, + "datasource":"prometheus - Juju generated source", + "hide":0, + "includeAll":true, + "label":null, + "multi":false, + "name":"namespace", + "options":[ + + ], + "query":"label_values(container_memory_usage_bytes{namespace=~\".+\",container!=\"POD\"},namespace)", + "refresh":1, + "regex":"", + "sort":1, + "tagValuesQuery":"", + "tags":[ + + ], + "tagsQuery":"", + "type":"query", + "useTags":false + }, + { + "allValue":".*", + "current":{ + + }, + "datasource":"prometheus - Juju generated source", + "hide":0, + "includeAll":true, + "label":null, + "multi":false, + "name":"container", + "options":[ + + ], + "query":"label_values(container_memory_usage_bytes{namespace=~\"$namespace\",container!=\"POD\"},container)", + "refresh":1, + "regex":"", + "sort":1, + "tagValuesQuery":"", + "tags":[ + + ], + "tagsQuery":"", + "type":"query", + "useTags":false + } + ] + }, + "time":{ + "from":"now-6h", + "to":"now" + }, + "timepicker":{ + "refresh_intervals":[ + "5s", + "10s", + "30s", + "1m", + "5m", + "15m", + "30m", + "1h", + "2h", + "1d" + ], + "time_options":[ + "5m", + "15m", + "1h", + "6h", + "12h", + "24h", + "2d", + "7d", + "30d" + ] + }, + "timezone":"utc", + "title":"Charmed Kubernetes Dashboard", + "version":1 + } +} \ No newline at end of file diff --git a/kubernetes-control-plane/templates/grafana/conditional/prometheus.json b/kubernetes-control-plane/templates/grafana/conditional/prometheus.json new file mode 100644 index 0000000..35c428a --- /dev/null +++ b/kubernetes-control-plane/templates/grafana/conditional/prometheus.json @@ -0,0 +1,2186 @@ +{ + "dashboard": { + "annotations": { + "list": [ + { + "builtIn": 1, + "datasource": "-- Grafana --", + "enable": 
true, + "hide": true, + "iconColor": "rgba(0, 211, 255, 1)", + "name": "Annotations & Alerts", + "type": "dashboard" + } + ] + }, + "description": "Derived from https://grafana.com/dashboards/315", + "editable": true, + "gnetId": null, + "graphTooltip": 0, + "id": null, + "iteration": 1554419177157, + "links": [ + + ], + "panels": [ + { + "aliasColors": { + + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "prometheus - Juju generated source", + "decimals": 2, + "editable": true, + "error": false, + "fill": 1, + "grid": { + + }, + "gridPos": { + "h": 5, + "w": 24, + "x": 0, + "y": 0 + }, + "height": "200px", + "id": 32, + "legend": { + "alignAsTable": false, + "avg": true, + "current": true, + "max": false, + "min": false, + "rightSide": false, + "show": false, + "sideWidth": 200, + "sort": "current", + "sortDesc": true, + "total": false, + "values": true + }, + "lines": true, + "linewidth": 2, + "links": [ + + ], + "nullPointMode": "connected", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [ + + ], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "sum (rate (container_network_receive_bytes_total{kubernetes_io_hostname=~\"^$Node$\",namespace=~\"^$Namespace$\"}[1m]))", + "format": "time_series", + "interval": "10s", + "intervalFactor": 1, + "legendFormat": "Received", + "metric": "network", + "refId": "A", + "step": 10 + }, + { + "expr": "- sum (rate (container_network_transmit_bytes_total{kubernetes_io_hostname=~\"^$Node$\",namespace=~\"^$Namespace$\"}[1m]))", + "format": "time_series", + "interval": "10s", + "intervalFactor": 1, + "legendFormat": "Sent", + "metric": "network", + "refId": "B", + "step": 10 + } + ], + "thresholds": [ + + ], + "timeFrom": null, + "timeRegions": [ + + ], + "timeShift": null, + "title": "Network I/O pressure", + "tooltip": { + "msResolution": false, + "shared": true, + "sort": 0, + "value_type": "cumulative" 
+ }, + "transparent": false, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [ + + ] + }, + "yaxes": [ + { + "format": "Bps", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "Bps", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "cacheTimeout": null, + "colorBackground": false, + "colorValue": true, + "colors": [ + "rgba(50, 172, 45, 0.97)", + "rgba(237, 129, 40, 0.89)", + "rgba(245, 54, 54, 0.9)" + ], + "datasource": "prometheus - Juju generated source", + "editable": true, + "error": false, + "format": "percent", + "gauge": { + "maxValue": 100, + "minValue": 0, + "show": true, + "thresholdLabels": false, + "thresholdMarkers": true + }, + "gridPos": { + "h": 5, + "w": 8, + "x": 0, + "y": 5 + }, + "height": "180px", + "id": 4, + "interval": null, + "links": [ + + ], + "mappingType": 1, + "mappingTypes": [ + { + "name": "value to text", + "value": 1 + }, + { + "name": "range to text", + "value": 2 + } + ], + "maxDataPoints": 100, + "nullPointMode": "connected", + "nullText": null, + "postfix": "", + "postfixFontSize": "50%", + "prefix": "", + "prefixFontSize": "50%", + "rangeMaps": [ + { + "from": "null", + "text": "N/A", + "to": "null" + } + ], + "sparkline": { + "fillColor": "rgba(31, 118, 189, 0.18)", + "full": false, + "lineColor": "rgb(31, 120, 193)", + "show": false + }, + "tableColumn": "", + "targets": [ + { + "expr": "sum (container_memory_working_set_bytes{kubernetes_io_hostname=~\"^$Node$\",namespace=~\"^$Namespace$\"}) / sum (machine_memory_bytes{kubernetes_io_hostname=~\"^$Node$\"}) * 100", + "format": "time_series", + "interval": "10s", + "intervalFactor": 1, + "refId": "A", + "step": 10 + } + ], + "thresholds": "", + "title": "Cluster memory usage", + "transparent": false, + "type": "singlestat", + "valueFontSize": "80%", + 
"valueMaps": [ + { + "op": "=", + "text": "N/A", + "value": "null" + } + ], + "valueName": "current" + }, + { + "cacheTimeout": null, + "colorBackground": false, + "colorValue": true, + "colors": [ + "rgba(50, 172, 45, 0.97)", + "rgba(237, 129, 40, 0.89)", + "rgba(245, 54, 54, 0.9)" + ], + "datasource": "prometheus - Juju generated source", + "decimals": 2, + "editable": true, + "error": false, + "format": "percent", + "gauge": { + "maxValue": 100, + "minValue": 0, + "show": true, + "thresholdLabels": false, + "thresholdMarkers": true + }, + "gridPos": { + "h": 5, + "w": 8, + "x": 8, + "y": 5 + }, + "height": "180px", + "id": 6, + "interval": null, + "links": [ + + ], + "mappingType": 1, + "mappingTypes": [ + { + "name": "value to text", + "value": 1 + }, + { + "name": "range to text", + "value": 2 + } + ], + "maxDataPoints": 100, + "nullPointMode": "connected", + "nullText": null, + "postfix": "", + "postfixFontSize": "50%", + "prefix": "", + "prefixFontSize": "50%", + "rangeMaps": [ + { + "from": "null", + "text": "N/A", + "to": "null" + } + ], + "sparkline": { + "fillColor": "rgba(31, 118, 189, 0.18)", + "full": false, + "lineColor": "rgb(31, 120, 193)", + "show": false + }, + "tableColumn": "", + "targets": [ + { + "expr": "sum (rate (container_cpu_usage_seconds_total{kubernetes_io_hostname=~\"^$Node$\",namespace=~\"^$Namespace$\"}[1m])) / sum (machine_cpu_cores{kubernetes_io_hostname=~\"^$Node$\"}) * 100", + "format": "time_series", + "interval": "10s", + "intervalFactor": 1, + "refId": "A", + "step": 10 + } + ], + "thresholds": "", + "title": "Cluster CPU usage (1m avg)", + "type": "singlestat", + "valueFontSize": "80%", + "valueMaps": [ + { + "op": "=", + "text": "N/A", + "value": "null" + } + ], + "valueName": "current" + }, + { + "cacheTimeout": null, + "colorBackground": false, + "colorValue": true, + "colors": [ + "rgba(50, 172, 45, 0.97)", + "rgba(237, 129, 40, 0.89)", + "rgba(245, 54, 54, 0.9)" + ], + "datasource": "prometheus - Juju generated source", 
+ "decimals": 2, + "editable": true, + "error": false, + "format": "percent", + "gauge": { + "maxValue": 100, + "minValue": 0, + "show": true, + "thresholdLabels": false, + "thresholdMarkers": true + }, + "gridPos": { + "h": 5, + "w": 8, + "x": 16, + "y": 5 + }, + "height": "180px", + "id": 7, + "interval": null, + "links": [ + + ], + "mappingType": 1, + "mappingTypes": [ + { + "name": "value to text", + "value": 1 + }, + { + "name": "range to text", + "value": 2 + } + ], + "maxDataPoints": 100, + "nullPointMode": "connected", + "nullText": null, + "postfix": "", + "postfixFontSize": "50%", + "prefix": "", + "prefixFontSize": "50%", + "rangeMaps": [ + { + "from": "null", + "text": "N/A", + "to": "null" + } + ], + "sparkline": { + "fillColor": "rgba(31, 118, 189, 0.18)", + "full": false, + "lineColor": "rgb(31, 120, 193)", + "show": false + }, + "tableColumn": "", + "targets": [ + { + "expr": "sum (container_fs_usage_bytes{device=~\"^/dev/.*$\",kubernetes_io_hostname=~\"^$Node$\",namespace=~\"^$Namespace$\"}) / sum (container_fs_limit_bytes{device=~\"^/dev/.*$\",id=\"/\",kubernetes_io_hostname=~\"^$Node$\"}) * 100", + "format": "time_series", + "interval": "10s", + "intervalFactor": 1, + "legendFormat": "", + "metric": "", + "refId": "A", + "step": 10 + } + ], + "thresholds": "", + "title": "Cluster filesystem usage", + "type": "singlestat", + "valueFontSize": "80%", + "valueMaps": [ + { + "op": "=", + "text": "N/A", + "value": "null" + } + ], + "valueName": "current" + }, + { + "cacheTimeout": null, + "colorBackground": false, + "colorValue": false, + "colors": [ + "rgba(50, 172, 45, 0.97)", + "rgba(237, 129, 40, 0.89)", + "rgba(245, 54, 54, 0.9)" + ], + "datasource": "prometheus - Juju generated source", + "decimals": 2, + "editable": true, + "error": false, + "format": "bytes", + "gauge": { + "maxValue": 100, + "minValue": 0, + "show": false, + "thresholdLabels": false, + "thresholdMarkers": true + }, + "gridPos": { + "h": 3, + "w": 4, + "x": 0, + "y": 10 + }, + 
"height": "1px", + "id": 9, + "interval": null, + "links": [ + + ], + "mappingType": 1, + "mappingTypes": [ + { + "name": "value to text", + "value": 1 + }, + { + "name": "range to text", + "value": 2 + } + ], + "maxDataPoints": 100, + "nullPointMode": "connected", + "nullText": null, + "postfix": "", + "postfixFontSize": "20%", + "prefix": "", + "prefixFontSize": "20%", + "rangeMaps": [ + { + "from": "null", + "text": "N/A", + "to": "null" + } + ], + "sparkline": { + "fillColor": "rgba(31, 118, 189, 0.18)", + "full": false, + "lineColor": "rgb(31, 120, 193)", + "show": false + }, + "tableColumn": "", + "targets": [ + { + "expr": "sum (container_memory_working_set_bytes{kubernetes_io_hostname=~\"^$Node$\",namespace=~\"^$Namespace$\"})", + "format": "time_series", + "interval": "10s", + "intervalFactor": 1, + "refId": "A", + "step": 10 + } + ], + "thresholds": "", + "title": "Used", + "type": "singlestat", + "valueFontSize": "50%", + "valueMaps": [ + { + "op": "=", + "text": "N/A", + "value": "null" + } + ], + "valueName": "current" + }, + { + "cacheTimeout": null, + "colorBackground": false, + "colorValue": false, + "colors": [ + "rgba(50, 172, 45, 0.97)", + "rgba(237, 129, 40, 0.89)", + "rgba(245, 54, 54, 0.9)" + ], + "datasource": "prometheus - Juju generated source", + "decimals": 2, + "editable": true, + "error": false, + "format": "bytes", + "gauge": { + "maxValue": 100, + "minValue": 0, + "show": false, + "thresholdLabels": false, + "thresholdMarkers": true + }, + "gridPos": { + "h": 3, + "w": 4, + "x": 4, + "y": 10 + }, + "height": "1px", + "id": 10, + "interval": null, + "links": [ + + ], + "mappingType": 1, + "mappingTypes": [ + { + "name": "value to text", + "value": 1 + }, + { + "name": "range to text", + "value": 2 + } + ], + "maxDataPoints": 100, + "nullPointMode": "connected", + "nullText": null, + "postfix": "", + "postfixFontSize": "50%", + "prefix": "", + "prefixFontSize": "50%", + "rangeMaps": [ + { + "from": "null", + "text": "N/A", + "to": 
"null" + } + ], + "sparkline": { + "fillColor": "rgba(31, 118, 189, 0.18)", + "full": false, + "lineColor": "rgb(31, 120, 193)", + "show": false + }, + "tableColumn": "", + "targets": [ + { + "expr": "sum (machine_memory_bytes{kubernetes_io_hostname=~\"^$Node$\"})", + "format": "time_series", + "interval": "10s", + "intervalFactor": 1, + "refId": "A", + "step": 10 + } + ], + "thresholds": "", + "title": "Total", + "type": "singlestat", + "valueFontSize": "50%", + "valueMaps": [ + { + "op": "=", + "text": "N/A", + "value": "null" + } + ], + "valueName": "current" + }, + { + "cacheTimeout": null, + "colorBackground": false, + "colorValue": false, + "colors": [ + "rgba(50, 172, 45, 0.97)", + "rgba(237, 129, 40, 0.89)", + "rgba(245, 54, 54, 0.9)" + ], + "datasource": "prometheus - Juju generated source", + "decimals": 2, + "editable": true, + "error": false, + "format": "none", + "gauge": { + "maxValue": 100, + "minValue": 0, + "show": false, + "thresholdLabels": false, + "thresholdMarkers": true + }, + "gridPos": { + "h": 3, + "w": 4, + "x": 8, + "y": 10 + }, + "height": "1px", + "id": 11, + "interval": null, + "links": [ + + ], + "mappingType": 1, + "mappingTypes": [ + { + "name": "value to text", + "value": 1 + }, + { + "name": "range to text", + "value": 2 + } + ], + "maxDataPoints": 100, + "nullPointMode": "connected", + "nullText": null, + "postfix": " cores", + "postfixFontSize": "30%", + "prefix": "", + "prefixFontSize": "50%", + "rangeMaps": [ + { + "from": "null", + "text": "N/A", + "to": "null" + } + ], + "sparkline": { + "fillColor": "rgba(31, 118, 189, 0.18)", + "full": false, + "lineColor": "rgb(31, 120, 193)", + "show": false + }, + "tableColumn": "", + "targets": [ + { + "expr": "sum (rate (container_cpu_usage_seconds_total{kubernetes_io_hostname=~\"^$Node$\",namespace=~\"^$Namespace$\"}[1m]))", + "format": "time_series", + "interval": "10s", + "intervalFactor": 1, + "refId": "A", + "step": 10 + } + ], + "thresholds": "", + "title": "Used", + "type": 
"singlestat", + "valueFontSize": "50%", + "valueMaps": [ + { + "op": "=", + "text": "N/A", + "value": "null" + } + ], + "valueName": "current" + }, + { + "cacheTimeout": null, + "colorBackground": false, + "colorValue": false, + "colors": [ + "rgba(50, 172, 45, 0.97)", + "rgba(237, 129, 40, 0.89)", + "rgba(245, 54, 54, 0.9)" + ], + "datasource": "prometheus - Juju generated source", + "decimals": 2, + "editable": true, + "error": false, + "format": "none", + "gauge": { + "maxValue": 100, + "minValue": 0, + "show": false, + "thresholdLabels": false, + "thresholdMarkers": true + }, + "gridPos": { + "h": 3, + "w": 4, + "x": 12, + "y": 10 + }, + "height": "1px", + "id": 12, + "interval": null, + "links": [ + + ], + "mappingType": 1, + "mappingTypes": [ + { + "name": "value to text", + "value": 1 + }, + { + "name": "range to text", + "value": 2 + } + ], + "maxDataPoints": 100, + "nullPointMode": "connected", + "nullText": null, + "postfix": " cores", + "postfixFontSize": "30%", + "prefix": "", + "prefixFontSize": "50%", + "rangeMaps": [ + { + "from": "null", + "text": "N/A", + "to": "null" + } + ], + "sparkline": { + "fillColor": "rgba(31, 118, 189, 0.18)", + "full": false, + "lineColor": "rgb(31, 120, 193)", + "show": false + }, + "tableColumn": "", + "targets": [ + { + "expr": "sum (machine_cpu_cores{kubernetes_io_hostname=~\"^$Node$\"})", + "interval": "10s", + "intervalFactor": 1, + "refId": "A", + "step": 10 + } + ], + "thresholds": "", + "title": "Total", + "type": "singlestat", + "valueFontSize": "50%", + "valueMaps": [ + { + "op": "=", + "text": "N/A", + "value": "null" + } + ], + "valueName": "current" + }, + { + "cacheTimeout": null, + "colorBackground": false, + "colorValue": false, + "colors": [ + "rgba(50, 172, 45, 0.97)", + "rgba(237, 129, 40, 0.89)", + "rgba(245, 54, 54, 0.9)" + ], + "datasource": "prometheus - Juju generated source", + "decimals": 2, + "editable": true, + "error": false, + "format": "bytes", + "gauge": { + "maxValue": 100, + "minValue": 
0, + "show": false, + "thresholdLabels": false, + "thresholdMarkers": true + }, + "gridPos": { + "h": 3, + "w": 4, + "x": 16, + "y": 10 + }, + "height": "1px", + "id": 13, + "interval": null, + "links": [ + + ], + "mappingType": 1, + "mappingTypes": [ + { + "name": "value to text", + "value": 1 + }, + { + "name": "range to text", + "value": 2 + } + ], + "maxDataPoints": 100, + "nullPointMode": "connected", + "nullText": null, + "postfix": "", + "postfixFontSize": "50%", + "prefix": "", + "prefixFontSize": "50%", + "rangeMaps": [ + { + "from": "null", + "text": "N/A", + "to": "null" + } + ], + "sparkline": { + "fillColor": "rgba(31, 118, 189, 0.18)", + "full": false, + "lineColor": "rgb(31, 120, 193)", + "show": false + }, + "tableColumn": "", + "targets": [ + { + "expr": "sum (container_fs_usage_bytes{device=~\"^/dev/.*$\",kubernetes_io_hostname=~\"^$Node$\",namespace=~\"^$Namespace$\"})", + "format": "time_series", + "interval": "10s", + "intervalFactor": 1, + "refId": "A", + "step": 10 + } + ], + "thresholds": "", + "title": "Used", + "type": "singlestat", + "valueFontSize": "50%", + "valueMaps": [ + { + "op": "=", + "text": "N/A", + "value": "null" + } + ], + "valueName": "current" + }, + { + "cacheTimeout": null, + "colorBackground": false, + "colorValue": false, + "colors": [ + "rgba(50, 172, 45, 0.97)", + "rgba(237, 129, 40, 0.89)", + "rgba(245, 54, 54, 0.9)" + ], + "datasource": "prometheus - Juju generated source", + "decimals": 2, + "editable": true, + "error": false, + "format": "bytes", + "gauge": { + "maxValue": 100, + "minValue": 0, + "show": false, + "thresholdLabels": false, + "thresholdMarkers": true + }, + "gridPos": { + "h": 3, + "w": 4, + "x": 20, + "y": 10 + }, + "height": "1px", + "id": 14, + "interval": null, + "links": [ + + ], + "mappingType": 1, + "mappingTypes": [ + { + "name": "value to text", + "value": 1 + }, + { + "name": "range to text", + "value": 2 + } + ], + "maxDataPoints": 100, + "nullPointMode": "connected", + "nullText": null, 
+ "postfix": "", + "postfixFontSize": "50%", + "prefix": "", + "prefixFontSize": "50%", + "rangeMaps": [ + { + "from": "null", + "text": "N/A", + "to": "null" + } + ], + "sparkline": { + "fillColor": "rgba(31, 118, 189, 0.18)", + "full": false, + "lineColor": "rgb(31, 120, 193)", + "show": false + }, + "tableColumn": "", + "targets": [ + { + "expr": "sum (container_fs_limit_bytes{device=~\"^/dev/.*$\",id=\"/\",kubernetes_io_hostname=~\"^$Node$\"})", + "interval": "10s", + "intervalFactor": 1, + "refId": "A", + "step": 10 + } + ], + "thresholds": "", + "title": "Total", + "type": "singlestat", + "valueFontSize": "50%", + "valueMaps": [ + { + "op": "=", + "text": "N/A", + "value": "null" + } + ], + "valueName": "current" + }, + { + "aliasColors": { + + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "prometheus - Juju generated source", + "decimals": 3, + "editable": true, + "error": false, + "fill": 0, + "grid": { + + }, + "gridPos": { + "h": 7, + "w": 24, + "x": 0, + "y": 13 + }, + "height": "", + "id": 17, + "legend": { + "alignAsTable": true, + "avg": true, + "current": true, + "max": false, + "min": false, + "rightSide": true, + "show": true, + "sort": "current", + "sortDesc": true, + "total": false, + "values": true + }, + "lines": true, + "linewidth": 2, + "links": [ + + ], + "nullPointMode": "connected", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [ + + ], + "spaceLength": 10, + "stack": false, + "steppedLine": true, + "targets": [ + { + "expr": "sum (rate (container_cpu_usage_seconds_total{image!=\"\",name=~\"^k8s_.*\",kubernetes_io_hostname=~\"^$Node$\",namespace=~\"^$Namespace$\"}[1m])) by (pod_name)", + "format": "time_series", + "interval": "10s", + "intervalFactor": 1, + "legendFormat": "{{ pod_name }}", + "metric": "container_cpu", + "refId": "A", + "step": 10 + }, + { + "expr": "", + "format": "time_series", + "intervalFactor": 1, + "refId": "B" + } + ], + 
"thresholds": [ + + ], + "timeFrom": null, + "timeRegions": [ + + ], + "timeShift": null, + "title": "Pods CPU usage (1m avg)", + "tooltip": { + "msResolution": true, + "shared": true, + "sort": 2, + "value_type": "cumulative" + }, + "transparent": false, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [ + + ] + }, + "yaxes": [ + { + "format": "none", + "label": "cores", + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": { + + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "prometheus - Juju generated source", + "decimals": 3, + "editable": true, + "error": false, + "fill": 0, + "grid": { + + }, + "gridPos": { + "h": 7, + "w": 24, + "x": 0, + "y": 20 + }, + "height": "", + "id": 24, + "legend": { + "alignAsTable": true, + "avg": true, + "current": true, + "hideEmpty": false, + "hideZero": false, + "max": false, + "min": false, + "rightSide": true, + "show": true, + "sideWidth": null, + "sort": "current", + "sortDesc": true, + "total": false, + "values": true + }, + "lines": true, + "linewidth": 2, + "links": [ + + ], + "nullPointMode": "connected", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [ + + ], + "spaceLength": 10, + "stack": false, + "steppedLine": true, + "targets": [ + { + "expr": "sum (rate (container_cpu_usage_seconds_total{image!=\"\",name=~\"^k8s_.*\",container_name!=\"POD\",kubernetes_io_hostname=~\"^$Node$\",namespace=~\"^$Namespace$\"}[1m])) by (container_name, pod_name)", + "format": "time_series", + "hide": false, + "interval": "10s", + "intervalFactor": 1, + "legendFormat": "pod: {{ pod_name }} | {{ container_name }}", + "metric": "container_cpu", + "refId": "A", + "step": 10 + }, + { + 
"expr": "sum (rate (container_cpu_usage_seconds_total{image!=\"\",name!~\"^k8s_.*\",kubernetes_io_hostname=~\"^$Node$\",namespace=~\"^$Namespace$\"}[1m])) by (kubernetes_io_hostname, name, image)", + "format": "time_series", + "hide": false, + "interval": "10s", + "intervalFactor": 1, + "legendFormat": "docker: {{ kubernetes_io_hostname }} | {{ image }} ({{ name }})", + "metric": "container_cpu", + "refId": "B", + "step": 10 + }, + { + "expr": "sum (rate (container_cpu_usage_seconds_total{rkt_container_name!=\"\",kubernetes_io_hostname=~\"^$Node$\",namespace=~\"^$Namespace$\"}[1m])) by (kubernetes_io_hostname, rkt_container_name)", + "format": "time_series", + "interval": "10s", + "intervalFactor": 1, + "legendFormat": "rkt: {{ kubernetes_io_hostname }} | {{ rkt_container_name }}", + "metric": "container_cpu", + "refId": "C", + "step": 10 + } + ], + "thresholds": [ + + ], + "timeFrom": null, + "timeRegions": [ + + ], + "timeShift": null, + "title": "Containers CPU usage (1m avg)", + "tooltip": { + "msResolution": true, + "shared": true, + "sort": 2, + "value_type": "cumulative" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [ + + ] + }, + "yaxes": [ + { + "format": "none", + "label": "cores", + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": { + + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "prometheus - Juju generated source", + "decimals": 3, + "editable": true, + "error": false, + "fill": 0, + "grid": { + + }, + "gridPos": { + "h": 13, + "w": 24, + "x": 0, + "y": 27 + }, + "id": 20, + "legend": { + "alignAsTable": true, + "avg": true, + "current": true, + "max": false, + "min": false, + "rightSide": false, + "show": true, + "sort": "current", + 
"sortDesc": true, + "total": false, + "values": true + }, + "lines": true, + "linewidth": 2, + "links": [ + + ], + "nullPointMode": "connected", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [ + + ], + "spaceLength": 10, + "stack": false, + "steppedLine": true, + "targets": [ + { + "expr": "sum (rate (container_cpu_usage_seconds_total{id!=\"/\",kubernetes_io_hostname=~\"^$Node$\",namespace=~\"^$Namespace$\"}[1m])) by (id)", + "format": "time_series", + "hide": false, + "interval": "10s", + "intervalFactor": 1, + "legendFormat": "{{ id }}", + "metric": "container_cpu", + "refId": "A", + "step": 10 + } + ], + "thresholds": [ + + ], + "timeFrom": null, + "timeRegions": [ + + ], + "timeShift": null, + "title": "All processes CPU usage (1m avg)", + "tooltip": { + "msResolution": true, + "shared": true, + "sort": 2, + "value_type": "cumulative" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [ + + ] + }, + "yaxes": [ + { + "format": "none", + "label": "cores", + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": { + + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "prometheus - Juju generated source", + "decimals": 2, + "editable": true, + "error": false, + "fill": 0, + "grid": { + + }, + "gridPos": { + "h": 7, + "w": 24, + "x": 0, + "y": 40 + }, + "id": 25, + "legend": { + "alignAsTable": true, + "avg": true, + "current": true, + "max": false, + "min": false, + "rightSide": true, + "show": true, + "sideWidth": 200, + "sort": "current", + "sortDesc": true, + "total": false, + "values": true + }, + "lines": true, + "linewidth": 2, + "links": [ + + ], + "nullPointMode": "connected", + "percentage": false, + 
"pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [ + + ], + "spaceLength": 10, + "stack": false, + "steppedLine": true, + "targets": [ + { + "expr": "sum (container_memory_working_set_bytes{image!=\"\",name=~\"^k8s_.*\",kubernetes_io_hostname=~\"^$Node$\",namespace=~\"^$Namespace$\"}) by (pod_name)", + "format": "time_series", + "interval": "10s", + "intervalFactor": 1, + "legendFormat": "{{ pod_name }}", + "metric": "container_memory_usage:sort_desc", + "refId": "A", + "step": 10 + } + ], + "thresholds": [ + + ], + "timeFrom": null, + "timeRegions": [ + + ], + "timeShift": null, + "title": "Pods memory usage", + "tooltip": { + "msResolution": false, + "shared": true, + "sort": 2, + "value_type": "cumulative" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [ + + ] + }, + "yaxes": [ + { + "format": "bytes", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": { + + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "prometheus - Juju generated source", + "decimals": 2, + "editable": true, + "error": false, + "fill": 0, + "grid": { + + }, + "gridPos": { + "h": 7, + "w": 24, + "x": 0, + "y": 47 + }, + "id": 27, + "legend": { + "alignAsTable": true, + "avg": true, + "current": true, + "max": false, + "min": false, + "rightSide": true, + "show": true, + "sideWidth": 200, + "sort": "current", + "sortDesc": true, + "total": false, + "values": true + }, + "lines": true, + "linewidth": 2, + "links": [ + + ], + "nullPointMode": "connected", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [ + + ], + "spaceLength": 10, + "stack": false, + "steppedLine": true, + "targets": [ + { 
+ "expr": "sum (container_memory_working_set_bytes{image!=\"\",name=~\"^k8s_.*\",container_name!=\"POD\",kubernetes_io_hostname=~\"^$Node$\",namespace=~\"^$Namespace$\"}) by (container_name, pod_name)", + "format": "time_series", + "interval": "10s", + "intervalFactor": 1, + "legendFormat": "pod: {{ pod_name }} | {{ container_name }}", + "metric": "container_memory_usage:sort_desc", + "refId": "A", + "step": 10 + }, + { + "expr": "sum (container_memory_working_set_bytes{image!=\"\",name!~\"^k8s_.*\",kubernetes_io_hostname=~\"^$Node$\",namespace=~\"^$Namespace$\"}) by (kubernetes_io_hostname, name, image)", + "format": "time_series", + "interval": "10s", + "intervalFactor": 1, + "legendFormat": "docker: {{ kubernetes_io_hostname }} | {{ image }} ({{ name }})", + "metric": "container_memory_usage:sort_desc", + "refId": "B", + "step": 10 + }, + { + "expr": "sum (container_memory_working_set_bytes{rkt_container_name!=\"\",kubernetes_io_hostname=~\"^$Node$\",namespace=~\"^$Namespace$\"}) by (kubernetes_io_hostname, rkt_container_name)", + "format": "time_series", + "interval": "10s", + "intervalFactor": 1, + "legendFormat": "rkt: {{ kubernetes_io_hostname }} | {{ rkt_container_name }}", + "metric": "container_memory_usage:sort_desc", + "refId": "C", + "step": 10 + } + ], + "thresholds": [ + + ], + "timeFrom": null, + "timeRegions": [ + + ], + "timeShift": null, + "title": "Containers memory usage", + "tooltip": { + "msResolution": false, + "shared": true, + "sort": 2, + "value_type": "cumulative" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [ + + ] + }, + "yaxes": [ + { + "format": "bytes", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": { + + }, + "bars": false, + 
"dashLength": 10, + "dashes": false, + "datasource": "prometheus - Juju generated source", + "decimals": 2, + "editable": true, + "error": false, + "fill": 0, + "grid": { + + }, + "gridPos": { + "h": 13, + "w": 24, + "x": 0, + "y": 54 + }, + "id": 28, + "legend": { + "alignAsTable": true, + "avg": true, + "current": true, + "max": false, + "min": false, + "rightSide": false, + "show": true, + "sideWidth": 200, + "sort": "current", + "sortDesc": true, + "total": false, + "values": true + }, + "lines": true, + "linewidth": 2, + "links": [ + + ], + "nullPointMode": "connected", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [ + + ], + "spaceLength": 10, + "stack": false, + "steppedLine": true, + "targets": [ + { + "expr": "sum (container_memory_working_set_bytes{id!=\"/\",kubernetes_io_hostname=~\"^$Node$\",namespace=~\"^$Namespace$\"}) by (id)", + "format": "time_series", + "interval": "10s", + "intervalFactor": 1, + "legendFormat": "{{ id }}", + "metric": "container_memory_usage:sort_desc", + "refId": "A", + "step": 10 + } + ], + "thresholds": [ + + ], + "timeFrom": null, + "timeRegions": [ + + ], + "timeShift": null, + "title": "All processes memory usage", + "tooltip": { + "msResolution": false, + "shared": true, + "sort": 2, + "value_type": "cumulative" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [ + + ] + }, + "yaxes": [ + { + "format": "bytes", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": { + + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "prometheus - Juju generated source", + "decimals": 2, + "editable": true, + "error": false, + "fill": 1, + "grid": { + + }, + "gridPos": 
{ + "h": 7, + "w": 24, + "x": 0, + "y": 67 + }, + "id": 16, + "legend": { + "alignAsTable": true, + "avg": true, + "current": true, + "max": false, + "min": false, + "rightSide": true, + "show": true, + "sideWidth": 200, + "sort": "current", + "sortDesc": true, + "total": false, + "values": true + }, + "lines": true, + "linewidth": 2, + "links": [ + + ], + "nullPointMode": "connected", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [ + + ], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "sum (rate (container_network_receive_bytes_total{image!=\"\",name=~\"^k8s_.*\",kubernetes_io_hostname=~\"^$Node$\",namespace=~\"^$Namespace$\"}[1m])) by (pod_name)", + "format": "time_series", + "interval": "10s", + "intervalFactor": 1, + "legendFormat": "-> {{ pod_name }}", + "metric": "network", + "refId": "A", + "step": 10 + }, + { + "expr": "- sum (rate (container_network_transmit_bytes_total{image!=\"\",name=~\"^k8s_.*\",kubernetes_io_hostname=~\"^$Node$\",namespace=~\"^$Namespace$\"}[1m])) by (pod_name)", + "format": "time_series", + "interval": "10s", + "intervalFactor": 1, + "legendFormat": "<- {{ pod_name }}", + "metric": "network", + "refId": "B", + "step": 10 + } + ], + "thresholds": [ + + ], + "timeFrom": null, + "timeRegions": [ + + ], + "timeShift": null, + "title": "Pods network I/O (1m avg)", + "tooltip": { + "msResolution": false, + "shared": true, + "sort": 2, + "value_type": "cumulative" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [ + + ] + }, + "yaxes": [ + { + "format": "Bps", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": { + + }, + "bars": false, + 
"dashLength": 10, + "dashes": false, + "datasource": "prometheus - Juju generated source", + "decimals": 2, + "editable": true, + "error": false, + "fill": 1, + "grid": { + + }, + "gridPos": { + "h": 7, + "w": 24, + "x": 0, + "y": 74 + }, + "id": 30, + "legend": { + "alignAsTable": true, + "avg": true, + "current": true, + "max": false, + "min": false, + "rightSide": true, + "show": true, + "sideWidth": 200, + "sort": "current", + "sortDesc": true, + "total": false, + "values": true + }, + "lines": true, + "linewidth": 2, + "links": [ + + ], + "nullPointMode": "connected", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [ + + ], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "sum (rate (container_network_receive_bytes_total{image!=\"\",name=~\"^k8s_.*\",kubernetes_io_hostname=~\"^$Node$\",namespace=~\"^$Namespace$\"}[1m])) by (container_name, pod_name)", + "format": "time_series", + "hide": false, + "interval": "10s", + "intervalFactor": 1, + "legendFormat": "-> pod: {{ pod_name }} | {{ container_name }}", + "metric": "network", + "refId": "B", + "step": 10 + }, + { + "expr": "- sum (rate (container_network_transmit_bytes_total{image!=\"\",name=~\"^k8s_.*\",kubernetes_io_hostname=~\"^$Node$\",namespace=~\"^$Namespace$\"}[1m])) by (container_name, pod_name)", + "format": "time_series", + "hide": false, + "interval": "10s", + "intervalFactor": 1, + "legendFormat": "<- pod: {{ pod_name }} | {{ container_name }}", + "metric": "network", + "refId": "D", + "step": 10 + }, + { + "expr": "sum (rate (container_network_receive_bytes_total{image!=\"\",name!~\"^k8s_.*\",kubernetes_io_hostname=~\"^$Node$\",namespace=~\"^$Namespace$\"}[1m])) by (kubernetes_io_hostname, name, image)", + "format": "time_series", + "hide": false, + "interval": "10s", + "intervalFactor": 1, + "legendFormat": "-> docker: {{ kubernetes_io_hostname }} | {{ image }} ({{ name }})", + "metric": "network", 
+ "refId": "A", + "step": 10 + }, + { + "expr": "- sum (rate (container_network_transmit_bytes_total{image!=\"\",name!~\"^k8s_.*\",kubernetes_io_hostname=~\"^$Node$\",namespace=~\"^$Namespace$\"}[1m])) by (kubernetes_io_hostname, name, image)", + "format": "time_series", + "hide": false, + "interval": "10s", + "intervalFactor": 1, + "legendFormat": "<- docker: {{ kubernetes_io_hostname }} | {{ image }} ({{ name }})", + "metric": "network", + "refId": "C", + "step": 10 + }, + { + "expr": "sum (rate (container_network_transmit_bytes_total{rkt_container_name!=\"\",kubernetes_io_hostname=~\"^$Node$\",namespace=~\"^$Namespace$\"}[1m])) by (kubernetes_io_hostname, rkt_container_name)", + "format": "time_series", + "hide": false, + "interval": "10s", + "intervalFactor": 1, + "legendFormat": "-> rkt: {{ kubernetes_io_hostname }} | {{ rkt_container_name }}", + "metric": "network", + "refId": "E", + "step": 10 + }, + { + "expr": "- sum (rate (container_network_transmit_bytes_total{rkt_container_name!=\"\",kubernetes_io_hostname=~\"^$Node$\",namespace=~\"^$Namespace$\"}[1m])) by (kubernetes_io_hostname, rkt_container_name)", + "format": "time_series", + "hide": false, + "interval": "10s", + "intervalFactor": 1, + "legendFormat": "<- rkt: {{ kubernetes_io_hostname }} | {{ rkt_container_name }}", + "metric": "network", + "refId": "F", + "step": 10 + } + ], + "thresholds": [ + + ], + "timeFrom": null, + "timeRegions": [ + + ], + "timeShift": null, + "title": "Containers network I/O (1m avg)", + "tooltip": { + "msResolution": false, + "shared": true, + "sort": 2, + "value_type": "cumulative" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [ + + ] + }, + "yaxes": [ + { + "format": "Bps", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ], + "yaxis": { + "align": false, + 
"alignLevel": null + } + }, + { + "aliasColors": { + + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "prometheus - Juju generated source", + "decimals": 2, + "editable": true, + "error": false, + "fill": 1, + "grid": { + + }, + "gridPos": { + "h": 13, + "w": 24, + "x": 0, + "y": 81 + }, + "id": 29, + "legend": { + "alignAsTable": true, + "avg": true, + "current": true, + "max": false, + "min": false, + "rightSide": false, + "show": true, + "sideWidth": 200, + "sort": "current", + "sortDesc": true, + "total": false, + "values": true + }, + "lines": true, + "linewidth": 2, + "links": [ + + ], + "nullPointMode": "connected", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [ + + ], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "sum (rate (container_network_receive_bytes_total{id!=\"/\",kubernetes_io_hostname=~\"^$Node$\",namespace=~\"^$Namespace$\"}[1m])) by (id)", + "format": "time_series", + "interval": "10s", + "intervalFactor": 1, + "legendFormat": "-> {{ id }}", + "metric": "network", + "refId": "A", + "step": 10 + }, + { + "expr": "- sum (rate (container_network_transmit_bytes_total{id!=\"/\",kubernetes_io_hostname=~\"^$Node$\",namespace=~\"^$Namespace$\"}[1m])) by (id)", + "format": "time_series", + "interval": "10s", + "intervalFactor": 1, + "legendFormat": "<- {{ id }}", + "metric": "network", + "refId": "B", + "step": 10 + } + ], + "thresholds": [ + + ], + "timeFrom": null, + "timeRegions": [ + + ], + "timeShift": null, + "title": "All processes network I/O (1m avg)", + "tooltip": { + "msResolution": false, + "shared": true, + "sort": 2, + "value_type": "cumulative" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [ + + ] + }, + "yaxes": [ + { + "format": "Bps", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": 
"short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + } + ], + "refresh": false, + "schemaVersion": 16, + "style": "dark", + "tags": [ + "Juju" + ], + "templating": { + "list": [ + { + "allValue": ".*", + "current": { + "selected": true, + "text": "All", + "value": "$__all" + }, + "datasource": "prometheus - Juju generated source", + "definition": "", + "hide": 0, + "includeAll": true, + "label": null, + "multi": false, + "name": "Node", + "options": [ + + ], + "query": "label_values(kubernetes_io_hostname)", + "refresh": 1, + "regex": "", + "skipUrlSync": false, + "sort": 0, + "tagValuesQuery": "", + "tags": [ + + ], + "tagsQuery": "", + "type": "query", + "useTags": false + }, + { + "allValue": ".*", + "current": { + "selected": false, + "tags": [ + + ], + "text": "All", + "value": "$__all" + }, + "datasource": "prometheus - Juju generated source", + "definition": "label_values(namespace)", + "hide": 0, + "includeAll": true, + "label": null, + "multi": false, + "name": "Namespace", + "options": [ + + ], + "query": "label_values(namespace)", + "refresh": 1, + "regex": "", + "skipUrlSync": false, + "sort": 0, + "tagValuesQuery": "", + "tags": [ + + ], + "tagsQuery": "", + "type": "query", + "useTags": false + } + ] + }, + "time": { + "from": "now-1h", + "to": "now" + }, + "timepicker": { + "refresh_intervals": [ + "5s", + "10s", + "30s", + "1m", + "5m", + "15m", + "30m", + "1h", + "2h", + "1d" + ], + "time_options": [ + "5m", + "15m", + "1h", + "6h", + "12h", + "24h", + "2d", + "7d", + "30d" + ] + }, + "timezone": "browser", + "title": "Kubernetes Metrics (via Prometheus)", + "version": 35 + }, + "overwrite": false +} diff --git a/kubernetes-control-plane/templates/grafana/conditional/telegraf.json b/kubernetes-control-plane/templates/grafana/conditional/telegraf.json new file mode 100644 index 0000000..1e9c0f1 --- /dev/null +++ 
b/kubernetes-control-plane/templates/grafana/conditional/telegraf.json @@ -0,0 +1,2094 @@ +{ + "dashboard": { + "annotations": { + "list": [ + + ] + }, + "description": "Derived from https://grafana.com/dashboards/941", + "editable": true, + "gnetId": null, + "graphTooltip": 0, + "hideControls": false, + "id": null, + "links": [ + + ], + "rows": [ + { + "collapse": false, + "height": "250px", + "panels": [ + { + "aliasColors": { + + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "prometheus - Juju generated source", + "editable": true, + "error": false, + "fill": 1, + "grid": { + + }, + "id": 2, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 2, + "links": [ + + ], + "nullPointMode": "connected", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [ + + ], + "spaceLength": 10, + "span": 4, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "system_load5{host=~\"$node\"} ", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "{{host}}", + "refId": "A", + "step": 2 + } + ], + "thresholds": [ + + ], + "timeFrom": null, + "timeShift": null, + "title": "Node load average 5m", + "tooltip": { + "msResolution": false, + "shared": true, + "sort": 0, + "value_type": "cumulative" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [ + + ] + }, + "yaxes": [ + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ] + }, + { + "aliasColors": { + + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "prometheus - Juju generated source", + "editable": true, + "error": false, + "fill": 1, + "grid": { 
+ + }, + "id": 3, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 2, + "links": [ + + ], + "nullPointMode": "connected", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [ + + ], + "spaceLength": 10, + "span": 4, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "system_load15{host=~\"$node\"} ", + "intervalFactor": 2, + "legendFormat": "{{host}}", + "refId": "A", + "step": 2 + } + ], + "thresholds": [ + + ], + "timeFrom": null, + "timeShift": null, + "title": "Node load average 15m", + "tooltip": { + "msResolution": false, + "shared": true, + "sort": 0, + "value_type": "cumulative" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [ + + ] + }, + "yaxes": [ + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ] + }, + { + "aliasColors": { + + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "prometheus - Juju generated source", + "editable": true, + "error": false, + "fill": 1, + "grid": { + + }, + "id": 1, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 2, + "links": [ + + ], + "nullPointMode": "connected", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [ + + ], + "spaceLength": 10, + "span": 4, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "system_load1{host=~\"$node\"} ", + "intervalFactor": 2, + "legendFormat": "{{host}}", + "refId": "A", + "step": 2 + } + ], + "thresholds": [ + + ], + "timeFrom": null, + 
"timeShift": null, + "title": "Node load average 1m", + "tooltip": { + "msResolution": false, + "shared": true, + "sort": 0, + "value_type": "cumulative" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [ + + ] + }, + "yaxes": [ + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ] + } + ], + "repeat": null, + "repeatIteration": null, + "repeatRowId": null, + "showTitle": true, + "title": "Load average", + "titleSize": "h6" + }, + { + "collapse": true, + "height": "250px", + "panels": [ + { + "aliasColors": { + + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "prometheus - Juju generated source", + "editable": true, + "error": false, + "fill": 1, + "grid": { + + }, + "id": 4, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 2, + "links": [ + + ], + "nullPointMode": "connected", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [ + + ], + "spaceLength": 10, + "span": 4, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "processes_running{host=~\"$node\"} ", + "intervalFactor": 2, + "legendFormat": "{{host}}", + "refId": "A", + "step": 60 + } + ], + "thresholds": [ + + ], + "timeFrom": null, + "timeShift": null, + "title": "Process running", + "tooltip": { + "msResolution": false, + "shared": true, + "sort": 0, + "value_type": "cumulative" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [ + + ] + }, + "yaxes": [ + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + 
"label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ] + }, + { + "aliasColors": { + + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "prometheus - Juju generated source", + "editable": true, + "error": false, + "fill": 1, + "grid": { + + }, + "id": 5, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 2, + "links": [ + + ], + "nullPointMode": "connected", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [ + + ], + "spaceLength": 10, + "span": 4, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "processes_stopped{host=~\"$node\"} ", + "intervalFactor": 2, + "legendFormat": "{{host}}", + "refId": "A", + "step": 60 + } + ], + "thresholds": [ + + ], + "timeFrom": null, + "timeShift": null, + "title": "Process stopped", + "tooltip": { + "msResolution": false, + "shared": true, + "sort": 0, + "value_type": "cumulative" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [ + + ] + }, + "yaxes": [ + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ] + }, + { + "aliasColors": { + + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "prometheus - Juju generated source", + "editable": true, + "error": false, + "fill": 1, + "grid": { + + }, + "id": 6, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 2, + "links": [ + + ], + "nullPointMode": "connected", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [ 
+ + ], + "spaceLength": 10, + "span": 4, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "processes_paging{host=~\"$node\"} ", + "intervalFactor": 2, + "legendFormat": "{{host}}", + "refId": "A", + "step": 60 + } + ], + "thresholds": [ + + ], + "timeFrom": null, + "timeShift": null, + "title": "Process waiting", + "tooltip": { + "msResolution": false, + "shared": true, + "sort": 0, + "value_type": "cumulative" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [ + + ] + }, + "yaxes": [ + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ] + } + ], + "repeat": null, + "repeatIteration": null, + "repeatRowId": null, + "showTitle": true, + "title": "Processes statistics", + "titleSize": "h6" + }, + { + "collapse": false, + "height": "250px", + "panels": [ + { + "aliasColors": { + + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "prometheus - Juju generated source", + "editable": true, + "error": false, + "fill": 1, + "grid": { + + }, + "id": 7, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 2, + "links": [ + + ], + "nullPointMode": "connected", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [ + + ], + "spaceLength": 10, + "span": 4, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "cpu_usage_steal{cpu=\"cpu-total\", host=~\"$node\"}", + "intervalFactor": 2, + "legendFormat": "{{host}}", + "refId": "A", + "step": 2 + } + ], + "thresholds": [ + + ], + "timeFrom": null, + "timeShift": null, + "title": "CPU steal", + "tooltip": { + "msResolution": false, + "shared": true, + "sort": 0, + 
"value_type": "cumulative" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [ + + ] + }, + "yaxes": [ + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ] + }, + { + "aliasColors": { + + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "prometheus - Juju generated source", + "editable": true, + "error": false, + "fill": 1, + "grid": { + + }, + "id": 8, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 2, + "links": [ + + ], + "nullPointMode": "connected", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [ + + ], + "spaceLength": 10, + "span": 4, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "cpu_usage_iowait{cpu=\"cpu-total\", host=~\"$node\"}", + "intervalFactor": 2, + "legendFormat": "{{host}}", + "refId": "A", + "step": 2 + } + ], + "thresholds": [ + + ], + "timeFrom": null, + "timeShift": null, + "title": "CPU wait", + "tooltip": { + "msResolution": false, + "shared": true, + "sort": 0, + "value_type": "cumulative" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [ + + ] + }, + "yaxes": [ + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ] + }, + { + "aliasColors": { + + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "prometheus - Juju generated source", + "editable": true, + "error": false, + "fill": 1, + "grid": { + + }, + "id": 9, + "legend": { + 
"avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 2, + "links": [ + + ], + "nullPointMode": "connected", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [ + + ], + "spaceLength": 10, + "span": 4, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "cpu_usage_user{cpu=\"cpu-total\", host=~\"$node\"}", + "intervalFactor": 2, + "legendFormat": "{{host}}", + "refId": "A", + "step": 2 + } + ], + "thresholds": [ + + ], + "timeFrom": null, + "timeShift": null, + "title": "CPU user", + "tooltip": { + "msResolution": false, + "shared": true, + "sort": 0, + "value_type": "cumulative" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [ + + ] + }, + "yaxes": [ + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ] + }, + { + "aliasColors": { + + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "prometheus - Juju generated source", + "editable": true, + "error": false, + "fill": 1, + "grid": { + + }, + "id": 10, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 2, + "links": [ + + ], + "nullPointMode": "connected", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [ + + ], + "spaceLength": 10, + "span": 4, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "cpu_usage_system{cpu=\"cpu-total\", host=~\"$node\"}", + "intervalFactor": 2, + "legendFormat": "{{host}}", + "refId": "A", + "step": 2 + } + ], + "thresholds": [ + + ], + "timeFrom": null, + "timeShift": 
null, + "title": "CPU system", + "tooltip": { + "msResolution": false, + "shared": true, + "sort": 0, + "value_type": "cumulative" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [ + + ] + }, + "yaxes": [ + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ] + }, + { + "aliasColors": { + + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "prometheus - Juju generated source", + "editable": true, + "error": false, + "fill": 1, + "grid": { + + }, + "id": 11, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 2, + "links": [ + + ], + "nullPointMode": "connected", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [ + + ], + "spaceLength": 10, + "span": 4, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "cpu_usage_softirq{cpu=\"cpu-total\", host=~\"$node\"}", + "intervalFactor": 2, + "legendFormat": "{{host}}", + "refId": "A", + "step": 2 + } + ], + "thresholds": [ + + ], + "timeFrom": null, + "timeShift": null, + "title": "CPU soft interrupts", + "tooltip": { + "msResolution": false, + "shared": true, + "sort": 0, + "value_type": "cumulative" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [ + + ] + }, + "yaxes": [ + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ] + }, + { + "aliasColors": { + + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "prometheus - Juju 
generated source", + "editable": true, + "error": false, + "fill": 1, + "grid": { + + }, + "id": 12, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 2, + "links": [ + + ], + "nullPointMode": "connected", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [ + + ], + "spaceLength": 10, + "span": 4, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "cpu_usage_irq{cpu=\"cpu-total\", host=~\"$node\"}", + "intervalFactor": 2, + "legendFormat": "{{host}}", + "refId": "A", + "step": 2 + } + ], + "thresholds": [ + + ], + "timeFrom": null, + "timeShift": null, + "title": "CPU interrupts", + "tooltip": { + "msResolution": false, + "shared": true, + "sort": 0, + "value_type": "cumulative" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [ + + ] + }, + "yaxes": [ + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ] + }, + { + "aliasColors": { + + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "prometheus - Juju generated source", + "editable": true, + "error": false, + "fill": 1, + "grid": { + + }, + "id": 13, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 2, + "links": [ + + ], + "nullPointMode": "connected", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [ + + ], + "spaceLength": 10, + "span": 6, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "cpu_usage_nice{cpu=\"cpu-total\", host=~\"$node\"}", + "intervalFactor": 2, + 
"legendFormat": "{{host}}", + "refId": "A", + "step": 2 + } + ], + "thresholds": [ + + ], + "timeFrom": null, + "timeShift": null, + "title": "CPU nice", + "tooltip": { + "msResolution": false, + "shared": true, + "sort": 0, + "value_type": "cumulative" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [ + + ] + }, + "yaxes": [ + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ] + }, + { + "aliasColors": { + + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "prometheus - Juju generated source", + "editable": true, + "error": false, + "fill": 1, + "grid": { + + }, + "id": 14, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 2, + "links": [ + + ], + "nullPointMode": "connected", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [ + + ], + "spaceLength": 10, + "span": 6, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "cpu_usage_idle{cpu=\"cpu-total\", host=~\"$node\"}", + "intervalFactor": 2, + "legendFormat": "{{host}}", + "refId": "A", + "step": 2 + } + ], + "thresholds": [ + + ], + "timeFrom": null, + "timeShift": null, + "title": "CPU Idle", + "tooltip": { + "msResolution": false, + "shared": true, + "sort": 0, + "value_type": "cumulative" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [ + + ] + }, + "yaxes": [ + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ] + } + ], + "repeat": 
null, + "repeatIteration": null, + "repeatRowId": null, + "showTitle": true, + "title": "CPU usage", + "titleSize": "h6" + }, + { + "collapse": false, + "height": "250px", + "panels": [ + { + "aliasColors": { + + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "prometheus - Juju generated source", + "editable": true, + "error": false, + "fill": 1, + "grid": { + + }, + "id": 15, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 2, + "links": [ + + ], + "nullPointMode": "null", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [ + + ], + "spaceLength": 10, + "span": 6, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "mem_cached{host=~\"$node\"}", + "intervalFactor": 2, + "legendFormat": "{{host}}", + "refId": "A", + "step": 2 + } + ], + "thresholds": [ + + ], + "timeFrom": null, + "timeShift": null, + "title": "Mem cached", + "tooltip": { + "msResolution": false, + "shared": true, + "sort": 0, + "value_type": "cumulative" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [ + + ] + }, + "yaxes": [ + { + "format": "bytes", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ] + }, + { + "aliasColors": { + + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "prometheus - Juju generated source", + "editable": true, + "error": false, + "fill": 1, + "grid": { + + }, + "id": 16, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 2, + "links": [ + + ], + "nullPointMode": "null", + "percentage": false, + 
"pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [ + + ], + "spaceLength": 10, + "span": 6, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "mem_buffered{host=~\"$node\"}", + "intervalFactor": 2, + "legendFormat": "{{host}}", + "refId": "A", + "step": 2 + } + ], + "thresholds": [ + + ], + "timeFrom": null, + "timeShift": null, + "title": "Mem buffered", + "tooltip": { + "msResolution": false, + "shared": true, + "sort": 0, + "value_type": "cumulative" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [ + + ] + }, + "yaxes": [ + { + "format": "bytes", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ] + }, + { + "aliasColors": { + + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "prometheus - Juju generated source", + "editable": true, + "error": false, + "fill": 1, + "grid": { + + }, + "id": 17, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 2, + "links": [ + + ], + "nullPointMode": "null", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [ + + ], + "spaceLength": 10, + "span": 6, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "mem_free{host=~\"$node\"}", + "intervalFactor": 2, + "legendFormat": "{{host}}", + "refId": "A", + "step": 2 + } + ], + "thresholds": [ + + ], + "timeFrom": null, + "timeShift": null, + "title": "Mem free", + "tooltip": { + "msResolution": false, + "shared": true, + "sort": 0, + "value_type": "cumulative" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [ + + ] + }, + "yaxes": [ + { + 
"format": "bytes", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ] + }, + { + "aliasColors": { + + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "prometheus - Juju generated source", + "editable": true, + "error": false, + "fill": 1, + "grid": { + + }, + "id": 18, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 2, + "links": [ + + ], + "nullPointMode": "null", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [ + + ], + "spaceLength": 10, + "span": 6, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "mem_used{host=~\"$node\"}", + "intervalFactor": 2, + "legendFormat": "{{host}}", + "refId": "A", + "step": 2 + } + ], + "thresholds": [ + + ], + "timeFrom": null, + "timeShift": null, + "title": "Mem used", + "tooltip": { + "msResolution": false, + "shared": true, + "sort": 0, + "value_type": "cumulative" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [ + + ] + }, + "yaxes": [ + { + "format": "bytes", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ] + } + ], + "repeat": null, + "repeatIteration": null, + "repeatRowId": null, + "showTitle": true, + "title": "Memory usage", + "titleSize": "h6" + }, + { + "collapse": false, + "height": "250px", + "panels": [ + { + "aliasColors": { + + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "prometheus - Juju generated source", + "editable": true, + "error": false, + "fill": 1, + "grid": { + + }, + "id": 19, + "legend": { + "avg": 
false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 2, + "links": [ + + ], + "nullPointMode": "null", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [ + + ], + "spaceLength": 10, + "span": 6, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "rate(diskio_reads{name=~\"$disk\", host=~\"$node\"}[5m])", + "intervalFactor": 2, + "legendFormat": "Read {{host}} {{name}}", + "refId": "A", + "step": 2 + }, + { + "expr": "rate(diskio_writes{name=~\"$disk\", host=~\"$node\"}[5m])", + "intervalFactor": 2, + "legendFormat": "Write {{host}} {{name}}", + "refId": "B", + "step": 2 + } + ], + "thresholds": [ + + ], + "timeFrom": null, + "timeShift": null, + "title": "Disk read/s and write/s", + "tooltip": { + "msResolution": false, + "shared": true, + "sort": 0, + "value_type": "cumulative" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [ + + ] + }, + "yaxes": [ + { + "format": "Bps", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ] + }, + { + "aliasColors": { + + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "prometheus - Juju generated source", + "editable": true, + "error": false, + "fill": 1, + "grid": { + + }, + "id": 20, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 2, + "links": [ + + ], + "nullPointMode": "null", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [ + + ], + "spaceLength": 10, + "span": 6, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": 
"rate(diskio_read_bytes{name=~\"$disk\", host=~\"$node\"}[5m])", + "intervalFactor": 2, + "legendFormat": "Read {{host}} {{name}}", + "refId": "A", + "step": 2 + }, + { + "expr": "rate(diskio_write_bytes{name=~\"$disk\", host=~\"$node\"}[5m])", + "intervalFactor": 2, + "legendFormat": "Write {{host}} {{name}}", + "refId": "B", + "step": 2 + } + ], + "thresholds": [ + + ], + "timeFrom": null, + "timeShift": null, + "title": "Disk read/s and write/s", + "tooltip": { + "msResolution": false, + "shared": true, + "sort": 0, + "value_type": "cumulative" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [ + + ] + }, + "yaxes": [ + { + "format": "Bps", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ] + } + ], + "repeat": null, + "repeatIteration": null, + "repeatRowId": null, + "showTitle": true, + "title": "Disk statistics", + "titleSize": "h6" + }, + { + "collapse": false, + "height": "250px", + "panels": [ + { + "aliasColors": { + + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "prometheus - Juju generated source", + "editable": true, + "error": false, + "fill": 1, + "grid": { + + }, + "id": 22, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 2, + "links": [ + + ], + "nullPointMode": "null", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [ + + ], + "spaceLength": 10, + "span": 12, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "rate(net_bytes_sent{interface=~\"$interface\", host=~\"$node\"}[5m])*8", + "intervalFactor": 2, + "legendFormat": "Out {{host}} {{interface}}", + "refId": "A", + "step": 2 + }, + { + "expr": 
"rate(net_bytes_recv{interface=~\"$interface\", host=~\"$node\"}[5m])*8", + "intervalFactor": 2, + "legendFormat": "In {{host}} {{interface}}", + "refId": "B", + "step": 2 + } + ], + "thresholds": [ + + ], + "timeFrom": null, + "timeShift": null, + "title": "Network load", + "tooltip": { + "msResolution": false, + "shared": true, + "sort": 0, + "value_type": "cumulative" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [ + + ] + }, + "yaxes": [ + { + "format": "bps", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ] + } + ], + "repeat": null, + "repeatIteration": null, + "repeatRowId": null, + "showTitle": true, + "title": "Network", + "titleSize": "h6" + } + ], + "schemaVersion": 14, + "style": "dark", + "tags": [ + "Juju" + ], + "templating": { + "list": [ + { + "allValue": ".*", + "current": { + "text": "All", + "value": "$__all" + }, + "datasource": "prometheus - Juju generated source", + "hide": 0, + "includeAll": true, + "label": null, + "multi": false, + "name": "node", + "options": [ + + ], + "query": "label_values(host)", + "refresh": 1, + "regex": "", + "sort": 0, + "tagValuesQuery": "", + "tags": [ + + ], + "tagsQuery": "", + "type": "query", + "useTags": false + } + ] + }, + "time": { + "from": "now-5m", + "to": "now" + }, + "timepicker": { + "refresh_intervals": [ + "5s", + "10s", + "30s", + "1m", + "5m", + "15m", + "30m", + "1h", + "2h", + "1d" + ], + "time_options": [ + "5m", + "15m", + "1h", + "6h", + "12h", + "24h", + "2d", + "7d", + "30d" + ] + }, + "timezone": "browser", + "title": "Node Metrics (via Telegraf)", + "version": 4 + }, + "overwrite": false +} diff --git a/kubernetes-control-plane/templates/keystone-api-server-webhook.yaml b/kubernetes-control-plane/templates/keystone-api-server-webhook.yaml new file mode 100644 index 
0000000..684c3ee --- /dev/null +++ b/kubernetes-control-plane/templates/keystone-api-server-webhook.yaml @@ -0,0 +1,16 @@ +apiVersion: v1 +kind: Config +preferences: {} +clusters: + - cluster: + server: https://{{ keystone_service_cluster_ip }}:8443/webhook + insecure-skip-tls-verify: true + name: webhook +users: + - name: webhook +contexts: + - context: + cluster: webhook + user: webhook + name: webhook +current-context: webhook diff --git a/kubernetes-control-plane/templates/kube-keystone.sh b/kubernetes-control-plane/templates/kube-keystone.sh new file mode 100644 index 0000000..b9bd5bd --- /dev/null +++ b/kubernetes-control-plane/templates/kube-keystone.sh @@ -0,0 +1,51 @@ +# Copyright 2018 The Kubernetes Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +# Replace with your public address and port for keystone +export OS_AUTH_URL="{{ protocol }}://{{ address }}:{{ port }}/v{{ version }}" +#export OS_PROJECT_NAME=k8s +#export OS_DOMAIN_NAME=k8s +#export OS_USERNAME=myuser +#export OS_PASSWORD=secure_pw +get_keystone_token() { + data='{ + "auth": { + "identity": { + "methods": ["password"], + "password": { + "user": { + "name": "'"${OS_USERNAME}"'", + "domain": { "name": "'"${OS_DOMAIN_NAME}"'" }, + "password": "'"${OS_PASSWORD}"'" + } + } + }, + "scope": { + "project": { + "domain": { + "name": "'"${OS_DOMAIN_NAME}"'" + }, + "name": "'"${OS_PROJECT_NAME}"'" + } + } + } +}' + token=$(curl -s -i -H "Content-Type: application/json" -d "${data}" "${OS_AUTH_URL}/auth/tokens" |grep 'X-Subject-Token') + if [ -z "$token" ]; then + echo "Invalid authentication information" + else + echo $(echo ${token} | awk -F ': ' '{print $2}' | sed -e 's/[[:space:]]*$//') + fi +} +echo "Function get_keystone_token created. Type get_keystone_token in order to generate a login token for the Kubernetes dashboard." diff --git a/kubernetes-control-plane/templates/kube-proxy-iptables-fix.sh b/kubernetes-control-plane/templates/kube-proxy-iptables-fix.sh new file mode 100644 index 0000000..a6d219e --- /dev/null +++ b/kubernetes-control-plane/templates/kube-proxy-iptables-fix.sh @@ -0,0 +1,9 @@ +#!/bin/sh + +# add the chain, note that adding twice is ok as it will just error. +/sbin/iptables -t nat -N KUBE-MARK-DROP + +# need to check the creation of the rule to ensure we only create it once. +if ! 
/sbin/iptables -t nat -C KUBE-MARK-DROP -j MARK --set-xmark 0x8000/0x8000 &> /dev/null; then + /sbin/iptables -t nat -A KUBE-MARK-DROP -j MARK --set-xmark 0x8000/0x8000 +fi diff --git a/kubernetes-control-plane/templates/nagios_plugin.py b/kubernetes-control-plane/templates/nagios_plugin.py new file mode 100644 index 0000000..1b2a329 --- /dev/null +++ b/kubernetes-control-plane/templates/nagios_plugin.py @@ -0,0 +1,49 @@ +#!/usr/bin/env python3 + +# Copyright (C) 2019 Canonical Ltd. + +import nagios_plugin3 +import socket +from subprocess import check_output + +snap_resources = ['kubectl', 'kube-apiserver', 'kube-controller-manager', + 'kube-scheduler', 'cdk-addons', 'kube-proxy'] + + +def check_snaps_installed(): + """Confirm the snaps are installed, raise an error if not""" + for snap_name in snap_resources: + cmd = ['snap', 'list', snap_name] + try: + check_output(cmd).decode('UTF-8') + except Exception: + msg = '{} snap is not installed'.format(snap_name) + raise nagios_plugin3.CriticalError(msg) + + +def test_connection(host, port): + try: + s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) + s.settimeout(1) + s.connect((host, int(port))) + s.shutdown(socket.SHUT_RDWR) + finally: + s.close() + + +def verify_remote_connection_to_apiserver(): + try: + test_connection(socket.gethostbyname(socket.gethostname()), 6443) + except Exception: + raise nagios_plugin3.CriticalError("Unable to reach " + "API server on remote port") + + +def main(): + nagios_plugin3.try_check(check_snaps_installed) + nagios_plugin3.try_check(verify_remote_connection_to_apiserver) + print("OK - API server is up and accessible") + + +if __name__ == "__main__": + main() diff --git a/kubernetes-control-plane/templates/prometheus/k8s-api-endpoints.yaml.j2 b/kubernetes-control-plane/templates/prometheus/k8s-api-endpoints.yaml.j2 new file mode 100644 index 0000000..83625e6 --- /dev/null +++ b/kubernetes-control-plane/templates/prometheus/k8s-api-endpoints.yaml.j2 @@ -0,0 +1,16 @@ +job_name: 
'k8s-api-endpoints' +kubernetes_sd_configs: +- api_server: https://{{k8s_api_address}}:{{k8s_api_port}} + role: endpoints + tls_config: + ca_file: __placeholder__ + bearer_token: {{k8s_token}} +scrape_interval: 30s +scheme: https +tls_config: + ca_file: __placeholder__ +bearer_token: {{k8s_token}} +relabel_configs: +- source_labels: [__meta_kubernetes_namespace, __meta_kubernetes_service_name, __meta_kubernetes_endpoint_port_name] + action: keep + regex: default;kubernetes;https diff --git a/kubernetes-control-plane/templates/prometheus/kube-state-metrics.yaml.j2 b/kubernetes-control-plane/templates/prometheus/kube-state-metrics.yaml.j2 new file mode 100644 index 0000000..a0692fe --- /dev/null +++ b/kubernetes-control-plane/templates/prometheus/kube-state-metrics.yaml.j2 @@ -0,0 +1,10 @@ +job_name: 'kube-state-metrics' +scrape_interval: 30s +scheme: https +tls_config: + ca_file: __placeholder__ +bearer_token: {{k8s_token}} +metrics_path: /api/v1/namespaces/kube-system/services/kube-state-metrics:8080/proxy/metrics +static_configs: + - targets: + - {{k8s_api_address}}:{{k8s_api_port}} diff --git a/kubernetes-control-plane/templates/prometheus/kube-state-telemetry.yaml.j2 b/kubernetes-control-plane/templates/prometheus/kube-state-telemetry.yaml.j2 new file mode 100644 index 0000000..799ef39 --- /dev/null +++ b/kubernetes-control-plane/templates/prometheus/kube-state-telemetry.yaml.j2 @@ -0,0 +1,10 @@ +job_name: 'kube-state-telemetry' +scrape_interval: 30s +scheme: https +tls_config: + ca_file: __placeholder__ +bearer_token: {{k8s_token}} +metrics_path: /api/v1/namespaces/kube-system/services/kube-state-metrics:8081/proxy/metrics +static_configs: + - targets: + - {{k8s_api_address}}:{{k8s_api_port}} diff --git a/kubernetes-control-plane/templates/prometheus/kubernetes-cadvisor.yaml.j2 b/kubernetes-control-plane/templates/prometheus/kubernetes-cadvisor.yaml.j2 new file mode 100644 index 0000000..0ff8ed5 --- /dev/null +++ 
b/kubernetes-control-plane/templates/prometheus/kubernetes-cadvisor.yaml.j2 @@ -0,0 +1,21 @@ +job_name: 'kubernetes-cadvisor' +kubernetes_sd_configs: +- api_server: https://{{k8s_api_address}}:{{k8s_api_port}} + role: node + tls_config: + ca_file: __placeholder__ + bearer_token: {{k8s_token}} +scrape_interval: 30s +scheme: https +tls_config: + ca_file: __placeholder__ +bearer_token: {{k8s_token}} +relabel_configs: +- action: labelmap + regex: __meta_kubernetes_node_label_(.+) +- target_label: __address__ + replacement: {{k8s_api_address}}:{{k8s_api_port}} +- source_labels: [__meta_kubernetes_node_name] + regex: (.+) + target_label: __metrics_path__ + replacement: /api/v1/nodes/$1/proxy/metrics/cadvisor diff --git a/kubernetes-control-plane/templates/prometheus/kubernetes-nodes.yaml.j2 b/kubernetes-control-plane/templates/prometheus/kubernetes-nodes.yaml.j2 new file mode 100644 index 0000000..32ea077 --- /dev/null +++ b/kubernetes-control-plane/templates/prometheus/kubernetes-nodes.yaml.j2 @@ -0,0 +1,21 @@ +job_name: 'kubernetes-nodes' +kubernetes_sd_configs: +- api_server: https://{{k8s_api_address}}:{{k8s_api_port}} + role: node + tls_config: + ca_file: __placeholder__ + bearer_token: {{k8s_token}} +scrape_interval: 30s +scheme: https +tls_config: + ca_file: __placeholder__ +bearer_token: {{k8s_token}} +relabel_configs: +- action: labelmap + regex: __meta_kubernetes_node_label_(.+) +- target_label: __address__ + replacement: {{k8s_api_address}}:{{k8s_api_port}} +- source_labels: [__meta_kubernetes_node_name] + regex: (.+) + target_label: __metrics_path__ + replacement: /api/v1/nodes/$1/proxy/metrics diff --git a/kubernetes-control-plane/templates/rbac-pod-security-policy.yaml b/kubernetes-control-plane/templates/rbac-pod-security-policy.yaml new file mode 100644 index 0000000..d95eb4d --- /dev/null +++ b/kubernetes-control-plane/templates/rbac-pod-security-policy.yaml @@ -0,0 +1,55 @@ +apiVersion: policy/v1beta1 +kind: PodSecurityPolicy +metadata: + name: 
privileged + annotations: + seccomp.security.alpha.kubernetes.io/allowedProfileNames: '*' +spec: + privileged: true + allowPrivilegeEscalation: true + allowedCapabilities: + - '*' + volumes: + - '*' + hostNetwork: true + hostPorts: + - min: 0 + max: 65535 + hostIPC: true + hostPID: true + runAsUser: + rule: 'RunAsAny' + seLinux: + rule: 'RunAsAny' + supplementalGroups: + rule: 'RunAsAny' + fsGroup: + rule: 'RunAsAny' + +--- + +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: privileged +rules: +- apiGroups: ['policy'] + resources: ['podsecuritypolicies'] + verbs: ['use'] + resourceNames: + - privileged + +--- + +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: privileged +roleRef: + kind: ClusterRole + name: privileged + apiGroup: rbac.authorization.k8s.io +subjects: +- kind: Group + name: system:serviceaccounts + apiGroup: rbac.authorization.k8s.io \ No newline at end of file diff --git a/kubernetes-control-plane/templates/rbac-proxy.yaml b/kubernetes-control-plane/templates/rbac-proxy.yaml new file mode 100644 index 0000000..7b570ca --- /dev/null +++ b/kubernetes-control-plane/templates/rbac-proxy.yaml @@ -0,0 +1,24 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: proxy-clusterrole-cdk-{{ juju_application }} +rules: +- apiGroups: [""] + resources: + - nodes/metrics + - nodes/proxy + verbs: ["get", "list", "watch", "create", "delete"] +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: proxy-role-binding-cdk-{{ juju_application }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: proxy-clusterrole-cdk-{{ juju_application }} +subjects: {% for proxy_user in proxy_users %} +- apiGroup: rbac.authorization.k8s.io + kind: User + name: {{ proxy_user }} +{% endfor %} diff --git a/kubernetes-control-plane/templates/service-always-restart.systemd-229.conf 
b/kubernetes-control-plane/templates/service-always-restart.systemd-229.conf new file mode 100644 index 0000000..d5cf4b1 --- /dev/null +++ b/kubernetes-control-plane/templates/service-always-restart.systemd-229.conf @@ -0,0 +1,5 @@ +[Unit] +StartLimitInterval=0 + +[Service] +RestartSec=10 diff --git a/kubernetes-control-plane/templates/service-always-restart.systemd-latest.conf b/kubernetes-control-plane/templates/service-always-restart.systemd-latest.conf new file mode 100644 index 0000000..3dd37ab --- /dev/null +++ b/kubernetes-control-plane/templates/service-always-restart.systemd-latest.conf @@ -0,0 +1,5 @@ +[Unit] +StartLimitIntervalSec=0 + +[Service] +RestartSec=10 diff --git a/kubernetes-control-plane/templates/service-iptables-fix.service b/kubernetes-control-plane/templates/service-iptables-fix.service new file mode 100644 index 0000000..913aed3 --- /dev/null +++ b/kubernetes-control-plane/templates/service-iptables-fix.service @@ -0,0 +1,11 @@ +[Unit] +Description=Apply iptables rule for KUBE-MARK-DROP +After=network.target + +[Service] +Type=oneshot +ExecStart=/usr/local/bin/kube-proxy-iptables-fix.sh +RemainAfterExit=true + +[Install] +WantedBy=multi-user.target diff --git a/kubernetes-control-plane/templates/system-monitoring-rbac-role.yaml b/kubernetes-control-plane/templates/system-monitoring-rbac-role.yaml new file mode 100644 index 0000000..ebbd95e --- /dev/null +++ b/kubernetes-control-plane/templates/system-monitoring-rbac-role.yaml @@ -0,0 +1,29 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: system:monitoring +rules: +- apiGroups: [""] + resources: + - "endpoints" + - "nodes" + - "nodes/proxy" + - "pods" + - "services" + - "services/proxy" + verbs: ["get", "list", "watch"] +- nonResourceURLs: ["/metrics"] + verbs: ["get"] +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: system:monitoring +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: 
system:monitoring +subjects: +- apiGroup: rbac.authorization.k8s.io + kind: User + name: system:monitoring diff --git a/kubernetes-control-plane/templates/vaultlocker-loop@.service b/kubernetes-control-plane/templates/vaultlocker-loop@.service new file mode 100644 index 0000000..8dd0230 --- /dev/null +++ b/kubernetes-control-plane/templates/vaultlocker-loop@.service @@ -0,0 +1,12 @@ +[Install] +RequiredBy=vaultlocker-decrypt@%i.service + +[Unit] +Description=Ensure loop device for VaultLocker %i +Before=vaultlocker-decrypt@%i.service + +[Service] +Type=oneshot +RemainAfterExit=true +EnvironmentFile=/etc/vaultlocker/loop-envs/%i +ExecStart=/sbin/losetup -f ${BACK_FILE} diff --git a/kubernetes-control-plane/templates/vaultlocker.conf.j2 b/kubernetes-control-plane/templates/vaultlocker.conf.j2 new file mode 100644 index 0000000..911a54c --- /dev/null +++ b/kubernetes-control-plane/templates/vaultlocker.conf.j2 @@ -0,0 +1,5 @@ +[vault] +url = {{ vault_url }} +approle = {{ role_id }} +backend = {{ secret_backend }} +secret_id = {{ secret_id }} diff --git a/kubernetes-control-plane/tests/data/ip_addr_json b/kubernetes-control-plane/tests/data/ip_addr_json new file mode 100644 index 0000000..9b10664 --- /dev/null +++ b/kubernetes-control-plane/tests/data/ip_addr_json @@ -0,0 +1,30 @@ +[ + { + "ifname": "ens192", + "operstate": "UP", + "addr_info": [ + { + "local": "10.246.154.77", + "prefixlen": 24, + "metric": 100 + }, + {} + ] + }, + { + "ifname": "lxdbr0", + "operstate": "UP", + "addr_info": [ + { + "local": "10.111.246.1", + "prefixlen": 24 + } + ] + }, + { + "link_index": 4, + "ifname": "veth890e3a36", + "operstate": "UP", + "addr_info": [] + } +] \ No newline at end of file diff --git a/kubernetes-control-plane/tests/functional/conftest.py b/kubernetes-control-plane/tests/functional/conftest.py new file mode 100644 index 0000000..a92e249 --- /dev/null +++ b/kubernetes-control-plane/tests/functional/conftest.py @@ -0,0 +1,4 @@ +import charms.unit_test + + 
+charms.unit_test.patch_reactive() diff --git a/kubernetes-control-plane/tests/functional/test_k8s_common.py b/kubernetes-control-plane/tests/functional/test_k8s_common.py new file mode 100644 index 0000000..4b867e6 --- /dev/null +++ b/kubernetes-control-plane/tests/functional/test_k8s_common.py @@ -0,0 +1,90 @@ +from functools import partial + +import pytest +from unittest import mock +from charms.layer import kubernetes_common + + +class TestCreateKubeConfig: + @pytest.fixture(autouse=True) + def _files(self, tmp_path): + self.cfg_file = tmp_path / "config" + self.ca_file = tmp_path / "ca.crt" + self.ca_file.write_text("foo") + self.ckc = partial( + kubernetes_common.create_kubeconfig, + self.cfg_file, + "server", + self.ca_file, + ) + + def test_guard_clauses(self): + with pytest.raises(ValueError): + self.ckc() + assert not self.cfg_file.exists() + with pytest.raises(ValueError): + self.ckc(token="token", password="password") + assert not self.cfg_file.exists() + with pytest.raises(ValueError): + self.ckc(key="key") + assert not self.cfg_file.exists() + + def test_file_creation(self): + self.ckc(password="password") + assert self.cfg_file.exists() + cfg_data_1 = self.cfg_file.read_text() + assert cfg_data_1 + + def test_idempotency(self): + self.ckc(password="password") + cfg_data_1 = self.cfg_file.read_text() + self.ckc(password="password") + cfg_data_2 = self.cfg_file.read_text() + # Verify that calling w/ the same data keeps the same file contents. 
+ assert cfg_data_2 == cfg_data_1 + + def test_efficient_updates(self): + self.ckc(password="old_password") + cfg_stat_1 = self.cfg_file.stat() + self.ckc(password="old_password") + cfg_stat_2 = self.cfg_file.stat() + self.ckc(password="new_password") + cfg_stat_3 = self.cfg_file.stat() + # Verify that calling with the same data doesn't + # modify the file at all, but that new data does + assert cfg_stat_1.st_mtime == cfg_stat_2.st_mtime < cfg_stat_3.st_mtime + + def test_aws_iam(self): + self.ckc(password="password", aws_iam_cluster_id="aws-cluster") + assert self.cfg_file.exists() + cfg_data_1 = self.cfg_file.read_text() + assert "aws-cluster" in cfg_data_1 + + def test_keystone(self): + self.ckc(password="password", keystone=True) + assert self.cfg_file.exists() + cfg_data_1 = self.cfg_file.read_text() + assert "keystone-user" in cfg_data_1 + assert "exec" in cfg_data_1 + + def test_atomic_updates(self): + self.ckc(password="old_password") + with self.cfg_file.open("rt") as f: + # Perform a write in the middle of reading + self.ckc(password="new_password") + # Read data from existing FH after new data was written + cfg_data_1 = f.read() + # Read updated data + cfg_data_2 = self.cfg_file.read_text() + # Verify that the in-progress read didn't get any of the new data + assert cfg_data_1 != cfg_data_2 + assert "old_password" in cfg_data_1 + assert "new_password" in cfg_data_2 + + @mock.patch("charmhelpers.core.hookenv.network_get", autospec=True) + def test_get_ingress_address(self, network_get): + network_get.return_value = {"ingress-addresses": ["1.2.3.4", "5.6.7.8"]} + ingress = kubernetes_common.get_ingress_address("endpoint-name") + assert ingress == "1.2.3.4" + ingress = kubernetes_common.get_ingress_address("endpoint-name", ["1.2.3.4"]) + assert ingress == "5.6.7.8" diff --git a/kubernetes-control-plane/tests/unit/conftest.py b/kubernetes-control-plane/tests/unit/conftest.py new file mode 100644 index 0000000..5092457 --- /dev/null +++ 
b/kubernetes-control-plane/tests/unit/conftest.py @@ -0,0 +1,36 @@ +import os + +from charms.unit_test import patch_module, identity, MockKV, flags, MockEndpoint + + +ch = patch_module("charmhelpers") +ch.core.hookenv.atexit = identity +ch.core.hookenv.charm_dir.return_value = "charm_dir" +ch.core.unitdata.kv.return_value = MockKV() + +reactive = patch_module("charms.reactive") +reactive.when.return_value = identity +reactive.when_all.return_value = identity +reactive.when_any.return_value = identity +reactive.when_not.return_value = identity +reactive.when_not_all.return_value = identity +reactive.when_none.return_value = identity +reactive.hook.return_value = identity +reactive.set_flag.side_effect = flags.add +reactive.clear_flag.side_effect = flags.discard +reactive.set_state.side_effect = flags.add +reactive.remove_state.side_effect = flags.discard +reactive.toggle_flag.side_effect = lambda f, s: ( + flags.add(f) if s else flags.discard(f) +) +reactive.is_flag_set.side_effect = lambda f: f in flags +reactive.is_state.side_effect = lambda f: f in flags +reactive.get_flags.side_effect = lambda: sorted(flags) +reactive.get_unset_flags.side_effect = lambda *f: sorted(set(f) - flags) + +reactive.Endpoint = MockEndpoint + +os.environ["JUJU_MODEL_UUID"] = "test-1234" +os.environ["JUJU_UNIT_NAME"] = "test/0" +os.environ["JUJU_MACHINE_ID"] = "0" +os.environ["JUJU_AVAILABILITY_ZONE"] = "" diff --git a/kubernetes-control-plane/tests/unit/test_k8s_common.py b/kubernetes-control-plane/tests/unit/test_k8s_common.py new file mode 100644 index 0000000..5e4fc56 --- /dev/null +++ b/kubernetes-control-plane/tests/unit/test_k8s_common.py @@ -0,0 +1,148 @@ +import json +import string +from subprocess import CalledProcessError +from pathlib import Path +from unittest.mock import Mock, patch +from charms.reactive import endpoint_from_flag + +from charms.layer import kubernetes_common as kc + + +def test_token_generator(): + alphanum = string.ascii_letters + string.digits + token = 
kc.token_generator(10) + assert len(token) == 10 + unknown_chars = set(token) - set(alphanum) + assert not unknown_chars + + +def test_get_secret_names(monkeypatch): + monkeypatch.setattr(kc, "kubectl", Mock()) + kc.kubectl.side_effect = [ + CalledProcessError(1, "none"), + FileNotFoundError, + "{}".encode("utf8"), + json.dumps( + { + "items": [ + { + "metadata": {"name": "secret-id"}, + "data": {"username": "dXNlcg=="}, + }, + ], + } + ).encode("utf8"), + ] + assert kc.get_secret_names() == {} + assert kc.get_secret_names() == {} + assert kc.get_secret_names() == {} + assert kc.get_secret_names() == {"user": "secret-id"} + + +def test_generate_rfc1123(): + alphanum = string.ascii_letters + string.digits + token = kc.generate_rfc1123(1000) + assert len(token) == 253 + unknown_chars = set(token) - set(alphanum) + assert not unknown_chars + + +def test_create_secret(monkeypatch): + monkeypatch.setattr(kc, "render", Mock()) + monkeypatch.setattr(kc, "kubectl_manifest", Mock()) + monkeypatch.setattr(kc, "get_secret_names", Mock()) + monkeypatch.setattr(kc, "generate_rfc1123", Mock()) + kc.kubectl_manifest.side_effect = [True, False] + kc.get_secret_names.side_effect = [{"username": "secret-id"}, {}] + kc.generate_rfc1123.return_value = "foo" + assert kc.create_secret("token", "username", "user", "groups") + assert kc.render.call_args[1]["context"] == { + "groups": "Z3JvdXBz", + "password": "dXNlcjo6dG9rZW4=", + "secret_name": "secret-id", + "secret_namespace": "kube-system", + "type": "juju.is/token-auth", + "user": "dXNlcg==", + "username": "dXNlcm5hbWU=", + } + assert not kc.create_secret("token", "username", "user", "groups") + assert kc.render.call_args[1]["context"] == { + "groups": "Z3JvdXBz", + "password": "dXNlcjo6dG9rZW4=", + "secret_name": "auth-user-foo", + "secret_namespace": "kube-system", + "type": "juju.is/token-auth", + "user": "dXNlcg==", + "username": "dXNlcm5hbWU=", + } + + +def test_get_secret_password(monkeypatch): + monkeypatch.setattr(kc, 
"kubectl", Mock()) + monkeypatch.setattr(kc, "Path", Mock()) + monkeypatch.setattr(kc, "yaml", Mock()) + kc.kubectl.side_effect = [ + CalledProcessError(1, "none"), + CalledProcessError(1, "none"), + CalledProcessError(1, "none"), + CalledProcessError(1, "none"), + CalledProcessError(1, "none"), + CalledProcessError(1, "none"), + FileNotFoundError, + json.dumps({}).encode("utf8"), + json.dumps({"items": []}).encode("utf8"), + json.dumps({"items": []}).encode("utf8"), + json.dumps({"items": [{}]}).encode("utf8"), + json.dumps({"items": [{"data": {}}]}).encode("utf8"), + json.dumps( + {"items": [{"data": {"username": "Ym9i", "password": "c2VjcmV0"}}]} + ).encode("utf8"), + json.dumps( + {"items": [{"data": {"username": "dXNlcm5hbWU=", "password": "c2VjcmV0"}}]} + ).encode("utf8"), + ] + kc.yaml.safe_load.side_effect = [ + {}, + {"users": None}, + {"users": []}, + {"users": [{"user": {}}]}, + {"users": [{"user": {"token": "secret"}}]}, + ] + assert kc.get_secret_password("username") is None + assert kc.get_secret_password("admin") is None + assert kc.get_secret_password("admin") is None + assert kc.get_secret_password("admin") is None + assert kc.get_secret_password("admin") is None + assert kc.get_secret_password("admin") == "secret" + assert kc.get_secret_password("username") is None + assert kc.get_secret_password("username") is None + assert kc.get_secret_password("username") is None + assert kc.get_secret_password("username") is None + assert kc.get_secret_password("username") is None + assert kc.get_secret_password("username") is None + assert kc.get_secret_password("username") is None + assert kc.get_secret_password("username") == "secret" + + +@patch("os.listdir") +@patch("os.remove") +@patch("os.symlink") +def test_configure_default_cni(os_symlink, os_remove, os_listdir): + os_listdir.return_value = ["05-default.conflist", "10-cni.conflist"] + cni = endpoint_from_flag("cni.available") + cni.get_config.return_value = { + "cidr": "192.168.0.0/24", + 
"cni-conf-file": "10-cni.conflist", + } + kc.configure_default_cni("test-cni") + os_remove.assert_called_once_with("/etc/cni/net.d/05-default.conflist") + os_symlink.assert_called_once_with( + "10-cni.conflist", "/etc/cni/net.d/05-default.conflist" + ) + + +def test_get_bind_addrs(): + response = Path("tests", "data", "ip_addr_json").read_bytes() + with patch.object(kc, "check_output", return_value=response): + addrs = kc.get_bind_addrs() + assert addrs == ["10.246.154.77"] diff --git a/kubernetes-control-plane/tests/unit/test_layer.py b/kubernetes-control-plane/tests/unit/test_layer.py new file mode 100644 index 0000000..7cc9344 --- /dev/null +++ b/kubernetes-control-plane/tests/unit/test_layer.py @@ -0,0 +1,69 @@ +import pytest +import unittest.mock as mock + +from charms.layer import kubernetes_node_base +from charmhelpers.core import hookenv + + +class TestNodeLabels: + @pytest.fixture(autouse=True) + def setup(self, monkeypatch, request): + self.kube_control = mock.Mock() + self.config = {"labels": f'{request.node.name}="value"'} + + hc = mock.Mock() + hc.side_effect = lambda k=None: self.config[k] if k else self.config + monkeypatch.setattr(hookenv, "config", hc) + + self.hook_log = mock.Mock() + monkeypatch.setattr(hookenv, "log", self.hook_log) + + hsn = mock.Mock(return_value="kubernetes-control-plane") + monkeypatch.setattr(hookenv, "service_name", hsn) + + gnn = mock.Mock(return_value="the-node") + monkeypatch.setattr(kubernetes_node_base, "get_node_name", gnn) + + mock_call = self.call = mock.Mock(return_value=0) + monkeypatch.setattr(kubernetes_node_base, "call", mock_call) + + self.base_node_cmd = [ + "kubectl", + "--kubeconfig=/path/to/kube/config", + "label", + "node", + "the-node", + ] + + def test_label_add(self, request): + label_maker = kubernetes_node_base.LabelMaker("/path/to/kube/config") + label_maker.apply_node_labels() + + call_set = [ + mock.call(self.base_node_cmd + expected) + for expected in [ + [f'{request.node.name}="value"', 
"--overwrite"], + ["juju-application=kubernetes-control-plane", "--overwrite"], + ["juju.io/cloud-"], + ] + ] + self.call.assert_has_calls(call_set, any_order=False) + + def test_invalid_label(self): + self.config = {"labels": "too=many=equals not_enough_equals"} + label_maker = kubernetes_node_base.LabelMaker("/path/to/kube/config") + label_maker.apply_node_labels() + call_set = [ + mock.call(self.base_node_cmd + expected) + for expected in [ + ["juju-application=kubernetes-control-plane", "--overwrite"], + ["juju.io/cloud-"], + ] + ] + self.call.assert_has_calls(call_set, any_order=False) + + call_set = [ + mock.call("Skipping malformed option: too=many=equals."), + mock.call("Skipping malformed option: not_enough_equals."), + ] + self.hook_log.assert_has_calls(call_set, any_order=False) diff --git a/kubernetes-control-plane/tests/unit/test_reactive.py b/kubernetes-control-plane/tests/unit/test_reactive.py new file mode 100644 index 0000000..6fae90d --- /dev/null +++ b/kubernetes-control-plane/tests/unit/test_reactive.py @@ -0,0 +1,99 @@ +from unittest.mock import patch, Mock + +import pytest + +from reactive.vault_kv import update_app_kv_hashes +from charms.layer.vault_kv import VaultAppKV + + +@pytest.fixture() +def mock_vault_config(): + with patch( + "charms.layer.vault_kv.get_vault_config", + return_value={ + "secret_backend": "charm-unit-test", + "vault_url": "http://vault", + "role_id": 1234, + "secret_id": "super-secret", + }, + ) as vc: + yield vc + + +@pytest.fixture() +def destroy_vault_kv(): + """Teardown singleton instance created in each unit test.""" + yield + del VaultAppKV._singleton_instance + + +@patch("hvac.Client", autospec=True) +@patch("charmhelpers.core.hookenv.local_unit", Mock(return_value="unit-test/0")) +@patch("charmhelpers.core.hookenv.is_leader", Mock(return_value=True)) +@patch("charmhelpers.core.hookenv.leader_set") +def test_update_app_kv_hashes_leader( + mock_leader_set, mock_hvac_client, mock_vault_config, destroy_vault_kv +): 
+ def mock_read(path): + if path == "charm-unit-test/kv/app": + return dict(data={"tested-key": "tested-value"}) + elif path == "charm-unit-test/kv/app-hashes/0": + return {} + + client = mock_hvac_client.return_value + client.read.side_effect = mock_read + + # -------------------------------- + # exit #1 + # Both leader_set and update_hashes should be executed + update_app_kv_hashes() + client.write.assert_called_once_with( + "charm-unit-test/kv/app-hashes/0", + **{"tested-key": "b40d0066377d3ec7015ab9f498699940"} + ) + mock_leader_set.assert_called_once() + + # -------------------------------- + # exit #2 + # Neither leader_set nor update_hashes should be executed + mock_leader_set.reset_mock() + client.write.reset_mock() + update_app_kv_hashes() + client.write.assert_not_called() + mock_leader_set.assert_not_called() + + +@patch("hvac.Client", autospec=True) +@patch("charmhelpers.core.hookenv.local_unit", Mock(return_value="unit-test/0")) +@patch("charmhelpers.core.hookenv.is_leader", Mock(return_value=False)) +@patch("charmhelpers.core.hookenv.leader_set") +def test_update_app_kv_hashes_follower( + mock_leader_set, mock_hvac_client, mock_vault_config, destroy_vault_kv +): + def mock_read(path): + if path == "charm-unit-test/kv/app": + return dict(data={"tested-key": "tested-value"}) + elif path == "charm-unit-test/kv/app-hashes/0": + return {} + + client = mock_hvac_client.return_value + client.read.side_effect = mock_read + + # -------------------------------- + # exit #1 + # Only update_hashes should be executed, not leader_set + update_app_kv_hashes() + client.write.assert_called_once_with( + "charm-unit-test/kv/app-hashes/0", + **{"tested-key": "b40d0066377d3ec7015ab9f498699940"} + ) + mock_leader_set.assert_not_called() + + # -------------------------------- + # exit #2 + # Neither leader_set nor update_hashes should be executed + mock_leader_set.reset_mock() + client.write.reset_mock() + update_app_kv_hashes() + client.write.assert_not_called() + 
mock_leader_set.assert_not_called() diff --git a/kubernetes-control-plane/tox.ini b/kubernetes-control-plane/tox.ini new file mode 100644 index 0000000..805089a --- /dev/null +++ b/kubernetes-control-plane/tox.ini @@ -0,0 +1,18 @@ +[tox] +envlist = py3 +skipsdist = true + +[testenv] +basepython=python3 +envdir={toxworkdir}/py3 +deps= + ipdb + pytest + charms.reactive + pydoc-markdown + # needed to prevent apt installs during import + netifaces + psutil + +[testenv:docs] +commands=python make_docs diff --git a/kubernetes-control-plane/version b/kubernetes-control-plane/version new file mode 100644 index 0000000..a43c25d --- /dev/null +++ b/kubernetes-control-plane/version @@ -0,0 +1 @@ +1.24+ck1 \ No newline at end of file diff --git a/kubernetes-control-plane/wheelhouse.txt b/kubernetes-control-plane/wheelhouse.txt new file mode 100644 index 0000000..22bb863 --- /dev/null +++ b/kubernetes-control-plane/wheelhouse.txt @@ -0,0 +1,49 @@ +# layer:basic +# pip is pinned to <19.0 to avoid https://github.com/pypa/pip/issues/6164 +# even with installing setuptools before upgrading pip ends up with pip seeing +# the older setuptools at the system level if include_system_packages is true +pip>=18.1,<19.0;python_version < '3.8' +pip;python_version >= '3.8' +# pin Jinja2, PyYAML and MarkupSafe to the last versions supporting python 3.5 +# for trusty +Jinja2==2.10;python_version >= '3.0' and python_version <= '3.4' # py3 trusty +Jinja2==2.11;python_version == '2.7' or python_version == '3.5' # py27, py35 +Jinja2;python_version >= '3.6' # py36 and on + +PyYAML==5.2;python_version >= '3.0' and python_version <= '3.4' # py3 trusty +PyYAML<5.4;python_version == '2.7' or python_version >= '3.5' # all else + +MarkupSafe<2.0.0;python_version < '3.6' +MarkupSafe<2.1.0;python_version == '3.6' # Just for python 3.6 +MarkupSafe;python_version >= '3.7' # newer pythons + +setuptools<42;python_version < '3.8' +setuptools;python_version >= '3.8' +setuptools-scm<=1.17.0;python_version < '3.8' 
+setuptools-scm;python_version >= '3.8' +flit_core;python_version >= '3.8' +charmhelpers>=0.4.0,<2.0.0 +charms.reactive>=0.1.0,<2.0.0 +wheel<0.34;python_version < '3.8' +wheel;python_version >= '3.8' +# pin netaddr to avoid pulling importlib-resources +netaddr<=0.7.19 + +# layer:snap +# Newer versions of tenacity rely on `typing` which is in stdlib in +# python3.5 but not python3.4. We want to continue to support +# python3.4 (Trusty) +tenacity<5.0.4 + +# layer:vault-kv +hvac +# needed to prevent apt installs during import +netifaces +psutil + +# kubernetes-control-plane +aiohttp>=3.7.4,<3.8.0 +gunicorn>=20.0.0,<21.0.0 +loadbalancer-interface +typing_extensions<4.0 + diff --git a/kubernetes-control-plane/wheelhouse/Jinja2-3.0.3.tar.gz b/kubernetes-control-plane/wheelhouse/Jinja2-3.0.3.tar.gz new file mode 100644 index 0000000..cb150bc Binary files /dev/null and b/kubernetes-control-plane/wheelhouse/Jinja2-3.0.3.tar.gz differ diff --git a/kubernetes-control-plane/wheelhouse/MarkupSafe-2.0.1.tar.gz b/kubernetes-control-plane/wheelhouse/MarkupSafe-2.0.1.tar.gz new file mode 100644 index 0000000..7a37fc9 Binary files /dev/null and b/kubernetes-control-plane/wheelhouse/MarkupSafe-2.0.1.tar.gz differ diff --git a/kubernetes-control-plane/wheelhouse/PyYAML-5.3.1.tar.gz b/kubernetes-control-plane/wheelhouse/PyYAML-5.3.1.tar.gz new file mode 100644 index 0000000..915d67b Binary files /dev/null and b/kubernetes-control-plane/wheelhouse/PyYAML-5.3.1.tar.gz differ diff --git a/kubernetes-control-plane/wheelhouse/aiohttp-3.7.4.post0.tar.gz b/kubernetes-control-plane/wheelhouse/aiohttp-3.7.4.post0.tar.gz new file mode 100644 index 0000000..bb0fc48 Binary files /dev/null and b/kubernetes-control-plane/wheelhouse/aiohttp-3.7.4.post0.tar.gz differ diff --git a/kubernetes-control-plane/wheelhouse/async-timeout-3.0.1.tar.gz b/kubernetes-control-plane/wheelhouse/async-timeout-3.0.1.tar.gz new file mode 100644 index 0000000..dfed0e0 Binary files /dev/null and 
b/kubernetes-control-plane/wheelhouse/async-timeout-3.0.1.tar.gz differ diff --git a/kubernetes-control-plane/wheelhouse/attrs-22.1.0.tar.gz b/kubernetes-control-plane/wheelhouse/attrs-22.1.0.tar.gz new file mode 100644 index 0000000..1b5a7e1 Binary files /dev/null and b/kubernetes-control-plane/wheelhouse/attrs-22.1.0.tar.gz differ diff --git a/kubernetes-control-plane/wheelhouse/cached-property-1.5.2.tar.gz b/kubernetes-control-plane/wheelhouse/cached-property-1.5.2.tar.gz new file mode 100644 index 0000000..501f2c0 Binary files /dev/null and b/kubernetes-control-plane/wheelhouse/cached-property-1.5.2.tar.gz differ diff --git a/kubernetes-control-plane/wheelhouse/certifi-2022.6.15.tar.gz b/kubernetes-control-plane/wheelhouse/certifi-2022.6.15.tar.gz new file mode 100644 index 0000000..9db6c5c Binary files /dev/null and b/kubernetes-control-plane/wheelhouse/certifi-2022.6.15.tar.gz differ diff --git a/kubernetes-control-plane/wheelhouse/chardet-4.0.0.tar.gz b/kubernetes-control-plane/wheelhouse/chardet-4.0.0.tar.gz new file mode 100644 index 0000000..6bfc4e3 Binary files /dev/null and b/kubernetes-control-plane/wheelhouse/chardet-4.0.0.tar.gz differ diff --git a/kubernetes-control-plane/wheelhouse/charmhelpers-1.2.1.tar.gz b/kubernetes-control-plane/wheelhouse/charmhelpers-1.2.1.tar.gz new file mode 100644 index 0000000..78f281b Binary files /dev/null and b/kubernetes-control-plane/wheelhouse/charmhelpers-1.2.1.tar.gz differ diff --git a/kubernetes-control-plane/wheelhouse/charms.reactive-1.5.0.tar.gz b/kubernetes-control-plane/wheelhouse/charms.reactive-1.5.0.tar.gz new file mode 100644 index 0000000..3d6c57b Binary files /dev/null and b/kubernetes-control-plane/wheelhouse/charms.reactive-1.5.0.tar.gz differ diff --git a/kubernetes-control-plane/wheelhouse/charset-normalizer-2.0.12.tar.gz b/kubernetes-control-plane/wheelhouse/charset-normalizer-2.0.12.tar.gz new file mode 100644 index 0000000..8b5797f Binary files /dev/null and 
b/kubernetes-control-plane/wheelhouse/charset-normalizer-2.0.12.tar.gz differ diff --git a/kubernetes-control-plane/wheelhouse/gunicorn-20.1.0.tar.gz b/kubernetes-control-plane/wheelhouse/gunicorn-20.1.0.tar.gz new file mode 100644 index 0000000..b5da493 Binary files /dev/null and b/kubernetes-control-plane/wheelhouse/gunicorn-20.1.0.tar.gz differ diff --git a/kubernetes-control-plane/wheelhouse/hvac-0.11.2.tar.gz b/kubernetes-control-plane/wheelhouse/hvac-0.11.2.tar.gz new file mode 100644 index 0000000..6aa6982 Binary files /dev/null and b/kubernetes-control-plane/wheelhouse/hvac-0.11.2.tar.gz differ diff --git a/kubernetes-control-plane/wheelhouse/idna-3.3.tar.gz b/kubernetes-control-plane/wheelhouse/idna-3.3.tar.gz new file mode 100644 index 0000000..ff2bcbf Binary files /dev/null and b/kubernetes-control-plane/wheelhouse/idna-3.3.tar.gz differ diff --git a/kubernetes-control-plane/wheelhouse/idna-ssl-1.1.0.tar.gz b/kubernetes-control-plane/wheelhouse/idna-ssl-1.1.0.tar.gz new file mode 100644 index 0000000..2380177 Binary files /dev/null and b/kubernetes-control-plane/wheelhouse/idna-ssl-1.1.0.tar.gz differ diff --git a/kubernetes-control-plane.charm b/kubernetes-control-plane/wheelhouse/loadbalancer_interface-1.1.1.tar.gz similarity index 70% rename from kubernetes-control-plane.charm rename to kubernetes-control-plane/wheelhouse/loadbalancer_interface-1.1.1.tar.gz index d0d6b43..265133c 100644 Binary files a/kubernetes-control-plane.charm and b/kubernetes-control-plane/wheelhouse/loadbalancer_interface-1.1.1.tar.gz differ diff --git a/kubernetes-control-plane/wheelhouse/marshmallow-3.14.1.tar.gz b/kubernetes-control-plane/wheelhouse/marshmallow-3.14.1.tar.gz new file mode 100644 index 0000000..8030d97 Binary files /dev/null and b/kubernetes-control-plane/wheelhouse/marshmallow-3.14.1.tar.gz differ diff --git a/kubernetes-control-plane/wheelhouse/marshmallow-enum-1.5.1.tar.gz b/kubernetes-control-plane/wheelhouse/marshmallow-enum-1.5.1.tar.gz new file mode 
100644 index 0000000..642941a Binary files /dev/null and b/kubernetes-control-plane/wheelhouse/marshmallow-enum-1.5.1.tar.gz differ diff --git a/kubernetes-control-plane/wheelhouse/multidict-5.2.0.tar.gz b/kubernetes-control-plane/wheelhouse/multidict-5.2.0.tar.gz new file mode 100644 index 0000000..9563429 Binary files /dev/null and b/kubernetes-control-plane/wheelhouse/multidict-5.2.0.tar.gz differ diff --git a/kubernetes-control-plane/wheelhouse/netaddr-0.7.19.tar.gz b/kubernetes-control-plane/wheelhouse/netaddr-0.7.19.tar.gz new file mode 100644 index 0000000..cc31d9d Binary files /dev/null and b/kubernetes-control-plane/wheelhouse/netaddr-0.7.19.tar.gz differ diff --git a/kubernetes-control-plane/wheelhouse/netifaces-0.11.0.tar.gz b/kubernetes-control-plane/wheelhouse/netifaces-0.11.0.tar.gz new file mode 100644 index 0000000..3a35596 Binary files /dev/null and b/kubernetes-control-plane/wheelhouse/netifaces-0.11.0.tar.gz differ diff --git a/kubernetes-control-plane/wheelhouse/ops-1.5.0.tar.gz b/kubernetes-control-plane/wheelhouse/ops-1.5.0.tar.gz new file mode 100644 index 0000000..bc4e8c6 Binary files /dev/null and b/kubernetes-control-plane/wheelhouse/ops-1.5.0.tar.gz differ diff --git a/kubernetes-control-plane/wheelhouse/ops_reactive_interface-1.0.1.tar.gz b/kubernetes-control-plane/wheelhouse/ops_reactive_interface-1.0.1.tar.gz new file mode 100644 index 0000000..14f5ded Binary files /dev/null and b/kubernetes-control-plane/wheelhouse/ops_reactive_interface-1.0.1.tar.gz differ diff --git a/kubernetes-control-plane/wheelhouse/pbr-5.9.0.tar.gz b/kubernetes-control-plane/wheelhouse/pbr-5.9.0.tar.gz new file mode 100644 index 0000000..9c46601 Binary files /dev/null and b/kubernetes-control-plane/wheelhouse/pbr-5.9.0.tar.gz differ diff --git a/kubernetes-control-plane/wheelhouse/pip-18.1.tar.gz b/kubernetes-control-plane/wheelhouse/pip-18.1.tar.gz new file mode 100644 index 0000000..a18192d Binary files /dev/null and 
b/kubernetes-control-plane/wheelhouse/pip-18.1.tar.gz differ diff --git a/kubernetes-control-plane/wheelhouse/psutil-5.9.1.tar.gz b/kubernetes-control-plane/wheelhouse/psutil-5.9.1.tar.gz new file mode 100644 index 0000000..752c4a4 Binary files /dev/null and b/kubernetes-control-plane/wheelhouse/psutil-5.9.1.tar.gz differ diff --git a/kubernetes-control-plane/wheelhouse/pyaml-21.10.1.tar.gz b/kubernetes-control-plane/wheelhouse/pyaml-21.10.1.tar.gz new file mode 100644 index 0000000..b19aad3 Binary files /dev/null and b/kubernetes-control-plane/wheelhouse/pyaml-21.10.1.tar.gz differ diff --git a/kubernetes-control-plane/wheelhouse/requests-2.27.1.tar.gz b/kubernetes-control-plane/wheelhouse/requests-2.27.1.tar.gz new file mode 100644 index 0000000..8f3a2fd Binary files /dev/null and b/kubernetes-control-plane/wheelhouse/requests-2.27.1.tar.gz differ diff --git a/kubernetes-control-plane/wheelhouse/setuptools-41.6.0.zip b/kubernetes-control-plane/wheelhouse/setuptools-41.6.0.zip new file mode 100644 index 0000000..3345759 Binary files /dev/null and b/kubernetes-control-plane/wheelhouse/setuptools-41.6.0.zip differ diff --git a/kubernetes-control-plane/wheelhouse/setuptools_scm-1.17.0.tar.gz b/kubernetes-control-plane/wheelhouse/setuptools_scm-1.17.0.tar.gz new file mode 100644 index 0000000..43b16c7 Binary files /dev/null and b/kubernetes-control-plane/wheelhouse/setuptools_scm-1.17.0.tar.gz differ diff --git a/kubernetes-control-plane/wheelhouse/six-1.16.0.tar.gz b/kubernetes-control-plane/wheelhouse/six-1.16.0.tar.gz new file mode 100644 index 0000000..5bf3a27 Binary files /dev/null and b/kubernetes-control-plane/wheelhouse/six-1.16.0.tar.gz differ diff --git a/kubernetes-control-plane/wheelhouse/tenacity-5.0.3.tar.gz b/kubernetes-control-plane/wheelhouse/tenacity-5.0.3.tar.gz new file mode 100644 index 0000000..c7d05ba Binary files /dev/null and b/kubernetes-control-plane/wheelhouse/tenacity-5.0.3.tar.gz differ diff --git 
a/kubernetes-control-plane/wheelhouse/typing_extensions-3.10.0.2.tar.gz b/kubernetes-control-plane/wheelhouse/typing_extensions-3.10.0.2.tar.gz new file mode 100644 index 0000000..dad7a2c Binary files /dev/null and b/kubernetes-control-plane/wheelhouse/typing_extensions-3.10.0.2.tar.gz differ diff --git a/kubernetes-control-plane/wheelhouse/urllib3-1.26.11.tar.gz b/kubernetes-control-plane/wheelhouse/urllib3-1.26.11.tar.gz new file mode 100644 index 0000000..49bbb5d Binary files /dev/null and b/kubernetes-control-plane/wheelhouse/urllib3-1.26.11.tar.gz differ diff --git a/kubernetes-control-plane/wheelhouse/wheel-0.33.6.tar.gz b/kubernetes-control-plane/wheelhouse/wheel-0.33.6.tar.gz new file mode 100644 index 0000000..c922c4e Binary files /dev/null and b/kubernetes-control-plane/wheelhouse/wheel-0.33.6.tar.gz differ diff --git a/kubernetes-control-plane/wheelhouse/yarl-1.7.2.tar.gz b/kubernetes-control-plane/wheelhouse/yarl-1.7.2.tar.gz new file mode 100644 index 0000000..e7e1c13 Binary files /dev/null and b/kubernetes-control-plane/wheelhouse/yarl-1.7.2.tar.gz differ diff --git a/kubernetes-worker.charm b/kubernetes-worker.charm deleted file mode 100644 index fc81ed6..0000000 Binary files a/kubernetes-worker.charm and /dev/null differ diff --git a/kubernetes-worker/.build.manifest b/kubernetes-worker/.build.manifest new file mode 100644 index 0000000..6138931 --- /dev/null +++ b/kubernetes-worker/.build.manifest @@ -0,0 +1,1726 @@ +{ + "layers": [ + { + "branch": "refs/heads/main\nrefs/heads/stable", + "rev": "fcdcea4e5de3e1556c24e6704607862d0ba00a56", + "url": "layer:options" + }, + { + "branch": "refs/heads/main\nrefs/heads/stable", + "rev": "fb767dcf0786d1d5364199bb3b40bdc86518b45b", + "url": "layer:basic" + }, + { + "branch": "refs/heads/main\nrefs/heads/stable", + "rev": "a7d7b6423db37a47611310039e6ed1929c0a2eab", + "url": "layer:status" + }, + { + "branch": "refs/heads/main\nrefs/heads/stable", + "rev": "76bddfb640ab8767fc7e4a4b73a4a4e781948f34", + "url": 
"layer:apt" + }, + { + "branch": "refs/heads/main\nrefs/heads/stable", + "rev": "527dd64fc4b9a6b0f8d80a3c2c0b865155050275", + "url": "layer:debug" + }, + { + "branch": "refs/heads/main\nrefs/heads/stable", + "rev": "a0e1f28e8bb9040eada9a7a73f66ee6a615704b7", + "url": "layer:snap" + }, + { + "branch": "refs/heads/main\nrefs/heads/stable", + "rev": "cc5bd3f49b2fa5e6c3ab2336763c313ec8bf083f", + "url": "layer:leadership" + }, + { + "branch": "refs/heads/main\nrefs/heads/stable", + "rev": "f491ebe32b503c9712d2f8cd602dcce18f4aab46", + "url": "layer:metrics" + }, + { + "branch": "refs/heads/main\nrefs/heads/stable", + "rev": "47dfcd4920ef6317850a4837ef0057ab0092a18e", + "url": "layer:nagios" + }, + { + "branch": "refs/heads/main\nrefs/heads/stable", + "rev": "fb46dec78d390571753d21876bbba689bbbca9e4", + "url": "layer:tls-client" + }, + { + "branch": "refs/heads/main\nrefs/heads/stable", + "rev": "b60102068c6f0ddbeaf8a308549a3e88cfa35688", + "url": "layer:cdk-service-kicker" + }, + { + "branch": "refs/heads/main\nrefs/heads/stable", + "rev": "195fa11c4b087cef044b9bd3a8b8d2b2540cb727", + "url": "layer:cis-benchmark" + }, + { + "branch": "refs/heads/main\nrefs/heads/stable", + "rev": "fa27fc93e0b08000963e83a6bfe49812d890dfcf", + "url": "layer:coordinator" + }, + { + "branch": "refs/heads/stable", + "rev": "b93fae0e73bb48074deb0062db204b621caa9f1f", + "url": "layer:kubernetes-common" + }, + { + "branch": "refs/heads/main\nrefs/heads/stable", + "rev": "85ede006f2588cf6f95a05f9287c4094ae1503c3", + "url": "layer:kubernetes-node-base" + }, + { + "branch": "refs/heads/stable", + "rev": "30b3d6ffc25f011d7588e08fd3c37be5f883f87e", + "url": "kubernetes-worker" + }, + { + "branch": "refs/heads/main\nrefs/heads/stable", + "rev": "95d744d1dbc4d86fb0462283c9371619bf5bbc24", + "url": "interface:nrpe-external-master" + }, + { + "branch": "refs/heads/main\nrefs/heads/stable", + "rev": "d9850016d930a6d507b9fd45e2598d327922b140", + "url": "interface:tls-certificates" + }, + { + "branch": 
"refs/heads/main\nrefs/heads/stable", + "rev": "6f927f10b97f45c566481cf57a29d433f17373e1", + "url": "interface:container-runtime" + }, + { + "branch": "refs/heads/main\nrefs/heads/stable", + "rev": "632131b1f122daf6fb601fd4c9f1e4dbb1a92e09", + "url": "interface:http" + }, + { + "branch": "refs/heads/main\nrefs/heads/stable", + "rev": "3ebfa8c70580aec7d9fcd2be1c74cef3457117f3", + "url": "interface:kubernetes-cni" + }, + { + "branch": "refs/heads/main\nrefs/heads/stable", + "rev": "ff5434d8353292057a591dddc5ca749aea2c3b5f", + "url": "interface:kube-control" + }, + { + "branch": "refs/heads/main\nrefs/heads/stable", + "rev": "d8d8c7ef17c99ad53383f3cabf4cf5c8191d16f7", + "url": "interface:aws-integration" + }, + { + "branch": "refs/heads/main\nrefs/heads/stable", + "rev": "d8f093cb2930edf5f93678253dca2da70b73b4fb", + "url": "interface:gcp-integration" + }, + { + "branch": "refs/heads/main\nrefs/heads/stable", + "rev": "aa365314041ccbd0018e7c73e0de39eed9be045f", + "url": "interface:openstack-integration" + }, + { + "branch": "refs/heads/main\nrefs/heads/stable", + "rev": "d5caea55ced6785f391215ee457c3a964eaf3f4b", + "url": "interface:vsphere-integration" + }, + { + "branch": "refs/heads/main\nrefs/heads/stable", + "rev": "f476e31d066498b493239eeefab57b11c3d377f4", + "url": "interface:prometheus" + }, + { + "branch": "refs/heads/main\nrefs/heads/stable", + "rev": "8d2202e433d7c188de4df2fd4bddb355193e93ac", + "url": "interface:azure-integration" + }, + { + "branch": "refs/heads/main\nrefs/heads/stable", + "rev": "d5a2526fec9c3e8581f18b56e84a86871583e080", + "url": "interface:mount" + } + ], + "signatures": { + ".build.manifest": [ + "build", + "dynamic", + "unchecked" + ], + ".github/workflows/main.yaml": [ + "layer:kubernetes-node-base", + "static", + "0e5670aa0458f545b05ee907c9f7e31c4bb78b7db68f982a6ce59996f55903a4" + ], + ".github/workflows/main.yml": [ + "kubernetes-worker", + "static", + "7f40a341792064acd874a5ed7e7753e2cc9c3327f9e31f3040528d8f6d7537e3" + ], + 
".gitignore": [ + "kubernetes-worker", + "static", + "e028ad966843fa4e09963c008d1200117caf1a42163c70795d9c55406f801d8c" + ], + ".travis.yml": [ + "layer:cis-benchmark", + "static", + "b6dbe144aa288b8a89caf1119b9835b407b234c9b32a1c81013b12a0593a8be2" + ], + "CONTRIBUTING.md": [ + "kubernetes-worker", + "static", + "bbe14e93d7db43d022103e3088036dc6fc1ca0554538a8205bcc07ef730d1ded" + ], + "HACKING.md": [ + "kubernetes-worker", + "static", + "fc87d881098225b72f3ca6a1b8e01cce143b130e3dc8ac1ec484a0de19bc4d6c" + ], + "LICENSE": [ + "kubernetes-worker", + "static", + "f02fd85a4171482f6bb1d6f87fe0704d3a2da93eca04afe39a0310a00c409902" + ], + "Makefile": [ + "layer:basic", + "static", + "b7ab3a34e5faf79b96a8632039a0ad0aa87f2a9b5f0ba604e007cafb22190301" + ], + "README.md": [ + "kubernetes-worker", + "static", + "ca674608d655a85e9499dd787910e72c05929a5c341e9438cefb66e6493c6ff0" + ], + "actions.yaml": [ + "kubernetes-worker", + "dynamic", + "7419da1f39db4832e85019aa10c6eba09c1b57e669bdb780aee3669e7930a999" + ], + "actions/cis-benchmark": [ + "layer:cis-benchmark", + "static", + "bb727abb091314e91274f2367eae173c3a5303f289d539e21ff0587d01a32de1" + ], + "actions/debug": [ + "layer:debug", + "static", + "db0a42dae4c5045b2c06385bf22209dfe0e2ded55822ef847d84b01d9ff2b046" + ], + "actions/microbot": [ + "kubernetes-worker", + "static", + "d9024669711f33949f26710a8138d8bf09779a81fc6398d231b4b44997170b9b" + ], + "actions/pause": [ + "kubernetes-worker", + "static", + "ba4a19dc800ff6381367ad2bd84b0ad0c06180a77834c762750a4bdedb9ff366" + ], + "actions/resume": [ + "kubernetes-worker", + "static", + "460d50796be763674cdadb5d88ccdc0c883eb21fb3cf86805c46da18922022a7" + ], + "actions/upgrade": [ + "kubernetes-worker", + "static", + "641458372b97c9a0ef15c00aa8934d631f3c3b159d53f6e30cf8022bd3ff705e" + ], + "bin/charm-env": [ + "layer:basic", + "static", + "fb6a20fac4102a6a4b6ffe903fcf666998f9a95a3647e6f9af7a1eeb44e58fd5" + ], + "bin/layer_option": [ + "layer:options", + "static", + 
"e959bf29da4c5edff28b2602c24113c4df9e25cdc9f2aa3b5d46c8577b2a40cc" + ], + "build-cni-resources.sh": [ + "kubernetes-worker", + "static", + "2acb456393d677f9f3f259c5bf90a5c0155e0e8aed15dee22100d13034d1dc47" + ], + "config.yaml": [ + "kubernetes-worker", + "dynamic", + "d7205c9f51c5e4a181ea91846ba29fc31ba58752f1df440113fa0ede538e2a8a" + ], + "copyright": [ + "kubernetes-worker", + "static", + "badd4492d214890abd07b615f9e1a7a5ff3339b6c44655a826c746a9263ff00d" + ], + "copyright.layer-apt": [ + "layer:apt", + "static", + "5123b2d0220fefb4424a463216fb41a6dd7cfad49c9799ba7037f1e74a2fd6bc" + ], + "copyright.layer-basic": [ + "layer:basic", + "static", + "f6740d66fd60b60f2533d9fcb53907078d1e20920a0219afce7182e2a1c97629" + ], + "copyright.layer-coordinator": [ + "layer:coordinator", + "static", + "7d212a095a6143559fb51f26bc40c2ba24b977190f65c7e5c835104f54d5dfc5" + ], + "copyright.layer-leadership": [ + "layer:leadership", + "static", + "8ce407829378fc0f72ce44c7f624e4951c7ccb3db1cfb949bee026b701728cc9" + ], + "copyright.layer-metrics": [ + "layer:metrics", + "static", + "08509dcbade4c20761ba4382ef23c831744dbab1d4a8dd94a1c2b4d4e913334c" + ], + "copyright.layer-nagios": [ + "layer:nagios", + "static", + "47b2363574909e748bcc471d9004780ac084b301c154905654b5b6f088474749" + ], + "copyright.layer-options": [ + "layer:options", + "static", + "f6740d66fd60b60f2533d9fcb53907078d1e20920a0219afce7182e2a1c97629" + ], + "copyright.layer-snap": [ + "layer:snap", + "static", + "c71d239df91726fc519c6eb72d318ec65820627232b2f796219e87dcf35d0ab4" + ], + "copyright.layer-status": [ + "layer:status", + "static", + "7c0e36e618a8544faaaa3f8e0533c2f1f4a18bcacbdd8b99b537742e6b587d58" + ], + "debug-scripts/charm-unitdata": [ + "layer:debug", + "static", + "c952b9d31f3942e4e722cb3e70f5119707b69b8e76cc44e2e906bc6d9aef49b7" + ], + "debug-scripts/filesystem": [ + "layer:debug", + "static", + "d29cc8687f4422d024001c91b1ac756ee6bf8a2a125bc98db1199ba775eb8fd7" + ], + "debug-scripts/inotify": [ + 
"kubernetes-worker", + "static", + "8991354951b11e32a9edf4736e7ca0d5948d6c30a9a83673193aadf829032223" + ], + "debug-scripts/juju-logs": [ + "layer:debug", + "static", + "d260b35753a917368cb8c64c1312546a0a40ef49cba84c75bc6369549807c55e" + ], + "debug-scripts/juju-network-get": [ + "layer:debug", + "static", + "6d849a1f8e6569bd0d5ea38299f7937cb8b36a5f505e3532f6c756eabeb8b6c5" + ], + "debug-scripts/kubectl": [ + "kubernetes-worker", + "static", + "dadc2eae5818d818ac0b10029056d0db975406c17211864e08d1fa9780bb82c2" + ], + "debug-scripts/kubernetes-worker-services": [ + "kubernetes-worker", + "static", + "fca2c57d754d9968c80308031fd9de7cfd2ddda37de5b2ff49ba1ccf333c5a58" + ], + "debug-scripts/network": [ + "layer:debug", + "static", + "714afae5dcb45554ff1f05285501e3b7fcc656c8de51217e263b93dab25a9d2e" + ], + "debug-scripts/packages": [ + "layer:debug", + "static", + "e8177102dc2ca853cb9272c1257cf2cfd5253d2a074e602d07c8bc4ea8e27c75" + ], + "debug-scripts/sysctl": [ + "layer:debug", + "static", + "990035b320e09cc2228e1f2f880e795d51118b2959339eacddff9cbb74349c6a" + ], + "debug-scripts/systemd": [ + "layer:debug", + "static", + "23ddf533198bf5b1ce723acde31ada806aab8539292b514c721d8ec08af74106" + ], + "debug-scripts/tls-certs": [ + "layer:tls-client", + "static", + "ebf7f23ef6e39fb8e664bac2e9429e32aaeb673b4a51751724b835c007e85d3b" + ], + "docs/status.md": [ + "layer:status", + "static", + "975dec9f8c938196e102e954a80226bda293407c4e5ae857c118bf692154702a" + ], + "exec.d/docker-compose/charm-pre-install": [ + "layer:kubernetes-node-base", + "static", + "32482c2a88209cbe512990db5fb4deabdcff88282bf7c7dd71a265383139fc77" + ], + "exec.d/vmware-patch/charm-pre-install": [ + "kubernetes-worker", + "static", + "9f98f70669ddd949ff83c7b408b678ae170bf41e4faa2828b4d66bd47acca93e" + ], + "hooks/aws-relation-broken": [ + "layer:basic", + "dynamic", + "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7" + ], + "hooks/aws-relation-changed": [ + "layer:basic", + "dynamic", + 
"2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7" + ], + "hooks/aws-relation-created": [ + "layer:basic", + "dynamic", + "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7" + ], + "hooks/aws-relation-departed": [ + "layer:basic", + "dynamic", + "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7" + ], + "hooks/aws-relation-joined": [ + "layer:basic", + "dynamic", + "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7" + ], + "hooks/azure-relation-broken": [ + "layer:basic", + "dynamic", + "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7" + ], + "hooks/azure-relation-changed": [ + "layer:basic", + "dynamic", + "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7" + ], + "hooks/azure-relation-created": [ + "layer:basic", + "dynamic", + "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7" + ], + "hooks/azure-relation-departed": [ + "layer:basic", + "dynamic", + "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7" + ], + "hooks/azure-relation-joined": [ + "layer:basic", + "dynamic", + "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7" + ], + "hooks/certificates-relation-broken": [ + "layer:basic", + "dynamic", + "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7" + ], + "hooks/certificates-relation-changed": [ + "layer:basic", + "dynamic", + "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7" + ], + "hooks/certificates-relation-created": [ + "layer:basic", + "dynamic", + "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7" + ], + "hooks/certificates-relation-departed": [ + "layer:basic", + "dynamic", + "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7" + ], + "hooks/certificates-relation-joined": [ + "layer:basic", + "dynamic", + "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7" + ], + "hooks/cni-relation-broken": [ + "layer:basic", + "dynamic", + 
"2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7" + ], + "hooks/cni-relation-changed": [ + "layer:basic", + "dynamic", + "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7" + ], + "hooks/cni-relation-created": [ + "layer:basic", + "dynamic", + "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7" + ], + "hooks/cni-relation-departed": [ + "layer:basic", + "dynamic", + "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7" + ], + "hooks/cni-relation-joined": [ + "layer:basic", + "dynamic", + "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7" + ], + "hooks/collect-metrics": [ + "layer:metrics", + "static", + "139fe18ce4cf2bed2155d3d0fce1c3b4cf1bc2598242cda42b3d772ec9bf8558" + ], + "hooks/config-changed": [ + "layer:basic", + "dynamic", + "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7" + ], + "hooks/container-runtime-relation-broken": [ + "layer:basic", + "dynamic", + "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7" + ], + "hooks/container-runtime-relation-changed": [ + "layer:basic", + "dynamic", + "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7" + ], + "hooks/container-runtime-relation-created": [ + "layer:basic", + "dynamic", + "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7" + ], + "hooks/container-runtime-relation-departed": [ + "layer:basic", + "dynamic", + "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7" + ], + "hooks/container-runtime-relation-joined": [ + "layer:basic", + "dynamic", + "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7" + ], + "hooks/coordinator-relation-broken": [ + "layer:basic", + "dynamic", + "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7" + ], + "hooks/coordinator-relation-changed": [ + "layer:coordinator", + "static", + "e5138d13492aa9a90379e8fce4a85c612481e7bc27a49958edbbfcaaf06f03a6" + ], + "hooks/coordinator-relation-created": [ + 
"layer:basic", + "dynamic", + "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7" + ], + "hooks/coordinator-relation-departed": [ + "layer:coordinator", + "static", + "e5138d13492aa9a90379e8fce4a85c612481e7bc27a49958edbbfcaaf06f03a6" + ], + "hooks/coordinator-relation-joined": [ + "layer:basic", + "dynamic", + "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7" + ], + "hooks/gcp-relation-broken": [ + "layer:basic", + "dynamic", + "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7" + ], + "hooks/gcp-relation-changed": [ + "layer:basic", + "dynamic", + "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7" + ], + "hooks/gcp-relation-created": [ + "layer:basic", + "dynamic", + "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7" + ], + "hooks/gcp-relation-departed": [ + "layer:basic", + "dynamic", + "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7" + ], + "hooks/gcp-relation-joined": [ + "layer:basic", + "dynamic", + "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7" + ], + "hooks/hook.template": [ + "layer:basic", + "static", + "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7" + ], + "hooks/ingress-proxy-relation-broken": [ + "layer:basic", + "dynamic", + "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7" + ], + "hooks/ingress-proxy-relation-changed": [ + "layer:basic", + "dynamic", + "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7" + ], + "hooks/ingress-proxy-relation-created": [ + "layer:basic", + "dynamic", + "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7" + ], + "hooks/ingress-proxy-relation-departed": [ + "layer:basic", + "dynamic", + "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7" + ], + "hooks/ingress-proxy-relation-joined": [ + "layer:basic", + "dynamic", + "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7" + ], + "hooks/install": [ + "layer:basic", + 
"dynamic", + "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7" + ], + "hooks/kube-api-endpoint-relation-broken": [ + "layer:basic", + "dynamic", + "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7" + ], + "hooks/kube-api-endpoint-relation-changed": [ + "layer:basic", + "dynamic", + "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7" + ], + "hooks/kube-api-endpoint-relation-created": [ + "layer:basic", + "dynamic", + "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7" + ], + "hooks/kube-api-endpoint-relation-departed": [ + "layer:basic", + "dynamic", + "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7" + ], + "hooks/kube-api-endpoint-relation-joined": [ + "layer:basic", + "dynamic", + "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7" + ], + "hooks/kube-control-relation-broken": [ + "layer:basic", + "dynamic", + "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7" + ], + "hooks/kube-control-relation-changed": [ + "layer:basic", + "dynamic", + "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7" + ], + "hooks/kube-control-relation-created": [ + "layer:basic", + "dynamic", + "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7" + ], + "hooks/kube-control-relation-departed": [ + "layer:basic", + "dynamic", + "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7" + ], + "hooks/kube-control-relation-joined": [ + "layer:basic", + "dynamic", + "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7" + ], + "hooks/leader-elected": [ + "layer:coordinator", + "static", + "e5138d13492aa9a90379e8fce4a85c612481e7bc27a49958edbbfcaaf06f03a6" + ], + "hooks/leader-settings-changed": [ + "layer:coordinator", + "static", + "e5138d13492aa9a90379e8fce4a85c612481e7bc27a49958edbbfcaaf06f03a6" + ], + "hooks/nfs-relation-broken": [ + "layer:basic", + "dynamic", + "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7" + ], + 
"hooks/nfs-relation-changed": [ + "layer:basic", + "dynamic", + "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7" + ], + "hooks/nfs-relation-created": [ + "layer:basic", + "dynamic", + "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7" + ], + "hooks/nfs-relation-departed": [ + "layer:basic", + "dynamic", + "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7" + ], + "hooks/nfs-relation-joined": [ + "layer:basic", + "dynamic", + "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7" + ], + "hooks/nrpe-external-master-relation-broken": [ + "layer:basic", + "dynamic", + "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7" + ], + "hooks/nrpe-external-master-relation-changed": [ + "layer:basic", + "dynamic", + "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7" + ], + "hooks/nrpe-external-master-relation-created": [ + "layer:basic", + "dynamic", + "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7" + ], + "hooks/nrpe-external-master-relation-departed": [ + "layer:basic", + "dynamic", + "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7" + ], + "hooks/nrpe-external-master-relation-joined": [ + "layer:basic", + "dynamic", + "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7" + ], + "hooks/openstack-relation-broken": [ + "layer:basic", + "dynamic", + "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7" + ], + "hooks/openstack-relation-changed": [ + "layer:basic", + "dynamic", + "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7" + ], + "hooks/openstack-relation-created": [ + "layer:basic", + "dynamic", + "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7" + ], + "hooks/openstack-relation-departed": [ + "layer:basic", + "dynamic", + "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7" + ], + "hooks/openstack-relation-joined": [ + "layer:basic", + "dynamic", + 
"2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7" + ], + "hooks/post-series-upgrade": [ + "layer:basic", + "dynamic", + "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7" + ], + "hooks/pre-series-upgrade": [ + "layer:basic", + "dynamic", + "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7" + ], + "hooks/relations/aws-integration/.gitignore": [ + "interface:aws-integration", + "static", + "315971ad9cc5d6ada2391f0940e1800149b211a18be3c7a8f396735d7978702b" + ], + "hooks/relations/aws-integration/LICENSE": [ + "interface:aws-integration", + "static", + "cfc7749b96f63bd31c3c42b5c471bf756814053e847c10f3eb003417bc523d30" + ], + "hooks/relations/aws-integration/README.md": [ + "interface:aws-integration", + "static", + "1585d72b136158ce0741fc2ce0d7710c1ec55662f846afe2e768a4708c51057e" + ], + "hooks/relations/aws-integration/__init__.py": [ + "interface:aws-integration", + "static", + "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855" + ], + "hooks/relations/aws-integration/copyright": [ + "interface:aws-integration", + "static", + "7c0e36e618a8544faaaa3f8e0533c2f1f4a18bcacbdd8b99b537742e6b587d58" + ], + "hooks/relations/aws-integration/docs/provides.md": [ + "interface:aws-integration", + "static", + "a7669f49156173c27ede87105f6e65a07e1e5e41f3c154a24e1a82f307f65073" + ], + "hooks/relations/aws-integration/docs/requires.md": [ + "interface:aws-integration", + "static", + "09553e5f07f216e5234125fdf38a21af00ab11349cdb788b21703ae72b0aeed1" + ], + "hooks/relations/aws-integration/interface.yaml": [ + "interface:aws-integration", + "static", + "4449f48e5aaa99c0bb3e8e1c9833d11d3b20fc5f81ae1f15b6442af5ec873167" + ], + "hooks/relations/aws-integration/make_docs": [ + "interface:aws-integration", + "static", + "b471fefc7eaa5c377d47b2b63481d6c8f4c5e9d224428efe93c5abbd13a0817d" + ], + "hooks/relations/aws-integration/provides.py": [ + "interface:aws-integration", + "static", + 
"ee8f91b281d9112999f3d0e1d2ac17964fca3af5102fe5b072f3f3659b932ab7" + ], + "hooks/relations/aws-integration/pydocmd.yml": [ + "interface:aws-integration", + "static", + "8c242cde2b2517c74de8ad6b1b90d2f6d97b2eb86c54edaf2eb8a8f7d32913e8" + ], + "hooks/relations/aws-integration/requires.py": [ + "interface:aws-integration", + "static", + "3006d6a2607bc15507bec3e6144093c6938a51a22eee1f550d714ff702728c39" + ], + "hooks/relations/azure-integration/.gitignore": [ + "interface:azure-integration", + "static", + "9653f2820c79d92ac3518eedd0e1f43ffec128d5df9216c25d906fcba8ee46b8" + ], + "hooks/relations/azure-integration/LICENSE": [ + "interface:azure-integration", + "static", + "cfc7749b96f63bd31c3c42b5c471bf756814053e847c10f3eb003417bc523d30" + ], + "hooks/relations/azure-integration/README.md": [ + "interface:azure-integration", + "static", + "c7799dba9471709e086dcd2ea272ad7a6e33f5058d875ce2bf5b3a6939d4a1e7" + ], + "hooks/relations/azure-integration/__init__.py": [ + "interface:azure-integration", + "static", + "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855" + ], + "hooks/relations/azure-integration/copyright": [ + "interface:azure-integration", + "static", + "7c0e36e618a8544faaaa3f8e0533c2f1f4a18bcacbdd8b99b537742e6b587d58" + ], + "hooks/relations/azure-integration/docs/provides.md": [ + "interface:azure-integration", + "static", + "60ae63187cac32c00d9f462f1723c9487960c728beae871f1a409c92196cc1f5" + ], + "hooks/relations/azure-integration/docs/requires.md": [ + "interface:azure-integration", + "static", + "b01e313c8ce3d02093e851bd84d5e8b7ae77b300c4b06b5048bddc78c1ad3eb3" + ], + "hooks/relations/azure-integration/interface.yaml": [ + "interface:azure-integration", + "static", + "cea5bfd87c278bd3f2e8dc00e654930f06d2bd91ef731a063edea14b04d9128a" + ], + "hooks/relations/azure-integration/make_docs": [ + "interface:azure-integration", + "static", + "e76f4a64c2fdc4a9f97a57d6515b4a25f9404d7043f2792db5206bc44213927c" + ], + 
"hooks/relations/azure-integration/provides.py": [ + "interface:azure-integration", + "static", + "33af701c7abd51e869de945c1f032749136c66560bb604e8e72521dc9d7e495b" + ], + "hooks/relations/azure-integration/pydocmd.yml": [ + "interface:azure-integration", + "static", + "4c17085efb4ec328891b49257413eed4d9a552eeea8e589509e48081effe51ed" + ], + "hooks/relations/azure-integration/requires.py": [ + "interface:azure-integration", + "static", + "2e60fecf8bc65d84124742d0833afc90d2e839f5dfa2923e8d1849063c51f47a" + ], + "hooks/relations/container-runtime/.gitignore": [ + "interface:container-runtime", + "static", + "a2ebfecdb6c1b58267fbe97e6e2ac02c2b963df7673fc1047270f0f0cff16732" + ], + "hooks/relations/container-runtime/LICENSE": [ + "interface:container-runtime", + "static", + "c71d239df91726fc519c6eb72d318ec65820627232b2f796219e87dcf35d0ab4" + ], + "hooks/relations/container-runtime/README.md": [ + "interface:container-runtime", + "static", + "44273265818229d2c858c3af0e0eee3a7df05aaa9ab20d28c3872190d4b48611" + ], + "hooks/relations/container-runtime/__init__.py": [ + "interface:container-runtime", + "static", + "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855" + ], + "hooks/relations/container-runtime/interface.yaml": [ + "interface:container-runtime", + "static", + "e5343dcb11a6817a6050df4ea1c463eeaa0dd4777098566d4e27b056775426c6" + ], + "hooks/relations/container-runtime/provides.py": [ + "interface:container-runtime", + "static", + "4e818da222f507604179a828629787a1250083c847277f6b5b8e028cfbbb6d06" + ], + "hooks/relations/container-runtime/requires.py": [ + "interface:container-runtime", + "static", + "95285168b02f1f70be15c03098833a85e60fa1658ed72a46acd42e8e85ded761" + ], + "hooks/relations/coordinator/peers.py": [ + "layer:coordinator", + "static", + "d615c442396422a30a0c5f7639750d15bb59247ae5d9362c4f5dc8dd2cc7fff2" + ], + "hooks/relations/gcp-integration/.gitignore": [ + "interface:gcp-integration", + "static", + 
"9653f2820c79d92ac3518eedd0e1f43ffec128d5df9216c25d906fcba8ee46b8" + ], + "hooks/relations/gcp-integration/LICENSE": [ + "interface:gcp-integration", + "static", + "cfc7749b96f63bd31c3c42b5c471bf756814053e847c10f3eb003417bc523d30" + ], + "hooks/relations/gcp-integration/README.md": [ + "interface:gcp-integration", + "static", + "dab3f4a03f02dec0095883054780e3e3f1bf63262b06a9fd499364a3db8b1e97" + ], + "hooks/relations/gcp-integration/__init__.py": [ + "interface:gcp-integration", + "static", + "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855" + ], + "hooks/relations/gcp-integration/copyright": [ + "interface:gcp-integration", + "static", + "7c0e36e618a8544faaaa3f8e0533c2f1f4a18bcacbdd8b99b537742e6b587d58" + ], + "hooks/relations/gcp-integration/docs/provides.md": [ + "interface:gcp-integration", + "static", + "a67cda4094b4d601c8de63cf099ba2e83fecf3a8382e88f44e58b98be8872fa6" + ], + "hooks/relations/gcp-integration/docs/requires.md": [ + "interface:gcp-integration", + "static", + "d7e6d7dc90b74d35bf2bd10b00b3ba289ab856dc79ec51046508a85b9dda35a3" + ], + "hooks/relations/gcp-integration/interface.yaml": [ + "interface:gcp-integration", + "static", + "368e8ade9267b905dcb2e6843e7ed61bd6d246f0b0c18942e729f546d5db2260" + ], + "hooks/relations/gcp-integration/make_docs": [ + "interface:gcp-integration", + "static", + "5bf011da5045c31da97a67b8633d30ea90adc6c0d4d823f839fce6e07e5fe222" + ], + "hooks/relations/gcp-integration/provides.py": [ + "interface:gcp-integration", + "static", + "839f15cf978cf94343772889846ad3e2b8375372ef25ed08036207e5608b1f48" + ], + "hooks/relations/gcp-integration/pydocmd.yml": [ + "interface:gcp-integration", + "static", + "2d5a524cbde5ccf732b67382a85deb7c26dfb92315c30d26c2b2d5632a2a8f38" + ], + "hooks/relations/gcp-integration/requires.py": [ + "interface:gcp-integration", + "static", + "79c75c6c76b37bc5ac486ac2e14f853223c4c603850d2f231f187ab255cbdbf0" + ], + "hooks/relations/http/.gitignore": [ + "interface:http", + "static", + 
"83b4ca18cc39800b1d260b5633cd0252e21501b21e7c33e718db44f1a68a09b8" + ], + "hooks/relations/http/README.md": [ + "interface:http", + "static", + "9c95320ad040745374fc03e972077f52c27e07eb0386ec93ae19bd50dca24c0d" + ], + "hooks/relations/http/__init__.py": [ + "interface:http", + "static", + "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855" + ], + "hooks/relations/http/interface.yaml": [ + "interface:http", + "static", + "d0b64038b85b7791ee4f3a42d73ffc8c208f206f73f899cbf33a519d12f9ad13" + ], + "hooks/relations/http/provides.py": [ + "interface:http", + "static", + "8c72cd8a5a6ea24f53b6dba11f4353c75265bfa7d3ecc2dd096c8963eab8c877" + ], + "hooks/relations/http/requires.py": [ + "interface:http", + "static", + "76cc886368eaf9c2403a6dc46b40531c3f4eaf67b08829f890c57cb645430abd" + ], + "hooks/relations/kube-control/.travis.yml": [ + "interface:kube-control", + "static", + "c2bd1b88f26c88b883696cca155c28671359a256ed48b90a9ea724b376f2a829" + ], + "hooks/relations/kube-control/README.md": [ + "interface:kube-control", + "static", + "66ee58f59efceefa21f7f2d7f88c1d75c07a16bbec8d09a83a7fda6373eab421" + ], + "hooks/relations/kube-control/__init__.py": [ + "interface:kube-control", + "static", + "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855" + ], + "hooks/relations/kube-control/interface.yaml": [ + "interface:kube-control", + "static", + "07e3d781283ecbb59c780cc8e4aeb9f030f22d2db6c28d731b74a36ab126960d" + ], + "hooks/relations/kube-control/provides.py": [ + "interface:kube-control", + "static", + "5a99a8549e0c9b41fbc1800b39a1ac2df8c46e1d33ec6c295c6ab139cd28ed56" + ], + "hooks/relations/kube-control/requires.py": [ + "interface:kube-control", + "static", + "c5650e6db3d47b3770e72ddddc68bfd84b0a643866cf67495c148625179a2465" + ], + "hooks/relations/kubernetes-cni/.github/workflows/tests.yaml": [ + "interface:kubernetes-cni", + "static", + "d0015cd49675976ff87832f5ef7ea20ffca961786379c72bb6acdbdeddd9137c" + ], + 
"hooks/relations/kubernetes-cni/.gitignore": [ + "interface:kubernetes-cni", + "static", + "0594213ebf9c6ef87827b30405ee67d847f73f4185a865e0e5e9c0be9d29eabe" + ], + "hooks/relations/kubernetes-cni/README.md": [ + "interface:kubernetes-cni", + "static", + "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855" + ], + "hooks/relations/kubernetes-cni/__init__.py": [ + "interface:kubernetes-cni", + "static", + "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855" + ], + "hooks/relations/kubernetes-cni/interface.yaml": [ + "interface:kubernetes-cni", + "static", + "03affdaf7e879adfdf8c434aa31d40faa6d2872faa7dfd93a5d3a1ebae02487d" + ], + "hooks/relations/kubernetes-cni/provides.py": [ + "interface:kubernetes-cni", + "static", + "2da15a0d547c3d3a6fb4745078a54d61136362c343fdf8635de14dbf714ba264" + ], + "hooks/relations/kubernetes-cni/requires.py": [ + "interface:kubernetes-cni", + "static", + "2544a8ea5f5947f8b729a0db1efe9506d2bba819ba2798eba1437a6a725c17d4" + ], + "hooks/relations/mount/.gitignore": [ + "interface:mount", + "static", + "f107e9960f299957deb6087dbc043b5ca51a7e78f5895f9444bb5bf91a6b579d" + ], + "hooks/relations/mount/LICENSE": [ + "interface:mount", + "static", + "c71d239df91726fc519c6eb72d318ec65820627232b2f796219e87dcf35d0ab4" + ], + "hooks/relations/mount/README.md": [ + "interface:mount", + "static", + "b8dc1667fe75f4339b4a6cfcb2272eb2c066268c1de08d24dd95880c0ba32e2d" + ], + "hooks/relations/mount/__init__.py": [ + "interface:mount", + "static", + "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855" + ], + "hooks/relations/mount/copyright": [ + "interface:mount", + "static", + "7c0e36e618a8544faaaa3f8e0533c2f1f4a18bcacbdd8b99b537742e6b587d58" + ], + "hooks/relations/mount/interface.yaml": [ + "interface:mount", + "static", + "038465e3afcdc6344a43fe5e224cb3468866e311d9c0c83920b4454c4ac8b602" + ], + "hooks/relations/mount/provides.py": [ + "interface:mount", + "static", + 
"39a3c6f245f2df8e3df82f7995207eaec06e0beec4bc6c412d30c777a7794e88" + ], + "hooks/relations/mount/requires.py": [ + "interface:mount", + "static", + "c2e9ad42d6009818211bb28d11e365f90b073829d5cc847998060b6009e37ff3" + ], + "hooks/relations/nrpe-external-master/README.md": [ + "interface:nrpe-external-master", + "static", + "d8ed3bc7334f6581b12b6091923f58e6f5ef62075a095a4e78fb8f434a948636" + ], + "hooks/relations/nrpe-external-master/__init__.py": [ + "interface:nrpe-external-master", + "static", + "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855" + ], + "hooks/relations/nrpe-external-master/interface.yaml": [ + "interface:nrpe-external-master", + "static", + "894f24ba56148044dae5b7febf874b427d199239bcbe1f2f55c3db06bb77b5f0" + ], + "hooks/relations/nrpe-external-master/provides.py": [ + "interface:nrpe-external-master", + "static", + "54e5400de99c051ecf6453776ad416b1cb8c6b73b34cbe2f41b617a8ed7b9daa" + ], + "hooks/relations/openstack-integration/.gitignore": [ + "interface:openstack-integration", + "static", + "9653f2820c79d92ac3518eedd0e1f43ffec128d5df9216c25d906fcba8ee46b8" + ], + "hooks/relations/openstack-integration/LICENSE": [ + "interface:openstack-integration", + "static", + "cfc7749b96f63bd31c3c42b5c471bf756814053e847c10f3eb003417bc523d30" + ], + "hooks/relations/openstack-integration/README.md": [ + "interface:openstack-integration", + "static", + "ca58e21bd973f6e65f7a8a06b4aeabd50bf137ab6fab9c8defa8789b02df3aa5" + ], + "hooks/relations/openstack-integration/__init__.py": [ + "interface:openstack-integration", + "static", + "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855" + ], + "hooks/relations/openstack-integration/copyright": [ + "interface:openstack-integration", + "static", + "7c0e36e618a8544faaaa3f8e0533c2f1f4a18bcacbdd8b99b537742e6b587d58" + ], + "hooks/relations/openstack-integration/docs/provides.md": [ + "interface:openstack-integration", + "static", + 
"ec4b81da3dfeac892f94053d753b56e504f5fd9c6ec4e743efa40efade3aa651" + ], + "hooks/relations/openstack-integration/docs/requires.md": [ + "interface:openstack-integration", + "static", + "95424fe767a26e3208800b4099f8768212b0a72b989ee145f181b67d678e3bbe" + ], + "hooks/relations/openstack-integration/interface.yaml": [ + "interface:openstack-integration", + "static", + "11b07a41bd2e24765231c4b7c7218da15f2173398d8d73698ecb210e599d02f6" + ], + "hooks/relations/openstack-integration/make_docs": [ + "interface:openstack-integration", + "static", + "a564aac288cc0bf4ff14418a341f11b065988c2b64adf93ec451e09dd92dcea5" + ], + "hooks/relations/openstack-integration/provides.py": [ + "interface:openstack-integration", + "static", + "b057676b2d51e99d3df4c7b2699887394c20228aeed692cd64fa832fb84b392d" + ], + "hooks/relations/openstack-integration/pydocmd.yml": [ + "interface:openstack-integration", + "static", + "3568f8a3c1446dfd736f31050e2b470bf125cc41717d156a4b866c7ea53861be" + ], + "hooks/relations/openstack-integration/requires.py": [ + "interface:openstack-integration", + "static", + "2fb96bf45e0b24d2da57f56c640b163b5ee4df4d698f7481af6efa3470d16263" + ], + "hooks/relations/prometheus/.gitignore": [ + "interface:prometheus", + "static", + "32dae3052f331ee34d628ef535709b301259a45df7c7522c4d35dcf49873f00b" + ], + "hooks/relations/prometheus/__init__.py": [ + "interface:prometheus", + "static", + "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855" + ], + "hooks/relations/prometheus/interface.yaml": [ + "interface:prometheus", + "static", + "619dfa6556cf927fb667cbe3507e68890f3c90536fa9a49af168a0f53de1ccfc" + ], + "hooks/relations/prometheus/provides.py": [ + "interface:prometheus", + "static", + "07b2706dc6f2fc7322088ca486c0158d5bd2a86af1adff45d3a147d3e6fef4ec" + ], + "hooks/relations/prometheus/requires.py": [ + "interface:prometheus", + "static", + "9cdccd7182fa5b7ed65937aefe305dfe2cffe25354c5f4ef779d6a2c6546428a" + ], + "hooks/relations/tls-certificates/.gitignore": 
[ + "interface:tls-certificates", + "static", + "b485e74def213c534676224e655e9276b62d401ebc643508ddc545dd335cb6dc" + ], + "hooks/relations/tls-certificates/README.md": [ + "interface:tls-certificates", + "static", + "6851227de8fcca7edfd504159dbe3e3af31080af64df46f3d3b345da7630827a" + ], + "hooks/relations/tls-certificates/__init__.py": [ + "interface:tls-certificates", + "static", + "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855" + ], + "hooks/relations/tls-certificates/docs/common.md": [ + "interface:tls-certificates", + "static", + "5e91d6637fc0ccc50af2776de9e59a0f8098244b627816b2e18fabb266e980ff" + ], + "hooks/relations/tls-certificates/docs/provides.md": [ + "interface:tls-certificates", + "static", + "5c12dfca99b5c15ba10b4e7f7cff4cb4c9b621b198deba5f2397d3c837d035fe" + ], + "hooks/relations/tls-certificates/docs/requires.md": [ + "interface:tls-certificates", + "static", + "148dd1de163d75253f0a9d3c35e108dcaacbc9bdf97e47186743e6c82a67b62e" + ], + "hooks/relations/tls-certificates/interface.yaml": [ + "interface:tls-certificates", + "static", + "e412e54b1d327bad15a882f7f0bf996212090db576b863cc9cff7a68afc0e4fa" + ], + "hooks/relations/tls-certificates/make_docs": [ + "interface:tls-certificates", + "static", + "3671543bddc9d277171263310e404df3f11660429582cb27b39b7e7ec8757a37" + ], + "hooks/relations/tls-certificates/provides.py": [ + "interface:tls-certificates", + "static", + "be2a4b9a411c770989c529fd887070ad91649481a13f5239cfd8751f234b637c" + ], + "hooks/relations/tls-certificates/pydocmd.yml": [ + "interface:tls-certificates", + "static", + "48a233f60a89f87d56e9bc715e05766f5d39bbea2bc8741ed31f67b30c8cfcb8" + ], + "hooks/relations/tls-certificates/requires.py": [ + "interface:tls-certificates", + "static", + "442d773112079bc674d3e6be75b00323fcad7efd2f03613a1972b575dd438dba" + ], + "hooks/relations/tls-certificates/tls_certificates_common.py": [ + "interface:tls-certificates", + "static", + 
"068bd32ba69bfa514e1da386919d18b348ee678b40c372f275c9110f2cc4677c" + ], + "hooks/relations/vsphere-integration/.gitignore": [ + "interface:vsphere-integration", + "static", + "9653f2820c79d92ac3518eedd0e1f43ffec128d5df9216c25d906fcba8ee46b8" + ], + "hooks/relations/vsphere-integration/LICENSE": [ + "interface:vsphere-integration", + "static", + "cfc7749b96f63bd31c3c42b5c471bf756814053e847c10f3eb003417bc523d30" + ], + "hooks/relations/vsphere-integration/README.md": [ + "interface:vsphere-integration", + "static", + "8de815f0f938cb8f58c536899ed87e55aac507a782093bd50d50bd3c1d6add1c" + ], + "hooks/relations/vsphere-integration/__init__.py": [ + "interface:vsphere-integration", + "static", + "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855" + ], + "hooks/relations/vsphere-integration/copyright": [ + "interface:vsphere-integration", + "static", + "7c0e36e618a8544faaaa3f8e0533c2f1f4a18bcacbdd8b99b537742e6b587d58" + ], + "hooks/relations/vsphere-integration/docs/provides.md": [ + "interface:vsphere-integration", + "static", + "daa3c44a2df6d774adc60bde1160f1e307129be9d696f018eab4a7e713ee775a" + ], + "hooks/relations/vsphere-integration/docs/requires.md": [ + "interface:vsphere-integration", + "static", + "4e79bb1b151f1de63b423d39a6e1831efbb6f767fe5b84963162f62c6bbb9123" + ], + "hooks/relations/vsphere-integration/interface.yaml": [ + "interface:vsphere-integration", + "static", + "20295b882dfb9a1750d8e988eaa3383cd3109fae510785ba4e415d7fa9b118af" + ], + "hooks/relations/vsphere-integration/make_docs": [ + "interface:vsphere-integration", + "static", + "cd9d91049ee3c6e6148f4bd9204a34463dde905ce665cff25be014ffc1b81b89" + ], + "hooks/relations/vsphere-integration/provides.py": [ + "interface:vsphere-integration", + "static", + "8ccb09c4a3009b59caea227ef40395fb063d3e8ce983338060fb59bbe74138c0" + ], + "hooks/relations/vsphere-integration/pydocmd.yml": [ + "interface:vsphere-integration", + "static", + 
"9f8eb566569977f10955da67def28886737e80914ae000e4acfae1313d08f105" + ], + "hooks/relations/vsphere-integration/requires.py": [ + "interface:vsphere-integration", + "static", + "d56702f60037f06259752d3bd7882f7ee46f60a4ce7b6d1071520d69ec9351f9" + ], + "hooks/scrape-relation-broken": [ + "layer:basic", + "dynamic", + "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7" + ], + "hooks/scrape-relation-changed": [ + "layer:basic", + "dynamic", + "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7" + ], + "hooks/scrape-relation-created": [ + "layer:basic", + "dynamic", + "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7" + ], + "hooks/scrape-relation-departed": [ + "layer:basic", + "dynamic", + "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7" + ], + "hooks/scrape-relation-joined": [ + "layer:basic", + "dynamic", + "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7" + ], + "hooks/start": [ + "layer:basic", + "dynamic", + "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7" + ], + "hooks/stop": [ + "layer:basic", + "dynamic", + "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7" + ], + "hooks/update-status": [ + "layer:basic", + "dynamic", + "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7" + ], + "hooks/upgrade-charm": [ + "layer:basic", + "dynamic", + "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7" + ], + "hooks/vsphere-relation-broken": [ + "layer:basic", + "dynamic", + "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7" + ], + "hooks/vsphere-relation-changed": [ + "layer:basic", + "dynamic", + "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7" + ], + "hooks/vsphere-relation-created": [ + "layer:basic", + "dynamic", + "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7" + ], + "hooks/vsphere-relation-departed": [ + "layer:basic", + "dynamic", + 
"2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7" + ], + "hooks/vsphere-relation-joined": [ + "layer:basic", + "dynamic", + "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7" + ], + "icon.svg": [ + "kubernetes-worker", + "static", + "006aca35eab800e95c476f398abeeb5233df4c668ca42f056b5a3dd8cd253371" + ], + "layer.yaml": [ + "kubernetes-worker", + "dynamic", + "315d1c9d414c18199097860be3586d8082128a541f96db7b26fb47b822336a65" + ], + "lib/charms/apt.py": [ + "layer:apt", + "static", + "c7613992eb33ac94d83fbf02f467b614ea5112eaf561c4715def90989cefa531" + ], + "lib/charms/coordinator.py": [ + "layer:coordinator", + "static", + "6dbacc87605be8efcbf19ec05341e4eb210327724495c79998a46947e034dbea" + ], + "lib/charms/layer/__init__.py": [ + "layer:basic", + "static", + "dfe0d26c6bf409767de6e2546bc648f150e1b396243619bad3aa0553ab7e0e6f" + ], + "lib/charms/layer/basic.py": [ + "layer:basic", + "static", + "d120158e0c305a3b4529426a1a63a2f59af4f5730dccf3a59a9ffe1988494cee" + ], + "lib/charms/layer/execd.py": [ + "layer:basic", + "static", + "fda8bd491032db1db8ddaf4e99e7cc878c6fb5432efe1f91cadb5b34765d076d" + ], + "lib/charms/layer/kubernetes_common.py": [ + "layer:kubernetes-common", + "static", + "bc89bd609a8e94102e00a192b7ae3caa813cca5e356536330494742bfdb6c4cb" + ], + "lib/charms/layer/kubernetes_node_base.py": [ + "layer:kubernetes-node-base", + "static", + "a7aee0b46a033497762d3e2d4e4308c56a3da72b693bf23d58c1bd4dcd9426d1" + ], + "lib/charms/layer/nagios.py": [ + "layer:nagios", + "static", + "0246710bdbea844356007a64409907d93e6e94a289d83266e8b7c5d921fb3a6c" + ], + "lib/charms/layer/options.py": [ + "layer:options", + "static", + "8ae7a07d22542fc964f2d2bee8219d1c78a68dace70a1b38d36d4aea47b1c3b2" + ], + "lib/charms/layer/snap.py": [ + "layer:snap", + "static", + "cac372a755d27c4aed87f2ad87e17d1bb5157f7e262ca6d249b1aac70a986a22" + ], + "lib/charms/layer/status.py": [ + "layer:status", + "static", + 
"d560a5e07b2e5f2b0f25f30e1f0278b06f3f90c01e4dbad5c83d71efc79018c6" + ], + "lib/charms/layer/tls_client.py": [ + "layer:tls-client", + "static", + "34531c3980777b661b913d77c432fc371ed10425473c2eb365b1dd5540c2ec6e" + ], + "lib/charms/leadership.py": [ + "layer:leadership", + "static", + "20ffcbbc08147506759726ad51567420659ffb8a2e0121079240b8706658e332" + ], + "lib/debug_script.py": [ + "layer:debug", + "static", + "a4d56f2d3e712b1b5cadb657c7195c6268d0aac6d228991049fd769e0ddaf453" + ], + "lxd-profile.yaml": [ + "kubernetes-worker", + "static", + "e62700f1993721652d83756f89e1f8b33c5d0dec6fb27554f61aaf96ccd4e379" + ], + "make_docs": [ + "layer:status", + "static", + "c990f55c8e879793a62ed8464ee3d7e0d7d2225fdecaf17af24b0df0e2daa8c1" + ], + "manifest.yaml": [ + "kubernetes-worker", + "static", + "ed0a9900c7c3eb181ac0734d3df18f9647bf380e2520dd490130fe8c52b63c21" + ], + "metadata.yaml": [ + "kubernetes-worker", + "dynamic", + "052f8a979e8d91cc973edf65bc6744043209030c69a257bb321d3abcf48bba7a" + ], + "metrics.yaml": [ + "kubernetes-worker", + "static", + "94a5eb0b0966f8ba434d91ff1e9b99b1b4c3b3044657b236d4e742d3e0d57c47" + ], + "pydocmd.yml": [ + "layer:status", + "static", + "11d9293901f32f75f4256ae4ac2073b92ce1d7ef7b6c892ba9fbb98690a0b330" + ], + "pyproject.toml": [ + "layer:apt", + "static", + "19689509a5fb9bfc90ed1e873122ac0a90f22533b7f40055c38fdd587fe297de" + ], + "reactive/__init__.py": [ + "layer:coordinator", + "static", + "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855" + ], + "reactive/apt.py": [ + "layer:apt", + "static", + "6fe40f18eb84a910a71a4acb7ec74856128de846de6029b4fc297a875692c837" + ], + "reactive/cdk_service_kicker.py": [ + "layer:cdk-service-kicker", + "static", + "cc2648443016a18324ecb26acb71d69c71610ba23df235f280383552136f7efc" + ], + "reactive/coordinator.py": [ + "layer:coordinator", + "static", + "18cda7ddf00ae0e47578d489fc3ebb376b4428cd0559797a87ddbead54360d02" + ], + "reactive/kubernetes_node_base.py": [ + 
"layer:kubernetes-node-base", + "static", + "ec71ca98f86d11552984054b3ebba9194c0cf71fbfb28b2a2a666afe11979d62" + ], + "reactive/kubernetes_worker.py": [ + "kubernetes-worker", + "static", + "ab447bdfc97a26e96edf7e407db0616ad81d23d72fef3aad0e14611111498186" + ], + "reactive/leadership.py": [ + "layer:leadership", + "static", + "e2b233cf861adc3b2d9e9c062134ce2f104953f03283cdddd88f49efee652e8f" + ], + "reactive/snap.py": [ + "layer:snap", + "static", + "de11948e6a44a7186707266235d9fc133e59584c16a8d5d3be163dc0dd3bd46a" + ], + "reactive/status.py": [ + "layer:status", + "static", + "30207fc206f24e91def5252f1c7f7c8e23c0aed0e93076babf5e03c05296d207" + ], + "reactive/tls_client.py": [ + "layer:tls-client", + "static", + "08e850e401d2004523dca6b5e6bc47c33d558bf575dd55969491e11cd3ed98c8" + ], + "requirements.txt": [ + "layer:basic", + "static", + "a00f75d80849e5b4fc5ad2e7536f947c25b1a4044b341caa8ee87a92d3a4c804" + ], + "setup.py": [ + "layer:snap", + "static", + "b219c8c6cb138a2f70a8ef9136d1cc3fe6210bd1e28c99fccb5e7ae90d547164" + ], + "templates/cdk-service-kicker": [ + "layer:cdk-service-kicker", + "static", + "b17adff995310e14d1b510337efa0af0531b55e2c487210168829e0dc1a6f99b" + ], + "templates/cdk-service-kicker.service": [ + "layer:cdk-service-kicker", + "static", + "c2d3977fa89d453f0f13a8a823621c44bb642ec7392d8b7462b631864f665029" + ], + "templates/cdk.auth-webhook-secret.yaml": [ + "layer:kubernetes-common", + "static", + "efaf34c12a5c961fa7843199070945ba05717b3656a0f3acc3327f45334bcaec" + ], + "templates/default-http-backend.yaml": [ + "kubernetes-worker", + "static", + "e31bb19574e6f23bb89cc30475d5e8b41fa96a8f67aa0b2d01316902584fa4e4" + ], + "templates/ingress-daemon-set.yaml": [ + "kubernetes-worker", + "static", + "c3b7e7d95c8a4cd0079145be797346b77a64f3b48b808cddbfdc33787b48d316" + ], + "templates/microbot-example.yaml": [ + "kubernetes-worker", + "static", + "fb8feb88979eb5d0cfcbf9a5169387667a2224c72b0aae7f01310caa8c094ebe" + ], + "templates/nagios_plugin.py": [ + 
"kubernetes-worker", + "static", + "8b425bb29ed41ee1b1c2fddc7acf5f24f5c6a0cf7432c86cf8486434032fcb14" + ], + "templates/nfs-provisioner.yaml": [ + "kubernetes-worker", + "static", + "e3ee7c995c9a3624daffdc9a09467e9e274b38a4bb6c3851d928bf7bf1151fac" + ], + "tests/data/ip_addr_json": [ + "layer:kubernetes-common", + "static", + "f129576a9e2c7738aca8669c642f123534eda63121ae450cec4cbda787b1eb06" + ], + "tests/functional/conftest.py": [ + "layer:kubernetes-common", + "static", + "fd53e0c38b4dda0c18096167889cd0d85b98b0a13225f9f8853261241e94078c" + ], + "tests/functional/test_k8s_common.py": [ + "layer:kubernetes-common", + "static", + "680a53724154771dd78422bbaf24b151788d86dd07960712c5d9e0d758499b50" + ], + "tests/unit/conftest.py": [ + "layer:kubernetes-node-base", + "static", + "ac76b7e250c8dfe68148ebd42d9275dfcceb404d2d2bc1d43ee9d9f7d90fc82b" + ], + "tests/unit/test_k8s_common.py": [ + "layer:kubernetes-common", + "static", + "23e097e7f21e4f4f062caac0146bb85373e895a30be1be5667b90d0e84435882" + ], + "tests/unit/test_layer.py": [ + "layer:kubernetes-node-base", + "static", + "67a2c3f0f8703e020bd92ea169f414e504d4af5c20cf8345ddb6a2d36d4ffa75" + ], + "tox.ini": [ + "layer:kubernetes-node-base", + "static", + "243ee4f6113e4c481adf1cb03fea30f96207a1274ce36a7fa5fb8f916f603dd1" + ], + "version": [ + "kubernetes-worker", + "dynamic", + "a36d32d4b537bff7998870faf8069acd3e73541bab3bc95f15ba95ad12ec9e99" + ], + "wheelhouse.txt": [ + "kubernetes-worker", + "dynamic", + "a020e95e841e5f28e25d6c0906bdaab82ef9b730d8d15b314464c75ffa46ba72" + ], + "wheelhouse/Jinja2-3.0.3.tar.gz": [ + "layer:basic", + "dynamic", + "611bb273cd68f3b993fabdc4064fc858c5b47a973cb5aa7999ec1ba405c87cd7" + ], + "wheelhouse/MarkupSafe-2.0.1.tar.gz": [ + "layer:basic", + "dynamic", + "594c67807fb16238b30c44bdf74f36c02cdf22d1c8cda91ef8a0ed8dabf5620a" + ], + "wheelhouse/PyYAML-5.3.1.tar.gz": [ + "layer:basic", + "dynamic", + "b8eac752c5e14d3eca0e6dd9199cd627518cb5ec06add0de9d32baeee6fe645d" + ], + 
"wheelhouse/charmhelpers-1.2.1.tar.gz": [ + "layer:basic", + "dynamic", + "298bb9e90d9392e2b66d10a5199b1b2d459dc8d5434b897913325904989dd2d7" + ], + "wheelhouse/charms.reactive-1.5.0.tar.gz": [ + "layer:basic", + "dynamic", + "b56484ed17f412c7738ff21e4ddc0e7c758af2288eac9fe521a86c8c31c1b150" + ], + "wheelhouse/netaddr-0.7.19.tar.gz": [ + "layer:basic", + "dynamic", + "38aeec7cdd035081d3a4c306394b19d677623bf76fa0913f6695127c7753aefd" + ], + "wheelhouse/pbr-5.9.0.tar.gz": [ + "__pip__", + "dynamic", + "e8dca2f4b43560edef58813969f52a56cef023146cbb8931626db80e6c1c4308" + ], + "wheelhouse/pip-18.1.tar.gz": [ + "layer:basic", + "dynamic", + "c0a292bd977ef590379a3f05d7b7f65135487b67470f6281289a94e015650ea1" + ], + "wheelhouse/pyaml-21.10.1.tar.gz": [ + "__pip__", + "dynamic", + "c6519fee13bf06e3bb3f20cacdea8eba9140385a7c2546df5dbae4887f768383" + ], + "wheelhouse/setuptools-41.6.0.zip": [ + "layer:basic", + "dynamic", + "6afa61b391dcd16cb8890ec9f66cc4015a8a31a6e1c2b4e0c464514be1a3d722" + ], + "wheelhouse/setuptools_scm-1.17.0.tar.gz": [ + "layer:basic", + "dynamic", + "70a4cf5584e966ae92f54a764e6437af992ba42ac4bca7eb37cc5d02b98ec40a" + ], + "wheelhouse/six-1.16.0.tar.gz": [ + "__pip__", + "dynamic", + "1e61c37477a1626458e36f7b1d82aa5c9b094fa4802892072e49de9c60c4c926" + ], + "wheelhouse/tenacity-5.0.3.tar.gz": [ + "layer:snap", + "dynamic", + "24b7f302a1caa1801e58b39ea557129c095966e64e5b1ddad3c93a6cb033e38b" + ], + "wheelhouse/wheel-0.33.6.tar.gz": [ + "layer:basic", + "dynamic", + "10c9da68765315ed98850f8e048347c3eb06dd81822dc2ab1d4fde9dc9702646" + ] + } +} \ No newline at end of file diff --git a/kubernetes-worker/.github/workflows/main.yaml b/kubernetes-worker/.github/workflows/main.yaml new file mode 100644 index 0000000..ba25b2e --- /dev/null +++ b/kubernetes-worker/.github/workflows/main.yaml @@ -0,0 +1,31 @@ +name: Test Suite +on: [pull_request] + +jobs: + call-inclusive-naming-check: + name: Inclusive naming + uses: 
canonical-web-and-design/Inclusive-naming/.github/workflows/woke.yaml@main + with: + fail-on-error: "true" + + lint-unit: + name: Lint, Unit + runs-on: ubuntu-latest + strategy: + matrix: + python: [3.6, 3.7, 3.8, 3.9] + steps: + - name: Check out code + uses: actions/checkout@v2 + - name: Setup Python + uses: actions/setup-python@v2 + with: + python-version: ${{ matrix.python }} + - name: Install Dependencies + run: | + pip install tox + sudo snap install charm --classic + - name: Lint + run: tox -vve lint + - name: Unit Tests + run: tox -vve unit diff --git a/kubernetes-worker/.github/workflows/main.yml b/kubernetes-worker/.github/workflows/main.yml new file mode 100644 index 0000000..6febe72 --- /dev/null +++ b/kubernetes-worker/.github/workflows/main.yml @@ -0,0 +1,39 @@ +name: Test Suite +on: [pull_request] + +jobs: + call-inclusive-naming-check: + name: Inclusive naming + uses: canonical-web-and-design/Inclusive-naming/.github/workflows/woke.yaml@main + with: + fail-on-error: "true" + + validate-wheelhouse: + name: Validate Wheelhouse + uses: charmed-kubernetes/workflows/.github/workflows/validate-wheelhouse.yaml@main + + lint-unit: + name: Lint Unit + uses: charmed-kubernetes/workflows/.github/workflows/lint-unit.yaml@main + + integration-test: + name: Integration test with VMWare + runs-on: self-hosted + timeout-minutes: 360 + steps: + - name: Check out code + uses: actions/checkout@v2 + - name: Setup Python + uses: actions/setup-python@v2 + with: + python-version: 3.8 + - name: Setup operator environment + uses: charmed-kubernetes/actions-operator@main + with: + provider: vsphere + credentials-yaml: ${{ secrets.CREDENTIALS_YAML }} + clouds-yaml: ${{ secrets.CLOUDS_YAML }} + bootstrap-constraints: "arch=amd64 cores=2 mem=4G" + bootstrap-options: "${{ secrets.FOCAL_BOOTSTRAP_OPTIONS }} --model-default datastore=vsanDatastore --model-default primary-network=VLAN_2763" + - name: Run test + run: tox -e integration diff --git a/kubernetes-worker/.gitignore 
b/kubernetes-worker/.gitignore new file mode 100644 index 0000000..15deb39 --- /dev/null +++ b/kubernetes-worker/.gitignore @@ -0,0 +1,5 @@ +.tox/ +__pycache__/ +*.pyc +placeholders/ +*.tgz diff --git a/kubernetes-worker/.travis.yml b/kubernetes-worker/.travis.yml new file mode 100644 index 0000000..66d8e1f --- /dev/null +++ b/kubernetes-worker/.travis.yml @@ -0,0 +1,7 @@ +language: python +python: + - "3.5" +install: + - pip install tox-travis +script: + - tox diff --git a/kubernetes-worker/CONTRIBUTING.md b/kubernetes-worker/CONTRIBUTING.md new file mode 100644 index 0000000..3de561f --- /dev/null +++ b/kubernetes-worker/CONTRIBUTING.md @@ -0,0 +1,37 @@ +# Contributor Guide + +This Juju charm is open source ([Apache License 2.0](./LICENSE)) and we actively seek any community contibutions +for code, suggestions and documentation. +This page details a few notes, workflows and suggestions for how to make contributions most effective and help us +all build a better charm - please give them a read before working on any contributions. + +## Licensing + +This charm has been created under the [Apache License 2.0](./LICENSE), which will cover any contributions you may +make to this project. Please familiarise yourself with the terms of the license. + +Additionally, this charm uses the Harmony CLA agreement. It’s the easiest way for you to give us permission to +use your contributions. +In effect, you’re giving us a license, but you still own the copyright — so you retain the right to modify your +code and use it in other projects. Please [sign the CLA here](https://ubuntu.com/legal/contributors/agreement) before +making any contributions. + +## Code of conduct + +We have adopted the Ubuntu code of Conduct. You can read this in full [here](https://ubuntu.com/community/code-of-conduct). + +## Contributing code + +To contribute code to this project, please use the following workflow: + +1. 
[Submit a bug](https://bugs.launchpad.net/charm-kubernetes-worker/+filebug) to explain the need for and track the change. +2. Create a branch on your fork of the repo with your changes, including a unit test covering the new or modified code. +3. Submit a PR. The PR description should include a link to the bug on Launchpad. +4. Update the Launchpad bug to include a link to the PR and the `review-needed` tag. +5. Once reviewed and merged, the change will become available on the edge channel and assigned to an appropriate milestone + for further release according to priority. + +## Documentation + +Documentation for this charm is currently maintained as part of the Charmed Kubernetes docs. +See [this page](https://github.com/charmed-kubernetes/kubernetes-docs/blob/master/pages/k8s/charm-kubernetes-worker.md) diff --git a/kubernetes-worker/HACKING.md b/kubernetes-worker/HACKING.md new file mode 100644 index 0000000..28e380c --- /dev/null +++ b/kubernetes-worker/HACKING.md @@ -0,0 +1,25 @@ + # Kubernetes Worker + +### Building from the layer + +You can clone the kubernetes-worker layer with git and build locally if you +have the charm package/snap installed. + +```shell +# Instal the snap +sudo snap install charm --channel=edge + +# Set the build environment +export JUJU_REPOSITORY=$HOME + +# Clone the layer and build it to our JUJU_REPOSITORY +git clone https://github.com/juju-solutions/kubernetes +cd kubernetes/cluster/juju/layers/kubernetes-worker +charm build -r +``` + +### Contributing + +TBD + + diff --git a/kubernetes-worker/LICENSE b/kubernetes-worker/LICENSE new file mode 100644 index 0000000..0543093 --- /dev/null +++ b/kubernetes-worker/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + +TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + +1. Definitions. 
+ + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + +2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + +3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + +4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the 
following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + +5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + +6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + +7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + +8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + +9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ +END OF TERMS AND CONDITIONS + +APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + +Copyright Canonical, Ltd. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. diff --git a/kubernetes-worker/Makefile b/kubernetes-worker/Makefile new file mode 100644 index 0000000..a1ad3a5 --- /dev/null +++ b/kubernetes-worker/Makefile @@ -0,0 +1,24 @@ +#!/usr/bin/make + +all: lint unit_test + + +.PHONY: clean +clean: + @rm -rf .tox + +.PHONY: apt_prereqs +apt_prereqs: + @# Need tox, but don't install the apt version unless we have to (don't want to conflict with pip) + @which tox >/dev/null || (sudo apt-get install -y python-pip && sudo pip install tox) + +.PHONY: lint +lint: apt_prereqs + @tox --notest + @PATH=.tox/py34/bin:.tox/py35/bin flake8 $(wildcard hooks reactive lib unit_tests tests) + @charm proof + +.PHONY: unit_test +unit_test: apt_prereqs + @echo Starting tests... 
+ tox diff --git a/kubernetes-worker/README.md b/kubernetes-worker/README.md new file mode 100644 index 0000000..40b7c92 --- /dev/null +++ b/kubernetes-worker/README.md @@ -0,0 +1,22 @@ +# Kubernetes Worker + +## Usage + +This charm deploys a container runtime, and additionally stands up the Kubernetes +worker applications: kubelet, and kube-proxy. + +In order for this charm to be useful, it should be deployed with its companion +charm [kubernetes-control-plane](https://charmhub.io/kubernetes-control-plane) +and linked with an SDN-Plugin and a container runtime such as +[containerd](https://charmhub.io/containerd). + +This charm is a component of Charmed Kubernetes. For full information, +please visit the [official Charmed Kubernetes docs](https://www.ubuntu.com/kubernetes/docs/charm-kubernetes-worker). + +## Developers + +### Building the charm + +``` +make charm +``` diff --git a/kubernetes-worker/actions.yaml b/kubernetes-worker/actions.yaml new file mode 100644 index 0000000..2de58bd --- /dev/null +++ b/kubernetes-worker/actions.yaml @@ -0,0 +1,75 @@ +"debug": + "description": "Collect debug data" +"cis-benchmark": + "description": | + Run the CIS Kubernetes Benchmark against snap-based components. + "params": + "apply": + "type": "string" + "default": "none" + "description": | + Apply remediations to address benchmark failures. The default, 'none', + will not attempt to fix any reported failures. Set to 'conservative' + to resolve simple failures. Set to 'dangerous' to attempt to resolve + all failures. + + Note: Applying any remediation may result in an unusable cluster. + "config": + "type": "string" + "default": "https://github.com/charmed-kubernetes/kube-bench-config/archive/cis-1.23.zip#sha1=3cda2fc68b4ca36f69f5913bfc0b02576e7a3b3d" + "description": | + Archive containing configuration files to use when running kube-bench. + The default value is known to be compatible with snap components. 
When + using a custom URL, append '#=' to verify the + archive integrity when downloaded. + "release": + "type": "string" + "default": "https://github.com/aquasecurity/kube-bench/releases/download/v0.6.8/kube-bench_0.6.8_linux_amd64.tar.gz#sha256=5f9c5231949bd022a6993f5297cc05bb80a1b7c36a43cefed0a8c8af26778863" + "description": | + Archive containing the 'kube-bench' binary to run. The default value + points to a stable upstream release. When using a custom URL, append + '#=' to verify the archive integrity when + downloaded. + + This may also be set to the special keyword 'upstream'. In this case, + the action will compile and use a local kube-bench binary built from + the master branch of the upstream repository: + https://github.com/aquasecurity/kube-bench + +"pause": + "description": | + Mark the node as unschedulable to prevent new pods from arriving, and + evict existing pods. + "params": + "delete-local-data": + "type": "boolean" + "description": | + Continue even if there are pods using emptyDir (local data that will + be deleted when the node is drained). + "default": !!bool "false" + "force": + "type": "boolean" + "description": | + Continue even if there are pods not managed by a + ReplicationController, ReplicaSet, Job, DaemonSet or StatefulSet. + "default": !!bool "false" +"resume": + "description": | + Mark node as schedulable. +"microbot": + "description": "Launch microbot containers" + "params": + "delete": + "type": "boolean" + "default": !!bool "false" + "description": "Remove a microbots deployment, service, and ingress if True." + "registry": + "type": "string" + "default": "rocks.canonical.com:443/cdk" + "description": "Registry to use for the microbot image." + "replicas": + "type": "integer" + "default": !!int "3" + "description": "Number of microbots to launch in Kubernetes." 
+"upgrade": + "description": "Upgrade the kubernetes snaps" diff --git a/kubernetes-worker/actions/cis-benchmark b/kubernetes-worker/actions/cis-benchmark new file mode 100755 index 0000000..ed7d763 --- /dev/null +++ b/kubernetes-worker/actions/cis-benchmark @@ -0,0 +1,396 @@ +#!/usr/local/sbin/charm-env python3 +import os +import json +import shlex +import shutil +import subprocess +import sys +import tempfile +from pathlib import Path + +import charms.layer +import charms.reactive +from charmhelpers.core import hookenv, unitdata +from charmhelpers.fetch.archiveurl import ArchiveUrlFetchHandler +from charms.layer import snap +from charms.reactive import clear_flag, is_flag_set, set_flag + + +BENCH_HOME = "/home/ubuntu/kube-bench" +BENCH_BIN = "{}/kube-bench".format(BENCH_HOME) +BENCH_CFG = "{}/cfg-ck".format(BENCH_HOME) +GO_PKG = "github.com/aquasecurity/kube-bench" +RESULTS_DIR = "/home/ubuntu/kube-bench-results" + +# Remediation dicts associate a failing test with a tuple to fix it. +# Conservative fixes will probably leave the cluster in a good state. +# Dangerous fixes will likely break the cluster. 
+# Tuple examples: +# {'1.2.3': ('manual -- we don't know how to auto fix this', None, None)} +# {'1.2.3': ('cli', 'command to run', None)} +# {'1.2.3': ('kv', 'snap', {cfg_key: value})} +CONSERVATIVE = { + "0.0.0": ("cli", 'echo "this is fine"', None), + # etcd (no known failures with a default install) + # k8s-control-plane (no known failures with a default install) + # k8s-worker (no known failures with a default install) +} +ADMISSION_PLUGINS = { + "enable-admission-plugins": ( + "PersistentVolumeLabel", + "PodSecurityPolicy," "AlwaysPullImages", + "NodeRestriction", + ) +} +DANGEROUS = { + "0.0.0": ("cli", 'echo "this is fine"', None), + # etcd (no known warnings with a default install) + # k8s-control-plane + "1.1.21": ("cli", "chmod -R 600 /root/cdk/*.key", None), + "1.2.9": ("manual", None, None), + "1.2.11": ("kv", "kube-apiserver", ADMISSION_PLUGINS), + "1.2.25": ("manual", None, None), + "1.2.33": ("manual", None, None), + "1.2.34": ("manual", None, None), + # k8s-worker + "4.2.9": ("kv", "kubelet", {"event-qps": 0}), + "4.2.13": ( + "kv", + "kubelet", + { + "tls-cipher-suites": "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256," + "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256," + "TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305," + "TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384," + "TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305," + "TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384," + "TLS_RSA_WITH_AES_256_GCM_SHA384," + "TLS_RSA_WITH_AES_128_GCM_SHA256" + }, + ), +} + + +def _fail(msg): + """Fail the action with a given message.""" + hookenv.action_fail(msg) + sys.exit() + + +def _move_matching_parent(dirpath, filename, dest): + """Move a parent directory that contains a specific file. + + Helper function that walks a directory looking for a given file. If found, + the file's parent directory is moved to the given destination. 
+ + :param: dirpath: String path to search + :param: filename: String file to find + :param: dest: String destination of the found parent directory + """ + for root, _, files in os.walk(dirpath): + for name in files: + if name == filename: + hookenv.log("Moving {} to {}".format(root, dest)) + shutil.move(root, dest) + return + else: + _fail("Could not find {} in {}".format(filename, dirpath)) + + +def _restart_charm(): + """Set charm-specific flags and call reactive.main().""" + app = hookenv.charm_name() or "unknown" + if "master" in app: + hookenv.log("Restarting master") + clear_flag("kubernetes-master.components.started") + # or this app could have been upgrade to new flags + clear_flag("kubernetes-control-plane.components.started") + elif "control-plane" in app: + hookenv.log("Restarting control-plane") + clear_flag("kubernetes-control-plane.components.started") + elif "worker" in app: + hookenv.log("Restarting worker") + set_flag("kubernetes-worker.restart-needed") + elif "etcd" in app: + hookenv.log("No-op: etcd does not need to be restarted") + return + else: + _fail("Unable to determine the charm to restart: {}".format(app)) + + # Invoke reactive so the charm will react to the flags we just managed + charms.layer.import_layer_libs() + charms.reactive.main() + + +def install(release, config): + """Install kube-bench and related configuration. + + Release and configuration are set via action params. If installing an + upstream release, this method will also install 'go' if needed. 
+ + :param: release: Archive URI or 'upstream' + :param: config: Archive URI of configuration files + """ + if Path(BENCH_HOME).exists(): + shutil.rmtree(BENCH_HOME) + fetcher = ArchiveUrlFetchHandler() + + if release == "upstream": + Path(BENCH_HOME).mkdir(parents=True, exist_ok=True) + + # Setup the 'go' environment + env = os.environ.copy() + go_bin = shutil.which("go", path="{}:/snap/bin".format(env["PATH"])) + if not go_bin: + snap.install("go", channel="stable", classic=True) + go_bin = "/snap/bin/go" + go_cache = os.getenv("GOCACHE", "/var/snap/go/common/cache") + go_path = os.getenv("GOPATH", "/var/snap/go/common") + env["GOCACHE"] = go_cache + env["GOPATH"] = go_path + Path(go_path).mkdir(parents=True, exist_ok=True) + + # From https://github.com/aquasecurity/kube-bench#installing-from-sources + go_cmd = "{bin} get {pkg} " "github.com/golang/dep/cmd/dep".format( + bin=go_bin, pkg=GO_PKG + ) + try: + subprocess.check_call(shlex.split(go_cmd), cwd=go_path, env=env) + except subprocess.CalledProcessError: + _fail("Failed to run: {}".format(go_cmd)) + + go_cmd = "{bin} build -o {out} {base}/src/{pkg}".format( + bin=go_bin, out=BENCH_BIN, base=go_path, pkg=GO_PKG + ) + try: + subprocess.check_call(shlex.split(go_cmd), cwd=go_path, env=env) + except subprocess.CalledProcessError: + _fail("Failed to run: {}".format(go_cmd)) + else: + # Fetch the release URI and put it in the right place. + archive_path = fetcher.install(source=release) + # NB: We may not know the structure of the archive, but we know the + # directory containing 'kube-bench' belongs in our BENCH_HOME. + _move_matching_parent( + dirpath=archive_path, filename="kube-bench", dest=BENCH_HOME + ) + + # Fetch the config URI and put it in the right place. + archive_dir = fetcher.install(source=config) + # NB: We may not know the structure of the archive, but we know the + # directory containing 'config.yaml' belongs in our BENCH_CFG. 
+ _move_matching_parent(dirpath=archive_dir, filename="config.yaml", dest=BENCH_CFG) + + +def apply(remediations=None): + """Apply remediations to address benchmark failures. + + :param: remediations: either 'conservative' or 'dangerous' + """ + applied_fixes = 0 + danger = True if remediations == "dangerous" else False + db = unitdata.kv() + + json_log = report(log_format="json") + hookenv.log("Loading JSON from: {}".format(json_log)) + try: + with open(json_log, "r") as f: + full_json = json.load(f) + except Exception: + _fail("Failed to load: {}".format(json_log)) + + full_json = full_json.get("Controls")[0] if "Controls" in full_json else full_json + for test in full_json.get("tests", {}): + for result in test.get("results", {}): + test_num = result.get("test_number") + test_remediation = result.get("remediation") + test_status = result.get("status", "") + + if test_status.lower() in ("fail", "warn"): + test_remedy = CONSERVATIVE.get(test_num) + if not test_remedy and danger: + # no conservative remedy, check dangerous if user wants + test_remedy = DANGEROUS.get(test_num) + if isinstance(test_remedy, tuple): + if test_remedy[0] == "manual": + # we don't know how to autofix; log remediation text + hookenv.log( + "Test {}: unable to auto-apply remedy.\n" + "Manual steps:\n{}".format(test_num, test_remediation) + ) + elif test_remedy[0] == "cli": + cmd = shlex.split(test_remedy[1]) + try: + out = subprocess.check_output(cmd) + except subprocess.CalledProcessError: + _fail("Test {}: failed to run: {}".format(test_num, cmd)) + else: + hookenv.log( + "Test {}: applied remedy: {}\n" + "Output: {}".format(test_num, cmd, out) + ) + applied_fixes += 1 + elif test_remedy[0] == "kv": + cfg_key = "cis-" + test_remedy[1] + cfg = db.get(cfg_key) or {} + cfg.update(test_remedy[2]) + db.set(cfg_key, cfg) + + hookenv.log( + "Test {}: updated configuration: {}\n".format(test_num, cfg) + ) + applied_fixes += 1 + else: + hookenv.log("Test {}: remediation is 
missing".format(test_num)) + + # CLI and KV changes will require a charm restart; do it. + if applied_fixes > 0: + _restart_charm() + + msg = ( + 'Applied {} remediations. Re-run with "apply=none" to generate a ' "new report." + ).format(applied_fixes) + hookenv.action_set({"summary": msg}) + + +def reset(): + """Reset any remediations we applied to unitdata.kv(). + + This action does not track individual remediations to reset. Therefore, + this function unconditionally unsets all 'cis-' prefixed arguments that + this action may have set and restarts the relevant charm. + """ + db = unitdata.kv() + + db.unset("cis-kube-apiserver") + db.unset("cis-kube-scheduler") + db.unset("cis-kube-controller-manager") + db.unset("cis-kubelet") + _restart_charm() + + hookenv.action_set( + { + "summary": ( + "Reset is complete. Re-run with " + '"apply=none" to generate a new report.' + ) + } + ) + + +def report(log_format="text"): + """Run kube-bench and report results. + + By default, save the full plain-text results to our RESULTS_DIR and set + action output with a summary. This function can also save full results in + a machine-friendly json format. 
+ + :param: log_format: String determines if output is text or json + :returns: Path to results log + """ + Path(RESULTS_DIR).mkdir(parents=True, exist_ok=True) + + # Node type is different depending on the charm + app = hookenv.charm_name() or "unknown" + version = "cis-1.23" + if "master" in app: + target = "master" + if "control-plane" in app: + # must refer to this as upstream kube-bench tests do + # wokeignore:rule=master + target = "master" + elif "worker" in app: + target = "node" + elif "etcd" in app: + target = "etcd" + else: + _fail("Unable to determine the target to benchmark: {}".format(app)) + + # Commands and log names are different depending on the format + if log_format == "json": + log_prefix = "results-json-" + verbose_cmd = ( + "{bin} -D {cfg} --benchmark {ver} --json run " "--targets {target}" + ).format(bin=BENCH_BIN, cfg=BENCH_CFG, ver=version, target=target) + else: + log_prefix = "results-text-" + verbose_cmd = ( + "{bin} -D {cfg} --benchmark {ver} run " "--targets {target}" + ).format(bin=BENCH_BIN, cfg=BENCH_CFG, ver=version, target=target) + + summary_cmd = ( + "{bin} -D {cfg} --benchmark {ver} " + "--noremediations --noresults run --targets {target}" + ).format(bin=BENCH_BIN, cfg=BENCH_CFG, ver=version, target=target) + + # Store full results for future consumption + with tempfile.NamedTemporaryFile( + mode="w+b", prefix=log_prefix, dir=RESULTS_DIR, delete=False + ) as res_file: + try: + subprocess.call( + shlex.split(verbose_cmd), stdout=res_file, stderr=subprocess.DEVNULL + ) + except subprocess.CalledProcessError: + _fail("Failed to run: {}".format(verbose_cmd)) + else: + # remember the filename for later (and make it readable, why not?) + Path(res_file.name).chmod(0o644) + log = res_file.name + + # When making a summary, we also have a verbose report. Set action output + # so operators can see everything related to this run. 
+ try: + out = subprocess.check_output( + shlex.split(summary_cmd), universal_newlines=True, stderr=subprocess.DEVNULL + ) + except subprocess.CalledProcessError: + _fail("Failed to run: {}".format(summary_cmd)) + else: + fetch_cmd = "juju scp {unit}:{file} .".format( + unit=hookenv.local_unit(), file=log + ) + hookenv.action_set({"cmd": summary_cmd, "report": fetch_cmd, "summary": out}) + + return log or None + + +if __name__ == "__main__": + if not ( + is_flag_set("snap.installed.etcd") + or is_flag_set("kubernetes-master.snaps.installed") + or is_flag_set("kubernetes-control-plane.snaps.installed") + or is_flag_set("kubernetes-worker.snaps.installed") + or is_flag_set("kubernetes-node.snaps.installed") + ): + msg = "Snaps are not yet installed on this unit." + _fail(msg) + + # Validate action params + release = hookenv.action_get("release") or "upstream" + config = hookenv.action_get("config") + if not config: + msg = 'Missing "config" parameter' + _fail(msg) + remediations = hookenv.action_get("apply") + if remediations not in ["none", "conservative", "dangerous", "reset"]: + msg = 'Invalid "apply" parameter: {}'.format(remediations) + _fail(msg) + + # TODO: may want an option to overwrite an existing install + if Path(BENCH_BIN).exists() and Path(BENCH_CFG).exists(): + hookenv.log("{} exists; skipping install".format(BENCH_HOME)) + else: + hookenv.log("Installing benchmark from: {}".format(release)) + install(release, config) + + # Reset, remediate, or report + if remediations == "reset": + hookenv.log("Attempting to remove all remediations") + reset() + elif remediations != "none": + hookenv.log('Applying "{}" remediations'.format(remediations)) + apply(remediations) + else: + hookenv.log("Report only; no remediations were requested") + report(log_format="text") diff --git a/kubernetes-worker/actions/debug b/kubernetes-worker/actions/debug new file mode 100755 index 0000000..8ba160e --- /dev/null +++ b/kubernetes-worker/actions/debug @@ -0,0 +1,102 @@ 
+#!/usr/local/sbin/charm-env python3 + +import os +import subprocess +import tarfile +import tempfile +import traceback +from contextlib import contextmanager +from datetime import datetime +from charmhelpers.core.hookenv import action_set, local_unit + +archive_dir = None +log_file = None + + +@contextmanager +def archive_context(): + """ Open a context with a new temporary directory. + + When the context closes, the directory is archived, and the archive + location is added to Juju action output. """ + global archive_dir + global log_file + with tempfile.TemporaryDirectory() as temp_dir: + name = "debug-" + datetime.now().strftime("%Y%m%d%H%M%S") + archive_dir = os.path.join(temp_dir, name) + os.makedirs(archive_dir) + with open("%s/debug.log" % archive_dir, "w") as log_file: + yield + os.chdir(temp_dir) + tar_path = "/home/ubuntu/%s.tar.gz" % name + with tarfile.open(tar_path, "w:gz") as f: + f.add(name) + action_set({ + "path": tar_path, + "command": "juju scp %s:%s ." % (local_unit(), tar_path), + "message": " ".join([ + "Archive has been created on unit %s." % local_unit(), + "Use the juju scp command to copy it to your local machine." + ]) + }) + + +def log(msg): + """ Log a message that will be included in the debug archive. + + Must be run within archive_context """ + timestamp = datetime.now().strftime("%Y-%m-%d %H:%M:%S") + for line in str(msg).splitlines(): + log_file.write(timestamp + " | " + line.rstrip() + "\n") + + +def run_script(script): + """ Run a single script. 
Must be run within archive_context """ + log("Running script: " + script) + script_dir = os.path.join(archive_dir, script) + os.makedirs(script_dir) + env = os.environ.copy() + env["PYTHONPATH"] = "lib" # allow same imports as reactive code + env["DEBUG_SCRIPT_DIR"] = script_dir + with open(script_dir + "/stdout", "w") as stdout: + with open(script_dir + "/stderr", "w") as stderr: + process = subprocess.Popen( + "debug-scripts/" + script, + stdout=stdout, stderr=stderr, env=env + ) + try: + exit_code = process.wait(timeout=300) + except subprocess.TimeoutExpired: + log("ERROR: still running, terminating") + process.terminate() + try: + exit_code = process.wait(timeout=10) + except subprocess.TimeoutExpired: + log("ERROR: still running, killing") + process.kill() + exit_code = process.wait(timeout=10) + if exit_code != 0: + log("ERROR: %s failed with exit code %d" % (script, exit_code)) + + +def run_all_scripts(): + """ Run all scripts. For the sake of robustness, log and ignore any + exceptions that occur. + + Must be run within archive_context """ + scripts = os.listdir("debug-scripts") + for script in scripts: + try: + run_script(script) + except: + log(traceback.format_exc()) + + +def main(): + """ Open an archive context and run all scripts. """ + with archive_context(): + run_all_scripts() + + +if __name__ == "__main__": + main() diff --git a/kubernetes-worker/actions/microbot b/kubernetes-worker/actions/microbot new file mode 100755 index 0000000..d06151f --- /dev/null +++ b/kubernetes-worker/actions/microbot @@ -0,0 +1,81 @@ +#!/usr/local/sbin/charm-env python3 + +# Copyright 2015 The Kubernetes Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import os +import sys + +from charmhelpers.core.hookenv import action_fail, action_get, action_set +from charmhelpers.core.hookenv import unit_public_ip +from charmhelpers.core.templating import render +from charms.reactive import endpoint_from_flag +from subprocess import call, check_output + +os.environ['PATH'] += os.pathsep + os.path.join(os.sep, 'snap', 'bin') + +context = {} +context['delete'] = action_get('delete') +context['public_address'] = unit_public_ip() +context['registry'] = action_get('registry') +context['replicas'] = action_get('replicas') + +arch = check_output(['dpkg', '--print-architecture']).rstrip() +context['arch'] = arch.decode('utf-8') + +if not context['replicas']: + context['replicas'] = 3 + +# Declare a kubectl template when invoking kubectl +kubectl = ['kubectl', '--kubeconfig=/root/.kube/config'] + +# Remove deployment if requested +if context['delete']: + service_del = kubectl + ['delete', 'svc', 'microbot'] + service_response = call(service_del) + deploy_del = kubectl + ['delete', 'deployment', 'microbot'] + deploy_response = call(deploy_del) + ingress_del = kubectl + ['delete', 'ing', 'microbot-ingress'] + ingress_response = call(ingress_del) + + if ingress_response != 0: + action_set({'microbot-ing': + 'Failed removal of microbot ingress resource.'}) + if deploy_response != 0: + action_set({'microbot-deployment': + 'Failed removal of microbot deployment resource.'}) + if service_response != 0: + action_set({'microbot-service': + 'Failed removal of microbot service resource.'}) + sys.exit(0) + +kube_control = 
endpoint_from_flag('kube-control.registry_location.available') +if kube_control: + registry_location = kube_control.get_registry_location() + context['registry'] = registry_location + +# Creation request +render('microbot-example.yaml', '/root/cdk/addons/microbot.yaml', + context) + +create_command = kubectl + ['apply', '-f', + '/root/cdk/addons/microbot.yaml'] + +create_response = call(create_command) + +if create_response == 0: + action_set({'address': + 'microbot.{}.nip.io'.format(context['public_address'])}) +else: + action_fail('Failed to apply microbot manifest.') diff --git a/kubernetes-worker/actions/pause b/kubernetes-worker/actions/pause new file mode 100755 index 0000000..f3bcabb --- /dev/null +++ b/kubernetes-worker/actions/pause @@ -0,0 +1,34 @@ +#!/usr/local/sbin/charm-env python3 + +import os +import subprocess + +from charms.layer.kubernetes_common import ( + get_node_name, + kubectl, +) + +from charmhelpers.core.hookenv import ( + action_fail, + action_get, + status_set, +) + +# Make sure the kubectl snap can be found +os.environ['PATH'] += os.pathsep + os.path.join(os.sep, 'snap', 'bin') + +drain_args = ['--ignore-daemonsets'] + +if action_get('delete-local-data'): + drain_args.append('--delete-local-data=true') + +if action_get('force'): + drain_args.append('--force') + +try: + kubectl('drain', get_node_name(), *drain_args) +except subprocess.CalledProcessError as e: + action_fail('{}. 
See unit logs for details.'.format(str(e))) + raise + +status_set('waiting', 'Kubernetes unit paused') diff --git a/kubernetes-worker/actions/resume b/kubernetes-worker/actions/resume new file mode 100755 index 0000000..a8b6422 --- /dev/null +++ b/kubernetes-worker/actions/resume @@ -0,0 +1,25 @@ +#!/usr/local/sbin/charm-env python3 + +import os +import subprocess + +from charms.layer.kubernetes_common import ( + get_node_name, + kubectl, +) + +from charmhelpers.core.hookenv import ( + action_fail, + status_set, +) + +# make sure the kubectl snap can be found +os.environ['PATH'] += os.pathsep + os.path.join(os.sep, 'snap', 'bin') + +try: + kubectl('uncordon', get_node_name()) +except subprocess.CalledProcessError as e: + action_fail('{}. See unit logs for details.'.format(str(e))) + raise + +status_set('active', 'Kubernetes unit resumed') diff --git a/kubernetes-worker/actions/upgrade b/kubernetes-worker/actions/upgrade new file mode 100755 index 0000000..a97c19b --- /dev/null +++ b/kubernetes-worker/actions/upgrade @@ -0,0 +1,5 @@ +#!/bin/sh +set -eux + +charms.reactive set_state kubernetes-worker.snaps.upgrade-specified +exec hooks/config-changed diff --git a/kubernetes-worker/bin/charm-env b/kubernetes-worker/bin/charm-env new file mode 100755 index 0000000..d211ce9 --- /dev/null +++ b/kubernetes-worker/bin/charm-env @@ -0,0 +1,107 @@ +#!/bin/bash + +VERSION="1.0.0" + + +find_charm_dirs() { + # Hopefully, $JUJU_CHARM_DIR is set so which venv to use in unambiguous. 
+ if [[ -n "$JUJU_CHARM_DIR" || -n "$CHARM_DIR" ]]; then + if [[ -z "$JUJU_CHARM_DIR" ]]; then + # accept $CHARM_DIR to be more forgiving + export JUJU_CHARM_DIR="$CHARM_DIR" + fi + if [[ -z "$CHARM_DIR" ]]; then + # set CHARM_DIR as well to help with backwards compatibility + export CHARM_DIR="$JUJU_CHARM_DIR" + fi + return + fi + # Try to guess the value for JUJU_CHARM_DIR by looking for a non-subordinate + # (because there's got to be at least one principle) charm directory; + # if there are several, pick the first by alpha order. + agents_dir="/var/lib/juju/agents" + if [[ -d "$agents_dir" ]]; then + desired_charm="$1" + found_charm_dir="" + if [[ -n "$desired_charm" ]]; then + for charm_dir in $(/bin/ls -d "$agents_dir"/unit-*/charm); do + charm_name="$(grep -o '^['\''"]\?name['\''"]\?:.*' $charm_dir/metadata.yaml 2> /dev/null | sed -e 's/.*: *//' -e 's/['\''"]//g')" + if [[ "$charm_name" == "$desired_charm" ]]; then + if [[ -n "$found_charm_dir" ]]; then + >&2 echo "Ambiguous possibilities for JUJU_CHARM_DIR matching '$desired_charm'; please run within a Juju hook context" + exit 1 + fi + found_charm_dir="$charm_dir" + fi + done + if [[ -z "$found_charm_dir" ]]; then + >&2 echo "Unable to determine JUJU_CHARM_DIR matching '$desired_charm'; please run within a Juju hook context" + exit 1 + fi + export JUJU_CHARM_DIR="$found_charm_dir" + export CHARM_DIR="$found_charm_dir" + return + fi + # shellcheck disable=SC2126 + non_subordinates="$(grep -L 'subordinate"\?:.*true' "$agents_dir"/unit-*/charm/metadata.yaml | wc -l)" + if [[ "$non_subordinates" -gt 1 ]]; then + >&2 echo 'Ambiguous possibilities for JUJU_CHARM_DIR; please use --charm or run within a Juju hook context' + exit 1 + elif [[ "$non_subordinates" -eq 1 ]]; then + for charm_dir in $(/bin/ls -d "$agents_dir"/unit-*/charm); do + if grep -q 'subordinate"\?:.*true' "$charm_dir/metadata.yaml"; then + continue + fi + export JUJU_CHARM_DIR="$charm_dir" + export CHARM_DIR="$charm_dir" + return + done + fi + 
fi + >&2 echo 'Unable to determine JUJU_CHARM_DIR; please run within a Juju hook context' + exit 1 +} + +try_activate_venv() { + if [[ -d "$JUJU_CHARM_DIR/../.venv" ]]; then + . "$JUJU_CHARM_DIR/../.venv/bin/activate" + fi +} + +find_wrapped() { + PATH="${PATH/\/usr\/local\/sbin:}" which "$(basename "$0")" +} + + +if [[ "$1" == "--version" || "$1" == "-v" ]]; then + echo "$VERSION" + exit 0 +fi + + +# allow --charm option to hint which JUJU_CHARM_DIR to choose when ambiguous +# NB: --charm option must come first +# NB: option must be processed outside find_charm_dirs to modify $@ +charm_name="" +if [[ "$1" == "--charm" ]]; then + charm_name="$2" + shift; shift +fi + +find_charm_dirs "$charm_name" +try_activate_venv +export PYTHONPATH="$JUJU_CHARM_DIR/lib:$PYTHONPATH" + +if [[ "$(basename "$0")" == "charm-env" ]]; then + # being used as a shebang + exec "$@" +elif [[ "$0" == "$BASH_SOURCE" ]]; then + # being invoked as a symlink wrapping something to find in the venv + exec "$(find_wrapped)" "$@" +elif [[ "$(basename "$BASH_SOURCE")" == "charm-env" ]]; then + # being sourced directly; do nothing + /bin/true +else + # being sourced for wrapped bash helpers + . 
"$(find_wrapped)" +fi diff --git a/kubernetes-worker/bin/layer_option b/kubernetes-worker/bin/layer_option new file mode 100755 index 0000000..3253ef8 --- /dev/null +++ b/kubernetes-worker/bin/layer_option @@ -0,0 +1,22 @@ +#!/usr/bin/env python3 + +import sys +import argparse +from charms import layer + + +parser = argparse.ArgumentParser(description='Access layer options.') +parser.add_argument('section', + help='the section, or layer, the option is from') +parser.add_argument('option', + help='the option to access') + +args = parser.parse_args() +value = layer.options.get(args.section, args.option) +if isinstance(value, bool): + sys.exit(0 if value else 1) +elif isinstance(value, list): + for val in value: + print(val) +else: + print(value) diff --git a/kubernetes-worker/build-cni-resources.sh b/kubernetes-worker/build-cni-resources.sh new file mode 100755 index 0000000..0da8f1e --- /dev/null +++ b/kubernetes-worker/build-cni-resources.sh @@ -0,0 +1,45 @@ +#!/usr/bin/env bash + +set -eux + +# When changing CNI_VERSION, it should be updated in both +# charm-kubernetes-control-plane/build-cni-resources.sh and +# charm-kubernetes-worker/build-cni-resources.sh +CNI_VERSION="${CNI_VERSION:-v0.7.5}" +ARCH="${ARCH:-amd64 arm64 s390x}" + +build_script_commit="$(git show --oneline -q)" +temp_dir="$(readlink -f build-cni-resources.tmp)" +rm -rf "$temp_dir" +mkdir "$temp_dir" +(cd "$temp_dir" + git clone https://github.com/containernetworking/plugins.git cni-plugins \ + --branch "$CNI_VERSION" \ + --depth 1 + + # Grab the user id and group id of this current user. 
+ GROUP_ID=$(id -g) + USER_ID=$(id -u) + + for arch in $ARCH; do + echo "Building cni $CNI_VERSION for $arch" + rm -f cni-plugins/bin/* + docker run \ + --rm \ + -e GOOS=linux \ + -e GOARCH="$arch" \ + -v "$temp_dir"/cni-plugins:/cni \ + golang:1.15 \ + /bin/bash -c "cd /cni && ./build.sh && chown -R ${USER_ID}:${GROUP_ID} /cni" + + (cd cni-plugins/bin + echo "cni-$arch $CNI_VERSION" >> BUILD_INFO + echo "Built $(date)" >> BUILD_INFO + echo "build script commit: $build_script_commit" >> BUILD_INFO + echo "cni-plugins commit: $(git show --oneline -q)" >> BUILD_INFO + tar -czf "$temp_dir/cni-$arch.tgz" . + ) + done +) +mv "$temp_dir"/cni-*.tgz . +rm -rf "$temp_dir" diff --git a/kubernetes-worker/config.yaml b/kubernetes-worker/config.yaml new file mode 100644 index 0000000..045de3b --- /dev/null +++ b/kubernetes-worker/config.yaml @@ -0,0 +1,203 @@ +"options": + "extra_packages": + "description": > + Space separated list of extra deb packages to install. + "type": "string" + "default": "" + "package_status": + "default": "install" + "type": "string" + "description": > + The status of service-affecting packages will be set to this + value in the dpkg database. Valid values are "install" and "hold". + "install_sources": + "description": > + List of extra apt sources, per charm-helpers standard + format (a yaml list of strings encoded as a string). Each source + may be either a line that can be added directly to + sources.list(5), or in the form ppa:/ for adding + Personal Package Archives, or a distribution component to enable. + "type": "string" + "default": "" + "install_keys": + "description": > + List of signing keys for install_sources package sources, per + charmhelpers standard format (a yaml list of strings encoded as + a string). The keys should be the full ASCII armoured GPG public + keys. While GPG key ids are also supported and looked up on a + keyserver, operators should be aware that this mechanism is + insecure. 
null can be used if a standard package signing key is + used that will already be installed on the machine, and for PPA + sources where the package signing key is securely retrieved from + Launchpad. + "type": "string" + "default": "" + "snapd_refresh": + "default": "max" + "type": "string" + "description": | + How often snapd handles updates for installed snaps. Setting an empty + string will check 4x per day. Set to "max" to delay the refresh as long + as possible. You may also set a custom string as described in the + 'refresh.timer' section here: + https://forum.snapcraft.io/t/system-options/87 + "nagios_context": + "default": "juju" + "type": "string" + "description": | + Used by the nrpe subordinate charms. + A string that will be prepended to instance name to set the host name + in nagios. So for instance the hostname would be something like: + juju-myservice-0 + If you're running multiple environments with the same services in them + this allows you to differentiate between them. + "nagios_servicegroups": + "default": "" + "type": "string" + "description": | + A comma-separated list of nagios servicegroups. + If left empty, the nagios_context will be used as the servicegroup + "sysctl": + "type": "string" + "default": "{net.ipv4.conf.all.forwarding: 1, net.ipv4.conf.all.rp_filter: 1,\ + \ net.ipv4.neigh.default.gc_thresh1: 128, net.ipv4.neigh.default.gc_thresh2:\ + \ 28672, net.ipv4.neigh.default.gc_thresh3: 32768, net.ipv6.neigh.default.gc_thresh1:\ + \ 128, net.ipv6.neigh.default.gc_thresh2: 28672, net.ipv6.neigh.default.gc_thresh3:\ + \ 32768, fs.inotify.max_user_instances: 8192, fs.inotify.max_user_watches: 1048576,\ + \ kernel.panic: 10, kernel.panic_on_oops: 1, vm.overcommit_memory: 1}" + "description": | + YAML formatted associative array of sysctl values, e.g.: + '{kernel.pid_max: 4194303}'. Note that kube-proxy handles + the conntrack settings. 
The proper way to alter them is to + use the proxy-extra-args config to set them, e.g.: + juju config kubernetes-control-plane proxy-extra-args="conntrack-min=1000000 conntrack-max-per-core=250000" + juju config kubernetes-worker proxy-extra-args="conntrack-min=1000000 conntrack-max-per-core=250000" + The proxy-extra-args conntrack-min and conntrack-max-per-core can be set to 0 to ignore + kube-proxy's settings and use the sysctl settings instead. Note the fundamental difference between + the setting of conntrack-max-per-core vs nf_conntrack_max. + "proxy-extra-args": + "type": "string" + "default": "" + "description": | + Space separated list of flags and key=value pairs that will be passed as arguments to + kube-proxy. For example a value like this: + runtime-config=batch/v2alpha1=true profiling=true + will result in kube-apiserver being run with the following options: + --runtime-config=batch/v2alpha1=true --profiling=true + "kubelet-extra-args": + "type": "string" + "default": "" + "description": | + Space separated list of flags and key=value pairs that will be passed as arguments to + kubelet. For example a value like this: + runtime-config=batch/v2alpha1=true profiling=true + will result in kubelet being run with the following options: + --runtime-config=batch/v2alpha1=true --profiling=true + Note: As of Kubernetes 1.10.x, many of Kubelet's args have been deprecated, and can + be set with kubelet-extra-config instead. + "kubelet-extra-config": + "default": "{}" + "type": "string" + "description": | + Extra configuration to be passed to kubelet. Any values specified in this + config will be merged into a KubeletConfiguration file that is passed to + the kubelet service via the --config flag. This can be used to override + values provided by the charm. + + Requires Kubernetes 1.10+. + + The value for this config must be a YAML mapping that can be safely + merged with a KubeletConfiguration file. 
For example: + {evictionHard: {memory.available: 200Mi}} + + For more information about KubeletConfiguration, see upstream docs: + https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ + "labels": + "type": "string" + "default": "" + "description": | + Labels can be used to organize and to select subsets of nodes in the + cluster. Declare node labels in key=value format, separated by spaces. + "ingress": + "type": "boolean" + "default": !!bool "true" + "description": | + Deploy the default http backend and ingress controller to handle + ingress requests. + + Set to false if deploying an alternate ingress controller, and note + that you may need to manually open ports 80 and 443 on the nodes: + juju run --application kubernetes-worker -- open-port 80 && open-port 443 + "channel": + "type": "string" + "default": "1.24/stable" + "description": | + Snap channel to install Kubernetes worker services from + "require-manual-upgrade": + "type": "boolean" + "default": !!bool "true" + "description": | + When true, worker services will not be upgraded until the user triggers + it manually by running the upgrade action. + "ingress-default-ssl-certificate": + "type": "string" + "default": "" + "description": | + SSL certificate to be used by the default HTTPS server. If one of the + flag ingress-default-ssl-certificate or ingress-default-ssl-key is not + provided ingress will use a self-signed certificate. This parameter is + specific to nginx-ingress-controller. + "ingress-default-ssl-key": + "type": "string" + "default": "" + "description": | + Private key to be used by the default HTTPS server. If one of the flag + ingress-default-ssl-certificate or ingress-default-ssl-key is not + provided ingress will use a self-signed certificate. This parameter is + specific to nginx-ingress-controller. + "ingress-ssl-passthrough": + "type": "boolean" + "default": !!bool "false" + "description": | + Enable ssl passthrough on ingress server. 
This allows passing the ssl + connection through to the workloads and not terminating it at the ingress + controller. + "ingress-ssl-chain-completion": + "type": "boolean" + "default": !!bool "false" + "description": | + Enable chain completion for TLS certificates used by the nginx ingress + controller. Set this to true if you would like the ingress controller + to attempt auto-retrieval of intermediate certificates. The default + (false) is recommended for all production kubernetes installations, and + any environment which does not have outbound Internet access. + "ingress-use-forwarded-headers": + "type": "boolean" + "default": !!bool "false" + "description": | + If true, NGINX passes the incoming X-Forwarded-* headers to upstreams. Use this + option when NGINX is behind another L7 proxy / load balancer that is setting + these headers. + + If false, NGINX ignores incoming X-Forwarded-* headers, filling them with the + request information it sees. Use this option if NGINX is exposed directly to + the internet, or it's behind a L3/packet-based load balancer that doesn't alter + the source IP in the packets. + + Reference: https://github.com/kubernetes/ingress-nginx/blob/a9c706be12a8be418c49ab1f60a02f52f9b14e55/ + docs/user-guide/nginx-configuration/configmap.md#use-forwarded-headers. + "nginx-image": + "type": "string" + "default": "auto" + "description": | + Docker image to use for the nginx ingress controller. Using "auto" will select + an image based on architecture. + + Example: + quay.io/kubernetes-ingress-controller/nginx-ingress-controller-amd64:0.32.0 + "default-backend-image": + "type": "string" + "default": "auto" + "description": | + Docker image to use for the default backend. Auto will select an image + based on architecture. diff --git a/kubernetes-worker/copyright b/kubernetes-worker/copyright new file mode 100644 index 0000000..ac5e525 --- /dev/null +++ b/kubernetes-worker/copyright @@ -0,0 +1,13 @@ +Copyright 2016 The Kubernetes Authors. 
+ + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/kubernetes-worker/copyright.layer-apt b/kubernetes-worker/copyright.layer-apt new file mode 100644 index 0000000..0814dc1 --- /dev/null +++ b/kubernetes-worker/copyright.layer-apt @@ -0,0 +1,15 @@ +Copyright 2015-2016 Canonical Ltd. + +This file is part of the Apt layer for Juju. + +This program is free software: you can redistribute it and/or modify +it under the terms of the GNU General Public License version 3, as +published by the Free Software Foundation. + +This program is distributed in the hope that it will be useful, but +WITHOUT ANY WARRANTY; without even the implied warranties of +MERCHANTABILITY, SATISFACTORY QUALITY, or FITNESS FOR A PARTICULAR +PURPOSE. See the GNU General Public License for more details. + +You should have received a copy of the GNU General Public License +along with this program. If not, see . diff --git a/kubernetes-worker/copyright.layer-basic b/kubernetes-worker/copyright.layer-basic new file mode 100644 index 0000000..d4fdd18 --- /dev/null +++ b/kubernetes-worker/copyright.layer-basic @@ -0,0 +1,16 @@ +Format: http://dep.debian.net/deps/dep5/ + +Files: * +Copyright: Copyright 2015-2017, Canonical Ltd., All Rights Reserved. +License: Apache License 2.0 + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + . + http://www.apache.org/licenses/LICENSE-2.0 + . 
+ Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/kubernetes-worker/copyright.layer-coordinator b/kubernetes-worker/copyright.layer-coordinator new file mode 100644 index 0000000..b8518aa --- /dev/null +++ b/kubernetes-worker/copyright.layer-coordinator @@ -0,0 +1,15 @@ +Copyright 2015-2016 Canonical Ltd. + +This file is part of the Coordinator Layer for Juju. + +This program is free software: you can redistribute it and/or modify +it under the terms of the GNU General Public License version 3, as +published by the Free Software Foundation. + +This program is distributed in the hope that it will be useful, but +WITHOUT ANY WARRANTY; without even the implied warranties of +MERCHANTABILITY, SATISFACTORY QUALITY, or FITNESS FOR A PARTICULAR +PURPOSE. See the GNU General Public License for more details. + +You should have received a copy of the GNU General Public License +along with this program. If not, see . diff --git a/kubernetes-worker/copyright.layer-leadership b/kubernetes-worker/copyright.layer-leadership new file mode 100644 index 0000000..08b983f --- /dev/null +++ b/kubernetes-worker/copyright.layer-leadership @@ -0,0 +1,15 @@ +Copyright 2015-2016 Canonical Ltd. + +This file is part of the Leadership Layer for Juju. + +This program is free software: you can redistribute it and/or modify +it under the terms of the GNU General Public License version 3, as +published by the Free Software Foundation. + +This program is distributed in the hope that it will be useful, but +WITHOUT ANY WARRANTY; without even the implied warranties of +MERCHANTABILITY, SATISFACTORY QUALITY, or FITNESS FOR A PARTICULAR +PURPOSE. See the GNU General Public License for more details. 
+ +You should have received a copy of the GNU General Public License +along with this program. If not, see . diff --git a/kubernetes-worker/copyright.layer-metrics b/kubernetes-worker/copyright.layer-metrics new file mode 100644 index 0000000..2df15bd --- /dev/null +++ b/kubernetes-worker/copyright.layer-metrics @@ -0,0 +1,13 @@ +Copyright 2016 Canonical Ltd + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/kubernetes-worker/copyright.layer-nagios b/kubernetes-worker/copyright.layer-nagios new file mode 100644 index 0000000..c80db95 --- /dev/null +++ b/kubernetes-worker/copyright.layer-nagios @@ -0,0 +1,16 @@ +Format: http://dep.debian.net/deps/dep5/ + +Files: * +Copyright: Copyright 2016, Canonical Ltd. +License: GPL-3 + This program is free software: you can redistribute it and/or modify + it under the terms of the GNU General Public License version 3, as + published by the Free Software Foundation. + . + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranties of + MERCHANTABILITY, SATISFACTORY QUALITY, or FITNESS FOR A PARTICULAR + PURPOSE. See the GNU General Public License for more details. + . + You should have received a copy of the GNU General Public License + along with this program. If not, see . 
diff --git a/kubernetes-worker/copyright.layer-options b/kubernetes-worker/copyright.layer-options new file mode 100644 index 0000000..d4fdd18 --- /dev/null +++ b/kubernetes-worker/copyright.layer-options @@ -0,0 +1,16 @@ +Format: http://dep.debian.net/deps/dep5/ + +Files: * +Copyright: Copyright 2015-2017, Canonical Ltd., All Rights Reserved. +License: Apache License 2.0 + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + . + http://www.apache.org/licenses/LICENSE-2.0 + . + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/kubernetes-worker/copyright.layer-snap b/kubernetes-worker/copyright.layer-snap new file mode 100644 index 0000000..261eeb9 --- /dev/null +++ b/kubernetes-worker/copyright.layer-snap @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. 
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. 
You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. 
Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/kubernetes-worker/copyright.layer-status b/kubernetes-worker/copyright.layer-status new file mode 100644 index 0000000..a91bdf1 --- /dev/null +++ b/kubernetes-worker/copyright.layer-status @@ -0,0 +1,16 @@ +Format: http://dep.debian.net/deps/dep5/ + +Files: * +Copyright: Copyright 2018, Canonical Ltd., All Rights Reserved. +License: Apache License 2.0 + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + . + http://www.apache.org/licenses/LICENSE-2.0 + . + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/kubernetes-worker/debug-scripts/charm-unitdata b/kubernetes-worker/debug-scripts/charm-unitdata new file mode 100755 index 0000000..d2aac60 --- /dev/null +++ b/kubernetes-worker/debug-scripts/charm-unitdata @@ -0,0 +1,12 @@ +#!/usr/local/sbin/charm-env python3 + +import debug_script +import json +from charmhelpers.core import unitdata + +kv = unitdata.kv() +data = kv.getrange("") + +with debug_script.open_file("unitdata.json", "w") as f: + json.dump(data, f, indent=2) + f.write("\n") diff --git a/kubernetes-worker/debug-scripts/filesystem b/kubernetes-worker/debug-scripts/filesystem new file mode 100755 index 0000000..c5ec6d8 --- /dev/null +++ b/kubernetes-worker/debug-scripts/filesystem @@ -0,0 +1,17 @@ +#!/bin/sh +set -ux + +# report file system disk space usage +df -hT > $DEBUG_SCRIPT_DIR/df-hT +# estimate file space usage +du -h / 2>&1 > $DEBUG_SCRIPT_DIR/du-h +# list the mounted filesystems +mount > $DEBUG_SCRIPT_DIR/mount +# list the mounted systems with ascii trees +findmnt -A > $DEBUG_SCRIPT_DIR/findmnt +# list block devices +lsblk > $DEBUG_SCRIPT_DIR/lsblk +# list open files +lsof 2>&1 > $DEBUG_SCRIPT_DIR/lsof +# list local system locks +lslocks > $DEBUG_SCRIPT_DIR/lslocks diff --git a/kubernetes-worker/debug-scripts/inotify b/kubernetes-worker/debug-scripts/inotify new file mode 100755 index 0000000..350e20f --- /dev/null +++ b/kubernetes-worker/debug-scripts/inotify @@ -0,0 +1,8 @@ +#!/bin/sh +set -ux + +# We had to bump inotify limits once in the past, hence why this oddly specific +# script lives here in kubernetes-worker. 
+ +sysctl fs.inotify > $DEBUG_SCRIPT_DIR/sysctl-limits +ls -l /proc/*/fd/* | grep inotify > $DEBUG_SCRIPT_DIR/inotify-instances diff --git a/kubernetes-worker/debug-scripts/juju-logs b/kubernetes-worker/debug-scripts/juju-logs new file mode 100755 index 0000000..d27c458 --- /dev/null +++ b/kubernetes-worker/debug-scripts/juju-logs @@ -0,0 +1,4 @@ +#!/bin/sh +set -ux + +cp -v /var/log/juju/* $DEBUG_SCRIPT_DIR diff --git a/kubernetes-worker/debug-scripts/juju-network-get b/kubernetes-worker/debug-scripts/juju-network-get new file mode 100755 index 0000000..983c8c4 --- /dev/null +++ b/kubernetes-worker/debug-scripts/juju-network-get @@ -0,0 +1,21 @@ +#!/usr/local/sbin/charm-env python3 + +import os +import subprocess +import yaml +import debug_script + +with open('metadata.yaml') as f: + metadata = yaml.load(f) + +relations = [] +for key in ['requires', 'provides', 'peers']: + relations += list(metadata.get(key, {}).keys()) + +os.mkdir(os.path.join(debug_script.dir, 'relations')) + +for relation in relations: + path = 'relations/' + relation + with debug_script.open_file(path, 'w') as f: + cmd = ['network-get', relation] + subprocess.call(cmd, stdout=f, stderr=subprocess.STDOUT) diff --git a/kubernetes-worker/debug-scripts/kubectl b/kubernetes-worker/debug-scripts/kubectl new file mode 100755 index 0000000..1192c3c --- /dev/null +++ b/kubernetes-worker/debug-scripts/kubectl @@ -0,0 +1,15 @@ +#!/bin/sh +set -ux + +export PATH=$PATH:/snap/bin + +alias kubectl="kubectl --kubeconfig=/root/cdk/kubeconfig" + +kubectl cluster-info > $DEBUG_SCRIPT_DIR/cluster-info +kubectl cluster-info dump > $DEBUG_SCRIPT_DIR/cluster-info-dump +for obj in pods svc ingress secrets pv pvc rc; do + kubectl describe $obj --all-namespaces > $DEBUG_SCRIPT_DIR/describe-$obj +done +for obj in nodes; do + kubectl describe $obj > $DEBUG_SCRIPT_DIR/describe-$obj +done diff --git a/kubernetes-worker/debug-scripts/kubernetes-worker-services b/kubernetes-worker/debug-scripts/kubernetes-worker-services new 
file mode 100755 index 0000000..4f9dfa2 --- /dev/null +++ b/kubernetes-worker/debug-scripts/kubernetes-worker-services @@ -0,0 +1,9 @@ +#!/bin/sh +set -ux + +for service in kubelet kube-proxy; do + systemctl status snap.$service.daemon > $DEBUG_SCRIPT_DIR/$service-systemctl-status + journalctl -u snap.$service.daemon > $DEBUG_SCRIPT_DIR/$service-journal +done + +# FIXME: get the snap config or something diff --git a/kubernetes-worker/debug-scripts/network b/kubernetes-worker/debug-scripts/network new file mode 100755 index 0000000..944a355 --- /dev/null +++ b/kubernetes-worker/debug-scripts/network @@ -0,0 +1,11 @@ +#!/bin/sh +set -ux + +ifconfig -a > $DEBUG_SCRIPT_DIR/ifconfig +cp -v /etc/resolv.conf $DEBUG_SCRIPT_DIR/resolv.conf +cp -v /etc/network/interfaces $DEBUG_SCRIPT_DIR/interfaces +netstat -planut > $DEBUG_SCRIPT_DIR/netstat +route -n > $DEBUG_SCRIPT_DIR/route +iptables-save > $DEBUG_SCRIPT_DIR/iptables-save +dig google.com > $DEBUG_SCRIPT_DIR/dig-google +ping -w 2 -i 0.1 google.com > $DEBUG_SCRIPT_DIR/ping-google diff --git a/kubernetes-worker/debug-scripts/packages b/kubernetes-worker/debug-scripts/packages new file mode 100755 index 0000000..b60a9cf --- /dev/null +++ b/kubernetes-worker/debug-scripts/packages @@ -0,0 +1,7 @@ +#!/bin/sh +set -ux + +dpkg --list > $DEBUG_SCRIPT_DIR/dpkg-list +snap list > $DEBUG_SCRIPT_DIR/snap-list +pip2 list > $DEBUG_SCRIPT_DIR/pip2-list +pip3 list > $DEBUG_SCRIPT_DIR/pip3-list diff --git a/kubernetes-worker/debug-scripts/sysctl b/kubernetes-worker/debug-scripts/sysctl new file mode 100755 index 0000000..a86a6c8 --- /dev/null +++ b/kubernetes-worker/debug-scripts/sysctl @@ -0,0 +1,4 @@ +#!/bin/sh +set -ux + +sysctl -a > $DEBUG_SCRIPT_DIR/sysctl diff --git a/kubernetes-worker/debug-scripts/systemd b/kubernetes-worker/debug-scripts/systemd new file mode 100755 index 0000000..8bb9b6f --- /dev/null +++ b/kubernetes-worker/debug-scripts/systemd @@ -0,0 +1,9 @@ +#!/bin/sh +set -ux + +systemctl --all > 
$DEBUG_SCRIPT_DIR/systemctl +journalctl > $DEBUG_SCRIPT_DIR/journalctl +systemd-analyze time > $DEBUG_SCRIPT_DIR/systemd-analyze-time +systemd-analyze blame > $DEBUG_SCRIPT_DIR/systemd-analyze-blame +systemd-analyze critical-chain > $DEBUG_SCRIPT_DIR/systemd-analyze-critical-chain +systemd-analyze dump > $DEBUG_SCRIPT_DIR/systemd-analyze-dump diff --git a/kubernetes-worker/debug-scripts/tls-certs b/kubernetes-worker/debug-scripts/tls-certs new file mode 100755 index 0000000..2692e51 --- /dev/null +++ b/kubernetes-worker/debug-scripts/tls-certs @@ -0,0 +1,21 @@ +#!/usr/local/sbin/charm-env python3 + +import os +import shutil +import traceback +import debug_script +from charms import layer + +options = layer.options.get('tls-client') + +def copy_cert(source_key, name): + try: + source = options[source_key] + dest = os.path.join(debug_script.dir, name) + shutil.copy(source, dest) + except Exception: + traceback.print_exc() + +copy_cert('client_certificate_path', 'client.crt') +copy_cert('server_certificate_path', 'server.crt') +copy_cert('ca_certificate_path', 'ca.crt') diff --git a/kubernetes-worker/docs/status.md b/kubernetes-worker/docs/status.md new file mode 100644 index 0000000..c6cceab --- /dev/null +++ b/kubernetes-worker/docs/status.md @@ -0,0 +1,91 @@ +

WorkloadState

+ +```python +WorkloadState(self, /, *args, **kwargs) +``` + +Enum of the valid workload states. + +Valid options are: + + * `WorkloadState.MAINTENANCE` + * `WorkloadState.BLOCKED` + * `WorkloadState.WAITING` + * `WorkloadState.ACTIVE` + +

maintenance

+ +```python +maintenance(message) +``` + +Set the status to the `MAINTENANCE` state with the given operator message. + +__Parameters__ + +- __`message` (str)__: Message to convey to the operator. + +

maint

+ +```python +maint(message) +``` + +Shorthand alias for +[maintenance](status.md#charms.layer.status.maintenance). + +__Parameters__ + +- __`message` (str)__: Message to convey to the operator. + +

blocked

+ +```python +blocked(message) +``` + +Set the status to the `BLOCKED` state with the given operator message. + +__Parameters__ + +- __`message` (str)__: Message to convey to the operator. + +

waiting

+ +```python +waiting(message) +``` + +Set the status to the `WAITING` state with the given operator message. + +__Parameters__ + +- __`message` (str)__: Message to convey to the operator. + +

active

+ +```python +active(message) +``` + +Set the status to the `ACTIVE` state with the given operator message. + +__Parameters__ + +- __`message` (str)__: Message to convey to the operator. + +

status_set

+ +```python +status_set(workload_state, message) +``` + +Set the status to the given workload state with a message. + +__Parameters__ + +- __`workload_state` (WorkloadState or str)__: State of the workload. Should be + a [WorkloadState](status.md#charms.layer.status.WorkloadState) enum + member, or the string value of one of those members. +- __`message` (str)__: Message to convey to the operator. + diff --git a/kubernetes-worker/exec.d/docker-compose/charm-pre-install b/kubernetes-worker/exec.d/docker-compose/charm-pre-install new file mode 100644 index 0000000..f0202c5 --- /dev/null +++ b/kubernetes-worker/exec.d/docker-compose/charm-pre-install @@ -0,0 +1,4 @@ +#!/usr/bin/env bash + +# This stubs out charm-pre-install coming from layer-docker as a workaround for +# offline installs until https://github.com/juju/charm-tools/issues/301 is fixed. diff --git a/kubernetes-worker/exec.d/vmware-patch/charm-pre-install b/kubernetes-worker/exec.d/vmware-patch/charm-pre-install new file mode 100755 index 0000000..b5e6d97 --- /dev/null +++ b/kubernetes-worker/exec.d/vmware-patch/charm-pre-install @@ -0,0 +1,17 @@ +#!/usr/bin/env bash +MY_HOSTNAME=$(hostname) + +: ${JUJU_UNIT_NAME:=`uuidgen`} + + +if [ "${MY_HOSTNAME}" == "ubuntuguest" ]; then + juju-log "Detected broken vsphere integration. Applying hostname override" + + FRIENDLY_HOSTNAME=$(echo $JUJU_UNIT_NAME | tr / -) + juju-log "Setting hostname to $FRIENDLY_HOSTNAME" + if [ ! 
-f /etc/hostname.orig ]; then + mv /etc/hostname /etc/hostname.orig + fi + echo "${FRIENDLY_HOSTNAME}" > /etc/hostname + hostname $FRIENDLY_HOSTNAME +fi diff --git a/kubernetes-worker/hooks/aws-relation-broken b/kubernetes-worker/hooks/aws-relation-broken new file mode 100755 index 0000000..9858c6b --- /dev/null +++ b/kubernetes-worker/hooks/aws-relation-broken @@ -0,0 +1,22 @@ +#!/usr/bin/env python3 + +# Load modules from $JUJU_CHARM_DIR/lib +import sys +sys.path.append('lib') + +from charms.layer import basic # noqa +basic.bootstrap_charm_deps() + +from charmhelpers.core import hookenv # noqa +hookenv.atstart(basic.init_config_states) +hookenv.atexit(basic.clear_config_states) + + +# This will load and run the appropriate @hook and other decorated +# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive, +# and $JUJU_CHARM_DIR/hooks/relations. +# +# See https://jujucharms.com/docs/stable/authors-charm-building +# for more information on this pattern. +from charms.reactive import main # noqa +main() diff --git a/kubernetes-worker/hooks/aws-relation-changed b/kubernetes-worker/hooks/aws-relation-changed new file mode 100755 index 0000000..9858c6b --- /dev/null +++ b/kubernetes-worker/hooks/aws-relation-changed @@ -0,0 +1,22 @@ +#!/usr/bin/env python3 + +# Load modules from $JUJU_CHARM_DIR/lib +import sys +sys.path.append('lib') + +from charms.layer import basic # noqa +basic.bootstrap_charm_deps() + +from charmhelpers.core import hookenv # noqa +hookenv.atstart(basic.init_config_states) +hookenv.atexit(basic.clear_config_states) + + +# This will load and run the appropriate @hook and other decorated +# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive, +# and $JUJU_CHARM_DIR/hooks/relations. +# +# See https://jujucharms.com/docs/stable/authors-charm-building +# for more information on this pattern. 
+from charms.reactive import main # noqa +main() diff --git a/kubernetes-worker/hooks/aws-relation-created b/kubernetes-worker/hooks/aws-relation-created new file mode 100755 index 0000000..9858c6b --- /dev/null +++ b/kubernetes-worker/hooks/aws-relation-created @@ -0,0 +1,22 @@ +#!/usr/bin/env python3 + +# Load modules from $JUJU_CHARM_DIR/lib +import sys +sys.path.append('lib') + +from charms.layer import basic # noqa +basic.bootstrap_charm_deps() + +from charmhelpers.core import hookenv # noqa +hookenv.atstart(basic.init_config_states) +hookenv.atexit(basic.clear_config_states) + + +# This will load and run the appropriate @hook and other decorated +# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive, +# and $JUJU_CHARM_DIR/hooks/relations. +# +# See https://jujucharms.com/docs/stable/authors-charm-building +# for more information on this pattern. +from charms.reactive import main # noqa +main() diff --git a/kubernetes-worker/hooks/aws-relation-departed b/kubernetes-worker/hooks/aws-relation-departed new file mode 100755 index 0000000..9858c6b --- /dev/null +++ b/kubernetes-worker/hooks/aws-relation-departed @@ -0,0 +1,22 @@ +#!/usr/bin/env python3 + +# Load modules from $JUJU_CHARM_DIR/lib +import sys +sys.path.append('lib') + +from charms.layer import basic # noqa +basic.bootstrap_charm_deps() + +from charmhelpers.core import hookenv # noqa +hookenv.atstart(basic.init_config_states) +hookenv.atexit(basic.clear_config_states) + + +# This will load and run the appropriate @hook and other decorated +# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive, +# and $JUJU_CHARM_DIR/hooks/relations. +# +# See https://jujucharms.com/docs/stable/authors-charm-building +# for more information on this pattern. 
+from charms.reactive import main # noqa +main() diff --git a/kubernetes-worker/hooks/aws-relation-joined b/kubernetes-worker/hooks/aws-relation-joined new file mode 100755 index 0000000..9858c6b --- /dev/null +++ b/kubernetes-worker/hooks/aws-relation-joined @@ -0,0 +1,22 @@ +#!/usr/bin/env python3 + +# Load modules from $JUJU_CHARM_DIR/lib +import sys +sys.path.append('lib') + +from charms.layer import basic # noqa +basic.bootstrap_charm_deps() + +from charmhelpers.core import hookenv # noqa +hookenv.atstart(basic.init_config_states) +hookenv.atexit(basic.clear_config_states) + + +# This will load and run the appropriate @hook and other decorated +# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive, +# and $JUJU_CHARM_DIR/hooks/relations. +# +# See https://jujucharms.com/docs/stable/authors-charm-building +# for more information on this pattern. +from charms.reactive import main # noqa +main() diff --git a/kubernetes-worker/hooks/azure-relation-broken b/kubernetes-worker/hooks/azure-relation-broken new file mode 100755 index 0000000..9858c6b --- /dev/null +++ b/kubernetes-worker/hooks/azure-relation-broken @@ -0,0 +1,22 @@ +#!/usr/bin/env python3 + +# Load modules from $JUJU_CHARM_DIR/lib +import sys +sys.path.append('lib') + +from charms.layer import basic # noqa +basic.bootstrap_charm_deps() + +from charmhelpers.core import hookenv # noqa +hookenv.atstart(basic.init_config_states) +hookenv.atexit(basic.clear_config_states) + + +# This will load and run the appropriate @hook and other decorated +# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive, +# and $JUJU_CHARM_DIR/hooks/relations. +# +# See https://jujucharms.com/docs/stable/authors-charm-building +# for more information on this pattern. 
+from charms.reactive import main # noqa +main() diff --git a/kubernetes-worker/hooks/azure-relation-changed b/kubernetes-worker/hooks/azure-relation-changed new file mode 100755 index 0000000..9858c6b --- /dev/null +++ b/kubernetes-worker/hooks/azure-relation-changed @@ -0,0 +1,22 @@ +#!/usr/bin/env python3 + +# Load modules from $JUJU_CHARM_DIR/lib +import sys +sys.path.append('lib') + +from charms.layer import basic # noqa +basic.bootstrap_charm_deps() + +from charmhelpers.core import hookenv # noqa +hookenv.atstart(basic.init_config_states) +hookenv.atexit(basic.clear_config_states) + + +# This will load and run the appropriate @hook and other decorated +# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive, +# and $JUJU_CHARM_DIR/hooks/relations. +# +# See https://jujucharms.com/docs/stable/authors-charm-building +# for more information on this pattern. +from charms.reactive import main # noqa +main() diff --git a/kubernetes-worker/hooks/azure-relation-created b/kubernetes-worker/hooks/azure-relation-created new file mode 100755 index 0000000..9858c6b --- /dev/null +++ b/kubernetes-worker/hooks/azure-relation-created @@ -0,0 +1,22 @@ +#!/usr/bin/env python3 + +# Load modules from $JUJU_CHARM_DIR/lib +import sys +sys.path.append('lib') + +from charms.layer import basic # noqa +basic.bootstrap_charm_deps() + +from charmhelpers.core import hookenv # noqa +hookenv.atstart(basic.init_config_states) +hookenv.atexit(basic.clear_config_states) + + +# This will load and run the appropriate @hook and other decorated +# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive, +# and $JUJU_CHARM_DIR/hooks/relations. +# +# See https://jujucharms.com/docs/stable/authors-charm-building +# for more information on this pattern. 
+from charms.reactive import main # noqa +main() diff --git a/kubernetes-worker/hooks/azure-relation-departed b/kubernetes-worker/hooks/azure-relation-departed new file mode 100755 index 0000000..9858c6b --- /dev/null +++ b/kubernetes-worker/hooks/azure-relation-departed @@ -0,0 +1,22 @@ +#!/usr/bin/env python3 + +# Load modules from $JUJU_CHARM_DIR/lib +import sys +sys.path.append('lib') + +from charms.layer import basic # noqa +basic.bootstrap_charm_deps() + +from charmhelpers.core import hookenv # noqa +hookenv.atstart(basic.init_config_states) +hookenv.atexit(basic.clear_config_states) + + +# This will load and run the appropriate @hook and other decorated +# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive, +# and $JUJU_CHARM_DIR/hooks/relations. +# +# See https://jujucharms.com/docs/stable/authors-charm-building +# for more information on this pattern. +from charms.reactive import main # noqa +main() diff --git a/kubernetes-worker/hooks/azure-relation-joined b/kubernetes-worker/hooks/azure-relation-joined new file mode 100755 index 0000000..9858c6b --- /dev/null +++ b/kubernetes-worker/hooks/azure-relation-joined @@ -0,0 +1,22 @@ +#!/usr/bin/env python3 + +# Load modules from $JUJU_CHARM_DIR/lib +import sys +sys.path.append('lib') + +from charms.layer import basic # noqa +basic.bootstrap_charm_deps() + +from charmhelpers.core import hookenv # noqa +hookenv.atstart(basic.init_config_states) +hookenv.atexit(basic.clear_config_states) + + +# This will load and run the appropriate @hook and other decorated +# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive, +# and $JUJU_CHARM_DIR/hooks/relations. +# +# See https://jujucharms.com/docs/stable/authors-charm-building +# for more information on this pattern. 
+from charms.reactive import main # noqa +main() diff --git a/kubernetes-worker/hooks/certificates-relation-broken b/kubernetes-worker/hooks/certificates-relation-broken new file mode 100755 index 0000000..9858c6b --- /dev/null +++ b/kubernetes-worker/hooks/certificates-relation-broken @@ -0,0 +1,22 @@ +#!/usr/bin/env python3 + +# Load modules from $JUJU_CHARM_DIR/lib +import sys +sys.path.append('lib') + +from charms.layer import basic # noqa +basic.bootstrap_charm_deps() + +from charmhelpers.core import hookenv # noqa +hookenv.atstart(basic.init_config_states) +hookenv.atexit(basic.clear_config_states) + + +# This will load and run the appropriate @hook and other decorated +# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive, +# and $JUJU_CHARM_DIR/hooks/relations. +# +# See https://jujucharms.com/docs/stable/authors-charm-building +# for more information on this pattern. +from charms.reactive import main # noqa +main() diff --git a/kubernetes-worker/hooks/certificates-relation-changed b/kubernetes-worker/hooks/certificates-relation-changed new file mode 100755 index 0000000..9858c6b --- /dev/null +++ b/kubernetes-worker/hooks/certificates-relation-changed @@ -0,0 +1,22 @@ +#!/usr/bin/env python3 + +# Load modules from $JUJU_CHARM_DIR/lib +import sys +sys.path.append('lib') + +from charms.layer import basic # noqa +basic.bootstrap_charm_deps() + +from charmhelpers.core import hookenv # noqa +hookenv.atstart(basic.init_config_states) +hookenv.atexit(basic.clear_config_states) + + +# This will load and run the appropriate @hook and other decorated +# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive, +# and $JUJU_CHARM_DIR/hooks/relations. +# +# See https://jujucharms.com/docs/stable/authors-charm-building +# for more information on this pattern. 
+from charms.reactive import main # noqa +main() diff --git a/kubernetes-worker/hooks/certificates-relation-created b/kubernetes-worker/hooks/certificates-relation-created new file mode 100755 index 0000000..9858c6b --- /dev/null +++ b/kubernetes-worker/hooks/certificates-relation-created @@ -0,0 +1,22 @@ +#!/usr/bin/env python3 + +# Load modules from $JUJU_CHARM_DIR/lib +import sys +sys.path.append('lib') + +from charms.layer import basic # noqa +basic.bootstrap_charm_deps() + +from charmhelpers.core import hookenv # noqa +hookenv.atstart(basic.init_config_states) +hookenv.atexit(basic.clear_config_states) + + +# This will load and run the appropriate @hook and other decorated +# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive, +# and $JUJU_CHARM_DIR/hooks/relations. +# +# See https://jujucharms.com/docs/stable/authors-charm-building +# for more information on this pattern. +from charms.reactive import main # noqa +main() diff --git a/kubernetes-worker/hooks/certificates-relation-departed b/kubernetes-worker/hooks/certificates-relation-departed new file mode 100755 index 0000000..9858c6b --- /dev/null +++ b/kubernetes-worker/hooks/certificates-relation-departed @@ -0,0 +1,22 @@ +#!/usr/bin/env python3 + +# Load modules from $JUJU_CHARM_DIR/lib +import sys +sys.path.append('lib') + +from charms.layer import basic # noqa +basic.bootstrap_charm_deps() + +from charmhelpers.core import hookenv # noqa +hookenv.atstart(basic.init_config_states) +hookenv.atexit(basic.clear_config_states) + + +# This will load and run the appropriate @hook and other decorated +# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive, +# and $JUJU_CHARM_DIR/hooks/relations. +# +# See https://jujucharms.com/docs/stable/authors-charm-building +# for more information on this pattern. 
+from charms.reactive import main # noqa +main() diff --git a/kubernetes-worker/hooks/certificates-relation-joined b/kubernetes-worker/hooks/certificates-relation-joined new file mode 100755 index 0000000..9858c6b --- /dev/null +++ b/kubernetes-worker/hooks/certificates-relation-joined @@ -0,0 +1,22 @@ +#!/usr/bin/env python3 + +# Load modules from $JUJU_CHARM_DIR/lib +import sys +sys.path.append('lib') + +from charms.layer import basic # noqa +basic.bootstrap_charm_deps() + +from charmhelpers.core import hookenv # noqa +hookenv.atstart(basic.init_config_states) +hookenv.atexit(basic.clear_config_states) + + +# This will load and run the appropriate @hook and other decorated +# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive, +# and $JUJU_CHARM_DIR/hooks/relations. +# +# See https://jujucharms.com/docs/stable/authors-charm-building +# for more information on this pattern. +from charms.reactive import main # noqa +main() diff --git a/kubernetes-worker/hooks/cni-relation-broken b/kubernetes-worker/hooks/cni-relation-broken new file mode 100755 index 0000000..9858c6b --- /dev/null +++ b/kubernetes-worker/hooks/cni-relation-broken @@ -0,0 +1,22 @@ +#!/usr/bin/env python3 + +# Load modules from $JUJU_CHARM_DIR/lib +import sys +sys.path.append('lib') + +from charms.layer import basic # noqa +basic.bootstrap_charm_deps() + +from charmhelpers.core import hookenv # noqa +hookenv.atstart(basic.init_config_states) +hookenv.atexit(basic.clear_config_states) + + +# This will load and run the appropriate @hook and other decorated +# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive, +# and $JUJU_CHARM_DIR/hooks/relations. +# +# See https://jujucharms.com/docs/stable/authors-charm-building +# for more information on this pattern. 
+from charms.reactive import main # noqa +main() diff --git a/kubernetes-worker/hooks/cni-relation-changed b/kubernetes-worker/hooks/cni-relation-changed new file mode 100755 index 0000000..9858c6b --- /dev/null +++ b/kubernetes-worker/hooks/cni-relation-changed @@ -0,0 +1,22 @@ +#!/usr/bin/env python3 + +# Load modules from $JUJU_CHARM_DIR/lib +import sys +sys.path.append('lib') + +from charms.layer import basic # noqa +basic.bootstrap_charm_deps() + +from charmhelpers.core import hookenv # noqa +hookenv.atstart(basic.init_config_states) +hookenv.atexit(basic.clear_config_states) + + +# This will load and run the appropriate @hook and other decorated +# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive, +# and $JUJU_CHARM_DIR/hooks/relations. +# +# See https://jujucharms.com/docs/stable/authors-charm-building +# for more information on this pattern. +from charms.reactive import main # noqa +main() diff --git a/kubernetes-worker/hooks/cni-relation-created b/kubernetes-worker/hooks/cni-relation-created new file mode 100755 index 0000000..9858c6b --- /dev/null +++ b/kubernetes-worker/hooks/cni-relation-created @@ -0,0 +1,22 @@ +#!/usr/bin/env python3 + +# Load modules from $JUJU_CHARM_DIR/lib +import sys +sys.path.append('lib') + +from charms.layer import basic # noqa +basic.bootstrap_charm_deps() + +from charmhelpers.core import hookenv # noqa +hookenv.atstart(basic.init_config_states) +hookenv.atexit(basic.clear_config_states) + + +# This will load and run the appropriate @hook and other decorated +# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive, +# and $JUJU_CHARM_DIR/hooks/relations. +# +# See https://jujucharms.com/docs/stable/authors-charm-building +# for more information on this pattern. 
+from charms.reactive import main # noqa +main() diff --git a/kubernetes-worker/hooks/cni-relation-departed b/kubernetes-worker/hooks/cni-relation-departed new file mode 100755 index 0000000..9858c6b --- /dev/null +++ b/kubernetes-worker/hooks/cni-relation-departed @@ -0,0 +1,22 @@ +#!/usr/bin/env python3 + +# Load modules from $JUJU_CHARM_DIR/lib +import sys +sys.path.append('lib') + +from charms.layer import basic # noqa +basic.bootstrap_charm_deps() + +from charmhelpers.core import hookenv # noqa +hookenv.atstart(basic.init_config_states) +hookenv.atexit(basic.clear_config_states) + + +# This will load and run the appropriate @hook and other decorated +# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive, +# and $JUJU_CHARM_DIR/hooks/relations. +# +# See https://jujucharms.com/docs/stable/authors-charm-building +# for more information on this pattern. +from charms.reactive import main # noqa +main() diff --git a/kubernetes-worker/hooks/cni-relation-joined b/kubernetes-worker/hooks/cni-relation-joined new file mode 100755 index 0000000..9858c6b --- /dev/null +++ b/kubernetes-worker/hooks/cni-relation-joined @@ -0,0 +1,22 @@ +#!/usr/bin/env python3 + +# Load modules from $JUJU_CHARM_DIR/lib +import sys +sys.path.append('lib') + +from charms.layer import basic # noqa +basic.bootstrap_charm_deps() + +from charmhelpers.core import hookenv # noqa +hookenv.atstart(basic.init_config_states) +hookenv.atexit(basic.clear_config_states) + + +# This will load and run the appropriate @hook and other decorated +# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive, +# and $JUJU_CHARM_DIR/hooks/relations. +# +# See https://jujucharms.com/docs/stable/authors-charm-building +# for more information on this pattern. 
+from charms.reactive import main # noqa +main() diff --git a/kubernetes-worker/hooks/collect-metrics b/kubernetes-worker/hooks/collect-metrics new file mode 100755 index 0000000..8a27863 --- /dev/null +++ b/kubernetes-worker/hooks/collect-metrics @@ -0,0 +1,46 @@ +#!/usr/bin/env python3 + +# Load modules from $CHARM_DIR/lib +import sys +sys.path.append('lib') + +import yaml +import os +from subprocess import check_output, check_call, CalledProcessError + + +def build_command(doc): + values = {} + metrics = doc.get("metrics", {}) + for metric, mdoc in metrics.items(): + if not mdoc: + continue + cmd = mdoc.get("command") + if cmd: + try: + value = check_output(cmd, shell=True, universal_newlines=True) + except CalledProcessError as e: + check_call(['juju-log', '-lERROR', + 'Error collecting metric {}:\n{}'.format( + metric, e.output)]) + continue + value = value.strip() + if value: + values[metric] = value + + if not values: + return None + command = ["add-metric"] + for metric, value in values.items(): + command.append("%s=%s" % (metric, value)) + return command + + +if __name__ == '__main__': + charm_dir = os.path.dirname(os.path.abspath(os.path.join(__file__, ".."))) + metrics_yaml = os.path.join(charm_dir, "metrics.yaml") + with open(metrics_yaml) as f: + doc = yaml.load(f) + command = build_command(doc) + if command: + check_call(command) diff --git a/kubernetes-worker/hooks/config-changed b/kubernetes-worker/hooks/config-changed new file mode 100755 index 0000000..9858c6b --- /dev/null +++ b/kubernetes-worker/hooks/config-changed @@ -0,0 +1,22 @@ +#!/usr/bin/env python3 + +# Load modules from $JUJU_CHARM_DIR/lib +import sys +sys.path.append('lib') + +from charms.layer import basic # noqa +basic.bootstrap_charm_deps() + +from charmhelpers.core import hookenv # noqa +hookenv.atstart(basic.init_config_states) +hookenv.atexit(basic.clear_config_states) + + +# This will load and run the appropriate @hook and other decorated +# handlers from 
$JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive, +# and $JUJU_CHARM_DIR/hooks/relations. +# +# See https://jujucharms.com/docs/stable/authors-charm-building +# for more information on this pattern. +from charms.reactive import main # noqa +main() diff --git a/kubernetes-worker/hooks/container-runtime-relation-broken b/kubernetes-worker/hooks/container-runtime-relation-broken new file mode 100755 index 0000000..9858c6b --- /dev/null +++ b/kubernetes-worker/hooks/container-runtime-relation-broken @@ -0,0 +1,22 @@ +#!/usr/bin/env python3 + +# Load modules from $JUJU_CHARM_DIR/lib +import sys +sys.path.append('lib') + +from charms.layer import basic # noqa +basic.bootstrap_charm_deps() + +from charmhelpers.core import hookenv # noqa +hookenv.atstart(basic.init_config_states) +hookenv.atexit(basic.clear_config_states) + + +# This will load and run the appropriate @hook and other decorated +# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive, +# and $JUJU_CHARM_DIR/hooks/relations. +# +# See https://jujucharms.com/docs/stable/authors-charm-building +# for more information on this pattern. +from charms.reactive import main # noqa +main() diff --git a/kubernetes-worker/hooks/container-runtime-relation-changed b/kubernetes-worker/hooks/container-runtime-relation-changed new file mode 100755 index 0000000..9858c6b --- /dev/null +++ b/kubernetes-worker/hooks/container-runtime-relation-changed @@ -0,0 +1,22 @@ +#!/usr/bin/env python3 + +# Load modules from $JUJU_CHARM_DIR/lib +import sys +sys.path.append('lib') + +from charms.layer import basic # noqa +basic.bootstrap_charm_deps() + +from charmhelpers.core import hookenv # noqa +hookenv.atstart(basic.init_config_states) +hookenv.atexit(basic.clear_config_states) + + +# This will load and run the appropriate @hook and other decorated +# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive, +# and $JUJU_CHARM_DIR/hooks/relations. 
+# +# See https://jujucharms.com/docs/stable/authors-charm-building +# for more information on this pattern. +from charms.reactive import main # noqa +main() diff --git a/kubernetes-worker/hooks/container-runtime-relation-created b/kubernetes-worker/hooks/container-runtime-relation-created new file mode 100755 index 0000000..9858c6b --- /dev/null +++ b/kubernetes-worker/hooks/container-runtime-relation-created @@ -0,0 +1,22 @@ +#!/usr/bin/env python3 + +# Load modules from $JUJU_CHARM_DIR/lib +import sys +sys.path.append('lib') + +from charms.layer import basic # noqa +basic.bootstrap_charm_deps() + +from charmhelpers.core import hookenv # noqa +hookenv.atstart(basic.init_config_states) +hookenv.atexit(basic.clear_config_states) + + +# This will load and run the appropriate @hook and other decorated +# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive, +# and $JUJU_CHARM_DIR/hooks/relations. +# +# See https://jujucharms.com/docs/stable/authors-charm-building +# for more information on this pattern. +from charms.reactive import main # noqa +main() diff --git a/kubernetes-worker/hooks/container-runtime-relation-departed b/kubernetes-worker/hooks/container-runtime-relation-departed new file mode 100755 index 0000000..9858c6b --- /dev/null +++ b/kubernetes-worker/hooks/container-runtime-relation-departed @@ -0,0 +1,22 @@ +#!/usr/bin/env python3 + +# Load modules from $JUJU_CHARM_DIR/lib +import sys +sys.path.append('lib') + +from charms.layer import basic # noqa +basic.bootstrap_charm_deps() + +from charmhelpers.core import hookenv # noqa +hookenv.atstart(basic.init_config_states) +hookenv.atexit(basic.clear_config_states) + + +# This will load and run the appropriate @hook and other decorated +# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive, +# and $JUJU_CHARM_DIR/hooks/relations. +# +# See https://jujucharms.com/docs/stable/authors-charm-building +# for more information on this pattern. 
+from charms.reactive import main # noqa +main() diff --git a/kubernetes-worker/hooks/container-runtime-relation-joined b/kubernetes-worker/hooks/container-runtime-relation-joined new file mode 100755 index 0000000..9858c6b --- /dev/null +++ b/kubernetes-worker/hooks/container-runtime-relation-joined @@ -0,0 +1,22 @@ +#!/usr/bin/env python3 + +# Load modules from $JUJU_CHARM_DIR/lib +import sys +sys.path.append('lib') + +from charms.layer import basic # noqa +basic.bootstrap_charm_deps() + +from charmhelpers.core import hookenv # noqa +hookenv.atstart(basic.init_config_states) +hookenv.atexit(basic.clear_config_states) + + +# This will load and run the appropriate @hook and other decorated +# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive, +# and $JUJU_CHARM_DIR/hooks/relations. +# +# See https://jujucharms.com/docs/stable/authors-charm-building +# for more information on this pattern. +from charms.reactive import main # noqa +main() diff --git a/kubernetes-worker/hooks/coordinator-relation-broken b/kubernetes-worker/hooks/coordinator-relation-broken new file mode 100755 index 0000000..9858c6b --- /dev/null +++ b/kubernetes-worker/hooks/coordinator-relation-broken @@ -0,0 +1,22 @@ +#!/usr/bin/env python3 + +# Load modules from $JUJU_CHARM_DIR/lib +import sys +sys.path.append('lib') + +from charms.layer import basic # noqa +basic.bootstrap_charm_deps() + +from charmhelpers.core import hookenv # noqa +hookenv.atstart(basic.init_config_states) +hookenv.atexit(basic.clear_config_states) + + +# This will load and run the appropriate @hook and other decorated +# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive, +# and $JUJU_CHARM_DIR/hooks/relations. +# +# See https://jujucharms.com/docs/stable/authors-charm-building +# for more information on this pattern. 
+from charms.reactive import main # noqa +main() diff --git a/kubernetes-worker/hooks/coordinator-relation-changed b/kubernetes-worker/hooks/coordinator-relation-changed new file mode 100755 index 0000000..fe39f65 --- /dev/null +++ b/kubernetes-worker/hooks/coordinator-relation-changed @@ -0,0 +1,18 @@ +#!/usr/bin/env python3 + +# Load modules from $CHARM_DIR/lib +import sys +sys.path.append('lib') + +from charms.layer.basic import bootstrap_charm_deps +bootstrap_charm_deps() + + +# This will load and run the appropriate @hook and other decorated +# handlers from $CHARM_DIR/reactive, $CHARM_DIR/hooks/reactive, +# and $CHARM_DIR/hooks/relations. +# +# See https://jujucharms.com/docs/stable/authors-charm-building +# for more information on this pattern. +from charms.reactive import main +main() diff --git a/kubernetes-worker/hooks/coordinator-relation-created b/kubernetes-worker/hooks/coordinator-relation-created new file mode 100755 index 0000000..9858c6b --- /dev/null +++ b/kubernetes-worker/hooks/coordinator-relation-created @@ -0,0 +1,22 @@ +#!/usr/bin/env python3 + +# Load modules from $JUJU_CHARM_DIR/lib +import sys +sys.path.append('lib') + +from charms.layer import basic # noqa +basic.bootstrap_charm_deps() + +from charmhelpers.core import hookenv # noqa +hookenv.atstart(basic.init_config_states) +hookenv.atexit(basic.clear_config_states) + + +# This will load and run the appropriate @hook and other decorated +# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive, +# and $JUJU_CHARM_DIR/hooks/relations. +# +# See https://jujucharms.com/docs/stable/authors-charm-building +# for more information on this pattern. 
+from charms.reactive import main # noqa +main() diff --git a/kubernetes-worker/hooks/coordinator-relation-departed b/kubernetes-worker/hooks/coordinator-relation-departed new file mode 100755 index 0000000..fe39f65 --- /dev/null +++ b/kubernetes-worker/hooks/coordinator-relation-departed @@ -0,0 +1,18 @@ +#!/usr/bin/env python3 + +# Load modules from $CHARM_DIR/lib +import sys +sys.path.append('lib') + +from charms.layer.basic import bootstrap_charm_deps +bootstrap_charm_deps() + + +# This will load and run the appropriate @hook and other decorated +# handlers from $CHARM_DIR/reactive, $CHARM_DIR/hooks/reactive, +# and $CHARM_DIR/hooks/relations. +# +# See https://jujucharms.com/docs/stable/authors-charm-building +# for more information on this pattern. +from charms.reactive import main +main() diff --git a/kubernetes-worker/hooks/coordinator-relation-joined b/kubernetes-worker/hooks/coordinator-relation-joined new file mode 100755 index 0000000..9858c6b --- /dev/null +++ b/kubernetes-worker/hooks/coordinator-relation-joined @@ -0,0 +1,22 @@ +#!/usr/bin/env python3 + +# Load modules from $JUJU_CHARM_DIR/lib +import sys +sys.path.append('lib') + +from charms.layer import basic # noqa +basic.bootstrap_charm_deps() + +from charmhelpers.core import hookenv # noqa +hookenv.atstart(basic.init_config_states) +hookenv.atexit(basic.clear_config_states) + + +# This will load and run the appropriate @hook and other decorated +# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive, +# and $JUJU_CHARM_DIR/hooks/relations. +# +# See https://jujucharms.com/docs/stable/authors-charm-building +# for more information on this pattern. 
+from charms.reactive import main # noqa +main() diff --git a/kubernetes-worker/hooks/gcp-relation-broken b/kubernetes-worker/hooks/gcp-relation-broken new file mode 100755 index 0000000..9858c6b --- /dev/null +++ b/kubernetes-worker/hooks/gcp-relation-broken @@ -0,0 +1,22 @@ +#!/usr/bin/env python3 + +# Load modules from $JUJU_CHARM_DIR/lib +import sys +sys.path.append('lib') + +from charms.layer import basic # noqa +basic.bootstrap_charm_deps() + +from charmhelpers.core import hookenv # noqa +hookenv.atstart(basic.init_config_states) +hookenv.atexit(basic.clear_config_states) + + +# This will load and run the appropriate @hook and other decorated +# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive, +# and $JUJU_CHARM_DIR/hooks/relations. +# +# See https://jujucharms.com/docs/stable/authors-charm-building +# for more information on this pattern. +from charms.reactive import main # noqa +main() diff --git a/kubernetes-worker/hooks/gcp-relation-changed b/kubernetes-worker/hooks/gcp-relation-changed new file mode 100755 index 0000000..9858c6b --- /dev/null +++ b/kubernetes-worker/hooks/gcp-relation-changed @@ -0,0 +1,22 @@ +#!/usr/bin/env python3 + +# Load modules from $JUJU_CHARM_DIR/lib +import sys +sys.path.append('lib') + +from charms.layer import basic # noqa +basic.bootstrap_charm_deps() + +from charmhelpers.core import hookenv # noqa +hookenv.atstart(basic.init_config_states) +hookenv.atexit(basic.clear_config_states) + + +# This will load and run the appropriate @hook and other decorated +# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive, +# and $JUJU_CHARM_DIR/hooks/relations. +# +# See https://jujucharms.com/docs/stable/authors-charm-building +# for more information on this pattern. 
+from charms.reactive import main # noqa +main() diff --git a/kubernetes-worker/hooks/gcp-relation-created b/kubernetes-worker/hooks/gcp-relation-created new file mode 100755 index 0000000..9858c6b --- /dev/null +++ b/kubernetes-worker/hooks/gcp-relation-created @@ -0,0 +1,22 @@ +#!/usr/bin/env python3 + +# Load modules from $JUJU_CHARM_DIR/lib +import sys +sys.path.append('lib') + +from charms.layer import basic # noqa +basic.bootstrap_charm_deps() + +from charmhelpers.core import hookenv # noqa +hookenv.atstart(basic.init_config_states) +hookenv.atexit(basic.clear_config_states) + + +# This will load and run the appropriate @hook and other decorated +# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive, +# and $JUJU_CHARM_DIR/hooks/relations. +# +# See https://jujucharms.com/docs/stable/authors-charm-building +# for more information on this pattern. +from charms.reactive import main # noqa +main() diff --git a/kubernetes-worker/hooks/gcp-relation-departed b/kubernetes-worker/hooks/gcp-relation-departed new file mode 100755 index 0000000..9858c6b --- /dev/null +++ b/kubernetes-worker/hooks/gcp-relation-departed @@ -0,0 +1,22 @@ +#!/usr/bin/env python3 + +# Load modules from $JUJU_CHARM_DIR/lib +import sys +sys.path.append('lib') + +from charms.layer import basic # noqa +basic.bootstrap_charm_deps() + +from charmhelpers.core import hookenv # noqa +hookenv.atstart(basic.init_config_states) +hookenv.atexit(basic.clear_config_states) + + +# This will load and run the appropriate @hook and other decorated +# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive, +# and $JUJU_CHARM_DIR/hooks/relations. +# +# See https://jujucharms.com/docs/stable/authors-charm-building +# for more information on this pattern. 
+from charms.reactive import main # noqa +main() diff --git a/kubernetes-worker/hooks/gcp-relation-joined b/kubernetes-worker/hooks/gcp-relation-joined new file mode 100755 index 0000000..9858c6b --- /dev/null +++ b/kubernetes-worker/hooks/gcp-relation-joined @@ -0,0 +1,22 @@ +#!/usr/bin/env python3 + +# Load modules from $JUJU_CHARM_DIR/lib +import sys +sys.path.append('lib') + +from charms.layer import basic # noqa +basic.bootstrap_charm_deps() + +from charmhelpers.core import hookenv # noqa +hookenv.atstart(basic.init_config_states) +hookenv.atexit(basic.clear_config_states) + + +# This will load and run the appropriate @hook and other decorated +# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive, +# and $JUJU_CHARM_DIR/hooks/relations. +# +# See https://jujucharms.com/docs/stable/authors-charm-building +# for more information on this pattern. +from charms.reactive import main # noqa +main() diff --git a/kubernetes-worker/hooks/hook.template b/kubernetes-worker/hooks/hook.template new file mode 100644 index 0000000..9858c6b --- /dev/null +++ b/kubernetes-worker/hooks/hook.template @@ -0,0 +1,22 @@ +#!/usr/bin/env python3 + +# Load modules from $JUJU_CHARM_DIR/lib +import sys +sys.path.append('lib') + +from charms.layer import basic # noqa +basic.bootstrap_charm_deps() + +from charmhelpers.core import hookenv # noqa +hookenv.atstart(basic.init_config_states) +hookenv.atexit(basic.clear_config_states) + + +# This will load and run the appropriate @hook and other decorated +# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive, +# and $JUJU_CHARM_DIR/hooks/relations. +# +# See https://jujucharms.com/docs/stable/authors-charm-building +# for more information on this pattern. 
+from charms.reactive import main # noqa +main() diff --git a/kubernetes-worker/hooks/ingress-proxy-relation-broken b/kubernetes-worker/hooks/ingress-proxy-relation-broken new file mode 100755 index 0000000..9858c6b --- /dev/null +++ b/kubernetes-worker/hooks/ingress-proxy-relation-broken @@ -0,0 +1,22 @@ +#!/usr/bin/env python3 + +# Load modules from $JUJU_CHARM_DIR/lib +import sys +sys.path.append('lib') + +from charms.layer import basic # noqa +basic.bootstrap_charm_deps() + +from charmhelpers.core import hookenv # noqa +hookenv.atstart(basic.init_config_states) +hookenv.atexit(basic.clear_config_states) + + +# This will load and run the appropriate @hook and other decorated +# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive, +# and $JUJU_CHARM_DIR/hooks/relations. +# +# See https://jujucharms.com/docs/stable/authors-charm-building +# for more information on this pattern. +from charms.reactive import main # noqa +main() diff --git a/kubernetes-worker/hooks/ingress-proxy-relation-changed b/kubernetes-worker/hooks/ingress-proxy-relation-changed new file mode 100755 index 0000000..9858c6b --- /dev/null +++ b/kubernetes-worker/hooks/ingress-proxy-relation-changed @@ -0,0 +1,22 @@ +#!/usr/bin/env python3 + +# Load modules from $JUJU_CHARM_DIR/lib +import sys +sys.path.append('lib') + +from charms.layer import basic # noqa +basic.bootstrap_charm_deps() + +from charmhelpers.core import hookenv # noqa +hookenv.atstart(basic.init_config_states) +hookenv.atexit(basic.clear_config_states) + + +# This will load and run the appropriate @hook and other decorated +# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive, +# and $JUJU_CHARM_DIR/hooks/relations. +# +# See https://jujucharms.com/docs/stable/authors-charm-building +# for more information on this pattern. 
+from charms.reactive import main # noqa +main() diff --git a/kubernetes-worker/hooks/ingress-proxy-relation-created b/kubernetes-worker/hooks/ingress-proxy-relation-created new file mode 100755 index 0000000..9858c6b --- /dev/null +++ b/kubernetes-worker/hooks/ingress-proxy-relation-created @@ -0,0 +1,22 @@ +#!/usr/bin/env python3 + +# Load modules from $JUJU_CHARM_DIR/lib +import sys +sys.path.append('lib') + +from charms.layer import basic # noqa +basic.bootstrap_charm_deps() + +from charmhelpers.core import hookenv # noqa +hookenv.atstart(basic.init_config_states) +hookenv.atexit(basic.clear_config_states) + + +# This will load and run the appropriate @hook and other decorated +# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive, +# and $JUJU_CHARM_DIR/hooks/relations. +# +# See https://jujucharms.com/docs/stable/authors-charm-building +# for more information on this pattern. +from charms.reactive import main # noqa +main() diff --git a/kubernetes-worker/hooks/ingress-proxy-relation-departed b/kubernetes-worker/hooks/ingress-proxy-relation-departed new file mode 100755 index 0000000..9858c6b --- /dev/null +++ b/kubernetes-worker/hooks/ingress-proxy-relation-departed @@ -0,0 +1,22 @@ +#!/usr/bin/env python3 + +# Load modules from $JUJU_CHARM_DIR/lib +import sys +sys.path.append('lib') + +from charms.layer import basic # noqa +basic.bootstrap_charm_deps() + +from charmhelpers.core import hookenv # noqa +hookenv.atstart(basic.init_config_states) +hookenv.atexit(basic.clear_config_states) + + +# This will load and run the appropriate @hook and other decorated +# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive, +# and $JUJU_CHARM_DIR/hooks/relations. +# +# See https://jujucharms.com/docs/stable/authors-charm-building +# for more information on this pattern. 
+from charms.reactive import main # noqa +main() diff --git a/kubernetes-worker/hooks/ingress-proxy-relation-joined b/kubernetes-worker/hooks/ingress-proxy-relation-joined new file mode 100755 index 0000000..9858c6b --- /dev/null +++ b/kubernetes-worker/hooks/ingress-proxy-relation-joined @@ -0,0 +1,22 @@ +#!/usr/bin/env python3 + +# Load modules from $JUJU_CHARM_DIR/lib +import sys +sys.path.append('lib') + +from charms.layer import basic # noqa +basic.bootstrap_charm_deps() + +from charmhelpers.core import hookenv # noqa +hookenv.atstart(basic.init_config_states) +hookenv.atexit(basic.clear_config_states) + + +# This will load and run the appropriate @hook and other decorated +# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive, +# and $JUJU_CHARM_DIR/hooks/relations. +# +# See https://jujucharms.com/docs/stable/authors-charm-building +# for more information on this pattern. +from charms.reactive import main # noqa +main() diff --git a/kubernetes-worker/hooks/install b/kubernetes-worker/hooks/install new file mode 100755 index 0000000..9858c6b --- /dev/null +++ b/kubernetes-worker/hooks/install @@ -0,0 +1,22 @@ +#!/usr/bin/env python3 + +# Load modules from $JUJU_CHARM_DIR/lib +import sys +sys.path.append('lib') + +from charms.layer import basic # noqa +basic.bootstrap_charm_deps() + +from charmhelpers.core import hookenv # noqa +hookenv.atstart(basic.init_config_states) +hookenv.atexit(basic.clear_config_states) + + +# This will load and run the appropriate @hook and other decorated +# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive, +# and $JUJU_CHARM_DIR/hooks/relations. +# +# See https://jujucharms.com/docs/stable/authors-charm-building +# for more information on this pattern. 
+from charms.reactive import main # noqa +main() diff --git a/kubernetes-worker/hooks/kube-api-endpoint-relation-broken b/kubernetes-worker/hooks/kube-api-endpoint-relation-broken new file mode 100755 index 0000000..9858c6b --- /dev/null +++ b/kubernetes-worker/hooks/kube-api-endpoint-relation-broken @@ -0,0 +1,22 @@ +#!/usr/bin/env python3 + +# Load modules from $JUJU_CHARM_DIR/lib +import sys +sys.path.append('lib') + +from charms.layer import basic # noqa +basic.bootstrap_charm_deps() + +from charmhelpers.core import hookenv # noqa +hookenv.atstart(basic.init_config_states) +hookenv.atexit(basic.clear_config_states) + + +# This will load and run the appropriate @hook and other decorated +# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive, +# and $JUJU_CHARM_DIR/hooks/relations. +# +# See https://jujucharms.com/docs/stable/authors-charm-building +# for more information on this pattern. +from charms.reactive import main # noqa +main() diff --git a/kubernetes-worker/hooks/kube-api-endpoint-relation-changed b/kubernetes-worker/hooks/kube-api-endpoint-relation-changed new file mode 100755 index 0000000..9858c6b --- /dev/null +++ b/kubernetes-worker/hooks/kube-api-endpoint-relation-changed @@ -0,0 +1,22 @@ +#!/usr/bin/env python3 + +# Load modules from $JUJU_CHARM_DIR/lib +import sys +sys.path.append('lib') + +from charms.layer import basic # noqa +basic.bootstrap_charm_deps() + +from charmhelpers.core import hookenv # noqa +hookenv.atstart(basic.init_config_states) +hookenv.atexit(basic.clear_config_states) + + +# This will load and run the appropriate @hook and other decorated +# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive, +# and $JUJU_CHARM_DIR/hooks/relations. +# +# See https://jujucharms.com/docs/stable/authors-charm-building +# for more information on this pattern. 
+from charms.reactive import main # noqa +main() diff --git a/kubernetes-worker/hooks/kube-api-endpoint-relation-created b/kubernetes-worker/hooks/kube-api-endpoint-relation-created new file mode 100755 index 0000000..9858c6b --- /dev/null +++ b/kubernetes-worker/hooks/kube-api-endpoint-relation-created @@ -0,0 +1,22 @@ +#!/usr/bin/env python3 + +# Load modules from $JUJU_CHARM_DIR/lib +import sys +sys.path.append('lib') + +from charms.layer import basic # noqa +basic.bootstrap_charm_deps() + +from charmhelpers.core import hookenv # noqa +hookenv.atstart(basic.init_config_states) +hookenv.atexit(basic.clear_config_states) + + +# This will load and run the appropriate @hook and other decorated +# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive, +# and $JUJU_CHARM_DIR/hooks/relations. +# +# See https://jujucharms.com/docs/stable/authors-charm-building +# for more information on this pattern. +from charms.reactive import main # noqa +main() diff --git a/kubernetes-worker/hooks/kube-api-endpoint-relation-departed b/kubernetes-worker/hooks/kube-api-endpoint-relation-departed new file mode 100755 index 0000000..9858c6b --- /dev/null +++ b/kubernetes-worker/hooks/kube-api-endpoint-relation-departed @@ -0,0 +1,22 @@ +#!/usr/bin/env python3 + +# Load modules from $JUJU_CHARM_DIR/lib +import sys +sys.path.append('lib') + +from charms.layer import basic # noqa +basic.bootstrap_charm_deps() + +from charmhelpers.core import hookenv # noqa +hookenv.atstart(basic.init_config_states) +hookenv.atexit(basic.clear_config_states) + + +# This will load and run the appropriate @hook and other decorated +# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive, +# and $JUJU_CHARM_DIR/hooks/relations. +# +# See https://jujucharms.com/docs/stable/authors-charm-building +# for more information on this pattern. 
+from charms.reactive import main # noqa +main() diff --git a/kubernetes-worker/hooks/kube-api-endpoint-relation-joined b/kubernetes-worker/hooks/kube-api-endpoint-relation-joined new file mode 100755 index 0000000..9858c6b --- /dev/null +++ b/kubernetes-worker/hooks/kube-api-endpoint-relation-joined @@ -0,0 +1,22 @@ +#!/usr/bin/env python3 + +# Load modules from $JUJU_CHARM_DIR/lib +import sys +sys.path.append('lib') + +from charms.layer import basic # noqa +basic.bootstrap_charm_deps() + +from charmhelpers.core import hookenv # noqa +hookenv.atstart(basic.init_config_states) +hookenv.atexit(basic.clear_config_states) + + +# This will load and run the appropriate @hook and other decorated +# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive, +# and $JUJU_CHARM_DIR/hooks/relations. +# +# See https://jujucharms.com/docs/stable/authors-charm-building +# for more information on this pattern. +from charms.reactive import main # noqa +main() diff --git a/kubernetes-worker/hooks/kube-control-relation-broken b/kubernetes-worker/hooks/kube-control-relation-broken new file mode 100755 index 0000000..9858c6b --- /dev/null +++ b/kubernetes-worker/hooks/kube-control-relation-broken @@ -0,0 +1,22 @@ +#!/usr/bin/env python3 + +# Load modules from $JUJU_CHARM_DIR/lib +import sys +sys.path.append('lib') + +from charms.layer import basic # noqa +basic.bootstrap_charm_deps() + +from charmhelpers.core import hookenv # noqa +hookenv.atstart(basic.init_config_states) +hookenv.atexit(basic.clear_config_states) + + +# This will load and run the appropriate @hook and other decorated +# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive, +# and $JUJU_CHARM_DIR/hooks/relations. +# +# See https://jujucharms.com/docs/stable/authors-charm-building +# for more information on this pattern. 
+from charms.reactive import main # noqa +main() diff --git a/kubernetes-worker/hooks/kube-control-relation-changed b/kubernetes-worker/hooks/kube-control-relation-changed new file mode 100755 index 0000000..9858c6b --- /dev/null +++ b/kubernetes-worker/hooks/kube-control-relation-changed @@ -0,0 +1,22 @@ +#!/usr/bin/env python3 + +# Load modules from $JUJU_CHARM_DIR/lib +import sys +sys.path.append('lib') + +from charms.layer import basic # noqa +basic.bootstrap_charm_deps() + +from charmhelpers.core import hookenv # noqa +hookenv.atstart(basic.init_config_states) +hookenv.atexit(basic.clear_config_states) + + +# This will load and run the appropriate @hook and other decorated +# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive, +# and $JUJU_CHARM_DIR/hooks/relations. +# +# See https://jujucharms.com/docs/stable/authors-charm-building +# for more information on this pattern. +from charms.reactive import main # noqa +main() diff --git a/kubernetes-worker/hooks/kube-control-relation-created b/kubernetes-worker/hooks/kube-control-relation-created new file mode 100755 index 0000000..9858c6b --- /dev/null +++ b/kubernetes-worker/hooks/kube-control-relation-created @@ -0,0 +1,22 @@ +#!/usr/bin/env python3 + +# Load modules from $JUJU_CHARM_DIR/lib +import sys +sys.path.append('lib') + +from charms.layer import basic # noqa +basic.bootstrap_charm_deps() + +from charmhelpers.core import hookenv # noqa +hookenv.atstart(basic.init_config_states) +hookenv.atexit(basic.clear_config_states) + + +# This will load and run the appropriate @hook and other decorated +# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive, +# and $JUJU_CHARM_DIR/hooks/relations. +# +# See https://jujucharms.com/docs/stable/authors-charm-building +# for more information on this pattern. 
+from charms.reactive import main # noqa +main() diff --git a/kubernetes-worker/hooks/kube-control-relation-departed b/kubernetes-worker/hooks/kube-control-relation-departed new file mode 100755 index 0000000..9858c6b --- /dev/null +++ b/kubernetes-worker/hooks/kube-control-relation-departed @@ -0,0 +1,22 @@ +#!/usr/bin/env python3 + +# Load modules from $JUJU_CHARM_DIR/lib +import sys +sys.path.append('lib') + +from charms.layer import basic # noqa +basic.bootstrap_charm_deps() + +from charmhelpers.core import hookenv # noqa +hookenv.atstart(basic.init_config_states) +hookenv.atexit(basic.clear_config_states) + + +# This will load and run the appropriate @hook and other decorated +# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive, +# and $JUJU_CHARM_DIR/hooks/relations. +# +# See https://jujucharms.com/docs/stable/authors-charm-building +# for more information on this pattern. +from charms.reactive import main # noqa +main() diff --git a/kubernetes-worker/hooks/kube-control-relation-joined b/kubernetes-worker/hooks/kube-control-relation-joined new file mode 100755 index 0000000..9858c6b --- /dev/null +++ b/kubernetes-worker/hooks/kube-control-relation-joined @@ -0,0 +1,22 @@ +#!/usr/bin/env python3 + +# Load modules from $JUJU_CHARM_DIR/lib +import sys +sys.path.append('lib') + +from charms.layer import basic # noqa +basic.bootstrap_charm_deps() + +from charmhelpers.core import hookenv # noqa +hookenv.atstart(basic.init_config_states) +hookenv.atexit(basic.clear_config_states) + + +# This will load and run the appropriate @hook and other decorated +# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive, +# and $JUJU_CHARM_DIR/hooks/relations. +# +# See https://jujucharms.com/docs/stable/authors-charm-building +# for more information on this pattern. 
+from charms.reactive import main # noqa +main() diff --git a/kubernetes-worker/hooks/leader-elected b/kubernetes-worker/hooks/leader-elected new file mode 100755 index 0000000..fe39f65 --- /dev/null +++ b/kubernetes-worker/hooks/leader-elected @@ -0,0 +1,18 @@ +#!/usr/bin/env python3 + +# Load modules from $CHARM_DIR/lib +import sys +sys.path.append('lib') + +from charms.layer.basic import bootstrap_charm_deps +bootstrap_charm_deps() + + +# This will load and run the appropriate @hook and other decorated +# handlers from $CHARM_DIR/reactive, $CHARM_DIR/hooks/reactive, +# and $CHARM_DIR/hooks/relations. +# +# See https://jujucharms.com/docs/stable/authors-charm-building +# for more information on this pattern. +from charms.reactive import main +main() diff --git a/kubernetes-worker/hooks/leader-settings-changed b/kubernetes-worker/hooks/leader-settings-changed new file mode 100755 index 0000000..fe39f65 --- /dev/null +++ b/kubernetes-worker/hooks/leader-settings-changed @@ -0,0 +1,18 @@ +#!/usr/bin/env python3 + +# Load modules from $CHARM_DIR/lib +import sys +sys.path.append('lib') + +from charms.layer.basic import bootstrap_charm_deps +bootstrap_charm_deps() + + +# This will load and run the appropriate @hook and other decorated +# handlers from $CHARM_DIR/reactive, $CHARM_DIR/hooks/reactive, +# and $CHARM_DIR/hooks/relations. +# +# See https://jujucharms.com/docs/stable/authors-charm-building +# for more information on this pattern. 
+from charms.reactive import main +main() diff --git a/kubernetes-worker/hooks/nfs-relation-broken b/kubernetes-worker/hooks/nfs-relation-broken new file mode 100755 index 0000000..9858c6b --- /dev/null +++ b/kubernetes-worker/hooks/nfs-relation-broken @@ -0,0 +1,22 @@ +#!/usr/bin/env python3 + +# Load modules from $JUJU_CHARM_DIR/lib +import sys +sys.path.append('lib') + +from charms.layer import basic # noqa +basic.bootstrap_charm_deps() + +from charmhelpers.core import hookenv # noqa +hookenv.atstart(basic.init_config_states) +hookenv.atexit(basic.clear_config_states) + + +# This will load and run the appropriate @hook and other decorated +# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive, +# and $JUJU_CHARM_DIR/hooks/relations. +# +# See https://jujucharms.com/docs/stable/authors-charm-building +# for more information on this pattern. +from charms.reactive import main # noqa +main() diff --git a/kubernetes-worker/hooks/nfs-relation-changed b/kubernetes-worker/hooks/nfs-relation-changed new file mode 100755 index 0000000..9858c6b --- /dev/null +++ b/kubernetes-worker/hooks/nfs-relation-changed @@ -0,0 +1,22 @@ +#!/usr/bin/env python3 + +# Load modules from $JUJU_CHARM_DIR/lib +import sys +sys.path.append('lib') + +from charms.layer import basic # noqa +basic.bootstrap_charm_deps() + +from charmhelpers.core import hookenv # noqa +hookenv.atstart(basic.init_config_states) +hookenv.atexit(basic.clear_config_states) + + +# This will load and run the appropriate @hook and other decorated +# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive, +# and $JUJU_CHARM_DIR/hooks/relations. +# +# See https://jujucharms.com/docs/stable/authors-charm-building +# for more information on this pattern. 
+from charms.reactive import main # noqa +main() diff --git a/kubernetes-worker/hooks/nfs-relation-created b/kubernetes-worker/hooks/nfs-relation-created new file mode 100755 index 0000000..9858c6b --- /dev/null +++ b/kubernetes-worker/hooks/nfs-relation-created @@ -0,0 +1,22 @@ +#!/usr/bin/env python3 + +# Load modules from $JUJU_CHARM_DIR/lib +import sys +sys.path.append('lib') + +from charms.layer import basic # noqa +basic.bootstrap_charm_deps() + +from charmhelpers.core import hookenv # noqa +hookenv.atstart(basic.init_config_states) +hookenv.atexit(basic.clear_config_states) + + +# This will load and run the appropriate @hook and other decorated +# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive, +# and $JUJU_CHARM_DIR/hooks/relations. +# +# See https://jujucharms.com/docs/stable/authors-charm-building +# for more information on this pattern. +from charms.reactive import main # noqa +main() diff --git a/kubernetes-worker/hooks/nfs-relation-departed b/kubernetes-worker/hooks/nfs-relation-departed new file mode 100755 index 0000000..9858c6b --- /dev/null +++ b/kubernetes-worker/hooks/nfs-relation-departed @@ -0,0 +1,22 @@ +#!/usr/bin/env python3 + +# Load modules from $JUJU_CHARM_DIR/lib +import sys +sys.path.append('lib') + +from charms.layer import basic # noqa +basic.bootstrap_charm_deps() + +from charmhelpers.core import hookenv # noqa +hookenv.atstart(basic.init_config_states) +hookenv.atexit(basic.clear_config_states) + + +# This will load and run the appropriate @hook and other decorated +# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive, +# and $JUJU_CHARM_DIR/hooks/relations. +# +# See https://jujucharms.com/docs/stable/authors-charm-building +# for more information on this pattern. 
+from charms.reactive import main # noqa +main() diff --git a/kubernetes-worker/hooks/nfs-relation-joined b/kubernetes-worker/hooks/nfs-relation-joined new file mode 100755 index 0000000..9858c6b --- /dev/null +++ b/kubernetes-worker/hooks/nfs-relation-joined @@ -0,0 +1,22 @@ +#!/usr/bin/env python3 + +# Load modules from $JUJU_CHARM_DIR/lib +import sys +sys.path.append('lib') + +from charms.layer import basic # noqa +basic.bootstrap_charm_deps() + +from charmhelpers.core import hookenv # noqa +hookenv.atstart(basic.init_config_states) +hookenv.atexit(basic.clear_config_states) + + +# This will load and run the appropriate @hook and other decorated +# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive, +# and $JUJU_CHARM_DIR/hooks/relations. +# +# See https://jujucharms.com/docs/stable/authors-charm-building +# for more information on this pattern. +from charms.reactive import main # noqa +main() diff --git a/kubernetes-worker/hooks/nrpe-external-master-relation-broken b/kubernetes-worker/hooks/nrpe-external-master-relation-broken new file mode 100755 index 0000000..9858c6b --- /dev/null +++ b/kubernetes-worker/hooks/nrpe-external-master-relation-broken @@ -0,0 +1,22 @@ +#!/usr/bin/env python3 + +# Load modules from $JUJU_CHARM_DIR/lib +import sys +sys.path.append('lib') + +from charms.layer import basic # noqa +basic.bootstrap_charm_deps() + +from charmhelpers.core import hookenv # noqa +hookenv.atstart(basic.init_config_states) +hookenv.atexit(basic.clear_config_states) + + +# This will load and run the appropriate @hook and other decorated +# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive, +# and $JUJU_CHARM_DIR/hooks/relations. +# +# See https://jujucharms.com/docs/stable/authors-charm-building +# for more information on this pattern. 
+from charms.reactive import main # noqa +main() diff --git a/kubernetes-worker/hooks/nrpe-external-master-relation-changed b/kubernetes-worker/hooks/nrpe-external-master-relation-changed new file mode 100755 index 0000000..9858c6b --- /dev/null +++ b/kubernetes-worker/hooks/nrpe-external-master-relation-changed @@ -0,0 +1,22 @@ +#!/usr/bin/env python3 + +# Load modules from $JUJU_CHARM_DIR/lib +import sys +sys.path.append('lib') + +from charms.layer import basic # noqa +basic.bootstrap_charm_deps() + +from charmhelpers.core import hookenv # noqa +hookenv.atstart(basic.init_config_states) +hookenv.atexit(basic.clear_config_states) + + +# This will load and run the appropriate @hook and other decorated +# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive, +# and $JUJU_CHARM_DIR/hooks/relations. +# +# See https://jujucharms.com/docs/stable/authors-charm-building +# for more information on this pattern. +from charms.reactive import main # noqa +main() diff --git a/kubernetes-worker/hooks/nrpe-external-master-relation-created b/kubernetes-worker/hooks/nrpe-external-master-relation-created new file mode 100755 index 0000000..9858c6b --- /dev/null +++ b/kubernetes-worker/hooks/nrpe-external-master-relation-created @@ -0,0 +1,22 @@ +#!/usr/bin/env python3 + +# Load modules from $JUJU_CHARM_DIR/lib +import sys +sys.path.append('lib') + +from charms.layer import basic # noqa +basic.bootstrap_charm_deps() + +from charmhelpers.core import hookenv # noqa +hookenv.atstart(basic.init_config_states) +hookenv.atexit(basic.clear_config_states) + + +# This will load and run the appropriate @hook and other decorated +# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive, +# and $JUJU_CHARM_DIR/hooks/relations. +# +# See https://jujucharms.com/docs/stable/authors-charm-building +# for more information on this pattern. 
+from charms.reactive import main # noqa +main() diff --git a/kubernetes-worker/hooks/nrpe-external-master-relation-departed b/kubernetes-worker/hooks/nrpe-external-master-relation-departed new file mode 100755 index 0000000..9858c6b --- /dev/null +++ b/kubernetes-worker/hooks/nrpe-external-master-relation-departed @@ -0,0 +1,22 @@ +#!/usr/bin/env python3 + +# Load modules from $JUJU_CHARM_DIR/lib +import sys +sys.path.append('lib') + +from charms.layer import basic # noqa +basic.bootstrap_charm_deps() + +from charmhelpers.core import hookenv # noqa +hookenv.atstart(basic.init_config_states) +hookenv.atexit(basic.clear_config_states) + + +# This will load and run the appropriate @hook and other decorated +# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive, +# and $JUJU_CHARM_DIR/hooks/relations. +# +# See https://jujucharms.com/docs/stable/authors-charm-building +# for more information on this pattern. +from charms.reactive import main # noqa +main() diff --git a/kubernetes-worker/hooks/nrpe-external-master-relation-joined b/kubernetes-worker/hooks/nrpe-external-master-relation-joined new file mode 100755 index 0000000..9858c6b --- /dev/null +++ b/kubernetes-worker/hooks/nrpe-external-master-relation-joined @@ -0,0 +1,22 @@ +#!/usr/bin/env python3 + +# Load modules from $JUJU_CHARM_DIR/lib +import sys +sys.path.append('lib') + +from charms.layer import basic # noqa +basic.bootstrap_charm_deps() + +from charmhelpers.core import hookenv # noqa +hookenv.atstart(basic.init_config_states) +hookenv.atexit(basic.clear_config_states) + + +# This will load and run the appropriate @hook and other decorated +# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive, +# and $JUJU_CHARM_DIR/hooks/relations. +# +# See https://jujucharms.com/docs/stable/authors-charm-building +# for more information on this pattern. 
+from charms.reactive import main # noqa +main() diff --git a/kubernetes-worker/hooks/openstack-relation-broken b/kubernetes-worker/hooks/openstack-relation-broken new file mode 100755 index 0000000..9858c6b --- /dev/null +++ b/kubernetes-worker/hooks/openstack-relation-broken @@ -0,0 +1,22 @@ +#!/usr/bin/env python3 + +# Load modules from $JUJU_CHARM_DIR/lib +import sys +sys.path.append('lib') + +from charms.layer import basic # noqa +basic.bootstrap_charm_deps() + +from charmhelpers.core import hookenv # noqa +hookenv.atstart(basic.init_config_states) +hookenv.atexit(basic.clear_config_states) + + +# This will load and run the appropriate @hook and other decorated +# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive, +# and $JUJU_CHARM_DIR/hooks/relations. +# +# See https://jujucharms.com/docs/stable/authors-charm-building +# for more information on this pattern. +from charms.reactive import main # noqa +main() diff --git a/kubernetes-worker/hooks/openstack-relation-changed b/kubernetes-worker/hooks/openstack-relation-changed new file mode 100755 index 0000000..9858c6b --- /dev/null +++ b/kubernetes-worker/hooks/openstack-relation-changed @@ -0,0 +1,22 @@ +#!/usr/bin/env python3 + +# Load modules from $JUJU_CHARM_DIR/lib +import sys +sys.path.append('lib') + +from charms.layer import basic # noqa +basic.bootstrap_charm_deps() + +from charmhelpers.core import hookenv # noqa +hookenv.atstart(basic.init_config_states) +hookenv.atexit(basic.clear_config_states) + + +# This will load and run the appropriate @hook and other decorated +# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive, +# and $JUJU_CHARM_DIR/hooks/relations. +# +# See https://jujucharms.com/docs/stable/authors-charm-building +# for more information on this pattern. 
+from charms.reactive import main # noqa +main() diff --git a/kubernetes-worker/hooks/openstack-relation-created b/kubernetes-worker/hooks/openstack-relation-created new file mode 100755 index 0000000..9858c6b --- /dev/null +++ b/kubernetes-worker/hooks/openstack-relation-created @@ -0,0 +1,22 @@ +#!/usr/bin/env python3 + +# Load modules from $JUJU_CHARM_DIR/lib +import sys +sys.path.append('lib') + +from charms.layer import basic # noqa +basic.bootstrap_charm_deps() + +from charmhelpers.core import hookenv # noqa +hookenv.atstart(basic.init_config_states) +hookenv.atexit(basic.clear_config_states) + + +# This will load and run the appropriate @hook and other decorated +# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive, +# and $JUJU_CHARM_DIR/hooks/relations. +# +# See https://jujucharms.com/docs/stable/authors-charm-building +# for more information on this pattern. +from charms.reactive import main # noqa +main() diff --git a/kubernetes-worker/hooks/openstack-relation-departed b/kubernetes-worker/hooks/openstack-relation-departed new file mode 100755 index 0000000..9858c6b --- /dev/null +++ b/kubernetes-worker/hooks/openstack-relation-departed @@ -0,0 +1,22 @@ +#!/usr/bin/env python3 + +# Load modules from $JUJU_CHARM_DIR/lib +import sys +sys.path.append('lib') + +from charms.layer import basic # noqa +basic.bootstrap_charm_deps() + +from charmhelpers.core import hookenv # noqa +hookenv.atstart(basic.init_config_states) +hookenv.atexit(basic.clear_config_states) + + +# This will load and run the appropriate @hook and other decorated +# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive, +# and $JUJU_CHARM_DIR/hooks/relations. +# +# See https://jujucharms.com/docs/stable/authors-charm-building +# for more information on this pattern. 
+from charms.reactive import main # noqa +main() diff --git a/kubernetes-worker/hooks/openstack-relation-joined b/kubernetes-worker/hooks/openstack-relation-joined new file mode 100755 index 0000000..9858c6b --- /dev/null +++ b/kubernetes-worker/hooks/openstack-relation-joined @@ -0,0 +1,22 @@ +#!/usr/bin/env python3 + +# Load modules from $JUJU_CHARM_DIR/lib +import sys +sys.path.append('lib') + +from charms.layer import basic # noqa +basic.bootstrap_charm_deps() + +from charmhelpers.core import hookenv # noqa +hookenv.atstart(basic.init_config_states) +hookenv.atexit(basic.clear_config_states) + + +# This will load and run the appropriate @hook and other decorated +# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive, +# and $JUJU_CHARM_DIR/hooks/relations. +# +# See https://jujucharms.com/docs/stable/authors-charm-building +# for more information on this pattern. +from charms.reactive import main # noqa +main() diff --git a/kubernetes-worker/hooks/post-series-upgrade b/kubernetes-worker/hooks/post-series-upgrade new file mode 100755 index 0000000..9858c6b --- /dev/null +++ b/kubernetes-worker/hooks/post-series-upgrade @@ -0,0 +1,22 @@ +#!/usr/bin/env python3 + +# Load modules from $JUJU_CHARM_DIR/lib +import sys +sys.path.append('lib') + +from charms.layer import basic # noqa +basic.bootstrap_charm_deps() + +from charmhelpers.core import hookenv # noqa +hookenv.atstart(basic.init_config_states) +hookenv.atexit(basic.clear_config_states) + + +# This will load and run the appropriate @hook and other decorated +# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive, +# and $JUJU_CHARM_DIR/hooks/relations. +# +# See https://jujucharms.com/docs/stable/authors-charm-building +# for more information on this pattern. 
+from charms.reactive import main # noqa +main() diff --git a/kubernetes-worker/hooks/pre-series-upgrade b/kubernetes-worker/hooks/pre-series-upgrade new file mode 100755 index 0000000..9858c6b --- /dev/null +++ b/kubernetes-worker/hooks/pre-series-upgrade @@ -0,0 +1,22 @@ +#!/usr/bin/env python3 + +# Load modules from $JUJU_CHARM_DIR/lib +import sys +sys.path.append('lib') + +from charms.layer import basic # noqa +basic.bootstrap_charm_deps() + +from charmhelpers.core import hookenv # noqa +hookenv.atstart(basic.init_config_states) +hookenv.atexit(basic.clear_config_states) + + +# This will load and run the appropriate @hook and other decorated +# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive, +# and $JUJU_CHARM_DIR/hooks/relations. +# +# See https://jujucharms.com/docs/stable/authors-charm-building +# for more information on this pattern. +from charms.reactive import main # noqa +main() diff --git a/kubernetes-worker/hooks/relations/aws-integration/.gitignore b/kubernetes-worker/hooks/relations/aws-integration/.gitignore new file mode 100644 index 0000000..ba1431e --- /dev/null +++ b/kubernetes-worker/hooks/relations/aws-integration/.gitignore @@ -0,0 +1,2 @@ +.tox +__pycache__ diff --git a/kubernetes-worker/hooks/relations/aws-integration/LICENSE b/kubernetes-worker/hooks/relations/aws-integration/LICENSE new file mode 100644 index 0000000..d645695 --- /dev/null +++ b/kubernetes-worker/hooks/relations/aws-integration/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. 
+ + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of 
the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/kubernetes-worker/hooks/relations/aws-integration/README.md b/kubernetes-worker/hooks/relations/aws-integration/README.md new file mode 100644 index 0000000..59abfcf --- /dev/null +++ b/kubernetes-worker/hooks/relations/aws-integration/README.md @@ -0,0 +1,28 @@ +# Overview + +This layer encapsulates the `aws-integration` interface communciation protocol +and provides an API for charms on either side of relations using this +interface. 
+ +## Usage + +In your charm's `layer.yaml`, ensure that `interface:aws-integration` is +included in the `includes` section: + +```yaml +includes: ['layer:basic', 'interface:aws-integration'] +``` + +And in your charm's `metadata.yaml`, ensure that a relation endpoint is defined +using the `aws-integration` interface protocol: + +```yaml +requires: + aws: + interface: aws-integration +``` + +For documentation on how to use the API for this interface, see: + +* [Requires API documentation](docs/requires.md) +* [Provides API documentation](docs/provides.md) (this will only be used by the aws-integrator charm) diff --git a/kubernetes-worker/hooks/relations/aws-integration/__init__.py b/kubernetes-worker/hooks/relations/aws-integration/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/kubernetes-worker/hooks/relations/aws-integration/copyright b/kubernetes-worker/hooks/relations/aws-integration/copyright new file mode 100644 index 0000000..a91bdf1 --- /dev/null +++ b/kubernetes-worker/hooks/relations/aws-integration/copyright @@ -0,0 +1,16 @@ +Format: http://dep.debian.net/deps/dep5/ + +Files: * +Copyright: Copyright 2018, Canonical Ltd., All Rights Reserved. +License: Apache License 2.0 + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + . + http://www.apache.org/licenses/LICENSE-2.0 + . + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/kubernetes-worker/hooks/relations/aws-integration/docs/provides.md b/kubernetes-worker/hooks/relations/aws-integration/docs/provides.md new file mode 100644 index 0000000..57ecb25 --- /dev/null +++ b/kubernetes-worker/hooks/relations/aws-integration/docs/provides.md @@ -0,0 +1,179 @@ +

provides

+ + +This is the provides side of the interface layer, for use only by the AWS +integrator charm itself. + +The flags that are set by the provides side of this interface are: + +* **`endpoint.{endpoint_name}.requested`** This flag is set when there is + a new or updated request by a remote unit for AWS integration features. + The AWS integration charm should then iterate over each request, perform + whatever actions are necessary to satisfy those requests, and then mark + them as complete. + +

AWSIntegrationProvides

+ +```python +AWSIntegrationProvides(self, endpoint_name, relation_ids=None) +``` + +Example usage: + +```python +from charms.reactive import when, endpoint_from_flag +from charms import layer + +@when('endpoint.aws.requested') +def handle_requests(): + aws = endpoint_from_flag('endpoint.aws.requested') + for request in aws.requests: + if request.instance_tags: + tag_instance( + request.instance_id, + request.region, + request.instance_tags) + if request.requested_load_balancer_management: + layer.aws.enable_load_balancer_management( + request.application_name, + request.instance_id, + request.region, + ) + # ... + request.mark_completed() +``` + +

application_names

+ + +Set of names of all applications that are still joined. + +

requests

+ + +A list of the new or updated `IntegrationRequests` that +have been made. + +

unit_instances

+ + +Mapping of unit names to instance IDs and regions for all joined units. + +

IntegrationRequest

+ +```python +IntegrationRequest(self, unit) +``` + +A request for integration from a single remote unit. + +

application_name

+ + +The name of the application making the request. + +

changed

+ + +Whether this request has changed since the last time it was +marked completed. + +

hash

+ + +SHA hash of the data for this request. + +

instance_id

+ + +The instance ID reported for this request. + +

instance_security_group_tags

+ + +Mapping of tag names to values (or `None`) to apply to this instance's +machine-specific security group (firewall). + +

instance_subnet_tags

+ + +Mapping of tag names to values (or `None`) to apply to this instance's +subnet. + +

instance_tags

+ + +Mapping of tag names to values (or `None`) to apply to this instance. + +

object_storage_access_patterns

+ + +List of patterns to which to restrict object storage access. + +

object_storage_management_patterns

+ + +List of patterns to which to restrict object storage management. + +

region

+ + +The region reported for this request. + +

requested_block_storage_management

+ + +Flag indicating whether block storage management was requested. + +

requested_dns_management

+ + +Flag indicating whether DNS management was requested. + +

requested_instance_inspection

+ + +Flag indicating whether the ability to inspect instances was requested. + +

requested_load_balancer_management

+ + +Flag indicating whether load balancer management was requested. + +

requested_network_management

+ + +Flag indicating whether the ability to manage networking (firewalls, +subnets, etc) was requested. + +

requested_object_storage_access

+ + +Flag indicating whether object storage access was requested. + +

requested_object_storage_management

+ + +Flag indicating whether object storage management was requested. + +

unit_name

+ + +The name of the unit making the request. + +

mark_completed

+ +```python +IntegrationRequest.mark_completed(self) +``` + +Mark this request as having been completed. + +

clear

+ +```python +IntegrationRequest.clear(self) +``` + +Clear this request's cached data. + diff --git a/kubernetes-worker/hooks/relations/aws-integration/docs/requires.md b/kubernetes-worker/hooks/relations/aws-integration/docs/requires.md new file mode 100644 index 0000000..41607f4 --- /dev/null +++ b/kubernetes-worker/hooks/relations/aws-integration/docs/requires.md @@ -0,0 +1,178 @@ +

requires

+ + +This is the requires side of the interface layer, for use in charms that +wish to request integration with AWS native features. The integration will +be provided by the AWS integration charm, which allows the requiring charm +to not require cloud credentials itself and not have a lot of AWS specific +API code. + +The flags that are set by the requires side of this interface are: + +* **`endpoint.{endpoint_name}.joined`** This flag is set when the relation + has been joined, and the charm should then use the methods documented below + to request specific AWS features. This flag is automatically removed if + the relation is broken. It should not be removed by the charm. + +* **`endpoint.{endpoint_name}.ready`** This flag is set once the requested + features have been enabled for the AWS instance on which the charm is + running. This flag is automatically removed if new integration features + are requested. It should not be removed by the charm. + +

AWSIntegrationRequires

+ +```python +AWSIntegrationRequires(self, *args, **kwargs) +``` + +Example usage: + +```python +from charms.reactive import when, endpoint_from_flag + +@when('endpoint.aws.joined') +def request_aws_integration(): + aws = endpoint_from_flag('endpoint.aws.joined') + aws.request_instance_tags({ + 'tag1': 'value1', + 'tag2': None, + }) + aws.request_load_balancer_management() + # ... + +@when('endpoint.aws.ready') +def aws_integration_ready(): + update_config_enable_aws() +``` + +

instance_id

+ + +This unit's instance-id. + +

region

+ + +The region this unit is in. + +

tag_instance

+ +```python +AWSIntegrationRequires.tag_instance(self, tags) +``` + +Request that the given tags be applied to this instance. + +__Parameters__ + +- __`tags` (dict)__: Mapping of tag names to values (or `None`). + +

tag_instance_security_group

+ +```python +AWSIntegrationRequires.tag_instance_security_group(self, tags) +``` + +Request that the given tags be applied to this instance's +machine-specific security group (firewall) created by Juju. + +__Parameters__ + +- __`tags` (dict)__: Mapping of tag names to values (or `None`). + +

tag_instance_subnet

+ +```python +AWSIntegrationRequires.tag_instance_subnet(self, tags) +``` + +Request that the given tags be applied to this instance's subnet. + +__Parameters__ + +- __`tags` (dict)__: Mapping of tag names to values (or `None`). + +

enable_acm_readonly

+ +```python +AWSIntegrationRequires.enable_acm_readonly(self) +``` + +Request readonly for ACM. + +

enable_acm_fullaccess

+ +```python +AWSIntegrationRequires.enable_acm_fullaccess(self) +``` + +Request fullaccess for ACM. + +

enable_instance_inspection

+ +```python +AWSIntegrationRequires.enable_instance_inspection(self) +``` + +Request the ability to inspect instances. + +

enable_network_management

+ +```python +AWSIntegrationRequires.enable_network_management(self) +``` + +Request the ability to manage networking (firewalls, subnets, etc). + +

enable_load_balancer_management

+ +```python +AWSIntegrationRequires.enable_load_balancer_management(self) +``` + +Request the ability to manage load balancers. + +

enable_block_storage_management

+ +```python +AWSIntegrationRequires.enable_block_storage_management(self) +``` + +Request the ability to manage block storage. + +

enable_dns_management

+ +```python +AWSIntegrationRequires.enable_dns_management(self) +``` + +Request the ability to manage DNS. + +

enable_object_storage_access

+ +```python +AWSIntegrationRequires.enable_object_storage_access(self, patterns=None) +``` + +Request the ability to access object storage. + +__Parameters__ + +- __`patterns` (list)__: If given, restrict access to the resources matching + the patterns. If patterns do not start with the S3 ARN prefix +- __(`arn__:aws:s3:::`), it will be prepended. + +

enable_object_storage_management

+ +```python +AWSIntegrationRequires.enable_object_storage_management(self, patterns=None) +``` + +Request the ability to manage object storage. + +__Parameters__ + +- __`patterns` (list)__: If given, restrict management to the resources + matching the patterns. If patterns do not start with the S3 ARN +- __prefix (`arn__:aws:s3:::`), it will be prepended. + diff --git a/kubernetes-worker/hooks/relations/aws-integration/interface.yaml b/kubernetes-worker/hooks/relations/aws-integration/interface.yaml new file mode 100644 index 0000000..fe3da6d --- /dev/null +++ b/kubernetes-worker/hooks/relations/aws-integration/interface.yaml @@ -0,0 +1,4 @@ +name: aws-integration +summary: Interface for connecting to the AWS integrator charm. +version: 1 +maintainer: Cory Johns diff --git a/kubernetes-worker/hooks/relations/aws-integration/make_docs b/kubernetes-worker/hooks/relations/aws-integration/make_docs new file mode 100644 index 0000000..72b69c2 --- /dev/null +++ b/kubernetes-worker/hooks/relations/aws-integration/make_docs @@ -0,0 +1,20 @@ +#!.tox/py3/bin/python + +import sys +from shutil import rmtree +from unittest.mock import patch + +import pydocmd.__main__ + + +with patch('charmhelpers.core.hookenv.metadata') as metadata: + metadata.return_value = { + 'requires': {'aws': {'interface': 'aws-integration'}}, + 'provides': {'aws': {'interface': 'aws-integration'}}, + } + sys.path.insert(0, '.') + print(sys.argv) + if len(sys.argv) == 1: + sys.argv.extend(['build']) + pydocmd.__main__.main() + rmtree('_build') diff --git a/kubernetes-worker/hooks/relations/aws-integration/provides.py b/kubernetes-worker/hooks/relations/aws-integration/provides.py new file mode 100644 index 0000000..ae94211 --- /dev/null +++ b/kubernetes-worker/hooks/relations/aws-integration/provides.py @@ -0,0 +1,288 @@ +""" +This is the provides side of the interface layer, for use only by the AWS +integrator charm itself. 
+ +The flags that are set by the provides side of this interface are: + +* **`endpoint.{endpoint_name}.requested`** This flag is set when there is + a new or updated request by a remote unit for AWS integration features. + The AWS integration charm should then iterate over each request, perform + whatever actions are necessary to satisfy those requests, and then mark + them as complete. +""" + +import json +from hashlib import sha256 + +from charmhelpers.core import unitdata + +from charms.reactive import Endpoint +from charms.reactive import when +from charms.reactive import toggle_flag, clear_flag + + +class AWSIntegrationProvides(Endpoint): + """ + Example usage: + + ```python + from charms.reactive import when, endpoint_from_flag + from charms import layer + + @when('endpoint.aws.requested') + def handle_requests(): + aws = endpoint_from_flag('endpoint.aws.requested') + for request in aws.requests: + if request.instance_tags: + tag_instance( + request.instance_id, + request.region, + request.instance_tags) + if request.requested_load_balancer_management: + layer.aws.enable_load_balancer_management( + request.application_name, + request.instance_id, + request.region, + ) + # ... + request.mark_completed() + ``` + """ + + @when('endpoint.{endpoint_name}.changed') + def check_requests(self): + requests = self.requests + toggle_flag(self.expand_name('requested'), len(requests) > 0) + clear_flag(self.expand_name('changed')) + + @when('endpoint.{endpoint_name}.departed') + def cleanup(self): + for unit in self.all_departed_units: + request = IntegrationRequest(unit) + request.clear() + self.all_departed_units.clear() + clear_flag(self.expand_name('departed')) + + @property + def requests(self): + """ + A list of the new or updated #IntegrationRequests that + have been made. 
+ """ + return [request for request in self.all_requests if request.changed] + + @property + def all_requests(self): + """ + A list of all the #IntegrationRequests that have been made, + even if unchanged. + """ + return [IntegrationRequest(unit) for unit in self.all_joined_units] + + @property + def application_names(self): + """ + Set of names of all applications that are still joined. + """ + return {unit.application_name for unit in self.all_joined_units} + + @property + def unit_instances(self): + """ + Mapping of unit names to instance IDs and regions for all joined units. + """ + return { + unit.unit_name: { + 'instance-id': unit.received['instance-id'], + 'region': unit.received['region'], + } for unit in self.all_joined_units + } + + +class IntegrationRequest: + """ + A request for integration from a single remote unit. + """ + def __init__(self, unit): + self._unit = unit + self._hash = sha256(json.dumps(dict(unit.received), + sort_keys=True).encode('utf8') + ).hexdigest() + + @property + def hash(self): + """ + SHA hash of the data for this request. + """ + return self._hash + + @property + def _hash_key(self): + endpoint = self._unit.relation.endpoint + return endpoint.expand_name('request.{}'.format(self.instance_id)) + + @property + def changed(self): + """ + Whether this request has changed since the last time it was + marked completed. + """ + if not (self.instance_id and self._requested): + return False + saved_hash = unitdata.kv().get(self._hash_key) + result = saved_hash != self.hash + return result + + def mark_completed(self): + """ + Mark this request as having been completed. + """ + completed = self._unit.relation.to_publish.get('completed', {}) + completed[self.instance_id] = self.hash + unitdata.kv().set(self._hash_key, self.hash) + self._unit.relation.to_publish['completed'] = completed + + def clear(self): + """ + Clear this request's cached data. 
+ """ + unitdata.kv().unset(self._hash_key) + + @property + def unit_name(self): + """ + The name of the unit making the request. + """ + return self._unit.unit_name + + @property + def application_name(self): + """ + The name of the application making the request. + """ + return self._unit.application_name + + @property + def _requested(self): + return self._unit.received['requested'] + + @property + def instance_id(self): + """ + The instance ID reported for this request. + """ + return self._unit.received['instance-id'] + + @property + def region(self): + """ + The region reported for this request. + """ + return self._unit.received['region'] + + @property + def instance_tags(self): + """ + Mapping of tag names to values (or `None`) to apply to this instance. + """ + # uses dict() here to make a copy, just to be safe + return dict(self._unit.received.get('instance-tags', {})) + + @property + def instance_security_group_tags(self): + """ + Mapping of tag names to values (or `None`) to apply to this instance's + machine-specific security group (firewall). + """ + # uses dict() here to make a copy, just to be safe + return dict(self._unit.received.get('instance-security-group-tags', + {})) + + @property + def instance_subnet_tags(self): + """ + Mapping of tag names to values (or `None`) to apply to this instance's + subnet. + """ + # uses dict() here to make a copy, just to be safe + return dict(self._unit.received.get('instance-subnet-tags', {})) + + @property + def requested_instance_inspection(self): + """ + Flag indicating whether the ability to inspect instances was requested. + """ + return bool(self._unit.received['enable-instance-inspection']) + + @property + def requested_acm_readonly(self): + """ + Flag indicating whether acm readonly was requested. + """ + return bool(self._unit.received['enable-acm-readonly']) + + @property + def requested_acm_fullaccess(self): + """ + Flag indicating whether acm fullaccess was requested. 
+ """ + return bool(self._unit.received['enable-acm-fullaccess']) + + @property + def requested_network_management(self): + """ + Flag indicating whether the ability to manage networking (firewalls, + subnets, etc) was requested. + """ + return bool(self._unit.received['enable-network-management']) + + @property + def requested_load_balancer_management(self): + """ + Flag indicating whether load balancer management was requested. + """ + return bool(self._unit.received['enable-load-balancer-management']) + + @property + def requested_block_storage_management(self): + """ + Flag indicating whether block storage management was requested. + """ + return bool(self._unit.received['enable-block-storage-management']) + + @property + def requested_dns_management(self): + """ + Flag indicating whether DNS management was requested. + """ + return bool(self._unit.received['enable-dns-management']) + + @property + def requested_object_storage_access(self): + """ + Flag indicating whether object storage access was requested. + """ + return bool(self._unit.received['enable-object-storage-access']) + + @property + def object_storage_access_patterns(self): + """ + List of patterns to which to restrict object storage access. + """ + return list( + self._unit.received['object-storage-access-patterns'] or []) + + @property + def requested_object_storage_management(self): + """ + Flag indicating whether object storage management was requested. + """ + return bool(self._unit.received['enable-object-storage-management']) + + @property + def object_storage_management_patterns(self): + """ + List of patterns to which to restrict object storage management. 
+ """ + return list( + self._unit.received['object-storage-management-patterns'] or []) diff --git a/kubernetes-worker/hooks/relations/aws-integration/pydocmd.yml b/kubernetes-worker/hooks/relations/aws-integration/pydocmd.yml new file mode 100644 index 0000000..70a2e75 --- /dev/null +++ b/kubernetes-worker/hooks/relations/aws-integration/pydocmd.yml @@ -0,0 +1,16 @@ +site_name: 'AWS Integration Interface' + +generate: + - requires.md: + - requires + - requires.AWSIntegrationRequires+ + - provides.md: + - provides + - provides.AWSIntegrationProvides+ + - provides.IntegrationRequest+ + +pages: + - Requires: requires.md + - Provides: provides.md + +gens_dir: docs diff --git a/kubernetes-worker/hooks/relations/aws-integration/requires.py b/kubernetes-worker/hooks/relations/aws-integration/requires.py new file mode 100644 index 0000000..c457e02 --- /dev/null +++ b/kubernetes-worker/hooks/relations/aws-integration/requires.py @@ -0,0 +1,262 @@ +""" +This is the requires side of the interface layer, for use in charms that +wish to request integration with AWS native features. The integration will +be provided by the AWS integration charm, which allows the requiring charm +to not require cloud credentials itself and not have a lot of AWS specific +API code. + +The flags that are set by the requires side of this interface are: + +* **`endpoint.{endpoint_name}.joined`** This flag is set when the relation + has been joined, and the charm should then use the methods documented below + to request specific AWS features. This flag is automatically removed if + the relation is broken. It should not be removed by the charm. + +* **`endpoint.{endpoint_name}.ready`** This flag is set once the requested + features have been enabled for the AWS instance on which the charm is + running. This flag is automatically removed if new integration features + are requested. It should not be removed by the charm. 
+""" + + +import json +import string +from hashlib import sha256 +from urllib.parse import urljoin +from urllib.request import urlopen + +from charmhelpers.core import unitdata + +from charms.reactive import Endpoint +from charms.reactive import when, when_not +from charms.reactive import clear_flag, toggle_flag + + +# block size to read data from AWS metadata service +# (realistically, just needs to be bigger than ~20 chars) +READ_BLOCK_SIZE = 2048 + + +class AWSIntegrationRequires(Endpoint): + """ + Example usage: + + ```python + from charms.reactive import when, endpoint_from_flag + + @when('endpoint.aws.joined') + def request_aws_integration(): + aws = endpoint_from_flag('endpoint.aws.joined') + aws.request_instance_tags({ + 'tag1': 'value1', + 'tag2': None, + }) + aws.request_load_balancer_management() + # ... + + @when('endpoint.aws.ready') + def aws_integration_ready(): + update_config_enable_aws() + ``` + """ + # the IP is the AWS metadata service, documented here: + # https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-instance-metadata.html + _metadata_url = 'http://169.254.169.254/latest/meta-data/' + _instance_id_url = urljoin(_metadata_url, 'instance-id') + _az_url = urljoin(_metadata_url, 'placement/availability-zone') + + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + self._instance_id = None + self._region = None + + @property + def _received(self): + """ + Helper to streamline access to received data since we expect to only + ever be connected to a single AWS integration application with a + single unit. + """ + return self.relations[0].joined_units.received + + @property + def _to_publish(self): + """ + Helper to streamline access to received data since we expect to only + ever be connected to a single AWS integration application with a + single unit. 
+ """ + return self.relations[0].to_publish + + @when('endpoint.{endpoint_name}.joined') + def send_instance_info(self): + self._to_publish['instance-id'] = self.instance_id + self._to_publish['region'] = self.region + + @when('endpoint.{endpoint_name}.changed') + def check_ready(self): + completed = self._received.get('completed', {}) + actual_hash = completed.get(self.instance_id) + # My middle name is ready. No, that doesn't sound right. + # I eat ready for breakfast. + toggle_flag(self.expand_name('ready'), + self._requested and actual_hash == self._expected_hash) + clear_flag(self.expand_name('changed')) + + @when_not('endpoint.{endpoint_name}.joined') + def remove_ready(self): + clear_flag(self.expand_name('ready')) + + @property + def instance_id(self): + """ + This unit's instance-id. + """ + if self._instance_id is None: + cache_key = self.expand_name('instance-id') + cached = unitdata.kv().get(cache_key) + if cached: + self._instance_id = cached + else: + with urlopen(self._instance_id_url) as fd: + self._instance_id = fd.read(READ_BLOCK_SIZE).decode('utf8') + unitdata.kv().set(cache_key, self._instance_id) + return self._instance_id + + @property + def region(self): + """ + The region this unit is in. 
+ """ + if self._region is None: + cache_key = self.expand_name('region') + cached = unitdata.kv().get(cache_key) + if cached: + self._region = cached + else: + with urlopen(self._az_url) as fd: + az = fd.read(READ_BLOCK_SIZE).decode('utf8') + self._region = az.rstrip(string.ascii_lowercase) + unitdata.kv().set(cache_key, self._region) + return self._region + + @property + def _expected_hash(self): + return sha256(json.dumps(dict(self._to_publish), + sort_keys=True).encode('utf8')).hexdigest() + + @property + def _requested(self): + # whether or not a request has been issued + return self._to_publish['requested'] + + def _request(self, keyvals): + self._to_publish.update(keyvals) + self._to_publish['requested'] = True + clear_flag(self.expand_name('ready')) + + def tag_instance(self, tags): + """ + Request that the given tags be applied to this instance. + + # Parameters + `tags` (dict): Mapping of tag names to values (or `None`). + """ + self._request({'instance-tags': dict(tags)}) + + def tag_instance_security_group(self, tags): + """ + Request that the given tags be applied to this instance's + machine-specific security group (firewall) created by Juju. + + # Parameters + `tags` (dict): Mapping of tag names to values (or `None`). + """ + self._request({'instance-security-group-tags': dict(tags)}) + + def tag_instance_subnet(self, tags): + """ + Request that the given tags be applied to this instance's subnet. + + # Parameters + `tags` (dict): Mapping of tag names to values (or `None`). + """ + self._request({'instance-subnet-tags': dict(tags)}) + + def enable_acm_readonly(self): + """ + Request readonly for ACM. + """ + self._request({'enable-acm-readonly': True}) + + def enable_acm_fullaccess(self): + """ + Request fullaccess for ACM. + """ + self._request({'enable-acm-fullaccess': True}) + + def enable_instance_inspection(self): + """ + Request the ability to inspect instances. 
+ """ + self._request({'enable-instance-inspection': True}) + + def enable_network_management(self): + """ + Request the ability to manage networking (firewalls, subnets, etc). + """ + self._request({'enable-network-management': True}) + + def enable_load_balancer_management(self): + """ + Request the ability to manage load balancers. + """ + self._request({'enable-load-balancer-management': True}) + + def enable_block_storage_management(self): + """ + Request the ability to manage block storage. + """ + self._request({'enable-block-storage-management': True}) + + def enable_dns_management(self): + """ + Request the ability to manage DNS. + """ + self._request({'enable-dns-management': True}) + + def enable_object_storage_access(self, patterns=None): + """ + Request the ability to access object storage. + + # Parameters + `patterns` (list): If given, restrict access to the resources matching + the patterns. If patterns do not start with the S3 ARN prefix + (`arn:aws:s3:::`), it will be prepended. + """ + if patterns: + for i, pattern in enumerate(patterns): + if not pattern.startswith('arn:aws:s3:::'): + patterns[i] = 'arn:aws:s3:::{}'.format(pattern) + self._request({ + 'enable-object-storage-access': True, + 'object-storage-access-patterns': patterns, + }) + + def enable_object_storage_management(self, patterns=None): + """ + Request the ability to manage object storage. + + # Parameters + `patterns` (list): If given, restrict management to the resources + matching the patterns. If patterns do not start with the S3 ARN + prefix (`arn:aws:s3:::`), it will be prepended. 
+ """ + if patterns: + for i, pattern in enumerate(patterns): + if not pattern.startswith('arn:aws:s3:::'): + patterns[i] = 'arn:aws:s3:::{}'.format(pattern) + self._request({ + 'enable-object-storage-management': True, + 'object-storage-management-patterns': patterns, + }) diff --git a/kubernetes-worker/hooks/relations/azure-integration/.gitignore b/kubernetes-worker/hooks/relations/azure-integration/.gitignore new file mode 100644 index 0000000..5f9f2c5 --- /dev/null +++ b/kubernetes-worker/hooks/relations/azure-integration/.gitignore @@ -0,0 +1,3 @@ +.tox +__pycache__ +*.pyc diff --git a/kubernetes-worker/hooks/relations/azure-integration/LICENSE b/kubernetes-worker/hooks/relations/azure-integration/LICENSE new file mode 100644 index 0000000..d645695 --- /dev/null +++ b/kubernetes-worker/hooks/relations/azure-integration/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. 
+ + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. 
You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. 
Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/kubernetes-worker/hooks/relations/azure-integration/README.md b/kubernetes-worker/hooks/relations/azure-integration/README.md new file mode 100644 index 0000000..ddcae26 --- /dev/null +++ b/kubernetes-worker/hooks/relations/azure-integration/README.md @@ -0,0 +1,28 @@ +# Overview + +This layer encapsulates the `azure-integration` interface communication +protocol and provides an API for charms on either side of relations using this +interface. 
+ +## Usage + +In your charm's `layer.yaml`, ensure that `interface:azure-integration` is +included in the `includes` section: + +```yaml +includes: ['layer:basic', 'interface:azure-integration'] +``` + +And in your charm's `metadata.yaml`, ensure that a relation endpoint is defined +using the `azure-integration` interface protocol: + +```yaml +requires: + azure: + interface: azure-integration +``` + +For documentation on how to use the API for this interface, see: + +* [Requires API documentation](docs/requires.md) +* [Provides API documentation](docs/provides.md) (this will only be used by the azure-integrator charm) diff --git a/kubernetes-worker/hooks/relations/azure-integration/__init__.py b/kubernetes-worker/hooks/relations/azure-integration/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/kubernetes-worker/hooks/relations/azure-integration/copyright b/kubernetes-worker/hooks/relations/azure-integration/copyright new file mode 100644 index 0000000..a91bdf1 --- /dev/null +++ b/kubernetes-worker/hooks/relations/azure-integration/copyright @@ -0,0 +1,16 @@ +Format: http://dep.debian.net/deps/dep5/ + +Files: * +Copyright: Copyright 2018, Canonical Ltd., All Rights Reserved. +License: Apache License 2.0 + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + . + http://www.apache.org/licenses/LICENSE-2.0 + . + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/kubernetes-worker/hooks/relations/azure-integration/docs/provides.md b/kubernetes-worker/hooks/relations/azure-integration/docs/provides.md new file mode 100644 index 0000000..4348dff --- /dev/null +++ b/kubernetes-worker/hooks/relations/azure-integration/docs/provides.md @@ -0,0 +1,175 @@ +

provides

+ + +This is the provides side of the interface layer, for use only by the Azure +integrator charm itself. + +The flags that are set by the provides side of this interface are: + +* **`endpoint.{endpoint_name}.requested`** This flag is set when there is + a new or updated request by a remote unit for Azure integration features. + The Azure integration charm should then iterate over each request, perform + whatever actions are necessary to satisfy those requests, and then mark + them as complete. + +

AzureIntegrationProvides

+ +```python +AzureIntegrationProvides(self, endpoint_name, relation_ids=None) +``` + +Example usage: + +```python +from charms.reactive import when, endpoint_from_flag +from charms import layer + +@when('endpoint.azure.requests-pending') +def handle_requests(): + azure = endpoint_from_flag('endpoint.azure.requests-pending') + for request in azure.requests: + if request.instance_tags: + layer.azure.tag_instance( + request.vm_name, + request.resource_group, + request.instance_tags) + if request.requested_load_balancer_management: + layer.azure.enable_load_balancer_management( + request.charm, + request.vm_name, + request.resource_group, + ) + # ... + azure.mark_completed() +``` + +

relation_ids

+ + +A list of the IDs of all established relations. + +

requests

+ + +A list of the new or updated `IntegrationRequests` that +have been made. + +

get_departed_charms

+ +```python +AzureIntegrationProvides.get_departed_charms(self) +``` + +Get a list of all charms that have had all units depart since the +last time this was called. + +

mark_completed

+ +```python +AzureIntegrationProvides.mark_completed(self) +``` + +Mark all requests as completed and remove the `requests-pending` flag. + +

IntegrationRequest

+ +```python +IntegrationRequest(self, unit) +``` + +A request for integration from a single remote unit. + +

application_name

+ + +The name of the application making the request. + +

charm

+ + +The charm name reported for this request. + +

instance_tags

+ + +Mapping of tag names to values to apply to this instance. + +

is_changed

+ + +Whether this request has changed since the last time it was +marked completed (if ever). + +

model_uuid

+ + +The UUID of the model containing the application making this request. + +

relation_id

+ + +The ID of the relation for the unit making the request. + +

requested_block_storage_management

+ + +Flag indicating whether block storage management was requested. + +

requested_dns_management

+ + +Flag indicating whether DNS management was requested. + +

requested_instance_inspection

+ + +Flag indicating whether the ability to inspect instances was requested. + +

requested_network_management

+ + +Flag indicating whether the ability to manage networking was requested. + +

requested_object_storage_access

+ + +Flag indicating whether object storage access was requested. + +

requested_object_storage_management

+ + +Flag indicating whether object storage management was requested. + +

requested_security_management

+ + +Flag indicating whether security management was requested. + +

resource_group

+ + +The resource group reported for this request. + +

unit_name

+ + +The name of the unit making the request. + +

vm_id

+ + +The instance ID reported for this request. + +

vm_name

+ + +The instance name reported for this request. + +

mark_completed

+ +```python +IntegrationRequest.mark_completed(self) +``` + +Mark this request as having been completed. + diff --git a/kubernetes-worker/hooks/relations/azure-integration/docs/requires.md b/kubernetes-worker/hooks/relations/azure-integration/docs/requires.md new file mode 100644 index 0000000..608b4ee --- /dev/null +++ b/kubernetes-worker/hooks/relations/azure-integration/docs/requires.md @@ -0,0 +1,145 @@ +

requires

+ + +This is the requires side of the interface layer, for use in charms that +wish to request integration with Azure native features. The integration will +be provided by the Azure integrator charm, which allows the requiring charm +to not require cloud credentials itself and not have a lot of Azure specific +API code. + +The flags that are set by the requires side of this interface are: + +* **`endpoint.{endpoint_name}.joined`** This flag is set when the relation + has been joined, and the charm should then use the methods documented below + to request specific Azure features. This flag is automatically removed if + the relation is broken. It should not be removed by the charm. + +* **`endpoint.{endpoint_name}.ready`** This flag is set once the requested + features have been enabled for the Azure instance on which the charm is + running. This flag is automatically removed if new integration features + are requested. It should not be removed by the charm. + +

AzureIntegrationRequires

+ +```python +AzureIntegrationRequires(self, *args, **kwargs) +``` + +Interface to request integration access. + +Note that due to resource limits and permissions granularity, policies are +limited to being applied at the charm level. That means that, if any +permissions are requested (i.e., any of the enable methods are called), +what is granted will be the sum of those ever requested by any instance of +the charm on this cloud. + +Labels, on the other hand, will be instance specific. + +Example usage: + +```python +from charms.reactive import when, endpoint_from_flag + +@when('endpoint.azure.joined') +def request_azure_integration(): + azure = endpoint_from_flag('endpoint.azure.joined') + azure.tag_instance({ + 'tag1': 'value1', + 'tag2': None, + }) + azure.request_load_balancer_management() + # ... + +@when('endpoint.azure.ready') +def azure_integration_ready(): + update_config_enable_azure() +``` + +

is_ready

+ + +Whether or not the request for this instance has been completed. + +

resource_group

+ + +The resource group this unit is in. + +

vm_id

+ + +This unit's instance ID. + +

vm_name

+ + +This unit's instance name. + +

tag_instance

+ +```python +AzureIntegrationRequires.tag_instance(self, tags) +``` + +Request that the given tags be applied to this instance. + +__Parameters__ + +- __`tags` (dict)__: Mapping of tag names to values. + 

enable_instance_inspection

+ +```python +AzureIntegrationRequires.enable_instance_inspection(self) +``` + +Request the ability to inspect instances. + +

enable_network_management

+ +```python +AzureIntegrationRequires.enable_network_management(self) +``` + +Request the ability to manage networking. + +

enable_security_management

+ +```python +AzureIntegrationRequires.enable_security_management(self) +``` + +Request the ability to manage security (e.g., firewalls). + +

enable_block_storage_management

+ +```python +AzureIntegrationRequires.enable_block_storage_management(self) +``` + +Request the ability to manage block storage. + +

enable_dns_management

+ +```python +AzureIntegrationRequires.enable_dns_management(self) +``` + +Request the ability to manage DNS. + +

enable_object_storage_access

+ +```python +AzureIntegrationRequires.enable_object_storage_access(self) +``` + +Request the ability to access object storage. + +

enable_object_storage_management

+ +```python +AzureIntegrationRequires.enable_object_storage_management(self) +``` + +Request the ability to manage object storage. + diff --git a/kubernetes-worker/hooks/relations/azure-integration/interface.yaml b/kubernetes-worker/hooks/relations/azure-integration/interface.yaml new file mode 100644 index 0000000..a77a7cb --- /dev/null +++ b/kubernetes-worker/hooks/relations/azure-integration/interface.yaml @@ -0,0 +1,4 @@ +name: azure-integration +summary: Interface for connecting to the Azure integrator charm. +version: 1 +maintainer: Cory Johns diff --git a/kubernetes-worker/hooks/relations/azure-integration/make_docs b/kubernetes-worker/hooks/relations/azure-integration/make_docs new file mode 100644 index 0000000..84df5ee --- /dev/null +++ b/kubernetes-worker/hooks/relations/azure-integration/make_docs @@ -0,0 +1,20 @@ +#!.tox/py3/bin/python + +import sys +from shutil import rmtree +from unittest.mock import patch + +import pydocmd.__main__ + + +with patch('charmhelpers.core.hookenv.metadata') as metadata: + metadata.return_value = { + 'requires': {'azure': {'interface': 'azure-integration'}}, + 'provides': {'azure': {'interface': 'azure-integration'}}, + } + sys.path.insert(0, '.') + print(sys.argv) + if len(sys.argv) == 1: + sys.argv.extend(['build']) + pydocmd.__main__.main() + rmtree('_build') diff --git a/kubernetes-worker/hooks/relations/azure-integration/provides.py b/kubernetes-worker/hooks/relations/azure-integration/provides.py new file mode 100644 index 0000000..5ff7d3a --- /dev/null +++ b/kubernetes-worker/hooks/relations/azure-integration/provides.py @@ -0,0 +1,275 @@ +""" +This is the provides side of the interface layer, for use only by the Azure +integrator charm itself. + +The flags that are set by the provides side of this interface are: + +* **`endpoint.{endpoint_name}.requested`** This flag is set when there is + a new or updated request by a remote unit for Azure integration features. 
+ The Azure integration charm should then iterate over each request, perform + whatever actions are necessary to satisfy those requests, and then mark + them as complete. +""" + +from operator import attrgetter + +from charms.reactive import Endpoint +from charms.reactive import when +from charms.reactive import toggle_flag, clear_flag + + +class AzureIntegrationProvides(Endpoint): + """ + Example usage: + + ```python + from charms.reactive import when, endpoint_from_flag + from charms import layer + + @when('endpoint.azure.requests-pending') + def handle_requests(): + azure = endpoint_from_flag('endpoint.azure.requests-pending') + for request in azure.requests: + if request.instance_tags: + layer.azure.tag_instance( + request.vm_name, + request.resource_group, + request.instance_tags) + if request.requested_load_balancer_management: + layer.azure.enable_load_balancer_management( + request.charm, + request.vm_name, + request.resource_group, + ) + # ... + azure.mark_completed() + ``` + """ + + @when('endpoint.{endpoint_name}.changed') + def check_requests(self): + toggle_flag(self.expand_name('requests-pending'), + len(self.requests) > 0) + clear_flag(self.expand_name('changed')) + + @property + def requests(self): + """ + A list of the new or updated #IntegrationRequests that + have been made. + """ + if not hasattr(self, '_requests'): + all_requests = [IntegrationRequest(unit) + for unit in self.all_joined_units] + is_changed = attrgetter('is_changed') + self._requests = list(filter(is_changed, all_requests)) + return self._requests + + @property + def relation_ids(self): + """ + A list of the IDs of all established relations. + """ + return [relation.relation_id for relation in self.relations] + + def get_departed_charms(self): + """ + Get a list of all charms that have had all units depart since the + last time this was called. 
+ """ + joined_charms = {unit.received['charm'] + for unit in self.all_joined_units + if unit.received['charm']} + departed_charms = [unit.received['charm'] + for unit in self.all_departed_units + if unit.received['charm'] not in joined_charms] + self.all_departed_units.clear() + return departed_charms + + def mark_completed(self): + """ + Mark all requests as completed and remove the `requests-pending` flag. + """ + for request in self.requests: + request.mark_completed() + clear_flag(self.expand_name('requests-pending')) + self._requests = [] + + +class IntegrationRequest: + """ + A request for integration from a single remote unit. + """ + def __init__(self, unit): + self._unit = unit + + @property + def _to_publish(self): + return self._unit.relation.to_publish + + @property + def _completed(self): + return self._to_publish.get('completed', {}) + + @property + def _requested(self): + return self._unit.received['requested'] + + @property + def is_changed(self): + """ + Whether this request has changed since the last time it was + marked completed (if ever). + """ + if not all([self.charm, self.vm_id, self.vm_name, + self.resource_group, self._requested]): + return False + return self._completed.get(self.vm_id) != self._requested + + def mark_completed(self): + """ + Mark this request as having been completed. 
+ """ + completed = self._completed + completed[self.vm_id] = self._requested + self._to_publish['completed'] = completed # have to explicitly update + + def send_additional_metadata(self, resource_group_location, + vnet_name, vnet_resource_group, + subnet_name, security_group_name, + security_group_resource_group, + use_managed_identity=True, aad_client=None, + aad_secret=None, tenant_id=None): + self._to_publish.update({ + 'resource-group-location': resource_group_location, + 'vnet-name': vnet_name, + 'vnet-resource-group': vnet_resource_group, + 'subnet-name': subnet_name, + 'security-group-name': security_group_name, + 'security-group-resource-group': security_group_resource_group, + 'use-managed-identity': use_managed_identity, + 'aad-client': aad_client, + 'aad-client-secret': aad_secret, + 'tenant-id': tenant_id + }) + + @property + def relation_id(self): + """ + The ID of the relation for the unit making the request. + """ + return self._unit.relation.relation_id + + @property + def unit_name(self): + """ + The name of the unit making the request. + """ + return self._unit.unit_name + + @property + def application_name(self): + """ + The name of the application making the request. + """ + return self._unit.application_name + + @property + def charm(self): + """ + The charm name reported for this request. + """ + return self._unit.received['charm'] + + @property + def vm_id(self): + """ + The instance ID reported for this request. + """ + return self._unit.received['vm-id'] + + @property + def vm_name(self): + """ + The instance name reported for this request. + """ + return self._unit.received['vm-name'] + + @property + def resource_group(self): + """ + The resource group reported for this request. + """ + return self._unit.received['res-group'] + + @property + def model_uuid(self): + """ + The UUID of the model containing the application making this request. 
+ """ + return self._unit.received['model-uuid'] + + @property + def instance_tags(self): + """ + Mapping of tag names to values to apply to this instance. + """ + # uses dict() here to make a copy, just to be safe + return dict(self._unit.received.get('instance-tags', {})) + + @property + def requested_instance_inspection(self): + """ + Flag indicating whether the ability to inspect instances was requested. + """ + return bool(self._unit.received['enable-instance-inspection']) + + @property + def requested_network_management(self): + """ + Flag indicating whether the ability to manage networking was requested. + """ + return bool(self._unit.received['enable-network-management']) + + @property + def requested_loadbalancer_management(self): + """ + Flag indicating whether the ability to manage load balancers was requested. + """ + return bool(self._unit.received['enable-loadbalancer-management']) + + + @property + def requested_security_management(self): + """ + Flag indicating whether security management was requested. + """ + return bool(self._unit.received['enable-security-management']) + + @property + def requested_block_storage_management(self): + """ + Flag indicating whether block storage management was requested. + """ + return bool(self._unit.received['enable-block-storage-management']) + + @property + def requested_dns_management(self): + """ + Flag indicating whether DNS management was requested. + """ + return bool(self._unit.received['enable-dns-management']) + + @property + def requested_object_storage_access(self): + """ + Flag indicating whether object storage access was requested. + """ + return bool(self._unit.received['enable-object-storage-access']) + + @property + def requested_object_storage_management(self): + """ + Flag indicating whether object storage management was requested. 
+ """ + return bool(self._unit.received['enable-object-storage-management']) diff --git a/kubernetes-worker/hooks/relations/azure-integration/pydocmd.yml b/kubernetes-worker/hooks/relations/azure-integration/pydocmd.yml new file mode 100644 index 0000000..6414c29 --- /dev/null +++ b/kubernetes-worker/hooks/relations/azure-integration/pydocmd.yml @@ -0,0 +1,16 @@ +site_name: 'Azure Integration Interface' + +generate: + - requires.md: + - requires + - requires.AzureIntegrationRequires+ + - provides.md: + - provides + - provides.AzureIntegrationProvides+ + - provides.IntegrationRequest+ + +pages: + - Requires: requires.md + - Provides: provides.md + +gens_dir: docs diff --git a/kubernetes-worker/hooks/relations/azure-integration/requires.py b/kubernetes-worker/hooks/relations/azure-integration/requires.py new file mode 100644 index 0000000..600d69e --- /dev/null +++ b/kubernetes-worker/hooks/relations/azure-integration/requires.py @@ -0,0 +1,298 @@ +""" +This is the requires side of the interface layer, for use in charms that +wish to request integration with Azure native features. The integration will +be provided by the Azure integrator charm, which allows the requiring charm +to not require cloud credentials itself and not have a lot of Azure specific +API code. + +The flags that are set by the requires side of this interface are: + +* **`endpoint.{endpoint_name}.joined`** This flag is set when the relation + has been joined, and the charm should then use the methods documented below + to request specific Azure features. This flag is automatically removed if + the relation is broken. It should not be removed by the charm. + +* **`endpoint.{endpoint_name}.ready`** This flag is set once the requested + features have been enabled for the Azure instance on which the charm is + running. This flag is automatically removed if new integration features + are requested. It should not be removed by the charm. 
+""" + + +import json +import os +import random +import string +from urllib.request import urlopen, Request + +from charmhelpers.core import hookenv +from charmhelpers.core import unitdata + +from charms.reactive import Endpoint +from charms.reactive import when, when_not +from charms.reactive import clear_flag, toggle_flag + + +# block size to read data from Azure metadata service +# (realistically, just needs to be bigger than ~20 chars) +READ_BLOCK_SIZE = 2048 + + +class AzureIntegrationRequires(Endpoint): + """ + Interface to request integration access. + + Note that due to resource limits and permissions granularity, policies are + limited to being applied at the charm level. That means that, if any + permissions are requested (i.e., any of the enable methods are called), + what is granted will be the sum of those ever requested by any instance of + the charm on this cloud. + + Labels, on the other hand, will be instance specific. + + Example usage: + + ```python + from charms.reactive import when, endpoint_from_flag + + @when('endpoint.azure.joined') + def request_azure_integration(): + azure = endpoint_from_flag('endpoint.azure.joined') + azure.tag_instance({ + 'tag1': 'value1', + 'tag2': None, + }) + azure.request_load_balancer_management() + # ... + + @when('endpoint.azure.ready') + def azure_integration_ready(): + update_config_enable_azure() + ``` + """ + # https://docs.microsoft.com/en-us/azure/virtual-machines/windows/instance-metadata-service + _metadata_url = 'http://169.254.169.254/metadata/instance?api-version=2017-12-01' # noqa + _metadata_headers = {'Metadata': 'true'} + + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + self._vm_metadata = None + + @property + def _received(self): + """ + Helper to streamline access to received data since we expect to only + ever be connected to a single Azure integration application with a + single unit. 
+ """ + return self.relations[0].joined_units.received + + @property + def _to_publish(self): + """ + Helper to streamline access to received data since we expect to only + ever be connected to a single Azure integration application with a + single unit. + """ + return self.relations[0].to_publish + + @when('endpoint.{endpoint_name}.joined') + def send_instance_info(self): + self._to_publish['charm'] = hookenv.charm_name() + self._to_publish['vm-id'] = self.vm_id + self._to_publish['vm-name'] = self.vm_name + self._to_publish['res-group'] = self.resource_group + self._to_publish['model-uuid'] = os.environ['JUJU_MODEL_UUID'] + + @when('endpoint.{endpoint_name}.changed') + def check_ready(self): + # My middle name is ready. No, that doesn't sound right. + # I eat ready for breakfast. + toggle_flag(self.expand_name('ready'), self.is_ready) + clear_flag(self.expand_name('changed')) + + @when_not('endpoint.{endpoint_name}.joined') + def remove_ready(self): + clear_flag(self.expand_name('ready')) + + @property + def vm_metadata(self): + if self._vm_metadata is None: + cache_key = self.expand_name('vm-metadata') + cached = unitdata.kv().get(cache_key) + if cached: + self._vm_metadata = cached + else: + req = Request(self._metadata_url, + headers=self._metadata_headers) + with urlopen(req) as fd: + metadata = fd.read(READ_BLOCK_SIZE).decode('utf8').strip() + self._vm_metadata = json.loads(metadata) + unitdata.kv().set(cache_key, self._vm_metadata) + return self._vm_metadata + + @property + def vm_id(self): + """ + This unit's instance ID. + """ + return self.vm_metadata['compute']['vmId'] + + @property + def vm_name(self): + """ + This unit's instance name. + """ + return self.vm_metadata['compute']['name'] + + @property + def vm_location(self): + """ + The location (region) the instance is running in. + """ + return self.vm_metadata['compute']['location'] + + @property + def resource_group(self): + """ + The resource group this unit is in. 
+ """ + return self.vm_metadata['compute']['resourceGroupName'] + + @property + def resource_group_location(self): + """ + The location (region) the resource group is in. + """ + return self._received['resource-group-location'] + + @property + def subscription_id(self): + """ + The ID of the Azure Subscription this unit is in. + """ + return self.vm_metadata['compute']['subscriptionId'] + + @property + def vnet_name(self): + """ + The name of the virtual network the instance is in. + """ + return self._received['vnet-name'] + + @property + def vnet_resource_group(self): + """ + The name of the virtual network the instance is in. + """ + return self._received['vnet-resource-group'] + + @property + def subnet_name(self): + """ + The name of the subnet the instance is in. + """ + return self._received['subnet-name'] + + @property + def security_group_name(self): + """ + The name of the security group attached to the cluster's subnet. + """ + return self._received['security-group-name'] + + @property + def is_ready(self): + """ + Whether or not the request for this instance has been completed. 
+ """ + requested = self._to_publish['requested'] + completed = self._received.get('completed', {}).get(self.vm_id) + return requested and requested == completed + + @property + def security_group_resource_group(self): + return self._received['security-group-resource-group'] + + @property + def managed_identity(self): + return self._received['use-managed-identity'] + + @property + def aad_client_id(self): + return self._received['aad-client'] + + @property + def aad_client_secret(self): + return self._received['aad-client-secret'] + + @property + def tenant_id(self): + return self._received['tenant-id'] + + def _request(self, keyvals): + alphabet = string.ascii_letters + string.digits + nonce = ''.join(random.choice(alphabet) for _ in range(8)) + self._to_publish.update(keyvals) + self._to_publish['requested'] = nonce + clear_flag(self.expand_name('ready')) + + def tag_instance(self, tags): + """ + Request that the given tags be applied to this instance. + + # Parameters + `tags` (dict): Mapping of tag names to values. + """ + self._request({'instance-tags': dict(tags)}) + + def enable_instance_inspection(self): + """ + Request the ability to inspect instances. + """ + self._request({'enable-instance-inspection': True}) + + def enable_network_management(self): + """ + Request the ability to manage networking. + """ + self._request({'enable-network-management': True}) + + def enable_loadbalancer_management(self): + """ + Request the ability to manage load balancers. + """ + self._request({'enable-loadbalancer-management': True}) + + + def enable_security_management(self): + """ + Request the ability to manage security (e.g., firewalls). + """ + self._request({'enable-security-management': True}) + + def enable_block_storage_management(self): + """ + Request the ability to manage block storage. + """ + self._request({'enable-block-storage-management': True}) + + def enable_dns_management(self): + """ + Request the ability to manage DNS. 
+ """ + self._request({'enable-dns-management': True}) + + def enable_object_storage_access(self): + """ + Request the ability to access object storage. + """ + self._request({'enable-object-storage-access': True}) + + def enable_object_storage_management(self): + """ + Request the ability to manage object storage. + """ + self._request({'enable-object-storage-management': True}) + + diff --git a/kubernetes-worker/hooks/relations/container-runtime/.gitignore b/kubernetes-worker/hooks/relations/container-runtime/.gitignore new file mode 100644 index 0000000..894a44c --- /dev/null +++ b/kubernetes-worker/hooks/relations/container-runtime/.gitignore @@ -0,0 +1,104 @@ +# Byte-compiled / optimized / DLL files +__pycache__/ +*.py[cod] +*$py.class + +# C extensions +*.so + +# Distribution / packaging +.Python +build/ +develop-eggs/ +dist/ +downloads/ +eggs/ +.eggs/ +lib/ +lib64/ +parts/ +sdist/ +var/ +wheels/ +*.egg-info/ +.installed.cfg +*.egg +MANIFEST + +# PyInstaller +# Usually these files are written by a python script from a template +# before PyInstaller builds the exe, so as to inject date/other infos into it. 
+*.manifest +*.spec + +# Installer logs +pip-log.txt +pip-delete-this-directory.txt + +# Unit test / coverage reports +htmlcov/ +.tox/ +.coverage +.coverage.* +.cache +nosetests.xml +coverage.xml +*.cover +.hypothesis/ +.pytest_cache/ + +# Translations +*.mo +*.pot + +# Django stuff: +*.log +local_settings.py +db.sqlite3 + +# Flask stuff: +instance/ +.webassets-cache + +# Scrapy stuff: +.scrapy + +# Sphinx documentation +docs/_build/ + +# PyBuilder +target/ + +# Jupyter Notebook +.ipynb_checkpoints + +# pyenv +.python-version + +# celery beat schedule file +celerybeat-schedule + +# SageMath parsed files +*.sage.py + +# Environments +.env +.venv +env/ +venv/ +ENV/ +env.bak/ +venv.bak/ + +# Spyder project settings +.spyderproject +.spyproject + +# Rope project settings +.ropeproject + +# mkdocs documentation +/site + +# mypy +.mypy_cache/ diff --git a/kubernetes-worker/hooks/relations/container-runtime/LICENSE b/kubernetes-worker/hooks/relations/container-runtime/LICENSE new file mode 100644 index 0000000..261eeb9 --- /dev/null +++ b/kubernetes-worker/hooks/relations/container-runtime/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. 
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. 
You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. 
Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/kubernetes-worker/hooks/relations/container-runtime/README.md b/kubernetes-worker/hooks/relations/container-runtime/README.md new file mode 100644 index 0000000..4620013 --- /dev/null +++ b/kubernetes-worker/hooks/relations/container-runtime/README.md @@ -0,0 +1,45 @@ +# interface-container-runtime + +## Overview + +This interface handles communication between subordinate charms, that provide a container runtime and charms requiring a container runtime. + +## Usage + +### Provides + +The providing side of the container interface provides a place for a container runtime to connect to. + +Your charm should respond to the `endpoint.{endpoint_name}.available` state, +which indicates that there is a container runtime connected. + +A trivial example of handling this interface would be: + +```python +@when('endpoint.containerd.joined') +def update_kubelet_config(containerd): + endpoint = endpoint_from_flag('endpoint.containerd.joined') + config = endpoint.get_config() + kubelet.config['container-runtime'] = \ + config['runtime'] +``` + +### Requires + +The requiring side of the container interface requires a place for a container runtime to connect to. + +Your charm should set `{endpoint_name}.available` state, +which indicates that the container is runtime connected. 
+ +A trivial example of handling this interface would be: + +```python +@when('endpoint.containerd.joined') +def pubish_config(): + endpoint = endpoint_from_flag('endpoint.containerd.joined') + endpoint.set_config( + socket='unix:///var/run/containerd/containerd.sock', + runtime='remote', + nvidia_enabled=False + ) +``` diff --git a/kubernetes-worker/hooks/relations/container-runtime/__init__.py b/kubernetes-worker/hooks/relations/container-runtime/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/kubernetes-worker/hooks/relations/container-runtime/interface.yaml b/kubernetes-worker/hooks/relations/container-runtime/interface.yaml new file mode 100644 index 0000000..294be1e --- /dev/null +++ b/kubernetes-worker/hooks/relations/container-runtime/interface.yaml @@ -0,0 +1,4 @@ +name: container-runtime +summary: Interface for relating to container runtimes +version: 1 +maintainer: "Joe Borg " diff --git a/kubernetes-worker/hooks/relations/container-runtime/provides.py b/kubernetes-worker/hooks/relations/container-runtime/provides.py new file mode 100644 index 0000000..a9768a8 --- /dev/null +++ b/kubernetes-worker/hooks/relations/container-runtime/provides.py @@ -0,0 +1,55 @@ +from charms.reactive import ( + Endpoint, + toggle_flag +) + + +class ContainerRuntimeProvides(Endpoint): + def manage_flags(self): + toggle_flag(self.expand_name('endpoint.{endpoint_name}.available'), + self.is_joined) + + def _get_config(self, key): + """ + Get the published configuration for a given key. + + :param key: String dict key + :return: String value for given key + """ + return self.all_joined_units.received.get(key) + + def get_nvidia_enabled(self): + """ + Get the published nvidia config. + + :return: String + """ + return self._get_config(key='nvidia_enabled') + + def get_runtime(self): + """ + Get the published runtime config. + + :return: String + """ + return self._get_config(key='runtime') + + def get_socket(self): + """ + Get the published socket config. 
+ + :return: String + """ + return self._get_config(key='socket') + + def set_config(self, sandbox_image=None): + """ + Set the configuration to be published. + + :param sandbox_image: String to optionally override the sandbox image + :return: None + """ + for relation in self.relations: + relation.to_publish.update({ + 'sandbox_image': sandbox_image + }) diff --git a/kubernetes-worker/hooks/relations/container-runtime/requires.py b/kubernetes-worker/hooks/relations/container-runtime/requires.py new file mode 100644 index 0000000..c461b68 --- /dev/null +++ b/kubernetes-worker/hooks/relations/container-runtime/requires.py @@ -0,0 +1,61 @@ +from charms.reactive import ( + Endpoint, + clear_flag, + data_changed, + is_data_changed, + toggle_flag +) + + +class ContainerRuntimeRequires(Endpoint): + def manage_flags(self): + toggle_flag(self.expand_name('endpoint.{endpoint_name}.available'), + self.is_joined) + toggle_flag(self.expand_name('endpoint.{endpoint_name}.reconfigure'), + self.is_joined and self._config_changed()) + + def _config_changed(self): + """ + Determine if our received data has changed. + + :return: Boolean + """ + # NB: this call should match whatever we're tracking in handle_remote_config + return is_data_changed('containerd.remote_config', + [self.get_sandbox_image()]) + + def handle_remote_config(self): + """ + Keep track of received data so we can know if it changes. + + :return: None + """ + clear_flag(self.expand_name('endpoint.{endpoint_name}.reconfigure')) + # Presently, we only care about one piece of remote config. Expand + # the list as needed. + data_changed('containerd.remote_config', + [self.get_sandbox_image()]) + + def get_sandbox_image(self): + """ + Get the sandbox image URI if a remote has published one. + + :return: String: remotely configured sandbox image + """ + return self.all_joined_units.received.get('sandbox_image') + + def set_config(self, socket, runtime, nvidia_enabled): + """ + Set the configuration to be published. 
+ + :param socket: String uri to runtime socket + :param runtime: String runtime executable + :param nvidia_enabled: Boolean nvidia runtime enabled + :return: None + """ + for relation in self.relations: + relation.to_publish.update({ + 'socket': socket, + 'runtime': runtime, + 'nvidia_enabled': nvidia_enabled + }) diff --git a/kubernetes-worker/hooks/relations/coordinator/peers.py b/kubernetes-worker/hooks/relations/coordinator/peers.py new file mode 100644 index 0000000..f443bf6 --- /dev/null +++ b/kubernetes-worker/hooks/relations/coordinator/peers.py @@ -0,0 +1,21 @@ +# Copyright 2016-2018 Canonical Ltd. +# +# This file is part of the Coordinator Layer for Juju charms. +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License version 3, as +# published by the Free Software Foundation. +# +# This program is distributed in the hope that it will be useful, but +# WITHOUT ANY WARRANTY; without even the implied warranties of +# MERCHANTABILITY, SATISFACTORY QUALITY, or FITNESS FOR A PARTICULAR +# PURPOSE. See the GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . 
+ +from charms import reactive + + +class CoordinatorPeer(reactive.Endpoint): + pass diff --git a/kubernetes-worker/hooks/relations/gcp-integration/.gitignore b/kubernetes-worker/hooks/relations/gcp-integration/.gitignore new file mode 100644 index 0000000..5f9f2c5 --- /dev/null +++ b/kubernetes-worker/hooks/relations/gcp-integration/.gitignore @@ -0,0 +1,3 @@ +.tox +__pycache__ +*.pyc diff --git a/kubernetes-worker/hooks/relations/gcp-integration/LICENSE b/kubernetes-worker/hooks/relations/gcp-integration/LICENSE new file mode 100644 index 0000000..d645695 --- /dev/null +++ b/kubernetes-worker/hooks/relations/gcp-integration/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. 
+ + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." 
+ + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/kubernetes-worker/hooks/relations/gcp-integration/README.md b/kubernetes-worker/hooks/relations/gcp-integration/README.md new file mode 100644 index 0000000..42861fb --- /dev/null +++ b/kubernetes-worker/hooks/relations/gcp-integration/README.md @@ -0,0 +1,28 @@ +# Overview + +This layer encapsulates the `gcp-integration` interface communication protocol +and provides an API for charms on either side of relations using this +interface. 
+ +## Usage + +In your charm's `layer.yaml`, ensure that `interface:gcp-integration` is +included in the `includes` section: + +```yaml +includes: ['layer:basic', 'interface:gcp-integration'] +``` + +And in your charm's `metadata.yaml`, ensure that a relation endpoint is defined +using the `gcp-integration` interface protocol: + +```yaml +requires: + gcp: + interface: gcp-integration +``` + +For documentation on how to use the API for this interface, see: + +* [Requires API documentation](docs/requires.md) +* [Provides API documentation](docs/provides.md) (this will only be used by the gcp-integrator charm) diff --git a/kubernetes-worker/hooks/relations/gcp-integration/__init__.py b/kubernetes-worker/hooks/relations/gcp-integration/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/kubernetes-worker/hooks/relations/gcp-integration/copyright b/kubernetes-worker/hooks/relations/gcp-integration/copyright new file mode 100644 index 0000000..a91bdf1 --- /dev/null +++ b/kubernetes-worker/hooks/relations/gcp-integration/copyright @@ -0,0 +1,16 @@ +Format: http://dep.debian.net/deps/dep5/ + +Files: * +Copyright: Copyright 2018, Canonical Ltd., All Rights Reserved. +License: Apache License 2.0 + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + . + http://www.apache.org/licenses/LICENSE-2.0 + . + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/kubernetes-worker/hooks/relations/gcp-integration/docs/provides.md b/kubernetes-worker/hooks/relations/gcp-integration/docs/provides.md new file mode 100644 index 0000000..6f29a39 --- /dev/null +++ b/kubernetes-worker/hooks/relations/gcp-integration/docs/provides.md @@ -0,0 +1,183 @@ +

provides

+ + +This is the provides side of the interface layer, for use only by the GCP +integration charm itself. + +The flags that are set by the provides side of this interface are: + +* **`endpoint.{endpoint_name}.requested`** This flag is set when there is + a new or updated request by a remote unit for GCP integration features. + The GCP integration charm should then iterate over each request, perform + whatever actions are necessary to satisfy those requests, and then mark + them as complete. + +

GCPIntegrationProvides

+ +```python +GCPIntegrationProvides(self, endpoint_name, relation_ids=None) +``` + +Example usage: + +```python +from charms.reactive import when, endpoint_from_flag +from charms import layer + +@when('endpoint.gcp.requests-pending') +def handle_requests(): + gcp = endpoint_from_flag('endpoint.gcp.requests-pending') + for request in gcp.requests: + if request.instance_labels: + layer.gcp.label_instance( + request.instance, + request.zone, + request.instance_labels) + if request.requested_load_balancer_management: + layer.gcp.enable_load_balancer_management( + request.charm, + request.instance, + request.zone, + ) + # ... + gcp.mark_completed() +``` + +

relation_ids

+ + +A list of the IDs of all established relations. + +

requests

+ + +A list of the new or updated `IntegrationRequests` that +have been made. + +

get_departed_charms

+ +```python +GCPIntegrationProvides.get_departed_charms(self) +``` + +Get a list of all charms that have had all units depart since the +last time this was called. + +

mark_completed

+ +```python +GCPIntegrationProvides.mark_completed(self) +``` + +Mark all requests as completed and remove the `requests-pending` flag. + +

IntegrationRequest

+ +```python +IntegrationRequest(self, unit) +``` + +A request for integration from a single remote unit. + +

application_name

+ + +The name of the application making the request. + +

charm

+ + +The charm name reported for this request. + +

has_credentials

+ + +Whether or not credentials have been set via `set_credentials`. + +

instance

+ + +The instance name reported for this request. + +

instance_labels

+ + +Mapping of label names to values to apply to this instance. + +

is_changed

+ + +Whether this request has changed since the last time it was +marked completed (if ever). + +

model_uuid

+ + +The UUID of the model containing the application making this request. + +

relation_id

+ + +The ID of the relation for the unit making the request. + +

requested_block_storage_management

+ + +Flag indicating whether block storage management was requested. + +

requested_dns_management

+ + +Flag indicating whether DNS management was requested. + +

requested_instance_inspection

+ + +Flag indicating whether the ability to inspect instances was requested. + +

requested_network_management

+ + +Flag indicating whether the ability to manage networking was requested. + +

requested_object_storage_access

+ + +Flag indicating whether object storage access was requested. + +

requested_object_storage_management

+ + +Flag indicating whether object storage management was requested. + +

requested_security_management

+ + +Flag indicating whether security management was requested. + +

unit_name

+ + +The name of the unit making the request. + +

zone

+ + +The zone reported for this request. + +

mark_completed

+ +```python +IntegrationRequest.mark_completed(self) +``` + +Mark this request as having been completed. + +

set_credentials

+ +```python +IntegrationRequest.set_credentials(self, credentials) +``` + +Set the credentials for this request. + diff --git a/kubernetes-worker/hooks/relations/gcp-integration/docs/requires.md b/kubernetes-worker/hooks/relations/gcp-integration/docs/requires.md new file mode 100644 index 0000000..36e23c2 --- /dev/null +++ b/kubernetes-worker/hooks/relations/gcp-integration/docs/requires.md @@ -0,0 +1,140 @@ +

requires

+ + +This is the requires side of the interface layer, for use in charms that +wish to request integration with GCP native features. The integration will +be provided by the GCP integration charm, which allows the requiring charm +to not require cloud credentials itself and not have a lot of GCP specific +API code. + +The flags that are set by the requires side of this interface are: + +* **`endpoint.{endpoint_name}.joined`** This flag is set when the relation + has been joined, and the charm should then use the methods documented below + to request specific GCP features. This flag is automatically removed if + the relation is broken. It should not be removed by the charm. + +* **`endpoint.{endpoint_name}.ready`** This flag is set once the requested + features have been enabled for the GCP instance on which the charm is + running. This flag is automatically removed if new integration features + are requested. It should not be removed by the charm. + +

GCPIntegrationRequires

+ +```python +GCPIntegrationRequires(self, *args, **kwargs) +``` + +Interface to request integration access. + +Note that due to resource limits and permissions granularity, policies are +limited to being applied at the charm level. That means that, if any +permissions are requested (i.e., any of the enable methods are called), +what is granted will be the sum of those ever requested by any instance of +the charm on this cloud. + +Labels, on the other hand, will be instance specific. + +Example usage: + +```python +from charms.reactive import when, endpoint_from_flag + +@when('endpoint.gcp.joined') +def request_gcp_integration(): + gcp = endpoint_from_flag('endpoint.gcp.joined') + gcp.label_instance({ + 'tag1': 'value1', + 'tag2': None, + }) + gcp.request_load_balancer_management() + # ... + +@when('endpoint.gcp.ready') +def gcp_integration_ready(): + update_config_enable_gcp() +``` + +

instance

+ + +This unit's instance name. + +

is_ready

+ + +Whether or not the request for this instance has been completed. + +

zone

+ + +The zone this unit is in. + +

label_instance

+ +```python +GCPIntegrationRequires.label_instance(self, labels) +``` + +Request that the given labels be applied to this instance. + +__Parameters__ + +- __`labels` (dict)__: Mapping of labels names to values. + +

enable_instance_inspection

+ +```python +GCPIntegrationRequires.enable_instance_inspection(self) +``` + +Request the ability to inspect instances. + +

enable_network_management

+ +```python +GCPIntegrationRequires.enable_network_management(self) +``` + +Request the ability to manage networking. + +

enable_security_management

+ +```python +GCPIntegrationRequires.enable_security_management(self) +``` + +Request the ability to manage security (e.g., firewalls). + +

enable_block_storage_management

+ +```python +GCPIntegrationRequires.enable_block_storage_management(self) +``` + +Request the ability to manage block storage. + +

enable_dns_management

+ +```python +GCPIntegrationRequires.enable_dns_management(self) +``` + +Request the ability to manage DNS. + +

enable_object_storage_access

+ +```python +GCPIntegrationRequires.enable_object_storage_access(self) +``` + +Request the ability to access object storage. + +

enable_object_storage_management

+ +```python +GCPIntegrationRequires.enable_object_storage_management(self) +``` + +Request the ability to manage object storage. + diff --git a/kubernetes-worker/hooks/relations/gcp-integration/interface.yaml b/kubernetes-worker/hooks/relations/gcp-integration/interface.yaml new file mode 100644 index 0000000..9966e3f --- /dev/null +++ b/kubernetes-worker/hooks/relations/gcp-integration/interface.yaml @@ -0,0 +1,4 @@ +name: gcp-integration +summary: Interface for connecting to the GCP integrator charm. +version: 1 +maintainer: Cory Johns diff --git a/kubernetes-worker/hooks/relations/gcp-integration/make_docs b/kubernetes-worker/hooks/relations/gcp-integration/make_docs new file mode 100644 index 0000000..bd4e54e --- /dev/null +++ b/kubernetes-worker/hooks/relations/gcp-integration/make_docs @@ -0,0 +1,20 @@ +#!.tox/py3/bin/python + +import sys +from shutil import rmtree +from unittest.mock import patch + +import pydocmd.__main__ + + +with patch('charmhelpers.core.hookenv.metadata') as metadata: + metadata.return_value = { + 'requires': {'gcp': {'interface': 'gcp-integration'}}, + 'provides': {'gcp': {'interface': 'gcp-integration'}}, + } + sys.path.insert(0, '.') + print(sys.argv) + if len(sys.argv) == 1: + sys.argv.extend(['build']) + pydocmd.__main__.main() + rmtree('_build') diff --git a/kubernetes-worker/hooks/relations/gcp-integration/provides.py b/kubernetes-worker/hooks/relations/gcp-integration/provides.py new file mode 100644 index 0000000..ba34b0d --- /dev/null +++ b/kubernetes-worker/hooks/relations/gcp-integration/provides.py @@ -0,0 +1,253 @@ +""" +This is the provides side of the interface layer, for use only by the GCP +integration charm itself. + +The flags that are set by the provides side of this interface are: + +* **`endpoint.{endpoint_name}.requested`** This flag is set when there is + a new or updated request by a remote unit for GCP integration features. 
+ The GCP integration charm should then iterate over each request, perform + whatever actions are necessary to satisfy those requests, and then mark + them as complete. +""" + +from operator import attrgetter + +from charms.reactive import Endpoint +from charms.reactive import when +from charms.reactive import toggle_flag, clear_flag + + +class GCPIntegrationProvides(Endpoint): + """ + Example usage: + + ```python + from charms.reactive import when, endpoint_from_flag + from charms import layer + + @when('endpoint.gcp.requests-pending') + def handle_requests(): + gcp = endpoint_from_flag('endpoint.gcp.requests-pending') + for request in gcp.requests: + if request.instance_labels: + layer.gcp.label_instance( + request.instance, + request.zone, + request.instance_labels) + if request.requested_load_balancer_management: + layer.gcp.enable_load_balancer_management( + request.charm, + request.instance, + request.zone, + ) + # ... + gcp.mark_completed() + ``` + """ + + @when('endpoint.{endpoint_name}.changed') + def check_requests(self): + toggle_flag(self.expand_name('requests-pending'), + len(self.requests) > 0) + clear_flag(self.expand_name('changed')) + + @property + def requests(self): + """ + A list of the new or updated #IntegrationRequests that + have been made. + """ + if not hasattr(self, '_requests'): + all_requests = [IntegrationRequest(unit) + for unit in self.all_joined_units] + is_changed = attrgetter('is_changed') + self._requests = list(filter(is_changed, all_requests)) + return self._requests + + @property + def relation_ids(self): + """ + A list of the IDs of all established relations. + """ + return [relation.relation_id for relation in self.relations] + + def get_departed_charms(self): + """ + Get a list of all charms that have had all units depart since the + last time this was called. 
+ """ + joined_charms = {unit.received['charm'] + for unit in self.all_joined_units + if unit.received['charm']} + departed_charms = [unit.received['charm'] + for unit in self.all_departed_units + if unit.received['charm'] not in joined_charms] + self.all_departed_units.clear() + return departed_charms + + def mark_completed(self): + """ + Mark all requests as completed and remove the `requests-pending` flag. + """ + for request in self.requests: + request.mark_completed() + clear_flag(self.expand_name('requests-pending')) + self._requests = [] + + +class IntegrationRequest: + """ + A request for integration from a single remote unit. + """ + def __init__(self, unit): + self._unit = unit + + @property + def _to_publish(self): + return self._unit.relation.to_publish + + @property + def _completed(self): + return self._to_publish.get('completed', {}) + + @property + def _requested(self): + return self._unit.received['requested'] + + @property + def is_changed(self): + """ + Whether this request has changed since the last time it was + marked completed (if ever). + """ + if not all([self.charm, self.instance, self.zone, self._requested]): + return False + return self._completed.get(self.instance) != self._requested + + def mark_completed(self): + """ + Mark this request as having been completed. + """ + completed = self._completed + completed[self.instance] = self._requested + self._to_publish['completed'] = completed # have to explicitly update + + def set_credentials(self, credentials): + """ + Set the credentials for this request. + """ + self._unit.relation.to_publish['credentials'] = credentials + + @property + def has_credentials(self): + """ + Whether or not credentials have been set via `set_credentials`. + """ + return 'credentials' in self._unit.relation.to_publish + + @property + def relation_id(self): + """ + The ID of the relation for the unit making the request. 
+ """ + return self._unit.relation.relation_id + + @property + def unit_name(self): + """ + The name of the unit making the request. + """ + return self._unit.unit_name + + @property + def application_name(self): + """ + The name of the application making the request. + """ + return self._unit.application_name + + @property + def charm(self): + """ + The charm name reported for this request. + """ + return self._unit.received['charm'] + + @property + def instance(self): + """ + The instance name reported for this request. + """ + return self._unit.received['instance'] + + @property + def zone(self): + """ + The zone reported for this request. + """ + return self._unit.received['zone'] + + @property + def model_uuid(self): + """ + The UUID of the model containing the application making this request. + """ + return self._unit.received['model-uuid'] + + @property + def instance_labels(self): + """ + Mapping of label names to values to apply to this instance. + """ + # uses dict() here to make a copy, just to be safe + return dict(self._unit.received.get('instance-labels', {})) + + @property + def requested_instance_inspection(self): + """ + Flag indicating whether the ability to inspect instances was requested. + """ + return bool(self._unit.received['enable-instance-inspection']) + + @property + def requested_network_management(self): + """ + Flag indicating whether the ability to manage networking was requested. + """ + return bool(self._unit.received['enable-network-management']) + + @property + def requested_security_management(self): + """ + Flag indicating whether security management was requested. + """ + return bool(self._unit.received['enable-security-management']) + + @property + def requested_block_storage_management(self): + """ + Flag indicating whether block storage management was requested. 
+ """ + return bool(self._unit.received['enable-block-storage-management']) + + @property + def requested_dns_management(self): + """ + Flag indicating whether DNS management was requested. + """ + return bool(self._unit.received['enable-dns-management']) + + @property + def requested_object_storage_access(self): + """ + Flag indicating whether object storage access was requested. + """ + return bool(self._unit.received['enable-object-storage-access']) + + @property + def requested_object_storage_management(self): + """ + Flag indicating whether object storage management was requested. + """ + return bool(self._unit.received['enable-object-storage-management']) diff --git a/kubernetes-worker/hooks/relations/gcp-integration/pydocmd.yml b/kubernetes-worker/hooks/relations/gcp-integration/pydocmd.yml new file mode 100644 index 0000000..9ef5e78 --- /dev/null +++ b/kubernetes-worker/hooks/relations/gcp-integration/pydocmd.yml @@ -0,0 +1,16 @@ +site_name: 'GCP Integration Interface' + +generate: + - requires.md: + - requires + - requires.GCPIntegrationRequires+ + - provides.md: + - provides + - provides.GCPIntegrationProvides+ + - provides.IntegrationRequest+ + +pages: + - Requires: requires.md + - Provides: provides.md + +gens_dir: docs diff --git a/kubernetes-worker/hooks/relations/gcp-integration/requires.py b/kubernetes-worker/hooks/relations/gcp-integration/requires.py new file mode 100644 index 0000000..bbd191f --- /dev/null +++ b/kubernetes-worker/hooks/relations/gcp-integration/requires.py @@ -0,0 +1,227 @@ +""" +This is the requires side of the interface layer, for use in charms that +wish to request integration with GCP native features. The integration will +be provided by the GCP integration charm, which allows the requiring charm +to not require cloud credentials itself and not have a lot of GCP specific +API code. 
+ +The flags that are set by the requires side of this interface are: + +* **`endpoint.{endpoint_name}.joined`** This flag is set when the relation + has been joined, and the charm should then use the methods documented below + to request specific GCP features. This flag is automatically removed if + the relation is broken. It should not be removed by the charm. + +* **`endpoint.{endpoint_name}.ready`** This flag is set once the requested + features have been enabled for the GCP instance on which the charm is + running. This flag is automatically removed if new integration features + are requested. It should not be removed by the charm. +""" + + +import os +import random +import string +from urllib.parse import urljoin +from urllib.request import urlopen, Request + +from charmhelpers.core import hookenv +from charmhelpers.core import unitdata + +from charms.reactive import Endpoint +from charms.reactive import when, when_not +from charms.reactive import clear_flag, toggle_flag + + +# block size to read data from GCP metadata service +# (realistically, just needs to be bigger than ~20 chars) +READ_BLOCK_SIZE = 2048 + + +class GCPIntegrationRequires(Endpoint): + """ + Interface to request integration access. + + Note that due to resource limits and permissions granularity, policies are + limited to being applied at the charm level. That means that, if any + permissions are requested (i.e., any of the enable methods are called), + what is granted will be the sum of those ever requested by any instance of + the charm on this cloud. + + Labels, on the other hand, will be instance specific. + + Example usage: + + ```python + from charms.reactive import when, endpoint_from_flag + + @when('endpoint.gcp.joined') + def request_gcp_integration(): + gcp = endpoint_from_flag('endpoint.gcp.joined') + gcp.label_instance({ + 'tag1': 'value1', + 'tag2': None, + }) + gcp.enable_network_management() + # ... 
+ + @when('endpoint.gcp.ready') + def gcp_integration_ready(): + update_config_enable_gcp() + ``` + """ + # https://cloud.google.com/compute/docs/storing-retrieving-metadata + _metadata_url = 'http://metadata.google.internal/computeMetadata/v1/' + _instance_url = urljoin(_metadata_url, 'instance/name') + _zone_url = urljoin(_metadata_url, 'instance/zone') + _metadata_headers = {'Metadata-Flavor': 'Google'} + + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + self._instance = None + self._zone = None + + @property + def _received(self): + """ + Helper to streamline access to received data since we expect to only + ever be connected to a single GCP integration application with a + single unit. + """ + return self.relations[0].joined_units.received + + @property + def _to_publish(self): + """ + Helper to streamline access to received data since we expect to only + ever be connected to a single GCP integration application with a + single unit. + """ + return self.relations[0].to_publish + + @when('endpoint.{endpoint_name}.joined') + def send_instance_info(self): + self._to_publish['charm'] = hookenv.charm_name() + self._to_publish['instance'] = self.instance + self._to_publish['zone'] = self.zone + self._to_publish['model-uuid'] = os.environ['JUJU_MODEL_UUID'] + + @when('endpoint.{endpoint_name}.changed') + def check_ready(self): + # My middle name is ready. No, that doesn't sound right. + # I eat ready for breakfast. + toggle_flag(self.expand_name('ready'), self.is_ready) + clear_flag(self.expand_name('changed')) + + @when_not('endpoint.{endpoint_name}.joined') + def remove_ready(self): + clear_flag(self.expand_name('ready')) + + @property + def instance(self): + """ + This unit's instance name. 
+ """ + if self._instance is None: + cache_key = self.expand_name('instance') + cached = unitdata.kv().get(cache_key) + if cached: + self._instance = cached + else: + req = Request(self._instance_url, + headers=self._metadata_headers) + with urlopen(req) as fd: + instance = fd.read(READ_BLOCK_SIZE).decode('utf8').strip() + self._instance = instance + unitdata.kv().set(cache_key, self._instance) + return self._instance + + @property + def zone(self): + """ + The zone this unit is in. + """ + if self._zone is None: + cache_key = self.expand_name('zone') + cached = unitdata.kv().get(cache_key) + if cached: + self._zone = cached + else: + req = Request(self._zone_url, + headers=self._metadata_headers) + with urlopen(req) as fd: + zone = fd.read(READ_BLOCK_SIZE).decode('utf8').strip() + self._zone = zone.split('/')[-1] + unitdata.kv().set(cache_key, self._zone) + return self._zone + + @property + def is_ready(self): + """ + Whether or not the request for this instance has been completed. + """ + requested = self._to_publish['requested'] + completed = self._received.get('completed', {}).get(self.instance) + return requested and requested == completed + + @property + def credentials(self): + return self._received['credentials'] + + def _request(self, keyvals): + alphabet = string.ascii_letters + string.digits + nonce = ''.join(random.choice(alphabet) for _ in range(8)) + self._to_publish.update(keyvals) + self._to_publish['requested'] = nonce + clear_flag(self.expand_name('ready')) + + def label_instance(self, labels): + """ + Request that the given labels be applied to this instance. + + # Parameters + `labels` (dict): Mapping of labels names to values. + """ + self._request({'instance-labels': dict(labels)}) + + def enable_instance_inspection(self): + """ + Request the ability to inspect instances. + """ + self._request({'enable-instance-inspection': True}) + + def enable_network_management(self): + """ + Request the ability to manage networking. 
+ """ + self._request({'enable-network-management': True}) + + def enable_security_management(self): + """ + Request the ability to manage security (e.g., firewalls). + """ + self._request({'enable-security-management': True}) + + def enable_block_storage_management(self): + """ + Request the ability to manage block storage. + """ + self._request({'enable-block-storage-management': True}) + + def enable_dns_management(self): + """ + Request the ability to manage DNS. + """ + self._request({'enable-dns-management': True}) + + def enable_object_storage_access(self): + """ + Request the ability to access object storage. + """ + self._request({'enable-object-storage-access': True}) + + def enable_object_storage_management(self): + """ + Request the ability to manage object storage. + """ + self._request({'enable-object-storage-management': True}) diff --git a/kubernetes-worker/hooks/relations/http/.gitignore b/kubernetes-worker/hooks/relations/http/.gitignore new file mode 100644 index 0000000..3374ec2 --- /dev/null +++ b/kubernetes-worker/hooks/relations/http/.gitignore @@ -0,0 +1,5 @@ +# Emacs save files +*~ +\#*\# +.\#* + diff --git a/kubernetes-worker/hooks/relations/http/README.md b/kubernetes-worker/hooks/relations/http/README.md new file mode 100644 index 0000000..3d7822a --- /dev/null +++ b/kubernetes-worker/hooks/relations/http/README.md @@ -0,0 +1,68 @@ +# Overview + +This interface layer implements the basic form of the `http` interface protocol, +which is used for things such as reverse-proxies, load-balanced servers, REST +service discovery, et cetera. + +# Usage + +## Provides + +By providing the `http` interface, your charm is providing an HTTP server that +can be load-balanced, reverse-proxied, used as a REST endpoint, etc. 
+ +Your charm need only provide the port on which it is serving its content, as +soon as the `{relation_name}.available` state is set: + +```python +@when('website.available') +def configure_website(website): + website.configure(port=hookenv.config('port')) +``` + +## Requires + +By requiring the `http` interface, your charm is consuming one or more HTTP +servers, as a REST endpoint, to load-balance a set of servers, etc. + +Your charm should respond to the `{relation_name}.available` state, which +indicates that there is at least one HTTP server connected. + +The `services()` method returns a list of available HTTP services and their +associated hosts and ports. + +The return value is a list of dicts of the following form: + +```python +[ + { + 'service_name': name_of_service, + 'hosts': [ + { + 'hostname': address_of_host, + 'port': port_for_host, + }, + # ... + ], + }, + # ... +] +``` + +A trivial example of handling this interface would be: + +```python +from charms.reactive.helpers import data_changed + +@when('reverseproxy.available') +def update_reverse_proxy_config(reverseproxy): + services = reverseproxy.services() + if not data_changed('reverseproxy.services', services): + return + for service in services: + for host in service['hosts']: + hookenv.log('{} has a unit {}:{}'.format( + service['service_name'], + host['hostname'], + host['port'])) +``` diff --git a/kubernetes-worker/hooks/relations/http/__init__.py b/kubernetes-worker/hooks/relations/http/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/kubernetes-worker/hooks/relations/http/interface.yaml b/kubernetes-worker/hooks/relations/http/interface.yaml new file mode 100644 index 0000000..54e7748 --- /dev/null +++ b/kubernetes-worker/hooks/relations/http/interface.yaml @@ -0,0 +1,4 @@ +name: http +summary: Basic HTTP interface +version: 1 +repo: https://git.launchpad.net/~bcsaller/charms/+source/http diff --git a/kubernetes-worker/hooks/relations/http/provides.py 
b/kubernetes-worker/hooks/relations/http/provides.py new file mode 100644 index 0000000..86fa9b3 --- /dev/null +++ b/kubernetes-worker/hooks/relations/http/provides.py @@ -0,0 +1,67 @@ +import json + +from charmhelpers.core import hookenv +from charms.reactive import when, when_not +from charms.reactive import set_flag, clear_flag +from charms.reactive import Endpoint + + +class HttpProvides(Endpoint): + + @when('endpoint.{endpoint_name}.joined') + def joined(self): + set_flag(self.expand_name('{endpoint_name}.available')) + + @when_not('endpoint.{endpoint_name}.joined') + def broken(self): + clear_flag(self.expand_name('{endpoint_name}.available')) + + def get_ingress_address(self, rel_id=None): + # If no rel_id is provided, we fallback to the first one + if rel_id is None: + rel_id = self.relations[0].relation_id + return hookenv.ingress_address(rel_id, hookenv.local_unit()) + + def configure(self, port, private_address=None, hostname=None): + ''' configure the address(es). private_address and hostname can + be None, a single string address/hostname, or a list of addresses + and hostnames. 
Note that if a list is passed, it is assumed both + private_address and hostname are either lists or None ''' + for relation in self.relations: + ingress_address = self.get_ingress_address(relation.relation_id) + if type(private_address) is list or type(hostname) is list: + # build 3 lists to zip together that are the same length + length = max(len(private_address), len(hostname)) + p = [port] * length + a = private_address + [ingress_address] *\ + (length - len(private_address)) + h = hostname + [ingress_address] * (length - len(hostname)) + zipped_list = zip(p, a, h) + # now build an array of dictionaries from that in the desired + # format for the interface + data_list = [{'hostname': h, 'port': p, 'private-address': a} + for p, a, h in zipped_list] + # for backwards compatibility, we just send a single entry + # and have an array of dictionaries in a field of that + # entry for the other entries. + data = data_list.pop(0) + data['extended_data'] = json.dumps(data_list) + + relation.to_publish_raw.update(data) + else: + relation.to_publish_raw.update({ + 'hostname': hostname or ingress_address, + 'private-address': private_address or ingress_address, + 'port': port, + }) + + def set_remote(self, **kwargs): + # NB: This method provides backwards compatibility for charms that + # called RelationBase.set_remote. Most commonly, this was done by + # charms that needed to pass reverse proxy stanzas to http proxies. + # This type of interaction with base relation classes is discouraged, + # and should be handled with logic encapsulated in appropriate + # interfaces. Eventually, this method will be deprecated in favor of + # that behavior. 
+ for relation in self.relations: + relation.to_publish_raw.update(kwargs) diff --git a/kubernetes-worker/hooks/relations/http/requires.py b/kubernetes-worker/hooks/relations/http/requires.py new file mode 100644 index 0000000..17ea6b7 --- /dev/null +++ b/kubernetes-worker/hooks/relations/http/requires.py @@ -0,0 +1,76 @@ +import json + +from charms.reactive import when, when_not +from charms.reactive import set_flag, clear_flag +from charms.reactive import Endpoint + + +class HttpRequires(Endpoint): + + @when('endpoint.{endpoint_name}.changed') + def changed(self): + if any(unit.received_raw['port'] for unit in self.all_joined_units): + set_flag(self.expand_name('{endpoint_name}.available')) + + @when_not('endpoint.{endpoint_name}.joined') + def broken(self): + clear_flag(self.expand_name('{endpoint_name}.available')) + + def services(self): + """ + Returns a list of available HTTP services and their associated hosts + and ports. + + The return value is a list of dicts of the following form:: + + [ + { + 'service_name': name_of_service, + 'hosts': [ + { + 'hostname': address_of_host, + 'private-address': private_address_of_host, + 'port': port_for_host, + }, + # ... + ], + }, + # ... 
+ ] + """ + def build_service_host(data): + private_address = data['private-address'] + host = data['hostname'] or private_address + if host and data['port']: + return (host, private_address, data['port']) + else: + return None + + services = {} + for relation in self.relations: + service_name = relation.application_name + service = services.setdefault(service_name, { + 'service_name': service_name, + 'hosts': [], + }) + host_set = set() + for unit in relation.joined_units: + data = unit.received_raw + host = build_service_host(data) + if host: + host_set.add(host) + + # if we have extended data, add it + if 'extended_data' in data: + for ed in json.loads(data['extended_data']): + host = build_service_host(ed) + if host: + host_set.add(host) + + service['hosts'] = [ + {'hostname': h, 'private-address': pa, 'port': p} + for h, pa, p in sorted(host_set) + ] + + ret = [s for s in services.values() if s['hosts']] + return ret diff --git a/kubernetes-worker/hooks/relations/kube-control/.travis.yml b/kubernetes-worker/hooks/relations/kube-control/.travis.yml new file mode 100644 index 0000000..d2be8be --- /dev/null +++ b/kubernetes-worker/hooks/relations/kube-control/.travis.yml @@ -0,0 +1,9 @@ +language: python +python: + - "3.5" + - "3.6" + - "3.7" +install: + - pip install tox-travis +script: + - tox diff --git a/kubernetes-worker/hooks/relations/kube-control/README.md b/kubernetes-worker/hooks/relations/kube-control/README.md new file mode 100644 index 0000000..6f9ecb7 --- /dev/null +++ b/kubernetes-worker/hooks/relations/kube-control/README.md @@ -0,0 +1,171 @@ +# kube-control interface + +This interface provides communication between master and workers in a +Kubernetes cluster. + + +## Provides (kubernetes-master side) + + +### States + +* `kube-control.connected` + + Enabled when a worker has joined the relation. + +* `kube-control.gpu.available` + + Enabled when any worker has indicated that it is running in gpu mode. 
+ +* `kube-control.departed` + + Enabled when any worker has indicated that it is leaving the cluster. + + +* `kube-control.auth.requested` + + Enabled when an authentication credential is requested. This state is + temporary and will be removed once the units authentication request has + been fulfilled. + +### Methods + +* `kube_control.set_dns(port, domain, sdn_ip)` + + Sends DNS info to the connected worker(s). + + +* `kube_control.auth_user()` + + Returns a list of the requested username and group requested for + authentication. + +* `kube_control.sign_auth_request(scope, user, kubelet_token, proxy_token, client_token)` + + Sends authentication tokens to the unit scope for the requested user + and kube-proxy services. + +* `kube_control.set_cluster_tag(cluster_tag)` + + Sends a tag used to identify resources that are part of the cluster to the + connected worker(s). + +* `kube_control.flush_departed()` + + Returns the unit departing the kube_control relationship so you can do any + post removal cleanup. Such as removing authentication tokens for the unit. + Invoking this method will also remove the `kube-control.departed` state + +* `kube_control.set_registry_location(registry_location)` + Sends the container image registry location to the connected worker(s). 
+ +### Examples + +```python + +@when('kube-control.connected') +def send_dns(kube_control): + # send port, domain, sdn_ip to the remote side + kube_control.set_dns(53, "cluster.local", "10.1.0.10") + +@when('kube-control.gpu.available') +def on_gpu_available(kube_control): + # The remote side is gpu-enable, handle it somehow + assert kube_control.get_gpu() == True + + +@when('kube-control.departed') +@when('leadership.is_leader') +def flush_auth_for_departed(kube_control): + ''' Unit has left the cluster and needs to have its authentication + tokens removed from the token registry ''' + departing_unit = kube_control.flush_departed() + +``` + +## Requires (kubernetes-worker side) + + +### States + +* `kube-control.connected` + + Enabled when a master has joined the relation. + +* `kube-control.dns.available` + + Enabled when DNS info is available from the master. + +* `kube-control.auth.available` + + Enabled when authentication credentials are present from the master. + +* `kube-control.cluster_tag.available` + + Enabled when cluster tag is present from the master. + +* `kube-control.registry_location.available` + + Enabled when registry location is present from the master. + +### Methods + +* `kube_control.get_dns()` + + Returns a dictionary of DNS info sent by the master. The keys in the + dict are: domain, private-address, sdn-ip, port. + +* `kube_control.set_gpu(enabled=True)` + + Tell the master that we are gpu-enabled. + +* `kube_control.get_auth_credentials(user)` + + Returns a dict with the users authentication credentials. + +* `set_auth_request(kubelet, group='system:nodes')` + + Issue an authentication request against the master to receive token based + auth credentials in return. + +* `kube_control.get_cluster_tag()` + + Returns the cluster tag provided by the master. + +* `kube_control.get_registry_location()` + + Returns the container image registry location provided by the master. 
+ +### Examples + +```python + +@when('kube-control.dns.available') +def on_dns_available(kube_control): + # Remote side has sent DNS info + dns = kube_control.get_dns() + print(dns['domain']) + print(dns['private-address']) + print(dns['sdn-ip']) + print(dns['port']) + +@when('kube-control.connected') +def send_gpu(kube_control): + # Tell the master that we're gpu-enabled + kube_control.set_gpu(True) + +@when('kube-control.auth.available') +def display_auth_tokens(kube_control): + # Remote side has sent auth info + auth = kube_control.get_auth_credentials('root') + print(auth['kubelet_token']) + print(auth['proxy_token']) + print(auth['client_token']) + +@when('kube-control.connected') +@when_not('kube-control.auth.available') +def request_auth_credentials(kube_control): + # Request an admin user with sudo level access named 'root' + kube_control.set_auth_request('root', group='system:masters') + +``` diff --git a/kubernetes-worker/hooks/relations/kube-control/__init__.py b/kubernetes-worker/hooks/relations/kube-control/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/kubernetes-worker/hooks/relations/kube-control/interface.yaml b/kubernetes-worker/hooks/relations/kube-control/interface.yaml new file mode 100644 index 0000000..2f0b187 --- /dev/null +++ b/kubernetes-worker/hooks/relations/kube-control/interface.yaml @@ -0,0 +1,6 @@ +name: kube-control +summary: Provides master-worker communication. +version: 1 +maintainer: "Tim Van Steenburgh " +ignore: +- tests diff --git a/kubernetes-worker/hooks/relations/kube-control/provides.py b/kubernetes-worker/hooks/relations/kube-control/provides.py new file mode 100644 index 0000000..050a175 --- /dev/null +++ b/kubernetes-worker/hooks/relations/kube-control/provides.py @@ -0,0 +1,167 @@ +#!/usr/local/sbin/charm-env python3 +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from charms.reactive import ( + Endpoint, + toggle_flag, + set_flag, + data_changed +) + +from charmhelpers.core import ( + hookenv, + unitdata +) + + +DB = unitdata.kv() + + +class KubeControlProvider(Endpoint): + """ + Implements the kubernetes-master side of the kube-control interface. + """ + def manage_flags(self): + toggle_flag(self.expand_name('{endpoint_name}.connected'), + self.is_joined) + toggle_flag(self.expand_name('{endpoint_name}.gpu.available'), + self.is_joined and self._get_gpu()) + requests_data_id = self.expand_name('{endpoint_name}.requests') + requests = self.auth_user() + if data_changed(requests_data_id, requests): + set_flag(self.expand_name('{endpoint_name}.requests.changed')) + + def set_dns(self, port, domain, sdn_ip, enable_kube_dns): + """ + Send DNS info to the remote units. + + We'll need the port, domain, and sdn_ip of the dns service. If + sdn_ip is not required in your deployment, the units private-ip + is available implicitly. + """ + for relation in self.relations: + relation.to_publish_raw.update({ + 'port': port, + 'domain': domain, + 'sdn-ip': sdn_ip, + 'enable-kube-dns': enable_kube_dns, + }) + + def auth_user(self): + """ + Return the kubelet_user value on the wire from the requestors. 
+ """ + requests = [] + + for unit in self.all_joined_units: + requests.append( + (unit.unit_name, + {'user': unit.received_raw.get('kubelet_user'), + 'group': unit.received_raw.get('auth_group')}) + ) + + requests.sort() + return requests + + def sign_auth_request(self, scope, user, kubelet_token, proxy_token, + client_token): + """ + Send authorization tokens to the requesting unit. + """ + cred = { + 'scope': scope, + 'kubelet_token': kubelet_token, + 'proxy_token': proxy_token, + 'client_token': client_token + } + + if not DB.get('creds'): + DB.set('creds', {}) + + all_creds = DB.get('creds') + all_creds[user] = cred + DB.set('creds', all_creds) + + for relation in self.relations: + relation.to_publish.update({ + 'creds': all_creds + }) + + def clear_creds(self): + """ + Clear creds from the relation. This is used by non-leader units to stop + advertising creds so that the leader can assume full control of them. + """ + DB.unset('creds') + for relation in self.relations: + relation.to_publish_raw['creds'] = '' + + def _get_gpu(self): + """ + Return True if any remote worker is gpu-enabled. + """ + for unit in self.all_joined_units: + if unit.received_raw.get('gpu') == 'True': + hookenv.log('Unit {} has gpu enabled'.format(unit)) + return True + + return False + + def set_cluster_tag(self, cluster_tag): + """ + Send the cluster tag to the remote units. + """ + for relation in self.relations: + relation.to_publish_raw.update({ + 'cluster-tag': cluster_tag + }) + + def set_registry_location(self, registry_location): + """ + Send the registry location to the remote units. + """ + for relation in self.relations: + relation.to_publish_raw.update({ + 'registry-location': registry_location + }) + + def set_cohort_keys(self, cohort_keys): + """ + Send the cohort snapshot keys. + """ + for relation in self.relations: + relation.to_publish['cohort-keys'] = cohort_keys + + def set_default_cni(self, default_cni): + """ + Send the default CNI. 
The default_cni value should be a string + containing the name of a related CNI application to use as the + default CNI. For example: "flannel" or "calico". If no default has + been chosen then "" can be sent instead. + """ + for relation in self.relations: + relation.to_publish['default-cni'] = default_cni + + def set_api_endpoints(self, endpoints): + """ + Send the list of API endpoint URLs to which workers should connect. + """ + endpoints = sorted(endpoints) + for relation in self.relations: + relation.to_publish['api-endpoints'] = endpoints + + def set_has_xcp(self, has_xcp): + """ + Set the flag indicating that an external cloud provider is in use. + """ + for relation in self.relations: + relation.to_publish['has-xcp'] = bool(has_xcp) diff --git a/kubernetes-worker/hooks/relations/kube-control/requires.py b/kubernetes-worker/hooks/relations/kube-control/requires.py new file mode 100644 index 0000000..b72922d --- /dev/null +++ b/kubernetes-worker/hooks/relations/kube-control/requires.py @@ -0,0 +1,168 @@ +#!/usr/local/sbin/charm-env python3 +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from charms.reactive import ( + Endpoint, + toggle_flag, +) + +from charmhelpers.core.hookenv import log + + +class KubeControlRequirer(Endpoint): + """ + Implements the kubernetes-worker side of the kube-control interface. + """ + def manage_flags(self): + """ + Set states corresponding to the data we have. 
+ """ + toggle_flag( + self.expand_name('{endpoint_name}.connected'), + self.is_joined) + toggle_flag( + self.expand_name('{endpoint_name}.dns.available'), + self.is_joined and self.dns_ready()) + toggle_flag( + self.expand_name('{endpoint_name}.auth.available'), + self.is_joined and self._has_auth_credentials()) + toggle_flag( + self.expand_name('{endpoint_name}.cluster_tag.available'), + self.is_joined and self.get_cluster_tag()) + toggle_flag( + self.expand_name('{endpoint_name}.registry_location.available'), + self.is_joined and self.get_registry_location()) + toggle_flag( + self.expand_name('{endpoint_name}.cohort_keys.available'), + self.is_joined and self.cohort_keys) + toggle_flag( + self.expand_name('{endpoint_name}.default_cni.available'), + self.is_joined and self.get_default_cni() is not None) + toggle_flag( + self.expand_name('{endpoint_name}.api_endpoints.available'), + self.is_joined and self.get_api_endpoints()) + + def get_auth_credentials(self, user): + """ + Return the authentication credentials. + """ + rx = {} + for unit in self.all_joined_units: + rx.update(unit.received.get('creds', {})) + if not rx: + return None + + if user in rx: + return { + 'user': user, + 'kubelet_token': rx[user]['kubelet_token'], + 'proxy_token': rx[user]['proxy_token'], + 'client_token': rx[user]['client_token'] + } + else: + return None + + def get_dns(self): + """ + Return DNS info provided by the master. + """ + rx = self.all_joined_units.received_raw + + return { + 'port': rx.get('port'), + 'domain': rx.get('domain'), + 'sdn-ip': rx.get('sdn-ip'), + 'enable-kube-dns': rx.get('enable-kube-dns'), + } + + def dns_ready(self): + """ + Return True if we have all DNS info from the master. 
+ """ + keys = ['port', 'domain', 'sdn-ip', 'enable-kube-dns'] + dns_info = self.get_dns() + return (set(dns_info.keys()) == set(keys) and + dns_info['enable-kube-dns'] is not None) + + def set_auth_request(self, kubelet, group='system:nodes'): + """ + Tell the master that we are requesting auth, and to use this + hostname for the kubelet system account. + + Param groups - Determines the level of eleveted privleges of the + requested user. Can be overridden to request sudo level access on the + cluster via changing to system:masters. + """ + for relation in self.relations: + relation.to_publish_raw.update({ + 'kubelet_user': kubelet, + 'auth_group': group + }) + + def set_gpu(self, enabled=True): + """ + Tell the master that we're gpu-enabled (or not). + """ + log('Setting gpu={} on kube-control relation'.format(enabled)) + for relation in self.relations: + relation.to_publish_raw.update({ + 'gpu': enabled + }) + + def _has_auth_credentials(self): + """ + Predicate method to signal we have authentication credentials. + """ + if self.all_joined_units.received_raw.get('creds'): + return True + + def get_cluster_tag(self): + """ + Tag for identifying resources that are part of the cluster. + """ + return self.all_joined_units.received_raw.get('cluster-tag') + + def get_registry_location(self): + """ + URL for container image registry. + """ + return self.all_joined_units.received_raw.get('registry-location') + + @property + def cohort_keys(self): + """ + The cohort snapshot keys sent by the masters. + """ + return self.all_joined_units.received['cohort-keys'] + + def get_default_cni(self): + """ + Default CNI network to use. + """ + return self.all_joined_units.received['default-cni'] + + def get_api_endpoints(self): + """ + Returns a list of API endpoint URLs. 
+ """ + endpoints = set() + for unit in self.all_joined_units: + endpoints.update(unit.received['api-endpoints'] or []) + return sorted(endpoints) + + @property + def has_xcp(self): + """ + The flag indicating whether an external cloud provider is in use. + """ + return self.all_joined_units.received.get("has-xcp", False) diff --git a/kubernetes-worker/hooks/relations/kubernetes-cni/.github/workflows/tests.yaml b/kubernetes-worker/hooks/relations/kubernetes-cni/.github/workflows/tests.yaml new file mode 100644 index 0000000..9801450 --- /dev/null +++ b/kubernetes-worker/hooks/relations/kubernetes-cni/.github/workflows/tests.yaml @@ -0,0 +1,24 @@ +name: Test Suite for K8s Service Interface + +on: + - pull_request + +jobs: + lint-and-unit-tests: + name: Lint & Unit tests + runs-on: ubuntu-latest + strategy: + matrix: + python: [3.6, 3.7, 3.8, 3.9] + steps: + - name: Check out code + uses: actions/checkout@v2 + - name: Setup Python + uses: actions/setup-python@v2 + with: + python-version: ${{ matrix.python }} + - name: Install Tox + run: pip install tox + - name: Run lint & unit tests + run: tox + diff --git a/kubernetes-worker/hooks/relations/kubernetes-cni/.gitignore b/kubernetes-worker/hooks/relations/kubernetes-cni/.gitignore new file mode 100644 index 0000000..8d150f3 --- /dev/null +++ b/kubernetes-worker/hooks/relations/kubernetes-cni/.gitignore @@ -0,0 +1,4 @@ +.DS_Store +.tox +__pycache__ +*.pyc diff --git a/kubernetes-worker/hooks/relations/kubernetes-cni/README.md b/kubernetes-worker/hooks/relations/kubernetes-cni/README.md new file mode 100644 index 0000000..e69de29 diff --git a/kubernetes-worker/hooks/relations/kubernetes-cni/__init__.py b/kubernetes-worker/hooks/relations/kubernetes-cni/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/kubernetes-worker/hooks/relations/kubernetes-cni/interface.yaml b/kubernetes-worker/hooks/relations/kubernetes-cni/interface.yaml new file mode 100644 index 0000000..7e3c123 --- /dev/null +++ 
b/kubernetes-worker/hooks/relations/kubernetes-cni/interface.yaml @@ -0,0 +1,6 @@ +name: kubernetes-cni +summary: Interface for relating various CNI implementations +version: 0 +maintainer: "George Kraft " +ignore: +- tests diff --git a/kubernetes-worker/hooks/relations/kubernetes-cni/provides.py b/kubernetes-worker/hooks/relations/kubernetes-cni/provides.py new file mode 100644 index 0000000..dae1361 --- /dev/null +++ b/kubernetes-worker/hooks/relations/kubernetes-cni/provides.py @@ -0,0 +1,81 @@ +#!/usr/bin/python + +from charmhelpers.core import hookenv +from charmhelpers.core.host import file_hash +from charms.layer.kubernetes_common import kubeclientconfig_path +from charms.reactive import Endpoint +from charms.reactive import toggle_flag, clear_flag + + +class CNIPluginProvider(Endpoint): + def manage_flags(self): + toggle_flag(self.expand_name("{endpoint_name}.connected"), self.is_joined) + toggle_flag( + self.expand_name("{endpoint_name}.available"), self.config_available() + ) + clear_flag(self.expand_name("endpoint.{endpoint_name}.changed")) + + def config_available(self): + """Ensures all config from the CNI plugin is available.""" + goal_state = hookenv.goal_state() + related_apps = [ + app + for app in goal_state.get("relations", {}).get(self.endpoint_name, "") + if "/" not in app + ] + if not related_apps: + return False + configs = self.get_configs() + return all( + "cidr" in config and "cni-conf-file" in config + for config in [configs.get(related_app, {}) for related_app in related_apps] + ) + + def get_config(self, default=None): + """Get CNI config for one related application. + + If default is specified, and there is a related application with a + matching name, then that application is chosen. Otherwise, the + application is chosen alphabetically. + + Whichever application is chosen, that application's CNI config is + returned. 
+ """ + configs = self.get_configs() + if not configs: + return {} + elif default and default not in configs: + msg = "relation not found for default CNI %s, ignoring" % default + hookenv.log(msg, level="WARN") + return self.get_config() + elif default: + return configs.get(default, {}) + else: + return configs.get(sorted(configs)[0], {}) + + def get_configs(self): + """Get CNI configs for all related applications. + + This returns a mapping of application names to CNI configs. Here's an + example return value: + { + 'flannel': { + 'cidr': '10.1.0.0/16', + 'cni-conf-file': '10-flannel.conflist' + }, + 'calico': { + 'cidr': '192.168.0.0/16', + 'cni-conf-file': '10-calico.conflist' + } + } + """ + return { + relation.application_name: relation.joined_units.received_raw + for relation in self.relations + if relation.application_name + } + + def notify_kubeconfig_changed(self): + kubeconfig_hash = file_hash(kubeclientconfig_path) + for relation in self.relations: + relation.to_publish_raw.update({"kubeconfig-hash": kubeconfig_hash}) diff --git a/kubernetes-worker/hooks/relations/kubernetes-cni/requires.py b/kubernetes-worker/hooks/relations/kubernetes-cni/requires.py new file mode 100644 index 0000000..349aa07 --- /dev/null +++ b/kubernetes-worker/hooks/relations/kubernetes-cni/requires.py @@ -0,0 +1,42 @@ +#!/usr/bin/python + +from charmhelpers.core import unitdata +from charms.reactive import Endpoint +from charms.reactive import when_any, when_not +from charms.reactive import set_state, remove_state + +db = unitdata.kv() + + +class CNIPluginClient(Endpoint): + def manage_flags(self): + kubeconfig_hash = self.get_config().get("kubeconfig-hash") + kubeconfig_hash_key = self.expand_name("{endpoint_name}.kubeconfig-hash") + if kubeconfig_hash: + set_state(self.expand_name("{endpoint_name}.kubeconfig.available")) + if kubeconfig_hash != db.get(kubeconfig_hash_key): + set_state(self.expand_name("{endpoint_name}.kubeconfig.changed")) + db.set(kubeconfig_hash_key, 
kubeconfig_hash) + + @when_any("endpoint.{endpoint_name}.joined", "endpoint.{endpoint_name}.changed") + def changed(self): + """Indicate the relation is connected, and if the relation data is + set it is also available.""" + set_state(self.expand_name("{endpoint_name}.connected")) + remove_state(self.expand_name("endpoint.{endpoint_name}.changed")) + + @when_not("endpoint.{endpoint_name}.joined") + def broken(self): + """Indicate the relation is no longer available and not connected.""" + remove_state(self.expand_name("{endpoint_name}.connected")) + + def get_config(self): + """Get the kubernetes configuration information.""" + return self.all_joined_units.received_raw + + def set_config(self, cidr, cni_conf_file): + """Sets the CNI configuration information.""" + for relation in self.relations: + relation.to_publish_raw.update( + {"cidr": cidr, "cni-conf-file": cni_conf_file} + ) diff --git a/kubernetes-worker/hooks/relations/mount/.gitignore b/kubernetes-worker/hooks/relations/mount/.gitignore new file mode 100644 index 0000000..f3558c7 --- /dev/null +++ b/kubernetes-worker/hooks/relations/mount/.gitignore @@ -0,0 +1,105 @@ +# emacs files +*~ +\#*\# + +# Byte-compiled / optimized / DLL files +__pycache__/ +*.py[cod] +*$py.class + +# C extensions +*.so + +# Distribution / packaging +.Python +env/ +build/ +develop-eggs/ +dist/ +downloads/ +eggs/ +.eggs/ +lib/ +lib64/ +parts/ +sdist/ +var/ +wheels/ +*.egg-info/ +.installed.cfg +*.egg + +# PyInstaller +# Usually these files are written by a python script from a template +# before PyInstaller builds the exe, so as to inject date/other infos into it. 
+*.manifest +*.spec + +# Installer logs +pip-log.txt +pip-delete-this-directory.txt + +# Unit test / coverage reports +htmlcov/ +.tox/ +.coverage +.coverage.* +.cache +nosetests.xml +coverage.xml +*.cover +.hypothesis/ + +# Translations +*.mo +*.pot + +# Django stuff: +*.log +local_settings.py + +# Flask stuff: +instance/ +.webassets-cache + +# Scrapy stuff: +.scrapy + +# Sphinx documentation +docs/_build/ + +# PyBuilder +target/ + +# Jupyter Notebook +.ipynb_checkpoints + +# pyenv +.python-version + +# celery beat schedule file +celerybeat-schedule + +# SageMath parsed files +*.sage.py + +# dotenv +.env + +# virtualenv +.venv +venv/ +ENV/ + +# Spyder project settings +.spyderproject +.spyproject + +# Rope project settings +.ropeproject + +# mkdocs documentation +/site + +# mypy +.mypy_cache/ diff --git a/kubernetes-worker/hooks/relations/mount/LICENSE b/kubernetes-worker/hooks/relations/mount/LICENSE new file mode 100644 index 0000000..261eeb9 --- /dev/null +++ b/kubernetes-worker/hooks/relations/mount/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. 
+ + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. 
You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. 
Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/kubernetes-worker/hooks/relations/mount/README.md b/kubernetes-worker/hooks/relations/mount/README.md new file mode 100644 index 0000000..99c2394 --- /dev/null +++ b/kubernetes-worker/hooks/relations/mount/README.md @@ -0,0 +1,2 @@ +# interface-mount +Interface layer for connecting to mounts to a charm such as NFS diff --git a/kubernetes-worker/hooks/relations/mount/__init__.py b/kubernetes-worker/hooks/relations/mount/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/kubernetes-worker/hooks/relations/mount/copyright b/kubernetes-worker/hooks/relations/mount/copyright new file mode 100644 index 0000000..a91bdf1 --- /dev/null +++ b/kubernetes-worker/hooks/relations/mount/copyright @@ -0,0 +1,16 @@ +Format: http://dep.debian.net/deps/dep5/ + +Files: * +Copyright: Copyright 2018, Canonical Ltd., All Rights Reserved. +License: Apache License 2.0 + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + . + http://www.apache.org/licenses/LICENSE-2.0 + . + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ See the License for the specific language governing permissions and + limitations under the License. diff --git a/kubernetes-worker/hooks/relations/mount/interface.yaml b/kubernetes-worker/hooks/relations/mount/interface.yaml new file mode 100644 index 0000000..ff68ab1 --- /dev/null +++ b/kubernetes-worker/hooks/relations/mount/interface.yaml @@ -0,0 +1,4 @@ +name: mount +summary: Interface for mounting filesystems like NFS. +version: 1 +maintainer: Mike Wilson diff --git a/kubernetes-worker/hooks/relations/mount/provides.py b/kubernetes-worker/hooks/relations/mount/provides.py new file mode 100644 index 0000000..b68b0a8 --- /dev/null +++ b/kubernetes-worker/hooks/relations/mount/provides.py @@ -0,0 +1,39 @@ +from charms.reactive import when_any +from charms.reactive import set_flag, clear_flag +from charms.reactive import Endpoint + + +class MountProvides(Endpoint): + + @when_any('endpoint.{endpoint_name}.changed', + 'endpoint.{endpoint_name}.departed') + def changed(self): + set_flag(self.expand_name('{endpoint_name}.changed')) + clear_flag(self.expand_name('endpoint.{endpoint_name}.changed')) + clear_flag(self.expand_name('endpoint.{endpoint_name}.departed')) + + def get_mount_requests(self): + return [{ + 'identifier': relation.relation_id, + 'application_name': relation.joined_units.received_raw.get( + 'export_name', relation.application_name), + 'addresses': [ + unit.received_raw.get('ingress-address', + unit.received_raw['private-address']) + for unit in relation.joined_units], + } for relation in self.relations] + + def configure(self, responses): + for response in responses: + relation = self.relations[response['identifier']] + relation.to_publish_raw.update({ + 'mountpoint': response['mountpoint'], + 'fstype': response['fstype'], + 'options': response['options'], + }) + for key in ('export_name', 'hostname'): + if key in response: + relation.to_publish_raw[key] = response[key] + elif key in relation.to_publish_raw: + del relation.to_publish_raw[key] + 
clear_flag(self.expand_name('{endpoint_name}.changed')) diff --git a/kubernetes-worker/hooks/relations/mount/requires.py b/kubernetes-worker/hooks/relations/mount/requires.py new file mode 100644 index 0000000..6f503ed --- /dev/null +++ b/kubernetes-worker/hooks/relations/mount/requires.py @@ -0,0 +1,71 @@ +from charms.reactive import when, when_not +from charms.reactive import set_flag, clear_flag +from charms.reactive import Endpoint + + +class MountRequires(Endpoint): + + @when('endpoint.{endpoint_name}.joined') + def joined(self): + set_flag(self.expand_name('{endpoint_name}.joined')) + + @when('endpoint.{endpoint_name}.changed') + def changed(self): + if any(unit.received_raw['mountpoint'] + for unit in self.all_joined_units): + set_flag(self.expand_name('{endpoint_name}.available')) + + @when_not('endpoint.{endpoint_name}.joined') + def broken(self): + clear_flag(self.expand_name('{endpoint_name}.joined')) + clear_flag(self.expand_name('{endpoint_name}.available')) + + def set_export_name(self, export_name): + for relation in self.relations: + relation.to_publish_raw['export_name'] = export_name + + def mounts(self): + """ + Returns a list of available mounts and their associated data. + + The return value is a list of dicts of the following form:: + + [ + { + 'mount_name': name_of_mount, + 'mounts': [ + { + 'hostname': hostname, + 'mountpoint': mountpoint, + 'fstype': mounttype, + 'options': options + }, + # ... + ], + }, + # ... 
+ ] + """ + mounts = {} + for relation in self.relations: + for unit in relation.joined_units: + mount_name = unit.received_raw.get( + 'export_name', relation.application_name) + mount = mounts.setdefault(mount_name, { + 'mount_name': mount_name, + 'mounts': [], + }) + data = unit.received_raw + mountpoint = data['mountpoint'] + fstype = data['fstype'] + options = data['options'] + host = data['hostname'] or \ + data['private-address'] + if host and mountpoint and fstype and options: + mount['mounts'].append({ + 'hostname': host, + 'mountpoint': mountpoint, + 'fstype': fstype, + 'options': options + }) + return [m for m in mounts.values() if m['mounts']] diff --git a/kubernetes-worker/hooks/relations/nrpe-external-master/README.md b/kubernetes-worker/hooks/relations/nrpe-external-master/README.md new file mode 100644 index 0000000..e33deb8 --- /dev/null +++ b/kubernetes-worker/hooks/relations/nrpe-external-master/README.md @@ -0,0 +1,66 @@ +# nrpe-external-master interface + +Use this interface to register nagios checks in your charm layers. + +## Purpose + +This interface is designed to interoperate with the +[nrpe-external-master](https://jujucharms.com/nrpe-external-master) subordinate charm. + +## How to use in your layers + +The event handler for `nrpe-external-master.available` is called with an object +through which you can register your own custom nagios checks, when a relation +is established with `nrpe-external-master:nrpe-external-master`. + +This object provides a method, + +_add_check_(args, name=_check_name_, description=_description_, context=_context_, unit=_unit_) + +which is called to register a nagios plugin check for your service. + +All arguments are required. + +*args* is a list of nagios plugin command line arguments, starting with the path to the plugin executable. 
+ +*name* is the name of the check registered in nagios + +*description* is some text that describes what the check is for and what it does + +*context* is the nagios context name, something that identifies your application + +*unit* is `hookenv.local_unit()` + +The nrpe subordinate installs `check_http`, so you can use it like this: + +``` +@when('nrpe-external-master.available') +def setup_nagios(nagios): + config = hookenv.config() + unit_name = hookenv.local_unit() + nagios.add_check(['/usr/lib/nagios/plugins/check_http', + '-I', '127.0.0.1', '-p', str(config['port']), + '-e', " 200 OK", '-u', '/publickey'], + name="check_http", + description="Verify my awesome service is responding", + context=config["nagios_context"], + unit=unit_name, + ) +``` +If your `nagios.add_check` defines a custom plugin, you will also need to restart the `nagios-nrpe-server` service. + +Consult the nagios documentation for more information on [how to write your own +plugins](https://assets.nagios.com/downloads/nagioscore/docs/nagioscore/4/en/pluginapi.html) +or [find one](https://www.nagios.org/projects/nagios-plugins/) that does what you need. + +## Example deployment + +``` +$ juju deploy your-awesome-charm +$ juju deploy nrpe-external-master --config site-nagios.yaml +$ juju add-relation your-awesome-charm nrpe-external-master +``` + +where `site-nagios.yaml` has the necessary configuration settings for the +subordinate to connect to nagios. 
+ diff --git a/kubernetes-worker/hooks/relations/nrpe-external-master/__init__.py b/kubernetes-worker/hooks/relations/nrpe-external-master/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/kubernetes-worker/hooks/relations/nrpe-external-master/interface.yaml b/kubernetes-worker/hooks/relations/nrpe-external-master/interface.yaml new file mode 100644 index 0000000..859a423 --- /dev/null +++ b/kubernetes-worker/hooks/relations/nrpe-external-master/interface.yaml @@ -0,0 +1,3 @@ +name: nrpe-external-master +summary: Nagios interface +version: 1 diff --git a/kubernetes-worker/hooks/relations/nrpe-external-master/provides.py b/kubernetes-worker/hooks/relations/nrpe-external-master/provides.py new file mode 100644 index 0000000..b10f501 --- /dev/null +++ b/kubernetes-worker/hooks/relations/nrpe-external-master/provides.py @@ -0,0 +1,62 @@ +import datetime + +from charms.reactive import hook +from charms.reactive import RelationBase +from charms.reactive import scopes + + +class NrpeExternalMasterProvides(RelationBase): + scope = scopes.GLOBAL + + @hook('{provides:nrpe-external-master}-relation-{joined,changed}') + def changed_nrpe(self): + self.set_state('{relation_name}.available') + + @hook('{provides:nrpe-external-master}-relation-{broken,departed}') + def broken_nrpe(self): + self.remove_state('{relation_name}.available') + + def add_check(self, args, name=None, description=None, context=None, + servicegroups=None, unit=None): + unit = unit.replace('/', '-') + check_tmpl = """ +#--------------------------------------------------- +# This file is Juju managed +#--------------------------------------------------- +command[%(check_name)s]=%(check_args)s +""" + service_tmpl = """ +#--------------------------------------------------- +# This file is Juju managed +#--------------------------------------------------- +define service { + use active-service + host_name %(context)s-%(unit_name)s + service_description %(description)s + check_command 
check_nrpe!%(check_name)s + servicegroups %(servicegroups)s +} +""" + check_filename = "/etc/nagios/nrpe.d/check_%s.cfg" % (name) + with open(check_filename, "w") as fh: + fh.write(check_tmpl % { + 'check_args': ' '.join(args), + 'check_name': name, + }) + service_filename = "/var/lib/nagios/export/service__%s_%s.cfg" % ( + unit, name) + with open(service_filename, "w") as fh: + fh.write(service_tmpl % { + 'servicegroups': servicegroups or context, + 'context': context, + 'description': description, + 'check_name': name, + 'unit_name': unit, + }) + + def updated(self): + relation_info = { + 'timestamp': datetime.datetime.now().isoformat(), + } + self.set_remote(**relation_info) + self.remove_state('{relation_name}.available') diff --git a/kubernetes-worker/hooks/relations/openstack-integration/.gitignore b/kubernetes-worker/hooks/relations/openstack-integration/.gitignore new file mode 100644 index 0000000..5f9f2c5 --- /dev/null +++ b/kubernetes-worker/hooks/relations/openstack-integration/.gitignore @@ -0,0 +1,3 @@ +.tox +__pycache__ +*.pyc diff --git a/kubernetes-worker/hooks/relations/openstack-integration/LICENSE b/kubernetes-worker/hooks/relations/openstack-integration/LICENSE new file mode 100644 index 0000000..d645695 --- /dev/null +++ b/kubernetes-worker/hooks/relations/openstack-integration/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. 
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. 
You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. 
Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/kubernetes-worker/hooks/relations/openstack-integration/README.md b/kubernetes-worker/hooks/relations/openstack-integration/README.md new file mode 100644 index 0000000..ae021c2 --- /dev/null +++ b/kubernetes-worker/hooks/relations/openstack-integration/README.md @@ -0,0 +1,28 @@ +# Overview + +This layer encapsulates the `openstack-integration` interface communication +protocol and provides an API for charms on either side of relations using this +interface. 
+ +## Usage + +In your charm's `layer.yaml`, ensure that `interface:openstack-integration` is +included in the `includes` section: + +```yaml +includes: ['layer:basic', 'interface:openstack-integration'] +``` + +And in your charm's `metadata.yaml`, ensure that a relation endpoint is defined +using the `openstack-integration` interface protocol: + +```yaml +requires: + openstack: + interface: openstack-integration +``` + +For documentation on how to use the API for this interface, see: + +* [Requires API documentation](docs/requires.md) +* [Provides API documentation](docs/provides.md) (this will only be used by the openstack-integrator charm) diff --git a/kubernetes-worker/hooks/relations/openstack-integration/__init__.py b/kubernetes-worker/hooks/relations/openstack-integration/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/kubernetes-worker/hooks/relations/openstack-integration/copyright b/kubernetes-worker/hooks/relations/openstack-integration/copyright new file mode 100644 index 0000000..a91bdf1 --- /dev/null +++ b/kubernetes-worker/hooks/relations/openstack-integration/copyright @@ -0,0 +1,16 @@ +Format: http://dep.debian.net/deps/dep5/ + +Files: * +Copyright: Copyright 2018, Canonical Ltd., All Rights Reserved. +License: Apache License 2.0 + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + . + http://www.apache.org/licenses/LICENSE-2.0 + . + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/kubernetes-worker/hooks/relations/openstack-integration/docs/provides.md b/kubernetes-worker/hooks/relations/openstack-integration/docs/provides.md new file mode 100644 index 0000000..ee17ac6 --- /dev/null +++ b/kubernetes-worker/hooks/relations/openstack-integration/docs/provides.md @@ -0,0 +1,108 @@ +

provides

+ + +This is the provides side of the interface layer, for use only by the +OpenStack integration charm itself. + +The flags that are set by the provides side of this interface are: + +* **`endpoint.{endpoint_name}.requested`** This flag is set when there is + a new or updated request by a remote unit for OpenStack integration + features. The OpenStack integration charm should then iterate over each + request, perform whatever actions are necessary to satisfy those requests, + and then mark them as complete. + +

OpenStackIntegrationProvides

+ +```python +OpenStackIntegrationProvides(endpoint_name, relation_ids=None) +``` + +Example usage: + +```python +from charms.reactive import when, endpoint_from_flag +from charms import layer + +@when('endpoint.openstack.requests-pending') +def handle_requests(): + openstack = endpoint_from_flag('endpoint.openstack.requests-pending') + for request in openstack.requests: + request.set_credentials(layer.openstack.get_user_credentials()) + openstack.mark_completed() +``` + +

all_requests

+ + +A list of all of the [`IntegrationRequests`](#provides.OpenStackIntegrationProvides.all_requests.IntegrationRequests) that have been made. + +

new_requests

+ + +A list of the new or updated [`IntegrationRequests`](#provides.OpenStackIntegrationProvides.new_requests.IntegrationRequests) that have been made. + +

mark_completed

+ +```python +OpenStackIntegrationProvides.mark_completed() +``` + +Mark all requests as completed and remove the `requests-pending` flag. + +

IntegrationRequest

+ +```python +IntegrationRequest(unit) +``` + +A request for integration from a single remote unit. + +

has_credentials

+ + +Whether or not credentials have been set via `set_credentials`. + +

is_changed

+ + +Whether this request has changed since the last time it was +marked completed (if ever). + +

set_credentials

+ +```python +IntegrationRequest.set_credentials(auth_url, + region, + username, + password, + user_domain_name, + project_domain_name, + project_name, + endpoint_tls_ca, + version=None) +``` + +Set the credentials for this request. + +

set_lbaas_config

+ +```python +IntegrationRequest.set_lbaas_config(subnet_id, + floating_network_id, + lb_method, + manage_security_groups, + has_octavia=None) +``` + +Set the load-balancer-as-a-service config for this request. + +

set_block_storage_config

+ +```python +IntegrationRequest.set_block_storage_config(bs_version, trust_device_path, + ignore_volume_az) +``` + +Set the block storage config for this request. + diff --git a/kubernetes-worker/hooks/relations/openstack-integration/docs/requires.md b/kubernetes-worker/hooks/relations/openstack-integration/docs/requires.md new file mode 100644 index 0000000..510e292 --- /dev/null +++ b/kubernetes-worker/hooks/relations/openstack-integration/docs/requires.md @@ -0,0 +1,160 @@ +

requires

+ + +This is the requires side of the interface layer, for use in charms that wish +to request integration with OpenStack native features. The integration will be +provided by the OpenStack integration charm, which allows the requiring charm +to not require cloud credentials itself and not have a lot of OpenStack +specific API code. + +The flags that are set by the requires side of this interface are: + +* **`endpoint.{endpoint_name}.joined`** This flag is set when the relation + has been joined, and the charm should then use the methods documented below + to request specific OpenStack features. This flag is automatically removed + if the relation is broken. It should not be removed by the charm. + +* **`endpoint.{endpoint_name}.ready`** This flag is set once the requested + features have been enabled for the OpenStack instance on which the charm is + running. This flag is automatically removed if new integration features are + requested. It should not be removed by the charm. + +* **`endpoint.{endpoint_name}.ready.changed`** This flag is set if the data + changes after the ready flag was set. This flag should be removed by the + charm once handled. + +

OpenStackIntegrationRequires

+ +```python +OpenStackIntegrationRequires(endpoint_name, relation_ids=None) +``` + +Interface to request integration access. + +Note that due to resource limits and permissions granularity, policies are +limited to being applied at the charm level. That means that, if any +permissions are requested (i.e., any of the enable methods are called), +what is granted will be the sum of those ever requested by any instance of +the charm on this cloud. + +Labels, on the other hand, will be instance specific. + +Example usage: + +```python +from charms.reactive import when, endpoint_from_flag + +@when('endpoint.openstack.ready') +def openstack_integration_ready(): + openstack = endpoint_from_flag('endpoint.openstack.ready') + update_config_enable_openstack(openstack) +``` + +

auth_url

+ + +The authentication endpoint URL. + +

bs_version

+ + +What block storage API version to use, `auto` if autodetection is +desired, or `None` to use the default. + +

endpoint_tls_ca

+ + +Optional base64-encoded CA certificate for the authentication endpoint, +or None. + +

floating_network_id

+ + +Optional floating network ID, or None. + +

has_octavia

+ + +Whether the underlying OpenStack supports Octavia instead of +Neutron-based LBaaS. + +Will either be True, False, or None if it could not be determined for +some reason (typically due to connecting to an older integrator charm). + +

ignore_volume_az

+ + +Whether to ignore availability zones when attaching Cinder volumes. + +Will be `True`, `False`, or `None`. + +

is_changed

+ + +Whether or not the request for this instance has changed. + +

is_ready

+ + +Whether or not the request for this instance has been completed. + +

lb_method

+ + +Optional load-balancer method, or None. + +

manage_security_groups

+ + +Whether or not the Load Balancer should automatically manage security +group rules. + +Will be `True` or `False`. + +

password

+ + +The password. + +

project_domain_name

+ + +The project domain name. + +

project_name

+ + +The project name, also known as the tenant ID. + +

region

+ + +The region name. + +

subnet_id

+ + +Optional subnet ID to work in, or None. + +

trust_device_path

+ + +Whether to trust the block device name provided by Ceph. + +Will be `True`, `False`, or `None`. + +

user_domain_name

+ + +The user domain name. + +

username

+ + +The username. + +

version

+ + +Optional version number for the APIs or None. + diff --git a/kubernetes-worker/hooks/relations/openstack-integration/interface.yaml b/kubernetes-worker/hooks/relations/openstack-integration/interface.yaml new file mode 100644 index 0000000..a94fed4 --- /dev/null +++ b/kubernetes-worker/hooks/relations/openstack-integration/interface.yaml @@ -0,0 +1,4 @@ +name: openstack-integration +summary: Interface for connecting to the OpenStack integrator charm. +version: 1 +maintainer: Cory Johns diff --git a/kubernetes-worker/hooks/relations/openstack-integration/make_docs b/kubernetes-worker/hooks/relations/openstack-integration/make_docs new file mode 100644 index 0000000..a09c66f --- /dev/null +++ b/kubernetes-worker/hooks/relations/openstack-integration/make_docs @@ -0,0 +1,20 @@ +#!.tox/py3/bin/python + +import sys +from shutil import rmtree +from unittest.mock import patch + +import pydocmd.__main__ + + +with patch('charmhelpers.core.hookenv.metadata') as metadata: + metadata.return_value = { + 'requires': {'openstack': {'interface': 'openstack'}}, + 'provides': {'openstack': {'interface': 'openstack'}}, + } + sys.path.insert(0, '.') + print(sys.argv) + if len(sys.argv) == 1: + sys.argv.extend(['build']) + pydocmd.__main__.main() + rmtree('_build') diff --git a/kubernetes-worker/hooks/relations/openstack-integration/provides.py b/kubernetes-worker/hooks/relations/openstack-integration/provides.py new file mode 100644 index 0000000..2c788d6 --- /dev/null +++ b/kubernetes-worker/hooks/relations/openstack-integration/provides.py @@ -0,0 +1,154 @@ +""" +This is the provides side of the interface layer, for use only by the +OpenStack integration charm itself. + +The flags that are set by the provides side of this interface are: + +* **`endpoint.{endpoint_name}.requested`** This flag is set when there is + a new or updated request by a remote unit for OpenStack integration + features. 
The OpenStack integration charm should then iterate over each + request, perform whatever actions are necessary to satisfy those requests, + and then mark them as complete. +""" + +from operator import attrgetter + +from charms.reactive import Endpoint +from charms.reactive import when +from charms.reactive import toggle_flag, clear_flag + + +class OpenStackIntegrationProvides(Endpoint): + """ + Example usage: + + ```python + from charms.reactive import when, endpoint_from_flag + from charms import layer + + @when('endpoint.openstack.requests-pending') + def handle_requests(): + openstack = endpoint_from_flag('endpoint.openstack.requests-pending') + for request in openstack.requests: + request.set_credentials(layer.openstack.get_user_credentials()) + openstack.mark_completed() + ``` + """ + + @when('endpoint.{endpoint_name}.changed') + def check_requests(self): + toggle_flag(self.expand_name('requests-pending'), + len(self.all_requests) > 0) + clear_flag(self.expand_name('changed')) + + @property + def all_requests(self): + """ + A list of all of the #IntegrationRequests that have been made. + """ + if not hasattr(self, '_all_requests'): + self._all_requests = [IntegrationRequest(unit) + for unit in self.all_joined_units] + return self._all_requests + + @property + def new_requests(self): + """ + A list of the new or updated #IntegrationRequests that have been made. + """ + is_changed = attrgetter('is_changed') + return list(filter(is_changed, self.all_requests)) + + def mark_completed(self): + """ + Mark all requests as completed and remove the `requests-pending` flag. + """ + clear_flag(self.expand_name('requests-pending')) + + +class IntegrationRequest: + """ + A request for integration from a single remote unit. 
+ """ + def __init__(self, unit): + self._unit = unit + + @property + def _to_publish(self): + return self._unit.relation.to_publish + + @property + def is_changed(self): + """ + Whether this request has changed since the last time it was + marked completed (if ever). + """ + return not self.has_credentials + + @property + def unit_name(self): + return self._unit.unit_name + + def set_credentials(self, + auth_url, + region, + username, + password, + user_domain_name, + project_domain_name, + project_name, + endpoint_tls_ca, + version=None): + """ + Set the credentials for this request. + """ + self._unit.relation.to_publish.update({ + 'auth_url': auth_url, + 'region': region, + 'username': username, + 'password': password, + 'user_domain_name': user_domain_name, + 'project_domain_name': project_domain_name, + 'project_name': project_name, + 'endpoint_tls_ca': endpoint_tls_ca, + 'version': version, + }) + + def set_lbaas_config(self, + subnet_id, + floating_network_id, + lb_method, + manage_security_groups, + has_octavia=None, + internal_lb=False): + """ + Set the load-balancer-as-a-service config for this request. + """ + self._unit.relation.to_publish.update({ + 'subnet_id': subnet_id, + 'floating_network_id': floating_network_id, + 'lb_method': lb_method, + 'internal_lb': internal_lb, + 'manage_security_groups': manage_security_groups, + 'has_octavia': has_octavia, + }) + + def set_block_storage_config(self, + bs_version, + trust_device_path, + ignore_volume_az): + """ + Set the block storage config for this request. + """ + self._unit.relation.to_publish.update({ + 'bs_version': bs_version, + 'trust_device_path': trust_device_path, + 'ignore_volume_az': ignore_volume_az, + }) + + @property + def has_credentials(self): + """ + Whether or not credentials have been set via `set_credentials`. 
+ """ + return 'credentials' in self._unit.relation.to_publish diff --git a/kubernetes-worker/hooks/relations/openstack-integration/pydocmd.yml b/kubernetes-worker/hooks/relations/openstack-integration/pydocmd.yml new file mode 100644 index 0000000..aa0a286 --- /dev/null +++ b/kubernetes-worker/hooks/relations/openstack-integration/pydocmd.yml @@ -0,0 +1,16 @@ +site_name: 'OpenStack Integration Interface' + +generate: + - requires.md: + - requires + - requires.OpenStackIntegrationRequires+ + - provides.md: + - provides + - provides.OpenStackIntegrationProvides+ + - provides.IntegrationRequest+ + +pages: + - Requires: requires.md + - Provides: provides.md + +gens_dir: docs diff --git a/kubernetes-worker/hooks/relations/openstack-integration/requires.py b/kubernetes-worker/hooks/relations/openstack-integration/requires.py new file mode 100644 index 0000000..3566b45 --- /dev/null +++ b/kubernetes-worker/hooks/relations/openstack-integration/requires.py @@ -0,0 +1,263 @@ +""" +This is the requires side of the interface layer, for use in charms that wish +to request integration with OpenStack native features. The integration will be +provided by the OpenStack integration charm, which allows the requiring charm +to not require cloud credentials itself and not have a lot of OpenStack +specific API code. + +The flags that are set by the requires side of this interface are: + +* **`endpoint.{endpoint_name}.joined`** This flag is set when the relation + has been joined, and the charm should then use the methods documented below + to request specific OpenStack features. This flag is automatically removed + if the relation is broken. It should not be removed by the charm. + +* **`endpoint.{endpoint_name}.ready`** This flag is set once the requested + features have been enabled for the OpenStack instance on which the charm is + running. This flag is automatically removed if new integration features are + requested. It should not be removed by the charm. 
+ +* **`endpoint.{endpoint_name}.ready.changed`** This flag is set if the data + changes after the ready flag was set. This flag should be removed by the + charm once handled. +""" + + +from charms.reactive import Endpoint +from charms.reactive import when, when_not +from charms.reactive import set_flag, clear_flag, toggle_flag, is_flag_set +from charms.reactive import data_changed + + +class OpenStackIntegrationRequires(Endpoint): + """ + Interface to request integration access. + + Note that due to resource limits and permissions granularity, policies are + limited to being applied at the charm level. That means that, if any + permissions are requested (i.e., any of the enable methods are called), + what is granted will be the sum of those ever requested by any instance of + the charm on this cloud. + + Labels, on the other hand, will be instance specific. + + Example usage: + + ```python + from charms.reactive import when, endpoint_from_flag + + @when('endpoint.openstack.ready') + def openstack_integration_ready(): + openstack = endpoint_from_flag('endpoint.openstack.ready') + update_config_enable_openstack(openstack) + ``` + """ + + @property + def _received(self): + """ + Helper to streamline access to received data since we expect to only + ever be connected to a single OpenStack integration application with a + single unit. + """ + return self.relations[0].joined_units.received + + @property + def _to_publish(self): + """ + Helper to streamline access to received data since we expect to only + ever be connected to a single OpenStack integration application with a + single unit. + """ + return self.relations[0].to_publish + + @when('endpoint.{endpoint_name}.changed') + def check_ready(self): + # My middle name is ready. No, that doesn't sound right. + # I eat ready for breakfast. 
+ was_ready = is_flag_set(self.expand_name('ready')) + toggle_flag(self.expand_name('ready'), self.is_ready) + if self.is_ready and was_ready and self.is_changed: + set_flag(self.expand_name('ready.changed')) + clear_flag(self.expand_name('changed')) + + @when_not('endpoint.{endpoint_name}.joined') + def remove_ready(self): + clear_flag(self.expand_name('ready')) + + @property + def is_ready(self): + """ + Whether or not the request for this instance has been completed. + """ + # Although more information can be passed, such as LBaaS access + # the minimum needed to be considered ready is defined here + return all(field is not None for field in [ + self.auth_url, + self.username, + self.password, + self.user_domain_name, + self.project_domain_name, + self.project_name, + ]) + + @property + def is_changed(self): + """ + Whether or not the request for this instance has changed. + """ + return data_changed(self.expand_name('all-data'), [ + self.auth_url, + self.region, + self.username, + self.password, + self.user_domain_name, + self.project_domain_name, + self.project_name, + self.endpoint_tls_ca, + self.subnet_id, + self.floating_network_id, + self.lb_method, + self.internal_lb, + self.manage_security_groups, + ]) + + @property + def auth_url(self): + """ + The authentication endpoint URL. + """ + return self._received['auth_url'] + + @property + def region(self): + """ + The region name. + """ + return self._received['region'] + + @property + def username(self): + """ + The username. + """ + return self._received['username'] + + @property + def password(self): + """ + The password. + """ + return self._received['password'] + + @property + def user_domain_name(self): + """ + The user domain name. + """ + return self._received['user_domain_name'] + + @property + def project_domain_name(self): + """ + The project domain name. 
+ """ + return self._received['project_domain_name'] + + @property + def project_name(self): + """ + The project name, also known as the tenant ID. + """ + return self._received['project_name'] + + @property + def endpoint_tls_ca(self): + """ + Optional base64-encoded CA certificate for the authentication endpoint, + or None. + """ + return self._received['endpoint_tls_ca'] or None + + @property + def version(self): + """ + Optional version number for the APIs or None. + """ + return self._received['version'] or None + + @property + def subnet_id(self): + """ + Optional subnet ID to work in, or None. + """ + return self._received['subnet_id'] + + @property + def floating_network_id(self): + """ + Optional floating network ID, or None. + """ + return self._received['floating_network_id'] + + @property + def lb_method(self): + """ + Optional load-balancer method, or None. + """ + return self._received['lb_method'] + + @property + def internal_lb(self) -> bool: + """ + If should force internal loadbalancer use. + Defaults to false. + """ + return bool(self._received.get('internal_lb', False)) + + @property + def manage_security_groups(self): + """ + Whether or not the Load Balancer should automatically manage security + group rules. + + Will be `True` or `False`. + """ + return self._received['manage_security_groups'] or False + + @property + def bs_version(self): + """ + What block storage API version to use, `auto` if autodetection is + desired, or `None` to use the default. + """ + return self._received['bs_version'] + + @property + def trust_device_path(self): + """ + Whether to trust the block device name provided by Ceph. + + Will be `True`, `False`, or `None`. + """ + return self._received['trust_device_path'] + + @property + def ignore_volume_az(self): + """ + Whether to ignore availability zones when attaching Cinder volumes. + + Will be `True`, `False`, or `None`. 
+ """ + return self._received['ignore_volume_az'] + + @property + def has_octavia(self): + """ + Whether the underlying OpenStack supports Octavia instead of + Neutron-based LBaaS. + + Will either be True, False, or None if it could not be determined for + some reason (typically due to connecting to an older integrator charm). + """ + return self._received['has_octavia'] diff --git a/kubernetes-worker/hooks/relations/prometheus/.gitignore b/kubernetes-worker/hooks/relations/prometheus/.gitignore new file mode 100644 index 0000000..c18dd8d --- /dev/null +++ b/kubernetes-worker/hooks/relations/prometheus/.gitignore @@ -0,0 +1 @@ +__pycache__/ diff --git a/kubernetes-worker/hooks/relations/prometheus/__init__.py b/kubernetes-worker/hooks/relations/prometheus/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/kubernetes-worker/hooks/relations/prometheus/interface.yaml b/kubernetes-worker/hooks/relations/prometheus/interface.yaml new file mode 100644 index 0000000..f59b0cf --- /dev/null +++ b/kubernetes-worker/hooks/relations/prometheus/interface.yaml @@ -0,0 +1,4 @@ +name: prometheus +summary: Prometheus scrape target specification layer. +version: 1 +repo: https://git.launchpad.net/interface-prometheus diff --git a/kubernetes-worker/hooks/relations/prometheus/provides.py b/kubernetes-worker/hooks/relations/prometheus/provides.py new file mode 100644 index 0000000..5e6da6d --- /dev/null +++ b/kubernetes-worker/hooks/relations/prometheus/provides.py @@ -0,0 +1,51 @@ +from charms.reactive import Endpoint +from charmhelpers.core import hookenv +from charms.reactive.flags import toggle_flag + + +''' +This interface is designed to be compatible with a previous +implementation based on RelationBase. 
+ + +Specifically +- the `{endpoint_name}.available` flags +- the use of `to_publish_raw` to avoid double quoting of the values + as the old interface used plain values here +''' + + +class PrometheusProvides(Endpoint): + def manage_flags(self): + """ + Managing the available flag. + """ + toggle_flag(self.expand_name('endpoint.{endpoint_name}.available'), + self.is_joined) + # compatibility + toggle_flag(self.expand_name('{endpoint_name}.available'), + self.is_joined) + + def configure(self, port, path='/metrics', + scrape_interval=None, scrape_timeout=None, labels={}, + hostname=None): + """ + Interface method to set information provided to remote units + """ + + # Use our unit name if the label isn't provided + if labels.get('host') is None: + unit_name = hookenv.local_unit() + labels['host'] = unit_name.replace("/", "-") + + for relation in self.relations: + relation.to_publish_raw['hostname'] = hookenv.ingress_address( + relation.relation_id, hookenv.local_unit() + ) if hostname is None else hostname + + relation.to_publish_raw['port'] = port + relation.to_publish_raw['metrics_path'] = path + relation.to_publish['labels'] = labels + if scrape_interval is not None: + relation.to_publish_raw['scrape_interval'] = scrape_interval + if scrape_timeout is not None: + relation.to_publish_raw['scrape_timeout'] = scrape_timeout diff --git a/kubernetes-worker/hooks/relations/prometheus/requires.py b/kubernetes-worker/hooks/relations/prometheus/requires.py new file mode 100644 index 0000000..9cc9ed1 --- /dev/null +++ b/kubernetes-worker/hooks/relations/prometheus/requires.py @@ -0,0 +1,74 @@ +from charms.reactive import Endpoint +from charms.reactive.flags import toggle_flag +from charmhelpers.core.hookenv import ingress_address + + +''' +This interface is designed to be compatible with a previous +implementation based on RelationBase. 
+ +The old `{endpoint_name}.available` flags are maintained +''' + + +class PrometheusRequires(Endpoint): + def manage_flags(self): + """ + Managing the availability flag based on the port field from a connected + unit. It is convention that remote units signal availability this way. + """ + is_available = len(self.targets()) > 0 + toggle_flag(self.expand_name('endpoint.{endpoint_name}.available'), + is_available) + # compatibility + toggle_flag(self.expand_name('{endpoint_name}.available'), + is_available) + + def targets(self): + """ + Interface method returns a list of available prometheus targets. + [ + { + 'job_name': name_of_job, + 'targets': [ host_address:host_port, ... ], + 'metrics_path': path_to_metrics_endpoint(optional), + 'scrape_interval': scrape_interval(optional), + 'scrape_timeout': scrape_timeout(optional), + 'labels': { "label": "value", ... }, + }, + # ... + ] + """ + services = {} + for relation in self.relations: + service_name = relation.application_name + for unit in relation.units: + service = services.setdefault(service_name, { + 'job_name': service_name, + 'targets': [], + }) + + # If the hostname is not provided we use the informaton from + # the relation + host = (unit.received['hostname'] or + ingress_address(relation.relation_id, unit.unit_name)) + port = unit.received['port'] + + # Skipping this unit if it isn't ready yet + if host and port: + service['targets'].append('{}:{}'.format(host, port)) + else: + continue + + if unit.received['metrics_path']: + service['metrics_path'] = unit.received['metrics_path'] + if unit.received['labels']: + service['labels'] = unit.received['labels'] + + # Optional fields + if unit.received['scrape_interval']: + service['scrape_interval'] = \ + unit.received['scrape_interval'] + if unit.received['scrape_timeout']: + service['scrape_timeout'] = unit.received['scrape_timeout'] + return [s for s in services.values() if s['targets']] diff --git 
a/kubernetes-worker/hooks/relations/tls-certificates/.gitignore b/kubernetes-worker/hooks/relations/tls-certificates/.gitignore new file mode 100644 index 0000000..93813bc --- /dev/null +++ b/kubernetes-worker/hooks/relations/tls-certificates/.gitignore @@ -0,0 +1,4 @@ +.tox +__pycache__ +*.pyc +_build diff --git a/kubernetes-worker/hooks/relations/tls-certificates/README.md b/kubernetes-worker/hooks/relations/tls-certificates/README.md new file mode 100644 index 0000000..733da6d --- /dev/null +++ b/kubernetes-worker/hooks/relations/tls-certificates/README.md @@ -0,0 +1,90 @@ +# Interface tls-certificates + +This is a [Juju][] interface layer that enables a charm which requires TLS +certificates to relate to a charm which can provide them, such as [Vault][] or +[EasyRSA][] + +To get started please read the [Introduction to PKI][] which defines some PKI +terms, concepts and processes used in this document. + +# Example Usage + +Let's say you have a charm which needs a server certificate for a service it +provides to other charms and a client certificate for a database it consumes +from another charm. The charm provides its own service on the `clients` +relation endpoint, and it consumes the database on the `db` relation endpoint. 
+ +First, you must define the relation endpoint in your charm's `metadata.yaml`: + +```yaml +requires: + cert-provider: + interface: tls-certificates +``` + +Next, you must ensure the interface layer is included in your `layer.yaml`: + +```yaml +includes: + - interface:tls-certificates +``` + +Then, in your reactive code, add the following, changing `update_certs` to +handle the certificates however your charm needs: + +```python +from charmhelpers.core import hookenv, host +from charms.reactive import endpoint_from_flag + + +@when('cert-provider.ca.changed') +def install_root_ca_cert(): + cert_provider = endpoint_from_flag('cert-provider.ca.available') + host.install_ca_cert(cert_provider.root_ca_cert) + clear_flag('cert-provider.ca.changed') + + +@when('cert-provider.available') +def request_certificates(): + cert_provider = endpoint_from_flag('cert-provider.available') + + # get ingress info + ingress_for_clients = hookenv.network_get('clients')['ingress-addresses'] + ingress_for_db = hookenv.network_get('db')['ingress-addresses'] + + # use first ingress address as primary and any additional as SANs + server_cn, server_sans = ingress_for_clients[0], ingress_for_clients[:1] + client_cn, client_sans = ingress_for_db[0], ingress_for_db[:1] + + # request a single server and single client cert; note that multiple certs + # of either type can be requested as long as they have unique common names + cert_provider.request_server_cert(server_cn, server_sans) + cert_provider.request_client_cert(client_cn, client_sans) + + +@when('cert-provider.certs.changed') +def update_certs(): + cert_provider = endpoint_from_flag('cert-provider.available') + server_cert = cert_provider.server_certs[0] # only requested one + myserver.update_server_cert(server_cert.cert, server_cert.key) + + client_cert = cert_provider.client_certs[0] # only requested one + myclient.update_client_cert(client_cert.cert, client_cert.key) + clear_flag('cert-provider.certs.changed') +``` + + +# Reference + + 
* [Requires](docs/requires.md) + * [Provides](docs/provides.md) + +# Contact Information + +Maintainer: Cory Johns <Cory.Johns@canonical.com> + + +[Juju]: https://jujucharms.com +[Vault]: https://jujucharms.com/u/openstack-charmers/vault +[EasyRSA]: https://jujucharms.com/u/containers/easyrsa +[Introduction to PKI]: https://github.com/OpenVPN/easy-rsa/blob/master/doc/Intro-To-PKI.md diff --git a/kubernetes-worker/hooks/relations/tls-certificates/__init__.py b/kubernetes-worker/hooks/relations/tls-certificates/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/kubernetes-worker/hooks/relations/tls-certificates/docs/common.md b/kubernetes-worker/hooks/relations/tls-certificates/docs/common.md new file mode 100644 index 0000000..25d0e08 --- /dev/null +++ b/kubernetes-worker/hooks/relations/tls-certificates/docs/common.md @@ -0,0 +1,51 @@ +

CertificateRequest

+ +```python +CertificateRequest(self, unit, cert_type, cert_name, common_name, sans) +``` + +

application_name

+ +Name of the application which the request came from. + +:returns: Name of application +:rtype: str + +

cert

+ + +The cert published for this request, if any. + +

cert_type

+ + +Type of certificate, 'server' or 'client', being requested. + +

resolve_unit_name

+ +```python +CertificateRequest.resolve_unit_name(unit) +``` +Return name of unit associated with this request. + +unit_name should be provided in the relation data to ensure +compatibility with cross-model relations. If the unit name +is absent then fall back to unit_name attribute of the +unit associated with this request. + +:param unit: Unit to extract name from +:type unit: charms.reactive.endpoints.RelatedUnit +:returns: Name of unit +:rtype: str + +

Certificate

+ +```python +Certificate(self, cert_type, common_name, cert, key) +``` + +Represents a created certificate and key. + +The ``cert_type``, ``common_name``, ``cert``, and ``key`` values can +be accessed either as properties or as the contents of the dict. + diff --git a/kubernetes-worker/hooks/relations/tls-certificates/docs/provides.md b/kubernetes-worker/hooks/relations/tls-certificates/docs/provides.md new file mode 100644 index 0000000..c213546 --- /dev/null +++ b/kubernetes-worker/hooks/relations/tls-certificates/docs/provides.md @@ -0,0 +1,212 @@ +

provides

+ + +

TlsProvides

+ +```python +TlsProvides(self, endpoint_name, relation_ids=None) +``` + +The provider's side of the interface protocol. + +The following flags may be set: + + * `{endpoint_name}.available` + Whenever any clients are joined. + + * `{endpoint_name}.certs.requested` + When there are new certificate requests of any kind to be processed. + The requests can be accessed via [new_requests][]. + + * `{endpoint_name}.server.certs.requested` + When there are new server certificate requests to be processed. + The requests can be accessed via [new_server_requests][]. + + * `{endpoint_name}.client.certs.requested` + When there are new client certificate requests to be processed. + The requests can be accessed via [new_client_requests][]. + +[Certificate]: common.md#tls_certificates_common.Certificate +[CertificateRequest]: common.md#tls_certificates_common.CertificateRequest +[all_requests]: provides.md#provides.TlsProvides.all_requests +[new_requests]: provides.md#provides.TlsProvides.new_requests +[new_server_requests]: provides.md#provides.TlsProvides.new_server_requests +[new_client_requests]: provides.md#provides.TlsProvides.new_client_requests + +

all_published_certs

+ + +List of all [Certificate][] instances that this provider has published +for all related applications. + +

all_requests

+ + +List of all requests that have been made. + +Each will be an instance of [CertificateRequest][]. + +Example usage: + +```python +@when('certs.regen', + 'tls.certs.available') +def regen_all_certs(): + tls = endpoint_from_flag('tls.certs.available') + for request in tls.all_requests: + cert, key = generate_cert(request.cert_type, + request.common_name, + request.sans) + request.set_cert(cert, key) +``` + +

new_application_requests

+ + +Filtered view of [new_requests][] that only includes application cert +requests. + +Each will be an instance of [ApplicationCertificateRequest][]. + +Example usage: + +```python +@when('tls.application.certs.requested') +def gen_application_certs(): + tls = endpoint_from_flag('tls.application.certs.requested') + for request in tls.new_application_requests: + cert, key = generate_application_cert(request.common_name, + request.sans) + request.set_cert(cert, key) +``` + +

new_client_requests

+ + +Filtered view of [new_requests][] that only includes client cert +requests. + +Each will be an instance of [CertificateRequest][]. + +Example usage: + +```python +@when('tls.client.certs.requested') +def gen_client_certs(): + tls = endpoint_from_flag('tls.client.certs.requested') + for request in tls.new_client_requests: + cert, key = generate_client_cert(request.common_name, + request.sans) + request.set_cert(cert, key) +``` + +

new_requests

+ + +Filtered view of [all_requests][] that only includes requests that +haven't been handled. + +Each will be an instance of [CertificateRequest][]. + +This collection can also be further filtered by request type using +[new_server_requests][] or [new_client_requests][]. + +Example usage: + +```python +@when('tls.certs.requested') +def gen_certs(): + tls = endpoint_from_flag('tls.certs.requested') + for request in tls.new_requests: + cert, key = generate_cert(request.cert_type, + request.common_name, + request.sans) + request.set_cert(cert, key) +``` + +

new_server_requests

+ + +Filtered view of [new_requests][] that only includes server cert +requests. + +Each will be an instance of [CertificateRequest][]. + +Example usage: + +```python +@when('tls.server.certs.requested') +def gen_server_certs(): + tls = endpoint_from_flag('tls.server.certs.requested') + for request in tls.new_server_requests: + cert, key = generate_server_cert(request.common_name, + request.sans) + request.set_cert(cert, key) +``` + +

set_ca

+ +```python +TlsProvides.set_ca(certificate_authority) +``` + +Publish the CA to all related applications. + +

set_chain

+ +```python +TlsProvides.set_chain(chain) +``` + +Publish the chain of trust to all related applications. + +

set_client_cert

+ +```python +TlsProvides.set_client_cert(cert, key) +``` + +Deprecated. This is only for backwards compatibility. + +Publish a globally shared client cert and key. + +

set_server_cert

+ +```python +TlsProvides.set_server_cert(scope, cert, key) +``` + +Deprecated. Use one of the [new_requests][] collections and +`request.set_cert()` instead. + +Set the server cert and key for the request identified by `scope`. + +

set_server_multicerts

+ +```python +TlsProvides.set_server_multicerts(scope) +``` + +Deprecated. Done automatically. + +

add_server_cert

+ +```python +TlsProvides.add_server_cert(scope, cn, cert, key) +``` + +Deprecated. Use `request.set_cert()` instead. + +

get_server_requests

+ +```python +TlsProvides.get_server_requests() +``` + +Deprecated. Use the [new_requests][] or [server_requests][] +collections instead. + +One provider can have many requests to generate server certificates. +Return a map of all server request objects indexed by a unique +identifier. + diff --git a/kubernetes-worker/hooks/relations/tls-certificates/docs/requires.md b/kubernetes-worker/hooks/relations/tls-certificates/docs/requires.md new file mode 100644 index 0000000..fdec902 --- /dev/null +++ b/kubernetes-worker/hooks/relations/tls-certificates/docs/requires.md @@ -0,0 +1,207 @@ +

requires

+ + +

TlsRequires

+ +```python +TlsRequires(self, endpoint_name, relation_ids=None) +``` + +The client's side of the interface protocol. + +The following flags may be set: + + * `{endpoint_name}.available` + Whenever the relation is joined. + + * `{endpoint_name}.ca.available` + When the root CA information is available via the [root_ca_cert][] and + [root_ca_chain][] properties. + + * `{endpoint_name}.ca.changed` + When the root CA information has changed, whether because + they have just become available or if they were regenerated by the CA. + Once processed this flag should be removed by the charm. + + * `{endpoint_name}.certs.available` + When the requested server or client certs are available. + + * `{endpoint_name}.certs.changed` + When the requested server or client certs have changed, whether because + they have just become available or if they were regenerated by the CA. + Once processed this flag should be removed by the charm. + + * `{endpoint_name}.server.certs.available` + When the server certificates requested by [request_server_cert][] are + available via the [server_certs][] collection. + + * `{endpoint_name}.server.certs.changed` + When the requested server certificates have changed, whether because + they have just become available or if they were regenerated by the CA. + Once processed this flag should be removed by the charm. + + * `{endpoint_name}.client.certs.available` + When the client certificates requested by [request_client_cert][] are + available via the [client_certs][] collection. + + * `{endpoint_name}.client.certs.changed` + When the requested client certificates have changed, whether because + they have just become available or if they were regenerated by the CA. + Once processed this flag should be removed by the charm. 
+ +The following flags have been deprecated: + + * `{endpoint_name}.server.cert.available` + * `{endpoint_name}.client.cert.available` + * `{endpoint_name}.batch.cert.available` + +[Certificate]: common.md#tls_certificates_common.Certificate +[CertificateRequest]: common.md#tls_certificates_common.CertificateRequest +[root_ca_cert]: requires.md#requires.TlsRequires.root_ca_cert +[root_ca_chain]: requires.md#requires.TlsRequires.root_ca_chain +[request_server_cert]: requires.md#requires.TlsRequires.request_server_cert +[request_client_cert]: requires.md#requires.TlsRequires.request_client_cert +[server_certs]: requires.md#requires.TlsRequires.server_certs +[server_certs_map]: requires.md#requires.TlsRequires.server_certs_map +[client_certs]: requires.md#requires.TlsRequires.server_certs + +

application_certs

+ + +List of [Certificate][] instances for all available application certs. + +

client_certs

+ + +List of [Certificate][] instances for all available client certs. + +

client_certs_map

+ + +Mapping of client [Certificate][] instances by their `common_name`. + +

root_ca_cert

+ + +Root CA certificate. + +

root_ca_chain

+ + +The chain of trust for the root CA. + +

server_certs

+ + +List of [Certificate][] instances for all available server certs. + +

server_certs_map

+ + +Mapping of server [Certificate][] instances by their `common_name`. + +

get_ca

+ +```python +TlsRequires.get_ca() +``` + +Return the root CA certificate. + +Same as [root_ca_cert][]. + +

get_chain

+ +```python +TlsRequires.get_chain() +``` + +Return the chain of trust for the root CA. + +Same as [root_ca_chain][]. + +

get_client_cert

+ +```python +TlsRequires.get_client_cert() +``` + +Deprecated. Use [request_client_cert][] and the [client_certs][] +collection instead. + +Return a globally shared client certificate and key. + +

get_server_cert

+ +```python +TlsRequires.get_server_cert() +``` + +Deprecated. Use the [server_certs][] collection instead. + +Return the cert and key of the first server certificate requested. + +

get_batch_requests

+ +```python +TlsRequires.get_batch_requests() +``` + +Deprecated. Use [server_certs_map][] instead. + +Mapping of server [Certificate][] instances by their `common_name`. + +

request_server_cert

+ +```python +TlsRequires.request_server_cert(cn, sans=None, cert_name=None) +``` + +Request a server certificate and key be generated for the given +common name (`cn`) and optional list of alternative names (`sans`). + +The `cert_name` is deprecated and not needed. + +This can be called multiple times to request more than one server +certificate, although the common names must be unique. If called +again with the same common name, it will be ignored. + +

add_request_server_cert

+ +```python +TlsRequires.add_request_server_cert(cn, sans) +``` + +Deprecated. Use [request_server_cert][] instead. + +

request_server_certs

+ +```python +TlsRequires.request_server_certs() +``` + +Deprecated. Just use [request_server_cert][]; this does nothing. + +

request_client_cert

+ +```python +TlsRequires.request_client_cert(cn, sans) +``` + +Request a client certificate and key be generated for the given +common name (`cn`) and list of alternative names (`sans`). + +This can be called multiple times to request more than one client +certificate, although the common names must be unique. If called +again with the same common name, it will be ignored. + +

request_application_cert

+ +```python +TlsRequires.request_application_cert(cn, sans) +``` + +Request an application certificate and key be generated for the given +common name (`cn`) and list of alternative names (`sans` ) of this +unit and all peer units. All units will share a single certificates. + diff --git a/kubernetes-worker/hooks/relations/tls-certificates/interface.yaml b/kubernetes-worker/hooks/relations/tls-certificates/interface.yaml new file mode 100644 index 0000000..beec53b --- /dev/null +++ b/kubernetes-worker/hooks/relations/tls-certificates/interface.yaml @@ -0,0 +1,6 @@ +name: tls-certificates +summary: | + A Transport Layer Security (TLS) charm layer that uses requires and provides + to exchange certifcates. +version: 1 +repo: https://github.com/juju-solutions/interface-tls-certificates diff --git a/kubernetes-worker/hooks/relations/tls-certificates/make_docs b/kubernetes-worker/hooks/relations/tls-certificates/make_docs new file mode 100644 index 0000000..2f2274a --- /dev/null +++ b/kubernetes-worker/hooks/relations/tls-certificates/make_docs @@ -0,0 +1,23 @@ +#!.tox/py3/bin/python + +import sys +import importlib +from pathlib import Path +from shutil import rmtree +from unittest.mock import patch + +import pydocmd.__main__ + + +with patch('charmhelpers.core.hookenv.metadata') as metadata: + metadata.return_value = { + 'requires': {'cert': {'interface': 'tls-certificates'}}, + 'provides': {'cert': {'interface': 'tls-certificates'}}, + } + sys.path.append('..') + sys.modules[''] = importlib.import_module(Path.cwd().name) + print(sys.argv) + if len(sys.argv) == 1: + sys.argv.extend(['build']) + pydocmd.__main__.main() + rmtree('_build') diff --git a/kubernetes-worker/hooks/relations/tls-certificates/provides.py b/kubernetes-worker/hooks/relations/tls-certificates/provides.py new file mode 100644 index 0000000..0262baa --- /dev/null +++ b/kubernetes-worker/hooks/relations/tls-certificates/provides.py @@ -0,0 +1,301 @@ +if not __package__: + # fix relative imports when 
building docs + import sys + __package__ = sys.modules[''].__name__ + +from charms.reactive import Endpoint +from charms.reactive import when, when_not +from charms.reactive import set_flag, clear_flag, toggle_flag + +from .tls_certificates_common import ( + ApplicationCertificateRequest, + CertificateRequest +) + + +class TlsProvides(Endpoint): + """ + The provider's side of the interface protocol. + + The following flags may be set: + + * `{endpoint_name}.available` + Whenever any clients are joined. + + * `{endpoint_name}.certs.requested` + When there are new certificate requests of any kind to be processed. + The requests can be accessed via [new_requests][]. + + * `{endpoint_name}.server.certs.requested` + When there are new server certificate requests to be processed. + The requests can be accessed via [new_server_requests][]. + + * `{endpoint_name}.client.certs.requested` + When there are new client certificate requests to be processed. + The requests can be accessed via [new_client_requests][]. 
+ + [Certificate]: common.md#tls_certificates_common.Certificate + [CertificateRequest]: common.md#tls_certificates_common.CertificateRequest + [all_requests]: provides.md#provides.TlsProvides.all_requests + [new_requests]: provides.md#provides.TlsProvides.new_requests + [new_server_requests]: provides.md#provides.TlsProvides.new_server_requests + [new_client_requests]: provides.md#provides.TlsProvides.new_client_requests + """ + + @when('endpoint.{endpoint_name}.joined') + def joined(self): + set_flag(self.expand_name('{endpoint_name}.available')) + toggle_flag(self.expand_name('{endpoint_name}.certs.requested'), + self.new_requests) + toggle_flag(self.expand_name('{endpoint_name}.server.certs.requested'), + self.new_server_requests) + toggle_flag(self.expand_name('{endpoint_name}.client.certs.requested'), + self.new_client_requests) + toggle_flag( + self.expand_name('{endpoint_name}.application.certs.requested'), + self.new_application_requests) + # For backwards compatibility, set the old "cert" flags as well + toggle_flag(self.expand_name('{endpoint_name}.server.cert.requested'), + self.new_server_requests) + toggle_flag(self.expand_name('{endpoint_name}.client.cert.requested'), + self.new_client_requests) + + @when_not('endpoint.{endpoint_name}.joined') + def broken(self): + clear_flag(self.expand_name('{endpoint_name}.available')) + clear_flag(self.expand_name('{endpoint_name}.certs.requested')) + clear_flag(self.expand_name('{endpoint_name}.server.certs.requested')) + clear_flag(self.expand_name('{endpoint_name}.client.certs.requested')) + clear_flag( + self.expand_name('{endpoint_name}.application.certs.requested')) + + def set_ca(self, certificate_authority): + """ + Publish the CA to all related applications. + """ + for relation in self.relations: + # All the clients get the same CA, so send it to them. + relation.to_publish_raw['ca'] = certificate_authority + + def set_chain(self, chain): + """ + Publish the chain of trust to all related applications. 
+ """ + for relation in self.relations: + # All the clients get the same chain, so send it to them. + relation.to_publish_raw['chain'] = chain + + def set_client_cert(self, cert, key): + """ + Deprecated. This is only for backwards compatibility. + + Publish a globally shared client cert and key. + """ + for relation in self.relations: + relation.to_publish_raw.update({ + 'client.cert': cert, + 'client.key': key, + }) + + def set_server_cert(self, scope, cert, key): + """ + Deprecated. Use one of the [new_requests][] collections and + `request.set_cert()` instead. + + Set the server cert and key for the request identified by `scope`. + """ + request = self.get_server_requests()[scope] + request.set_cert(cert, key) + + def set_server_multicerts(self, scope): + """ + Deprecated. Done automatically. + """ + pass + + def add_server_cert(self, scope, cn, cert, key): + ''' + Deprecated. Use `request.set_cert()` instead. + ''' + self.set_server_cert(scope, cert, key) + + def get_server_requests(self): + """ + Deprecated. Use the [new_requests][] or [server_requests][] + collections instead. + + One provider can have many requests to generate server certificates. + Return a map of all server request objects indexed by a unique + identifier. + """ + return {req._key: req for req in self.new_server_requests} + + @property + def all_requests(self): + """ + List of all requests that have been made. + + Each will be an instance of [CertificateRequest][]. 
+ + Example usage: + + ```python + @when('certs.regen', + 'tls.certs.available') + def regen_all_certs(): + tls = endpoint_from_flag('tls.certs.available') + for request in tls.all_requests: + cert, key = generate_cert(request.cert_type, + request.common_name, + request.sans) + request.set_cert(cert, key) + ``` + """ + requests = [] + for unit in self.all_joined_units: + # handle older single server cert request + if unit.received_raw['common_name']: + requests.append(CertificateRequest( + unit, + 'server', + unit.received_raw['certificate_name'], + unit.received_raw['common_name'], + unit.received['sans'], + )) + + # handle mutli server cert requests + reqs = unit.received['cert_requests'] or {} + for common_name, req in reqs.items(): + requests.append(CertificateRequest( + unit, + 'server', + common_name, + common_name, + req['sans'], + )) + + # handle client cert requests + reqs = unit.received['client_cert_requests'] or {} + for common_name, req in reqs.items(): + requests.append(CertificateRequest( + unit, + 'client', + common_name, + common_name, + req['sans'], + )) + # handle application cert requests + reqs = unit.received['application_cert_requests'] or {} + for common_name, req in reqs.items(): + requests.append(ApplicationCertificateRequest( + unit, + 'application', + common_name, + common_name, + req['sans'] + )) + return requests + + @property + def new_requests(self): + """ + Filtered view of [all_requests][] that only includes requests that + haven't been handled. + + Each will be an instance of [CertificateRequest][]. + + This collection can also be further filtered by request type using + [new_server_requests][] or [new_client_requests][]. 
+ + Example usage: + + ```python + @when('tls.certs.requested') + def gen_certs(): + tls = endpoint_from_flag('tls.certs.requested') + for request in tls.new_requests: + cert, key = generate_cert(request.cert_type, + request.common_name, + request.sans) + request.set_cert(cert, key) + ``` + """ + return [req for req in self.all_requests if not req.is_handled] + + @property + def new_server_requests(self): + """ + Filtered view of [new_requests][] that only includes server cert + requests. + + Each will be an instance of [CertificateRequest][]. + + Example usage: + + ```python + @when('tls.server.certs.requested') + def gen_server_certs(): + tls = endpoint_from_flag('tls.server.certs.requested') + for request in tls.new_server_requests: + cert, key = generate_server_cert(request.common_name, + request.sans) + request.set_cert(cert, key) + ``` + """ + return [req for req in self.new_requests if req.cert_type == 'server'] + + @property + def new_client_requests(self): + """ + Filtered view of [new_requests][] that only includes client cert + requests. + + Each will be an instance of [CertificateRequest][]. + + Example usage: + + ```python + @when('tls.client.certs.requested') + def gen_client_certs(): + tls = endpoint_from_flag('tls.client.certs.requested') + for request in tls.new_client_requests: + cert, key = generate_client_cert(request.common_name, + request.sans) + request.set_cert(cert, key) + ``` + """ + return [req for req in self.new_requests if req.cert_type == 'client'] + + @property + def new_application_requests(self): + """ + Filtered view of [new_requests][] that only includes application cert + requests. + + Each will be an instance of [ApplicationCertificateRequest][]. 
+ + Example usage: + + ```python + @when('tls.application.certs.requested') + def gen_application_certs(): + tls = endpoint_from_flag('tls.application.certs.requested') + for request in tls.new_application_requests: + cert, key = generate_application_cert(request.common_name, + request.sans) + request.set_cert(cert, key) + ``` + + :returns: List of certificate requests. + :rtype: [CertificateRequest, ] + """ + return [req for req in self.new_requests + if req.cert_type == 'application'] + + @property + def all_published_certs(self): + """ + List of all [Certificate][] instances that this provider has published + for all related applications. + """ + return [req.cert for req in self.all_requests if req.cert] diff --git a/kubernetes-worker/hooks/relations/tls-certificates/pydocmd.yml b/kubernetes-worker/hooks/relations/tls-certificates/pydocmd.yml new file mode 100644 index 0000000..c568913 --- /dev/null +++ b/kubernetes-worker/hooks/relations/tls-certificates/pydocmd.yml @@ -0,0 +1,19 @@ +site_name: 'TLS Certificates Interface' + +generate: + - requires.md: + - requires + - requires.TlsRequires+ + - provides.md: + - provides + - provides.TlsProvides+ + - common.md: + - tls_certificates_common.CertificateRequest+ + - tls_certificates_common.Certificate+ + +pages: + - Requires: requires.md + - Provides: provides.md + - Common: common.md + +gens_dir: docs diff --git a/kubernetes-worker/hooks/relations/tls-certificates/requires.py b/kubernetes-worker/hooks/relations/tls-certificates/requires.py new file mode 100644 index 0000000..951f953 --- /dev/null +++ b/kubernetes-worker/hooks/relations/tls-certificates/requires.py @@ -0,0 +1,342 @@ +if not __package__: + # fix relative imports when building docs + import sys + __package__ = sys.modules[''].__name__ + +import uuid + +from charmhelpers.core import hookenv + +from charms.reactive import when, when_not +from charms.reactive import set_flag, clear_flag, toggle_flag +from charms.reactive import Endpoint +from 
charms.reactive import data_changed + +from .tls_certificates_common import Certificate + + +class TlsRequires(Endpoint): + """ + The client's side of the interface protocol. + + The following flags may be set: + + * `{endpoint_name}.available` + Whenever the relation is joined. + + * `{endpoint_name}.ca.available` + When the root CA information is available via the [root_ca_cert][] and + [root_ca_chain][] properties. + + * `{endpoint_name}.ca.changed` + When the root CA information has changed, whether because + they have just become available or if they were regenerated by the CA. + Once processed this flag should be removed by the charm. + + * `{endpoint_name}.certs.available` + When the requested server or client certs are available. + + * `{endpoint_name}.certs.changed` + When the requested server or client certs have changed, whether because + they have just become available or if they were regenerated by the CA. + Once processed this flag should be removed by the charm. + + * `{endpoint_name}.server.certs.available` + When the server certificates requested by [request_server_cert][] are + available via the [server_certs][] collection. + + * `{endpoint_name}.server.certs.changed` + When the requested server certificates have changed, whether because + they have just become available or if they were regenerated by the CA. + Once processed this flag should be removed by the charm. + + * `{endpoint_name}.client.certs.available` + When the client certificates requested by [request_client_cert][] are + available via the [client_certs][] collection. + + * `{endpoint_name}.client.certs.changed` + When the requested client certificates have changed, whether because + they have just become available or if they were regenerated by the CA. + Once processed this flag should be removed by the charm. 
+ + The following flags have been deprecated: + + * `{endpoint_name}.server.cert.available` + * `{endpoint_name}.client.cert.available` + * `{endpoint_name}.batch.cert.available` + + [Certificate]: common.md#tls_certificates_common.Certificate + [CertificateRequest]: common.md#tls_certificates_common.CertificateRequest + [root_ca_cert]: requires.md#requires.TlsRequires.root_ca_cert + [root_ca_chain]: requires.md#requires.TlsRequires.root_ca_chain + [request_server_cert]: requires.md#requires.TlsRequires.request_server_cert + [request_client_cert]: requires.md#requires.TlsRequires.request_client_cert + [server_certs]: requires.md#requires.TlsRequires.server_certs + [server_certs_map]: requires.md#requires.TlsRequires.server_certs_map + [client_certs]: requires.md#requires.TlsRequires.server_certs + """ + + @when('endpoint.{endpoint_name}.joined') + def joined(self): + self.relations[0].to_publish_raw['unit_name'] = self._unit_name + prefix = self.expand_name('{endpoint_name}.') + ca_available = self.root_ca_cert + ca_changed = ca_available and data_changed(prefix + 'ca', + self.root_ca_cert) + server_available = self.server_certs + server_changed = server_available and data_changed(prefix + 'servers', + self.server_certs) + client_available = self.client_certs + client_changed = client_available and data_changed(prefix + 'clients', + self.client_certs) + certs_available = server_available or client_available + certs_changed = server_changed or client_changed + + set_flag(prefix + 'available') + toggle_flag(prefix + 'ca.available', ca_available) + toggle_flag(prefix + 'ca.changed', ca_changed) + toggle_flag(prefix + 'server.certs.available', server_available) + toggle_flag(prefix + 'server.certs.changed', server_changed) + toggle_flag(prefix + 'client.certs.available', client_available) + toggle_flag(prefix + 'client.certs.changed', client_changed) + toggle_flag(prefix + 'certs.available', certs_available) + toggle_flag(prefix + 'certs.changed', certs_changed) + # 
deprecated + toggle_flag(prefix + 'server.cert.available', self.server_certs) + toggle_flag(prefix + 'client.cert.available', self.get_client_cert()) + toggle_flag(prefix + 'batch.cert.available', self.server_certs) + + @when_not('endpoint.{endpoint_name}.joined') + def broken(self): + prefix = self.expand_name('{endpoint_name}.') + clear_flag(prefix + 'available') + clear_flag(prefix + 'ca.available') + clear_flag(prefix + 'ca.changed') + clear_flag(prefix + 'server.certs.available') + clear_flag(prefix + 'server.certs.changed') + clear_flag(prefix + 'client.certs.available') + clear_flag(prefix + 'client.certs.changed') + clear_flag(prefix + 'certs.available') + clear_flag(prefix + 'certs.changed') + # deprecated + clear_flag(prefix + 'server.cert.available') + clear_flag(prefix + 'client.cert.available') + clear_flag(prefix + 'batch.cert.available') + + @property + def _unit_name(self): + return hookenv.local_unit().replace('/', '_') + + @property + def root_ca_cert(self): + """ + Root CA certificate. + """ + # only the leader of the provider should set the CA, or all units + # had better agree + return self.all_joined_units.received_raw['ca'] + + def get_ca(self): + """ + Return the root CA certificate. + + Same as [root_ca_cert][]. + """ + return self.root_ca_cert + + @property + def root_ca_chain(self): + """ + The chain of trust for the root CA. + """ + # only the leader of the provider should set the CA, or all units + # had better agree + return self.all_joined_units.received_raw['chain'] + + def get_chain(self): + """ + Return the chain of trust for the root CA. + + Same as [root_ca_chain][]. + """ + return self.root_ca_chain + + def get_client_cert(self): + """ + Deprecated. Use [request_client_cert][] and the [client_certs][] + collection instead. + + Return a globally shared client certificate and key. + """ + data = self.all_joined_units.received_raw + return (data['client.cert'], data['client.key']) + + def get_server_cert(self): + """ + Deprecated. 
Use the [server_certs][] collection instead. + + Return the cert and key of the first server certificate requested. + """ + if not self.server_certs: + return (None, None) + cert = self.server_certs[0] + return (cert.cert, cert.key) + + @property + def server_certs(self): + """ + List of [Certificate][] instances for all available server certs. + """ + certs = [] + raw_data = self.all_joined_units.received_raw + json_data = self.all_joined_units.received + + # for backwards compatibility, the first cert goes in its own fields + if self.relations: + common_name = self.relations[0].to_publish_raw['common_name'] + cert = raw_data['{}.server.cert'.format(self._unit_name)] + key = raw_data['{}.server.key'.format(self._unit_name)] + if cert and key: + certs.append(Certificate('server', + common_name, + cert, + key)) + + # subsequent requests go in the collection + field = '{}.processed_requests'.format(self._unit_name) + certs_data = json_data[field] or {} + certs.extend(Certificate('server', + common_name, + cert['cert'], + cert['key']) + for common_name, cert in certs_data.items()) + return certs + + @property + def application_certs(self): + """ + List containg the application Certificate cert. + + :returns: A list containing one certificate + :rtype: [Certificate()] + """ + certs = [] + json_data = self.all_joined_units.received + field = '{}.processed_application_requests'.format(self._unit_name) + certs_data = json_data[field] or {} + app_cert_data = certs_data.get('app_data') + if app_cert_data: + certs = [Certificate( + 'server', + 'app_data', + app_cert_data['cert'], + app_cert_data['key'])] + return certs + + @property + def server_certs_map(self): + """ + Mapping of server [Certificate][] instances by their `common_name`. + """ + return {cert.common_name: cert for cert in self.server_certs} + + def get_batch_requests(self): + """ + Deprecated. Use [server_certs_map][] instead. + + Mapping of server [Certificate][] instances by their `common_name`. 
+ """ + return self.server_certs_map + + @property + def client_certs(self): + """ + List of [Certificate][] instances for all available client certs. + """ + field = '{}.processed_client_requests'.format(self._unit_name) + certs_data = self.all_joined_units.received[field] or {} + return [Certificate('client', + common_name, + cert['cert'], + cert['key']) + for common_name, cert in certs_data.items()] + + @property + def client_certs_map(self): + """ + Mapping of client [Certificate][] instances by their `common_name`. + """ + return {cert.common_name: cert for cert in self.client_certs} + + def request_server_cert(self, cn, sans=None, cert_name=None): + """ + Request a server certificate and key be generated for the given + common name (`cn`) and optional list of alternative names (`sans`). + + The `cert_name` is deprecated and not needed. + + This can be called multiple times to request more than one server + certificate, although the common names must be unique. If called + again with the same common name, it will be ignored. + """ + if not self.relations: + return + # assume we'll only be connected to one provider + to_publish_json = self.relations[0].to_publish + to_publish_raw = self.relations[0].to_publish_raw + if to_publish_raw['common_name'] in (None, '', cn): + # for backwards compatibility, first request goes in its own fields + to_publish_raw['common_name'] = cn + to_publish_json['sans'] = sans or [] + cert_name = to_publish_raw.get('certificate_name') or cert_name + if cert_name is None: + cert_name = str(uuid.uuid4()) + to_publish_raw['certificate_name'] = cert_name + else: + # subsequent requests go in the collection + requests = to_publish_json.get('cert_requests', {}) + requests[cn] = {'sans': sans or []} + to_publish_json['cert_requests'] = requests + + def add_request_server_cert(self, cn, sans): + """ + Deprecated. Use [request_server_cert][] instead. 
+ """ + self.request_server_cert(cn, sans) + + def request_server_certs(self): + """ + Deprecated. Just use [request_server_cert][]; this does nothing. + """ + pass + + def request_client_cert(self, cn, sans): + """ + Request a client certificate and key be generated for the given + common name (`cn`) and list of alternative names (`sans`). + + This can be called multiple times to request more than one client + certificate, although the common names must be unique. If called + again with the same common name, it will be ignored. + """ + if not self.relations: + return + # assume we'll only be connected to one provider + to_publish_json = self.relations[0].to_publish + requests = to_publish_json.get('client_cert_requests', {}) + requests[cn] = {'sans': sans} + to_publish_json['client_cert_requests'] = requests + + def request_application_cert(self, cn, sans): + """ + Request an application certificate and key be generated for the given + common name (`cn`) and list of alternative names (`sans` ) of this + unit and all peer units. All units will share a single certificates. 
+ """ + if not self.relations: + return + # assume we'll only be connected to one provider + to_publish_json = self.relations[0].to_publish + requests = to_publish_json.get('application_cert_requests', {}) + requests[cn] = {'sans': sans} + to_publish_json['application_cert_requests'] = requests diff --git a/kubernetes-worker/hooks/relations/tls-certificates/tls_certificates_common.py b/kubernetes-worker/hooks/relations/tls-certificates/tls_certificates_common.py new file mode 100644 index 0000000..99a2f8c --- /dev/null +++ b/kubernetes-worker/hooks/relations/tls-certificates/tls_certificates_common.py @@ -0,0 +1,302 @@ +from charms.reactive import clear_flag, is_data_changed, data_changed + + +class CertificateRequest(dict): + def __init__(self, unit, cert_type, cert_name, common_name, sans): + self._unit = unit + self._cert_type = cert_type + super().__init__({ + 'certificate_name': cert_name, + 'common_name': common_name, + 'sans': sans, + }) + + @property + def _key(self): + return '.'.join((self._unit.relation.relation_id, + self.unit_name, + self.common_name)) + + def resolve_unit_name(self, unit): + """Return name of unit associated with this request. + + unit_name should be provided in the relation data to ensure + compatability with cross-model relations. If the unit name + is absent then fall back to unit_name attribute of the + unit associated with this request. + + :param unit: Unit to extract name from + :type unit: charms.reactive.endpoints.RelatedUnit + :returns: Name of unit + :rtype: str + """ + unit_name = unit.received_raw['unit_name'] + if not unit_name: + unit_name = unit.unit_name + return unit_name + + @property + def unit_name(self): + """Name of this unit. + + :returns: Name of unit + :rtype: str + """ + return self.resolve_unit_name(unit=self._unit).replace('/', '_') + + @property + def application_name(self): + """Name of the application which the request came from. 
+ + :returns: Name of application + :rtype: str + """ + return self.resolve_unit_name(unit=self._unit).split('/')[0] + + @property + def cert_type(self): + """ + Type of certificate, 'server' or 'client', being requested. + """ + return self._cert_type + + @property + def cert_name(self): + return self['certificate_name'] + + @property + def common_name(self): + return self['common_name'] + + @property + def sans(self): + return self['sans'] + + @property + def _publish_key(self): + if self.cert_type == 'server': + return '{}.processed_requests'.format(self.unit_name) + elif self.cert_type == 'client': + return '{}.processed_client_requests'.format(self.unit_name) + raise ValueError('Unknown cert_type: {}'.format(self.cert_type)) + + @property + def _server_cert_key(self): + return '{}.server.cert'.format(self.unit_name) + + @property + def _server_key_key(self): + return '{}.server.key'.format(self.unit_name) + + @property + def _is_top_level_server_cert(self): + return (self.cert_type == 'server' and + self.common_name == self._unit.received_raw['common_name']) + + @property + def cert(self): + """ + The cert published for this request, if any. 
+ """ + cert, key = None, None + if self._is_top_level_server_cert: + tpr = self._unit.relation.to_publish_raw + cert = tpr[self._server_cert_key] + key = tpr[self._server_key_key] + else: + tp = self._unit.relation.to_publish + certs_data = tp.get(self._publish_key, {}) + cert_data = certs_data.get(self.common_name, {}) + cert = cert_data.get('cert') + key = cert_data.get('key') + if cert and key: + return Certificate(self.cert_type, self.common_name, cert, key) + return None + + @property + def is_handled(self): + has_cert = self.cert is not None + same_sans = not is_data_changed(self._key, + sorted(set(self.sans or []))) + return has_cert and same_sans + + def set_cert(self, cert, key): + rel = self._unit.relation + if self._is_top_level_server_cert: + # backwards compatibility; if this is the cert that was requested + # as a single server cert, set it in the response as the single + # server cert + rel.to_publish_raw.update({ + self._server_cert_key: cert, + self._server_key_key: key, + }) + else: + data = rel.to_publish.get(self._publish_key, {}) + data[self.common_name] = { + 'cert': cert, + 'key': key, + } + rel.to_publish[self._publish_key] = data + if not rel.endpoint.new_server_requests: + clear_flag(rel.endpoint.expand_name('{endpoint_name}.server' + '.cert.requested')) + if not rel.endpoint.new_requests: + clear_flag(rel.endpoint.expand_name('{endpoint_name}.' + 'certs.requested')) + data_changed(self._key, sorted(set(self.sans or []))) + + +class ApplicationCertificateRequest(CertificateRequest): + """ + A request for an application consistent certificate. + + This is a request for a certificate that works for all units of an + application. All sans and cns are added together to produce one + certificate and the same certificate and key are sent to all the + units of an application. Only one ApplicationCertificateRequest + is needed per application. + """ + + @property + def _key(self): + """Key to identify this cert. 
+ + :returns: cert key + :rtype: str + """ + return '{}.{}'.format(self._unit.relation.relation_id, 'app_cert') + + @property + def cert(self): + """ + The cert published for this request, if any. + + :returns: Certificate + :rtype: Certificate or None + """ + cert, key = None, None + tp = self._unit.relation.to_publish + certs_data = tp.get(self._publish_key, {}) + cert_data = certs_data.get('app_data', {}) + cert = cert_data.get('cert') + key = cert_data.get('key') + if cert and key: + return Certificate(self.cert_type, self.common_name, cert, key) + return None + + @property + def is_handled(self): + """Whether the certificate has been handled. + + :returns: If the cert has been handled + :rtype: bool + """ + has_cert = self.cert is not None + same_sans = not is_data_changed(self._key, + sorted(set(self.sans or []))) + return has_cert and same_sans + + @property + def sans(self): + """Generate a list of all sans from all units of application + + Examine all units of the application and compile a list of + all sans. CNs are treated as addition san entries. + + :returns: List of sans + :rtype: List[str] + """ + _sans = [] + for unit in self._unit.relation.units: + reqs = unit.received['application_cert_requests'] or {} + for cn, req in reqs.items(): + _sans.append(cn) + _sans.extend(req['sans']) + return sorted(list(set(_sans))) + + @property + def _request_key(self): + """Key used to request cert + + :returns: Key used to request cert + :rtype: str + """ + return 'application_cert_requests' + + def derive_publish_key(self, unit=None): + """Derive the application cert publish key for a unit. 
+ + :param unit: Unit to extract name from + :type unit: charms.reactive.endpoints.RelatedUnit + :returns: publish key + :rtype: str + """ + if not unit: + unit = self._unit + unit_name = self.resolve_unit_name(unit).replace('/', '_') + return '{}.processed_application_requests'.format(unit_name) + + @property + def _publish_key(self): + """Key used to publish cert + + :returns: Key used to publish cert + :rtype: str + """ + return self.derive_publish_key(unit=self._unit) + + def set_cert(self, cert, key): + """Send the cert and key to all units of the application + + :param cert: TLS Certificate + :type cert: str + :param key: TLS Private Key + :type cert: str + """ + rel = self._unit.relation + for unit in self._unit.relation.units: + pub_key = self.derive_publish_key(unit=unit) + data = rel.to_publish.get( + pub_key, + {}) + data['app_data'] = { + 'cert': cert, + 'key': key, + } + rel.to_publish[pub_key] = data + if not rel.endpoint.new_application_requests: + clear_flag(rel.endpoint.expand_name( + '{endpoint_name}.application.certs.requested')) + data_changed(self._key, sorted(set(self.sans or []))) + + +class Certificate(dict): + """ + Represents a created certificate and key. + + The ``cert_type``, ``common_name``, ``cert``, and ``key`` values can + be accessed either as properties or as the contents of the dict. 
+ """ + def __init__(self, cert_type, common_name, cert, key): + super().__init__({ + 'cert_type': cert_type, + 'common_name': common_name, + 'cert': cert, + 'key': key, + }) + + @property + def cert_type(self): + return self['cert_type'] + + @property + def common_name(self): + return self['common_name'] + + @property + def cert(self): + return self['cert'] + + @property + def key(self): + return self['key'] diff --git a/kubernetes-worker/hooks/relations/vsphere-integration/.gitignore b/kubernetes-worker/hooks/relations/vsphere-integration/.gitignore new file mode 100644 index 0000000..5f9f2c5 --- /dev/null +++ b/kubernetes-worker/hooks/relations/vsphere-integration/.gitignore @@ -0,0 +1,3 @@ +.tox +__pycache__ +*.pyc diff --git a/kubernetes-worker/hooks/relations/vsphere-integration/LICENSE b/kubernetes-worker/hooks/relations/vsphere-integration/LICENSE new file mode 100644 index 0000000..d645695 --- /dev/null +++ b/kubernetes-worker/hooks/relations/vsphere-integration/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. 
+ + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. 
You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. 
Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/kubernetes-worker/hooks/relations/vsphere-integration/README.md b/kubernetes-worker/hooks/relations/vsphere-integration/README.md new file mode 100644 index 0000000..28ff438 --- /dev/null +++ b/kubernetes-worker/hooks/relations/vsphere-integration/README.md @@ -0,0 +1,28 @@ +# Overview + +This layer encapsulates the `vsphere-integration` interface communication +protocol and provides an API for charms on either side of relations using this +interface. 
+ +## Usage + +In your charm's `layer.yaml`, ensure that `interface:vsphere-integration` is +included in the `includes` section: + +```yaml +includes: ['layer:basic', 'interface:vsphere-integration'] +``` + +And in your charm's `metadata.yaml`, ensure that a relation endpoint is defined +using the `vsphere-integration` interface protocol: + +```yaml +requires: + vsphere: + interface: vsphere-integration +``` + +For documentation on how to use the API for this interface, see: + +* [Requires API documentation](docs/requires.md) +* [Provides API documentation](docs/provides.md) (this will only be used by the vsphere-integrator charm) diff --git a/kubernetes-worker/hooks/relations/vsphere-integration/__init__.py b/kubernetes-worker/hooks/relations/vsphere-integration/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/kubernetes-worker/hooks/relations/vsphere-integration/copyright b/kubernetes-worker/hooks/relations/vsphere-integration/copyright new file mode 100644 index 0000000..a91bdf1 --- /dev/null +++ b/kubernetes-worker/hooks/relations/vsphere-integration/copyright @@ -0,0 +1,16 @@ +Format: http://dep.debian.net/deps/dep5/ + +Files: * +Copyright: Copyright 2018, Canonical Ltd., All Rights Reserved. +License: Apache License 2.0 + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + . + http://www.apache.org/licenses/LICENSE-2.0 + . + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/kubernetes-worker/hooks/relations/vsphere-integration/docs/provides.md b/kubernetes-worker/hooks/relations/vsphere-integration/docs/provides.md new file mode 100644 index 0000000..796b7e6 --- /dev/null +++ b/kubernetes-worker/hooks/relations/vsphere-integration/docs/provides.md @@ -0,0 +1,74 @@ +

provides

+ + +This is the provides side of the interface layer, for use only by the +vSphere integration charm itself. + +The flags that are set by the provides side of this interface are: + +* **`endpoint.{endpoint_name}.requested`** This flag is set when there is + a new or updated request by a remote unit for vSphere integration + features. The vSphere integration charm should then iterate over each + request, perform whatever actions are necessary to satisfy those requests, + and then mark them as complete. + +

VsphereIntegrationProvides

+ +```python +VsphereIntegrationProvides(self, endpoint_name, relation_ids=None) +``` + +Example usage: + +```python +from charms.reactive import when, endpoint_from_flag +from charms import layer + +@when('endpoint.vsphere.requests-pending') +def handle_requests(): + vsphere = endpoint_from_flag('endpoint.vsphere.requests-pending') + for request in vsphere.requests: + request.set_credentials(layer.vsphere.get_user_credentials()) + vsphere.mark_completed() +``` + +

requests

+ + +A list of the new or updated `IntegrationRequests` that +have been made. + +

mark_completed

+ +```python +VsphereIntegrationProvides.mark_completed(self) +``` + +Mark all requests as completed and remove the `requests-pending` flag. + +

IntegrationRequest

+ +```python +IntegrationRequest(self, unit) +``` + +A request for integration from a single remote unit. + +

has_credentials

+ + +Whether or not credentials have been set via `set_credentials`. + +

is_changed

+ + +Whether this request has changed since the last time it was +marked completed (if ever). + +

set_credentials

+ +```python +IntegrationRequest.set_credentials(self, vsphere_ip, user, password, datacenter, datastore) +``` + +Set the credentials for this request. diff --git a/kubernetes-worker/hooks/relations/vsphere-integration/docs/requires.md b/kubernetes-worker/hooks/relations/vsphere-integration/docs/requires.md new file mode 100644 index 0000000..0ce10a9 --- /dev/null +++ b/kubernetes-worker/hooks/relations/vsphere-integration/docs/requires.md @@ -0,0 +1,56 @@ +

requires

+ + +This is the requires side of the interface layer, for use in charms that wish +to request integration with vSphere native features. The integration will be +provided by the vSphere integration charm, which allows the requiring charm +to not require cloud credentials itself and not have a lot of vSphere +specific API code. + +The flags that are set by the requires side of this interface are: + +* **`endpoint.{endpoint_name}.joined`** This flag is set when the relation + has been joined, and the charm should then use the methods documented below + to request specific vSphere features. This flag is automatically removed + if the relation is broken. It should not be removed by the charm. + +* **`endpoint.{endpoint_name}.ready`** This flag is set once the requested + features have been enabled for the vSphere instance on which the charm is + running. This flag is automatically removed if new integration features are + requested. It should not be removed by the charm. + +

VsphereIntegrationRequires

+ +```python +VsphereIntegrationRequires(self, endpoint_name, relation_ids=None) +``` + +Interface to request integration access. + +Note that due to resource limits and permissions granularity, policies are +limited to being applied at the charm level. That means that, if any +permissions are requested (i.e., any of the enable methods are called), +what is granted will be the sum of those ever requested by any instance of +the charm on this cloud. + +Labels, on the other hand, will be instance specific. + +Example usage: + +```python +from charms.reactive import when, endpoint_from_flag + +@when('endpoint.vsphere.ready') +def vsphere_integration_ready(): + vsphere = endpoint_from_flag('endpoint.vsphere.joined') + update_config_enable_vsphere(vsphere.vsphere_ip, + vsphere.user, + vsphere.password, + vsphere.datacenter, + vsphere.datastore) +``` + +

is_ready

+ + +Whether or not the request for this instance has been completed. diff --git a/kubernetes-worker/hooks/relations/vsphere-integration/interface.yaml b/kubernetes-worker/hooks/relations/vsphere-integration/interface.yaml new file mode 100644 index 0000000..c4c0c07 --- /dev/null +++ b/kubernetes-worker/hooks/relations/vsphere-integration/interface.yaml @@ -0,0 +1,4 @@ +name: vsphere-integration +summary: Interface for connecting to the VMware vSphere integrator charm. +version: 1 +maintainer: Kevin Monroe diff --git a/kubernetes-worker/hooks/relations/vsphere-integration/make_docs b/kubernetes-worker/hooks/relations/vsphere-integration/make_docs new file mode 100644 index 0000000..04cf35b --- /dev/null +++ b/kubernetes-worker/hooks/relations/vsphere-integration/make_docs @@ -0,0 +1,20 @@ +#!.tox/py3/bin/python + +import sys +from shutil import rmtree +from unittest.mock import patch + +import pydocmd.__main__ + + +with patch('charmhelpers.core.hookenv.metadata') as metadata: + metadata.return_value = { + 'requires': {'vsphere': {'interface': 'vsphere'}}, + 'provides': {'vsphere': {'interface': 'vsphere'}}, + } + sys.path.insert(0, '.') + print(sys.argv) + if len(sys.argv) == 1: + sys.argv.extend(['build']) + pydocmd.__main__.main() + rmtree('_build') diff --git a/kubernetes-worker/hooks/relations/vsphere-integration/provides.py b/kubernetes-worker/hooks/relations/vsphere-integration/provides.py new file mode 100644 index 0000000..c3db1d8 --- /dev/null +++ b/kubernetes-worker/hooks/relations/vsphere-integration/provides.py @@ -0,0 +1,132 @@ +""" +This is the provides side of the interface layer, for use only by the +vSphere integration charm itself. + +The flags that are set by the provides side of this interface are: + +* **`endpoint.{endpoint_name}.requested`** This flag is set when there is + a new or updated request by a remote unit for vSphere integration + features. 
The vSphere integration charm should then iterate over each + request, perform whatever actions are necessary to satisfy those requests, + and then mark them as complete. +""" + +from operator import attrgetter + +from charms.reactive import Endpoint +from charms.reactive import when +from charms.reactive import toggle_flag, clear_flag + + +class VsphereIntegrationProvides(Endpoint): + """ + Example usage: + + ```python + from charms.reactive import when, endpoint_from_flag + from charms import layer + + @when('endpoint.vsphere.requests-pending') + def handle_requests(): + vsphere = endpoint_from_flag('endpoint.vsphere.requests-pending') + for request in vsphere.requests: + request.set_credentials(layer.vsphere.get_vsphere_credentials()) + request.set_config(layer.vsphere.get_vsphere_config()) + vsphere.mark_completed() + ``` + """ + + @when('endpoint.{endpoint_name}.changed') + def check_requests(self): + toggle_flag(self.expand_name('requests-pending'), + len(self.new_requests) > 0) + clear_flag(self.expand_name('changed')) + + @property + def all_requests(self): + """ + A list of all the #IntegrationRequests that have been made. + """ + return [IntegrationRequest(unit) for unit in self.all_joined_units] + + @property + def new_requests(self): + """ + A list of the new or updated #IntegrationRequests that have been made. + """ + is_changed = attrgetter('is_changed') + return list(filter(is_changed, self.all_requests)) + + def mark_completed(self): + """ + Remove the `requests-pending` flag. + """ + clear_flag(self.expand_name('requests-pending')) + + +class IntegrationRequest: + """ + A request for integration from a single remote unit. + """ + def __init__(self, unit): + self._unit = unit + + @property + def _to_publish(self): + return self._unit.relation.to_publish + + @property + def has_credentials(self): + """ + Whether or not `set_credentials` has been called. 
+ """ + return {'vsphere_ip', 'user', + 'password', 'datacenter'}.issubset(self._to_publish) + + @property + def has_config(self): + """ + Whether or not `set_config` has been called. + """ + return {'datastore', 'folder', + 'respool_path'}.issubset(self._to_publish) + + @property + def is_changed(self): + """ + Whether this request has changed since the last time it was + marked completed (if ever). + """ + return not (self.has_credentials and self.has_config) + + @property + def unit_name(self): + return self._unit.unit_name + + def set_credentials(self, + vsphere_ip, + user, + password, + datacenter): + """ + Set the vsphere credentials for this request. + """ + self._to_publish.update({ + 'vsphere_ip': vsphere_ip, + 'user': user, + 'password': password, + 'datacenter': datacenter, + }) + + def set_config(self, + datastore, + folder, + respool_path): + """ + Set the non-credential vsphere config for this request. + """ + self._to_publish.update({ + 'datastore': datastore, + 'folder': folder, + 'respool_path': respool_path, + }) diff --git a/kubernetes-worker/hooks/relations/vsphere-integration/pydocmd.yml b/kubernetes-worker/hooks/relations/vsphere-integration/pydocmd.yml new file mode 100644 index 0000000..e1d5d4a --- /dev/null +++ b/kubernetes-worker/hooks/relations/vsphere-integration/pydocmd.yml @@ -0,0 +1,16 @@ +site_name: 'VMware vSphere Integration Interface' + +generate: + - requires.md: + - requires + - requires.VsphereIntegrationRequires+ + - provides.md: + - provides + - provides.VsphereIntegrationProvides+ + - provides.IntegrationRequest+ + +pages: + - Requires: requires.md + - Provides: provides.md + +gens_dir: docs diff --git a/kubernetes-worker/hooks/relations/vsphere-integration/requires.py b/kubernetes-worker/hooks/relations/vsphere-integration/requires.py new file mode 100644 index 0000000..d8b9cdb --- /dev/null +++ b/kubernetes-worker/hooks/relations/vsphere-integration/requires.py @@ -0,0 +1,141 @@ +""" +This is the requires side of the 
interface layer, for use in charms that wish +to request integration with vSphere native features. The integration will be +provided by the vSphere integration charm, which allows the requiring charm +to not require cloud credentials itself and not have a lot of vSphere +specific API code. + +The flags that are set by the requires side of this interface are: + +* **`endpoint.{endpoint_name}.joined`** This flag is set when the relation + has been joined, and the charm should then use the methods documented below + to request specific vSphere features. This flag is automatically removed + if the relation is broken. It should not be removed by the charm. + +* **`endpoint.{endpoint_name}.ready`** This flag is set once the requested + features have been enabled for the vSphere instance on which the charm is + running. This flag is automatically removed if new integration features are + requested. It should not be removed by the charm. + +* **`endpoint.{endpoint_name}.ready.changed`** This flag is set if the data + changes after the ready flag was set. This flag should be removed by the + charm once handled. +""" + + +from charms.reactive import Endpoint +from charms.reactive import when, when_not +from charms.reactive import clear_flag, is_flag_set, set_flag, toggle_flag +from charms.reactive import data_changed + + +class VsphereIntegrationRequires(Endpoint): + """ + Interface to request integration access. + + Note that due to resource limits and permissions granularity, policies are + limited to being applied at the charm level. That means that, if any + permissions are requested (i.e., any of the enable methods are called), + what is granted will be the sum of those ever requested by any instance of + the charm on this cloud. + + Labels, on the other hand, will be instance specific. 
+ + Example usage: + + ```python + from charms.reactive import when, endpoint_from_flag + + @when('endpoint.vsphere.ready') + def vsphere_integration_ready(): + vsphere = endpoint_from_flag('endpoint.vsphere.joined') + update_config_enable_vsphere(vsphere.vsphere_ip, + vsphere.user, + vsphere.password, + vsphere.datacenter, + vsphere.datastore, + vsphere.folder, + vsphere.respool_path) + ``` + """ + + @property + def _received(self): + """ + Helper to streamline access to received data. + """ + return self.all_joined_units.received + + @when('endpoint.{endpoint_name}.changed') + def check_ready(self): + """ + Manage flags to signal when the endpoint is ready as well as noting + if changes have been made since it became ready. + """ + was_ready = is_flag_set(self.expand_name('ready')) + toggle_flag(self.expand_name('ready'), self.is_ready) + if self.is_ready and was_ready and self.is_changed: + set_flag(self.expand_name('ready.changed')) + clear_flag(self.expand_name('changed')) + + @when_not('endpoint.{endpoint_name}.joined') + def remove_ready(self): + clear_flag(self.expand_name('ready')) + + @property + def is_ready(self): + """ + Whether or not the request for this instance has been completed. + """ + return all(field is not None for field in [ + self.vsphere_ip, + self.user, + self.password, + self.datacenter, + self.datastore, + self.folder, + self.respool_path, + ]) + + @property + def is_changed(self): + """ + Whether or not the request for this instance has changed. 
+ """ + return data_changed(self.expand_name('all-data'), [ + self.vsphere_ip, + self.user, + self.password, + self.datacenter, + self.datastore, + self.folder, + self.respool_path, + ]) + + @property + def vsphere_ip(self): + return self._received['vsphere_ip'] + + @property + def user(self): + return self._received['user'] + + @property + def password(self): + return self._received['password'] + + @property + def datacenter(self): + return self._received['datacenter'] + + @property + def datastore(self): + return self._received['datastore'] + + @property + def folder(self): + return self._received['folder'] + + @property + def respool_path(self): + return self._received['respool_path'] diff --git a/kubernetes-worker/hooks/scrape-relation-broken b/kubernetes-worker/hooks/scrape-relation-broken new file mode 100755 index 0000000..9858c6b --- /dev/null +++ b/kubernetes-worker/hooks/scrape-relation-broken @@ -0,0 +1,22 @@ +#!/usr/bin/env python3 + +# Load modules from $JUJU_CHARM_DIR/lib +import sys +sys.path.append('lib') + +from charms.layer import basic # noqa +basic.bootstrap_charm_deps() + +from charmhelpers.core import hookenv # noqa +hookenv.atstart(basic.init_config_states) +hookenv.atexit(basic.clear_config_states) + + +# This will load and run the appropriate @hook and other decorated +# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive, +# and $JUJU_CHARM_DIR/hooks/relations. +# +# See https://jujucharms.com/docs/stable/authors-charm-building +# for more information on this pattern. 
+from charms.reactive import main # noqa +main() diff --git a/kubernetes-worker/hooks/scrape-relation-changed b/kubernetes-worker/hooks/scrape-relation-changed new file mode 100755 index 0000000..9858c6b --- /dev/null +++ b/kubernetes-worker/hooks/scrape-relation-changed @@ -0,0 +1,22 @@ +#!/usr/bin/env python3 + +# Load modules from $JUJU_CHARM_DIR/lib +import sys +sys.path.append('lib') + +from charms.layer import basic # noqa +basic.bootstrap_charm_deps() + +from charmhelpers.core import hookenv # noqa +hookenv.atstart(basic.init_config_states) +hookenv.atexit(basic.clear_config_states) + + +# This will load and run the appropriate @hook and other decorated +# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive, +# and $JUJU_CHARM_DIR/hooks/relations. +# +# See https://jujucharms.com/docs/stable/authors-charm-building +# for more information on this pattern. +from charms.reactive import main # noqa +main() diff --git a/kubernetes-worker/hooks/scrape-relation-created b/kubernetes-worker/hooks/scrape-relation-created new file mode 100755 index 0000000..9858c6b --- /dev/null +++ b/kubernetes-worker/hooks/scrape-relation-created @@ -0,0 +1,22 @@ +#!/usr/bin/env python3 + +# Load modules from $JUJU_CHARM_DIR/lib +import sys +sys.path.append('lib') + +from charms.layer import basic # noqa +basic.bootstrap_charm_deps() + +from charmhelpers.core import hookenv # noqa +hookenv.atstart(basic.init_config_states) +hookenv.atexit(basic.clear_config_states) + + +# This will load and run the appropriate @hook and other decorated +# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive, +# and $JUJU_CHARM_DIR/hooks/relations. +# +# See https://jujucharms.com/docs/stable/authors-charm-building +# for more information on this pattern. 
+from charms.reactive import main # noqa +main() diff --git a/kubernetes-worker/hooks/scrape-relation-departed b/kubernetes-worker/hooks/scrape-relation-departed new file mode 100755 index 0000000..9858c6b --- /dev/null +++ b/kubernetes-worker/hooks/scrape-relation-departed @@ -0,0 +1,22 @@ +#!/usr/bin/env python3 + +# Load modules from $JUJU_CHARM_DIR/lib +import sys +sys.path.append('lib') + +from charms.layer import basic # noqa +basic.bootstrap_charm_deps() + +from charmhelpers.core import hookenv # noqa +hookenv.atstart(basic.init_config_states) +hookenv.atexit(basic.clear_config_states) + + +# This will load and run the appropriate @hook and other decorated +# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive, +# and $JUJU_CHARM_DIR/hooks/relations. +# +# See https://jujucharms.com/docs/stable/authors-charm-building +# for more information on this pattern. +from charms.reactive import main # noqa +main() diff --git a/kubernetes-worker/hooks/scrape-relation-joined b/kubernetes-worker/hooks/scrape-relation-joined new file mode 100755 index 0000000..9858c6b --- /dev/null +++ b/kubernetes-worker/hooks/scrape-relation-joined @@ -0,0 +1,22 @@ +#!/usr/bin/env python3 + +# Load modules from $JUJU_CHARM_DIR/lib +import sys +sys.path.append('lib') + +from charms.layer import basic # noqa +basic.bootstrap_charm_deps() + +from charmhelpers.core import hookenv # noqa +hookenv.atstart(basic.init_config_states) +hookenv.atexit(basic.clear_config_states) + + +# This will load and run the appropriate @hook and other decorated +# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive, +# and $JUJU_CHARM_DIR/hooks/relations. +# +# See https://jujucharms.com/docs/stable/authors-charm-building +# for more information on this pattern. 
+from charms.reactive import main # noqa +main() diff --git a/kubernetes-worker/hooks/start b/kubernetes-worker/hooks/start new file mode 100755 index 0000000..9858c6b --- /dev/null +++ b/kubernetes-worker/hooks/start @@ -0,0 +1,22 @@ +#!/usr/bin/env python3 + +# Load modules from $JUJU_CHARM_DIR/lib +import sys +sys.path.append('lib') + +from charms.layer import basic # noqa +basic.bootstrap_charm_deps() + +from charmhelpers.core import hookenv # noqa +hookenv.atstart(basic.init_config_states) +hookenv.atexit(basic.clear_config_states) + + +# This will load and run the appropriate @hook and other decorated +# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive, +# and $JUJU_CHARM_DIR/hooks/relations. +# +# See https://jujucharms.com/docs/stable/authors-charm-building +# for more information on this pattern. +from charms.reactive import main # noqa +main() diff --git a/kubernetes-worker/hooks/stop b/kubernetes-worker/hooks/stop new file mode 100755 index 0000000..9858c6b --- /dev/null +++ b/kubernetes-worker/hooks/stop @@ -0,0 +1,22 @@ +#!/usr/bin/env python3 + +# Load modules from $JUJU_CHARM_DIR/lib +import sys +sys.path.append('lib') + +from charms.layer import basic # noqa +basic.bootstrap_charm_deps() + +from charmhelpers.core import hookenv # noqa +hookenv.atstart(basic.init_config_states) +hookenv.atexit(basic.clear_config_states) + + +# This will load and run the appropriate @hook and other decorated +# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive, +# and $JUJU_CHARM_DIR/hooks/relations. +# +# See https://jujucharms.com/docs/stable/authors-charm-building +# for more information on this pattern. 
+from charms.reactive import main # noqa +main() diff --git a/kubernetes-worker/hooks/update-status b/kubernetes-worker/hooks/update-status new file mode 100755 index 0000000..9858c6b --- /dev/null +++ b/kubernetes-worker/hooks/update-status @@ -0,0 +1,22 @@ +#!/usr/bin/env python3 + +# Load modules from $JUJU_CHARM_DIR/lib +import sys +sys.path.append('lib') + +from charms.layer import basic # noqa +basic.bootstrap_charm_deps() + +from charmhelpers.core import hookenv # noqa +hookenv.atstart(basic.init_config_states) +hookenv.atexit(basic.clear_config_states) + + +# This will load and run the appropriate @hook and other decorated +# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive, +# and $JUJU_CHARM_DIR/hooks/relations. +# +# See https://jujucharms.com/docs/stable/authors-charm-building +# for more information on this pattern. +from charms.reactive import main # noqa +main() diff --git a/kubernetes-worker/hooks/upgrade-charm b/kubernetes-worker/hooks/upgrade-charm new file mode 100755 index 0000000..9858c6b --- /dev/null +++ b/kubernetes-worker/hooks/upgrade-charm @@ -0,0 +1,22 @@ +#!/usr/bin/env python3 + +# Load modules from $JUJU_CHARM_DIR/lib +import sys +sys.path.append('lib') + +from charms.layer import basic # noqa +basic.bootstrap_charm_deps() + +from charmhelpers.core import hookenv # noqa +hookenv.atstart(basic.init_config_states) +hookenv.atexit(basic.clear_config_states) + + +# This will load and run the appropriate @hook and other decorated +# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive, +# and $JUJU_CHARM_DIR/hooks/relations. +# +# See https://jujucharms.com/docs/stable/authors-charm-building +# for more information on this pattern. 
+from charms.reactive import main # noqa +main() diff --git a/kubernetes-worker/hooks/vsphere-relation-broken b/kubernetes-worker/hooks/vsphere-relation-broken new file mode 100755 index 0000000..9858c6b --- /dev/null +++ b/kubernetes-worker/hooks/vsphere-relation-broken @@ -0,0 +1,22 @@ +#!/usr/bin/env python3 + +# Load modules from $JUJU_CHARM_DIR/lib +import sys +sys.path.append('lib') + +from charms.layer import basic # noqa +basic.bootstrap_charm_deps() + +from charmhelpers.core import hookenv # noqa +hookenv.atstart(basic.init_config_states) +hookenv.atexit(basic.clear_config_states) + + +# This will load and run the appropriate @hook and other decorated +# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive, +# and $JUJU_CHARM_DIR/hooks/relations. +# +# See https://jujucharms.com/docs/stable/authors-charm-building +# for more information on this pattern. +from charms.reactive import main # noqa +main() diff --git a/kubernetes-worker/hooks/vsphere-relation-changed b/kubernetes-worker/hooks/vsphere-relation-changed new file mode 100755 index 0000000..9858c6b --- /dev/null +++ b/kubernetes-worker/hooks/vsphere-relation-changed @@ -0,0 +1,22 @@ +#!/usr/bin/env python3 + +# Load modules from $JUJU_CHARM_DIR/lib +import sys +sys.path.append('lib') + +from charms.layer import basic # noqa +basic.bootstrap_charm_deps() + +from charmhelpers.core import hookenv # noqa +hookenv.atstart(basic.init_config_states) +hookenv.atexit(basic.clear_config_states) + + +# This will load and run the appropriate @hook and other decorated +# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive, +# and $JUJU_CHARM_DIR/hooks/relations. +# +# See https://jujucharms.com/docs/stable/authors-charm-building +# for more information on this pattern. 
+from charms.reactive import main # noqa +main() diff --git a/kubernetes-worker/hooks/vsphere-relation-created b/kubernetes-worker/hooks/vsphere-relation-created new file mode 100755 index 0000000..9858c6b --- /dev/null +++ b/kubernetes-worker/hooks/vsphere-relation-created @@ -0,0 +1,22 @@ +#!/usr/bin/env python3 + +# Load modules from $JUJU_CHARM_DIR/lib +import sys +sys.path.append('lib') + +from charms.layer import basic # noqa +basic.bootstrap_charm_deps() + +from charmhelpers.core import hookenv # noqa +hookenv.atstart(basic.init_config_states) +hookenv.atexit(basic.clear_config_states) + + +# This will load and run the appropriate @hook and other decorated +# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive, +# and $JUJU_CHARM_DIR/hooks/relations. +# +# See https://jujucharms.com/docs/stable/authors-charm-building +# for more information on this pattern. +from charms.reactive import main # noqa +main() diff --git a/kubernetes-worker/hooks/vsphere-relation-departed b/kubernetes-worker/hooks/vsphere-relation-departed new file mode 100755 index 0000000..9858c6b --- /dev/null +++ b/kubernetes-worker/hooks/vsphere-relation-departed @@ -0,0 +1,22 @@ +#!/usr/bin/env python3 + +# Load modules from $JUJU_CHARM_DIR/lib +import sys +sys.path.append('lib') + +from charms.layer import basic # noqa +basic.bootstrap_charm_deps() + +from charmhelpers.core import hookenv # noqa +hookenv.atstart(basic.init_config_states) +hookenv.atexit(basic.clear_config_states) + + +# This will load and run the appropriate @hook and other decorated +# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive, +# and $JUJU_CHARM_DIR/hooks/relations. +# +# See https://jujucharms.com/docs/stable/authors-charm-building +# for more information on this pattern. 
+from charms.reactive import main # noqa +main() diff --git a/kubernetes-worker/hooks/vsphere-relation-joined b/kubernetes-worker/hooks/vsphere-relation-joined new file mode 100755 index 0000000..9858c6b --- /dev/null +++ b/kubernetes-worker/hooks/vsphere-relation-joined @@ -0,0 +1,22 @@ +#!/usr/bin/env python3 + +# Load modules from $JUJU_CHARM_DIR/lib +import sys +sys.path.append('lib') + +from charms.layer import basic # noqa +basic.bootstrap_charm_deps() + +from charmhelpers.core import hookenv # noqa +hookenv.atstart(basic.init_config_states) +hookenv.atexit(basic.clear_config_states) + + +# This will load and run the appropriate @hook and other decorated +# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive, +# and $JUJU_CHARM_DIR/hooks/relations. +# +# See https://jujucharms.com/docs/stable/authors-charm-building +# for more information on this pattern. +from charms.reactive import main # noqa +main() diff --git a/kubernetes-worker/icon.svg b/kubernetes-worker/icon.svg new file mode 100644 index 0000000..a212804 --- /dev/null +++ b/kubernetes-worker/icon.svg @@ -0,0 +1,95 @@ + + + + + + + + + + image/svg+xml + + + + + + + + + + + node + + + diff --git a/kubernetes-worker/layer.yaml b/kubernetes-worker/layer.yaml new file mode 100644 index 0000000..d7a6863 --- /dev/null +++ b/kubernetes-worker/layer.yaml @@ -0,0 +1,76 @@ +"includes": +- "layer:options" +- "layer:basic" +- "layer:status" +- "interface:nrpe-external-master" +- "layer:debug" +- "interface:tls-certificates" +- "layer:cis-benchmark" +- "layer:coordinator" +- "layer:kubernetes-common" +- "interface:container-runtime" +- "layer:apt" +- "layer:snap" +- "layer:leadership" +- "layer:metrics" +- "layer:nagios" +- "layer:tls-client" +- "layer:cdk-service-kicker" +- "layer:kubernetes-node-base" +- "interface:http" +- "interface:kubernetes-cni" +- "interface:kube-control" +- "interface:aws-integration" +- "interface:gcp-integration" +- "interface:openstack-integration" +- 
"interface:vsphere-integration" +- "interface:prometheus" +- "interface:azure-integration" +- "interface:mount" +"exclude": [".travis.yml", "tests", "tox.ini", "test-requirements.txt", "unit_tests", + ".tox", "__pycache__", "Makefile"] +"options": + "coordinator": + # Absolute path to the charmhelpers.coordinator.BaseCoordinator to use. + "class": "charms.coordinator.SimpleCoordinator" + # Layer log level (debug, info, warning, error, critical) + "log_level": "info" + "basic": + "packages": + - "cifs-utils" + - "ceph-common" + - "nfs-common" + - "socat" + "python_packages": [] + "use_venv": !!bool "true" + "include_system_packages": !!bool "false" + "apt": + "packages": [] + "version_package": "" + "full_version": !!bool "false" + "keys": [] + "tls-client": + "ca_certificate_path": "/root/cdk/ca.crt" + "server_certificate_path": "" + "server_key_path": "" + "client_certificate_path": "" + "client_key_path": "" + "cdk-service-kicker": + "services": + - "snap.kubelet.daemon" + - "snap.kube-proxy.daemon" + "status": + "patch-hookenv": !!bool "true" + "debug": {} + "snap": {} + "leadership": {} + "nagios": {} + "cis-benchmark": {} + "kubernetes-common": {} + "kubernetes-node-base": {} + "kubernetes-worker": {} +"repo": "https://github.com/kubernetes/kubernetes.git" +"config": + "deletes": + - "install_from_upstream" +"is": "kubernetes-worker" diff --git a/kubernetes-worker/lib/charms/apt.py b/kubernetes-worker/lib/charms/apt.py new file mode 100644 index 0000000..14508c4 --- /dev/null +++ b/kubernetes-worker/lib/charms/apt.py @@ -0,0 +1,209 @@ +# Copyright 2015-2020 Canonical Ltd. +# +# This file is part of the Apt layer for Juju. +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License version 3, as +# published by the Free Software Foundation. 
+# +# This program is distributed in the hope that it will be useful, but +# WITHOUT ANY WARRANTY; without even the implied warranties of +# MERCHANTABILITY, SATISFACTORY QUALITY, or FITNESS FOR A PARTICULAR +# PURPOSE. See the GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . + +''' +charms.reactive helpers for dealing with deb packages. + +Add apt package sources using add_source(). Queue deb packages for +installation with install(). Configure and work with your software +once the apt.installed.{packagename} flag is set. +''' +import itertools +import re +import subprocess + +from charmhelpers import fetch +from charmhelpers.core import hookenv, unitdata +from charms import layer, reactive +from charms.layer import status +from charms.reactive import flags + + +__all__ = ['add_source', 'update', 'queue_install', 'install_queued', 'installed', 'purge', 'ensure_package_status'] + + +def add_source(source, key=None): + '''Add an apt source. + + Sets the apt.needs_update flag. + + A source may be either a line that can be added directly to + sources.list(5), or in the form ppa:/ for adding + Personal Package Archives, or a distribution component to enable. + + The package signing key should be an ASCII armoured GPG key. While + GPG key ids are also supported, the retrieval mechanism is insecure. + There is no need to specify the package signing key for PPAs or for + the main Ubuntu archives. + ''' + # Maybe we should remember which sources have been added already + # so we don't waste time re-adding them. Is this time significant? + fetch.add_source(source, key) + reactive.set_flag('apt.needs_update') + + +def queue_install(packages, options=None): + """Queue one or more deb packages for install. + + The `apt.installed.{name}` flag is set once the package is installed. + + If a package has already been installed it will not be reinstalled. 
+ + If a package has already been queued it will not be requeued, and + the install options will not be changed. + + Sets the apt.queued_installs flag. + """ + if isinstance(packages, str): + packages = [packages] + # Filter installed packages. + store = unitdata.kv() + queued_packages = store.getrange('apt.install_queue.', strip=True) + packages = { + package: options + for package in packages + if not (package in queued_packages or reactive.is_flag_set('apt.installed.' + package)) + } + if packages: + unitdata.kv().update(packages, prefix='apt.install_queue.') + reactive.set_flag('apt.queued_installs') + + +def installed(): + '''Return the set of deb packages completed install''' + return set(flag.split('.', 2)[2] for flag in flags.get_flags() if flag.startswith('apt.installed.')) + + +def purge(packages): + """Purge one or more deb packages from the system""" + fetch.apt_purge(packages, fatal=True) + store = unitdata.kv() + store.unsetrange(packages, prefix='apt.install_queue.') + for package in packages: + reactive.clear_flag('apt.installed.{}'.format(package)) + + +def update(): + """Update the apt cache. + + Removes the apt.needs_update flag. + """ + status.maintenance('Updating apt cache') + fetch.apt_update(fatal=True) # Friends don't let friends set fatal=False + reactive.clear_flag('apt.needs_update') + + +def install_queued(): + '''Installs queued deb packages. + + Removes the apt.queued_installs flag and sets the apt.installed flag. + + On failure, sets the unit's workload status to 'blocked' and returns + False. Package installs remain queued. + + On success, sets the apt.installed.{packagename} flag for each + installed package and returns True. 
+ ''' + store = unitdata.kv() + queue = sorted((options, package) for package, options in store.getrange('apt.install_queue.', strip=True).items()) + + installed = set() + for options, batch in itertools.groupby(queue, lambda x: x[0]): + packages = [b[1] for b in batch] + try: + status.maintenance('Installing {}'.format(','.join(packages))) + fetch.apt_install(packages, options, fatal=True) + store.unsetrange(packages, prefix='apt.install_queue.') + installed.update(packages) + except subprocess.CalledProcessError: + status.blocked('Unable to install packages {}'.format(','.join(packages))) + return False # Without setting reactive flag. + + for package in installed: + reactive.set_flag('apt.installed.{}'.format(package)) + reactive.clear_flag('apt.queued_installs') + + reset_application_version() + + return True + + +def get_package_version(package, full_version=False): + '''Return the version of an installed package. + + If `full_version` is True, returns the full Debian package version. + Otherwise, returns the shorter 'upstream' version number. + ''' + # Don't use fetch.get_upstream_version, as it depends on python-apt + # and not available if the basic layer's use_site_packages option is off. + cmd = ['dpkg-query', '--show', r'--showformat=${Version}\n', package] + full = subprocess.check_output(cmd, universal_newlines=True).strip() + if not full_version: + # Attempt to strip off Debian style metadata from the end of the + # version number. + m = re.search(r'^([\d.a-z]+)', full, re.I) + if m is not None: + return m.group(1) + return full + + +def reset_application_version(): + '''Set the Juju application version, per settings in layer.yaml''' + # Reset the application version. We call this after installing + # packages to initialize the version. We also call this every + # hook, incase the version has changed (eg. Landscape upgraded + # the package). 
+ opts = layer.options().get('apt', {}) + pkg = opts.get('version_package') + if pkg and pkg in installed(): + ver = get_package_version(pkg, opts.get('full_version', False)) + hookenv.application_version_set(ver) + + +def ensure_package_status(): + '''Hold or unhold packages per the package_status configuration option. + + All packages installed using this module and handlers are affected. + + An mechanism may be added in the future to override this for a + subset of installed packages. + ''' + packages = installed() + if not packages: + return + config = hookenv.config() + package_status = config.get('package_status') or '' + changed = reactive.data_changed('apt.package_status', (package_status, sorted(packages))) + if changed: + if package_status == 'hold': + hookenv.log('Holding packages {}'.format(','.join(packages))) + fetch.apt_hold(packages) + else: + hookenv.log('Unholding packages {}'.format(','.join(packages))) + fetch.apt_unhold(packages) + reactive.clear_flag('apt.needs_hold') + + +def status_set(state, message): + '''DEPRECATED, set the unit's workload status. + + Set state == None to keep the same state and just change the message. + ''' + if state is None: + state = hookenv.status_get()[0] + if state not in ('active', 'waiting', 'blocked'): + state = 'maintenance' # Guess + status.status_set(state, message) diff --git a/kubernetes-worker/lib/charms/coordinator.py b/kubernetes-worker/lib/charms/coordinator.py new file mode 100644 index 0000000..b954b92 --- /dev/null +++ b/kubernetes-worker/lib/charms/coordinator.py @@ -0,0 +1,144 @@ +# Copyright 2015-2016 Canonical Ltd. +# +# This file is part of the Coordinator Layer for Juju. +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License version 3, as +# published by the Free Software Foundation. 
+# +# This program is distributed in the hope that it will be useful, but +# WITHOUT ANY WARRANTY; without even the implied warranties of +# MERCHANTABILITY, SATISFACTORY QUALITY, or FITNESS FOR A PARTICULAR +# PURPOSE. See the GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . + +import importlib + +from charmhelpers.coordinator import BaseCoordinator +from charmhelpers.core import hookenv +from charms import reactive +import charms.layer + + +__all__ = ['coordinator', 'acquire'] + + +def acquire(lock): + """ + Sets either the coordinator.granted.{lockname} or + coordinator.requested.{lockname} state. + + Returns True if the lock could be immediately granted. + + If locks cannot be granted immediately, they will be granted + in a future hook and the coordinator.granted.{lockname} state set. + """ + global coordinator + if coordinator.acquire(lock): + s = 'coordinator.granted.{}'.format(lock) + if not reactive.is_state(s): + log('Granted {} lock'.format(lock), hookenv.DEBUG) + reactive.set_state('coordinator.granted.{}'.format(lock)) + return True + else: + log('Requested {} lock'.format(lock), hookenv.DEBUG) + reactive.set_state('coordinator.requested.{}'.format(lock)) + return False + + +options = charms.layer.options('coordinator') + + +def log(msg, level=hookenv.INFO): + lmap = {hookenv.DEBUG: 1, + hookenv.INFO: 2, + hookenv.WARNING: 3, + hookenv.ERROR: 4, + hookenv.CRITICAL: 5} + if lmap[level] >= lmap[options.get('log_level', 'DEBUG').upper()]: + hookenv.log('Coordinator: {}'.format(msg), level) + + +class SimpleCoordinator(BaseCoordinator): + '''A simple BaseCoordinator that is suitable for almost all cases. + + Only one unit at a time will be granted locks. All requests by that + unit will be granted. So only one unit may run tasks guarded by a lock, + and the lock name is irrelevant. 
+ ''' + def default_grant(self, lock, unit, granted, queue): + '''Grant locks to only one unit at a time, regardless of the lock name. + + This lets us keep separate locks like join and restart, + while ensuring the operations do not occur on different nodes + at the same time. + ''' + existing_grants = {k: v for k, v in self.grants.items() if v} + + # Return True if this unit has already been granted any lock. + if existing_grants.get(unit): + self.msg('Granting {} to {} (existing grants)'.format(lock, unit), + hookenv.INFO) + return True + + # Return False if another unit has been granted any lock. + if existing_grants: + self.msg('Not granting {} to {} (locks held by {})' + ''.format(lock, unit, ','.join(existing_grants.keys())), + hookenv.INFO) + return False + + # Otherwise, return True if the unit is first in the queue for + # this named lock. + if queue[0] == unit: + self.msg('Granting {} to {} (first in queue)' + ''.format(lock, unit), hookenv.INFO) + return True + else: + self.msg('Not granting {} to {} (not first in queue)' + ''.format(lock, unit), hookenv.INFO) + return False + + def msg(self, msg, level=hookenv.DEBUG): + '''Emit a message.''' + log(msg, level) + + def _save_state(self): + # If the leader aquired a lock, and now released it, + # there may be outstanding requests in the queue from other + # units. We need to grant them now, as we have no guarantee + # of another hook running on the leader for some time (until + # update-status). 
+ self.handle() + super(SimpleCoordinator, self)._save_state() + + +def _instantiate(): + default_name = 'charms.coordinator.SimpleCoordinator' + full_name = options.get('class', default_name) + components = full_name.split('.') + module = '.'.join(components[:-1]) + name = components[-1] + + if not module: + module = 'charms.coordinator' + + class_ = getattr(importlib.import_module(module), name) + + assert issubclass(class_, BaseCoordinator), \ + '{} is not a BaseCoordinator subclass'.format(full_name) + + try: + # The Coordinator layer defines its own peer relation, as it + # can't piggy back on an existing peer relation that may not + # exist. + return class_(peer_relation_name='coordinator') + finally: + log('Using {} coordinator'.format(full_name), hookenv.DEBUG) + + +# Instantiate the BaseCoordinator singleton, which installs +# its charmhelpers.core.atstart() hooks. +coordinator = _instantiate() diff --git a/kubernetes-worker/lib/charms/layer/__init__.py b/kubernetes-worker/lib/charms/layer/__init__.py new file mode 100644 index 0000000..a8e0c64 --- /dev/null +++ b/kubernetes-worker/lib/charms/layer/__init__.py @@ -0,0 +1,60 @@ +import sys +from importlib import import_module +from pathlib import Path + + +def import_layer_libs(): + """ + Ensure that all layer libraries are imported. + + This makes it possible to do the following: + + from charms import layer + + layer.foo.do_foo_thing() + + Note: This function must be called after bootstrap. + """ + for module_file in Path('lib/charms/layer').glob('*'): + module_name = module_file.stem + if module_name in ('__init__', 'basic', 'execd') or not ( + module_file.suffix == '.py' or module_file.is_dir() + ): + continue + import_module('charms.layer.{}'.format(module_name)) + + +# Terrible hack to support the old terrible interface. +# Try to get people to call layer.options.get() instead so +# that we can remove this garbage. 
+# Cribbed from https://stackoverfLow.com/a/48100440/4941864 +class OptionsBackwardsCompatibilityHack(sys.modules[__name__].__class__): + def __call__(self, section=None, layer_file=None): + if layer_file is None: + return self.get(section=section) + else: + return self.get(section=section, + layer_file=Path(layer_file)) + + +def patch_options_interface(): + from charms.layer import options + if sys.version_info.minor >= 5: + options.__class__ = OptionsBackwardsCompatibilityHack + else: + # Py 3.4 doesn't support changing the __class__, so we have to do it + # another way. The last line is needed because we already have a + # reference that doesn't get updated with sys.modules. + name = options.__name__ + hack = OptionsBackwardsCompatibilityHack(name) + hack.get = options.get + sys.modules[name] = hack + sys.modules[__name__].options = hack + + +try: + patch_options_interface() +except ImportError: + # This may fail if pyyaml hasn't been installed yet. But in that + # case, the bootstrap logic will try it again once it has. + pass diff --git a/kubernetes-worker/lib/charms/layer/basic.py b/kubernetes-worker/lib/charms/layer/basic.py new file mode 100644 index 0000000..9122f7c --- /dev/null +++ b/kubernetes-worker/lib/charms/layer/basic.py @@ -0,0 +1,508 @@ +import os +import sys +import re +import shutil +from distutils.version import LooseVersion +from pkg_resources import Requirement +from glob import glob +from subprocess import check_call, check_output, CalledProcessError +from time import sleep + +from charms import layer +from charms.layer.execd import execd_preinstall + + +def _get_subprocess_env(): + env = os.environ.copy() + env['LANG'] = env.get('LANG', 'C.UTF-8') + return env + + +def get_series(): + """ + Return series for a few known OS:es. + Tested as of 2019 november: + * centos6, centos7, rhel6. 
+ * bionic + """ + series = "" + + # Looking for content in /etc/os-release + # works for ubuntu + some centos + if os.path.isfile('/etc/os-release'): + d = {} + with open('/etc/os-release', 'r') as rel: + for l in rel: + if not re.match(r'^\s*$', l): + k, v = l.split('=') + d[k.strip()] = v.strip().replace('"', '') + series = "{ID}{VERSION_ID}".format(**d) + + # Looking for content in /etc/redhat-release + # works for redhat enterprise systems + elif os.path.isfile('/etc/redhat-release'): + with open('/etc/redhat-release', 'r') as redhatlsb: + # CentOS Linux release 7.7.1908 (Core) + line = redhatlsb.readline() + release = int(line.split("release")[1].split()[0][0]) + series = "centos" + str(release) + + # Looking for content in /etc/lsb-release + # works for ubuntu + elif os.path.isfile('/etc/lsb-release'): + d = {} + with open('/etc/lsb-release', 'r') as lsb: + for l in lsb: + k, v = l.split('=') + d[k.strip()] = v.strip() + series = d['DISTRIB_CODENAME'] + + # This is what happens if we cant figure out the OS. + else: + series = "unknown" + return series + + +def bootstrap_charm_deps(): + """ + Set up the base charm dependencies so that the reactive system can run. + """ + # execd must happen first, before any attempt to install packages or + # access the network, because sites use this hook to do bespoke + # configuration and install secrets so the rest of this bootstrap + # and the charm itself can actually succeed. This call does nothing + # unless the operator has created and populated $JUJU_CHARM_DIR/exec.d. + execd_preinstall() + # ensure that $JUJU_CHARM_DIR/bin is on the path, for helper scripts + + series = get_series() + + # OMG?! is build-essentials needed? 
+ ubuntu_packages = ['python3-pip', + 'python3-setuptools', + 'python3-yaml', + 'python3-dev', + 'python3-wheel', + 'build-essential'] + + # I'm not going to "yum group info "Development Tools" + # omitting above madness + centos_packages = ['python3-pip', + 'python3-setuptools', + 'python3-devel', + 'python3-wheel'] + + packages_needed = [] + if 'centos' in series: + packages_needed = centos_packages + else: + packages_needed = ubuntu_packages + + charm_dir = os.environ['JUJU_CHARM_DIR'] + os.environ['PATH'] += ':%s' % os.path.join(charm_dir, 'bin') + venv = os.path.abspath('../.venv') + vbin = os.path.join(venv, 'bin') + vpip = os.path.join(vbin, 'pip') + vpy = os.path.join(vbin, 'python') + hook_name = os.path.basename(sys.argv[0]) + is_bootstrapped = os.path.exists('wheelhouse/.bootstrapped') + is_charm_upgrade = hook_name == 'upgrade-charm' + is_series_upgrade = hook_name == 'post-series-upgrade' + is_post_upgrade = os.path.exists('wheelhouse/.upgraded') + is_upgrade = (not is_post_upgrade and + (is_charm_upgrade or is_series_upgrade)) + if is_bootstrapped and not is_upgrade: + # older subordinates might have downgraded charm-env, so we should + # restore it if necessary + install_or_update_charm_env() + activate_venv() + # the .upgrade file prevents us from getting stuck in a loop + # when re-execing to activate the venv; at this point, we've + # activated the venv, so it's safe to clear it + if is_post_upgrade: + os.unlink('wheelhouse/.upgraded') + return + if os.path.exists(venv): + try: + # focal installs or upgrades prior to PR 160 could leave the venv + # in a broken state which would prevent subsequent charm upgrades + _load_installed_versions(vpip) + except CalledProcessError: + is_broken_venv = True + else: + is_broken_venv = False + if is_upgrade or is_broken_venv: + # All upgrades should do a full clear of the venv, rather than + # just updating it, to bring in updates to Python itself + shutil.rmtree(venv) + if is_upgrade: + if 
os.path.exists('wheelhouse/.bootstrapped'): + os.unlink('wheelhouse/.bootstrapped') + # bootstrap wheelhouse + if os.path.exists('wheelhouse'): + pre_eoan = series in ('ubuntu12.04', 'precise', + 'ubuntu14.04', 'trusty', + 'ubuntu16.04', 'xenial', + 'ubuntu18.04', 'bionic') + pydistutils_lines = [ + "[easy_install]\n", + "find_links = file://{}/wheelhouse/\n".format(charm_dir), + "no_index=True\n", + "index_url=\n", # deliberately nothing here; disables it. + ] + if pre_eoan: + pydistutils_lines.append("allow_hosts = ''\n") + with open('/root/.pydistutils.cfg', 'w') as fp: + # make sure that easy_install also only uses the wheelhouse + # (see https://github.com/pypa/pip/issues/410) + fp.writelines(pydistutils_lines) + if 'centos' in series: + yum_install(packages_needed) + else: + apt_install(packages_needed) + from charms.layer import options + cfg = options.get('basic') + # include packages defined in layer.yaml + if 'centos' in series: + yum_install(cfg.get('packages', [])) + else: + apt_install(cfg.get('packages', [])) + # if we're using a venv, set it up + if cfg.get('use_venv'): + if not os.path.exists(venv): + series = get_series() + if series in ('ubuntu12.04', 'precise', + 'ubuntu14.04', 'trusty'): + apt_install(['python-virtualenv']) + elif 'centos' in series: + yum_install(['python-virtualenv']) + else: + apt_install(['virtualenv']) + cmd = ['virtualenv', '-ppython3', '--never-download', venv] + if cfg.get('include_system_packages'): + cmd.append('--system-site-packages') + check_call(cmd, env=_get_subprocess_env()) + os.environ['PATH'] = ':'.join([vbin, os.environ['PATH']]) + pip = vpip + else: + pip = 'pip3' + # save a copy of system pip to prevent `pip3 install -U pip` + # from changing it + if os.path.exists('/usr/bin/pip'): + shutil.copy2('/usr/bin/pip', '/usr/bin/pip.save') + pre_install_pkgs = ['pip', 'setuptools', 'setuptools-scm'] + # we bundle these packages to work around bugs in older versions (such + # as 
https://github.com/pypa/pip/issues/56), but if the system already + # provided a newer version, downgrading it can cause other problems + _update_if_newer(pip, pre_install_pkgs) + # install the rest of the wheelhouse deps (extract the pkg names into + # a set so that we can ignore the pre-install packages and let pip + # choose the best version in case there are multiple from layer + # conflicts) + _versions = _load_wheelhouse_versions() + _pkgs = _versions.keys() - set(pre_install_pkgs) + # Jinja2 3+ relies on MarkupSafe actually being installed prior to + # attempting to be installed from the wheelhouse. Thus, if MarkupSafe + # and/or wheel are in _pkgs, then install them first. + _pre_packages = [p for p in _pkgs if p in ('wheel', 'MarkupSafe')] + _pkgs = [p for p in _pkgs if p not in _pre_packages] + for _pkgs_set in (_pre_packages, _pkgs): + # add back the versions such that each package in pkgs is + # ==. + # This ensures that pip 20.3.4+ will install the packages from the + # wheelhouse without (erroneously) flagging an error. 
+ pkgs = _add_back_versions(_pkgs_set, _versions) + reinstall_flag = '--force-reinstall' + # if not cfg.get('use_venv', True) and pre_eoan: + if not cfg.get('use_venv', True): + reinstall_flag = '--ignore-installed' + check_call([pip, 'install', '-U', reinstall_flag, '--no-index', + '--no-cache-dir', '-f', 'wheelhouse'] + list(pkgs), + env=_get_subprocess_env()) + # re-enable installation from pypi + os.remove('/root/.pydistutils.cfg') + + # install pyyaml for centos7, since, unlike the ubuntu image, the + # default image for centos doesn't include pyyaml; see the discussion: + # https://discourse.jujucharms.com/t/charms-for-centos-lets-begin + if 'centos' in series: + check_call([pip, 'install', '-U', 'pyyaml'], + env=_get_subprocess_env()) + + # install python packages from layer options + if cfg.get('python_packages'): + check_call([pip, 'install', '-U'] + cfg.get('python_packages'), + env=_get_subprocess_env()) + if not cfg.get('use_venv'): + # restore system pip to prevent `pip3 install -U pip` + # from changing it + if os.path.exists('/usr/bin/pip.save'): + shutil.copy2('/usr/bin/pip.save', '/usr/bin/pip') + os.remove('/usr/bin/pip.save') + # setup wrappers to ensure envs are used for scripts + install_or_update_charm_env() + for wrapper in ('charms.reactive', 'charms.reactive.sh', + 'chlp', 'layer_option'): + src = os.path.join('/usr/local/sbin', 'charm-env') + dst = os.path.join('/usr/local/sbin', wrapper) + if not os.path.exists(dst): + os.symlink(src, dst) + if cfg.get('use_venv'): + shutil.copy2('bin/layer_option', vbin) + else: + shutil.copy2('bin/layer_option', '/usr/local/bin/') + # re-link the charm copy to the wrapper in case charms + # call bin/layer_option directly (as was the old pattern) + os.remove('bin/layer_option') + os.symlink('/usr/local/sbin/layer_option', 'bin/layer_option') + # flag us as having already bootstrapped so we don't do it again + open('wheelhouse/.bootstrapped', 'w').close() + if is_upgrade: + # flag us as having already 
upgraded so we don't do it again + open('wheelhouse/.upgraded', 'w').close() + # Ensure that the newly bootstrapped libs are available. + # Note: this only seems to be an issue with namespace packages. + # Non-namespace-package libs (e.g., charmhelpers) are available + # without having to reload the interpreter. :/ + reload_interpreter(vpy if cfg.get('use_venv') else sys.argv[0]) + + +def _load_installed_versions(pip): + pip_freeze = check_output([pip, 'freeze']).decode('utf8') + versions = {} + for pkg_ver in pip_freeze.splitlines(): + try: + req = Requirement.parse(pkg_ver) + except ValueError: + continue + versions.update({ + req.project_name: LooseVersion(ver) + for op, ver in req.specs if op == '==' + }) + return versions + + +def _load_wheelhouse_versions(): + versions = {} + for wheel in glob('wheelhouse/*'): + pkg, ver = os.path.basename(wheel).rsplit('-', 1) + # nb: LooseVersion ignores the file extension + versions[pkg.replace('_', '-')] = LooseVersion(ver) + return versions + + +def _add_back_versions(pkgs, versions): + """Add back the version strings to each of the packages. + + The versions are LooseVersion() from _load_wheelhouse_versions(). This + function strips the ".zip" or ".tar.gz" from the end of the version string + and adds it back to the package in the form of == + + If a package name is not a key in the versions dictionary, then it is + returned in the list unchanged. 
+ + :param pkgs: A list of package names + :type pkgs: List[str] + :param versions: A map of package to LooseVersion + :type versions: Dict[str, LooseVersion] + :returns: A list of (maybe) versioned packages + :rtype: List[str] + """ + def _strip_ext(s): + """Strip an extension (if it exists) from the string + + :param s: the string to strip an extension off if it exists + :type s: str + :returns: string without an extension of .zip or .tar.gz + :rtype: str + """ + for ending in [".zip", ".tar.gz"]: + if s.endswith(ending): + return s[:-len(ending)] + return s + + def _maybe_add_version(pkg): + """Maybe add back the version number to a package if it exists. + + Adds the version number, if the package exists in the lexically + captured `versions` dictionary, in the form ==. Strips + the extension if it exists. + + :param pkg: the package name to (maybe) add the version number to. + :type pkg: str + """ + try: + return "{}=={}".format(pkg, _strip_ext(str(versions[pkg]))) + except KeyError: + pass + return pkg + + return [_maybe_add_version(pkg) for pkg in pkgs] + + +def _update_if_newer(pip, pkgs): + installed = _load_installed_versions(pip) + wheelhouse = _load_wheelhouse_versions() + for pkg in pkgs: + if pkg not in installed or wheelhouse[pkg] > installed[pkg]: + check_call([pip, 'install', '-U', '--no-index', '-f', 'wheelhouse', + pkg], env=_get_subprocess_env()) + + +def install_or_update_charm_env(): + # On Trusty python3-pkg-resources is not installed + try: + from pkg_resources import parse_version + except ImportError: + apt_install(['python3-pkg-resources']) + from pkg_resources import parse_version + + try: + installed_version = parse_version( + check_output(['/usr/local/sbin/charm-env', + '--version']).decode('utf8')) + except (CalledProcessError, FileNotFoundError): + installed_version = parse_version('0.0.0') + try: + bundled_version = parse_version( + check_output(['bin/charm-env', + '--version']).decode('utf8')) + except (CalledProcessError, 
FileNotFoundError): + bundled_version = parse_version('0.0.0') + if installed_version < bundled_version: + shutil.copy2('bin/charm-env', '/usr/local/sbin/') + + +def activate_venv(): + """ + Activate the venv if enabled in ``layer.yaml``. + + This is handled automatically for normal hooks, but actions might + need to invoke this manually, using something like: + + # Load modules from $JUJU_CHARM_DIR/lib + import sys + sys.path.append('lib') + + from charms.layer.basic import activate_venv + activate_venv() + + This will ensure that modules installed in the charm's + virtual environment are available to the action. + """ + from charms.layer import options + venv = os.path.abspath('../.venv') + vbin = os.path.join(venv, 'bin') + vpy = os.path.join(vbin, 'python') + use_venv = options.get('basic', 'use_venv') + if use_venv and '.venv' not in sys.executable: + # activate the venv + os.environ['PATH'] = ':'.join([vbin, os.environ['PATH']]) + reload_interpreter(vpy) + layer.patch_options_interface() + layer.import_layer_libs() + + +def reload_interpreter(python): + """ + Reload the python interpreter to ensure that all deps are available. + + Newly installed modules in namespace packages sometimes seemt to + not be picked up by Python 3. + """ + os.execve(python, [python] + list(sys.argv), os.environ) + + +def apt_install(packages): + """ + Install apt packages. + + This ensures a consistent set of options that are often missed but + should really be set. 
+ """ + if isinstance(packages, (str, bytes)): + packages = [packages] + + env = _get_subprocess_env() + + if 'DEBIAN_FRONTEND' not in env: + env['DEBIAN_FRONTEND'] = 'noninteractive' + + cmd = ['apt-get', + '--option=Dpkg::Options::=--force-confold', + '--assume-yes', + 'install'] + for attempt in range(3): + try: + check_call(cmd + packages, env=env) + except CalledProcessError: + if attempt == 2: # third attempt + raise + try: + # sometimes apt-get update needs to be run + check_call(['apt-get', 'update'], env=env) + except CalledProcessError: + # sometimes it's a dpkg lock issue + pass + sleep(5) + else: + break + + +def yum_install(packages): + """ Installs packages with yum. + This function largely mimics the apt_install function for consistency. + """ + if packages: + env = os.environ.copy() + cmd = ['yum', '-y', 'install'] + for attempt in range(3): + try: + check_call(cmd + packages, env=env) + except CalledProcessError: + if attempt == 2: + raise + try: + check_call(['yum', 'update'], env=env) + except CalledProcessError: + pass + sleep(5) + else: + break + else: + pass + + +def init_config_states(): + import yaml + from charmhelpers.core import hookenv + from charms.reactive import set_state + from charms.reactive import toggle_state + config = hookenv.config() + config_defaults = {} + config_defs = {} + config_yaml = os.path.join(hookenv.charm_dir(), 'config.yaml') + if os.path.exists(config_yaml): + with open(config_yaml) as fp: + config_defs = yaml.safe_load(fp).get('options', {}) + config_defaults = {key: value.get('default') + for key, value in config_defs.items()} + for opt in config_defs.keys(): + if config.changed(opt): + set_state('config.changed') + set_state('config.changed.{}'.format(opt)) + toggle_state('config.set.{}'.format(opt), config.get(opt)) + toggle_state('config.default.{}'.format(opt), + config.get(opt) == config_defaults[opt]) + + +def clear_config_states(): + from charmhelpers.core import hookenv, unitdata + from charms.reactive 
import remove_state + config = hookenv.config() + remove_state('config.changed') + for opt in config.keys(): + remove_state('config.changed.{}'.format(opt)) + remove_state('config.set.{}'.format(opt)) + remove_state('config.default.{}'.format(opt)) + unitdata.kv().flush() diff --git a/kubernetes-worker/lib/charms/layer/execd.py b/kubernetes-worker/lib/charms/layer/execd.py new file mode 100644 index 0000000..438d9a1 --- /dev/null +++ b/kubernetes-worker/lib/charms/layer/execd.py @@ -0,0 +1,114 @@ +# Copyright 2014-2016 Canonical Limited. +# +# This file is part of layer-basic, the reactive base layer for Juju. +# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see . + +# This module may only import from the Python standard library. +import os +import sys +import subprocess +import time + +''' +execd/preinstall + +Read the layer-basic docs for more info on how to use this feature. 
+https://charmsreactive.readthedocs.io/en/latest/layer-basic.html#exec-d-support +''' + + +def default_execd_dir(): + return os.path.join(os.environ['JUJU_CHARM_DIR'], 'exec.d') + + +def execd_module_paths(execd_dir=None): + """Generate a list of full paths to modules within execd_dir.""" + if not execd_dir: + execd_dir = default_execd_dir() + + if not os.path.exists(execd_dir): + return + + for subpath in os.listdir(execd_dir): + module = os.path.join(execd_dir, subpath) + if os.path.isdir(module): + yield module + + +def execd_submodule_paths(command, execd_dir=None): + """Generate a list of full paths to the specified command within exec_dir. + """ + for module_path in execd_module_paths(execd_dir): + path = os.path.join(module_path, command) + if os.access(path, os.X_OK) and os.path.isfile(path): + yield path + + +def execd_sentinel_path(submodule_path): + module_path = os.path.dirname(submodule_path) + execd_path = os.path.dirname(module_path) + module_name = os.path.basename(module_path) + submodule_name = os.path.basename(submodule_path) + return os.path.join(execd_path, + '.{}_{}.done'.format(module_name, submodule_name)) + + +def execd_run(command, execd_dir=None, stop_on_error=True, stderr=None): + """Run command for each module within execd_dir which defines it.""" + if stderr is None: + stderr = sys.stdout + for submodule_path in execd_submodule_paths(command, execd_dir): + # Only run each execd once. We cannot simply run them in the + # install hook, as potentially storage hooks are run before that. + # We cannot rely on them being idempotent. 
+ sentinel = execd_sentinel_path(submodule_path) + if os.path.exists(sentinel): + continue + + try: + subprocess.check_call([submodule_path], stderr=stderr, + universal_newlines=True) + with open(sentinel, 'w') as f: + f.write('{} ran successfully {}\n'.format(submodule_path, + time.ctime())) + f.write('Removing this file will cause it to be run again\n') + except subprocess.CalledProcessError as e: + # Logs get the details. We can't use juju-log, as the + # output may be substantial and exceed command line + # length limits. + print("ERROR ({}) running {}".format(e.returncode, e.cmd), + file=stderr) + print("STDOUT<>> `get_version('kubelet') + (1, 6, 0) + + """ + cmd = "{} --version".format(bin_name).split() + version_string = subprocess.check_output(cmd).decode("utf-8") + return tuple(int(q) for q in re.findall("[0-9]+", version_string)[:3]) + + +def retry(times, delay_secs): + """Decorator for retrying a method call. + + Args: + times: How many times should we retry before giving up + delay_secs: Delay in secs + + Returns: A callable that would return the last call outcome + """ + + def retry_decorator(func): + """Decorator to wrap the function provided. 
+ + Args: + func: Provided function should return either True od False + + Returns: A callable that would return the last call outcome + + """ + + def _wrapped(*args, **kwargs): + res = func(*args, **kwargs) + attempt = 0 + while not res and attempt < times: + sleep(delay_secs) + res = func(*args, **kwargs) + if res: + break + attempt += 1 + return res + + return _wrapped + + return retry_decorator + + +def calculate_resource_checksum(resource): + """Calculate a checksum for a resource""" + md5 = hashlib.md5() + path = hookenv.resource_get(resource) + if path: + with open(path, "rb") as f: + data = f.read() + md5.update(data) + return md5.hexdigest() + + +def get_resource_checksum_db_key(checksum_prefix, resource): + """Convert a resource name to a resource checksum database key.""" + return checksum_prefix + resource + + +def migrate_resource_checksums(checksum_prefix, snap_resources): + """Migrate resource checksums from the old schema to the new one""" + for resource in snap_resources: + new_key = get_resource_checksum_db_key(checksum_prefix, resource) + if not db.get(new_key): + path = hookenv.resource_get(resource) + if path: + # old key from charms.reactive.helpers.any_file_changed + old_key = "reactive.files_changed." + path + old_checksum = db.get(old_key) + db.set(new_key, old_checksum) + else: + # No resource is attached. Previously, this meant no checksum + # would be calculated and stored. But now we calculate it as if + # it is a 0-byte resource, so let's go ahead and do that. 
+ zero_checksum = hashlib.md5().hexdigest() + db.set(new_key, zero_checksum) + + +def check_resources_for_upgrade_needed(checksum_prefix, snap_resources): + hookenv.status_set("maintenance", "Checking resources") + for resource in snap_resources: + key = get_resource_checksum_db_key(checksum_prefix, resource) + old_checksum = db.get(key) + new_checksum = calculate_resource_checksum(resource) + if new_checksum != old_checksum: + return True + return False + + +def calculate_and_store_resource_checksums(checksum_prefix, snap_resources): + for resource in snap_resources: + key = get_resource_checksum_db_key(checksum_prefix, resource) + checksum = calculate_resource_checksum(resource) + db.set(key, checksum) + + +def get_ingress_address(endpoint_name, ignore_addresses=None): + try: + network_info = hookenv.network_get(endpoint_name) + except NotImplementedError: + network_info = {} + + if not network_info or "ingress-addresses" not in network_info: + # if they don't have ingress-addresses they are running a juju that + # doesn't support spaces, so just return the private address + return hookenv.unit_get("private-address") + + excluded_ips = [] + excluded_interfaces = ["vxlan", "kube", "wg", "docker", "cali", "virbr", "cni", "flannel"] + for addr in network_info["bind-addresses"]: + for prefix in excluded_interfaces: + if addr["interface-name"].startswith(prefix): + for ip in addr["addresses"]: + excluded_ips.append(ip["value"]) + + ingress_addresses = network_info["ingress-addresses"] + network_info["ingress-addresses"] = [ip for ip in ingress_addresses if ip not in excluded_ips] + + addresses = network_info["ingress-addresses"] + + if ignore_addresses: + hookenv.log("ingress-addresses before filtering: {}".format(addresses)) + iter_filter = filter(lambda item: item not in ignore_addresses, addresses) + addresses = list(iter_filter) + hookenv.log("ingress-addresses after filtering: {}".format(addresses)) + + # Need to prefer non-fan IP addresses due to various issues, 
e.g. + # https://bugs.launchpad.net/charm-gcp-integrator/+bug/1822997 + # Fan typically likes to use IPs in the 240.0.0.0/4 block, so we'll + # prioritize those last. Not technically correct, but good enough. + try: + sort_key = lambda a: int(a.partition(".")[0]) >= 240 # noqa: E731 + addresses = sorted(addresses, key=sort_key) + except Exception: + hookenv.log(traceback.format_exc()) + + return addresses[0] + + +def get_ingress_address6(endpoint_name): + try: + network_info = hookenv.network_get(endpoint_name) + except NotImplementedError: + network_info = {} + + if not network_info or "ingress-addresses" not in network_info: + return None + + addresses = network_info["ingress-addresses"] + + for addr in addresses: + ip_addr = ipaddress.ip_interface(addr).ip + if ip_addr.version == 6: + return str(ip_addr) + else: + return None + + +def service_restart(service_name): + hookenv.status_set("maintenance", "Restarting {0} service".format(service_name)) + host.service_restart(service_name) + + +def service_start(service_name): + hookenv.log("Starting {0} service.".format(service_name)) + host.service_stop(service_name) + + +def service_stop(service_name): + hookenv.log("Stopping {0} service.".format(service_name)) + host.service_stop(service_name) + + +def arch(): + """Return the package architecture as a string. Raise an exception if the + architecture is not supported by kubernetes.""" + # Get the package architecture for this system. + architecture = check_output(["dpkg", "--print-architecture"]).rstrip() + # Convert the binary result into a string. 
+ architecture = architecture.decode("utf-8") + return architecture + + +def get_service_ip(service, namespace="kube-system", errors_fatal=True): + try: + output = kubectl( + "get", "service", "--namespace", namespace, service, "--output", "json" + ) + except CalledProcessError: + if errors_fatal: + raise + else: + return None + else: + svc = json.loads(output.decode()) + return svc["spec"]["clusterIP"] + + +def kubectl(*args): + """Run a kubectl cli command with a config file. Returns stdout and throws + an error if the command fails.""" + command = ["kubectl", "--kubeconfig=" + kubeclientconfig_path] + list(args) + hookenv.log("Executing {}".format(command)) + return check_output(command) + + +def kubectl_success(*args): + """Runs kubectl with the given args. Returns True if successful, False if + not.""" + try: + kubectl(*args) + return True + except CalledProcessError: + return False + + +def kubectl_manifest(operation, manifest): + """Wrap the kubectl creation command when using filepath resources + :param operation - one of get, create, delete, replace + :param manifest - filepath to the manifest + """ + # Deletions are a special case + if operation == "delete": + # Ensure we immediately remove requested resources with --now + return kubectl_success(operation, "-f", manifest, "--now") + else: + # Guard against an error re-creating the same manifest multiple times + if operation == "create": + # If we already have the definition, its probably safe to assume + # creation was true. 
+ if kubectl_success("get", "-f", manifest): + hookenv.log("Skipping definition for {}".format(manifest)) + return True + # Execute the requested command that did not match any of the special + # cases above + return kubectl_success(operation, "-f", manifest) + + +def get_node_name(): + kubelet_extra_args = parse_extra_args("kubelet-extra-args") + cloud_provider = kubelet_extra_args.get("cloud-provider", "") + if is_state("endpoint.aws.ready"): + cloud_provider = "aws" + elif is_state("endpoint.gcp.ready"): + cloud_provider = "gce" + elif is_state("endpoint.openstack.ready"): + cloud_provider = "openstack" + elif is_state("endpoint.vsphere.ready"): + cloud_provider = "vsphere" + elif is_state("endpoint.azure.ready"): + cloud_provider = "azure" + if cloud_provider == "aws": + return getfqdn().lower() + else: + return gethostname().lower() + + +def create_kubeconfig( + kubeconfig, + server, + ca, + key=None, + certificate=None, + user="ubuntu", + context="juju-context", + cluster="juju-cluster", + password=None, + token=None, + keystone=False, + aws_iam_cluster_id=None, +): + """Create a configuration for Kubernetes based on path using the supplied + arguments for values of the Kubernetes server, CA, key, certificate, user + context and cluster.""" + if not key and not certificate and not password and not token: + raise ValueError("Missing authentication mechanism.") + elif key and not certificate: + raise ValueError("Missing certificate.") + elif not key and certificate: + raise ValueError("Missing key.") + elif token and password: + # token and password are mutually exclusive. Error early if both are + # present. The developer has requested an impossible situation. + # see: kubectl config set-credentials --help + raise ValueError("Token and Password are mutually exclusive.") + + old_kubeconfig = Path(kubeconfig) + new_kubeconfig = Path(str(kubeconfig) + ".new") + + # Create the config file with the address of the master server. 
+ cmd = ( + "kubectl config --kubeconfig={0} set-cluster {1} " + "--server={2} --certificate-authority={3} --embed-certs=true" + ) + check_call(split(cmd.format(new_kubeconfig, cluster, server, ca))) + # Delete old users + cmd = "kubectl config --kubeconfig={0} unset users" + check_call(split(cmd.format(new_kubeconfig))) + # Create the credentials using the client flags. + cmd = "kubectl config --kubeconfig={0} " "set-credentials {1} ".format( + new_kubeconfig, user + ) + + if key and certificate: + cmd = ( + "{0} --client-key={1} --client-certificate={2} " + "--embed-certs=true".format(cmd, key, certificate) + ) + if password: + cmd = "{0} --username={1} --password={2}".format(cmd, user, password) + # This is mutually exclusive from password. They will not work together. + if token: + cmd = "{0} --token={1}".format(cmd, token) + check_call(split(cmd)) + # Create a default context with the cluster. + cmd = "kubectl config --kubeconfig={0} set-context {1} " "--cluster={2} --user={3}" + check_call(split(cmd.format(new_kubeconfig, context, cluster, user))) + # Make the config use this new context. 
+ cmd = "kubectl config --kubeconfig={0} use-context {1}" + check_call(split(cmd.format(new_kubeconfig, context))) + if keystone: + # create keystone user + cmd = "kubectl config --kubeconfig={0} " "set-credentials keystone-user".format( + new_kubeconfig + ) + check_call(split(cmd)) + # create keystone context + cmd = ( + "kubectl config --kubeconfig={0} " + "set-context --cluster={1} " + "--user=keystone-user keystone".format(new_kubeconfig, cluster) + ) + check_call(split(cmd)) + # use keystone context + cmd = "kubectl config --kubeconfig={0} " "use-context keystone".format( + new_kubeconfig + ) + check_call(split(cmd)) + # manually add exec command until kubectl can do it for us + with open(new_kubeconfig, "r") as f: + content = f.read() + content = content.replace( + """- name: keystone-user + user: {}""", + """- name: keystone-user + user: + exec: + command: "/snap/bin/client-keystone-auth" + apiVersion: "client.authentication.k8s.io/v1beta1" +""", + ) + with open(new_kubeconfig, "w") as f: + f.write(content) + if aws_iam_cluster_id: + # create aws-iam context + cmd = ( + "kubectl config --kubeconfig={0} " + "set-context --cluster={1} " + "--user=aws-iam-user aws-iam-authenticator" + ) + check_call(split(cmd.format(new_kubeconfig, cluster))) + + # append a user for aws-iam + cmd = ( + "kubectl --kubeconfig={0} config set-credentials " + "aws-iam-user --exec-command=aws-iam-authenticator " + '--exec-arg="token" --exec-arg="-i" --exec-arg="{1}" ' + '--exec-arg="-r" --exec-arg="<>" ' + "--exec-api-version=client.authentication.k8s.io/v1alpha1" + ) + check_call(split(cmd.format(new_kubeconfig, aws_iam_cluster_id))) + + # not going to use aws-iam context by default since we don't have + # the desired arn. This will make the config not usable if copied. 
+ + # cmd = 'kubectl config --kubeconfig={0} ' \ + # 'use-context aws-iam-authenticator'.format(new_kubeconfig) + # check_call(split(cmd)) + if old_kubeconfig.exists(): + changed = new_kubeconfig.read_text() != old_kubeconfig.read_text() + else: + changed = True + if changed: + new_kubeconfig.rename(old_kubeconfig) + + +def parse_extra_args(config_key): + elements = hookenv.config().get(config_key, "").split() + args = {} + + for element in elements: + if "=" in element: + key, _, value = element.partition("=") + args[key] = value + else: + args[element] = "true" + + return args + + +def configure_kubernetes_service(key, service, base_args, extra_args_key): + db = unitdata.kv() + + prev_args_key = key + service + prev_snap_args = db.get(prev_args_key) or {} + + extra_args = parse_extra_args(extra_args_key) + + args = {} + args.update(base_args) + args.update(extra_args) + + # CIS benchmark action may inject kv config to pass failing tests. Merge + # these after the func args as they should take precedence. + cis_args_key = "cis-" + service + cis_args = db.get(cis_args_key) or {} + args.update(cis_args) + + # Remove any args with 'None' values (all k8s args are 'k=v') and + # construct an arg string for use by 'snap set'. + args = {k: v for k, v in args.items() if v is not None} + args = ['--%s="%s"' % arg for arg in args.items()] + args = " ".join(args) + + snap_opts = {} + for arg in prev_snap_args: + # remove previous args by setting to null + snap_opts[arg] = "null" + snap_opts["args"] = args + snap_opts = ["%s=%s" % opt for opt in snap_opts.items()] + + cmd = ["snap", "set", service] + snap_opts + check_call(cmd) + + # Now that we've started doing snap configuration through the "args" + # option, we should never need to clear previous args again. 
+ db.set(prev_args_key, {}) + + +def _snap_common_path(component): + return Path("/var/snap/{}/common".format(component)) + + +def cloud_config_path(component): + return _snap_common_path(component) / "cloud-config.conf" + + +def _gcp_creds_path(component): + return _snap_common_path(component) / "gcp-creds.json" + + +def _daemon_env_path(component): + return _snap_common_path(component) / "environment" + + +def _cloud_endpoint_ca_path(component): + return _snap_common_path(component) / "cloud-endpoint-ca.crt" + + +def encryption_config_path(): + apiserver_snap_common_path = _snap_common_path("kube-apiserver") + encryption_conf_dir = apiserver_snap_common_path / "encryption" + return encryption_conf_dir / "encryption_config.yaml" + + +def write_gcp_snap_config(component): + # gcp requires additional credentials setup + gcp = endpoint_from_flag("endpoint.gcp.ready") + creds_path = _gcp_creds_path(component) + with creds_path.open("w") as fp: + os.fchmod(fp.fileno(), 0o600) + fp.write(gcp.credentials) + + # create a cloud-config file that sets token-url to nil to make the + # services use the creds env var instead of the metadata server, as + # well as making the cluster multizone + comp_cloud_config_path = cloud_config_path(component) + comp_cloud_config_path.write_text( + "[Global]\n" "token-url = nil\n" "multizone = true\n" + ) + + daemon_env_path = _daemon_env_path(component) + if daemon_env_path.exists(): + daemon_env = daemon_env_path.read_text() + if not daemon_env.endswith("\n"): + daemon_env += "\n" + else: + daemon_env = "" + if gcp_creds_env_key not in daemon_env: + daemon_env += "{}={}\n".format(gcp_creds_env_key, creds_path) + daemon_env_path.parent.mkdir(parents=True, exist_ok=True) + daemon_env_path.write_text(daemon_env) + + +def generate_openstack_cloud_config(): + # openstack requires additional credentials setup + openstack = endpoint_from_flag("endpoint.openstack.ready") + + lines = [ + "[Global]", + "auth-url = {}".format(openstack.auth_url), + 
"region = {}".format(openstack.region), + "username = {}".format(openstack.username), + "password = {}".format(openstack.password), + "tenant-name = {}".format(openstack.project_name), + "domain-name = {}".format(openstack.user_domain_name), + "tenant-domain-name = {}".format(openstack.project_domain_name), + ] + if openstack.endpoint_tls_ca: + lines.append("ca-file = /etc/config/endpoint-ca.cert") + + lines.extend( + [ + "", + "[LoadBalancer]", + ] + ) + + if openstack.has_octavia in (True, None): + # Newer integrator charm will detect whether underlying OpenStack has + # Octavia enabled so we can set this intelligently. If we're still + # related to an older integrator, though, default to assuming Octavia + # is available. + lines.append("use-octavia = true") + else: + lines.append("use-octavia = false") + lines.append("lb-provider = haproxy") + if openstack.subnet_id: + lines.append("subnet-id = {}".format(openstack.subnet_id)) + if openstack.floating_network_id: + lines.append("floating-network-id = {}".format(openstack.floating_network_id)) + if openstack.lb_method: + lines.append("lb-method = {}".format(openstack.lb_method)) + if openstack.internal_lb: + lines.append("internal-lb = true") + if openstack.manage_security_groups: + lines.append( + "manage-security-groups = {}".format(openstack.manage_security_groups) + ) + if any( + [openstack.bs_version, openstack.trust_device_path, openstack.ignore_volume_az] + ): + lines.append("") + lines.append("[BlockStorage]") + if openstack.bs_version is not None: + lines.append("bs-version = {}".format(openstack.bs_version)) + if openstack.trust_device_path is not None: + lines.append("trust-device-path = {}".format(openstack.trust_device_path)) + if openstack.ignore_volume_az is not None: + lines.append("ignore-volume-az = {}".format(openstack.ignore_volume_az)) + return "\n".join(lines) + "\n" + + +def write_azure_snap_config(component): + azure = endpoint_from_flag("endpoint.azure.ready") + comp_cloud_config_path = 
cloud_config_path(component) + comp_cloud_config_path.write_text( + json.dumps( + { + "useInstanceMetadata": True, + "useManagedIdentityExtension": azure.managed_identity, + "subscriptionId": azure.subscription_id, + "resourceGroup": azure.resource_group, + "location": azure.resource_group_location, + "vnetName": azure.vnet_name, + "vnetResourceGroup": azure.vnet_resource_group, + "subnetName": azure.subnet_name, + "securityGroupName": azure.security_group_name, + "loadBalancerSku": "standard", + "securityGroupResourceGroup": azure.security_group_resource_group, + "aadClientId": azure.aad_client_id, + "aadClientSecret": azure.aad_client_secret, + "tenantId": azure.tenant_id, + } + ) + ) + + +def configure_kube_proxy( + configure_prefix, api_servers, cluster_cidr, bind_address=None +): + kube_proxy_opts = {} + kube_proxy_opts["cluster-cidr"] = cluster_cidr + kube_proxy_opts["kubeconfig"] = kubeproxyconfig_path + kube_proxy_opts["logtostderr"] = "true" + kube_proxy_opts["v"] = "0" + num_apis = len(api_servers) + kube_proxy_opts["master"] = api_servers[get_unit_number() % num_apis] + kube_proxy_opts["hostname-override"] = get_node_name() + if bind_address: + kube_proxy_opts["bind-address"] = bind_address + elif is_ipv6(cluster_cidr): + kube_proxy_opts["bind-address"] = "::" + + if host.is_container(): + kube_proxy_opts["conntrack-max-per-core"] = "0" + + feature_gates = [] + + if is_dual_stack(cluster_cidr): + feature_gates.append("IPv6DualStack=true") + + if is_state("endpoint.aws.ready"): + feature_gates.append("CSIMigrationAWS=false") + elif is_state("endpoint.gcp.ready"): + feature_gates.append("CSIMigrationGCE=false") + elif is_state("endpoint.azure.ready"): + feature_gates.append("CSIMigrationAzureDisk=false") + + kube_proxy_opts["feature-gates"] = ",".join(feature_gates) + + configure_kubernetes_service( + configure_prefix, "kube-proxy", kube_proxy_opts, "proxy-extra-args" + ) + + +def get_unit_number(): + return int(hookenv.local_unit().split("/")[1]) + + +def 
cluster_cidr(): + """Return the cluster CIDR provided by the CNI""" + cni = endpoint_from_flag("cni.available") + if not cni: + return None + config = hookenv.config() + if "default-cni" in config: + # master + default_cni = config["default-cni"] + else: + # worker + kube_control = endpoint_from_flag("kube-control.dns.available") + if not kube_control: + return None + default_cni = kube_control.get_default_cni() + return cni.get_config(default=default_cni)["cidr"] + + +def is_dual_stack(cidrs): + """Detect IPv4/IPv6 dual stack from CIDRs""" + return {net.version for net in get_networks(cidrs)} == {4, 6} + + +def is_ipv4(cidrs): + """Detect IPv6 from CIDRs""" + return get_ipv4_network(cidrs) is not None + + +def is_ipv6(cidrs): + """Detect IPv6 from CIDRs""" + return get_ipv6_network(cidrs) is not None + + +def is_ipv6_preferred(cidrs): + """Detect if IPv6 is preffered from CIDRs""" + return get_networks(cidrs)[0].version == 6 + + +def get_networks(cidrs): + """Convert a comma-separated list of CIDRs to a list of networks.""" + if not cidrs: + return [] + return [ipaddress.ip_interface(cidr).network for cidr in cidrs.split(",")] + + +def get_ipv4_network(cidrs): + """Get the IPv4 network from the given CIDRs or None""" + return {net.version: net for net in get_networks(cidrs)}.get(4) + + +def get_ipv6_network(cidrs): + """Get the IPv6 network from the given CIDRs or None""" + return {net.version: net for net in get_networks(cidrs)}.get(6) + + +def enable_ipv6_forwarding(): + """Enable net.ipv6.conf.all.forwarding in sysctl if it is not already.""" + check_call(["sysctl", "net.ipv6.conf.all.forwarding=1"]) + + +def _as_address(addr_str): + try: + return ipaddress.ip_address(addr_str) + except ValueError: + return None + + +def get_bind_addrs(ipv4=True, ipv6=True): + try: + output = check_output(["ip", "-j", "-br", "addr", "show", "scope", "global"]) + except CalledProcessError: + # stderr will have any details, and go to the log + hookenv.log("Unable to determine 
global addresses", hookenv.ERROR) + return [] + + ignore_interfaces = ("lxdbr", "flannel", "cni", "virbr", "docker") + accept_versions = set() + if ipv4: + accept_versions.add(4) + if ipv6: + accept_versions.add(6) + + addrs = [] + for addr in json.loads(output.decode("utf8")): + if addr["operstate"].upper() != "UP" or any( + addr["ifname"].startswith(prefix) for prefix in ignore_interfaces + ): + continue + + for ifc in addr["addr_info"]: + local_addr = _as_address(ifc.get("local")) + if local_addr and local_addr.version in accept_versions: + addrs.append(str(local_addr)) + + return addrs + + +class InvalidVMwareHost(Exception): + pass + + +def _get_vmware_uuid(): + serial_id_file = "/sys/class/dmi/id/product_serial" + # The serial id from VMWare VMs comes in following format: + # VMware-42 28 13 f5 d4 20 71 61-5d b0 7b 96 44 0c cf 54 + try: + with open(serial_id_file, "r") as f: + serial_string = f.read().strip() + if "VMware-" not in serial_string: + hookenv.log( + "Unable to find VMware ID in " + "product_serial: {}".format(serial_string) + ) + raise InvalidVMwareHost + serial_string = ( + serial_string.split("VMware-")[1].replace(" ", "").replace("-", "") + ) + uuid = "%s-%s-%s-%s-%s" % ( + serial_string[0:8], + serial_string[8:12], + serial_string[12:16], + serial_string[16:20], + serial_string[20:32], + ) + except IOError as err: + hookenv.log("Unable to read UUID from sysfs: {}".format(err)) + uuid = "UNKNOWN" + + return uuid + + +def token_generator(length=32): + """Generate a random token for use in account tokens. 
+ + param: length - the length of the token to generate + """ + alpha = string.ascii_letters + string.digits + token = "".join(random.SystemRandom().choice(alpha) for _ in range(length)) + return token + + +def get_secret_names(): + """Return a dict of 'username: secret_id' for Charmed Kubernetes users.""" + try: + output = kubectl( + "get", + "secrets", + "-n", + AUTH_SECRET_NS, + "--field-selector", + "type={}".format(AUTH_SECRET_TYPE), + "-o", + "json", + ).decode("UTF-8") + except (CalledProcessError, FileNotFoundError): + # The api server may not be up, or we may be trying to run kubelet before + # the snap is installed. Send back an empty dict. + hookenv.log("Unable to get existing secrets", level=hookenv.WARNING) + return {} + + secrets = json.loads(output) + secret_names = {} + if "items" in secrets: + for secret in secrets["items"]: + try: + secret_id = secret["metadata"]["name"] + username_b64 = secret["data"]["username"].encode("UTF-8") + except (KeyError, TypeError): + # CK secrets will have populated 'data', but not all secrets do + continue + secret_names[b64decode(username_b64).decode("UTF-8")] = secret_id + return secret_names + + +def generate_rfc1123(length=10): + """Generate a random string compliant with RFC 1123. 
+ + https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#dns-subdomain-names + + param: length - the length of the string to generate + """ + length = 253 if length > 253 else length + valid_chars = string.ascii_lowercase + string.digits + rand_str = "".join(random.SystemRandom().choice(valid_chars) for _ in range(length)) + return rand_str + + +def create_secret(token, username, user, groups=None): + secrets = get_secret_names() + if username in secrets: + # Use existing secret ID if one exists for our username + secret_id = secrets[username] + else: + # secret IDs must be unique and rfc1123 compliant + sani_name = re.sub("[^0-9a-z.-]+", "-", user.lower()) + secret_id = "auth-{}-{}".format(sani_name, generate_rfc1123(10)) + + # The authenticator expects tokens to be in the form user::token + token_delim = "::" + if token_delim not in token: + token = "{}::{}".format(user, token) + + context = { + "type": AUTH_SECRET_TYPE, + "secret_name": secret_id, + "secret_namespace": AUTH_SECRET_NS, + "user": b64encode(user.encode("UTF-8")).decode("utf-8"), + "username": b64encode(username.encode("UTF-8")).decode("utf-8"), + "password": b64encode(token.encode("UTF-8")).decode("utf-8"), + "groups": b64encode(groups.encode("UTF-8")).decode("utf-8") if groups else "", + } + with tempfile.NamedTemporaryFile() as tmp_manifest: + render("cdk.auth-webhook-secret.yaml", tmp_manifest.name, context=context) + + if kubectl_manifest("apply", tmp_manifest.name): + hookenv.log("Created secret for {}".format(username)) + return True + else: + hookenv.log("WARN: Unable to create secret for {}".format(username)) + return False + + +def get_secret_password(username): + """Get the password for the given user from the secret that CK created.""" + try: + output = kubectl( + "get", + "secrets", + "-n", + AUTH_SECRET_NS, + "--field-selector", + "type={}".format(AUTH_SECRET_TYPE), + "-o", + "json", + ).decode("UTF-8") + except CalledProcessError: + # NB: apiserver probably isn't 
up. This can happen on boostrap or upgrade + # while trying to build kubeconfig files. If we need the 'admin' token during + # this time, pull it directly out of the kubeconfig file if possible. + token = None + if username == "admin": + admin_kubeconfig = Path("/root/.kube/config") + if admin_kubeconfig.exists(): + data = yaml.safe_load(admin_kubeconfig.read_text()) + try: + token = data["users"][0]["user"]["token"] + except (KeyError, IndexError, TypeError): + pass + return token + except FileNotFoundError: + # New deployments may ask for a token before the kubectl snap is installed. + # Give them nothing! + return None + + secrets = json.loads(output) + if "items" in secrets: + for secret in secrets["items"]: + try: + data_b64 = secret["data"] + password_b64 = data_b64["password"].encode("UTF-8") + username_b64 = data_b64["username"].encode("UTF-8") + except (KeyError, TypeError): + # CK authn secrets will have populated 'data', but not all secrets do + continue + + password = b64decode(password_b64).decode("UTF-8") + secret_user = b64decode(username_b64).decode("UTF-8") + if username == secret_user: + return password + return None + + +def get_node_ip(): + """Determines the preferred NodeIP value for this node.""" + cidr = cluster_cidr() + if not cidr: + return None + if is_ipv6_preferred(cidr): + return get_ingress_address6("kube-control") + else: + return get_ingress_address("kube-control") + + +def merge_kubelet_extra_config(config, extra_config): + """Updates config to include the contents of extra_config. This is done + recursively to allow deeply nested dictionaries to be merged. + + This is destructive: it modifies the config dict that is passed in. 
+ """ + for k, extra_config_value in extra_config.items(): + if isinstance(extra_config_value, dict): + config_value = config.setdefault(k, {}) + merge_kubelet_extra_config(config_value, extra_config_value) + else: + config[k] = extra_config_value + + +def workaround_lxd_kernel_params(): + """ + Workaround for kubelet not starting in LXD when kernel params are not set + to the desired values. + """ + if host.is_container(): + hookenv.log("LXD detected, faking kernel params via bind mounts") + root_dir = "/root/cdk/lxd-kernel-params" + os.makedirs(root_dir, exist_ok=True) + # Kernel params taken from: + # https://github.com/kubernetes/kubernetes/blob/v1.22.0/pkg/kubelet/cm/container_manager_linux.go#L421-L426 + # https://github.com/kubernetes/kubernetes/blob/v1.22.0/pkg/util/sysctl/sysctl.go#L30-L64 + params = { + "vm.overcommit_memory": 1, + "vm.panic_on_oom": 0, + "kernel.panic": 10, + "kernel.panic_on_oops": 1, + "kernel.keys.root_maxkeys": 1000000, + "kernel.keys.root_maxbytes": 1000000 * 25, + } + for param, param_value in params.items(): + fake_param_path = root_dir + "/" + param + with open(fake_param_path, "w") as f: + f.write(str(param_value)) + real_param_path = "/proc/sys/" + param.replace(".", "/") + host.fstab_add(fake_param_path, real_param_path, "none", "bind") + subprocess.check_call(["mount", "-a"]) + else: + hookenv.log("LXD not detected, not faking kernel params") + + +def get_sandbox_image_uri(registry): + return "{}/pause:3.6".format(registry) + + +def configure_kubelet(dns_domain, dns_ip, registry, taints=None, has_xcp=False): + kubelet_opts = {} + kubelet_opts["kubeconfig"] = kubelet_kubeconfig_path + kubelet_opts["v"] = "0" + kubelet_opts["logtostderr"] = "true" + kubelet_opts["node-ip"] = get_node_ip() + + container_runtime = endpoint_from_flag("endpoint.container-runtime.available") + + kubelet_opts["container-runtime"] = container_runtime.get_runtime() + if kubelet_opts["container-runtime"] == "remote": + 
kubelet_opts["container-runtime-endpoint"] = container_runtime.get_socket() + + feature_gates = {} + + kubelet_cloud_config_path = cloud_config_path("kubelet") + if has_xcp: + kubelet_opts["cloud-provider"] = "external" + elif is_state("endpoint.aws.ready"): + kubelet_opts["cloud-provider"] = "aws" + feature_gates["CSIMigrationAWS"] = False + elif is_state("endpoint.gcp.ready"): + kubelet_opts["cloud-provider"] = "gce" + kubelet_opts["cloud-config"] = str(kubelet_cloud_config_path) + feature_gates["CSIMigrationGCE"] = False + elif is_state("endpoint.openstack.ready"): + kubelet_opts["cloud-provider"] = "external" + elif is_state("endpoint.vsphere.joined"): + # vsphere just needs to be joined on the worker (vs 'ready') + kubelet_opts["cloud-provider"] = "vsphere" + # NB: vsphere maps node product-id to its uuid (no config file needed). + uuid = _get_vmware_uuid() + kubelet_opts["provider-id"] = "vsphere://{}".format(uuid) + elif is_state("endpoint.azure.ready"): + azure = endpoint_from_flag("endpoint.azure.ready") + kubelet_opts["cloud-provider"] = "azure" + kubelet_opts["cloud-config"] = str(kubelet_cloud_config_path) + kubelet_opts["provider-id"] = azure.vm_id + feature_gates["CSIMigrationAzureDisk"] = False + + # Put together the KubeletConfiguration data + kubelet_config = { + "apiVersion": "kubelet.config.k8s.io/v1beta1", + "kind": "KubeletConfiguration", + "address": "0.0.0.0", + "authentication": { + "anonymous": {"enabled": False}, + "x509": {"clientCAFile": str(ca_crt_path)}, + }, + # NB: authz webhook config tells the kubelet to ask the api server + # if a request is authorized; it is not related to the authn + # webhook config of the k8s master services. 
+ "authorization": {"mode": "Webhook"}, + "clusterDomain": dns_domain, + "failSwapOn": False, + "port": 10250, + "protectKernelDefaults": True, + "readOnlyPort": 0, + "tlsCertFile": str(server_crt_path), + "tlsPrivateKeyFile": str(server_key_path), + } + if dns_ip: + kubelet_config["clusterDNS"] = [dns_ip] + + # Handle feature gates + if get_version("kubelet") >= (1, 19): + # NB: required for CIS compliance + feature_gates["RotateKubeletServerCertificate"] = True + if is_state("kubernetes-worker.gpu.enabled"): + feature_gates["DevicePlugins"] = True + if feature_gates: + kubelet_config["featureGates"] = feature_gates + if is_dual_stack(cluster_cidr()): + feature_gates = kubelet_config.setdefault("featureGates", {}) + feature_gates["IPv6DualStack"] = True + + # Workaround for DNS on bionic + # https://github.com/juju-solutions/bundle-canonical-kubernetes/issues/655 + resolv_path = os.path.realpath("/etc/resolv.conf") + if resolv_path == "/run/systemd/resolve/stub-resolv.conf": + kubelet_config["resolvConf"] = "/run/systemd/resolve/resolv.conf" + + # Add kubelet-extra-config. This needs to happen last so that it + # overrides any config provided by the charm. + kubelet_extra_config = hookenv.config("kubelet-extra-config") + kubelet_extra_config = yaml.safe_load(kubelet_extra_config) + merge_kubelet_extra_config(kubelet_config, kubelet_extra_config) + + # Render the file and configure Kubelet to use it + os.makedirs("/root/cdk/kubelet", exist_ok=True) + with open("/root/cdk/kubelet/config.yaml", "w") as f: + f.write("# Generated by kubernetes-worker charm, do not edit\n") + yaml.dump(kubelet_config, f) + kubelet_opts["config"] = "/root/cdk/kubelet/config.yaml" + + # If present, ensure kubelet gets the pause container from the configured + # registry. When not present, kubelet uses a default image location + # (currently k8s.gcr.io/pause:3.4.1). 
+ if registry: + kubelet_opts["pod-infra-container-image"] = get_sandbox_image_uri(registry) + + if taints: + kubelet_opts["register-with-taints"] = ",".join(taints) + + workaround_lxd_kernel_params() + + configure_kubernetes_service( + "kubernetes-common.prev-args.", "kubelet", kubelet_opts, "kubelet-extra-args" + ) + + +def configure_default_cni(default_cni): + """Set the default CNI configuration to be used by CNI clients + (kubelet, containerd). + + CNI clients choose whichever CNI config in /etc/cni/net.d/ is + alphabetically first, so we accomplish this by creating a file named + /etc/cni/net.d/05-default.conflist, which is alphabetically earlier than + typical CNI config names, e.g. 10-flannel.conflist and 10-calico.conflist + + The created 05-default.conflist file is a symlink to whichever CNI config + is actually going to be used. + """ + # Clean up current default + cni_conf_dir = "/etc/cni/net.d" + for filename in os.listdir(cni_conf_dir): + if filename.startswith("05-default."): + os.remove(cni_conf_dir + "/" + filename) + + # Set new default + cni = endpoint_from_flag("cni.available") + cni_conf = cni.get_config(default=default_cni) + source = cni_conf["cni-conf-file"] + dest = cni_conf_dir + "/" + "05-default." 
+ source.split(".")[-1] + os.symlink(source, dest) diff --git a/kubernetes-worker/lib/charms/layer/kubernetes_node_base.py b/kubernetes-worker/lib/charms/layer/kubernetes_node_base.py new file mode 100644 index 0000000..ba49416 --- /dev/null +++ b/kubernetes-worker/lib/charms/layer/kubernetes_node_base.py @@ -0,0 +1,121 @@ +"""Library shared between kubernetes control plane and kubernetes worker charms.""" + +from subprocess import call +from os import PathLike +import time +from typing import Union, List + +from charms.layer.kubernetes_common import get_node_name +from charms.reactive import is_state +from charmhelpers.core import hookenv, unitdata + +db = unitdata.kv() + + +class LabelMaker: + """Use to apply labels to a kubernetes node.""" + + class NodeLabelError(Exception): + """Raised when there's an error labeling a node.""" + + pass + + def __init__(self, kubeconfig_path: Union[PathLike, str]): + self.kubeconfig_path = kubeconfig_path + self.node = get_node_name() + + @staticmethod + def _retried_call(cmd: List[str], retry_msg: str, timeout: int = 180) -> bool: + deadline = time.time() + timeout + while time.time() < deadline: + code = call(cmd) + if code == 0: + return True + hookenv.log(retry_msg) + time.sleep(1) + else: + return False + + def set_label(self, label: str, value: str) -> None: + """ + Add a label to this node. + + @param str label: Label name to apply + @param str value: Value to associate with the label + @raises LabelMaker.NodeLabelError: if the label cannot be added + """ + cmd = "kubectl --kubeconfig={0} label node {1} {2}={3} --overwrite" + cmd = cmd.format(self.kubeconfig_path, self.node, label, value) + retry_msg = "Failed to apply label {0}={1}. Will retry.".format(label, value) + if not LabelMaker._retried_call(cmd.split(), retry_msg): + raise LabelMaker.NodeLabelError(retry_msg) + + def remove_label(self, label: str) -> None: + """ + Remove a label to this node. 
+ + @param str label: Label name to remove + @raises LabelMaker.NodeLabelError: if the label cannot be removed + """ + cmd = "kubectl --kubeconfig={0} label node {1} {2}-" + cmd = cmd.format(self.kubeconfig_path, self.node, label) + retry_msg = "Failed to remove label {0}. Will retry.".format(label) + if not LabelMaker._retried_call(cmd.split(), retry_msg): + raise LabelMaker.NodeLabelError(retry_msg) + + def apply_node_labels(self) -> None: + """ + Parse the `labels` configuration option and apply the labels to the + node. + + @raises LabelMaker.NodeLabelError: if the label cannot be added or removed + """ + # Get the user's configured labels. + config = hookenv.config() + user_labels = {} + for item in config.get("labels").split(" "): + try: + key, val = item.split("=") + except ValueError: + hookenv.log("Skipping malformed option: {}.".format(item)) + else: + user_labels[key] = val + # Collect the current label state. + current_labels = db.get("current_labels") or {} + + try: + # Remove any labels that the user has removed from the config. + for key in list(current_labels.keys()): + if key not in user_labels: + self.remove_label(key) + del current_labels[key] + db.set("current_labels", current_labels) + + # Add any new labels. + for key, val in user_labels.items(): + self.set_label(key, val) + current_labels[key] = val + db.set("current_labels", current_labels) + + # Set the juju-application label. + self.set_label("juju-application", hookenv.service_name()) + + # Set the juju.io/cloud label. 
+ juju_io_cloud_labels = [ + ("aws", "ec2"), + ("gcp", "gce"), + ("openstack", "openstack"), + ("vsphere", "vsphere"), + ("azure", "azure"), + ] + for endpoint, label in juju_io_cloud_labels: + if is_state("endpoint.{0}.ready".format(endpoint)): + self.set_label("juju.io/cloud", label) + break + else: + # none of the endpoints matched, remove the label + self.remove_label("juju.io/cloud") + + except self.NodeLabelError as ex: + hookenv.log(str(ex)) + raise diff --git a/kubernetes-worker/lib/charms/layer/nagios.py b/kubernetes-worker/lib/charms/layer/nagios.py new file mode 100644 index 0000000..f6ad998 --- /dev/null +++ b/kubernetes-worker/lib/charms/layer/nagios.py @@ -0,0 +1,60 @@ +from pathlib import Path + +NAGIOS_PLUGINS_DIR = '/usr/lib/nagios/plugins' + + +def install_nagios_plugin_from_text(text, plugin_name): + """ Install a nagios plugin. + + Args: + text: Plugin source code (str) + plugin_name: Name of the plugin in nagios + + Returns: Full path to installed plugin + """ + dest_path = Path(NAGIOS_PLUGINS_DIR) / plugin_name + if dest_path.exists(): + # we could complain here, test the files are the same contents, or + # just bail. Idempotency is a big deal in Juju, so I'd like to be + # ok with being called with the same file multiple times, but we + # certainly want to catch the case where multiple layers are using + # the same filename for their nagios checks. + dest = dest_path.read_text() + if dest == text: + # same file + return dest_path + # different file contents! + # maybe someone changed options or something so we need to write + # it again + + dest_path.write_text(text) + dest_path.chmod(0o755) + + return dest_path + + +def install_nagios_plugin_from_file(source_file_path, plugin_name): + """ Install a nagios plugin. 
+ + Args: + source_file_path: Path to plugin source file + plugin_name: Name of the plugin in nagios + + Returns: Full path to installed plugin + """ + + return install_nagios_plugin_from_text(Path(source_file_path).read_text(), + plugin_name) + + +def remove_nagios_plugin(plugin_name): + """ Remove a nagios plugin. + + Args: + plugin_name: Name of the plugin in nagios + + Returns: None + """ + dest_path = Path(NAGIOS_PLUGINS_DIR) / plugin_name + if dest_path.exists(): + dest_path.unlink() diff --git a/kubernetes-worker/lib/charms/layer/options.py b/kubernetes-worker/lib/charms/layer/options.py new file mode 100644 index 0000000..d3f273f --- /dev/null +++ b/kubernetes-worker/lib/charms/layer/options.py @@ -0,0 +1,26 @@ +import os +from pathlib import Path + +import yaml + + +_CHARM_PATH = Path(os.environ.get('JUJU_CHARM_DIR', '.')) +_DEFAULT_FILE = _CHARM_PATH / 'layer.yaml' +_CACHE = {} + + +def get(section=None, option=None, layer_file=_DEFAULT_FILE): + if option and not section: + raise ValueError('Cannot specify option without section') + + layer_file = (_CHARM_PATH / layer_file).resolve() + if layer_file not in _CACHE: + with layer_file.open() as fp: + _CACHE[layer_file] = yaml.safe_load(fp.read()) + + data = _CACHE[layer_file].get('options', {}) + if section: + data = data.get(section, {}) + if option: + data = data.get(option) + return data diff --git a/kubernetes-worker/lib/charms/layer/snap.py b/kubernetes-worker/lib/charms/layer/snap.py new file mode 100644 index 0000000..ae9be45 --- /dev/null +++ b/kubernetes-worker/lib/charms/layer/snap.py @@ -0,0 +1,492 @@ +# Copyright 2016-2019 Canonical Ltd. +# +# This file is part of the Snap layer for Juju. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import os +import subprocess + +import tenacity +import yaml + +from charmhelpers.core import hookenv +from charms import layer +from charms import reactive +from charms.reactive.helpers import any_file_changed, data_changed +from datetime import datetime, timedelta + + +def get_installed_flag(snapname): + return "snap.installed.{}".format(snapname) + + +def get_refresh_available_flag(snapname): + return "snap.refresh-available.{}".format(snapname) + + +def get_local_flag(snapname): + return "snap.local.{}".format(snapname) + + +def get_disabled_flag(snapname): + return "snap.disabled.{}".format(snapname) + + +def install(snapname, **kw): + """Install a snap. + + Snap will be installed from the coresponding resource if available, + otherwise from the Snap Store. + + Sets the snap.installed.{snapname} flag. + + If the snap.installed.{snapname} flag is already set then the refresh() + function is called. + """ + installed_flag = get_installed_flag(snapname) + local_flag = get_local_flag(snapname) + if reactive.is_flag_set(installed_flag): + refresh(snapname, **kw) + else: + if hookenv.has_juju_version("2.0"): + res_path = _resource_get(snapname) + if res_path is False: + _install_store(snapname, **kw) + else: + _install_local(res_path, **kw) + reactive.set_flag(local_flag) + else: + _install_store(snapname, **kw) + reactive.set_flag(installed_flag) + + # Installing any snap will first ensure that 'core' is installed. Set an + # appropriate flag for consumers that want to get/set core options. 
+ core_installed = get_installed_flag("core") + if not reactive.is_flag_set(core_installed): + reactive.set_flag(core_installed) + + +def is_installed(snapname): + return reactive.is_flag_set(get_installed_flag(snapname)) + + +def is_local(snapname): + return reactive.is_flag_set(get_local_flag(snapname)) + + +def get_installed_snaps(): + """Return a list of snaps which are installed by this layer.""" + flag_prefix = "snap.installed." + return [flag[len(flag_prefix) :] for flag in reactive.get_flags() if flag.startswith(flag_prefix)] + + +def refresh(snapname, **kw): + """Update a snap. + + Snap will be pulled from the coresponding resource if available + and reinstalled if it has changed. Otherwise a 'snap refresh' is + run updating the snap from the Snap Store, potentially switching + channel and changing confinement options. + """ + # Note that once you upload a resource, you can't remove it. + # This means we don't need to cope with an operator switching + # from a resource provided to a store provided snap, because there + # is no way for them to do that. Well, actually the operator could + # upload a zero byte resource, but then we would need to uninstall + # the snap before reinstalling from the store and that has the + # potential for data loss. + local_flag = get_local_flag(snapname) + if hookenv.has_juju_version("2.0"): + res_path = _resource_get(snapname) + if res_path is False: + _refresh_store(snapname, **kw) + reactive.clear_flag(local_flag) + else: + _install_local(res_path, **kw) + reactive.set_flag(local_flag) + else: + _refresh_store(snapname, **kw) + reactive.clear_flag(local_flag) + + +def remove(snapname): + hookenv.log("Removing snap {}".format(snapname)) + subprocess.check_call(["snap", "remove", snapname]) + reactive.clear_flag(get_installed_flag(snapname)) + + +def connect(plug, slot): + """Connect or reconnect a snap plug with a slot. 
+ + Each argument must be a two element tuple, corresponding to + the two arguments to the 'snap connect' command. + """ + hookenv.log("Connecting {} to {}".format(plug, slot), hookenv.DEBUG) + subprocess.check_call(["snap", "connect", plug, slot]) + + +def connect_all(): + """Connect or reconnect all interface connections defined in layer.yaml. + + This method will fail if called before all referenced snaps have been + installed. + """ + opts = layer.options("snap") + for snapname, snap_opts in opts.items(): + for plug, slot in snap_opts.get("connect", []): + connect(plug, slot) + + +def disable(snapname): + """Disables a snap in the system + + Sets the snap.disabled.{snapname} flag + + This method doesn't affect any snap flag if requested snap does not + exist + """ + hookenv.log("Disabling {} snap".format(snapname)) + if not reactive.is_flag_set(get_installed_flag(snapname)): + hookenv.log( + "Cannot disable {} snap because it is not installed".format(snapname), + hookenv.WARNING, + ) + return + + subprocess.check_call(["snap", "disable", snapname]) + reactive.set_flag(get_disabled_flag(snapname)) + + +def enable(snapname): + """Enables a snap in the system + + Clears the snap.disabled.{snapname} flag + + This method doesn't affect any snap flag if requeted snap does not + exist + """ + hookenv.log("Enabling {} snap".format(snapname)) + if not reactive.is_flag_set(get_installed_flag(snapname)): + hookenv.log( + "Cannot enable {} snap because it is not installed".format(snapname), + hookenv.WARNING, + ) + return + + subprocess.check_call(["snap", "enable", snapname]) + reactive.clear_flag(get_disabled_flag(snapname)) + + +def restart(snapname): + """Restarts a snap in the system + + This method doesn't affect any snap flag if requested snap does not + exist + """ + hookenv.log("Restarting {} snap".format(snapname)) + if not reactive.is_flag_set(get_installed_flag(snapname)): + hookenv.log( + "Cannot restart {} snap because it is not installed".format(snapname), + 
hookenv.WARNING, + ) + return + + subprocess.check_call(["snap", "restart", snapname]) + + +def set(snapname, key, value): + """Changes configuration options in a snap + + This method will fail if snapname is not an installed snap + """ + hookenv.log("Set config {}={} for snap {}".format(key, value, snapname)) + if not reactive.is_flag_set(get_installed_flag(snapname)): + hookenv.log( + "Cannot set {} snap config because it is not installed".format(snapname), + hookenv.WARNING, + ) + return + + subprocess.check_call(["snap", "set", snapname, "{}={}".format(key, value)]) + + +def set_refresh_timer(timer=""): + """Set the system refresh.timer option (snapd 2.31+) + + This method sets how often snapd will refresh installed snaps. Call with + an empty timer string to use the system default (currently 4x per day). + Use 'max' to schedule refreshes as far into the future as possible + (currently 1 month). Also accepts custom timer strings as defined in the + refresh.timer section here: + https://forum.snapcraft.io/t/system-options/87 + + This method does not validate custom strings and will lead to a + CalledProcessError if an invalid string is given. + + :param: timer: empty string (default), 'max', or custom string + """ + if timer == "max": + # A month from yesterday is the farthest we should delay to safely stay + # under the 1 month max. Translate that to a valid refresh.timer value. + # Examples: + # - Today is Friday the 13th, set the refresh timer to + # 'thu2' (Thursday the 12th is the 2nd thursday of the month). + # - Today is Tuesday the 1st, set the refresh timer to + # 'mon5' (Monday the [28..31] is the 5th monday of the month). + yesterday = datetime.now() - timedelta(1) + dow = yesterday.strftime("%a").lower() + # increment after int division because we want occurrence 1-5, not 0-4. 
+ occurrence = yesterday.day // 7 + 1 + timer = "{}{}".format(dow, occurrence) + + # NB: 'system' became synonymous with 'core' in 2.32.5, but we use 'core' + # here to ensure max compatibility. + set(snapname="core", key="refresh.timer", value=timer) + subprocess.check_call(["systemctl", "restart", "snapd.service"]) + + +def get(snapname, key): + """Gets configuration options for a snap + + This method returns the stripped output from the snap get command. + This method will fail if snapname is not an installed snap. + """ + hookenv.log("Get config {} for snap {}".format(key, snapname)) + if not reactive.is_flag_set(get_installed_flag(snapname)): + hookenv.log( + "Cannot get {} snap config because it is not installed".format(snapname), + hookenv.WARNING, + ) + return + + return subprocess.check_output(["snap", "get", snapname, key]).strip() + + +def _snap_list(): + """Constructs a dict with all installed snaps. + + Queries all the snaps installed and returns a dict containing their + versions and tracking channels, indexed by the snap name. + """ + cmd = ["snap", "list"] + out = subprocess.check_output(cmd).decode("utf-8", errors="replace").split() + snaps = {} + for i in range(6, len(out) - 5, 6): # Skip first six, which are the titles + # Snap list has 6 columns: + # name, version, revision, tracking channel, publisher and notes + # We only care about name (0), version (1) and tracking channel (3) + snaps[out[i]] = { + 'version': out[i + 1], + 'channel': out[i + 3], + } + return snaps + + +def get_installed_version(snapname): + """Gets the installed version of a snapname. + This function will return nothing if snapname is not an installed snap. 
+ """ + hookenv.log("Get installed key for snap {}".format(snapname)) + if not reactive.is_flag_set(get_installed_flag(snapname)): + hookenv.log( + "Cannot get {} snap installed version because it is not installed".format(snapname), + hookenv.WARNING, + ) + return + try: + return _snap_list()[snapname]['version'] + except Exception as e: + # If it fails to get the version information(ex. installed via resource), return nothing. + hookenv.log( + "Cannot get snap version: {}".format(e), + hookenv.WARNING, + ) + return + + +def get_installed_channel(snapname): + """Gets the tracking (channel) of a snapname. + This function will return nothing if snapname is not an installed snap. + """ + hookenv.log("Get channel for snap {}".format(snapname)) + if not reactive.is_flag_set(get_installed_flag(snapname)): + hookenv.log( + "Cannot get snap tracking (channel) because it is not installed", + hookenv.WARNING, + ) + return + try: + return _snap_list()[snapname]['channel'] + except Exception as e: + # If it fails to get the channel information(ex. installed via resource), return nothing. 
+ hookenv.log( + "Cannot get snap tracking (channel): {}".format(e), + hookenv.WARNING, + ) + return + + +def _snap_args( + channel="stable", + devmode=False, + jailmode=False, + dangerous=False, + force_dangerous=False, + connect=None, + classic=False, + revision=None, +): + yield "--channel={}".format(channel) + if devmode is True: + yield "--devmode" + if jailmode is True: + yield "--jailmode" + if force_dangerous is True or dangerous is True: + yield "--dangerous" + if classic is True: + yield "--classic" + if revision is not None: + yield "--revision={}".format(revision) + + +def _install_local(path, **kw): + key = "snap.local.{}".format(path) + if data_changed(key, kw) or any_file_changed([path]): + cmd = ["snap", "install"] + cmd.extend(_snap_args(**kw)) + cmd.append("--dangerous") + cmd.append(path) + hookenv.log("Installing {} from local resource".format(path)) + subprocess.check_call(cmd) + + +def _install_store(snapname, **kw): + """Install snap from store + + :param snapname: Name of snap to install + :type snapname: str + :param kw: Keyword arguments to pass on to ``snap install`` + :type kw: Dict[str, str] + :raises: subprocess.CalledProcessError + """ + cmd = ["snap", "install"] + cmd.extend(_snap_args(**kw)) + cmd.append(snapname) + hookenv.log("Installing {} from store".format(snapname)) + + # Use tenacity decorator for Trusty support (See LP Bug #1934163) + @tenacity.retry( + wait=tenacity.wait_fixed(10), # seconds + stop=tenacity.stop_after_attempt(3), + reraise=True, + ) + def _run_install(): + try: + out = subprocess.check_output(cmd, stderr=subprocess.STDOUT) + hookenv.log( + 'Installation successful cmd="{}" output="{}"'.format(cmd, out), + level=hookenv.DEBUG, + ) + reactive.clear_flag(get_local_flag(snapname)) + except subprocess.CalledProcessError as cp: + hookenv.log( + 'Installation failed cmd="{}" returncode={} output="{}"'.format(cmd, cp.returncode, cp.output), + level=hookenv.ERROR, + ) + raise + + _run_install() + + +def 
_refresh_store(snapname, **kw): + if not data_changed("snap.opts.{}".format(snapname), kw): + return + + # --amend allows us to refresh from a local resource + cmd = ["snap", "refresh", "--amend"] + cmd.extend(_snap_args(**kw)) + cmd.append(snapname) + hookenv.log("Refreshing {} from store".format(snapname)) + out = subprocess.check_output(cmd, stderr=subprocess.STDOUT) + print(out) + + +def _resource_get(snapname): + """Used to fetch the resource path of the given name. + + This wrapper obtains a resource path and adds an additional + check to return False if the resource is zero length. + """ + res_path = hookenv.resource_get(snapname) + if res_path and os.stat(res_path).st_size != 0: + return res_path + return False + + +def get_available_refreshes(): + """Return a list of snaps which have refreshes available.""" + try: + out = subprocess.check_output(["snap", "refresh", "--list"]).decode("utf8") + except subprocess.CalledProcessError: + # If snap refresh fails for whatever reason, we should just return no + # refreshes available - LP:1869630. + return [] + + if out == "All snaps up to date.": + return [] + else: + return [line.split()[0] for line in out.splitlines()[1:]] + + +def is_refresh_available(snapname): + """Check whether a new revision is available for the given snap.""" + return reactive.is_flag_set(get_refresh_available_flag(snapname)) + + +def _check_refresh_available(snapname): + return snapname in get_available_refreshes() + + +def create_cohort_snapshot(snapname): + """Create a new cohort key for the given snap. + + Cohort keys represent a snapshot of the revision of a snap at the time + the key was created. These keys can then be used on any machine to lock + the revision of the snap until a new cohort is joined (or the key expires, + after 90 days). This is used to maintain consistency of the revision of + the snap across units or applications, and to manage the refresh of the + snap in a controlled manner. + + Returns a cohort key. 
+ """ + out = subprocess.check_output(["snap", "create-cohort", snapname]) + data = yaml.safe_load(out.decode("utf8")) + return data["cohorts"][snapname]["cohort-key"] + + +def join_cohort_snapshot(snapname, cohort_key): + """Refresh the snap into the given cohort. + + If the snap was previously in a cohort, this will update the revision + to that of the new cohort snapshot. Note that this does not change the + channel that the snap is in, only the revision within that channel. + """ + if is_local(snapname): + # joining a cohort can override a locally installed snap + hookenv.log("Skipping joining cohort for local snap: " "{}".format(snapname)) + return + subprocess.check_output(["snap", "refresh", snapname, "--cohort", cohort_key]) + # even though we just refreshed to the latest in the cohort, it's + # slightly possible that there's a newer rev available beyond the cohort + reactive.toggle_flag(get_refresh_available_flag(snapname), _check_refresh_available(snapname)) diff --git a/kubernetes-worker/lib/charms/layer/status.py b/kubernetes-worker/lib/charms/layer/status.py new file mode 100644 index 0000000..95b2997 --- /dev/null +++ b/kubernetes-worker/lib/charms/layer/status.py @@ -0,0 +1,189 @@ +import inspect +import errno +import subprocess +import yaml +from enum import Enum +from functools import wraps +from pathlib import Path + +from charmhelpers.core import hookenv +from charms import layer + + +_orig_call = subprocess.call +_statuses = {'_initialized': False, + '_finalized': False} + + +class WorkloadState(Enum): + """ + Enum of the valid workload states. + + Valid options are: + + * `WorkloadState.MAINTENANCE` + * `WorkloadState.BLOCKED` + * `WorkloadState.WAITING` + * `WorkloadState.ACTIVE` + """ + # note: order here determines precedence of state + MAINTENANCE = 'maintenance' + BLOCKED = 'blocked' + WAITING = 'waiting' + ACTIVE = 'active' + + +def maintenance(message): + """ + Set the status to the `MAINTENANCE` state with the given operator message. 
+ + # Parameters + `message` (str): Message to convey to the operator. + """ + status_set(WorkloadState.MAINTENANCE, message) + + +def maint(message): + """ + Shorthand alias for + [maintenance](status.md#charms.layer.status.maintenance). + + # Parameters + `message` (str): Message to convey to the operator. + """ + maintenance(message) + + +def blocked(message): + """ + Set the status to the `BLOCKED` state with the given operator message. + + # Parameters + `message` (str): Message to convey to the operator. + """ + status_set(WorkloadState.BLOCKED, message) + + +def waiting(message): + """ + Set the status to the `WAITING` state with the given operator message. + + # Parameters + `message` (str): Message to convey to the operator. + """ + status_set(WorkloadState.WAITING, message) + + +def active(message): + """ + Set the status to the `ACTIVE` state with the given operator message. + + # Parameters + `message` (str): Message to convey to the operator. + """ + status_set(WorkloadState.ACTIVE, message) + + +def status_set(workload_state, message): + """ + Set the status to the given workload state with a message. + + # Parameters + `workload_state` (WorkloadState or str): State of the workload. Should be + a [WorkloadState](status.md#charms.layer.status.WorkloadState) enum + member, or the string value of one of those members. + `message` (str): Message to convey to the operator. + """ + if not isinstance(workload_state, WorkloadState): + workload_state = WorkloadState(workload_state) + if workload_state is WorkloadState.MAINTENANCE: + _status_set_immediate(workload_state, message) + return + layer = _find_calling_layer() + _statuses.setdefault(workload_state, []).append((layer, message)) + if not _statuses['_initialized'] or _statuses['_finalized']: + # We either aren't initialized, so the finalizer may never be run, + # or the finalizer has already run, so it won't run again. In either + # case, we need to manually invoke it to ensure the status gets set. 
+ _finalize() + + +def _find_calling_layer(): + for frame in inspect.stack(): + # switch to .filename when trusty (Python 3.4) is EOL + fn = Path(frame[1]) + if fn.parent.stem not in ('reactive', 'layer', 'charms'): + continue + layer_name = fn.stem + if layer_name == 'status': + continue # skip our own frames + return layer_name + return None + + +def _initialize(): + if not _statuses['_initialized']: + if layer.options.get('status', 'patch-hookenv'): + _patch_hookenv() + hookenv.atexit(_finalize) + _statuses['_initialized'] = True + + +def _finalize(): + if _statuses['_initialized']: + # If we haven't been initialized, we can't truly be finalized. + # This makes things more efficient if an action sets a status + # but subsequently starts the reactive bus. + _statuses['_finalized'] = True + charm_name = hookenv.charm_name() + charm_dir = Path(hookenv.charm_dir()) + with charm_dir.joinpath('layer.yaml').open() as fp: + includes = yaml.safe_load(fp.read()).get('includes', []) + layer_order = includes + [charm_name] + + for workload_state in WorkloadState: + if workload_state not in _statuses: + continue + if not _statuses[workload_state]: + continue + + def _get_key(record): + layer_name, message = record + if layer_name in layer_order: + return layer_order.index(layer_name) + else: + return 0 + + sorted_statuses = sorted(_statuses[workload_state], key=_get_key) + layer_name, message = sorted_statuses[-1] + _status_set_immediate(workload_state, message) + break + + +def _status_set_immediate(workload_state, message): + workload_state = workload_state.value + try: + hookenv.log('status-set: {}: {}'.format(workload_state, message), + hookenv.INFO) + ret = _orig_call(['status-set', workload_state, message]) + if ret == 0: + return + except OSError as e: + # ignore status-set not available on older controllers + if e.errno != errno.ENOENT: + raise + + +def _patch_hookenv(): + # we can't patch hookenv.status_set directly because other layers may have + # already imported 
it into their namespace, so we have to patch sp.call + subprocess.call = _patched_call + + +@wraps(_orig_call) +def _patched_call(cmd, *args, **kwargs): + if not isinstance(cmd, list) or cmd[0] != 'status-set': + return _orig_call(cmd, *args, **kwargs) + _, workload_state, message = cmd + status_set(workload_state, message) + return 0 # make hookenv.status_set not emit spurious failure logs diff --git a/kubernetes-worker/lib/charms/layer/tls_client.py b/kubernetes-worker/lib/charms/layer/tls_client.py new file mode 100644 index 0000000..b2980dc --- /dev/null +++ b/kubernetes-worker/lib/charms/layer/tls_client.py @@ -0,0 +1,61 @@ +# Copyright 2016-2017 Canonical Ltd. +# +# This file is part of the tls-client layer for Juju. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from charmhelpers.core.hookenv import log +from charmhelpers.core import unitdata + +from charms.reactive import remove_state +from charms.reactive import endpoint_from_flag + + +def reset_certificate_write_flag(cert_type): + """ + Reset the certificate written flag so notification will work on the next + write cert_type must be 'server', 'client', or 'ca' to indicate type of + certificate + """ + if cert_type not in ['server', 'client', 'ca']: + log('Unknown certificate type!') + else: + remove_state('tls_client.{0}.certificate.written'.format(cert_type)) + + +def request_server_cert(common_name, sans=None, crt_path=None, key_path=None): + tls = endpoint_from_flag('certificates.available') + tls.request_server_cert(common_name, sans) + if not crt_path and not key_path: + return + kv = unitdata.kv() + cert_paths = kv.get('layer.tls-client.cert-paths', {}) + cert_paths.setdefault('server', {})[common_name] = { + 'crt': str(crt_path), + 'key': str(key_path), + } + kv.set('layer.tls-client.cert-paths', cert_paths) + + +def request_client_cert(common_name, sans=None, crt_path=None, key_path=None): + tls = endpoint_from_flag('certificates.available') + tls.request_client_cert(common_name, sans) + if not crt_path and not key_path: + return + kv = unitdata.kv() + cert_paths = kv.get('layer.tls-client.cert-paths', {}) + cert_paths.setdefault('client', {})[common_name] = { + 'crt': str(crt_path), + 'key': str(key_path), + } + kv.set('layer.tls-client.cert-paths', cert_paths) diff --git a/kubernetes-worker/lib/charms/leadership.py b/kubernetes-worker/lib/charms/leadership.py new file mode 100644 index 0000000..d2a95fa --- /dev/null +++ b/kubernetes-worker/lib/charms/leadership.py @@ -0,0 +1,68 @@ +# Copyright 2015-2016 Canonical Ltd. +# +# This file is part of the Leadership Layer for Juju. 
+# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License version 3, as +# published by the Free Software Foundation. +# +# This program is distributed in the hope that it will be useful, but +# WITHOUT ANY WARRANTY; without even the implied warranties of +# MERCHANTABILITY, SATISFACTORY QUALITY, or FITNESS FOR A PARTICULAR +# PURPOSE. See the GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . + +from charmhelpers.core import hookenv +from charmhelpers.core import unitdata + +from charms import reactive +from charms.reactive import not_unless + + +__all__ = ['leader_get', 'leader_set'] + + +@not_unless('leadership.is_leader') +def leader_set(*args, **kw): + '''Change leadership settings, per charmhelpers.core.hookenv.leader_set. + + Settings may either be passed in as a single dictionary, or using + keyword arguments. All values must be strings. + + The leadership.set.{key} reactive state will be set while the + leadership hook environment setting remains set. + + Changed leadership settings will set the leadership.changed.{key} + and leadership.changed states. These states will remain set until + the following hook. + + These state changes take effect immediately on the leader, and + in future hooks run on non-leaders. In this way both leaders and + non-leaders can share handlers, waiting on these states. 
+ ''' + if args: + if len(args) > 1: + raise TypeError('leader_set() takes 1 positional argument but ' + '{} were given'.format(len(args))) + else: + settings = dict(args[0]) + else: + settings = {} + settings.update(kw) + previous = unitdata.kv().getrange('leadership.settings.', strip=True) + + for key, value in settings.items(): + if value != previous.get(key): + reactive.set_state('leadership.changed.{}'.format(key)) + reactive.set_state('leadership.changed') + reactive.helpers.toggle_state('leadership.set.{}'.format(key), + value is not None) + hookenv.leader_set(settings) + unitdata.kv().update(settings, prefix='leadership.settings.') + + +def leader_get(attribute=None): + '''Return leadership settings, per charmhelpers.core.hookenv.leader_get.''' + return hookenv.leader_get(attribute) diff --git a/kubernetes-worker/lib/debug_script.py b/kubernetes-worker/lib/debug_script.py new file mode 100644 index 0000000..e156924 --- /dev/null +++ b/kubernetes-worker/lib/debug_script.py @@ -0,0 +1,8 @@ +import os + +dir = os.environ["DEBUG_SCRIPT_DIR"] + + +def open_file(path, *args, **kwargs): + """ Open a file within the debug script dir """ + return open(os.path.join(dir, path), *args, **kwargs) diff --git a/kubernetes-worker/lxd-profile.yaml b/kubernetes-worker/lxd-profile.yaml new file mode 100644 index 0000000..6b4babc --- /dev/null +++ b/kubernetes-worker/lxd-profile.yaml @@ -0,0 +1,16 @@ +name: juju-default-k8s-deployment-0 +config: + linux.kernel_modules: ip_tables,ip6_tables,netlink_diag,nf_nat,overlay + raw.lxc: | + lxc.apparmor.profile=unconfined + lxc.mount.auto=proc:rw sys:rw + lxc.cgroup.devices.allow=a + lxc.cap.drop= + security.nesting: true + security.privileged: true +description: "" +devices: + aadisable: + path: /dev/kmsg + source: /dev/kmsg + type: unix-char diff --git a/kubernetes-worker/make_docs b/kubernetes-worker/make_docs new file mode 100644 index 0000000..dcd4c1f --- /dev/null +++ b/kubernetes-worker/make_docs @@ -0,0 +1,20 @@ 
+#!.tox/py3/bin/python + +import os +import sys +from shutil import rmtree +from unittest.mock import patch + +import pydocmd.__main__ + + +with patch('charmhelpers.core.hookenv.metadata') as metadata: + sys.path.insert(0, 'lib') + sys.path.insert(1, 'reactive') + print(sys.argv) + if len(sys.argv) == 1: + sys.argv.extend(['build']) + pydocmd.__main__.main() + rmtree('_build') + if os.path.exists('.unit-state.db'): + os.remove('.unit-state.db') diff --git a/kubernetes-worker/manifest.yaml b/kubernetes-worker/manifest.yaml new file mode 100644 index 0000000..9f3ccbf --- /dev/null +++ b/kubernetes-worker/manifest.yaml @@ -0,0 +1,27 @@ +analysis: + attributes: + - name: language + result: python + - name: framework + result: reactive +bases: +- architectures: + - amd64 + - s390x + - arm64 + channel: '20.04' + name: ubuntu +- architectures: + - amd64 + - s390x + - arm64 + channel: '22.04' + name: ubuntu +- architectures: + - amd64 + - s390x + - arm64 + channel: '18.04' + name: ubuntu +charmcraft-started-at: '2022-07-14T00:00:00.000000Z' +charmcraft-version: 1.7.1 diff --git a/kubernetes-worker/metadata.yaml b/kubernetes-worker/metadata.yaml new file mode 100644 index 0000000..1b341d8 --- /dev/null +++ b/kubernetes-worker/metadata.yaml @@ -0,0 +1,105 @@ +"name": "kubernetes-worker" +"summary": "The workload bearing units of a kubernetes cluster" +"maintainers": +- "Tim Van Steenburgh " +- "George Kraft " +- "Rye Terrell " +- "Konstantinos Tsakalozos " +- "Charles Butler " +- "Matthew Bruzek " +- "Mike Wilson " +- "Joe Borg " +"description": | + Kubernetes is an open-source platform for deploying, scaling, and operations + of application containers across a cluster of hosts. Kubernetes is portable + in that it works with public, private, and hybrid clouds. Extensible through + a pluggable infrastructure. Self healing in that it will automatically + restart and place containers on healthy nodes if a node ever goes away. 
+"tags": +- "misc" +"series": +- "focal" +- "jammy" +- "bionic" +"requires": + "certificates": + "interface": "tls-certificates" + "kube-api-endpoint": + # kube-api-endpoint is not recommended as the API endpoints will be provided + # via the kube-control relation. However, it can be used to override those + # endpoints if you need to inject a reverse proxy between the control-plane and workers. + "interface": "http" + "kube-control": + "interface": "kube-control" + "aws": + "interface": "aws-integration" + "gcp": + "interface": "gcp-integration" + "openstack": + "interface": "openstack-integration" + "vsphere": + "interface": "vsphere-integration" + "azure": + "interface": "azure-integration" + "nfs": + "interface": "mount" +"provides": + "nrpe-external-master": + "interface": "nrpe-external-master" + "scope": "container" + "container-runtime": + "interface": "container-runtime" + "scope": "container" + "cni": + "interface": "kubernetes-cni" + "scope": "container" + "ingress-proxy": + "interface": "http" + "scrape": + "interface": "prometheus" +"peers": + "coordinator": + "interface": "coordinator" +"docs": "https://discourse.charmhub.io/t/kubernetes-worker-docs-index/6104" +"resources": + "cni-amd64": + "type": "file" + "filename": "cni.tgz" + "description": "CNI plugins for amd64" + "cni-arm64": + "type": "file" + "filename": "cni.tgz" + "description": "CNI plugins for arm64" + "cni-s390x": + "type": "file" + "filename": "cni.tgz" + "description": "CNI plugins for s390x" + "core": + "type": "file" + "filename": "core.snap" + "description": | + core snap + [DEPRECATED] in favor of using a Snap Store Proxy. + See https://ubuntu.com/kubernetes/docs/proxies for more details. + "kubectl": + "type": "file" + "filename": "kubectl.snap" + "description": | + kubectl snap + [DEPRECATED] in favor of using a Snap Store Proxy. + See https://ubuntu.com/kubernetes/docs/proxies for more details. 
+ "kubelet": + "type": "file" + "filename": "kubelet.snap" + "description": | + kubelet snap + [DEPRECATED] in favor of using a Snap Store Proxy. + See https://ubuntu.com/kubernetes/docs/proxies for more details. + "kube-proxy": + "type": "file" + "filename": "kube-proxy.snap" + "description": | + kube-proxy snap + [DEPRECATED] in favor of using a Snap Store Proxy. + See https://ubuntu.com/kubernetes/docs/proxies for more details. +"subordinate": !!bool "false" diff --git a/kubernetes-worker/metrics.yaml b/kubernetes-worker/metrics.yaml new file mode 100644 index 0000000..0fcb3c1 --- /dev/null +++ b/kubernetes-worker/metrics.yaml @@ -0,0 +1,2 @@ +metrics: + juju-units: {} diff --git a/kubernetes-worker/pydocmd.yml b/kubernetes-worker/pydocmd.yml new file mode 100644 index 0000000..ab3b2ef --- /dev/null +++ b/kubernetes-worker/pydocmd.yml @@ -0,0 +1,16 @@ +site_name: 'Status Management Layer' + +generate: + - status.md: + - charms.layer.status.WorkloadState + - charms.layer.status.maintenance + - charms.layer.status.maint + - charms.layer.status.blocked + - charms.layer.status.waiting + - charms.layer.status.active + - charms.layer.status.status_set + +pages: + - Status Management Layer: status.md + +gens_dir: docs diff --git a/kubernetes-worker/pyproject.toml b/kubernetes-worker/pyproject.toml new file mode 100644 index 0000000..db0dcd0 --- /dev/null +++ b/kubernetes-worker/pyproject.toml @@ -0,0 +1,3 @@ +[tool.black] +line-length=120 +target-version=['py35'] diff --git a/kubernetes-worker/reactive/__init__.py b/kubernetes-worker/reactive/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/kubernetes-worker/reactive/apt.py b/kubernetes-worker/reactive/apt.py new file mode 100644 index 0000000..8832296 --- /dev/null +++ b/kubernetes-worker/reactive/apt.py @@ -0,0 +1,158 @@ +# Copyright 2015-2020 Canonical Ltd. +# +# This file is part of the Apt layer for Juju. 
+# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License version 3, as +# published by the Free Software Foundation. +# +# This program is distributed in the hope that it will be useful, but +# WITHOUT ANY WARRANTY; without even the implied warranties of +# MERCHANTABILITY, SATISFACTORY QUALITY, or FITNESS FOR A PARTICULAR +# PURPOSE. See the GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . + +''' +charms.reactive helpers for dealing with deb packages. + +Add apt package sources using add_source(). Queue deb packages for +installation with install(). Configure and work with your software +once the apt.installed.{packagename} flag is set. +''' +import os.path +import subprocess +import re + +from charmhelpers import fetch +from charmhelpers.core import hookenv +from charmhelpers.core.hookenv import DEBUG, ERROR, WARNING +from charms import layer +from charms.layer import status +from charms import reactive +from charms.reactive import when, when_not + +import charms.apt + + +@when('apt.needs_update') +def update(): + charms.apt.update() + + +@when('apt.queued_installs') +@when_not('apt.needs_update') +def install_queued(): + charms.apt.install_queued() + + +@when_not('apt.queued_installs') +def ensure_package_status(): + charms.apt.ensure_package_status() + + +def filter_installed_packages(packages): + # Don't use fetch.filter_installed_packages, as it depends on python-apt + # and not available if the basic layer's use_site_packages option is off + cmd = ['dpkg-query', '--show', r'--showformat=${Package}\n'] + installed = set(subprocess.check_output(cmd, universal_newlines=True).split()) + + # list of packages that are not installed + not_installed = set(packages) - installed + + # now we want to check for any regex in the installation of the packages + not_installed_iterable = 
not_installed.copy() + for pkg in not_installed_iterable: + # grab the pattern that we want to match against the packages + p = re.compile(pkg) + for pkg2 in installed: + matched = p.search(pkg2) + if matched: + not_installed.remove(pkg) + break + + return not_installed + + +def clear_removed_package_flags(): + """On hook startup, clear install flags for removed packages.""" + removed = filter_installed_packages(charms.apt.installed()) + if removed: + hookenv.log('{} missing packages ({})'.format(len(removed), ','.join(removed)), WARNING) + for package in removed: + reactive.clear_flag('apt.installed.{}'.format(package)) + + +def add_implicit_signing_keys(): + """Add keys specified in layer.yaml + + The charm can ship trusted keys, avoiding the need to specify + them in config.yaml. We need to add them before we attempt + to add any custom sources, or apt will block under Bionic + if we attempt to add a source before the key becomes trusted. + """ + opts = layer.options() + if 'apt' not in opts or 'keys' not in opts['apt']: + return + keys = opts['apt']['keys'] + for p in keys: + full_p = os.path.join(hookenv.charm_dir(), p) + if os.path.exists(full_p): + hookenv.log("Adding key {}".format(p), DEBUG) + subprocess.check_call( + ['apt-key', 'add', full_p], + stdin=subprocess.DEVNULL, + stdout=subprocess.DEVNULL, + stderr=subprocess.DEVNULL, + ) + else: + hookenv.log('Key {!r} does not exist'.format(full_p), ERROR) + + +def configure_sources(): + """Add user specified package sources from the service configuration. + + See charmhelpers.fetch.configure_sources for details. + """ + config = hookenv.config() + + # We don't have enums, so we need to validate this ourselves. + package_status = config.get('package_status') or '' + if package_status not in ('hold', 'install'): + status.blocked('Unknown package_status {}'.format(package_status)) + # Die before further hooks are run. 
This isn't very nice, but + # there is no other way to inform the operator that they have + # invalid configuration. + raise SystemExit(0) + + sources = config.get('install_sources') or '' + keys = config.get('install_keys') or '' + if reactive.helpers.data_changed('apt.configure_sources', (sources, keys)): + fetch.configure_sources(update=False, sources_var='install_sources', keys_var='install_keys') + reactive.set_flag('apt.needs_update') + + # Clumsy 'config.get() or' per Bug #1641362 + extra_packages = sorted((config.get('extra_packages') or '').split()) + if extra_packages: + charms.apt.queue_install(extra_packages) + + +def queue_layer_packages(): + """Add packages listed in build-time layer options.""" + # Both basic and apt layer. basic layer will have already installed + # its defined packages, but rescheduling it here gets the apt layer + # flag set and they will pinned as any other apt layer installed + # package. + opts = layer.options() + for section in ['basic', 'apt']: + if section in opts and 'packages' in opts[section]: + charms.apt.queue_install(opts[section]['packages']) + + +hookenv.atstart(hookenv.log, 'Initializing Apt Layer') +hookenv.atstart(clear_removed_package_flags) +hookenv.atstart(add_implicit_signing_keys) +hookenv.atstart(configure_sources) +hookenv.atstart(queue_layer_packages) +hookenv.atstart(charms.apt.reset_application_version) diff --git a/kubernetes-worker/reactive/cdk_service_kicker.py b/kubernetes-worker/reactive/cdk_service_kicker.py new file mode 100644 index 0000000..f7fd33a --- /dev/null +++ b/kubernetes-worker/reactive/cdk_service_kicker.py @@ -0,0 +1,32 @@ +import os +import subprocess +from charms import layer +from charms.reactive import hook, when_not, remove_state, set_state +from charmhelpers.core.templating import render + + +@hook('upgrade-charm') +def upgrade_charm(): + remove_state('cdk-service-kicker.installed') + + +@when_not('cdk-service-kicker.installed') +def install_cdk_service_kicker(): + ''' Installs 
the cdk-service-kicker service. Workaround for + https://github.com/juju-solutions/bundle-canonical-kubernetes/issues/357 + ''' + source = 'cdk-service-kicker' + dest = '/usr/bin/cdk-service-kicker' + services = layer.options('cdk-service-kicker').get('services') + context = {'services': ' '.join(services)} + render(source, dest, context) + os.chmod('/usr/bin/cdk-service-kicker', 0o775) + + source = 'cdk-service-kicker.service' + dest = '/etc/systemd/system/cdk-service-kicker.service' + context = {} + render(source, dest, context) + command = ['systemctl', 'enable', 'cdk-service-kicker'] + subprocess.check_call(command) + + set_state('cdk-service-kicker.installed') diff --git a/kubernetes-worker/reactive/coordinator.py b/kubernetes-worker/reactive/coordinator.py new file mode 100644 index 0000000..474a95d --- /dev/null +++ b/kubernetes-worker/reactive/coordinator.py @@ -0,0 +1,71 @@ +# Copyright 2015-2016 Canonical Ltd. +# +# This file is part of the Coordinator Layer for Juju. +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License version 3, as +# published by the Free Software Foundation. +# +# This program is distributed in the hope that it will be useful, but +# WITHOUT ANY WARRANTY; without even the implied warranties of +# MERCHANTABILITY, SATISFACTORY QUALITY, or FITNESS FOR A PARTICULAR +# PURPOSE. See the GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . + +from charmhelpers.core import hookenv +from charms.coordinator import coordinator, log +import charms.reactive + + +def initialize_coordinator_state(): + ''' + The coordinator.granted.{lockname} state will be set and the + coordinator.requested.{lockname} state removed for every lock + granted to the currently running hook. 
+ + The coordinator.requested.{lockname} state will remain set for locks + not yet granted + ''' + log('Initializing coordinator layer') + + requested = set(coordinator.requests.get(hookenv.local_unit(), {}).keys()) + previously_requested = set(state.split('.', 2)[2] + for state in charms.reactive.bus.get_states() + if state.startswith('coordinator.requested.')) + + granted = set(coordinator.grants.get(hookenv.local_unit(), {}).keys()) + previously_granted = set(state.split('.', 2)[2] + for state in charms.reactive.bus.get_states() + if state.startswith('coordinator.granted.')) + + # Set reactive state for requested locks. + for lock in requested: + log('Requested {} lock'.format(lock), hookenv.DEBUG) + charms.reactive.set_state('coordinator.requested.{}'.format(lock)) + + # Set reactive state for locks that have been granted. + for lock in granted: + log('Granted {} lock'.format(lock), hookenv.DEBUG) + charms.reactive.set_state('coordinator.granted.{}'.format(lock)) + + # Remove reactive state for locks that have been released. + for lock in (previously_granted - granted): + log('Dropped {} lock'.format(lock), hookenv.DEBUG) + charms.reactive.remove_state('coordinator.granted.{}'.format(lock)) + + # Remove requested state for locks no longer requested and not granted. + for lock in (previously_requested - requested - granted): + log('Request for {} lock was dropped'.format(lock), hookenv.DEBUG) + charms.reactive.remove_state('coordinator.requested.{}'.format(lock)) + + +# Per https://github.com/juju-solutions/charms.reactive/issues/33, +# this module may be imported multiple times so ensure the +# initialization hook is only registered once. I have to piggy back +# onto the namespace of a module imported before reactive discovery +# to do this. 
+if not hasattr(charms.reactive, '_coordinator_registered'): + hookenv.atstart(initialize_coordinator_state) + charms.reactive._coordinator_registered = True diff --git a/kubernetes-worker/reactive/kubernetes_node_base.py b/kubernetes-worker/reactive/kubernetes_node_base.py new file mode 100644 index 0000000..1025fac --- /dev/null +++ b/kubernetes-worker/reactive/kubernetes_node_base.py @@ -0,0 +1,132 @@ +import os +from subprocess import check_call + +from charms.layer import snap +from charms.leadership import leader_get, leader_set +from charms.reactive import ( + clear_flag, + data_changed, + hook, + set_flag, + set_state, + when, + when_not, +) + +from charmhelpers.core import hookenv +from charmhelpers.core.host import is_container +from charmhelpers.core.sysctl import create as create_sysctl +from charms.layer.kubernetes_common import arch + + +@hook("upgrade-charm") +def upgrade_charm(): + clear_flag("kubernetes.cni-plugins.installed") + + +@when_not("kubernetes.cni-plugins.installed") +def install_cni_plugins(): + """Unpack the cni-plugins resource""" + hookenv.status_set("maintenance", "Installing CNI plugins") + + # Get the resource via resource_get + try: + resource_name = "cni-{}".format(arch()) + archive = hookenv.resource_get(resource_name) + except Exception: + message = "Error fetching the cni resource." 
+ hookenv.log(message) + return + + if not archive: + hookenv.log("Missing cni resource.") + return + + # Handle null resource publication, we check if filesize < 1mb + filesize = os.stat(archive).st_size + if filesize < 1000000: + hookenv.log("Incomplete cni resource.") + return + + unpack_path = "/opt/cni/bin" + os.makedirs(unpack_path, exist_ok=True) + cmd = ["tar", "xfvz", archive, "-C", unpack_path] + hookenv.log(cmd) + check_call(cmd) + + set_flag("kubernetes.cni-plugins.installed") + + +@when("kubernetes-node.snaps.installed") +@when("snap.refresh.set") +@when("leadership.is_leader") +def process_snapd_timer(): + """ + Set the snapd refresh timer on the leader so all cluster members + (present and future) will refresh near the same time. + + :return: None + """ + # Get the current snapd refresh timer; we know layer-snap has set this + # when the 'snap.refresh.set' flag is present. + timer = snap.get(snapname="core", key="refresh.timer").decode("utf-8").strip() + if not timer: + # The core snap timer is empty. This likely means a subordinate timer + # reset ours. Try to set it back to a previously leader-set value, + # falling back to config if needed. Luckily, this should only happen + # during subordinate install, so this should remain stable afterward. + timer = leader_get("snapd_refresh") or hookenv.config("snapd_refresh") + snap.set_refresh_timer(timer) + + # Ensure we have the timer known by snapd (it may differ from config). + timer = snap.get(snapname="core", key="refresh.timer").decode("utf-8").strip() + + # The first time through, data_changed will be true. Subsequent calls + # should only update leader data if something changed. 
+ if data_changed("snapd_refresh", timer): + hookenv.log("setting leader snapd_refresh timer to: {}".format(timer)) + leader_set({"snapd_refresh": timer}) + + +@when("kubernetes-node.snaps.installed") +@when("snap.refresh.set") +@when("leadership.changed.snapd_refresh") +@when_not("leadership.is_leader") +def set_snapd_timer(): + """ + Set the snapd refresh.timer on non-leader cluster members. + + :return: None + """ + # NB: This method should only be run when 'snap.refresh.set' is present. + # Layer-snap will always set a core refresh.timer, which may not be the + # same as our leader. Gating with 'snap.refresh.set' ensures layer-snap + # has finished and we are free to set our config to the leader's timer. + timer = leader_get("snapd_refresh") or "" # None will error + hookenv.log("setting snapd_refresh timer to: {}".format(timer)) + snap.set_refresh_timer(timer) + + +@when("config.changed.sysctl") +def write_sysctl(): + """ + :return: None + """ + sysctl_settings = hookenv.config("sysctl") + if sysctl_settings and not is_container(): + create_sysctl( + sysctl_settings, + "/etc/sysctl.d/50-kubernetes-charm.conf", + # Some keys in the config may not exist in /proc/sys/net/. + # For example, the conntrack module may not be loaded when + # using lxd drivers insteam of kvm. In these cases, we + # simply ignore the missing keys, rather than making time + # consuming calls out to the filesystem to check for their + # existence. + ignore=True, + ) + + +@when("config.changed.labels") +def handle_labels_changed(): + set_state("node.label-config-required") diff --git a/kubernetes-worker/reactive/kubernetes_worker.py b/kubernetes-worker/reactive/kubernetes_worker.py new file mode 100644 index 0000000..7eccfc2 --- /dev/null +++ b/kubernetes-worker/reactive/kubernetes_worker.py @@ -0,0 +1,1384 @@ +#!/usr/bin/env python + +# Copyright 2015 The Kubernetes Authors. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import os +import re +import shutil +import subprocess +import time +import traceback + +from base64 import b64encode +from subprocess import check_call, check_output +from subprocess import CalledProcessError +from socket import gethostname + +import charms.coordinator +from charms import layer +from charms.layer import snap +from charms.reactive import hook +from charms.reactive import endpoint_from_flag +from charms.reactive import endpoint_from_name +from charms.reactive import remove_state, clear_flag +from charms.reactive import set_state, set_flag +from charms.reactive import is_state, is_flag_set, any_flags_set +from charms.reactive import when, when_any, when_not, when_none +from charms.reactive import data_changed, is_data_changed + +from charmhelpers.core import hookenv, unitdata +from charmhelpers.core.host import service_stop, service_restart +from charmhelpers.core.host import service_pause, service_resume +from charmhelpers.core.templating import render +from charmhelpers.contrib.charmsupport import nrpe + +from charms.layer import kubernetes_common + +from charms.layer.kubernetes_common import kubeclientconfig_path +from charms.layer.kubernetes_common import migrate_resource_checksums +from charms.layer.kubernetes_common import check_resources_for_upgrade_needed +from charms.layer.kubernetes_common import ( + calculate_and_store_resource_checksums, +) # noqa +from charms.layer.kubernetes_common import 
create_kubeconfig +from charms.layer.kubernetes_common import kubectl +from charms.layer.kubernetes_common import arch, get_node_name +from charms.layer.kubernetes_common import parse_extra_args +from charms.layer.kubernetes_common import write_gcp_snap_config +from charms.layer.kubernetes_common import write_azure_snap_config +from charms.layer.kubernetes_common import kubeproxyconfig_path +from charms.layer.kubernetes_common import configure_kube_proxy +from charms.layer.kubernetes_common import get_version +from charms.layer.kubernetes_common import ca_crt_path +from charms.layer.kubernetes_common import server_crt_path +from charms.layer.kubernetes_common import server_key_path +from charms.layer.kubernetes_common import client_crt_path +from charms.layer.kubernetes_common import client_key_path +from charms.layer.kubernetes_common import get_unit_number +from charms.layer.kubernetes_common import get_node_ip +from charms.layer.kubernetes_common import configure_kubelet +from charms.layer.kubernetes_common import get_sandbox_image_uri +from charms.layer.kubernetes_common import configure_default_cni +from charms.layer.kubernetes_common import kubelet_kubeconfig_path + +from charms.layer.kubernetes_node_base import LabelMaker + +from charms.layer.nagios import install_nagios_plugin_from_text +from charms.layer.nagios import remove_nagios_plugin + +# Override the default nagios shortname regex to allow periods, which we +# need because our bin names contain them (e.g. 'snap.foo.daemon'). The +# default regex in charmhelpers doesn't allow periods, but nagios itself does. +nrpe.Check.shortname_re = r"[\.A-Za-z0-9-_]+$" +nrpe_kubeconfig_path = "/var/lib/nagios/.kube/config" + +gcp_creds_env_key = "GOOGLE_APPLICATION_CREDENTIALS" +snap_resources = ["kubectl", "kubelet", "kube-proxy"] +worker_services = ("kubelet", "kube-proxy") +checksum_prefix = "kubernetes-worker.resource-checksums." +configure_prefix = "kubernetes-worker.prev_args." 
+cpu_manager_state = "/var/lib/kubelet/cpu_manager_state" + +cohort_snaps = ["kubectl", "kubelet", "kube-proxy"] + +os.environ["PATH"] += os.pathsep + os.path.join(os.sep, "snap", "bin") +db = unitdata.kv() + + +@hook("upgrade-charm") +def upgrade_charm(): + # migrate to new flags + if is_state("kubernetes-worker.restarted-for-cloud"): + remove_state("kubernetes-worker.restarted-for-cloud") + set_state("kubernetes-worker.cloud.ready") + if is_state("kubernetes-worker.cloud-request-sent"): + # minor change, just for consistency + remove_state("kubernetes-worker.cloud-request-sent") + set_state("kubernetes-worker.cloud.request-sent") + if is_state("kubernetes-worker.snaps.installed"): + # consistent with layer-kubernetes-node-base + remove_state("kubernetes-worker.snaps.installed") + set_state("kubernetes-node.snaps.installed") + + set_state("config.changed.install_from_upstream") + hookenv.atexit(remove_state, "config.changed.install_from_upstream") + + cleanup_pre_snap_services() + migrate_resource_checksums(checksum_prefix, snap_resources) + if check_resources_for_upgrade_needed(checksum_prefix, snap_resources): + set_upgrade_needed() + + # Remove the RC for nginx ingress if it exists + if hookenv.config().get("ingress"): + set_state("kubernetes-worker.remove-old-ingress") + + # Remove gpu.enabled state so we can reconfigure gpu-related kubelet flags, + # since they can differ between k8s versions + if is_state("kubernetes-worker.gpu.enabled"): + remove_state("kubernetes-worker.gpu.enabled") + try: + disable_gpu() + except LabelMaker.NodeLabelError: + # Removing node label failed. Probably the control-plane is unavailable. + # Proceed with the upgrade in hope GPUs will still be there. + hookenv.log("Failed to remove GPU labels. 
@hook("pre-series-upgrade")
def pre_series_upgrade():
    """Drain this node and pause the worker daemons ahead of an OS series
    upgrade, so workloads are rescheduled cleanly before the reboot."""
    # NB: We use --force here because unmanaged pods are going to die anyway
    # when the node is shut down, and it's better to let drain cleanly
    # terminate them. We use --delete-local-data because the dashboard, at
    # least, uses local data (emptyDir); but local data is documented as being
    # ephemeral anyway, so we can assume it should be ok.
    # NOTE(review): --delete-local-data is deprecated in newer kubectl in
    # favour of --delete-emptydir-data — confirm against the snap channel
    # this charm targets before changing it.
    kubectl(
        "drain",
        get_node_name(),
        "--ignore-daemonsets",
        "--force",
        "--delete-local-data",
    )
    service_pause("snap.kubelet.daemon")
    service_pause("snap.kube-proxy.daemon")
@hook("post-series-upgrade")
def post_series_upgrade():
    """Resume the worker daemons and mark the node schedulable again after
    an OS series upgrade."""
    for daemon in ("snap.kubelet.daemon", "snap.kube-proxy.daemon"):
        service_resume(daemon)
    kubectl("uncordon", get_node_name())


@when("kubernetes-worker.remove-old-ingress")
def remove_old_ingress():
    """Delete ingress resources left behind by pre-1.12 charm revisions.

    Retries on a later hook if any kubectl call fails.
    """
    app = hookenv.service_name()
    stale_resources = (
        ("rc", "nginx-ingress-controller"),
        # these moved into a different namespace for 1.12
        ("rc", "default-http-backend"),
        ("svc", "default-http-backend"),
        ("ds", "nginx-ingress-{}-controller".format(app)),
        ("serviceaccount", "nginx-ingress-{}-serviceaccount".format(app)),
        (
            "clusterrolebinding",
            "nginx-ingress-clusterrole-nisa-{}-binding".format(app),
        ),
        ("configmap", "nginx-load-balancer-{}-conf".format(app)),
    )
    try:
        for kind, name in stale_resources:
            kubectl("delete", kind, name, "--ignore-not-found")
    except CalledProcessError:
        # try again next time
        return

    remove_state("kubernetes-worker.remove-old-ingress")


def set_upgrade_needed():
    """Flag that the snaps need upgrading; auto-approve the upgrade unless
    the operator requires a manual upgrade action."""
    set_state("kubernetes-worker.snaps.upgrade-needed")
    config = hookenv.config()
    previous_channel = config.previous("channel")
    manual = config.get("require-manual-upgrade")
    if previous_channel is None or not manual:
        set_state("kubernetes-worker.snaps.upgrade-specified")


def cleanup_pre_snap_services():
    """Remove services, states, and files left by the pre-snap
    (archive-based) installation of Kubernetes."""
    # remove old states
    remove_state("kubernetes-worker.components.installed")

    # disable old services
    for service in ("kubelet", "kube-proxy"):
        hookenv.log("Stopping {0} service.".format(service))
        service_stop(service)

    # cleanup old files
    stale_paths = (
        "/lib/systemd/system/kubelet.service",
        "/lib/systemd/system/kube-proxy.service",
        "/etc/default/kube-default",
        "/etc/default/kubelet",
        "/etc/default/kube-proxy",
        "/usr/local/bin/kubectl",
        "/usr/local/bin/kubelet",
        "/usr/local/bin/kube-proxy",
        "/etc/kubernetes",
    )
    for path in stale_paths:
        if os.path.isdir(path):
            hookenv.log("Removing directory: " + path)
            shutil.rmtree(path)
        elif os.path.isfile(path):
            hookenv.log("Removing file: " + path)
            os.remove(path)
@when("config.changed.channel")
def channel_changed():
    """The snap channel config changed; schedule a snap upgrade."""
    set_upgrade_needed()


@when("kubernetes-worker.snaps.upgrade-specified")
def install_snaps():
    """Install (or refresh) the core snap plus the Kubernetes snaps at the
    configured channel, then flag a service restart."""
    channel = hookenv.config("channel")
    hookenv.status_set("maintenance", "Installing core snap")
    snap.install("core")
    # Kubernetes snaps are classic and track the configured channel.
    for name in ("kubectl", "kubelet", "kube-proxy"):
        hookenv.status_set("maintenance", "Installing {} snap".format(name))
        snap.install(name, channel=channel, classic=True)
    calculate_and_store_resource_checksums(checksum_prefix, snap_resources)
    set_state("kubernetes-node.snaps.installed")
    set_state("kubernetes-worker.restart-needed")
    remove_state("kubernetes-worker.snaps.upgrade-needed")
    remove_state("kubernetes-worker.snaps.upgrade-specified")


@when("kubernetes-node.snaps.installed", "kube-control.cohort_keys.available")
@when_none("coordinator.granted.cohort", "coordinator.requested.cohort")
def safely_join_cohort():
    """Coordinate the rollout of snap refreshes.

    When cohort keys change, grab a lock so that only 1 unit in the
    application joins the new cohort at a time. This allows us to roll out
    snap refreshes without risking all units going down at once.
    """
    kube_control = endpoint_from_flag("kube-control.cohort_keys.available")

    keys = kube_control.cohort_keys
    # is_data_changed only peeks; the cache is committed after a successful
    # join (see join_or_update_cohorts).
    if is_data_changed("master-cohorts", keys):
        clear_flag("kubernetes-worker.cohorts.joined")
        charms.coordinator.acquire("cohort")
+ """ + kube_control = endpoint_from_flag("kube-control.cohort_keys.available") + + cohort_keys = kube_control.cohort_keys + if is_data_changed("master-cohorts", cohort_keys): + clear_flag("kubernetes-worker.cohorts.joined") + charms.coordinator.acquire("cohort") + + +@when( + "kubernetes-node.snaps.installed", + "kube-control.cohort_keys.available", + "coordinator.granted.cohort", +) +@when_not("kubernetes-worker.cohorts.joined") +def join_or_update_cohorts(): + """Join or update a cohort snapshot. + + All units of this application (leader and followers) need to refresh their + installed snaps to the current cohort snapshot. + """ + kube_control = endpoint_from_flag("kube-control.cohort_keys.available") + cohort_keys = kube_control.cohort_keys + for snapname in cohort_snaps: + hookenv.status_set("maintenance", "Joining cohort for {}.".format(snapname)) + cohort_key = cohort_keys[snapname] + for delay in (5, 30, 60): + try: + snap.join_cohort_snapshot(snapname, cohort_key) + hookenv.log("Joined cohort for {}".format(snapname)) + break + except subprocess.CalledProcessError: + hookenv.log( + "Error joining cohort for {}".format(snapname), level=hookenv.ERROR + ) + hookenv.status_set( + "maintenance", + "Error joining cohort for {} (see logs), " + "will retry.".format(snapname), + ) + time.sleep(delay) + else: + set_flag("kubernetes-worker.cohorts.failed") + return + # Update our cache of the cohort keys, now that they're successfully applied. + data_changed("master-cohorts", cohort_keys) + set_flag("kubernetes-worker.cohorts.joined") + clear_flag("kubernetes-worker.cohorts.failed") + + +@when_none("coordinator.granted.cohort", "coordinator.requested.cohort") +@when("kubernetes-worker.cohorts.failed") +def reaquire_coordinator_lock(): + # We can't do this in the same hook that the cohort join failed, + # because if we request the lock when we already have it, it's + # treated as a no-op and then dropped at the end of the hook. 
@hook("stop")
def shutdown():
    """When this unit is destroyed:
    - delete the current node
    - stop the worker services
    """
    try:
        # Only attempt deregistration if kubelet was ever configured.
        if os.path.isfile(kubelet_kubeconfig_path):
            kubectl("delete", "node", get_node_name())
    except CalledProcessError:
        # Best-effort: the API server may already be unreachable at teardown.
        hookenv.log("Failed to unregister node.")
    service_stop("snap.kubelet.daemon")
    service_stop("snap.kube-proxy.daemon")


@when("kubernetes-node.snaps.installed")
def set_app_version():
    """Declare the application version to juju"""
    cmd = ["kubelet", "--version"]
    version = check_output(cmd)
    # Output looks like b"Kubernetes v1.24.x"; keep everything after " v".
    # NOTE(review): this passes bytes to application_version_set — confirm
    # charmhelpers accepts bytes here, or decode explicitly.
    hookenv.application_version_set(version.split(b" v")[-1].rstrip())


@hookenv.atexit
def charm_status():
    """Update the status message with the current status of kubelet.

    Checks are ordered from most to least blocking; the first match wins.
    """
    container_runtime_connected = is_state("endpoint.container-runtime.joined")
    vsphere_joined = is_state("endpoint.vsphere.joined")
    azure_joined = is_state("endpoint.azure.joined")
    cloud_blocked = is_state("kubernetes-worker.cloud.blocked")

    if is_state("upgrade.series.in-progress"):
        hookenv.status_set("blocked", "Series upgrade in progress")
        return
    if not is_flag_set("certificates.available"):
        hookenv.status_set("blocked", "Missing relation to certificate authority.")
        return
    if not container_runtime_connected:
        hookenv.status_set("blocked", "Connect a container runtime.")
        return
    if vsphere_joined and cloud_blocked:
        hookenv.status_set(
            "blocked", "vSphere integration requires K8s 1.12 or greater"
        )
        return
    if azure_joined and cloud_blocked:
        hookenv.status_set("blocked", "Azure integration requires K8s 1.11 or greater")
        return
    if not is_flag_set("kubernetes.cni-plugins.installed"):
        hookenv.status_set("blocked", "Missing CNI resource")
        return
    if is_state("kubernetes-worker.cloud.pending"):
        hookenv.status_set("waiting", "Waiting for cloud integration")
        return
    if is_state("kubernetes-worker.cohorts.failed"):
        # NOTE(review): no `return` here, so a later check can overwrite this
        # status in the same run — confirm whether that is intentional.
        hookenv.status_set(
            "waiting", "Failed to join snap cohorts (see logs), will retry."
        )
    if missing_kube_control():
        # the check calls status_set
        return
    if not any_flags_set(
        "kube-control.api_endpoints.available", "kube-api-endpoint.available"
    ):
        hookenv.status_set("waiting", "Waiting for cluster endpoint.")
        return
    if not get_kube_api_servers():
        hookenv.status_set("waiting", "Unable to determine cluster endpoint.")
        return
    if not is_state("kube-control.auth.available"):
        hookenv.status_set("waiting", "Waiting for cluster credentials.")
        return
    if not is_state("kube-control.dns.available"):
        # During deployment the worker has to start kubelet without cluster dns
        # configured. If this is the first unit online in a service pool
        # waiting to self host the dns pod, and configure itself to query the
        # dns service declared in the kube-system namespace
        hookenv.status_set("waiting", "Waiting for cluster DNS.")
        return
    if is_state("kubernetes-worker.snaps.upgrade-specified"):
        hookenv.status_set("waiting", "Upgrade pending")
        return
    if is_state("kubernetes-worker.snaps.upgrade-needed"):
        hookenv.status_set("blocked", "Needs manual upgrade, run the upgrade action")
        return
    if is_state("kubernetes-node.snaps.installed"):
        update_kubelet_status()
        return
    else:
        pass  # will have been set by snap layer or other handler
def deprecated_extra_args():
    """Returns a list of tuples (config_key, arg) for args that have been set
    via extra-args, but are deprecated.

    This works by parsing help output, which can be brittle. Be cautious when
    calling this.
    """
    deprecated_args = []
    services = [
        # service config_key
        ("kubelet", "kubelet-extra-args"),
        ("kube-proxy", "proxy-extra-args"),
    ]
    for service, config_key in services:
        # Parse help output into a format we can check easily
        cmd = [service, "-h"]
        output = subprocess.check_output(cmd, stderr=subprocess.STDOUT)
        # Split on each long-option introducer ("--foo" at line start,
        # optionally preceded by a short form like "-f, "); drop the preamble.
        sections = re.split(r"\n\s*(?:-\S, )?--", output.decode("utf-8"))[1:]
        # Each section is "<argname> <help text>"; index by argname.
        partitioned_sections = [section.partition(" ") for section in sections]
        arg_help = {part[0]: part[2] for part in partitioned_sections}

        # Check extra-args against the help output
        extra_args = parse_extra_args(config_key)
        for arg in extra_args:
            if arg not in arg_help:
                # This is most likely a problem, though it could also be
                # intentional use of a hidden arg. Let's just log a warning.
                hookenv.log(
                    "%s: %s is missing from help output" % (config_key, arg),
                    level="WARNING",
                )
            elif "DEPRECATED:" in arg_help[arg]:
                deprecated_args.append((config_key, arg))
    return deprecated_args
+ """ + deprecated_args = [] + services = [ + # service config_key + ("kubelet", "kubelet-extra-args"), + ("kube-proxy", "proxy-extra-args"), + ] + for service, config_key in services: + # Parse help output into a format we can check easily + cmd = [service, "-h"] + output = subprocess.check_output(cmd, stderr=subprocess.STDOUT) + sections = re.split(r"\n\s*(?:-\S, )?--", output.decode("utf-8"))[1:] + partitioned_sections = [section.partition(" ") for section in sections] + arg_help = {part[0]: part[2] for part in partitioned_sections} + + # Check extra-args against the help output + extra_args = parse_extra_args(config_key) + for arg in extra_args: + if arg not in arg_help: + # This is most likely a problem, though it could also be + # intentional use of a hidden arg. Let's just log a warning. + hookenv.log( + "%s: %s is missing from help output" % (config_key, arg), + level="WARNING", + ) + elif "DEPRECATED:" in arg_help[arg]: + deprecated_args.append((config_key, arg)) + return deprecated_args + + +def update_kubelet_status(): + """There are different states that the kubelet can be in, where we are + waiting for dns, waiting for cluster turnup, or ready to serve + applications.""" + # deprecated_extra_args is brittle, be cautious + deprecated_args = [] + try: + deprecated_args = deprecated_extra_args() + except Exception: + # this isn't vital, log it and move on + traceback.print_exc() + if deprecated_args: + messages = ["%s: %s is deprecated" % arg for arg in deprecated_args] + for message in messages: + hookenv.log(message, level="WARNING") + status = messages[0] + if len(messages) > 1: + other_count = len(messages) - 1 + status += " (+%d others, see juju debug-log)" % other_count + hookenv.status_set("blocked", status) + return + + services = ["kubelet", "kube-proxy"] + failing_services = [] + for service in services: + daemon = "snap.{}.daemon".format(service) + if not _systemctl_is_active(daemon): + failing_services.append(service) + if failing_services: + 
@when(
    "certificates.available",
    "kube-control.connected",
    "cni.available",
    "kube-control.dns.available",
)
def send_data():
    """Send the data that is required to create a server certificate for
    this server."""
    # Use the public ip of this unit as the Common Name for the certificate.
    common_name = hookenv.unit_public_ip()

    # Collect SANs for the tls layer to add to the server cert: public IP,
    # ingress IP, hostname, and all local bind addresses (deduplicated).
    sans = {common_name, get_node_ip(), gethostname()}
    sans.update(kubernetes_common.get_bind_addrs())

    # Request a server cert with this information.
    layer.tls_client.request_server_cert(
        common_name,
        sorted(sans),
        crt_path=server_crt_path,
        key_path=server_key_path,
    )

    # Request a client cert for kubelet.
    layer.tls_client.request_client_cert(
        "system:kubelet", crt_path=client_crt_path, key_path=client_key_path
    )
@when(
    "kube-control.dns.available",
    "cni.available",
    "endpoint.container-runtime.available",
)
@when_any("kube-control.api_endpoints.available", "kube-api-endpoint.available")
def watch_for_changes():
    """Watch for configuration changes and signal if we need to restart the
    worker services"""
    kube_control = endpoint_from_flag("kube-control.dns.available")
    container_runtime = endpoint_from_flag("endpoint.container-runtime.available")

    servers = get_kube_api_servers()
    dns = kube_control.get_dns()
    cluster_cidr = kubernetes_common.cluster_cidr()
    container_runtime_name = container_runtime.get_runtime()
    container_runtime_socket = container_runtime.get_socket()
    container_runtime_nvidia = container_runtime.get_nvidia_enabled()

    if container_runtime_nvidia:
        set_state("nvidia.ready")
    else:
        remove_state("nvidia.ready")

    # Any change in these inputs requires reconfiguring and restarting the
    # worker daemons.
    # NOTE(review): `or` short-circuits, so when an early data_changed returns
    # True the later caches are not updated in this hook; confirm that a
    # follow-up hook re-running this handler makes that harmless.
    if (
        data_changed("kube-api-servers", servers)
        or data_changed("kube-dns", dns)
        or data_changed("cluster-cidr", cluster_cidr)
        or data_changed("container-runtime", container_runtime_name)
        or data_changed("container-socket", container_runtime_socket)
    ):
        set_state("kubernetes-worker.restart-needed")


@when(
    "kubernetes-node.snaps.installed",
    "tls_client.ca.saved",
    "tls_client.certs.saved",
    "kube-control.dns.available",
    "kube-control.auth.available",
    "cni.available",
    "kubernetes-worker.restart-needed",
    "worker.auth.bootstrapped",
    "endpoint.container-runtime.available",
    "kube-control.default_cni.available",
)
@when_not(
    "kubernetes-worker.cloud.pending",
    "kubernetes-worker.cloud.blocked",
    "upgrade.series.in-progress",
)
@when_any(
    "kube-control.api_endpoints.available",
    "kube-api-endpoint.available",
    "endpoint.kube-control.has-xcp.changed",
)
def start_worker():
    """Start kubelet using the provided API and DNS info."""
    # Note that the DNS server doesn't necessarily exist at this point. We know
    # what its IP will eventually be, though, so we can go ahead and configure
    # kubelet with that info. This ensures that early pods are configured with
    # the correct DNS even though the server isn't ready yet.
    kube_control = endpoint_from_flag("kube-control.dns.available")

    servers = get_kube_api_servers()
    dns = kube_control.get_dns()
    dns_domain = dns["domain"]
    dns_ip = dns["sdn-ip"]
    registry = get_registry_location()
    cluster_cidr = kubernetes_common.cluster_cidr()

    if cluster_cidr is None:
        hookenv.log("Waiting for cluster cidr.")
        return

    if not servers:
        hookenv.log("Waiting for API server URL")
        return

    if kubernetes_common.is_ipv6(cluster_cidr):
        kubernetes_common.enable_ipv6_forwarding()

    # Prime the data_changed cache with the cached credentials so that
    # catch_change_in_creds only triggers a restart on a later change.
    creds = db.get("credentials")
    data_changed("kube-control.creds", creds)

    # Spread units across API servers by unit number.
    create_config(servers[get_unit_number() % len(servers)], creds)
    configure_default_cni(kube_control.get_default_cni())
    configure_kubelet(dns_domain, dns_ip, registry, has_xcp=kube_control.has_xcp)
    configure_kube_proxy(configure_prefix, servers, cluster_cidr)
    set_state("kubernetes-worker.config.created")
    restart_unit_services()
    update_kubelet_status()
    set_state("kubernetes-worker.label-config-required")
    set_state("nrpe-external-master.reconfigure")
    remove_state("kubernetes-worker.restart-needed")
    remove_state("endpoint.kube-control.has-xcp.changed")
@when("node.label-config-required", "kubernetes-worker.config.created")
def apply_node_labels():
    """Apply configured node labels to this node via LabelMaker."""
    # Label configuration complete.
    # NOTE(review): start_worker sets 'kubernetes-worker.label-config-required'
    # while this handler watches 'node.label-config-required' — confirm the
    # kubernetes-node-base layer bridges these flags.
    label_maker = LabelMaker(kubeclientconfig_path)
    try:
        label_maker.apply_node_labels()
    except LabelMaker.NodeLabelError:
        # Labeling failed (control plane likely unavailable); keep the flag
        # set so a later hook retries.
        return
    remove_state("node.label-config-required")
@when_any(
    "config.changed.kubelet-extra-args",
    "config.changed.proxy-extra-args",
    "config.changed.kubelet-extra-config",
)
def config_changed_requires_restart():
    """Extra args/config changed; clear CPU manager state and restart.

    LP bug #1826833: always delete the state file when extra config changes
    since CPU manager doesn't support offlining and onlining of CPUs at
    runtime.
    """
    if os.path.isfile(cpu_manager_state):
        hookenv.log("Removing file: " + cpu_manager_state)
        os.remove(cpu_manager_state)
    set_state("kubernetes-worker.restart-needed")


@when_any("tls_client.certs.changed", "tls_client.ca.written")
def restart_for_certs():
    """New certificates or CA were written; schedule a service restart."""
    set_state("kubernetes-worker.restart-needed")
    remove_state("tls_client.certs.changed")
    remove_state("tls_client.ca.written")


def create_config(server, creds):
    """Create a kubernetes configuration for the worker unit.

    Writes kubeconfigs for the ubuntu user, root, kubelet and kube-proxy,
    then notifies the CNI endpoint that the kubeconfig changed.
    """
    # Create kubernetes configuration in the default location for ubuntu.
    create_kubeconfig(
        "/home/ubuntu/.kube/config",
        server,
        ca_crt_path,
        token=creds["client_token"],
        user="ubuntu",
    )
    # Make the config dir readable by the ubuntu users so juju scp works.
    check_call(["chown", "-R", "ubuntu:ubuntu", "/home/ubuntu/.kube"])
    # Kubeconfigs for root and for the kubelet / kube-proxy services.
    for path, token_key, user in (
        (kubeclientconfig_path, "client_token", "root"),
        (kubelet_kubeconfig_path, "kubelet_token", "kubelet"),
        (kubeproxyconfig_path, "proxy_token", "kube-proxy"),
    ):
        create_kubeconfig(
            path, server, ca_crt_path, token=creds[token_key], user=user
        )
    cni = endpoint_from_name("cni")
    if cni:
        cni.notify_kubeconfig_changed()
@when("config.changed.ingress")
def toggle_ingress_state():
    """Ingress is a toggled state. Remove ingress.available if set when
    toggled"""
    if hookenv.config("ingress"):
        set_state("kubernetes-worker.ingress.enabled")
    else:
        remove_state("kubernetes-worker.ingress.enabled")


@when_any(
    "config.changed.default-backend-image",
    "config.changed.ingress-ssl-chain-completion",
    "config.changed.nginx-image",
    "config.changed.ingress-ssl-passthrough",
    "config.changed.ingress-default-ssl-certificate",
    "config.changed.ingress-default-ssl-key",
)
def reconfigure_ingress():
    # Dropping the flag makes render_and_launch_ingress re-render the
    # manifests with the new configuration.
    remove_state("kubernetes-worker.ingress.available")


@when("kubernetes-worker.config.created", "kubernetes-worker.ingress.enabled")
@when_not("kubernetes-worker.ingress.available")
def render_and_launch_ingress():
    """Launch the Kubernetes ingress controller & default backend (404)"""
    config = hookenv.config()

    # need to test this in case we get in
    # here from a config change to the image
    if not config.get("ingress"):
        return

    context = {}
    context["arch"] = arch()
    addon_path = "/root/cdk/addons/{}"
    context["juju_application"] = hookenv.service_name()

    # If present, workers will get the ingress containers from the configured
    # registry. Otherwise, we'll set an appropriate upstream image registry.
    registry_location = get_registry_location()

    # Pick the default-backend image; "" or "auto" means choose per-arch.
    context["defaultbackend_image"] = config.get("default-backend-image")
    if (
        context["defaultbackend_image"] == ""
        or context["defaultbackend_image"] == "auto"
    ):
        if registry_location:
            backend_registry = registry_location
        else:
            backend_registry = "k8s.gcr.io"
        if context["arch"] == "s390x":
            context["defaultbackend_image"] = "{}/defaultbackend-s390x:1.4".format(
                backend_registry
            )
        elif context["arch"] == "ppc64el":
            context["defaultbackend_image"] = "{}/defaultbackend-ppc64le:1.5".format(
                backend_registry
            )
        else:
            context["defaultbackend_image"] = "{}/defaultbackend-{}:1.5".format(
                backend_registry, context["arch"]
            )

    # Render the ingress daemon set controller manifest
    context["ssl_chain_completion"] = config.get("ingress-ssl-chain-completion")
    context["enable_ssl_passthrough"] = config.get("ingress-ssl-passthrough")
    context["default_ssl_certificate_option"] = None
    # A default TLS cert/key pair is only wired in when both are configured.
    if config.get("ingress-default-ssl-certificate") and config.get(
        "ingress-default-ssl-key"
    ):
        context["default_ssl_certificate"] = b64encode(
            config.get("ingress-default-ssl-certificate").encode("utf-8")
        ).decode("utf-8")
        context["default_ssl_key"] = b64encode(
            config.get("ingress-default-ssl-key").encode("utf-8")
        ).decode("utf-8")
        default_certificate_option = (
            "- --default-ssl-certificate=" "$(POD_NAMESPACE)/default-ssl-certificate"
        )
        context["default_ssl_certificate_option"] = default_certificate_option
    # Pick the nginx ingress controller image; "" or "auto" chooses per-arch.
    context["ingress_image"] = config.get("nginx-image")
    if context["ingress_image"] == "" or context["ingress_image"] == "auto":
        if context["arch"] == "ppc64el":
            # multi-arch image doesn't include ppc64le, have to use an older version
            image = "nginx-ingress-controller-ppc64le"
            context["ingress_uid"] = "33"
            context["ingress_image"] = "/".join(
                [
                    registry_location or "quay.io",
                    "kubernetes-ingress-controller/{}:0.20.0".format(image),
                ]
            )
        else:
            context["ingress_uid"] = "101"
            context["ingress_image"] = "/".join(
                [
                    registry_location or "us.gcr.io",
                    "k8s-artifacts-prod/ingress-nginx/controller:v1.2.0",
                ]
            )

    # Choose manifest API versions compatible with the running kubelet.
    kubelet_version = get_version("kubelet")
    if kubelet_version < (1, 9):
        context["daemonset_api_version"] = "extensions/v1beta1"
        context["deployment_api_version"] = "extensions/v1beta1"
    elif kubelet_version < (1, 16):
        context["daemonset_api_version"] = "apps/v1beta2"
        context["deployment_api_version"] = "extensions/v1beta1"
    else:
        context["daemonset_api_version"] = "apps/v1"
        context["deployment_api_version"] = "apps/v1"
    context["use_forwarded_headers"] = (
        "true" if config.get("ingress-use-forwarded-headers") else "false"
    )

    manifest = addon_path.format("ingress-daemon-set.yaml")
    render("ingress-daemon-set.yaml", manifest, context)
    hookenv.log("Creating the ingress daemon set.")
    try:
        kubectl("apply", "-f", manifest)
    except CalledProcessError as e:
        hookenv.log(e)
        hookenv.log(
            "Failed to create ingress controller. Will attempt again next update."
        )  # noqa
        hookenv.close_port(80)
        hookenv.close_port(443)
        return

    # Render the default http backend (404) deployment manifest
    # needs to happen after ingress-daemon-set since that sets up the namespace
    manifest = addon_path.format("default-http-backend.yaml")
    render("default-http-backend.yaml", manifest, context)
    hookenv.log("Creating the default http backend.")
    try:
        kubectl("apply", "-f", manifest)
    except CalledProcessError as e:
        hookenv.log(e)
        hookenv.log(
            "Failed to create default-http-backend. Will attempt again next update."
        )  # noqa
        hookenv.close_port(80)
        hookenv.close_port(443)
        return

    set_state("kubernetes-worker.ingress.available")
    hookenv.open_port(80)
    hookenv.open_port(443)
@when("kubernetes-worker.config.created", "kubernetes-worker.ingress.available")
@when_not("kubernetes-worker.ingress.enabled")
def disable_ingress():
    """Tear down the ingress controller and default backend when the
    ingress config option is turned off."""
    hookenv.log("Deleting the http backend and ingress.")
    hookenv.close_port(80)
    hookenv.close_port(443)
    try:
        kubectl(
            "delete",
            "--ignore-not-found",
            "-f",
            "/root/cdk/addons/default-http-backend.yaml",
        )
        kubectl(
            "delete",
            "--ignore-not-found",
            "-f",
            "/root/cdk/addons/ingress-daemon-set.yaml",
        )
    except CalledProcessError:
        # Leave the flag set so a later hook retries the deletion.
        traceback.print_exc()
        hookenv.log("Failed to disable ingress, waiting to retry")
        return
    remove_state("kubernetes-worker.ingress.available")


def restart_unit_services():
    """Restart worker services."""
    hookenv.log("Restarting kubelet and kube-proxy.")
    services = ["kube-proxy", "kubelet"]
    for service in services:
        service_restart("snap.%s.daemon" % service)


def get_kube_api_servers():
    """Return the list of kubernetes API endpoint URLs."""
    kube_control = endpoint_from_name("kube-control")
    kube_api = endpoint_from_name("kube-api-endpoint")
    # prefer kube-api-endpoints
    if kube_api.services():
        return [
            "https://{0}:{1}".format(unit["hostname"], unit["port"])
            for service in kube_api.services()
            for unit in service["hosts"]
        ]
    # Fall back to the kube-control relation if the interface supports it.
    if hasattr(kube_control, "get_api_endpoints"):
        return kube_control.get_api_endpoints()
    hookenv.log(
        "Unable to determine API server URLs from either kube-control "
        "or kube-api-endpoint relation",
        hookenv.ERROR,
    )
    return []


@when("kubernetes-worker.config.created")
@when("nrpe-external-master.available")
@when("kube-control.auth.available")
@when_any(
    "config.changed.nagios_context",
    "config.changed.nagios_servicegroups",
    "nrpe-external-master.reconfigure",
)
@when_any("kube-control.api_endpoints.available", "kube-api-endpoint.available")
def update_nrpe_config():
    """Install/refresh nagios checks for the worker daemons and node
    registration, and give the nagios user a kubeconfig."""
    services = ["snap.{}.daemon".format(s) for s in worker_services]
    # Render the node-check plugin to a string and install it for nagios.
    data = render("nagios_plugin.py", None, {"node_name": get_node_name()})
    plugin_path = install_nagios_plugin_from_text(data, "check_k8s_worker.py")
    hostname = nrpe.get_nagios_hostname()
    current_unit = nrpe.get_nagios_unit_name()
    nrpe_setup = nrpe.NRPE(hostname=hostname)
    nrpe_setup.add_check("node", "Node registered with API Server", str(plugin_path))
    nrpe.add_init_service_checks(nrpe_setup, services, current_unit)
    nrpe_setup.write()

    # The node check needs credentials; only write them once both the cached
    # creds and at least one API server are known.
    creds = db.get("credentials")
    servers = get_kube_api_servers()
    if creds and servers:
        server = servers[get_unit_number() % len(servers)]
        create_kubeconfig(
            nrpe_kubeconfig_path,
            server,
            ca_crt_path,
            token=creds["client_token"],
            user="nagios",
        )
        # Make sure Nagios dirs are the correct permissions.
        cmd = ["chown", "-R", "nagios:nagios"]
        for p in ["/var/lib/nagios/", os.path.dirname(nrpe_kubeconfig_path)]:
            if os.path.exists(p):
                check_call(cmd + [p])

    remove_state("nrpe-external-master.reconfigure")
    set_state("nrpe-external-master.initial-config")
    # request CPU governor check from nrpe relation to be performance
    rel_settings = {
        "requested_cpu_governor": "performance",
    }
    for rid in hookenv.relation_ids("nrpe-external-master"):
        hookenv.relation_set(relation_id=rid, relation_settings=rel_settings)
@when_not("nrpe-external-master.available")
@when("nrpe-external-master.initial-config")
def remove_nrpe_config():
    """Remove our nagios plugin and checks when the nrpe relation departs."""
    remove_state("nrpe-external-master.initial-config")
    remove_nagios_plugin("check_k8s_worker.py")

    # The current nrpe-external-master interface doesn't handle a lot of logic,
    # use the charm-helpers code for now.
    nrpe_setup = nrpe.NRPE(hostname=nrpe.get_nagios_hostname())
    for shortname in (*worker_services, "node"):
        nrpe_setup.remove_check(shortname=shortname)
@when("nvidia.ready")
@when("kubernetes-worker.config.created")
@when_not("kubernetes-worker.gpu.enabled")
def enable_gpu():
    """Enable GPU usage on this node."""
    hookenv.log("Enabling gpu mode")
    try:
        # Not sure why this is necessary, but if you don't run this, k8s will
        # think that the node has 0 gpus (as shown by the output of
        # `kubectl get nodes -o yaml`
        check_call(["nvidia-smi"])
    except CalledProcessError as cpe:
        hookenv.log("Unable to communicate with the NVIDIA driver.")
        hookenv.log(cpe)
        return
    except FileNotFoundError as fne:
        hookenv.log("NVIDIA SMI not installed.")
        hookenv.log(fne)
        return

    # Advertise GPU capability through node labels.
    label_maker = LabelMaker(kubeclientconfig_path)
    label_maker.set_label("gpu", "true")
    label_maker.set_label("cuda", "true")

    set_state("kubernetes-worker.gpu.enabled")
    set_state("kubernetes-worker.restart-needed")


@when("kubernetes-worker.gpu.enabled")
@when_not("nvidia.ready")
@when_not("kubernetes-worker.restart-needed")
def nvidia_departed():
    """Cuda departed."""
    disable_gpu()
    remove_state("kubernetes-worker.gpu.enabled")
    set_state("kubernetes-worker.restart-needed")


def disable_gpu():
    """Disable GPU usage on this node."""
    hookenv.log("Disabling gpu mode")

    # Remove node labels
    label_maker = LabelMaker(kubeclientconfig_path)
    label_maker.remove_label("gpu")
    label_maker.remove_label("cuda")


@when("kubernetes-worker.gpu.enabled")
@when("kube-control.connected")
def notify_control_plane_gpu_enabled(kube_control):
    """Notify kubernetes-control-plane that we're gpu-enabled."""
    kube_control.set_gpu(True)


@when_not("kubernetes-worker.gpu.enabled")
@when("kube-control.connected")
def notify_control_plane_gpu_not_enabled(kube_control):
    """Notify kubernetes-control-plane that we're not gpu-enabled."""
    kube_control.set_gpu(False)
@when("kube-control.connected")
def request_kubelet_and_proxy_credentials(kube_control):
    """Request kubelet node authorization with a well formed kubelet user.
    This also implies that we are requesting kube-proxy auth."""

    # The kube-control interface is created to support RBAC.
    # At this point we might as well do the right thing and return the hostname
    # even if it will only be used when we enable RBAC
    nodeuser = "system:node:{}".format(get_node_name().lower())
    kube_control.set_auth_request(nodeuser)


@when("kube-control.connected")
def catch_change_in_creds(kube_control):
    """Request a service restart in case credential updates were detected."""
    nodeuser = "system:node:{}".format(get_node_name().lower())
    creds = kube_control.get_auth_credentials(nodeuser)
    if creds and creds["user"] == nodeuser:
        # We need to cache the credentials here because if the
        # control-plane changes (control-plane leader dies and replaced by a new one)
        # the new control-plane will have no recollection of our certs.
        db.set("credentials", creds)
        set_state("worker.auth.bootstrapped")
        if data_changed("kube-control.creds", creds):
            set_state("kubernetes-worker.restart-needed")


def missing_kube_control():
    """Inform the operator they need to add the kube-control relation.

    If deploying via bundle this won't happen, but if operator is upgrading a
    a charm in a deployment that pre-dates the kube-control relation, it'll be
    missing.

    Called from charm_status.
    """
    try:
        goal_state = hookenv.goal_state()
    except NotImplementedError:
        # Older juju has no goal-state; treat the relation as expected-absent.
        goal_state = {}

    if "kube-control" in goal_state.get("relations", {}):
        # Relation is planned but not yet joined: just wait.
        if not is_flag_set("kube-control.connected"):
            hookenv.status_set(
                "waiting", "Waiting for kubernetes-control-plane to become ready"
            )
            return True
    else:
        # Relation is not planned at all: operator must add it.
        hookenv.status_set(
            "blocked",
            "Relate {}:kube-control kubernetes-control-plane:kube-control".format(
                hookenv.service_name()
            ),
        )
        return True
    return False
+ """ + try: + goal_state = hookenv.goal_state() + except NotImplementedError: + goal_state = {} + + if "kube-control" in goal_state.get("relations", {}): + if not is_flag_set("kube-control.connected"): + hookenv.status_set( + "waiting", "Waiting for kubernetes-control-plane to become ready" + ) + return True + else: + hookenv.status_set( + "blocked", + "Relate {}:kube-control kubernetes-control-plane:kube-control".format( + hookenv.service_name() + ), + ) + return True + return False + + +def _systemctl_is_active(application): + """Poll systemctl to determine if the application is running""" + cmd = ["systemctl", "is-active", application] + try: + raw = check_output(cmd) + return b"active" in raw + except Exception: + return False + + +@when_any( + "endpoint.aws.joined", + "endpoint.gcp.joined", + "endpoint.openstack.joined", + "endpoint.vsphere.joined", + "endpoint.azure.joined", +) +@when_not("kubernetes-worker.cloud.ready") +def set_cloud_pending(): + k8s_version = get_version("kubelet") + k8s_1_11 = k8s_version >= (1, 11) + k8s_1_12 = k8s_version >= (1, 12) + vsphere_joined = is_state("endpoint.vsphere.joined") + azure_joined = is_state("endpoint.azure.joined") + if (vsphere_joined and not k8s_1_12) or (azure_joined and not k8s_1_11): + set_state("kubernetes-worker.cloud.blocked") + else: + remove_state("kubernetes-worker.cloud.blocked") + set_state("kubernetes-worker.cloud.pending") + + +@when_any("endpoint.aws.joined", "endpoint.gcp.joined", "endpoint.azure.joined") +@when("kube-control.cluster_tag.available") +@when_not("kubernetes-worker.cloud.request-sent") +def request_integration(): + hookenv.status_set("maintenance", "requesting cloud integration") + kube_control = endpoint_from_flag("kube-control.cluster_tag.available") + cluster_tag = kube_control.get_cluster_tag() + if is_state("endpoint.aws.joined"): + cloud = endpoint_from_flag("endpoint.aws.joined") + cloud.tag_instance( + { + "kubernetes.io/cluster/{}".format(cluster_tag): "owned", + } + ) + 
cloud.tag_instance_security_group( + { + "kubernetes.io/cluster/{}".format(cluster_tag): "owned", + } + ) + cloud.tag_instance_subnet( + { + "kubernetes.io/cluster/{}".format(cluster_tag): "owned", + } + ) + cloud.enable_object_storage_management(["kubernetes-*"]) + elif is_state("endpoint.gcp.joined"): + cloud = endpoint_from_flag("endpoint.gcp.joined") + cloud.label_instance( + { + "k8s-io-cluster-name": cluster_tag, + } + ) + cloud.enable_object_storage_management() + elif is_state("endpoint.azure.joined"): + cloud = endpoint_from_flag("endpoint.azure.joined") + cloud.tag_instance( + { + "k8s-io-cluster-name": cluster_tag, + } + ) + cloud.enable_object_storage_management() + cloud.enable_instance_inspection() + cloud.enable_dns_management() + set_state("kubernetes-worker.cloud.request-sent") + hookenv.status_set("waiting", "Waiting for cloud integration") + + +@when_none( + "endpoint.aws.joined", + "endpoint.gcp.joined", + "endpoint.openstack.joined", + "endpoint.vsphere.joined", + "endpoint.azure.joined", +) +@when_any( + "kubernetes-worker.cloud.pending", + "kubernetes-worker.cloud.request-sent", + "kubernetes-worker.cloud.blocked", + "kubernetes-worker.cloud.ready", +) +def clear_cloud_flags(): + remove_state("kubernetes-worker.cloud.pending") + remove_state("kubernetes-worker.cloud.request-sent") + remove_state("kubernetes-worker.cloud.blocked") + remove_state("kubernetes-worker.cloud.ready") + set_state("kubernetes-worker.restart-needed") # force restart + + +@when_any( + "endpoint.aws.ready", + "endpoint.gcp.ready", + "endpoint.openstack.ready", + "endpoint.vsphere.ready", + "endpoint.azure.ready", +) +@when_not("kubernetes-worker.cloud.blocked", "kubernetes-worker.cloud.ready") +def cloud_ready(): + remove_state("kubernetes-worker.cloud.pending") + if is_state("endpoint.gcp.ready"): + write_gcp_snap_config("kubelet") + elif is_state("endpoint.azure.ready"): + write_azure_snap_config("kubelet") + set_state("kubernetes-worker.cloud.ready") + 
set_state("kubernetes-worker.restart-needed") # force restart + + +def get_first_mount(mount_relation): + mount_relation_list = mount_relation.mounts() + if mount_relation_list and len(mount_relation_list) > 0: + # mount relation list is a list of the mount layer relations + # for now we just use the first one that is nfs + for mount in mount_relation_list: + # for now we just check the first mount and use that. + # the nfs charm only supports one for now. + if "mounts" in mount and mount["mounts"][0]["fstype"] == "nfs": + return mount["mounts"][0] + return None + + +@when("scrape.available") +def scrape_available(scrape): + if hookenv.config().get("ingress"): + scrape.configure( + port=10254, + labels=dict( + juju_model=hookenv.model_name(), + juju_model_uuid=hookenv.model_uuid(), + juju_application=hookenv.application_name(), + juju_unit=hookenv.local_unit(), + service="nginx-ingress", + ), + ) + + +@when("nfs.available") +def nfs_state_control(mount): + """Determine if we should remove the state that controls the re-render + and execution of the nfs-relation-changed event because there + are changes in the relationship data, and we should re-render any + configs""" + + mount_data = get_first_mount(mount) + if mount_data: + nfs_relation_data = { + "options": mount_data["options"], + "host": mount_data["hostname"], + "mountpoint": mount_data["mountpoint"], + "fstype": mount_data["fstype"], + } + + # Re-execute the rendering if the data has changed. + if data_changed("nfs-config", nfs_relation_data): + hookenv.log("reconfiguring nfs") + remove_state("nfs.configured") + + +@when("nfs.available") +@when_not("nfs.configured") +def nfs_storage(mount): + """NFS on kubernetes requires nfs config rendered into a deployment of + the nfs client provisioner. 
That will handle the persistent volume claims + with no persistent volume to back them.""" + + mount_data = get_first_mount(mount) + if not mount_data: + return + + # If present, use the configured registry to define the nfs image location. + registry_location = get_registry_location() + if registry_location: + mount_data["registry"] = registry_location + + addon_path = "/root/cdk/addons/{}" + # Render the NFS deployment + manifest = addon_path.format("nfs-provisioner.yaml") + render("nfs-provisioner.yaml", manifest, mount_data) + hookenv.log("Creating the nfs provisioner.") + try: + kubectl("apply", "-f", manifest) + except CalledProcessError as e: + hookenv.log(e) + hookenv.log( + "Failed to create nfs provisioner. Will attempt again next update." + ) # noqa + return + + set_state("nfs.configured") + + +@when("kube-control.registry_location.available") +def update_registry_location(): + """Handle changes to the container image registry. + + Monitor the image registry location. If it changes, manage flags to ensure + our image-related handlers will be invoked with an accurate registry. + """ + registry_location = get_registry_location() + + if registry_location: + runtime = endpoint_from_flag("endpoint.container-runtime.available") + if runtime: + # Construct and send the sandbox image (pause container) to our runtime + uri = get_sandbox_image_uri(registry_location) + runtime.set_config(sandbox_image=uri) + + if data_changed("registry-location", registry_location): + remove_state("kubernetes-worker.config.created") + remove_state("kubernetes-worker.ingress.available") + remove_state("nfs.configured") + set_state("kubernetes-worker.restart-needed") + + +def get_registry_location(): + """Get the image registry from the kube-control relation. + + If an image-registry has been configured on the k8s-control-plane, it will be set + set on the kube-control relation. This function returns that value stripped + of any trailing slash. 
If the relation or registry location are missing, + this returns an empty string. + """ + kube_control = endpoint_from_flag("kube-control.registry_location.available") + if kube_control: + rel_registry = kube_control.get_registry_location() + registry = rel_registry.rstrip("/") if rel_registry else "" + else: + registry = "" + + return registry + + +@when("ingress-proxy.available") +def configure_ingress_proxy(ingress_proxy): + ingress_proxy.configure(port="80") diff --git a/kubernetes-worker/reactive/leadership.py b/kubernetes-worker/reactive/leadership.py new file mode 100644 index 0000000..29c6f3a --- /dev/null +++ b/kubernetes-worker/reactive/leadership.py @@ -0,0 +1,68 @@ +# Copyright 2015-2016 Canonical Ltd. +# +# This file is part of the Leadership Layer for Juju. +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License version 3, as +# published by the Free Software Foundation. +# +# This program is distributed in the hope that it will be useful, but +# WITHOUT ANY WARRANTY; without even the implied warranties of +# MERCHANTABILITY, SATISFACTORY QUALITY, or FITNESS FOR A PARTICULAR +# PURPOSE. See the GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . + +from charmhelpers.core import hookenv +from charmhelpers.core import unitdata + +from charms import reactive +from charms.leadership import leader_get, leader_set + + +__all__ = ['leader_get', 'leader_set'] # Backwards compatibility + + +def initialize_leadership_state(): + '''Initialize leadership.* states from the hook environment. + + Invoked by hookenv.atstart() so states are available in + @hook decorated handlers. 
+ ''' + is_leader = hookenv.is_leader() + if is_leader: + hookenv.log('Initializing Leadership Layer (is leader)') + else: + hookenv.log('Initializing Leadership Layer (is follower)') + + reactive.helpers.toggle_state('leadership.is_leader', is_leader) + + previous = unitdata.kv().getrange('leadership.settings.', strip=True) + current = hookenv.leader_get() + + # Handle deletions. + for key in set(previous.keys()) - set(current.keys()): + current[key] = None + + any_changed = False + for key, value in current.items(): + reactive.helpers.toggle_state('leadership.changed.{}'.format(key), + value != previous.get(key)) + if value != previous.get(key): + any_changed = True + reactive.helpers.toggle_state('leadership.set.{}'.format(key), + value is not None) + reactive.helpers.toggle_state('leadership.changed', any_changed) + + unitdata.kv().update(current, prefix='leadership.settings.') + + +# Per https://github.com/juju-solutions/charms.reactive/issues/33, +# this module may be imported multiple times so ensure the +# initialization hook is only registered once. I have to piggy back +# onto the namespace of a module imported before reactive discovery +# to do this. +if not hasattr(reactive, '_leadership_registered'): + hookenv.atstart(initialize_leadership_state) + reactive._leadership_registered = True diff --git a/kubernetes-worker/reactive/snap.py b/kubernetes-worker/reactive/snap.py new file mode 100644 index 0000000..2220648 --- /dev/null +++ b/kubernetes-worker/reactive/snap.py @@ -0,0 +1,341 @@ +# Copyright 2016-2019 Canonical Ltd. +# +# This file is part of the Snap layer for Juju. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" +charms.reactive helpers for dealing with Snap packages. +""" +from collections import OrderedDict +from distutils.version import LooseVersion +import os.path +from os import uname +import shutil +import subprocess +from textwrap import dedent +import time +from urllib.request import urlretrieve + +from charmhelpers.core import hookenv, host +from charmhelpers.core.hookenv import ERROR +from charmhelpers.core.host import write_file +from charms import layer +from charms import reactive +from charms.layer import snap +from charms.reactive import register_trigger, when, when_not, toggle_flag +from charms.reactive.helpers import data_changed + + +class UnsatisfiedMinimumVersionError(Exception): + def __init__(self, desired, actual): + super().__init__() + self.desired = desired + self.actual = actual + + def __str__(self): + return "Could not install snapd >= {0.desired}, got {0.actual}".format(self) + + +class InvalidBundleError(Exception): + pass + + +def sorted_snap_opts(): + opts = layer.options("snap") + opts = sorted(opts.items(), key=lambda item: item[0] != "core") + opts = OrderedDict(opts) + return opts + + +def install(): + # Do nothing if we don't have kernel support yet + if not kernel_supported(): + return + + opts = sorted_snap_opts() + # supported-architectures is EXPERIMENTAL and undocumented. + # It probably should live in the base layer, blocking the charm + # during bootstrap if the arch is unsupported. 
+ arch = uname().machine + for snapname, snap_opts in opts.items(): + supported_archs = snap_opts.pop("supported-architectures", None) + if supported_archs and arch not in supported_archs: + # Note that this does *not* error. The charm will need to + # cope with the snaps it requested never getting installed, + # likely by doing its own check on supported-architectures. + hookenv.log( + "Snap {} not supported on {!r} architecture" "".format(snapname, arch), + ERROR, + ) + continue + installed_flag = "snap.installed.{}".format(snapname) + if not reactive.is_flag_set(installed_flag): + snap.install(snapname, **snap_opts) + if data_changed("snap.install.opts", opts): + snap.connect_all() + + +def check_refresh_available(): + # Do nothing if we don't have kernel support yet + if not kernel_supported(): + return + + available_refreshes = snap.get_available_refreshes() + for snapname in snap.get_installed_snaps(): + toggle_flag(snap.get_refresh_available_flag(snapname), snapname in available_refreshes) + + +def refresh(): + # Do nothing if we don't have kernel support yet + if not kernel_supported(): + return + + opts = sorted_snap_opts() + # supported-architectures is EXPERIMENTAL and undocumented. + # It probably should live in the base layer, blocking the charm + # during bootstrap if the arch is unsupported. + arch = uname()[4] + check_refresh_available() + for snapname, snap_opts in opts.items(): + supported_archs = snap_opts.pop("supported-architectures", None) + if supported_archs and arch not in supported_archs: + continue + snap.refresh(snapname, **snap_opts) + snap.connect_all() + + +@reactive.hook("upgrade-charm") +def upgrade_charm(): + refresh() + + +def get_series(): + return subprocess.check_output(["lsb_release", "-sc"], universal_newlines=True).strip() + + +def snapd_supported(): + # snaps are not supported in trusty lxc containers. + if get_series() == "trusty" and host.is_container(): + return False + return True # For all other cases, assume true. 
+ + +def kernel_supported(): + kernel_version = uname().release + + if LooseVersion(kernel_version) < LooseVersion("4.4"): + hookenv.log( + "Snaps do not work on kernel {}, a reboot " + "into a supported kernel (>4.4) is required" + "".format(kernel_version) + ) + return False + return True + + +def ensure_snapd(): + if not snapd_supported(): + hookenv.log("Snaps do not work in this environment", hookenv.ERROR) + raise Exception("Snaps do not work in this environment") + + # I don't use the apt layer, because that would tie this layer + # too closely to apt packaging. Perhaps this is a snap-only system. + if not shutil.which("snap"): + os.environ["DEBIAN_FRONTEND"] = "noninteractive" + cmd = ["apt-get", "install", "-y", "snapd"] + # LP:1699986: Force install of systemd on Trusty. + if get_series() == "trusty": + cmd.append("systemd") + subprocess.check_call(cmd, universal_newlines=True) + + +def proxy_settings(): + proxy_vars = ("http_proxy", "https_proxy") + proxy_env = {key: value for key, value in os.environ.items() if key in proxy_vars} + + snap_proxy = hookenv.config().get("snap_proxy") + if snap_proxy: + proxy_env["http_proxy"] = snap_proxy + proxy_env["https_proxy"] = snap_proxy + return proxy_env + + +def update_snap_proxy(): + # Do nothing if we don't have kernel support yet + if not kernel_supported(): + return + + # This is a hack based on + # https://bugs.launchpad.net/layer-snap/+bug/1533899/comments/1 + # Do it properly when Bug #1533899 is addressed. + # Note we can't do this in a standard reactive handler as we need + # to ensure proxies are configured before attempting installs or + # updates. + proxy = proxy_settings() + + override_dir = "/etc/systemd/system/snapd.service.d" + path = os.path.join(override_dir, "snap_layer_proxy.conf") + if not proxy and not os.path.exists(path): + return # No proxy asked for and proxy never configured. + + # It seems we cannot rely on this directory existing, so manually + # create it. 
+ if not os.path.exists(override_dir): + host.mkdir(override_dir, perms=0o755) + + if not data_changed("snap.proxy", proxy): + return # Short circuit avoids unnecessary restarts. + + if proxy: + create_snap_proxy_conf(path, proxy) + else: + remove_snap_proxy_conf(path) + subprocess.check_call(["systemctl", "daemon-reload"], universal_newlines=True) + time.sleep(2) + subprocess.check_call(["systemctl", "restart", "snapd.service"], universal_newlines=True) + + +def create_snap_proxy_conf(path, proxy): + host.mkdir(os.path.dirname(path)) + content = dedent( + """\ + # Managed by Juju + [Service] + """ + ) + for proxy_key, proxy_value in proxy.items(): + content += "Environment={}={}\n".format(proxy_key, proxy_value) + host.write_file(path, content.encode()) + + +def remove_snap_proxy_conf(path): + if os.path.exists(path): + os.remove(path) + + +def ensure_path(): + # Per Bug #1662856, /snap/bin may be missing from $PATH. Fix this. + if "/snap/bin" not in os.environ["PATH"].split(":"): + os.environ["PATH"] += ":/snap/bin" + + +def _get_snapd_version(): + stdout = subprocess.check_output(["snap", "version"], stdin=subprocess.DEVNULL, universal_newlines=True) + version_info = dict(line.split(None, 1) for line in stdout.splitlines()) + return LooseVersion(version_info["snapd"]) + + +PREFERENCES = """\ +Package: * +Pin: release a={}-proposed +Pin-Priority: 400 +""" + + +def ensure_snapd_min_version(min_version): + snapd_version = _get_snapd_version() + if snapd_version < LooseVersion(min_version): + from charmhelpers.fetch import add_source, apt_update, apt_install + + # Temporary until LP:1735344 lands + add_source("distro-proposed", fail_invalid=True) + distro = get_series() + # disable proposed by default, needs to explicit + write_file( + "/etc/apt/preferences.d/proposed", + PREFERENCES.format(distro), + ) + apt_update() + # explicitly install snapd from proposed + apt_install("snapd/{}-proposed".format(distro)) + snapd_version = _get_snapd_version() + if snapd_version 
< LooseVersion(min_version): + hookenv.log("Failed to install snapd >= {}".format(min_version), ERROR) + raise UnsatisfiedMinimumVersionError(min_version, snapd_version) + + +def download_assertion_bundle(proxy_url): + """Download proxy assertion bundle and store id""" + assertions_url = "{}/v2/auth/store/assertions".format(proxy_url) + local_bundle, headers = urlretrieve(assertions_url) + store_id = headers["X-Assertion-Store-Id"] + return local_bundle, store_id + + +def configure_snap_store_proxy(): + # Do nothing if we don't have kernel support yet + if not kernel_supported(): + return + + if not reactive.is_flag_set("config.changed.snap_proxy_url"): + return + config = hookenv.config() + if "snap_proxy_url" not in config: + # The deprecated snap_proxy_url config items have been removed + # from config.yaml. If the charm author hasn't added them back + # explicitly, there is nothing to do. Juju is maintaining these + # settings as model configuration. + return + snap_store_proxy_url = config.get("snap_proxy_url") + if not snap_store_proxy_url and not config.previous("snap_proxy_url"): + # Proxy url is not set, and was not set previous hook. Do nothing, + # to avoid overwriting the Juju maintained setting. 
+ return + ensure_snapd_min_version("2.30") + if snap_store_proxy_url: + bundle, store_id = download_assertion_bundle(snap_store_proxy_url) + try: + subprocess.check_output( + ["snap", "ack", bundle], + stdin=subprocess.DEVNULL, + universal_newlines=True, + ) + except subprocess.CalledProcessError as e: + raise InvalidBundleError("snapd could not ack the proxy assertion: " + e.output) + else: + store_id = "" + + try: + subprocess.check_output( + ["snap", "set", "core", "proxy.store={}".format(store_id)], + stdin=subprocess.DEVNULL, + universal_newlines=True, + ) + except subprocess.CalledProcessError as e: + raise InvalidBundleError("Proxy ID from header did not match store assertion: " + e.output) + + +register_trigger(when="config.changed.snapd_refresh", clear_flag="snap.refresh.set") + + +@when_not("snap.refresh.set") +@when("snap.installed.core") +def change_snapd_refresh(): + """Set the system refresh.timer option""" + ensure_snapd_min_version("2.31") + timer = hookenv.config()["snapd_refresh"] + was_set = reactive.is_flag_set("snap.refresh.was-set") + if timer or was_set: + snap.set_refresh_timer(timer) + reactive.toggle_flag("snap.refresh.was-set", timer) + reactive.set_flag("snap.refresh.set") + + +# Bootstrap. We don't use standard reactive handlers to ensure that +# everything is bootstrapped before any charm handlers are run. 
+hookenv.atstart(hookenv.log, "Initializing Snap Layer") +hookenv.atstart(ensure_snapd) +hookenv.atstart(ensure_path) +hookenv.atstart(update_snap_proxy) +hookenv.atstart(configure_snap_store_proxy) +hookenv.atstart(install) diff --git a/kubernetes-worker/reactive/status.py b/kubernetes-worker/reactive/status.py new file mode 100644 index 0000000..2f33f3f --- /dev/null +++ b/kubernetes-worker/reactive/status.py @@ -0,0 +1,4 @@ +from charms import layer + + +layer.status._initialize() diff --git a/kubernetes-worker/reactive/tls_client.py b/kubernetes-worker/reactive/tls_client.py new file mode 100644 index 0000000..afa2228 --- /dev/null +++ b/kubernetes-worker/reactive/tls_client.py @@ -0,0 +1,208 @@ +import os + +from pathlib import Path +from subprocess import check_call + +from charms import layer +from charms.reactive import hook +from charms.reactive import set_state, remove_state +from charms.reactive import when +from charms.reactive import set_flag, clear_flag +from charms.reactive import endpoint_from_flag +from charms.reactive.helpers import data_changed + +from charmhelpers.core import hookenv, unitdata +from charmhelpers.core.hookenv import log + + +@when('certificates.ca.available') +def store_ca(tls): + '''Read the certificate authority from the relation object and install + the ca on this system.''' + # Get the CA from the relationship object. + certificate_authority = tls.get_ca() + if certificate_authority: + layer_options = layer.options('tls-client') + ca_path = layer_options.get('ca_certificate_path') + changed = data_changed('certificate_authority', certificate_authority) + if ca_path: + if changed or not os.path.exists(ca_path): + log('Writing CA certificate to {0}'.format(ca_path)) + # ensure we have a newline at the end of the certificate. + # some things will blow up without one. 
+ # See https://bugs.launchpad.net/charm-kubernetes-master/+bug/1828034 + if not certificate_authority.endswith('\n'): + certificate_authority += '\n' + _write_file(ca_path, certificate_authority) + set_state('tls_client.ca.written') + set_state('tls_client.ca.saved') + if changed: + # Update /etc/ssl/certs and generate ca-certificates.crt + install_ca(certificate_authority) + + +@when('certificates.server.cert.available') +def store_server(tls): + '''Read the server certificate and server key from the relation object + and save them to the certificate directory..''' + server_cert, server_key = tls.get_server_cert() + chain = tls.get_chain() + if chain: + server_cert = server_cert + '\n' + chain + if server_cert and server_key: + layer_options = layer.options('tls-client') + cert_path = layer_options.get('server_certificate_path') + key_path = layer_options.get('server_key_path') + cert_changed = data_changed('server_certificate', server_cert) + key_changed = data_changed('server_key', server_key) + if cert_path: + if cert_changed or not os.path.exists(cert_path): + log('Writing server certificate to {0}'.format(cert_path)) + _write_file(cert_path, server_cert) + set_state('tls_client.server.certificate.written') + set_state('tls_client.server.certificate.saved') + if key_path: + if key_changed or not os.path.exists(key_path): + log('Writing server key to {0}'.format(key_path)) + _write_file(key_path, server_key) + set_state('tls_client.server.key.saved') + + +@when('certificates.client.cert.available') +def store_client(tls): + '''Read the client certificate and client key from the relation object + and copy them to the certificate directory.''' + client_cert, client_key = tls.get_client_cert() + chain = tls.get_chain() + if chain: + client_cert = client_cert + '\n' + chain + if client_cert and client_key: + layer_options = layer.options('tls-client') + cert_path = layer_options.get('client_certificate_path') + key_path = layer_options.get('client_key_path') + 
cert_changed = data_changed('client_certificate', client_cert) + key_changed = data_changed('client_key', client_key) + if cert_path: + if cert_changed or not os.path.exists(cert_path): + log('Writing client certificate to {0}'.format(cert_path)) + _write_file(cert_path, client_cert) + set_state('tls_client.client.certificate.written') + set_state('tls_client.client.certificate.saved') + if key_path: + if key_changed or not os.path.exists(key_path): + log('Writing client key to {0}'.format(key_path)) + _write_file(key_path, client_key) + set_state('tls_client.client.key.saved') + + +@when('certificates.certs.changed') +def update_certs(): + tls = endpoint_from_flag('certificates.certs.changed') + certs_paths = unitdata.kv().get('layer.tls-client.cert-paths', {}) + all_ready = True + any_changed = False + maps = { + 'server': tls.server_certs_map, + 'client': tls.client_certs_map, + } + + if maps.get('client') == {}: + log( + 'No client certs found using maps. Checking for global \ + client certificates.', + 'WARNING' + ) + # Check for global certs, + # Backwards compatibility https://bugs.launchpad.net/charm-kubernetes-master/+bug/1825819 + cert_pair = tls.get_client_cert() + if cert_pair is not None: + for client_name in certs_paths.get('client', {}).keys(): + maps.get('client').update({ + client_name: cert_pair + }) + + chain = tls.get_chain() + for cert_type in ('server', 'client'): + for common_name, paths in certs_paths.get(cert_type, {}).items(): + cert_pair = maps[cert_type].get(common_name) + if not cert_pair: + all_ready = False + continue + if not data_changed('layer.tls-client.' 
+ '{}.{}'.format(cert_type, common_name), cert_pair): + continue + + cert = None + key = None + if type(cert_pair) is not tuple: + if paths['crt']: + cert = cert_pair.cert + if paths['key']: + key = cert_pair.key + else: + cert, key = cert_pair + + if cert: + if chain: + cert = cert + '\n' + chain + _ensure_directory(paths['crt']) + Path(paths['crt']).write_text(cert) + + if key: + _ensure_directory(paths['key']) + Path(paths['key']).write_text(key) + + any_changed = True + # clear flags first to ensure they are re-triggered if left set + clear_flag('tls_client.{}.certs.changed'.format(cert_type)) + clear_flag('tls_client.{}.cert.{}.changed'.format(cert_type, + common_name)) + set_flag('tls_client.{}.certs.changed'.format(cert_type)) + set_flag('tls_client.{}.cert.{}.changed'.format(cert_type, + common_name)) + if all_ready: + set_flag('tls_client.certs.saved') + if any_changed: + clear_flag('tls_client.certs.changed') + set_flag('tls_client.certs.changed') + clear_flag('certificates.certs.changed') + + +def install_ca(certificate_authority): + '''Install a certificiate authority on the system by calling the + update-ca-certificates command.''' + if certificate_authority: + name = hookenv.service_name() + # Create a path to install CAs on Debian systems. + ca_path = '/usr/local/share/ca-certificates/{0}.crt'.format(name) + log('Writing CA certificate to {0}'.format(ca_path)) + _write_file(ca_path, certificate_authority) + # Update the trusted CAs on this system (a time expensive operation). 
+ check_call(['update-ca-certificates']) + log('Generated ca-certificates.crt for {0}'.format(name)) + set_state('tls_client.ca_installed') + + +@hook('upgrade-charm') +def remove_states(): + remove_state('tls_client.ca.saved') + remove_state('tls_client.server.certificate.saved') + remove_state('tls_client.server.key.saved') + remove_state('tls_client.client.certificate.saved') + remove_state('tls_client.client.key.saved') + + +def _ensure_directory(path): + '''Ensure the parent directory exists creating directories if necessary.''' + directory = os.path.dirname(path) + if not os.path.isdir(directory): + os.makedirs(directory) + os.chmod(directory, 0o770) + + +def _write_file(path, content): + '''Write the path to a file.''' + _ensure_directory(path) + with open(path, 'w') as stream: + stream.write(content) + os.chmod(path, 0o440) diff --git a/kubernetes-worker/requirements.txt b/kubernetes-worker/requirements.txt new file mode 100644 index 0000000..55543d9 --- /dev/null +++ b/kubernetes-worker/requirements.txt @@ -0,0 +1,3 @@ +mock +flake8 +pytest diff --git a/kubernetes-worker/revision b/kubernetes-worker/revision new file mode 100644 index 0000000..c227083 --- /dev/null +++ b/kubernetes-worker/revision @@ -0,0 +1 @@ +0 \ No newline at end of file diff --git a/kubernetes-worker/setup.py b/kubernetes-worker/setup.py new file mode 100755 index 0000000..b30bff5 --- /dev/null +++ b/kubernetes-worker/setup.py @@ -0,0 +1,28 @@ +#!/usr/bin/env python + +import os +from setuptools import setup + +here = os.path.abspath(os.path.dirname(__file__)) + +with open(os.path.join(here, "README.md")) as f: + README = f.read() + +setup( + name="layer_snap", + version="1.0.0", + description="layer_snap", + long_description=README, + license="Apache License 2.0", + classifiers=[ + "Development Status :: 5 - Production/Stable", + "Intended Audience :: Developers", + "Programming Language :: Python :: 3", + ], + url="https://git.launchpad.net/layer-snap", + package_dir={"": "lib"}, + 
packages=["charms/layer"], + include_package_data=True, + zip_safe=False, + install_requires=["charmhelpers", "charms.reactive"], +) diff --git a/kubernetes-worker/templates/cdk-service-kicker b/kubernetes-worker/templates/cdk-service-kicker new file mode 100644 index 0000000..26d3740 --- /dev/null +++ b/kubernetes-worker/templates/cdk-service-kicker @@ -0,0 +1,34 @@ +#!/bin/sh +set -eu + +# This service runs on boot to work around issues relating to LXD and snapd. + +# Workaround for https://github.com/conjure-up/conjure-up/issues/1448 +if [ -f '/proc/1/environ' ] && grep -q '^container=lxc' /proc/1/environ; then + echo "lxc detected, applying snapd apparmor profiles" + (set +e + apparmor_parser /var/lib/snapd/apparmor/profiles/* + echo "apparmor_parser: exit status $?" + ) +else + echo "lxc not detected, skipping snapd apparmor profiles" +fi + +# Workaround for https://github.com/juju-solutions/bundle-canonical-kubernetes/issues/357 +services="{{services}}" + +deadline="$(expr "$(date +%s)" + 600)" + +while [ "$(date +%s)" -lt "$deadline" ]; do + for service in $services; do + echo "$service: checking" + if ! 
systemctl is-active "$service"; then + echo "$service: not active, restarting" + systemctl restart "$service" || true + fi + done + + sleep 10 +done + +echo "deadline has passed, exiting gracefully" diff --git a/kubernetes-worker/templates/cdk-service-kicker.service b/kubernetes-worker/templates/cdk-service-kicker.service new file mode 100644 index 0000000..5c2105e --- /dev/null +++ b/kubernetes-worker/templates/cdk-service-kicker.service @@ -0,0 +1,10 @@ +[Unit] +Description=cdk-service-kicker + +[Service] +ExecStart=/usr/bin/cdk-service-kicker +Restart=on-failure +Type=simple + +[Install] +WantedBy=multi-user.target diff --git a/kubernetes-worker/templates/cdk.auth-webhook-secret.yaml b/kubernetes-worker/templates/cdk.auth-webhook-secret.yaml new file mode 100644 index 0000000..a12c402 --- /dev/null +++ b/kubernetes-worker/templates/cdk.auth-webhook-secret.yaml @@ -0,0 +1,13 @@ +# Manifest for CK secrets that auth-webhook expects +--- +apiVersion: v1 +kind: Secret +metadata: + name: {{ secret_name }} + namespace: {{ secret_namespace }} +type: {{ type }} +data: + uid: {{ user }} + username: {{ username }} + password: {{ password }} + groups: '{{ groups }}' diff --git a/kubernetes-worker/templates/default-http-backend.yaml b/kubernetes-worker/templates/default-http-backend.yaml new file mode 100644 index 0000000..4f1969f --- /dev/null +++ b/kubernetes-worker/templates/default-http-backend.yaml @@ -0,0 +1,62 @@ +apiVersion: {{ deployment_api_version }} +kind: Deployment +metadata: + name: default-http-backend-{{ juju_application }} + labels: + app.kubernetes.io/name: default-http-backend-{{ juju_application }} + app.kubernetes.io/part-of: ingress-nginx-{{ juju_application }} + cdk-{{ juju_application }}-ingress: "true" + cdk-restart-on-ca-change: "true" + namespace: ingress-nginx-{{ juju_application }} +spec: + replicas: 1 + selector: + matchLabels: + app.kubernetes.io/name: default-http-backend-{{ juju_application }} + app.kubernetes.io/part-of: ingress-nginx-{{ 
juju_application }} + template: + metadata: + labels: + app.kubernetes.io/name: default-http-backend-{{ juju_application }} + app.kubernetes.io/part-of: ingress-nginx-{{ juju_application }} + spec: + terminationGracePeriodSeconds: 60 + containers: + - name: default-http-backend-{{ juju_application }} + # Any image is permissible as long as: + # 1. It serves a 404 page at / + # 2. It serves 200 on a /healthz endpoint + image: {{ defaultbackend_image }} + livenessProbe: + httpGet: + path: /healthz + port: 8080 + scheme: HTTP + initialDelaySeconds: 30 + timeoutSeconds: 5 + ports: + - containerPort: 8080 + resources: + limits: + cpu: 10m + memory: 20Mi + requests: + cpu: 10m + memory: 20Mi +--- +apiVersion: v1 +kind: Service +metadata: + name: default-http-backend-{{ juju_application }} + namespace: ingress-nginx-{{ juju_application }} + labels: + app.kubernetes.io/name: default-http-backend-{{ juju_application }} + app.kubernetes.io/part-of: ingress-nginx-{{ juju_application }} + cdk-{{ juju_application }}-ingress: "true" +spec: + ports: + - port: 80 + targetPort: 8080 + selector: + app.kubernetes.io/name: default-http-backend-{{ juju_application }} + app.kubernetes.io/part-of: ingress-nginx-{{ juju_application }} diff --git a/kubernetes-worker/templates/ingress-daemon-set.yaml b/kubernetes-worker/templates/ingress-daemon-set.yaml new file mode 100644 index 0000000..72d0fcd --- /dev/null +++ b/kubernetes-worker/templates/ingress-daemon-set.yaml @@ -0,0 +1,356 @@ +apiVersion: v1 +kind: Namespace +metadata: + name: ingress-nginx-{{ juju_application }} + labels: + cdk-{{ juju_application }}-ingress: "true" + +{%- if default_ssl_certificate_option %} +--- +kind: Secret +apiVersion: v1 +type: Opaque +metadata: + name: default-ssl-certificate + namespace: ingress-nginx-{{ juju_application }} + labels: + app.kubernetes.io/name: ingress-nginx-{{ juju_application }} + app.kubernetes.io/part-of: ingress-nginx-{{ juju_application }} + cdk-{{ juju_application }}-ingress: "true" 
+data: + tls.crt: {{ default_ssl_certificate }} + tls.key: {{ default_ssl_key }} +{%- endif %} + +--- +kind: ConfigMap +apiVersion: v1 +metadata: + name: nginx-configuration + namespace: ingress-nginx-{{ juju_application }} + labels: + app.kubernetes.io/name: ingress-nginx-{{ juju_application }} + app.kubernetes.io/part-of: ingress-nginx-{{ juju_application }} + cdk-{{ juju_application }}-ingress: "true" +data: + use-forwarded-headers: "{{ use_forwarded_headers }}" + +--- +kind: ConfigMap +apiVersion: v1 +metadata: + name: tcp-services + namespace: ingress-nginx-{{ juju_application }} + labels: + app.kubernetes.io/name: ingress-nginx-{{ juju_application }} + app.kubernetes.io/part-of: ingress-nginx-{{ juju_application }} + cdk-{{ juju_application }}-ingress: "true" + +--- +kind: ConfigMap +apiVersion: v1 +metadata: + name: udp-services + namespace: ingress-nginx-{{ juju_application }} + labels: + app.kubernetes.io/name: ingress-nginx-{{ juju_application }} + app.kubernetes.io/part-of: ingress-nginx-{{ juju_application }} + cdk-{{ juju_application }}-ingress: "true" + +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: nginx-ingress-serviceaccount-{{ juju_application }} + namespace: ingress-nginx-{{ juju_application }} + labels: + app.kubernetes.io/name: ingress-nginx-{{ juju_application }} + app.kubernetes.io/part-of: ingress-nginx-{{ juju_application }} + cdk-{{ juju_application }}-ingress: "true" + +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: nginx-ingress-clusterrole-{{ juju_application }} + labels: + app.kubernetes.io/name: ingress-nginx-{{ juju_application }} + app.kubernetes.io/part-of: ingress-nginx-{{ juju_application }} + cdk-{{ juju_application }}-ingress: "true" +rules: + - apiGroups: + - "" + resources: + - configmaps + - endpoints + - nodes + - pods + - secrets + verbs: + - list + - watch + - apiGroups: + - "" + resources: + - nodes + verbs: + - get + - apiGroups: + - "" + resources: + - services + 
verbs: + - get + - list + - watch + - apiGroups: + - extensions + - "networking.k8s.io" # # k8s 1.14+ + resources: + - ingresses + verbs: + - get + - list + - watch + - apiGroups: + - "" + resources: + - events + verbs: + - create + - patch + - apiGroups: + - extensions + - "networking.k8s.io" # k8s 1.14+ + resources: + - ingresses/status + verbs: + - update + - apiGroups: + - "networking.k8s.io" # k8s 1.14+ + resources: + - ingressclasses + verbs: + - get + - list + - watch +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: nginx-ingress-role-{{ juju_application }} + namespace: ingress-nginx-{{ juju_application }} + labels: + app.kubernetes.io/name: ingress-nginx-{{ juju_application }} + app.kubernetes.io/part-of: ingress-nginx-{{ juju_application }} + cdk-{{ juju_application }}-ingress: "true" +rules: + - apiGroups: + - "" + resources: + - namespaces + verbs: + - get + - apiGroups: + - "" + resources: + - configmaps + - pods + - secrets + - endpoints + verbs: + - get + - list + - watch + - apiGroups: + - "" + resources: + - services + verbs: + - get + - list + - watch + - apiGroups: + - extensions + - "networking.k8s.io" # k8s 1.14+ + resources: + - ingresses + verbs: + - get + - list + - watch + - apiGroups: + - extensions + - "networking.k8s.io" # k8s 1.14+ + resources: + - ingresses/status + verbs: + - update + - apiGroups: + - "networking.k8s.io" # k8s 1.14+ + resources: + - ingressclasses + verbs: + - get + - list + - watch + - apiGroups: + - "" + resourceNames: + - ingress-controller-leader + resources: + - configmaps + verbs: + - get + - update + - apiGroups: + - "" + resources: + - configmaps + verbs: + - create + - apiGroups: + - "" + resources: + - events + verbs: + - create + - patch +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: nginx-ingress-role-nisa-binding-{{ juju_application }} + namespace: ingress-nginx-{{ juju_application }} + labels: + app.kubernetes.io/name: ingress-nginx-{{ 
juju_application }} + app.kubernetes.io/part-of: ingress-nginx-{{ juju_application }} + cdk-{{ juju_application }}-ingress: "true" +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: nginx-ingress-role-{{ juju_application }} +subjects: + - kind: ServiceAccount + name: nginx-ingress-serviceaccount-{{ juju_application }} + namespace: ingress-nginx-{{ juju_application }} + +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: nginx-ingress-clusterrole-nisa-binding-{{ juju_application }} + labels: + app.kubernetes.io/name: ingress-nginx-{{ juju_application }} + app.kubernetes.io/part-of: ingress-nginx-{{ juju_application }} + cdk-{{ juju_application }}-ingress: "true" +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: nginx-ingress-clusterrole-{{ juju_application }} +subjects: + - kind: ServiceAccount + name: nginx-ingress-serviceaccount-{{ juju_application }} + namespace: ingress-nginx-{{ juju_application }} + +--- +apiVersion: {{ daemonset_api_version }} +kind: DaemonSet +metadata: + name: nginx-ingress-controller-{{ juju_application }} + namespace: ingress-nginx-{{ juju_application }} + labels: + app.kubernetes.io/name: ingress-nginx-{{ juju_application }} + app.kubernetes.io/part-of: ingress-nginx-{{ juju_application }} + juju-application: nginx-ingress-{{ juju_application }} + cdk-{{ juju_application }}-ingress: "true" + cdk-restart-on-ca-change: "true" +spec: + selector: + matchLabels: + app.kubernetes.io/name: ingress-nginx-{{ juju_application }} + app.kubernetes.io/part-of: ingress-nginx-{{ juju_application }} + template: + metadata: + labels: + app.kubernetes.io/name: ingress-nginx-{{ juju_application }} + app.kubernetes.io/part-of: ingress-nginx-{{ juju_application }} + annotations: + prometheus.io/port: "10254" + prometheus.io/scrape: "true" + spec: + serviceAccountName: nginx-ingress-serviceaccount-{{ juju_application }} + nodeSelector: + juju-application: {{ juju_application }} 
+ terminationGracePeriodSeconds: 60 + # hostPort doesn't work with CNI, so we have to use hostNetwork instead + # see https://github.com/kubernetes/kubernetes/issues/23920 + hostNetwork: true + containers: + - name: nginx-ingress-controller{{ juju_application }} + image: {{ ingress_image }} + args: + - /nginx-ingress-controller + - --configmap=$(POD_NAMESPACE)/nginx-configuration + - --tcp-services-configmap=$(POD_NAMESPACE)/tcp-services + - --udp-services-configmap=$(POD_NAMESPACE)/udp-services + - --annotations-prefix=nginx.ingress.kubernetes.io + - --enable-ssl-chain-completion={{ ssl_chain_completion }} + - --enable-ssl-passthrough={{ enable_ssl_passthrough }} +{%- if default_ssl_certificate_option %} + {{ default_ssl_certificate_option }} +{%- endif %} + securityContext: + capabilities: + drop: + - ALL + add: + - NET_BIND_SERVICE + runAsUser: {{ ingress_uid }} + env: + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + ports: + - name: http + containerPort: 80 + - name: https + containerPort: 443 + livenessProbe: + failureThreshold: 3 + httpGet: + path: /healthz + port: 10254 + scheme: HTTP + initialDelaySeconds: 10 + periodSeconds: 10 + successThreshold: 1 + timeoutSeconds: 1 + readinessProbe: + failureThreshold: 3 + httpGet: + path: /healthz + port: 10254 + scheme: HTTP + periodSeconds: 10 + successThreshold: 1 + timeoutSeconds: 1 + +--- +apiVersion: networking.k8s.io/v1 +kind: IngressClass +metadata: + labels: + app.kubernetes.io/name: ingress-nginx-{{ juju_application }} + app.kubernetes.io/part-of: ingress-nginx-{{ juju_application }} + cdk-{{ juju_application }}-ingress: "true" + name: nginx-ingress-controller + annotations: + ingressclass.kubernetes.io/is-default-class: "true" +spec: + controller: k8s.io/ingress-nginx diff --git a/kubernetes-worker/templates/microbot-example.yaml b/kubernetes-worker/templates/microbot-example.yaml new file mode 100644 
index 0000000..c89be53 --- /dev/null +++ b/kubernetes-worker/templates/microbot-example.yaml @@ -0,0 +1,66 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + creationTimestamp: null + labels: + app: microbot + name: microbot +spec: + replicas: {{ replicas }} + selector: + matchLabels: + app: microbot + strategy: {} + template: + metadata: + creationTimestamp: null + labels: + app: microbot + spec: + containers: + - image: {{ registry|default("docker.io") }}/cdkbot/microbot-{{ arch }}:latest + imagePullPolicy: "" + name: microbot + ports: + - containerPort: 80 + livenessProbe: + httpGet: + path: / + port: 80 + initialDelaySeconds: 5 + timeoutSeconds: 30 + resources: {} + restartPolicy: Always + serviceAccountName: "" +status: {} +--- +apiVersion: v1 +kind: Service +metadata: + name: microbot + labels: + app: microbot +spec: + ports: + - port: 80 + protocol: TCP + targetPort: 80 + selector: + app: microbot +--- +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + name: microbot-ingress +spec: + rules: + - host: microbot.{{ public_address }}.nip.io + http: + paths: + - path: / + pathType: Prefix + backend: + service: + name: microbot + port: + number: 80 diff --git a/kubernetes-worker/templates/nagios_plugin.py b/kubernetes-worker/templates/nagios_plugin.py new file mode 100644 index 0000000..3e4a9b4 --- /dev/null +++ b/kubernetes-worker/templates/nagios_plugin.py @@ -0,0 +1,92 @@ +#!/usr/bin/env python3 + +# Copyright (C) 2019 Canonical Ltd. 
+ +import nagios_plugin3 +import yaml +from subprocess import check_output, CalledProcessError, PIPE + +snap_resources = ['kubectl', 'kubelet', 'kube-proxy'] + + +def check_snaps_installed(): + """Confirm the snaps are installed, raise an error if not""" + for snap_name in snap_resources: + cmd = ['snap', 'list', snap_name] + try: + check_output(cmd).decode('UTF-8') + except Exception: + msg = '{} snap is not installed'.format(snap_name) + raise nagios_plugin3.CriticalError(msg) + + +def check_node(node): + # Note: Keep the Ready check first since all checks will fail when not Ready + checks = [{'name': 'Ready', + 'expected': 'True', + 'type': 'error', + 'error': 'Node Not Ready'}, + {'name': 'MemoryPressure', + 'expected': 'False', + 'type': 'warn', + 'error': 'Memory Pressure'}, + {'name': 'DiskPressure', + 'expected': 'False', + 'type': 'warn', + 'error': 'Disk Pressure'}, + {'name': 'PIDPressure', + 'expected': 'False', + 'type': 'warn', + 'error': 'PID Pressure'}, + ] + msg = [] + error = False + for check in checks: + # find the status that matches + for s in node['status']['conditions']: + if s['type'] == check['name']: + # does it match expectations? If not, toss it on the list + # of errors so we don't show the first issue, but all. 
+ if s['status'].lower() != check['expected'].lower(): + msg.append(check['error']) + if check['type'] == 'error': + error = True + break + else: + err_msg = 'Unable to find status for {}'.format(check['error']) + raise nagios_plugin3.CriticalError(err_msg) + + if msg: + if error: + raise nagios_plugin3.CriticalError(msg) + else: + raise nagios_plugin3.WarnError(msg) + + +def verify_node_registered_and_ready(): + node = None + try: + cmd = [ + "/snap/bin/kubectl", "--kubeconfig", "/var/lib/nagios/.kube/config", + "get", "no", "{{node_name}}", "-o=yaml" + ] + node = yaml.safe_load(check_output(cmd, stderr=PIPE)) + except CalledProcessError as e: + err = e.stderr.decode('UTF-8') + if "not found" in err: + raise nagios_plugin3.CriticalError("Unable to find " + "node registered on API server") + if not node: + raise nagios_plugin3.CriticalError("Unable to run kubectl " + "and parse output") + return check_node(node) + + +def main(): + nagios_plugin3.try_check(check_snaps_installed) + nagios_plugin3.try_check(verify_node_registered_and_ready) + print("OK - No memory, disk, or PID pressure. 
Registered with API server") + + +if __name__ == "__main__": + main() diff --git a/kubernetes-worker/templates/nfs-provisioner.yaml b/kubernetes-worker/templates/nfs-provisioner.yaml new file mode 100644 index 0000000..0f0c85c --- /dev/null +++ b/kubernetes-worker/templates/nfs-provisioner.yaml @@ -0,0 +1,103 @@ +apiVersion: storage.k8s.io/v1 +kind: StorageClass +metadata: + name: default + annotations: + storageclass.kubernetes.io/is-default-class: "true" +provisioner: fuseim.pri/ifs +--- +kind: Deployment +apiVersion: apps/v1 +metadata: + name: nfs-client-provisioner + labels: + app: nfs-client-provisioner +spec: + replicas: 1 + selector: + matchLabels: + app: nfs-client-provisioner + strategy: + type: Recreate + template: + metadata: + labels: + app: nfs-client-provisioner + spec: + serviceAccountName: nfs-client-provisioner + containers: + - name: nfs-client-provisioner + image: {{registry|default('quay.io')}}/external_storage/nfs-client-provisioner:v3.1.0-k8s1.11 + volumeMounts: + - name: nfs-client-root + mountPath: /persistentvolumes + env: + - name: PROVISIONER_NAME + value: fuseim.pri/ifs + - name: NFS_SERVER + value: {{ hostname }} + - name: NFS_PATH + value: {{ mountpoint }} + volumes: + - name: nfs-client-root + nfs: + server: {{ hostname }} + path: {{ mountpoint }} +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: nfs-client-provisioner +--- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: nfs-client-provisioner-runner +rules: + - apiGroups: [""] + resources: ["persistentvolumes"] + verbs: ["get", "list", "watch", "create", "delete"] + - apiGroups: [""] + resources: ["persistentvolumeclaims"] + verbs: ["get", "list", "watch", "update"] + - apiGroups: ["storage.k8s.io"] + resources: ["storageclasses"] + verbs: ["get", "list", "watch"] + - apiGroups: [""] + resources: ["events"] + verbs: ["create", "update", "patch"] +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: 
run-nfs-client-provisioner +subjects: + - kind: ServiceAccount + name: nfs-client-provisioner + namespace: default +roleRef: + kind: ClusterRole + name: nfs-client-provisioner-runner + apiGroup: rbac.authorization.k8s.io +--- +kind: Role +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: leader-locking-nfs-client-provisioner +rules: + - apiGroups: [""] + resources: ["endpoints"] + verbs: ["get", "list", "watch", "create", "update", "patch"] +--- +kind: RoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: leader-locking-nfs-client-provisioner +subjects: + - kind: ServiceAccount + name: nfs-client-provisioner + namespace: default +roleRef: + kind: Role + name: leader-locking-nfs-client-provisioner + apiGroup: rbac.authorization.k8s.io diff --git a/kubernetes-worker/tests/data/ip_addr_json b/kubernetes-worker/tests/data/ip_addr_json new file mode 100644 index 0000000..9b10664 --- /dev/null +++ b/kubernetes-worker/tests/data/ip_addr_json @@ -0,0 +1,30 @@ +[ + { + "ifname": "ens192", + "operstate": "UP", + "addr_info": [ + { + "local": "10.246.154.77", + "prefixlen": 24, + "metric": 100 + }, + {} + ] + }, + { + "ifname": "lxdbr0", + "operstate": "UP", + "addr_info": [ + { + "local": "10.111.246.1", + "prefixlen": 24 + } + ] + }, + { + "link_index": 4, + "ifname": "veth890e3a36", + "operstate": "UP", + "addr_info": [] + } +] \ No newline at end of file diff --git a/kubernetes-worker/tests/functional/conftest.py b/kubernetes-worker/tests/functional/conftest.py new file mode 100644 index 0000000..a92e249 --- /dev/null +++ b/kubernetes-worker/tests/functional/conftest.py @@ -0,0 +1,4 @@ +import charms.unit_test + + +charms.unit_test.patch_reactive() diff --git a/kubernetes-worker/tests/functional/test_k8s_common.py b/kubernetes-worker/tests/functional/test_k8s_common.py new file mode 100644 index 0000000..4b867e6 --- /dev/null +++ b/kubernetes-worker/tests/functional/test_k8s_common.py @@ -0,0 +1,90 @@ +from functools import partial + +import 
pytest +from unittest import mock +from charms.layer import kubernetes_common + + +class TestCreateKubeConfig: + @pytest.fixture(autouse=True) + def _files(self, tmp_path): + self.cfg_file = tmp_path / "config" + self.ca_file = tmp_path / "ca.crt" + self.ca_file.write_text("foo") + self.ckc = partial( + kubernetes_common.create_kubeconfig, + self.cfg_file, + "server", + self.ca_file, + ) + + def test_guard_clauses(self): + with pytest.raises(ValueError): + self.ckc() + assert not self.cfg_file.exists() + with pytest.raises(ValueError): + self.ckc(token="token", password="password") + assert not self.cfg_file.exists() + with pytest.raises(ValueError): + self.ckc(key="key") + assert not self.cfg_file.exists() + + def test_file_creation(self): + self.ckc(password="password") + assert self.cfg_file.exists() + cfg_data_1 = self.cfg_file.read_text() + assert cfg_data_1 + + def test_idempotency(self): + self.ckc(password="password") + cfg_data_1 = self.cfg_file.read_text() + self.ckc(password="password") + cfg_data_2 = self.cfg_file.read_text() + # Verify that calling w/ the same data keeps the same file contents. 
+ assert cfg_data_2 == cfg_data_1 + + def test_efficient_updates(self): + self.ckc(password="old_password") + cfg_stat_1 = self.cfg_file.stat() + self.ckc(password="old_password") + cfg_stat_2 = self.cfg_file.stat() + self.ckc(password="new_password") + cfg_stat_3 = self.cfg_file.stat() + # Verify that calling with the same data doesn't + # modify the file at all, but that new data does + assert cfg_stat_1.st_mtime == cfg_stat_2.st_mtime < cfg_stat_3.st_mtime + + def test_aws_iam(self): + self.ckc(password="password", aws_iam_cluster_id="aws-cluster") + assert self.cfg_file.exists() + cfg_data_1 = self.cfg_file.read_text() + assert "aws-cluster" in cfg_data_1 + + def test_keystone(self): + self.ckc(password="password", keystone=True) + assert self.cfg_file.exists() + cfg_data_1 = self.cfg_file.read_text() + assert "keystone-user" in cfg_data_1 + assert "exec" in cfg_data_1 + + def test_atomic_updates(self): + self.ckc(password="old_password") + with self.cfg_file.open("rt") as f: + # Perform a write in the middle of reading + self.ckc(password="new_password") + # Read data from existing FH after new data was written + cfg_data_1 = f.read() + # Read updated data + cfg_data_2 = self.cfg_file.read_text() + # Verify that the in-progress read didn't get any of the new data + assert cfg_data_1 != cfg_data_2 + assert "old_password" in cfg_data_1 + assert "new_password" in cfg_data_2 + + @mock.patch("charmhelpers.core.hookenv.network_get", autospec=True) + def test_get_ingress_address(self, network_get): + network_get.return_value = {"ingress-addresses": ["1.2.3.4", "5.6.7.8"]} + ingress = kubernetes_common.get_ingress_address("endpoint-name") + assert ingress == "1.2.3.4" + ingress = kubernetes_common.get_ingress_address("endpoint-name", ["1.2.3.4"]) + assert ingress == "5.6.7.8" diff --git a/kubernetes-worker/tests/unit/conftest.py b/kubernetes-worker/tests/unit/conftest.py new file mode 100644 index 0000000..44f005e --- /dev/null +++ 
b/kubernetes-worker/tests/unit/conftest.py @@ -0,0 +1,7 @@ +import charms.unit_test + + +charms.unit_test.patch_reactive() +charms.unit_test.patch_module("charms.coordinator") +charms.unit_test.patch_module("charms.leadership") +charms.layer.kubernetes_common.retry.return_value = charms.unit_test.identity diff --git a/kubernetes-worker/tests/unit/test_k8s_common.py b/kubernetes-worker/tests/unit/test_k8s_common.py new file mode 100644 index 0000000..5e4fc56 --- /dev/null +++ b/kubernetes-worker/tests/unit/test_k8s_common.py @@ -0,0 +1,148 @@ +import json +import string +from subprocess import CalledProcessError +from pathlib import Path +from unittest.mock import Mock, patch +from charms.reactive import endpoint_from_flag + +from charms.layer import kubernetes_common as kc + + +def test_token_generator(): + alphanum = string.ascii_letters + string.digits + token = kc.token_generator(10) + assert len(token) == 10 + unknown_chars = set(token) - set(alphanum) + assert not unknown_chars + + +def test_get_secret_names(monkeypatch): + monkeypatch.setattr(kc, "kubectl", Mock()) + kc.kubectl.side_effect = [ + CalledProcessError(1, "none"), + FileNotFoundError, + "{}".encode("utf8"), + json.dumps( + { + "items": [ + { + "metadata": {"name": "secret-id"}, + "data": {"username": "dXNlcg=="}, + }, + ], + } + ).encode("utf8"), + ] + assert kc.get_secret_names() == {} + assert kc.get_secret_names() == {} + assert kc.get_secret_names() == {} + assert kc.get_secret_names() == {"user": "secret-id"} + + +def test_generate_rfc1123(): + alphanum = string.ascii_letters + string.digits + token = kc.generate_rfc1123(1000) + assert len(token) == 253 + unknown_chars = set(token) - set(alphanum) + assert not unknown_chars + + +def test_create_secret(monkeypatch): + monkeypatch.setattr(kc, "render", Mock()) + monkeypatch.setattr(kc, "kubectl_manifest", Mock()) + monkeypatch.setattr(kc, "get_secret_names", Mock()) + monkeypatch.setattr(kc, "generate_rfc1123", Mock()) + 
kc.kubectl_manifest.side_effect = [True, False] + kc.get_secret_names.side_effect = [{"username": "secret-id"}, {}] + kc.generate_rfc1123.return_value = "foo" + assert kc.create_secret("token", "username", "user", "groups") + assert kc.render.call_args[1]["context"] == { + "groups": "Z3JvdXBz", + "password": "dXNlcjo6dG9rZW4=", + "secret_name": "secret-id", + "secret_namespace": "kube-system", + "type": "juju.is/token-auth", + "user": "dXNlcg==", + "username": "dXNlcm5hbWU=", + } + assert not kc.create_secret("token", "username", "user", "groups") + assert kc.render.call_args[1]["context"] == { + "groups": "Z3JvdXBz", + "password": "dXNlcjo6dG9rZW4=", + "secret_name": "auth-user-foo", + "secret_namespace": "kube-system", + "type": "juju.is/token-auth", + "user": "dXNlcg==", + "username": "dXNlcm5hbWU=", + } + + +def test_get_secret_password(monkeypatch): + monkeypatch.setattr(kc, "kubectl", Mock()) + monkeypatch.setattr(kc, "Path", Mock()) + monkeypatch.setattr(kc, "yaml", Mock()) + kc.kubectl.side_effect = [ + CalledProcessError(1, "none"), + CalledProcessError(1, "none"), + CalledProcessError(1, "none"), + CalledProcessError(1, "none"), + CalledProcessError(1, "none"), + CalledProcessError(1, "none"), + FileNotFoundError, + json.dumps({}).encode("utf8"), + json.dumps({"items": []}).encode("utf8"), + json.dumps({"items": []}).encode("utf8"), + json.dumps({"items": [{}]}).encode("utf8"), + json.dumps({"items": [{"data": {}}]}).encode("utf8"), + json.dumps( + {"items": [{"data": {"username": "Ym9i", "password": "c2VjcmV0"}}]} + ).encode("utf8"), + json.dumps( + {"items": [{"data": {"username": "dXNlcm5hbWU=", "password": "c2VjcmV0"}}]} + ).encode("utf8"), + ] + kc.yaml.safe_load.side_effect = [ + {}, + {"users": None}, + {"users": []}, + {"users": [{"user": {}}]}, + {"users": [{"user": {"token": "secret"}}]}, + ] + assert kc.get_secret_password("username") is None + assert kc.get_secret_password("admin") is None + assert kc.get_secret_password("admin") is None + 
assert kc.get_secret_password("admin") is None + assert kc.get_secret_password("admin") is None + assert kc.get_secret_password("admin") == "secret" + assert kc.get_secret_password("username") is None + assert kc.get_secret_password("username") is None + assert kc.get_secret_password("username") is None + assert kc.get_secret_password("username") is None + assert kc.get_secret_password("username") is None + assert kc.get_secret_password("username") is None + assert kc.get_secret_password("username") is None + assert kc.get_secret_password("username") == "secret" + + +@patch("os.listdir") +@patch("os.remove") +@patch("os.symlink") +def test_configure_default_cni(os_symlink, os_remove, os_listdir): + os_listdir.return_value = ["05-default.conflist", "10-cni.conflist"] + cni = endpoint_from_flag("cni.available") + cni.get_config.return_value = { + "cidr": "192.168.0.0/24", + "cni-conf-file": "10-cni.conflist", + } + kc.configure_default_cni("test-cni") + os_remove.assert_called_once_with("/etc/cni/net.d/05-default.conflist") + os_symlink.assert_called_once_with( + "10-cni.conflist", "/etc/cni/net.d/05-default.conflist" + ) + + +def test_get_bind_addrs(): + response = Path("tests", "data", "ip_addr_json").read_bytes() + with patch.object(kc, "check_output", return_value=response): + addrs = kc.get_bind_addrs() + assert addrs == ["10.246.154.77"] diff --git a/kubernetes-worker/tests/unit/test_layer.py b/kubernetes-worker/tests/unit/test_layer.py new file mode 100644 index 0000000..7cc9344 --- /dev/null +++ b/kubernetes-worker/tests/unit/test_layer.py @@ -0,0 +1,69 @@ +import pytest +import unittest.mock as mock + +from charms.layer import kubernetes_node_base +from charmhelpers.core import hookenv + + +class TestNodeLabels: + @pytest.fixture(autouse=True) + def setup(self, monkeypatch, request): + self.kube_control = mock.Mock() + self.config = {"labels": f'{request.node.name}="value"'} + + hc = mock.Mock() + hc.side_effect = lambda k=None: self.config[k] if k else 
self.config + monkeypatch.setattr(hookenv, "config", hc) + + self.hook_log = mock.Mock() + monkeypatch.setattr(hookenv, "log", self.hook_log) + + hsn = mock.Mock(return_value="kubernetes-control-plane") + monkeypatch.setattr(hookenv, "service_name", hsn) + + gnn = mock.Mock(return_value="the-node") + monkeypatch.setattr(kubernetes_node_base, "get_node_name", gnn) + + mock_call = self.call = mock.Mock(return_value=0) + monkeypatch.setattr(kubernetes_node_base, "call", mock_call) + + self.base_node_cmd = [ + "kubectl", + "--kubeconfig=/path/to/kube/config", + "label", + "node", + "the-node", + ] + + def test_label_add(self, request): + label_maker = kubernetes_node_base.LabelMaker("/path/to/kube/config") + label_maker.apply_node_labels() + + call_set = [ + mock.call(self.base_node_cmd + expected) + for expected in [ + [f'{request.node.name}="value"', "--overwrite"], + ["juju-application=kubernetes-control-plane", "--overwrite"], + ["juju.io/cloud-"], + ] + ] + self.call.assert_has_calls(call_set, any_order=False) + + def test_invalid_label(self): + self.config = {"labels": "too=many=equals not_enough_equals"} + label_maker = kubernetes_node_base.LabelMaker("/path/to/kube/config") + label_maker.apply_node_labels() + call_set = [ + mock.call(self.base_node_cmd + expected) + for expected in [ + ["juju-application=kubernetes-control-plane", "--overwrite"], + ["juju.io/cloud-"], + ] + ] + self.call.assert_has_calls(call_set, any_order=False) + + call_set = [ + mock.call("Skipping malformed option: too=many=equals."), + mock.call("Skipping malformed option: not_enough_equals."), + ] + self.hook_log.assert_has_calls(call_set, any_order=False) diff --git a/kubernetes-worker/tox.ini b/kubernetes-worker/tox.ini new file mode 100644 index 0000000..1ced265 --- /dev/null +++ b/kubernetes-worker/tox.ini @@ -0,0 +1,30 @@ +[flake8] +max-line-length = 88 +ignore = + W503 # line break before binary operator + +[tox] +skipsdist = True +envlist = lint,unit + +[testenv] +setenv = + 
PYTHONPATH={toxinidir}:{toxinidir}/lib + PYTHONBREAKPOINT=ipdb.set_trace +passenv = OS_* TEST_* + +[testenv:unit] +deps = + pyyaml + pytest + charms.unit_test + ipdb +commands = pytest --tb native -s {posargs} {toxinidir}/tests/unit + +[testenv:lint] +deps = + black + flake8 +commands = + flake8 {toxinidir}/lib {toxinidir}/reactive {toxinidir}/tests + black --check {toxinidir}/lib {toxinidir}/reactive {toxinidir}/tests diff --git a/kubernetes-worker/version b/kubernetes-worker/version new file mode 100644 index 0000000..a43c25d --- /dev/null +++ b/kubernetes-worker/version @@ -0,0 +1 @@ +1.24+ck1 \ No newline at end of file diff --git a/kubernetes-worker/wheelhouse.txt b/kubernetes-worker/wheelhouse.txt new file mode 100644 index 0000000..2855194 --- /dev/null +++ b/kubernetes-worker/wheelhouse.txt @@ -0,0 +1,39 @@ +# layer:basic +# pip is pinned to <19.0 to avoid https://github.com/pypa/pip/issues/6164 +# even with installing setuptools before upgrading pip ends up with pip seeing +# the older setuptools at the system level if include_system_packages is true +pip>=18.1,<19.0;python_version < '3.8' +pip;python_version >= '3.8' +# pin Jinja2, PyYAML and MarkupSafe to the last versions supporting python 3.5 +# for trusty +Jinja2==2.10;python_version >= '3.0' and python_version <= '3.4' # py3 trusty +Jinja2==2.11;python_version == '2.7' or python_version == '3.5' # py27, py35 +Jinja2;python_version >= '3.6' # py36 and on + +PyYAML==5.2;python_version >= '3.0' and python_version <= '3.4' # py3 trusty +PyYAML<5.4;python_version == '2.7' or python_version >= '3.5' # all else + +MarkupSafe<2.0.0;python_version < '3.6' +MarkupSafe<2.1.0;python_version == '3.6' # Just for python 3.6 +MarkupSafe;python_version >= '3.7' # newer pythons + +setuptools<42;python_version < '3.8' +setuptools;python_version >= '3.8' +setuptools-scm<=1.17.0;python_version < '3.8' +setuptools-scm;python_version >= '3.8' +flit_core;python_version >= '3.8' +charmhelpers>=0.4.0,<2.0.0 
+charms.reactive>=0.1.0,<2.0.0 +wheel<0.34;python_version < '3.8' +wheel;python_version >= '3.8' +# pin netaddr to avoid pulling importlib-resources +netaddr<=0.7.19 + +# layer:snap +# Newer versions of tenacity rely on `typing` which is in stdlib in +# python3.5 but not python3.4. We want to continue to support +# python3.4 (Trusty) +tenacity<5.0.4 + +# kubernetes-worker + diff --git a/kubernetes-worker/wheelhouse/Jinja2-3.0.3.tar.gz b/kubernetes-worker/wheelhouse/Jinja2-3.0.3.tar.gz new file mode 100644 index 0000000..cb150bc Binary files /dev/null and b/kubernetes-worker/wheelhouse/Jinja2-3.0.3.tar.gz differ diff --git a/kubernetes-worker/wheelhouse/MarkupSafe-2.0.1.tar.gz b/kubernetes-worker/wheelhouse/MarkupSafe-2.0.1.tar.gz new file mode 100644 index 0000000..7a37fc9 Binary files /dev/null and b/kubernetes-worker/wheelhouse/MarkupSafe-2.0.1.tar.gz differ diff --git a/kubernetes-worker/wheelhouse/PyYAML-5.3.1.tar.gz b/kubernetes-worker/wheelhouse/PyYAML-5.3.1.tar.gz new file mode 100644 index 0000000..915d67b Binary files /dev/null and b/kubernetes-worker/wheelhouse/PyYAML-5.3.1.tar.gz differ diff --git a/kubernetes-worker/wheelhouse/charmhelpers-1.2.1.tar.gz b/kubernetes-worker/wheelhouse/charmhelpers-1.2.1.tar.gz new file mode 100644 index 0000000..78f281b Binary files /dev/null and b/kubernetes-worker/wheelhouse/charmhelpers-1.2.1.tar.gz differ diff --git a/kubernetes-worker/wheelhouse/charms.reactive-1.5.0.tar.gz b/kubernetes-worker/wheelhouse/charms.reactive-1.5.0.tar.gz new file mode 100644 index 0000000..3d6c57b Binary files /dev/null and b/kubernetes-worker/wheelhouse/charms.reactive-1.5.0.tar.gz differ diff --git a/kubernetes-worker/wheelhouse/netaddr-0.7.19.tar.gz b/kubernetes-worker/wheelhouse/netaddr-0.7.19.tar.gz new file mode 100644 index 0000000..cc31d9d Binary files /dev/null and b/kubernetes-worker/wheelhouse/netaddr-0.7.19.tar.gz differ diff --git a/kubernetes-worker/wheelhouse/pbr-5.9.0.tar.gz b/kubernetes-worker/wheelhouse/pbr-5.9.0.tar.gz 
new file mode 100644 index 0000000..9c46601 Binary files /dev/null and b/kubernetes-worker/wheelhouse/pbr-5.9.0.tar.gz differ diff --git a/kubernetes-worker/wheelhouse/pip-18.1.tar.gz b/kubernetes-worker/wheelhouse/pip-18.1.tar.gz new file mode 100644 index 0000000..a18192d Binary files /dev/null and b/kubernetes-worker/wheelhouse/pip-18.1.tar.gz differ diff --git a/kubernetes-worker/wheelhouse/pyaml-21.10.1.tar.gz b/kubernetes-worker/wheelhouse/pyaml-21.10.1.tar.gz new file mode 100644 index 0000000..b19aad3 Binary files /dev/null and b/kubernetes-worker/wheelhouse/pyaml-21.10.1.tar.gz differ diff --git a/kubernetes-worker/wheelhouse/setuptools-41.6.0.zip b/kubernetes-worker/wheelhouse/setuptools-41.6.0.zip new file mode 100644 index 0000000..3345759 Binary files /dev/null and b/kubernetes-worker/wheelhouse/setuptools-41.6.0.zip differ diff --git a/kubernetes-worker/wheelhouse/setuptools_scm-1.17.0.tar.gz b/kubernetes-worker/wheelhouse/setuptools_scm-1.17.0.tar.gz new file mode 100644 index 0000000..43b16c7 Binary files /dev/null and b/kubernetes-worker/wheelhouse/setuptools_scm-1.17.0.tar.gz differ diff --git a/kubernetes-worker/wheelhouse/six-1.16.0.tar.gz b/kubernetes-worker/wheelhouse/six-1.16.0.tar.gz new file mode 100644 index 0000000..5bf3a27 Binary files /dev/null and b/kubernetes-worker/wheelhouse/six-1.16.0.tar.gz differ diff --git a/kubernetes-worker/wheelhouse/tenacity-5.0.3.tar.gz b/kubernetes-worker/wheelhouse/tenacity-5.0.3.tar.gz new file mode 100644 index 0000000..c7d05ba Binary files /dev/null and b/kubernetes-worker/wheelhouse/tenacity-5.0.3.tar.gz differ diff --git a/kubernetes-worker/wheelhouse/wheel-0.33.6.tar.gz b/kubernetes-worker/wheelhouse/wheel-0.33.6.tar.gz new file mode 100644 index 0000000..c922c4e Binary files /dev/null and b/kubernetes-worker/wheelhouse/wheel-0.33.6.tar.gz differ