diff --git a/ReadME.MD b/ReadME.MD index c9a7fa1..35cf1d9 100644 --- a/ReadME.MD +++ b/ReadME.MD @@ -1,19 +1,20 @@ -# Kubernetes 1.21 -cs:~containers/charmed-kubernetes-657 +# Kubernetes 1.22 +cs:~containers/charmed-kubernetes-814 ```Bash -charm pull cs:~containers/etcd-583 -charm pull cs:~containers/easyrsa-373 -charm pull cs:~containers/kubernetes-master-990 -charm pull cs:~containers/kubernetes-worker-757 -charm pull cs:~containers/containerd-119 -charm pull cs:~containers/kata-108 -charm pull cs:~containers/calico-812 +charm pull cs:~containers/etcd-633 +charm pull cs:~containers/easyrsa-419 +charm pull cs:~containers/kubernetes-master-1077 +charm pull cs:~containers/kubernetes-worker-815 +charm pull cs:~containers/containerd-177 +charm pull cs:~containers/kata-138 +charm pull cs:~containers/calico-838 +charm pull cs:~containers/flannel-596 # Extend -charm pull cs:~containers/kubeapi-load-balancer-786 -charm pull cs:~containers/keepalived-85 +charm pull cs:~containers/kubeapi-load-balancer-843 +charm pull cs:~containers/keepalived-110 charm pull cs:~containers/coredns-20 +# Other charm pull cs:~containers/ubuntu-20 charm pull cs:~containers/nrpe-75 -``` - +``` \ No newline at end of file diff --git a/calico/.build.manifest b/calico/.build.manifest index 0471d71..8fdefc8 100644 --- a/calico/.build.manifest +++ b/calico/.build.manifest @@ -6,8 +6,8 @@ "url": "layer:options" }, { - "branch": "refs/heads/stable", - "rev": "0d10732a6e14ea2f940a35ab61425a97c5db6a16", + "branch": "refs/heads/master\nrefs/heads/stable", + "rev": "a3ff62c32c993d80417f6e093e3ef95e42f62083", "url": "layer:basic" }, { @@ -20,9 +20,14 @@ "rev": "a7d7b6423db37a47611310039e6ed1929c0a2eab", "url": "layer:status" }, + { + "branch": "refs/heads/master\nrefs/heads/stable", + "rev": "bbeabfee52c4442cdaf3a34e5e35530a3bd71156", + "url": "layer:kubernetes-common" + }, { "branch": "refs/heads/stable", - "rev": "63c6d240f29b0366c3839dacd4e25d63a368da36", + "rev": 
"96b4e06d5d35fec30cdf2cc25076dd25c51b893c", "url": "calico" }, { @@ -32,7 +37,7 @@ }, { "branch": "refs/heads/master\nrefs/heads/stable", - "rev": "b941b3b542d78ad15aa40937b26c7bf727e1b39b", + "rev": "88b1e8fad78d06efdbf512cd75eaa0bb308eb1c1", "url": "interface:kubernetes-cni" } ], @@ -42,26 +47,21 @@ "dynamic", "unchecked" ], - ".github/workflows/build.yml": [ - "calico", + ".github/workflows/main.yml": [ + "layer:kubernetes-common", "static", - "4892e4eb72fb0d0efaa1c6b62f8f132cc69ea2b967c9604c91d4f16e0ec6e26b" + "d4f8fec0456cb2fc05993253a995983488a76fbbef10c2ee40649e83d6c9e078" ], ".github/workflows/tox.yaml": [ "calico", "static", - "8de54f40fc8e9385b79ed8d19e6ea765bdd6c48185fbd8bd7142834990982d45" + "8b7dba2bd100fc3dfce764499b0eba1799b58469701b032b238cb1d0055c44bb" ], ".gitignore": [ "calico", "static", "3437c2cd90de443f44766939172b82e750e19fd474df499ffe003bb807e8cef4" ], - ".travis/profile-update.yaml": [ - "layer:basic", - "static", - "731e20aa59bf61c024d317ad630e478301a9386ccc0afe56e6c1c09db07ac83b" - ], "CONTRIBUTING.md": [ "calico", "static", @@ -78,9 +78,9 @@ "58d1e17ffe5109a7ae296caafcadfdbe6a7d176f0bc4ab01e12a689b0499d8bd" ], "Makefile": [ - "calico", + "layer:basic", "static", - "d49436a9eb35598691285b00e6a678ad74e391a818d55989116e264f40fcd9e6" + "b7ab3a34e5faf79b96a8632039a0ad0aa87f2a9b5f0ba604e007cafb22190301" ], "README.md": [ "calico", @@ -105,7 +105,7 @@ "config.yaml": [ "calico", "dynamic", - "c6014840f64c5c4cab24fa54735832e36ecd11de15ab6e34ecedf5839feca695" + "d75dd7b4ddd803d88c5d86b14826fa7f047b8e6907885cafe37cda29afb3c13d" ], "copyright": [ "layer:status", @@ -257,15 +257,15 @@ "static", "8ffc1a094807fd36a1d1428b0a07b2428074134d46086066ecd6c0acd9fcd13e" ], + "hooks/relations/kubernetes-cni/.github/workflows/tests.yaml": [ + "interface:kubernetes-cni", + "static", + "d0015cd49675976ff87832f5ef7ea20ffca961786379c72bb6acdbdeddd9137c" + ], "hooks/relations/kubernetes-cni/.gitignore": [ "interface:kubernetes-cni", "static", - 
"cf237c7aff44efbe6e502e645c3e06da03a69d7bdeb43392108ef3348143417e" - ], - "hooks/relations/kubernetes-cni/.travis.yml": [ - "interface:kubernetes-cni", - "static", - "c2bd1b88f26c88b883696cca155c28671359a256ed48b90a9ea724b376f2a829" + "0594213ebf9c6ef87827b30405ee67d847f73f4185a865e0e5e9c0be9d29eabe" ], "hooks/relations/kubernetes-cni/README.md": [ "interface:kubernetes-cni", @@ -285,17 +285,17 @@ "hooks/relations/kubernetes-cni/provides.py": [ "interface:kubernetes-cni", "static", - "4c3fc3f06a42a2f67fc03c4bc1b4c617021dc1ebb7111527ce6d9cd523b0c40e" + "e436e187f2bab6e73add2b897cd43a2f000fde4726e40b772b66f27786c85dee" ], "hooks/relations/kubernetes-cni/requires.py": [ "interface:kubernetes-cni", "static", - "c5fdd7a0eae100833ae6c79474f931803466cd5b206cf8f456cd6f2716d1d2fa" + "45398af27246eaf2005115bd3f270b78fc830d4345b02cc0c4d438711b7cd9fe" ], "hooks/relations/kubernetes-cni/tox.ini": [ "interface:kubernetes-cni", "static", - "bf0fb0883583bb3deebd17e7ebd4599d9f3770c19a6fc7683044654b6e982c90" + "f08626c9b65362031edb07f96f15f101bc3dda075abc64f54d1c83efd2c05e39" ], "hooks/start": [ "layer:basic", @@ -325,7 +325,7 @@ "layer.yaml": [ "calico", "dynamic", - "8547f11913f564feb1ca4f6674788385e237b4d8d1939c5a8675c6bbb4f1d8e3" + "3a95aaa6fd50027d9a98ad9322568cfb0c228135df7cbff79953a86d01ec533f" ], "lib/calico_common.py": [ "calico", @@ -345,13 +345,18 @@ "lib/charms/layer/basic.py": [ "layer:basic", "static", - "3126b5754ad39402ee27e64527044ddd231ed1cd137fcedaffb51e63a635f108" + "98b47134770ed6e4c0b2d4aad73cd5bc200bec84aa9c1c4e075fd70c3222a0c9" ], "lib/charms/layer/execd.py": [ "layer:basic", "static", "fda8bd491032db1db8ddaf4e99e7cc878c6fb5432efe1f91cadb5b34765d076d" ], + "lib/charms/layer/kubernetes_common.py": [ + "layer:kubernetes-common", + "static", + "29cedffd490e6295273d195a7c9bace2fcdf149826e7427f2af9698f7f75055b" + ], "lib/charms/layer/options.py": [ "layer:options", "static", @@ -390,7 +395,7 @@ "reactive/calico.py": [ "calico", "static", - 
"3037c342634848aca03bb3a8b818102ae13e4d82942e1c8f8761c8465b808e14" + "6b8bef93b474c95bab4d9df09b74b8082f230fa5e906b469fae66baa319472ad" ], "reactive/leadership.py": [ "layer:leadership", @@ -407,21 +412,6 @@ "static", "a00f75d80849e5b4fc5ad2e7536f947c25b1a4044b341caa8ee87a92d3a4c804" ], - "script/bootstrap": [ - "calico", - "static", - "1985d9a07e8d764351530f6eb1b81bef6a4c035dc75422c03f4672ceaf1a4c18" - ], - "script/build": [ - "calico", - "static", - "e78cab1bead2e3c8f7970558f4d08a81f6cc59e5c2903e997644f7e51e7a3633" - ], - "script/upload": [ - "calico", - "static", - "db3cd04f1d4c2a2be12becb8d62bf879701cbca3da0d458b4c362439b32ebfc1" - ], "templates/10-calico.conflist": [ "calico", "static", @@ -430,47 +420,97 @@ "templates/calico-node.service": [ "calico", "static", - "beae0c32a25f911a37363064af7bfa96a39f14ab99b3060412491382a81ddaa7" + "cc80a397a77f7d80740c697fcdaffd373790492b01959649587345bdcfe44fe3" ], "templates/calicoctl": [ "calico", "static", "b913dfdce8de51aa9a13950817e4101f8f4229052927a272fff5b37a4370537f" ], + "templates/cdk.auth-webhook-secret.yaml": [ + "layer:kubernetes-common", + "static", + "efaf34c12a5c961fa7843199070945ba05717b3656a0f3acc3327f45334bcaec" + ], "templates/policy-controller.yaml": [ "calico", "static", - "3bd0f0f714a8c7f418fdb7556f10097d963dbf0c6232a41606163c30022f0e9e" + "427820ac4957c60306b3084c4426ecc84af34dc2b2a8f7c0d70242e53c27957c" ], - "tests/00-setup": [ + "tests/data/bird-operator/config.yaml": [ "calico", "static", - "111c079b81d260bbcd716dcf41672372a4cf4aaa14154b6c3055deeedae37a06" + "4786605f043192ab2970b7abd55c434620463248e2840a7d25a9ca31d913b416" ], - "tests/10-deploy": [ + "tests/data/bird-operator/metadata.yaml": [ "calico", "static", - "e895f7720cd0ce3956082054f15b0cebce683aa44f66bb32038bab1e693bf74f" + "aff75a91343249cb86978512609d0e00c9d6271664b18eeed9e4ef415bd22937" ], - "tests/conftest.py": [ + "tests/data/bird-operator/requirements.txt": [ + "calico", + "static", + 
"7a70b4e7059a7d283c883288be3de0bed02d10fda4602c8de4699debcf6afbf2" + ], + "tests/data/bird-operator/src/charm.py": [ + "calico", + "static", + "8e0374bf6e887604e3ede4ba33d37cf0e43202b653cb3945cefff0d2a33e7a0c" + ], + "tests/data/bundle.yaml": [ + "calico", + "static", + "0bfb15407e4ac33c87718e20493c0eec3979d8658db85d4f38620b9fca4408bd" + ], + "tests/functional/conftest.py": [ + "layer:kubernetes-common", + "static", + "fd53e0c38b4dda0c18096167889cd0d85b98b0a13225f9f8853261241e94078c" + ], + "tests/functional/test_k8s_common.py": [ + "layer:kubernetes-common", + "static", + "680a53724154771dd78422bbaf24b151788d86dd07960712c5d9e0d758499b50" + ], + "tests/integration/conftest.py": [ + "calico", + "static", + "9069857cdd09798df7813cec38c1147938e9affb777d971c6b8ff3405fa726ff" + ], + "tests/integration/test_calico_integration.py": [ + "calico", + "static", + "65ce21b694059e6cf6b39d28cba16252a34322b77b5b33050fd46482f60d07fd" + ], + "tests/unit/conftest.py": [ "calico", "static", "2c58cb11bf276805f586c05c20bf4ba15a7431b5c23ea3323dc4256ddc34c4d2" ], - "tests/test_calico.py": [ + "tests/unit/test_calico.py": [ "calico", "static", "2de748d396d66f5c581ade110a3f8a709e6aabe50f97502e1d0ac0ec817c223d" ], + "tests/unit/test_k8s_common.py": [ + "layer:kubernetes-common", + "static", + "da9bcea8e75160311a4055c1cbf577b497ddd45dc00223c5f1667598f94d9be4" + ], + "tests/validate-wheelhouse.sh": [ + "calico", + "static", + "cdfd66832b110243b6fd165a75562d9b958f9741b334be2d3a7a1d05adfa6fe7" + ], "tox.ini": [ "calico", "static", - "1ce2114e5084c1f5bc99f1768c0566f77b8216166974de3b17c47e97b54aba7d" + "a96563719d29a96d41a0e91ef08da35b5e1de5aee2d5884c74d85dca7f43f2d2" ], "version": [ "calico", "dynamic", - "44a751fcf4d3ba30169f70f2b7b84b9cfc381b6f514c41fe4d3ef8afe2ff9086" + "d42cce56c73a1877421efe5be4d1e7e914a99ce4e1e4b0143bd97ea895c7c629" ], "wheelhouse.txt": [ "calico", @@ -497,10 +537,10 @@ "dynamic", "cacecf0baa674d356641f1d406b8bff1d756d739c46b869a54de515d08e6fc9c" ], - 
"wheelhouse/charmhelpers-0.20.22.tar.gz": [ + "wheelhouse/charmhelpers-0.20.23.tar.gz": [ "layer:basic", "dynamic", - "b7550108118ce4f87488343384441797777d0da746e1346ed4e6361b4eab0ddb" + "59a9776594e91cd3e3e000043f8668b4d7b279422dbb17e320f01dc16385b80e" ], "wheelhouse/charms.reactive-1.4.1.tar.gz": [ "layer:basic", @@ -532,10 +572,10 @@ "dynamic", "c0a292bd977ef590379a3f05d7b7f65135487b67470f6281289a94e015650ea1" ], - "wheelhouse/pyaml-20.4.0.tar.gz": [ + "wheelhouse/pyaml-21.10.1.tar.gz": [ "__pip__", "dynamic", - "29a5c2a68660a799103d6949167bd6c7953d031449d08802386372de1db6ad71" + "c6519fee13bf06e3bb3f20cacdea8eba9140385a7c2546df5dbae4887f768383" ], "wheelhouse/setuptools-41.6.0.zip": [ "layer:basic", diff --git a/calico/.github/workflows/build.yml b/calico/.github/workflows/build.yml deleted file mode 100644 index 043ccad..0000000 --- a/calico/.github/workflows/build.yml +++ /dev/null @@ -1,16 +0,0 @@ -name: Builds calico charm -on: [push, pull_request] - -jobs: - build: - name: Build charm - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v2 - - name: Setup Python 3.8 - uses: actions/setup-python@v2 - with: - python-version: '3.8' - - name: Run build - run: | - make charm diff --git a/calico/.github/workflows/main.yml b/calico/.github/workflows/main.yml new file mode 100644 index 0000000..6768aef --- /dev/null +++ b/calico/.github/workflows/main.yml @@ -0,0 +1,22 @@ +name: Test Suite +on: [pull_request] + +jobs: + tests: + name: Lint, Unit, & Func Tests + runs-on: ubuntu-latest + strategy: + matrix: + python: [3.6, 3.7, 3.8, 3.9] + steps: + - name: Check out code + uses: actions/checkout@v2 + - name: Setup Python + uses: actions/setup-python@v2 + with: + python-version: ${{ matrix.python }} + - name: Install Dependencies + run: | + pip install tox + - name: Run lint + run: tox diff --git a/calico/.github/workflows/tox.yaml b/calico/.github/workflows/tox.yaml index d43940d..4ecc315 100644 --- a/calico/.github/workflows/tox.yaml +++ 
b/calico/.github/workflows/tox.yaml @@ -1,22 +1,52 @@ name: Run tests with Tox -on: [push] +on: + push: + branches: [master] + pull_request: + branches: [master] jobs: - build: - + lint-unit-wheelhouse: + name: Lint, Unit, Wheelhouse runs-on: ubuntu-latest strategy: matrix: python: [3.6, 3.7, 3.8, 3.9] - steps: - - uses: actions/checkout@v2 + - name: Check out code + uses: actions/checkout@v2 + - name: Setup Python + uses: actions/setup-python@v2 + with: + python-version: ${{ matrix.python }} + - name: Install Dependencies + run: | + pip install tox + sudo snap install charm --classic + - name: Lint + run: tox -vve lint + - name: Unit Tests + run: tox -vve unit + - name: Validate Wheelhouse + run: tox -vve validate-wheelhouse + integration-test: + name: Integration test with VMWare + runs-on: self-hosted + timeout-minutes: 360 + steps: + - name: Check out code + uses: actions/checkout@v2 - name: Setup Python - uses: actions/setup-python@v1 + uses: actions/setup-python@v2 with: - python-version: ${{ matrix.python }} - - name: Install Tox and any other packages - run: pip install tox - - name: Run Tox - run: tox -e py # Run tox using the version of Python in `PATH` + python-version: 3.8 + - name: Setup operator environment + uses: charmed-kubernetes/actions-operator@master + with: + provider: vsphere + credentials-yaml: ${{ secrets.CREDENTIALS_YAML }} + clouds-yaml: ${{ secrets.CLOUDS_YAML }} + bootstrap-options: "--model-default datastore=vsanDatastore --model-default primary-network=VLAN_2764" + - name: Run test + run: tox -e integration diff --git a/calico/.travis/profile-update.yaml b/calico/.travis/profile-update.yaml deleted file mode 100644 index 57f96eb..0000000 --- a/calico/.travis/profile-update.yaml +++ /dev/null @@ -1,12 +0,0 @@ -config: {} -description: Default LXD profile - updated -devices: - eth0: - name: eth0 - parent: lxdbr0 - nictype: bridged - type: nic - root: - path: / - pool: default - type: disk diff --git a/calico/Makefile b/calico/Makefile 
index 9348753..a1ad3a5 100644 --- a/calico/Makefile +++ b/calico/Makefile @@ -1,18 +1,24 @@ -CHANNEL ?= unpublished -CHARM := calico +#!/usr/bin/make -setup-env: - bash script/bootstrap +all: lint unit_test -charm: setup-env - bash script/build -upload: -ifndef NAMESPACE - $(error NAMESPACE is not set) -endif +.PHONY: clean +clean: + @rm -rf .tox - env CHARM=$(CHARM) NAMESPACE=$(NAMESPACE) CHANNEL=$(CHANNEL) bash script/upload +.PHONY: apt_prereqs +apt_prereqs: + @# Need tox, but don't install the apt version unless we have to (don't want to conflict with pip) + @which tox >/dev/null || (sudo apt-get install -y python-pip && sudo pip install tox) -.phony: charm upload setup-env -all: charm +.PHONY: lint +lint: apt_prereqs + @tox --notest + @PATH=.tox/py34/bin:.tox/py35/bin flake8 $(wildcard hooks reactive lib unit_tests tests) + @charm proof + +.PHONY: unit_test +unit_test: apt_prereqs + @echo Starting tests... + tox diff --git a/calico/config.yaml b/calico/config.yaml index f0b04bc..ba4f479 100644 --- a/calico/config.yaml +++ b/calico/config.yaml @@ -1,4 +1,22 @@ "options": + "bgp-service-cluster-ips": + "type": "string" + "description": | + Space-separated list of service cluster CIDRs to advertise over BGP. + These will be passed to the .spec.serviceClusterIPs field of the default + BGPConfiguration in Calico. + + Example value: ”10.0.0.0/24 10.0.1.0/24” + "default": "" + "bgp-service-external-ips": + "type": "string" + "description": | + Space-separated list of service external CIDRs to advertise over BGP. + These will be passed to the .spec.serviceExternalIPs field of the default + BGPConfiguration in Calico. + + Example value: ”10.0.0.0/24 10.0.1.0/24” + "default": "" "calico-node-image": "type": "string" # Please refer to layer-canal/versioning.md before changing the version below. 
diff --git a/calico/hooks/relations/kubernetes-cni/.github/workflows/tests.yaml b/calico/hooks/relations/kubernetes-cni/.github/workflows/tests.yaml new file mode 100644 index 0000000..9801450 --- /dev/null +++ b/calico/hooks/relations/kubernetes-cni/.github/workflows/tests.yaml @@ -0,0 +1,24 @@ +name: Test Suite for K8s Service Interface + +on: + - pull_request + +jobs: + lint-and-unit-tests: + name: Lint & Unit tests + runs-on: ubuntu-latest + strategy: + matrix: + python: [3.6, 3.7, 3.8, 3.9] + steps: + - name: Check out code + uses: actions/checkout@v2 + - name: Setup Python + uses: actions/setup-python@v2 + with: + python-version: ${{ matrix.python }} + - name: Install Tox + run: pip install tox + - name: Run lint & unit tests + run: tox + diff --git a/calico/hooks/relations/kubernetes-cni/.gitignore b/calico/hooks/relations/kubernetes-cni/.gitignore index e43b0f9..8d150f3 100644 --- a/calico/hooks/relations/kubernetes-cni/.gitignore +++ b/calico/hooks/relations/kubernetes-cni/.gitignore @@ -1 +1,4 @@ .DS_Store +.tox +__pycache__ +*.pyc diff --git a/calico/hooks/relations/kubernetes-cni/provides.py b/calico/hooks/relations/kubernetes-cni/provides.py index 0b4aada..9095c19 100644 --- a/calico/hooks/relations/kubernetes-cni/provides.py +++ b/calico/hooks/relations/kubernetes-cni/provides.py @@ -1,48 +1,46 @@ #!/usr/bin/python from charmhelpers.core import hookenv +from charmhelpers.core.host import file_hash +from charms.layer.kubernetes_common import kubeclientconfig_path from charms.reactive import Endpoint from charms.reactive import toggle_flag, is_flag_set, clear_flag, set_flag class CNIPluginProvider(Endpoint): def manage_flags(self): - toggle_flag(self.expand_name('{endpoint_name}.connected'), - self.is_joined) - toggle_flag(self.expand_name('{endpoint_name}.available'), - self.config_available()) - if is_flag_set(self.expand_name('endpoint.{endpoint_name}.changed')): - clear_flag(self.expand_name('{endpoint_name}.configured')) - 
clear_flag(self.expand_name('endpoint.{endpoint_name}.changed')) + toggle_flag(self.expand_name("{endpoint_name}.connected"), self.is_joined) + toggle_flag( + self.expand_name("{endpoint_name}.available"), self.config_available() + ) + if is_flag_set(self.expand_name("endpoint.{endpoint_name}.changed")): + clear_flag(self.expand_name("{endpoint_name}.configured")) + clear_flag(self.expand_name("endpoint.{endpoint_name}.changed")) - def set_config(self, is_master, kubeconfig_path): - ''' Relays a dict of kubernetes configuration information. ''' + def set_config(self, is_master): + """Relays a dict of kubernetes configuration information.""" for relation in self.relations: - relation.to_publish_raw.update({ - 'is_master': is_master, - 'kubeconfig_path': kubeconfig_path - }) - set_flag(self.expand_name('{endpoint_name}.configured')) + relation.to_publish_raw.update({"is_master": is_master}) + set_flag(self.expand_name("{endpoint_name}.configured")) def config_available(self): - ''' Ensures all config from the CNI plugin is available. ''' + """Ensures all config from the CNI plugin is available.""" goal_state = hookenv.goal_state() related_apps = [ - app for app in goal_state.get('relations', {}).get(self.endpoint_name, '') - if '/' not in app + app + for app in goal_state.get("relations", {}).get(self.endpoint_name, "") + if "/" not in app ] if not related_apps: return False configs = self.get_configs() return all( - 'cidr' in config and 'cni-conf-file' in config - for config in [ - configs.get(related_app, {}) for related_app in related_apps - ] + "cidr" in config and "cni-conf-file" in config + for config in [configs.get(related_app, {}) for related_app in related_apps] ) def get_config(self, default=None): - ''' Get CNI config for one related application. + """Get CNI config for one related application. If default is specified, and there is a related application with a matching name, then that application is chosen. 
Otherwise, the @@ -50,13 +48,13 @@ class CNIPluginProvider(Endpoint): Whichever application is chosen, that application's CNI config is returned. - ''' + """ configs = self.get_configs() if not configs: return {} elif default and default not in configs: - msg = 'relation not found for default CNI %s, ignoring' % default - hookenv.log(msg, level='WARN') + msg = "relation not found for default CNI %s, ignoring" % default + hookenv.log(msg, level="WARN") return self.get_config() elif default: return configs.get(default, {}) @@ -64,7 +62,7 @@ class CNIPluginProvider(Endpoint): return configs.get(sorted(configs)[0], {}) def get_configs(self): - ''' Get CNI configs for all related applications. + """Get CNI configs for all related applications. This returns a mapping of application names to CNI configs. Here's an example return value: @@ -78,8 +76,14 @@ class CNIPluginProvider(Endpoint): 'cni-conf-file': '10-calico.conflist' } } - ''' + """ return { relation.application_name: relation.joined_units.received_raw - for relation in self.relations if relation.application_name + for relation in self.relations + if relation.application_name } + + def notify_kubeconfig_changed(self): + kubeconfig_hash = file_hash(kubeclientconfig_path) + for relation in self.relations: + relation.to_publish_raw.update({"kubeconfig-hash": kubeconfig_hash}) diff --git a/calico/hooks/relations/kubernetes-cni/requires.py b/calico/hooks/relations/kubernetes-cni/requires.py index 039b912..2067826 100644 --- a/calico/hooks/relations/kubernetes-cni/requires.py +++ b/calico/hooks/relations/kubernetes-cni/requires.py @@ -1,45 +1,54 @@ #!/usr/bin/python +from charmhelpers.core import unitdata from charms.reactive import Endpoint from charms.reactive import when_any, when_not from charms.reactive import set_state, remove_state +db = unitdata.kv() + class CNIPluginClient(Endpoint): + def manage_flags(self): + kubeconfig_hash = self.get_config().get("kubeconfig-hash") + kubeconfig_hash_key = 
self.expand_name("{endpoint_name}.kubeconfig-hash") + if kubeconfig_hash: + set_state(self.expand_name("{endpoint_name}.kubeconfig.available")) + if kubeconfig_hash != db.get(kubeconfig_hash_key): + set_state(self.expand_name("{endpoint_name}.kubeconfig.changed")) + db.set(kubeconfig_hash_key, kubeconfig_hash) - @when_any('endpoint.{endpoint_name}.joined', - 'endpoint.{endpoint_name}.changed') + @when_any("endpoint.{endpoint_name}.joined", "endpoint.{endpoint_name}.changed") def changed(self): - ''' Indicate the relation is connected, and if the relation data is - set it is also available. ''' - set_state(self.expand_name('{endpoint_name}.connected')) + """Indicate the relation is connected, and if the relation data is + set it is also available.""" + set_state(self.expand_name("{endpoint_name}.connected")) config = self.get_config() - if config['is_master'] == 'True': - set_state(self.expand_name('{endpoint_name}.is-master')) - set_state(self.expand_name('{endpoint_name}.configured')) - elif config['is_master'] == 'False': - set_state(self.expand_name('{endpoint_name}.is-worker')) - set_state(self.expand_name('{endpoint_name}.configured')) + if config["is_master"] == "True": + set_state(self.expand_name("{endpoint_name}.is-master")) + set_state(self.expand_name("{endpoint_name}.configured")) + elif config["is_master"] == "False": + set_state(self.expand_name("{endpoint_name}.is-worker")) + set_state(self.expand_name("{endpoint_name}.configured")) else: - remove_state(self.expand_name('{endpoint_name}.configured')) - remove_state(self.expand_name('endpoint.{endpoint_name}.changed')) + remove_state(self.expand_name("{endpoint_name}.configured")) + remove_state(self.expand_name("endpoint.{endpoint_name}.changed")) - @when_not('endpoint.{endpoint_name}.joined') + @when_not("endpoint.{endpoint_name}.joined") def broken(self): - ''' Indicate the relation is no longer available and not connected. 
''' - remove_state(self.expand_name('{endpoint_name}.connected')) - remove_state(self.expand_name('{endpoint_name}.is-master')) - remove_state(self.expand_name('{endpoint_name}.is-worker')) - remove_state(self.expand_name('{endpoint_name}.configured')) + """Indicate the relation is no longer available and not connected.""" + remove_state(self.expand_name("{endpoint_name}.connected")) + remove_state(self.expand_name("{endpoint_name}.is-master")) + remove_state(self.expand_name("{endpoint_name}.is-worker")) + remove_state(self.expand_name("{endpoint_name}.configured")) def get_config(self): - ''' Get the kubernetes configuration information. ''' + """Get the kubernetes configuration information.""" return self.all_joined_units.received_raw def set_config(self, cidr, cni_conf_file): - ''' Sets the CNI configuration information. ''' + """Sets the CNI configuration information.""" for relation in self.relations: - relation.to_publish_raw.update({ - 'cidr': cidr, - 'cni-conf-file': cni_conf_file - }) + relation.to_publish_raw.update( + {"cidr": cidr, "cni-conf-file": cni_conf_file} + ) diff --git a/calico/hooks/relations/kubernetes-cni/tox.ini b/calico/hooks/relations/kubernetes-cni/tox.ini index 077622b..69ab91a 100644 --- a/calico/hooks/relations/kubernetes-cni/tox.ini +++ b/calico/hooks/relations/kubernetes-cni/tox.ini @@ -2,22 +2,26 @@ skipsdist = True envlist = lint,py3 -[tox:travis] -3.5: lint,py3 -3.6: lint,py3 -3.7: lint,py3 - [testenv] basepython = python3 setenv = PYTHONPATH={toxinidir}:{toxinidir}/lib + PYTHONBREAKPOINT=ipdb.set_trace deps = pyyaml pytest flake8 + black ipdb + charms.unit_test commands = pytest --tb native -s {posargs} [testenv:lint] envdir = {toxworkdir}/py3 -commands = flake8 --max-line-length=88 {toxinidir} +commands = + flake8 {toxinidir} + black --check {toxinidir} + +[flake8] +exclude=.tox +max-line-length = 88 diff --git a/calico/layer.yaml b/calico/layer.yaml index e1dc12c..db8d894 100644 --- a/calico/layer.yaml +++ b/calico/layer.yaml 
@@ -5,6 +5,7 @@ - "layer:basic" - "layer:leadership" - "layer:status" +- "layer:kubernetes-common" "exclude": [".travis.yml", "tests", "tox.ini", "test-requirements.txt", "unit_tests"] "options": "basic": @@ -15,6 +16,7 @@ "leadership": {} "status": "patch-hookenv": !!bool "true" + "kubernetes-common": {} "calico": {} "repo": "https://github.com/juju-solutions/layer-calico.git" "is": "calico" diff --git a/calico/lib/charms/layer/basic.py b/calico/lib/charms/layer/basic.py index 7507203..bbdd074 100644 --- a/calico/lib/charms/layer/basic.py +++ b/calico/lib/charms/layer/basic.py @@ -199,7 +199,13 @@ def bootstrap_charm_deps(): # a set so that we can ignore the pre-install packages and let pip # choose the best version in case there are multiple from layer # conflicts) - pkgs = _load_wheelhouse_versions().keys() - set(pre_install_pkgs) + _versions = _load_wheelhouse_versions() + _pkgs = _versions.keys() - set(pre_install_pkgs) + # add back the versions such that each package in pkgs is + # ==. + # This ensures that pip 20.3.4+ will install the packages from the + # wheelhouse without (erroneously) flagging an error. + pkgs = _add_back_versions(_pkgs, _versions) reinstall_flag = '--force-reinstall' if not cfg.get('use_venv', True) and pre_eoan: reinstall_flag = '--ignore-installed' @@ -278,6 +284,55 @@ def _load_wheelhouse_versions(): return versions +def _add_back_versions(pkgs, versions): + """Add back the version strings to each of the packages. + + The versions are LooseVersion() from _load_wheelhouse_versions(). This + function strips the ".zip" or ".tar.gz" from the end of the version string + and adds it back to the package in the form of == + + If a package name is not a key in the versions dictionary, then it is + returned in the list unchanged. 
+ + :param pkgs: A list of package names + :type pkgs: List[str] + :param versions: A map of package to LooseVersion + :type versions: Dict[str, LooseVersion] + :returns: A list of (maybe) versioned packages + :rtype: List[str] + """ + def _strip_ext(s): + """Strip an extension (if it exists) from the string + + :param s: the string to strip an extension off if it exists + :type s: str + :returns: string without an extension of .zip or .tar.gz + :rtype: str + """ + for ending in [".zip", ".tar.gz"]: + if s.endswith(ending): + return s[:-len(ending)] + return s + + def _maybe_add_version(pkg): + """Maybe add back the version number to a package if it exists. + + Adds the version number, if the package exists in the lexically + captured `versions` dictionary, in the form ==. Strips + the extension if it exists. + + :param pkg: the package name to (maybe) add the version number to. + :type pkg: str + """ + try: + return "{}=={}".format(pkg, _strip_ext(str(versions[pkg]))) + except KeyError: + pass + return pkg + + return [_maybe_add_version(pkg) for pkg in pkgs] + + def _update_if_newer(pip, pkgs): installed = _load_installed_versions(pip) wheelhouse = _load_wheelhouse_versions() diff --git a/calico/lib/charms/layer/kubernetes_common.py b/calico/lib/charms/layer/kubernetes_common.py new file mode 100644 index 0000000..fb14ad2 --- /dev/null +++ b/calico/lib/charms/layer/kubernetes_common.py @@ -0,0 +1,924 @@ +#!/usr/bin/env python + +# Copyright 2015 The Kubernetes Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + +import ipaddress +import re +import os +import subprocess +import hashlib +import json +import traceback +import random +import string +import tempfile +import yaml + +from base64 import b64decode, b64encode +from pathlib import Path +from subprocess import check_output, check_call +from socket import gethostname, getfqdn +from shlex import split +from subprocess import CalledProcessError +from charmhelpers.core import hookenv, unitdata +from charmhelpers.core import host +from charmhelpers.core.templating import render +from charms.reactive import endpoint_from_flag, is_state +from time import sleep + +AUTH_SECRET_NS = "kube-system" +AUTH_SECRET_TYPE = "juju.is/token-auth" + +db = unitdata.kv() +kubeclientconfig_path = "/root/.kube/config" +gcp_creds_env_key = "GOOGLE_APPLICATION_CREDENTIALS" +kubeproxyconfig_path = "/root/cdk/kubeproxyconfig" +certs_dir = Path("/root/cdk") +ca_crt_path = certs_dir / "ca.crt" +server_crt_path = certs_dir / "server.crt" +server_key_path = certs_dir / "server.key" +client_crt_path = certs_dir / "client.crt" +client_key_path = certs_dir / "client.key" + + +def get_version(bin_name): + """Get the version of an installed Kubernetes binary. + + :param str bin_name: Name of binary + :return: 3-tuple version (maj, min, patch) + + Example:: + + >>> `get_version('kubelet') + (1, 6, 0) + + """ + cmd = "{} --version".format(bin_name).split() + version_string = subprocess.check_output(cmd).decode("utf-8") + return tuple(int(q) for q in re.findall("[0-9]+", version_string)[:3]) + + +def retry(times, delay_secs): + """Decorator for retrying a method call. + + Args: + times: How many times should we retry before giving up + delay_secs: Delay in secs + + Returns: A callable that would return the last call outcome + """ + + def retry_decorator(func): + """Decorator to wrap the function provided. 
+ + Args: + func: Provided function should return either True od False + + Returns: A callable that would return the last call outcome + + """ + + def _wrapped(*args, **kwargs): + res = func(*args, **kwargs) + attempt = 0 + while not res and attempt < times: + sleep(delay_secs) + res = func(*args, **kwargs) + if res: + break + attempt += 1 + return res + + return _wrapped + + return retry_decorator + + +def calculate_resource_checksum(resource): + """Calculate a checksum for a resource""" + md5 = hashlib.md5() + path = hookenv.resource_get(resource) + if path: + with open(path, "rb") as f: + data = f.read() + md5.update(data) + return md5.hexdigest() + + +def get_resource_checksum_db_key(checksum_prefix, resource): + """Convert a resource name to a resource checksum database key.""" + return checksum_prefix + resource + + +def migrate_resource_checksums(checksum_prefix, snap_resources): + """Migrate resource checksums from the old schema to the new one""" + for resource in snap_resources: + new_key = get_resource_checksum_db_key(checksum_prefix, resource) + if not db.get(new_key): + path = hookenv.resource_get(resource) + if path: + # old key from charms.reactive.helpers.any_file_changed + old_key = "reactive.files_changed." + path + old_checksum = db.get(old_key) + db.set(new_key, old_checksum) + else: + # No resource is attached. Previously, this meant no checksum + # would be calculated and stored. But now we calculate it as if + # it is a 0-byte resource, so let's go ahead and do that. 
+ zero_checksum = hashlib.md5().hexdigest() + db.set(new_key, zero_checksum) + + +def check_resources_for_upgrade_needed(checksum_prefix, snap_resources): + hookenv.status_set("maintenance", "Checking resources") + for resource in snap_resources: + key = get_resource_checksum_db_key(checksum_prefix, resource) + old_checksum = db.get(key) + new_checksum = calculate_resource_checksum(resource) + if new_checksum != old_checksum: + return True + return False + + +def calculate_and_store_resource_checksums(checksum_prefix, snap_resources): + for resource in snap_resources: + key = get_resource_checksum_db_key(checksum_prefix, resource) + checksum = calculate_resource_checksum(resource) + db.set(key, checksum) + + +def get_ingress_address(endpoint_name, ignore_addresses=None): + try: + network_info = hookenv.network_get(endpoint_name) + except NotImplementedError: + network_info = {} + + if not network_info or "ingress-addresses" not in network_info: + # if they don't have ingress-addresses they are running a juju that + # doesn't support spaces, so just return the private address + return hookenv.unit_get("private-address") + + addresses = network_info["ingress-addresses"] + + if ignore_addresses: + hookenv.log("ingress-addresses before filtering: {}".format(addresses)) + iter_filter = filter(lambda item: item not in ignore_addresses, addresses) + addresses = list(iter_filter) + hookenv.log("ingress-addresses after filtering: {}".format(addresses)) + + # Need to prefer non-fan IP addresses due to various issues, e.g. + # https://bugs.launchpad.net/charm-gcp-integrator/+bug/1822997 + # Fan typically likes to use IPs in the 240.0.0.0/4 block, so we'll + # prioritize those last. Not technically correct, but good enough. 
+ try: + sort_key = lambda a: int(a.partition(".")[0]) >= 240 # noqa: E731 + addresses = sorted(addresses, key=sort_key) + except Exception: + hookenv.log(traceback.format_exc()) + + return addresses[0] + + +def get_ingress_address6(endpoint_name): + try: + network_info = hookenv.network_get(endpoint_name) + except NotImplementedError: + network_info = {} + + if not network_info or "ingress-addresses" not in network_info: + return None + + addresses = network_info["ingress-addresses"] + + for addr in addresses: + ip_addr = ipaddress.ip_interface(addr).ip + if ip_addr.version == 6: + return str(ip_addr) + else: + return None + + +def service_restart(service_name): + hookenv.status_set("maintenance", "Restarting {0} service".format(service_name)) + host.service_restart(service_name) + + +def service_start(service_name): + hookenv.log("Starting {0} service.".format(service_name)) + host.service_stop(service_name) + + +def service_stop(service_name): + hookenv.log("Stopping {0} service.".format(service_name)) + host.service_stop(service_name) + + +def arch(): + """Return the package architecture as a string. Raise an exception if the + architecture is not supported by kubernetes.""" + # Get the package architecture for this system. + architecture = check_output(["dpkg", "--print-architecture"]).rstrip() + # Convert the binary result into a string. + architecture = architecture.decode("utf-8") + return architecture + + +def get_service_ip(service, namespace="kube-system", errors_fatal=True): + try: + output = kubectl( + "get", "service", "--namespace", namespace, service, "--output", "json" + ) + except CalledProcessError: + if errors_fatal: + raise + else: + return None + else: + svc = json.loads(output.decode()) + return svc["spec"]["clusterIP"] + + +def kubectl(*args): + """Run a kubectl cli command with a config file. 
Returns stdout and throws + an error if the command fails.""" + command = ["kubectl", "--kubeconfig=" + kubeclientconfig_path] + list(args) + hookenv.log("Executing {}".format(command)) + return check_output(command) + + +def kubectl_success(*args): + """Runs kubectl with the given args. Returns True if successful, False if + not.""" + try: + kubectl(*args) + return True + except CalledProcessError: + return False + + +def kubectl_manifest(operation, manifest): + """Wrap the kubectl creation command when using filepath resources + :param operation - one of get, create, delete, replace + :param manifest - filepath to the manifest + """ + # Deletions are a special case + if operation == "delete": + # Ensure we immediately remove requested resources with --now + return kubectl_success(operation, "-f", manifest, "--now") + else: + # Guard against an error re-creating the same manifest multiple times + if operation == "create": + # If we already have the definition, its probably safe to assume + # creation was true. 
+ if kubectl_success("get", "-f", manifest): + hookenv.log("Skipping definition for {}".format(manifest)) + return True + # Execute the requested command that did not match any of the special + # cases above + return kubectl_success(operation, "-f", manifest) + + +def get_node_name(): + kubelet_extra_args = parse_extra_args("kubelet-extra-args") + cloud_provider = kubelet_extra_args.get("cloud-provider", "") + if is_state("endpoint.aws.ready"): + cloud_provider = "aws" + elif is_state("endpoint.gcp.ready"): + cloud_provider = "gce" + elif is_state("endpoint.openstack.ready"): + cloud_provider = "openstack" + elif is_state("endpoint.vsphere.ready"): + cloud_provider = "vsphere" + elif is_state("endpoint.azure.ready"): + cloud_provider = "azure" + if cloud_provider == "aws": + return getfqdn().lower() + else: + return gethostname().lower() + + +def create_kubeconfig( + kubeconfig, + server, + ca, + key=None, + certificate=None, + user="ubuntu", + context="juju-context", + cluster="juju-cluster", + password=None, + token=None, + keystone=False, + aws_iam_cluster_id=None, +): + """Create a configuration for Kubernetes based on path using the supplied + arguments for values of the Kubernetes server, CA, key, certificate, user + context and cluster.""" + if not key and not certificate and not password and not token: + raise ValueError("Missing authentication mechanism.") + elif key and not certificate: + raise ValueError("Missing certificate.") + elif not key and certificate: + raise ValueError("Missing key.") + elif token and password: + # token and password are mutually exclusive. Error early if both are + # present. The developer has requested an impossible situation. + # see: kubectl config set-credentials --help + raise ValueError("Token and Password are mutually exclusive.") + + old_kubeconfig = Path(kubeconfig) + new_kubeconfig = Path(str(kubeconfig) + ".new") + + # Create the config file with the address of the master server. 
+ cmd = ( + "kubectl config --kubeconfig={0} set-cluster {1} " + "--server={2} --certificate-authority={3} --embed-certs=true" + ) + check_call(split(cmd.format(new_kubeconfig, cluster, server, ca))) + # Delete old users + cmd = "kubectl config --kubeconfig={0} unset users" + check_call(split(cmd.format(new_kubeconfig))) + # Create the credentials using the client flags. + cmd = "kubectl config --kubeconfig={0} " "set-credentials {1} ".format( + new_kubeconfig, user + ) + + if key and certificate: + cmd = ( + "{0} --client-key={1} --client-certificate={2} " + "--embed-certs=true".format(cmd, key, certificate) + ) + if password: + cmd = "{0} --username={1} --password={2}".format(cmd, user, password) + # This is mutually exclusive from password. They will not work together. + if token: + cmd = "{0} --token={1}".format(cmd, token) + check_call(split(cmd)) + # Create a default context with the cluster. + cmd = "kubectl config --kubeconfig={0} set-context {1} " "--cluster={2} --user={3}" + check_call(split(cmd.format(new_kubeconfig, context, cluster, user))) + # Make the config use this new context. 
+ cmd = "kubectl config --kubeconfig={0} use-context {1}" + check_call(split(cmd.format(new_kubeconfig, context))) + if keystone: + # create keystone user + cmd = "kubectl config --kubeconfig={0} " "set-credentials keystone-user".format( + new_kubeconfig + ) + check_call(split(cmd)) + # create keystone context + cmd = ( + "kubectl config --kubeconfig={0} " + "set-context --cluster={1} " + "--user=keystone-user keystone".format(new_kubeconfig, cluster) + ) + check_call(split(cmd)) + # use keystone context + cmd = "kubectl config --kubeconfig={0} " "use-context keystone".format( + new_kubeconfig + ) + check_call(split(cmd)) + # manually add exec command until kubectl can do it for us + with open(new_kubeconfig, "r") as f: + content = f.read() + content = content.replace( + """- name: keystone-user + user: {}""", + """- name: keystone-user + user: + exec: + command: "/snap/bin/client-keystone-auth" + apiVersion: "client.authentication.k8s.io/v1beta1" +""", + ) + with open(new_kubeconfig, "w") as f: + f.write(content) + if aws_iam_cluster_id: + # create aws-iam context + cmd = ( + "kubectl config --kubeconfig={0} " + "set-context --cluster={1} " + "--user=aws-iam-user aws-iam-authenticator" + ) + check_call(split(cmd.format(new_kubeconfig, cluster))) + + # append a user for aws-iam + cmd = ( + "kubectl --kubeconfig={0} config set-credentials " + "aws-iam-user --exec-command=aws-iam-authenticator " + '--exec-arg="token" --exec-arg="-i" --exec-arg="{1}" ' + '--exec-arg="-r" --exec-arg="<>" ' + "--exec-api-version=client.authentication.k8s.io/v1alpha1" + ) + check_call(split(cmd.format(new_kubeconfig, aws_iam_cluster_id))) + + # not going to use aws-iam context by default since we don't have + # the desired arn. This will make the config not usable if copied. 
+ + # cmd = 'kubectl config --kubeconfig={0} ' \ + # 'use-context aws-iam-authenticator'.format(new_kubeconfig) + # check_call(split(cmd)) + if old_kubeconfig.exists(): + changed = new_kubeconfig.read_text() != old_kubeconfig.read_text() + else: + changed = True + if changed: + new_kubeconfig.rename(old_kubeconfig) + + +def parse_extra_args(config_key): + elements = hookenv.config().get(config_key, "").split() + args = {} + + for element in elements: + if "=" in element: + key, _, value = element.partition("=") + args[key] = value + else: + args[element] = "true" + + return args + + +def configure_kubernetes_service(key, service, base_args, extra_args_key): + db = unitdata.kv() + + prev_args_key = key + service + prev_snap_args = db.get(prev_args_key) or {} + + extra_args = parse_extra_args(extra_args_key) + + args = {} + args.update(base_args) + args.update(extra_args) + + # CIS benchmark action may inject kv config to pass failing tests. Merge + # these after the func args as they should take precedence. + cis_args_key = "cis-" + service + cis_args = db.get(cis_args_key) or {} + args.update(cis_args) + + # Remove any args with 'None' values (all k8s args are 'k=v') and + # construct an arg string for use by 'snap set'. + args = {k: v for k, v in args.items() if v is not None} + args = ['--%s="%s"' % arg for arg in args.items()] + args = " ".join(args) + + snap_opts = {} + for arg in prev_snap_args: + # remove previous args by setting to null + snap_opts[arg] = "null" + snap_opts["args"] = args + snap_opts = ["%s=%s" % opt for opt in snap_opts.items()] + + cmd = ["snap", "set", service] + snap_opts + check_call(cmd) + + # Now that we've started doing snap configuration through the "args" + # option, we should never need to clear previous args again. 
+ db.set(prev_args_key, {}) + + +def _snap_common_path(component): + return Path("/var/snap/{}/common".format(component)) + + +def cloud_config_path(component): + return _snap_common_path(component) / "cloud-config.conf" + + +def _gcp_creds_path(component): + return _snap_common_path(component) / "gcp-creds.json" + + +def _daemon_env_path(component): + return _snap_common_path(component) / "environment" + + +def _cloud_endpoint_ca_path(component): + return _snap_common_path(component) / "cloud-endpoint-ca.crt" + + +def encryption_config_path(): + apiserver_snap_common_path = _snap_common_path("kube-apiserver") + encryption_conf_dir = apiserver_snap_common_path / "encryption" + return encryption_conf_dir / "encryption_config.yaml" + + +def write_gcp_snap_config(component): + # gcp requires additional credentials setup + gcp = endpoint_from_flag("endpoint.gcp.ready") + creds_path = _gcp_creds_path(component) + with creds_path.open("w") as fp: + os.fchmod(fp.fileno(), 0o600) + fp.write(gcp.credentials) + + # create a cloud-config file that sets token-url to nil to make the + # services use the creds env var instead of the metadata server, as + # well as making the cluster multizone + comp_cloud_config_path = cloud_config_path(component) + comp_cloud_config_path.write_text( + "[Global]\n" "token-url = nil\n" "multizone = true\n" + ) + + daemon_env_path = _daemon_env_path(component) + if daemon_env_path.exists(): + daemon_env = daemon_env_path.read_text() + if not daemon_env.endswith("\n"): + daemon_env += "\n" + else: + daemon_env = "" + if gcp_creds_env_key not in daemon_env: + daemon_env += "{}={}\n".format(gcp_creds_env_key, creds_path) + daemon_env_path.parent.mkdir(parents=True, exist_ok=True) + daemon_env_path.write_text(daemon_env) + + +def generate_openstack_cloud_config(): + # openstack requires additional credentials setup + openstack = endpoint_from_flag("endpoint.openstack.ready") + + lines = [ + "[Global]", + "auth-url = {}".format(openstack.auth_url), + 
"region = {}".format(openstack.region), + "username = {}".format(openstack.username), + "password = {}".format(openstack.password), + "tenant-name = {}".format(openstack.project_name), + "domain-name = {}".format(openstack.user_domain_name), + "tenant-domain-name = {}".format(openstack.project_domain_name), + ] + if openstack.endpoint_tls_ca: + lines.append("ca-file = /etc/config/endpoint-ca.cert") + + lines.extend( + [ + "", + "[LoadBalancer]", + ] + ) + + if openstack.has_octavia in (True, None): + # Newer integrator charm will detect whether underlying OpenStack has + # Octavia enabled so we can set this intelligently. If we're still + # related to an older integrator, though, default to assuming Octavia + # is available. + lines.append("use-octavia = true") + else: + lines.append("use-octavia = false") + lines.append("lb-provider = haproxy") + if openstack.subnet_id: + lines.append("subnet-id = {}".format(openstack.subnet_id)) + if openstack.floating_network_id: + lines.append("floating-network-id = {}".format(openstack.floating_network_id)) + if openstack.lb_method: + lines.append("lb-method = {}".format(openstack.lb_method)) + if openstack.manage_security_groups: + lines.append( + "manage-security-groups = {}".format(openstack.manage_security_groups) + ) + if any( + [openstack.bs_version, openstack.trust_device_path, openstack.ignore_volume_az] + ): + lines.append("") + lines.append("[BlockStorage]") + if openstack.bs_version is not None: + lines.append("bs-version = {}".format(openstack.bs_version)) + if openstack.trust_device_path is not None: + lines.append("trust-device-path = {}".format(openstack.trust_device_path)) + if openstack.ignore_volume_az is not None: + lines.append("ignore-volume-az = {}".format(openstack.ignore_volume_az)) + return "\n".join(lines) + "\n" + + +def write_azure_snap_config(component): + azure = endpoint_from_flag("endpoint.azure.ready") + comp_cloud_config_path = cloud_config_path(component) + comp_cloud_config_path.write_text( 
+ json.dumps( + { + "useInstanceMetadata": True, + "useManagedIdentityExtension": azure.managed_identity, + "subscriptionId": azure.subscription_id, + "resourceGroup": azure.resource_group, + "location": azure.resource_group_location, + "vnetName": azure.vnet_name, + "vnetResourceGroup": azure.vnet_resource_group, + "subnetName": azure.subnet_name, + "securityGroupName": azure.security_group_name, + "loadBalancerSku": "standard", + "securityGroupResourceGroup": azure.security_group_resource_group, + "aadClientId": azure.aad_client_id, + "aadClientSecret": azure.aad_client_secret, + "tenantId": azure.tenant_id, + } + ) + ) + + +def configure_kube_proxy( + configure_prefix, api_servers, cluster_cidr, bind_address=None +): + kube_proxy_opts = {} + kube_proxy_opts["cluster-cidr"] = cluster_cidr + kube_proxy_opts["kubeconfig"] = kubeproxyconfig_path + kube_proxy_opts["logtostderr"] = "true" + kube_proxy_opts["v"] = "0" + num_apis = len(api_servers) + kube_proxy_opts["master"] = api_servers[get_unit_number() % num_apis] + kube_proxy_opts["hostname-override"] = get_node_name() + if bind_address: + kube_proxy_opts["bind-address"] = bind_address + elif is_ipv6(cluster_cidr): + kube_proxy_opts["bind-address"] = "::" + + if host.is_container(): + kube_proxy_opts["conntrack-max-per-core"] = "0" + + if is_dual_stack(cluster_cidr): + kube_proxy_opts["feature-gates"] = "IPv6DualStack=true" + + configure_kubernetes_service( + configure_prefix, "kube-proxy", kube_proxy_opts, "proxy-extra-args" + ) + + +def get_unit_number(): + return int(hookenv.local_unit().split("/")[1]) + + +def cluster_cidr(): + """Return the cluster CIDR provided by the CNI""" + cni = endpoint_from_flag("cni.available") + if not cni: + return None + config = hookenv.config() + if "default-cni" in config: + # master + default_cni = config["default-cni"] + else: + # worker + kube_control = endpoint_from_flag("kube-control.dns.available") + if not kube_control: + return None + default_cni = 
kube_control.get_default_cni() + return cni.get_config(default=default_cni)["cidr"] + + +def is_dual_stack(cidrs): + """Detect IPv4/IPv6 dual stack from CIDRs""" + return {net.version for net in get_networks(cidrs)} == {4, 6} + + +def is_ipv4(cidrs): + """Detect IPv6 from CIDRs""" + return get_ipv4_network(cidrs) is not None + + +def is_ipv6(cidrs): + """Detect IPv6 from CIDRs""" + return get_ipv6_network(cidrs) is not None + + +def is_ipv6_preferred(cidrs): + """Detect if IPv6 is preffered from CIDRs""" + return get_networks(cidrs)[0].version == 6 + + +def get_networks(cidrs): + """Convert a comma-separated list of CIDRs to a list of networks.""" + if not cidrs: + return [] + return [ipaddress.ip_interface(cidr).network for cidr in cidrs.split(",")] + + +def get_ipv4_network(cidrs): + """Get the IPv4 network from the given CIDRs or None""" + return {net.version: net for net in get_networks(cidrs)}.get(4) + + +def get_ipv6_network(cidrs): + """Get the IPv6 network from the given CIDRs or None""" + return {net.version: net for net in get_networks(cidrs)}.get(6) + + +def enable_ipv6_forwarding(): + """Enable net.ipv6.conf.all.forwarding in sysctl if it is not already.""" + check_call(["sysctl", "net.ipv6.conf.all.forwarding=1"]) + + +def get_bind_addrs(ipv4=True, ipv6=True): + """Get all global-scoped addresses that we might bind to.""" + try: + output = check_output(["ip", "-br", "addr", "show", "scope", "global"]) + except CalledProcessError: + # stderr will have any details, and go to the log + hookenv.log("Unable to determine global addresses", hookenv.ERROR) + return [] + + ignore_interfaces = ("lxdbr", "flannel", "cni", "virbr", "docker") + accept_versions = set() + if ipv4: + accept_versions.add(4) + if ipv6: + accept_versions.add(6) + + addrs = [] + for line in output.decode("utf8").splitlines(): + intf, state, *intf_addrs = line.split() + if state != "UP" or any( + intf.startswith(prefix) for prefix in ignore_interfaces + ): + continue + for addr in 
intf_addrs: + ip_addr = ipaddress.ip_interface(addr).ip + if ip_addr.version in accept_versions: + addrs.append(str(ip_addr)) + return addrs + + +class InvalidVMwareHost(Exception): + pass + + +def _get_vmware_uuid(): + serial_id_file = "/sys/class/dmi/id/product_serial" + # The serial id from VMWare VMs comes in following format: + # VMware-42 28 13 f5 d4 20 71 61-5d b0 7b 96 44 0c cf 54 + try: + with open(serial_id_file, "r") as f: + serial_string = f.read().strip() + if "VMware-" not in serial_string: + hookenv.log( + "Unable to find VMware ID in " + "product_serial: {}".format(serial_string) + ) + raise InvalidVMwareHost + serial_string = ( + serial_string.split("VMware-")[1].replace(" ", "").replace("-", "") + ) + uuid = "%s-%s-%s-%s-%s" % ( + serial_string[0:8], + serial_string[8:12], + serial_string[12:16], + serial_string[16:20], + serial_string[20:32], + ) + except IOError as err: + hookenv.log("Unable to read UUID from sysfs: {}".format(err)) + uuid = "UNKNOWN" + + return uuid + + +def token_generator(length=32): + """Generate a random token for use in account tokens. + + param: length - the length of the token to generate + """ + alpha = string.ascii_letters + string.digits + token = "".join(random.SystemRandom().choice(alpha) for _ in range(length)) + return token + + +def get_secret_names(): + """Return a dict of 'username: secret_id' for Charmed Kubernetes users.""" + try: + output = kubectl( + "get", + "secrets", + "-n", + AUTH_SECRET_NS, + "--field-selector", + "type={}".format(AUTH_SECRET_TYPE), + "-o", + "json", + ).decode("UTF-8") + except (CalledProcessError, FileNotFoundError): + # The api server may not be up, or we may be trying to run kubelet before + # the snap is installed. Send back an empty dict. 
+ hookenv.log("Unable to get existing secrets", level=hookenv.WARNING) + return {} + + secrets = json.loads(output) + secret_names = {} + if "items" in secrets: + for secret in secrets["items"]: + try: + secret_id = secret["metadata"]["name"] + username_b64 = secret["data"]["username"].encode("UTF-8") + except (KeyError, TypeError): + # CK secrets will have populated 'data', but not all secrets do + continue + secret_names[b64decode(username_b64).decode("UTF-8")] = secret_id + return secret_names + + +def generate_rfc1123(length=10): + """Generate a random string compliant with RFC 1123. + + https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#dns-subdomain-names + + param: length - the length of the string to generate + """ + length = 253 if length > 253 else length + valid_chars = string.ascii_lowercase + string.digits + rand_str = "".join(random.SystemRandom().choice(valid_chars) for _ in range(length)) + return rand_str + + +def create_secret(token, username, user, groups=None): + secrets = get_secret_names() + if username in secrets: + # Use existing secret ID if one exists for our username + secret_id = secrets[username] + else: + # secret IDs must be unique and rfc1123 compliant + sani_name = re.sub("[^0-9a-z.-]+", "-", user.lower()) + secret_id = "auth-{}-{}".format(sani_name, generate_rfc1123(10)) + + # The authenticator expects tokens to be in the form user::token + token_delim = "::" + if token_delim not in token: + token = "{}::{}".format(user, token) + + context = { + "type": AUTH_SECRET_TYPE, + "secret_name": secret_id, + "secret_namespace": AUTH_SECRET_NS, + "user": b64encode(user.encode("UTF-8")).decode("utf-8"), + "username": b64encode(username.encode("UTF-8")).decode("utf-8"), + "password": b64encode(token.encode("UTF-8")).decode("utf-8"), + "groups": b64encode(groups.encode("UTF-8")).decode("utf-8") if groups else "", + } + with tempfile.NamedTemporaryFile() as tmp_manifest: + render("cdk.auth-webhook-secret.yaml", 
tmp_manifest.name, context=context) + + if kubectl_manifest("apply", tmp_manifest.name): + hookenv.log("Created secret for {}".format(username)) + return True + else: + hookenv.log("WARN: Unable to create secret for {}".format(username)) + return False + + +def get_secret_password(username): + """Get the password for the given user from the secret that CK created.""" + try: + output = kubectl( + "get", + "secrets", + "-n", + AUTH_SECRET_NS, + "--field-selector", + "type={}".format(AUTH_SECRET_TYPE), + "-o", + "json", + ).decode("UTF-8") + except CalledProcessError: + # NB: apiserver probably isn't up. This can happen on boostrap or upgrade + # while trying to build kubeconfig files. If we need the 'admin' token during + # this time, pull it directly out of the kubeconfig file if possible. + token = None + if username == "admin": + admin_kubeconfig = Path("/root/.kube/config") + if admin_kubeconfig.exists(): + data = yaml.safe_load(admin_kubeconfig.read_text()) + try: + token = data["users"][0]["user"]["token"] + except (KeyError, IndexError, TypeError): + pass + return token + except FileNotFoundError: + # New deployments may ask for a token before the kubectl snap is installed. + # Give them nothing! 
+ return None + + secrets = json.loads(output) + if "items" in secrets: + for secret in secrets["items"]: + try: + data_b64 = secret["data"] + password_b64 = data_b64["password"].encode("UTF-8") + username_b64 = data_b64["username"].encode("UTF-8") + except (KeyError, TypeError): + # CK authn secrets will have populated 'data', but not all secrets do + continue + + password = b64decode(password_b64).decode("UTF-8") + secret_user = b64decode(username_b64).decode("UTF-8") + if username == secret_user: + return password + return None diff --git a/calico/reactive/calico.py b/calico/reactive/calico.py index 4d49298..1410181 100644 --- a/calico/reactive/calico.py +++ b/calico/reactive/calico.py @@ -12,8 +12,9 @@ from subprocess import check_call, check_output, CalledProcessError, STDOUT from charms.leadership import leader_get, leader_set from charms.reactive import when, when_not, when_any, set_state, remove_state from charms.reactive import hook, is_state -from charms.reactive import endpoint_from_flag -from charms.reactive import data_changed +from charms.reactive import endpoint_from_flag, endpoint_from_name +from charms.reactive import data_changed, any_file_changed +from charms.reactive import register_trigger from charmhelpers.core.hookenv import ( log, resource_get, @@ -32,7 +33,8 @@ from charmhelpers.core.host import ( service_running ) from charmhelpers.core.templating import render -from charms.layer import status +from charms.layer import kubernetes_common, status +from charms.layer.kubernetes_common import kubectl # TODO: # - Handle the 'stop' hook by stopping and uninstalling all the things. 
@@ -52,6 +54,10 @@ ETCD_CERT_PATH = os.path.join(CALICOCTL_PATH, 'etcd-cert') ETCD_CA_PATH = os.path.join(CALICOCTL_PATH, 'etcd-ca') CALICO_UPGRADE_DIR = '/opt/calico-upgrade' +register_trigger( + when="cni.kubeconfig.changed", clear_flag="calico.service.installed" +) + @hook('upgrade-charm') def upgrade_charm(): @@ -75,6 +81,8 @@ def upgrade_charm(): 'calico-v3-npc-cleanup-needed': True, 'calico-v3-completion-needed': True }) + cni = endpoint_from_name('cni') + cni.manage_flags() @when('leadership.is_leader', 'leadership.set.calico-v3-data-migration-needed', @@ -238,6 +246,7 @@ def check_etcd_changes(): ETCD_CA_PATH) remove_state('calico.service.installed') remove_state('calico.npc.deployed') + remove_state('calico.cni.configured') def get_mtu(): @@ -278,13 +287,52 @@ def get_bind_address(): return unit_private_ip() +@when('leadership.is_leader', 'leadership.set.calico-v3-data-ready') +@when_not('leadership.set.calico-node-token') +def create_calico_node_token(): + ''' Create the system:calico-node user token ''' + status.maintenance('Creating system:calico-node user token') + token = kubernetes_common.token_generator() + user = 'system:calico-node' + success = kubernetes_common.create_secret( + token=token, + username=user, + user=user + ) + if not success: + log('Failed to create system:calico-node user token, will retry') + status.waiting('Waiting to retry creating calico-node token') + return + # create_secret may have added the :: prefix. Get the new token. 
+ token = kubernetes_common.get_secret_password(user) + if not token: + log('Failed to get system:calico-node user token, will retry') + status.waiting('Waiting to retry creating calico-node token') + return + leader_set({'calico-node-token': token}) + + @when('calico.binaries.installed', 'etcd.available', - 'calico.etcd-credentials.installed', - 'leadership.set.calico-v3-data-ready') + 'calico.etcd-credentials.installed', 'cni.kubeconfig.available', + 'leadership.set.calico-node-token', 'leadership.set.calico-v3-data-ready') @when_not('calico.service.installed') def install_calico_service(): ''' Install the calico-node systemd service. ''' status.maintenance('Installing calico-node service.') + + with open(kubernetes_common.kubeclientconfig_path) as f: + kubeconfig = yaml.safe_load(f) + any_file_changed([kubernetes_common.kubeclientconfig_path]) + kubeconfig['users'] = [{ + 'name': 'calico-node', + 'user': { + 'token': leader_get('calico-node-token') + } + }] + kubeconfig['contexts'][0]['context']['user'] = 'calico-node' + with open('/opt/calicoctl/kubeconfig', 'w') as f: + yaml.dump(kubeconfig, f) + etcd = endpoint_from_flag('etcd.available') service_path = os.path.join(os.sep, 'lib', 'systemd', 'system', 'calico-node.service') @@ -309,6 +357,7 @@ def install_calico_service(): check_call(['systemctl', 'daemon-reload']) service_restart('calico-node') service('enable', 'calico-node') + remove_state('cni.kubeconfig.changed') set_state('calico.service.installed') @@ -398,14 +447,13 @@ def configure_cni(): cni = endpoint_from_flag('cni.is-worker') etcd = endpoint_from_flag('etcd.available') os.makedirs('/etc/cni/net.d', exist_ok=True) - cni_config = cni.get_config() ip_versions = {net.version for net in get_networks(charm_config('cidr'))} context = { 'connection_string': etcd.get_connection_string(), 'etcd_key_path': ETCD_KEY_PATH, 'etcd_cert_path': ETCD_CERT_PATH, 'etcd_ca_path': ETCD_CA_PATH, - 'kubeconfig_path': cni_config['kubeconfig_path'], + 'kubeconfig_path': 
'/opt/calicoctl/kubeconfig', 'mtu': get_mtu(), 'assign_ipv4': 'true' if 4 in ip_versions else 'false', 'assign_ipv6': 'true' if 6 in ip_versions else 'false', @@ -483,6 +531,14 @@ def configure_bgp_globals(): spec = bgp_config['spec'] spec['asNumber'] = config['global-as-number'] spec['nodeToNodeMeshEnabled'] = config['node-to-node-mesh'] + spec['serviceClusterIPs'] = [ + {'cidr': cidr} + for cidr in config['bgp-service-cluster-ips'].split() + ] + spec['serviceExternalIPs'] = [ + {'cidr': cidr} + for cidr in config['bgp-service-external-ips'].split() + ] calicoctl_apply(bgp_config) except CalledProcessError: log(traceback.format_exc()) @@ -493,7 +549,9 @@ def configure_bgp_globals(): @when_any('config.changed.global-as-number', - 'config.changed.node-to-node-mesh') + 'config.changed.node-to-node-mesh', + 'config.changed.bgp-service-cluster-ips', + 'config.changed.bgp-service-external-ips') def reconfigure_bgp_globals(): remove_state('calico.bgp.globals.configured') @@ -709,15 +767,6 @@ def calicoctl_apply(data): calicoctl('apply', '-f', path) -def kubectl(*args): - cmd = ['kubectl', '--kubeconfig=/root/.kube/config'] + list(args) - try: - return check_output(cmd) - except CalledProcessError as e: - log(e.output) - raise - - def get_calicoctl_env(): etcd = endpoint_from_flag('etcd.available') env = {} diff --git a/calico/script/bootstrap b/calico/script/bootstrap deleted file mode 100644 index b69771c..0000000 --- a/calico/script/bootstrap +++ /dev/null @@ -1,8 +0,0 @@ -#!/bin/bash - -set -x - -sudo apt update -sudo apt install -qyf docker.io -sudo snap install charm --classic -sudo snap install yq diff --git a/calico/script/build b/calico/script/build deleted file mode 100644 index 6bbbc48..0000000 --- a/calico/script/build +++ /dev/null @@ -1,7 +0,0 @@ -#!/bin/bash -set -x - -export PATH=/snap/bin:$PATH -: "${CHARM_BUILD_DIR:=/tmp/charms}" - -charm build -r --force -o "$CHARM_BUILD_DIR" diff --git a/calico/script/upload b/calico/script/upload deleted file mode 
100644 index 1bb581d..0000000 --- a/calico/script/upload +++ /dev/null @@ -1,53 +0,0 @@ -#!/bin/bash -set -x - -export PATH=/snap/bin:$PATH - -: "${CHARM_BUILD_DIR:=/tmp/charms}" - -charm whoami -RET=$? -if ((RET > 0)); then - echo "Not logged into charmstore" - exit 1 -fi - -function generate::attachments -{ - ./build-calico-resource.sh - touch calico-node-image.tar.gz - - charm attach cs:~"$NAMESPACE"/"$CHARM" --channel unpublished \ - calico-node-image=calico-node-image.tar.gz - charm attach cs:~"$NAMESPACE"/"$CHARM" --channel unpublished \ - calico=calico-amd64.tar.gz - charm attach cs:~"$NAMESPACE"/"$CHARM" --channel unpublished \ - calico-arm64=calico-arm64.tar.gz - charm attach cs:~"$NAMESPACE"/"$CHARM" --channel unpublished \ - calico-upgrade=calico-upgrade-amd64.tar.gz - charm attach cs:~"$NAMESPACE"/"$CHARM" --channel unpublished \ - calico-upgrade-arm64=calico-upgrade-arm64.tar.gz -} - - -function generate::resource::argument -{ - py_script=" -import sys -import json -resources_json = json.load(sys.stdin) -resource_map = [] -for item in resources_json: - resource_map.append(f\"--resource {item['Name']}-{item['Revision']}\") - -print(' '.join(resource_map)) -" - charm list-resources cs:~"$NAMESPACE"/"$CHARM" --channel unpublished --format json | env python3 -c "$py_script" -} - -URL=$(charm push "$CHARM_BUILD_DIR"/builds/"$CHARM"/. 
cs:~"$NAMESPACE"/"$CHARM" | yq r - url) -generate::attachments - -if [ "$CHANNEL" != unpublished ]; then - charm release "$URL" --channel "$CHANNEL" $(generate::resource::argument) -fi diff --git a/calico/templates/calico-node.service b/calico/templates/calico-node.service index 214ea7a..bbac1c0 100644 --- a/calico/templates/calico-node.service +++ b/calico/templates/calico-node.service @@ -21,6 +21,7 @@ ExecStart=/usr/local/sbin/charm-env --charm calico conctl run \ --env ETCD_KEY_FILE={{ etcd_key_path }} \ --env NODENAME={{ nodename }} \ --env IP={{ ip }} \ + --env KUBECONFIG=/opt/calicoctl/kubeconfig \ {% if ipv4 == "none" -%} --env CALICO_ROUTER_ID="hash" \ {% endif -%} diff --git a/kubernetes-master/templates/cdk.master.auth-webhook-secret.yaml b/calico/templates/cdk.auth-webhook-secret.yaml similarity index 100% rename from kubernetes-master/templates/cdk.master.auth-webhook-secret.yaml rename to calico/templates/cdk.auth-webhook-secret.yaml diff --git a/calico/templates/policy-controller.yaml b/calico/templates/policy-controller.yaml index 7611de0..c064f94 100644 --- a/calico/templates/policy-controller.yaml +++ b/calico/templates/policy-controller.yaml @@ -104,20 +104,131 @@ apiVersion: rbac.authorization.k8s.io/v1 metadata: name: calico-node rules: - - apiGroups: - - "" + # The CNI plugin needs to get pods, nodes, and namespaces. + - apiGroups: [""] resources: - pods - nodes - namespaces verbs: - get - - apiGroups: - - "" + - apiGroups: [""] + resources: + - endpoints + - services + verbs: + # Used to discover service IPs for advertisement. + - watch + - list + # Used to discover Typhas. + - get + - apiGroups: [""] resources: - nodes/status verbs: + # Needed for clearing NodeNetworkUnavailable flag. - patch + # Calico stores some configuration information in node annotations. + - update + # Watch for changes to Kubernetes NetworkPolicies. 
+ - apiGroups: ["networking.k8s.io"] + resources: + - networkpolicies + verbs: + - watch + - list + # Used by Calico for policy information. + - apiGroups: [""] + resources: + - pods + - namespaces + - serviceaccounts + verbs: + - list + - watch + # The CNI plugin patches pods/status. + - apiGroups: [""] + resources: + - pods/status + verbs: + - patch + # Calico monitors various CRDs for config. + - apiGroups: ["crd.projectcalico.org"] + resources: + - globalfelixconfigs + - felixconfigurations + - bgppeers + - globalbgpconfigs + - bgpconfigurations + - ippools + - ipamblocks + - globalnetworkpolicies + - globalnetworksets + - networkpolicies + - networksets + - clusterinformations + - hostendpoints + - blockaffinities + verbs: + - get + - list + - watch + # Calico must create and update some CRDs on startup. + - apiGroups: ["crd.projectcalico.org"] + resources: + - ippools + - felixconfigurations + - clusterinformations + verbs: + - create + - update + # Calico stores some configuration information on the node. + - apiGroups: [""] + resources: + - nodes + verbs: + - get + - list + - watch + # These permissions are only requried for upgrade from v2.6, and can + # be removed after upgrade or on fresh installations. + - apiGroups: ["crd.projectcalico.org"] + resources: + - bgpconfigurations + - bgppeers + verbs: + - create + - update + # These permissions are required for Calico CNI to perform IPAM allocations. + - apiGroups: ["crd.projectcalico.org"] + resources: + - blockaffinities + - ipamblocks + - ipamhandles + verbs: + - get + - list + - create + - update + - delete + - apiGroups: ["crd.projectcalico.org"] + resources: + - ipamconfigs + verbs: + - get + # Block affinities must also be watchable by confd for route aggregation. + - apiGroups: ["crd.projectcalico.org"] + resources: + - blockaffinities + verbs: + - watch + # The Calico IPAM migration needs to get daemonsets. 
These permissions can be + # removed if not upgrading from an installation using host-local IPAM. + - apiGroups: ["apps"] + resources: + - daemonsets + verbs: + - get --- apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding @@ -128,9 +239,8 @@ roleRef: kind: ClusterRole name: calico-node subjects: -- kind: ServiceAccount - name: calico-node - namespace: kube-system +- kind: User + name: system:calico-node --- apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole diff --git a/calico/tests/00-setup b/calico/tests/00-setup deleted file mode 100755 index f0616a5..0000000 --- a/calico/tests/00-setup +++ /dev/null @@ -1,5 +0,0 @@ -#!/bin/bash - -sudo add-apt-repository ppa:juju/stable -y -sudo apt-get update -sudo apt-get install amulet python-requests -y diff --git a/calico/tests/10-deploy b/calico/tests/10-deploy deleted file mode 100755 index dd2c51f..0000000 --- a/calico/tests/10-deploy +++ /dev/null @@ -1,31 +0,0 @@ -#!/usr/bin/python3 - -import amulet -import requests -import unittest - - -class TestCharm(unittest.TestCase): - def setUp(self): - self.d = amulet.Deployment() - - self.d.add('layer-calico-cni') - self.d.expose('layer-calico-cni') - - self.d.setup(timeout=900) - self.d.sentry.wait() - - self.unit = self.d.sentry['layer-calico-cni'][0] - - def test_service(self): - # test we can access over http - page = requests.get('http://{}'.format(self.unit.info['public-address'])) - self.assertEqual(page.status_code, 200) - # Now you can use self.d.sentry[SERVICE][UNIT] to address each of the units and perform - # more in-depth steps. 
Each self.d.sentry[SERVICE][UNIT] has the following methods: - # - .info - An array of the information of that unit from Juju - # - .file(PATH) - Get the details of a file on that unit - # - .file_contents(PATH) - Get plain text output of PATH file from that unit - # - .directory(PATH) - Get details of directory - # - .directory_contents(PATH) - List files and folders in PATH on that unit - # - .relation(relation, service:rel) - Get relation data from return service diff --git a/calico/tests/data/bird-operator/config.yaml b/calico/tests/data/bird-operator/config.yaml new file mode 100644 index 0000000..45e04fc --- /dev/null +++ b/calico/tests/data/bird-operator/config.yaml @@ -0,0 +1,9 @@ +options: + as-number: + type: int + description: AS Number + default: 64512 + bgp-peers: + type: string + description: BGP peers + default: "[]" diff --git a/calico/tests/data/bird-operator/metadata.yaml b/calico/tests/data/bird-operator/metadata.yaml new file mode 100644 index 0000000..9bacbcc --- /dev/null +++ b/calico/tests/data/bird-operator/metadata.yaml @@ -0,0 +1,7 @@ +name: bird +description: | + Test charm running BIRD +summary: | + Test charm running BIRD +series: +- focal diff --git a/calico/tests/data/bird-operator/requirements.txt b/calico/tests/data/bird-operator/requirements.txt new file mode 100644 index 0000000..2d81d3b --- /dev/null +++ b/calico/tests/data/bird-operator/requirements.txt @@ -0,0 +1 @@ +ops diff --git a/calico/tests/data/bird-operator/src/charm.py b/calico/tests/data/bird-operator/src/charm.py new file mode 100755 index 0000000..89d936c --- /dev/null +++ b/calico/tests/data/bird-operator/src/charm.py @@ -0,0 +1,60 @@ +#!/usr/bin/env python3 +import logging + +from ops.charm import CharmBase +from ops.main import main +from ops.model import ActiveStatus, MaintenanceStatus +from subprocess import check_call +import yaml + +log = logging.getLogger(__name__) +bird_config_base = """ +log syslog all; +debug protocols all; + +protocol kernel { + persist; 
+ scan time 20; + export all; +} + +protocol device { + scan time 10; +} +""" +bird_config_peer = """ +protocol bgp { + import all; + local as %s; + neighbor %s as %s; + direct; +} +""" + + +class BirdCharm(CharmBase): + def __init__(self, *args): + super().__init__(*args) + self.framework.observe(self.on.install, self.install) + self.framework.observe(self.on.config_changed, self.config_changed) + + def install(self, event): + self.unit.status = MaintenanceStatus("Installing BIRD") + check_call(['apt-get', 'update']) + check_call(['apt-get', 'install', '-y', 'bird']) + + def config_changed(self, event): + self.unit.status = MaintenanceStatus("Configuring BIRD") + as_number = self.config['as-number'] + bird_config = "\n".join([bird_config_base] + [ + bird_config_peer % (as_number, peer['address'], peer['as-number']) + for peer in yaml.safe_load(self.config['bgp-peers']) + ]) + with open('/etc/bird/bird.conf', 'w') as f: + f.write(bird_config) + check_call(['systemctl', 'reload', 'bird']) + self.unit.status = ActiveStatus() + + +if __name__ == "__main__": + main(BirdCharm) diff --git a/calico/tests/data/bundle.yaml b/calico/tests/data/bundle.yaml new file mode 100644 index 0000000..1ae35f9 --- /dev/null +++ b/calico/tests/data/bundle.yaml @@ -0,0 +1,80 @@ +description: A minimal two-machine Kubernetes cluster, appropriate for development. 
+series: focal +machines: + '0': + constraints: cores=2 mem=4G root-disk=16G + series: focal + '1': + constraints: cores=4 mem=4G root-disk=16G + series: focal +services: + containerd: + charm: cs:~containers/containerd + channel: edge + easyrsa: + charm: cs:~containers/easyrsa + channel: edge + num_units: 1 + to: + - '1' + etcd: + charm: cs:~containers/etcd + channel: edge + num_units: 1 + options: + channel: 3.4/stable + to: + - '0' + calico: + charm: {{calico_charm}} + resources: + calico: {{resource_path}}/calico-amd64.tar.gz + calico-arm64: {{resource_path}}/calico-arm64.tar.gz + calico-upgrade: {{resource_path}}/calico-upgrade-amd64.tar.gz + calico-upgrade-arm64: {{resource_path}}/calico-upgrade-arm64.tar.gz + calico-node-image: {{resource_path}}/calico-node-image.tar.gz + options: + ignore-loose-rpf: true + vxlan: Always + kubernetes-master: + charm: cs:~containers/kubernetes-master + channel: edge + constraints: cores=2 mem=4G root-disk=16G + expose: true + num_units: 1 + options: + channel: 1.22/edge + to: + - '0' + kubernetes-worker: + charm: cs:~containers/kubernetes-worker + channel: edge + constraints: cores=4 mem=4G root-disk=16G + expose: true + num_units: 1 + options: + channel: 1.22/edge + to: + - '1' +relations: +- - kubernetes-master:kube-control + - kubernetes-worker:kube-control +- - kubernetes-master:certificates + - easyrsa:client +- - kubernetes-master:etcd + - etcd:db +- - kubernetes-worker:certificates + - easyrsa:client +- - etcd:certificates + - easyrsa:client +- - calico:etcd + - etcd:db +- - calico:cni + - kubernetes-master:cni +- - calico:cni + - kubernetes-worker:cni +- - containerd:containerd + - kubernetes-worker:container-runtime +- - containerd:containerd + - kubernetes-master:container-runtime + diff --git a/kata/tests/conftest.py b/calico/tests/functional/conftest.py similarity index 100% rename from kata/tests/conftest.py rename to calico/tests/functional/conftest.py diff --git a/calico/tests/functional/test_k8s_common.py 
b/calico/tests/functional/test_k8s_common.py new file mode 100644 index 0000000..4b867e6 --- /dev/null +++ b/calico/tests/functional/test_k8s_common.py @@ -0,0 +1,90 @@ +from functools import partial + +import pytest +from unittest import mock +from charms.layer import kubernetes_common + + +class TestCreateKubeConfig: + @pytest.fixture(autouse=True) + def _files(self, tmp_path): + self.cfg_file = tmp_path / "config" + self.ca_file = tmp_path / "ca.crt" + self.ca_file.write_text("foo") + self.ckc = partial( + kubernetes_common.create_kubeconfig, + self.cfg_file, + "server", + self.ca_file, + ) + + def test_guard_clauses(self): + with pytest.raises(ValueError): + self.ckc() + assert not self.cfg_file.exists() + with pytest.raises(ValueError): + self.ckc(token="token", password="password") + assert not self.cfg_file.exists() + with pytest.raises(ValueError): + self.ckc(key="key") + assert not self.cfg_file.exists() + + def test_file_creation(self): + self.ckc(password="password") + assert self.cfg_file.exists() + cfg_data_1 = self.cfg_file.read_text() + assert cfg_data_1 + + def test_idempotency(self): + self.ckc(password="password") + cfg_data_1 = self.cfg_file.read_text() + self.ckc(password="password") + cfg_data_2 = self.cfg_file.read_text() + # Verify that calling w/ the same data keeps the same file contents. 
+ assert cfg_data_2 == cfg_data_1 + + def test_efficient_updates(self): + self.ckc(password="old_password") + cfg_stat_1 = self.cfg_file.stat() + self.ckc(password="old_password") + cfg_stat_2 = self.cfg_file.stat() + self.ckc(password="new_password") + cfg_stat_3 = self.cfg_file.stat() + # Verify that calling with the same data doesn't + # modify the file at all, but that new data does + assert cfg_stat_1.st_mtime == cfg_stat_2.st_mtime < cfg_stat_3.st_mtime + + def test_aws_iam(self): + self.ckc(password="password", aws_iam_cluster_id="aws-cluster") + assert self.cfg_file.exists() + cfg_data_1 = self.cfg_file.read_text() + assert "aws-cluster" in cfg_data_1 + + def test_keystone(self): + self.ckc(password="password", keystone=True) + assert self.cfg_file.exists() + cfg_data_1 = self.cfg_file.read_text() + assert "keystone-user" in cfg_data_1 + assert "exec" in cfg_data_1 + + def test_atomic_updates(self): + self.ckc(password="old_password") + with self.cfg_file.open("rt") as f: + # Perform a write in the middle of reading + self.ckc(password="new_password") + # Read data from existing FH after new data was written + cfg_data_1 = f.read() + # Read updated data + cfg_data_2 = self.cfg_file.read_text() + # Verify that the in-progress read didn't get any of the new data + assert cfg_data_1 != cfg_data_2 + assert "old_password" in cfg_data_1 + assert "new_password" in cfg_data_2 + + @mock.patch("charmhelpers.core.hookenv.network_get", autospec=True) + def test_get_ingress_address(self, network_get): + network_get.return_value = {"ingress-addresses": ["1.2.3.4", "5.6.7.8"]} + ingress = kubernetes_common.get_ingress_address("endpoint-name") + assert ingress == "1.2.3.4" + ingress = kubernetes_common.get_ingress_address("endpoint-name", ["1.2.3.4"]) + assert ingress == "5.6.7.8" diff --git a/calico/tests/integration/conftest.py b/calico/tests/integration/conftest.py new file mode 100644 index 0000000..6bd4784 --- /dev/null +++ b/calico/tests/integration/conftest.py @@ 
-0,0 +1,36 @@ +from kubernetes_wrapper import Kubernetes +import logging +import pytest +import random +import string + +log = logging.getLogger(__name__) + + +@pytest.fixture(scope="module") +@pytest.mark.asyncio +async def kubernetes(ops_test): + kubeconfig_path = ops_test.tmp_path / "kubeconfig" + retcode, stdout, stderr = await ops_test.run( + "juju", "scp", "kubernetes-master/leader:config", kubeconfig_path + ) + if retcode != 0: + log.error(f"retcode: {retcode}") + log.error(f"stdout:\n{stdout.strip()}") + log.error(f"stderr:\n{stderr.strip()}") + pytest.fail("Failed to copy kubeconfig from kubernetes-master") + namespace = "test-calico-integration-" + "".join( + random.choice(string.ascii_lowercase + string.digits) + for _ in range(5) + ) + kubernetes = Kubernetes(namespace, kubeconfig=str(kubeconfig_path)) + namespace_object = { + 'apiVersion': 'v1', + 'kind': 'Namespace', + 'metadata': { + 'name': namespace + } + } + kubernetes.apply_object(namespace_object) + yield kubernetes + kubernetes.delete_object(namespace_object) diff --git a/calico/tests/integration/test_calico_integration.py b/calico/tests/integration/test_calico_integration.py new file mode 100644 index 0000000..03bd3d8 --- /dev/null +++ b/calico/tests/integration/test_calico_integration.py @@ -0,0 +1,139 @@ +import logging +import os +import pytest +import time +import yaml + +log = logging.getLogger(__name__) + + +@pytest.mark.abort_on_fail +async def test_build_and_deploy(ops_test): + resource_path = ops_test.tmp_path / "charm-resources" + resource_path.mkdir() + resource_build_script = os.path.abspath("./build-calico-resource.sh") + log.info("Building charm resources") + retcode, stdout, stderr = await ops_test.run( + resource_build_script, + cwd=resource_path + ) + if retcode != 0: + log.error(f"retcode: {retcode}") + log.error(f"stdout:\n{stdout.strip()}") + log.error(f"stderr:\n{stderr.strip()}") + pytest.fail("Failed to build charm resources") + bundle = ops_test.render_bundle( + 
"tests/data/bundle.yaml", + calico_charm=await ops_test.build_charm("."), + resource_path=resource_path + ) + # deploy with Juju CLI because libjuju does not support local resource + # paths in bundles + log.info("Deploying bundle") + retcode, stdout, stderr = await ops_test.run( + "juju", "deploy", "-m", ops_test.model_full_name, bundle + ) + if retcode != 0: + log.error(f"retcode: {retcode}") + log.error(f"stdout:\n{stdout.strip()}") + log.error(f"stderr:\n{stderr.strip()}") + pytest.fail("Failed to deploy bundle") + await ops_test.model.wait_for_idle(wait_for_active=True, timeout=60 * 60) + + +async def test_bgp_service_ip_advertisement(ops_test, kubernetes): + # deploy a test service in k8s (nginx) + deployment = { + 'apiVersion': 'apps/v1', + 'kind': 'Deployment', + 'metadata': { + 'name': 'nginx' + }, + 'spec': { + 'selector': { + 'matchLabels': { + 'app': 'nginx' + } + }, + 'template': { + 'metadata': { + 'labels': { + 'app': 'nginx' + } + }, + 'spec': { + 'containers': [{ + 'name': 'nginx', + 'image': 'rocks.canonical.com/cdk/nginx:1.18', + 'ports': [{ + 'containerPort': 80 + }] + }] + } + } + } + } + service = { + 'apiVersion': 'v1', + 'kind': 'Service', + 'metadata': { + 'name': 'nginx' + }, + 'spec': { + 'selector': { + 'app': 'nginx' + }, + 'ports': [{ + 'protocol': 'TCP', + 'port': 80 + }] + } + + } + kubernetes.apply_object(deployment) + kubernetes.apply_object(service) + service_ip = kubernetes.read_object(service).spec.cluster_ip + + # build and deploy bird charm + bird_charm = await ops_test.build_charm("tests/data/bird-operator") + await ops_test.model.deploy(bird_charm) + await ops_test.model.wait_for_idle(wait_for_active=True, timeout=60 * 10) + + # configure calico to peer with bird + master_config = await ops_test.model.applications['kubernetes-master'].get_config() + bird_app = ops_test.model.applications['bird'] + calico_app = ops_test.model.applications['calico'] + await calico_app.set_config({ + 'bgp-service-cluster-ips': 
master_config['service-cidr']['value'], + 'global-bgp-peers': yaml.dump([ + {'address': unit.public_address, 'as-number': 64512} + for unit in bird_app.units + ]) + }) + + # configure bird to peer with calico + await bird_app.set_config({ + 'bgp-peers': yaml.dump([ + {'address': unit.public_address, 'as-number': 64512} + for unit in calico_app.units + ]) + }) + + # verify test service is reachable from bird + deadline = time.time() + 60 * 10 + while time.time() < deadline: + retcode, stdout, stderr = await ops_test.run( + 'juju', 'ssh', '-m', ops_test.model_full_name, 'bird/leader', + 'curl', '--connect-timeout', '10', service_ip + ) + if retcode == 0: + break + else: + pytest.fail("Failed service connection test after BGP config") + + # clean up + await calico_app.set_config({ + 'bgp-service-cluster-ips': '', + 'global-bgp-peers': '[]' + }) + await bird_app.destroy() diff --git a/calico/tests/conftest.py b/calico/tests/unit/conftest.py similarity index 100% rename from calico/tests/conftest.py rename to calico/tests/unit/conftest.py diff --git a/calico/tests/test_calico.py b/calico/tests/unit/test_calico.py similarity index 100% rename from calico/tests/test_calico.py rename to calico/tests/unit/test_calico.py diff --git a/calico/tests/unit/test_k8s_common.py b/calico/tests/unit/test_k8s_common.py new file mode 100644 index 0000000..0dcad31 --- /dev/null +++ b/calico/tests/unit/test_k8s_common.py @@ -0,0 +1,122 @@ +import json +import string +from subprocess import CalledProcessError +from unittest.mock import Mock + +from charms.layer import kubernetes_common as kc + + +def test_token_generator(): + alphanum = string.ascii_letters + string.digits + token = kc.token_generator(10) + assert len(token) == 10 + unknown_chars = set(token) - set(alphanum) + assert not unknown_chars + + +def test_get_secret_names(monkeypatch): + monkeypatch.setattr(kc, "kubectl", Mock()) + kc.kubectl.side_effect = [ + CalledProcessError(1, "none"), + FileNotFoundError, + 
"{}".encode("utf8"), + json.dumps( + { + "items": [ + { + "metadata": {"name": "secret-id"}, + "data": {"username": "dXNlcg=="}, + }, + ], + } + ).encode("utf8"), + ] + assert kc.get_secret_names() == {} + assert kc.get_secret_names() == {} + assert kc.get_secret_names() == {} + assert kc.get_secret_names() == {"user": "secret-id"} + + +def test_generate_rfc1123(): + alphanum = string.ascii_letters + string.digits + token = kc.generate_rfc1123(1000) + assert len(token) == 253 + unknown_chars = set(token) - set(alphanum) + assert not unknown_chars + + +def test_create_secret(monkeypatch): + monkeypatch.setattr(kc, "render", Mock()) + monkeypatch.setattr(kc, "kubectl_manifest", Mock()) + monkeypatch.setattr(kc, "get_secret_names", Mock()) + monkeypatch.setattr(kc, "generate_rfc1123", Mock()) + kc.kubectl_manifest.side_effect = [True, False] + kc.get_secret_names.side_effect = [{"username": "secret-id"}, {}] + kc.generate_rfc1123.return_value = "foo" + assert kc.create_secret("token", "username", "user", "groups") + assert kc.render.call_args[1]["context"] == { + "groups": "Z3JvdXBz", + "password": "dXNlcjo6dG9rZW4=", + "secret_name": "secret-id", + "secret_namespace": "kube-system", + "type": "juju.is/token-auth", + "user": "dXNlcg==", + "username": "dXNlcm5hbWU=", + } + assert not kc.create_secret("token", "username", "user", "groups") + assert kc.render.call_args[1]["context"] == { + "groups": "Z3JvdXBz", + "password": "dXNlcjo6dG9rZW4=", + "secret_name": "auth-user-foo", + "secret_namespace": "kube-system", + "type": "juju.is/token-auth", + "user": "dXNlcg==", + "username": "dXNlcm5hbWU=", + } + + +def test_get_secret_password(monkeypatch): + monkeypatch.setattr(kc, "kubectl", Mock()) + monkeypatch.setattr(kc, "Path", Mock()) + monkeypatch.setattr(kc, "yaml", Mock()) + kc.kubectl.side_effect = [ + CalledProcessError(1, "none"), + CalledProcessError(1, "none"), + CalledProcessError(1, "none"), + CalledProcessError(1, "none"), + CalledProcessError(1, "none"), + 
CalledProcessError(1, "none"), + FileNotFoundError, + json.dumps({}).encode("utf8"), + json.dumps({"items": []}).encode("utf8"), + json.dumps({"items": []}).encode("utf8"), + json.dumps({"items": [{}]}).encode("utf8"), + json.dumps({"items": [{"data": {}}]}).encode("utf8"), + json.dumps( + {"items": [{"data": {"username": "Ym9i", "password": "c2VjcmV0"}}]} + ).encode("utf8"), + json.dumps( + {"items": [{"data": {"username": "dXNlcm5hbWU=", "password": "c2VjcmV0"}}]} + ).encode("utf8"), + ] + kc.yaml.safe_load.side_effect = [ + {}, + {"users": None}, + {"users": []}, + {"users": [{"user": {}}]}, + {"users": [{"user": {"token": "secret"}}]}, + ] + assert kc.get_secret_password("username") is None + assert kc.get_secret_password("admin") is None + assert kc.get_secret_password("admin") is None + assert kc.get_secret_password("admin") is None + assert kc.get_secret_password("admin") is None + assert kc.get_secret_password("admin") == "secret" + assert kc.get_secret_password("username") is None + assert kc.get_secret_password("username") is None + assert kc.get_secret_password("username") is None + assert kc.get_secret_password("username") is None + assert kc.get_secret_password("username") is None + assert kc.get_secret_password("username") is None + assert kc.get_secret_password("username") is None + assert kc.get_secret_password("username") == "secret" diff --git a/calico/tests/validate-wheelhouse.sh b/calico/tests/validate-wheelhouse.sh new file mode 100755 index 0000000..72f7131 --- /dev/null +++ b/calico/tests/validate-wheelhouse.sh @@ -0,0 +1,8 @@ +#!/bin/bash + +build_dir="$(mktemp -d)" +function cleanup { rm -rf "$build_dir"; } +trap cleanup EXIT + +charm build . 
--build-dir "$build_dir" +pip install -f "$build_dir/calico/wheelhouse" --no-index --no-cache-dir "$build_dir"/calico/wheelhouse/* diff --git a/calico/tox.ini b/calico/tox.ini index b8ee144..24b70de 100644 --- a/calico/tox.ini +++ b/calico/tox.ini @@ -1,18 +1,41 @@ +[flake8] +max-line-length = 88 + [tox] skipsdist = True -envlist = lint,py3 +envlist = lint,unit,integration [testenv] -basepython = python3 setenv = PYTHONPATH={toxinidir}:{toxinidir}/lib + PYTHONBREAKPOINT=ipdb.set_trace + +[testenv:unit] deps = pyyaml pytest - flake8 + charms.unit_test ipdb - git+https://github.com/juju-solutions/charms.unit_test/#egg=charms.unit_test -commands = pytest --tb native -s {posargs} +commands = pytest --tb native -s {posargs} {toxinidir}/tests/unit + +[testenv:validate-wheelhouse] +allowlist_externals = {toxinidir}/tests/validate-wheelhouse.sh +commands = {toxinidir}/tests/validate-wheelhouse.sh + +[testenv:integration] +deps = + pytest + pytest-operator + aiohttp + ipdb + git+https://github.com/canonical/kubernetes-rapper@main#egg=kubernetes-wrapper +# tox only passes through the upper-case versions by default, but some +# programs, such as wget or pip, only honor the lower-case versions +passenv = http_proxy https_proxy no_proxy +commands = pytest --tb native --show-capture=no --log-cli-level=INFO -s {posargs} {toxinidir}/tests/integration [testenv:lint] -commands = flake8 {toxinidir}/lib {toxinidir}/reactive {toxinidir}/tests +deps = + flake8 +commands = + flake8 {toxinidir}/reactive {toxinidir}/lib {toxinidir}/tests diff --git a/calico/version b/calico/version index 91808cc..20817dd 100644 --- a/calico/version +++ b/calico/version @@ -1 +1 @@ -0ea81f0c \ No newline at end of file +ccfa68be \ No newline at end of file diff --git a/calico/wheelhouse/charmhelpers-0.20.22.tar.gz b/calico/wheelhouse/charmhelpers-0.20.22.tar.gz deleted file mode 100644 index bd5d222..0000000 Binary files a/calico/wheelhouse/charmhelpers-0.20.22.tar.gz and /dev/null differ diff --git 
a/calico/wheelhouse/charmhelpers-0.20.23.tar.gz b/calico/wheelhouse/charmhelpers-0.20.23.tar.gz new file mode 100644 index 0000000..8fbc8ec Binary files /dev/null and b/calico/wheelhouse/charmhelpers-0.20.23.tar.gz differ diff --git a/calico/wheelhouse/pyaml-20.4.0.tar.gz b/calico/wheelhouse/pyaml-20.4.0.tar.gz deleted file mode 100644 index 0d5fd76..0000000 Binary files a/calico/wheelhouse/pyaml-20.4.0.tar.gz and /dev/null differ diff --git a/calico/wheelhouse/pyaml-21.10.1.tar.gz b/calico/wheelhouse/pyaml-21.10.1.tar.gz new file mode 100644 index 0000000..b19aad3 Binary files /dev/null and b/calico/wheelhouse/pyaml-21.10.1.tar.gz differ diff --git a/containerd/.build.manifest b/containerd/.build.manifest index d57a8eb..82e6908 100644 --- a/containerd/.build.manifest +++ b/containerd/.build.manifest @@ -1,47 +1,47 @@ { "layers": [ { - "branch": "refs/heads/master\nrefs/heads/stable", + "branch": "refs/heads/master", "rev": "fcdcea4e5de3e1556c24e6704607862d0ba00a56", "url": "layer:options" }, { - "branch": "refs/heads/master\nrefs/heads/stable", - "rev": "623e69c7b432456fd4364f6e1835424fd6b5425e", + "branch": "refs/heads/master", + "rev": "a3ff62c32c993d80417f6e093e3ef95e42f62083", "url": "layer:basic" }, { - "branch": "refs/heads/master\nrefs/heads/stable", + "branch": "refs/heads/master", "rev": "527dd64fc4b9a6b0f8d80a3c2c0b865155050275", "url": "layer:debug" }, { - "branch": "refs/heads/master\nrefs/heads/stable", + "branch": "refs/heads/master", "rev": "a7d7b6423db37a47611310039e6ed1929c0a2eab", "url": "layer:status" }, { - "branch": "refs/heads/master\nrefs/heads/stable", + "branch": "refs/heads/master", "rev": "be187bfe2ed511fc7ee29bf25f7374a2d6d34b2d", "url": "layer:container-runtime-common" }, { - "branch": "refs/heads/stable", - "rev": "8a4e635092c98cef3eecd27063c7b2ae030e740e", + "branch": "refs/heads/master", + "rev": "e87057806fcbeb67d222b14e1ce2e4fafdf58f9d", "url": "containerd" }, { - "branch": "refs/heads/master\nrefs/heads/stable", + "branch": 
"refs/heads/master", "rev": "6f927f10b97f45c566481cf57a29d433f17373e1", "url": "interface:container-runtime" }, { - "branch": "refs/heads/master\nrefs/heads/stable", + "branch": "refs/heads/master", "rev": "b59ce0c44bc52c789175750ce18b42f76c9a4578", "url": "interface:untrusted-container-runtime" }, { - "branch": "refs/heads/master\nrefs/heads/stable", + "branch": "refs/heads/master", "rev": "365ec9f348ccd561a9ec3e084c826f134676439e", "url": "interface:docker-registry" } @@ -52,6 +52,16 @@ "dynamic", "unchecked" ], + ".github/workflows/main.yaml": [ + "containerd", + "static", + "f413dfd54279707a2dc0ebf6f5e399a1a65170a879ca126c63f1d98f543d0dd7" + ], + ".github/workflows/main.yml": [ + "layer:basic", + "static", + "96a48a981ceb2a96f427a6b5226d2da6d7191981793804055d70a88ca1987473" + ], ".gitignore": [ "containerd", "static", @@ -62,11 +72,6 @@ "static", "ab2c8c5a3ae50ec307e9e19ec30a20d4765161e0cb3bddb66f09c4a1b72b7f71" ], - ".travis/profile-update.yaml": [ - "layer:basic", - "static", - "731e20aa59bf61c024d317ad630e478301a9386ccc0afe56e6c1c09db07ac83b" - ], "LICENSE": [ "containerd", "static", @@ -110,7 +115,7 @@ "config.yaml": [ "containerd", "dynamic", - "93c92f8e530d50a436eab8dd2573a78e2d911aa53e9866b08ac61d6efec7e3f4" + "9b0153cac5602ecd84e860bb784a6477d46ea61b62eb1f0d0946fce06d859c09" ], "copyright": [ "layer:status", @@ -425,7 +430,7 @@ "lib/charms/layer/basic.py": [ "layer:basic", "static", - "3126b5754ad39402ee27e64527044ddd231ed1cd137fcedaffb51e63a635f108" + "98b47134770ed6e4c0b2d4aad73cd5bc200bec84aa9c1c4e075fd70c3222a0c9" ], "lib/charms/layer/container_runtime_common.py": [ "layer:container-runtime-common", @@ -485,7 +490,7 @@ "reactive/containerd.py": [ "containerd", "static", - "ca60ebe176530f379308dda0bba4d193acfcf8ce1e7ec923db6438b8b2f74933" + "4e80ec104f9e8c2bfbf1577a698b1df7fa37ce10907ab2bb71f96b7f672e4639" ], "reactive/status.py": [ "layer:status", @@ -505,7 +510,12 @@ "templates/config.toml": [ "containerd", "static", - 
"d84f9f266929e684c0b0a596704f075d26b97d7a0e43b525364a77dc22d2f320" + "f149f8147f7f7997420c34530a51eafffa71038f1426e3b4e7d7e3fbf89afac3" + ], + "templates/config_v2.toml": [ + "containerd", + "static", + "e842b1318fceef9e839623fe082cc7d6575820331882876330337176a6cbc542" ], "templates/proxy.conf": [ "containerd", @@ -540,12 +550,12 @@ "version": [ "containerd", "dynamic", - "2737d85a96f3fb093896eb885501ad940a695d5b9bb1d0d3816ace9eb68df82e" + "b56954e631fc8006577e9ce5f54fd067e695024061bc82733fb1408f1880a860" ], "wheelhouse.txt": [ "containerd", "dynamic", - "ff85b4195a997d8df2b05ce61b4e943a2fafb9152a7a7c7d112edd723d9e7d3c" + "ae13f54eb8741a216957d0d6c39051a9d88641cb3050331d03742dc895c71959" ], "wheelhouse/Jinja2-2.10.1.tar.gz": [ "layer:basic", @@ -553,7 +563,7 @@ "065c4f02ebe7f7cf559e49ee5a95fb800a9e4528727aec6f24402a5374c65013" ], "wheelhouse/MarkupSafe-1.1.1.tar.gz": [ - "__pip__", + "layer:basic", "dynamic", "29872e92839765e546828bb7754a68c418d927cd064fd4708fab9fe9c8bb116b" ], @@ -567,30 +577,30 @@ "dynamic", "cacecf0baa674d356641f1d406b8bff1d756d739c46b869a54de515d08e6fc9c" ], - "wheelhouse/certifi-2020.12.5.tar.gz": [ + "wheelhouse/certifi-2021.10.8.tar.gz": [ "__pip__", "dynamic", - "1a4995114262bffbc2413b159f2a1a480c969de6e6eb13ee966d470af86af59c" + "78884e7c1d4b00ce3cea67b44566851c4343c120abd683433ce934a68ea58872" ], - "wheelhouse/chardet-4.0.0.tar.gz": [ - "__pip__", - "dynamic", - "0d6f53a15db4120f2b08c94f11e7d93d2c911ee118b6b30a04ec3ee8310179fa" - ], - "wheelhouse/charmhelpers-0.20.21.tar.gz": [ + "wheelhouse/charmhelpers-0.20.23.tar.gz": [ "layer:basic", "dynamic", - "37dd06f9548724d38352d1eaf91216df9167066745774118481d40974599715c" + "59a9776594e91cd3e3e000043f8668b4d7b279422dbb17e320f01dc16385b80e" ], "wheelhouse/charms.reactive-1.4.1.tar.gz": [ "layer:basic", "dynamic", "bba21b4fd40b26c240c9ef2aa10c6fdf73592031c68591da4e7ccc46ca9cb616" ], - "wheelhouse/idna-2.10.tar.gz": [ + "wheelhouse/charset-normalizer-2.0.7.tar.gz": [ "__pip__", "dynamic", - 
"b307872f855b18632ce0c21c5e45be78c0ea7ae4c15c828c20788b26921eb3f6" + "e019de665e2bcf9c2b64e2e5aa025fa991da8720daa3c1138cadd2fd1856aed0" + ], + "wheelhouse/idna-3.3.tar.gz": [ + "__pip__", + "dynamic", + "9d643ff0a55b762d5cdb124b8eaa99c66322e2157b69160bc32796e824360e6d" ], "wheelhouse/netaddr-0.7.19.tar.gz": [ "layer:basic", @@ -607,15 +617,15 @@ "dynamic", "c0a292bd977ef590379a3f05d7b7f65135487b67470f6281289a94e015650ea1" ], - "wheelhouse/pyaml-20.4.0.tar.gz": [ + "wheelhouse/pyaml-21.10.1.tar.gz": [ "__pip__", "dynamic", - "29a5c2a68660a799103d6949167bd6c7953d031449d08802386372de1db6ad71" + "c6519fee13bf06e3bb3f20cacdea8eba9140385a7c2546df5dbae4887f768383" ], - "wheelhouse/requests-2.25.1.tar.gz": [ + "wheelhouse/requests-2.26.0.tar.gz": [ "containerd", "dynamic", - "27973dd4a904a4f13b263a19c866c13b92a39ed1c964655f025f3f8d3d75b804" + "b8aa58f8cf793ffd8782d3d8cb19e66ef36f7aba4353eec859e74678b01b07a7" ], "wheelhouse/setuptools-41.6.0.zip": [ "layer:basic", @@ -627,15 +637,15 @@ "dynamic", "70a4cf5584e966ae92f54a764e6437af992ba42ac4bca7eb37cc5d02b98ec40a" ], - "wheelhouse/six-1.15.0.tar.gz": [ + "wheelhouse/six-1.16.0.tar.gz": [ "__pip__", "dynamic", - "30639c035cdb23534cd4aa2dd52c3bf48f06e5f4a941509c8bafd8ce11080259" + "1e61c37477a1626458e36f7b1d82aa5c9b094fa4802892072e49de9c60c4c926" ], - "wheelhouse/urllib3-1.26.4.tar.gz": [ + "wheelhouse/urllib3-1.26.7.tar.gz": [ "__pip__", "dynamic", - "e7b021f7241115872f92f43c6508082facffbd1c048e3c6e2bb9c2a157e28937" + "4987c65554f7a2dbf30c18fd48778ef124af6fab771a377103da0585e2336ece" ], "wheelhouse/wheel-0.33.6.tar.gz": [ "layer:basic", diff --git a/containerd/.github/workflows/main.yaml b/containerd/.github/workflows/main.yaml new file mode 100644 index 0000000..c2324fe --- /dev/null +++ b/containerd/.github/workflows/main.yaml @@ -0,0 +1,38 @@ +name: Run tests with Tox + +on: [push] + +jobs: + unit-tests: + name: Lint, Unit Tests + runs-on: ubuntu-latest + strategy: + matrix: + python: [3.5, 3.6, 3.7, 3.8, 3.9] + steps: + - 
uses: actions/checkout@v2 + - name: Setup Python + uses: actions/setup-python@v2 + with: + python-version: ${{ matrix.python }} + - name: Install Tox + run: pip install tox + - name: Run Tox + run: tox # Run tox using the version of Python in `PATH` + + integration-tests: + name: Integration test with LXD + runs-on: ubuntu-latest + steps: + - name: Check out code + uses: actions/checkout@v2 + - name: Setup Python + uses: actions/setup-python@v2 + with: + python-version: 3.9 + - name: Setup operator environment + uses: charmed-kubernetes/actions-operator@main + with: + provider: lxd + - name: Run integration test + run: tox -e integration diff --git a/containerd/.github/workflows/main.yml b/containerd/.github/workflows/main.yml new file mode 100644 index 0000000..565bfaf --- /dev/null +++ b/containerd/.github/workflows/main.yml @@ -0,0 +1,50 @@ +name: Test Suite +on: [pull_request] + +jobs: + lint: + name: Lint + runs-on: ubuntu-latest + strategy: + matrix: + python: [3.5, 3.6, 3.7, 3.8, 3.9] + steps: + - name: Check out code + uses: actions/checkout@v2 + - name: Setup Python + uses: actions/setup-python@v2 + with: + python-version: ${{ matrix.python }} + - name: Install Dependencies + run: | + pip install tox + - name: Run lint + run: tox -e flake8 + functional-test: + name: Functional test with LXD + runs-on: ubuntu-latest + timeout-minutes: 360 + steps: + - name: Check out code + uses: actions/checkout@v2 + - name: Setup Python + uses: actions/setup-python@v2 + with: + python-version: 3.8 + - name: Install Dependencies + run: | + pip install tox + - name: Setup operator environment + uses: charmed-kubernetes/actions-operator@master + - name: Run test + run: tox -e func + - name: Show Status + if: ${{ always() }} + run: | + model=$(juju models --format yaml|grep "^- name:.*zaza"|cut -f2 -d/); + juju status -m "$model" + - name: Show Error Logs + if: ${{ always() }} + run: | + model=$(juju models --format yaml|grep "^- name:.*zaza"|cut -f2 -d/); + juju debug-log -m 
"$model" --replay --no-tail --level ERROR diff --git a/containerd/.travis/profile-update.yaml b/containerd/.travis/profile-update.yaml deleted file mode 100644 index 57f96eb..0000000 --- a/containerd/.travis/profile-update.yaml +++ /dev/null @@ -1,12 +0,0 @@ -config: {} -description: Default LXD profile - updated -devices: - eth0: - name: eth0 - parent: lxdbr0 - nictype: bridged - type: nic - root: - path: / - pool: default - type: disk diff --git a/containerd/config.yaml b/containerd/config.yaml index 2356a3b..48ec703 100644 --- a/containerd/config.yaml +++ b/containerd/config.yaml @@ -84,3 +84,41 @@ addresses) which should be accessed directly, rather than through the proxy defined in http_proxy or https_proxy. Must be less than 2023 characters long. + "config_version": + "type": "string" + "default": "v1" + "description": | + (Use carefully, v2 is only tested for nvidia gpu operator) + Use value "v2" for this config parameter to enable new configuration format. + Config file is parsed as version 1 by default. + Version 2 uses long plugin names, i.e. "io.containerd.grpc.v1.cri" vs "cri". + "nvidia_apt_key_urls": + "type": "string" + "default": | + https://nvidia.github.io/nvidia-container-runtime/gpgkey + https://developer.download.nvidia.com/compute/cuda/repos/{id}{version_id_no_dot}/x86_64/7fa2af80.pub + "description": | + Space-separated list of APT GPG key URLs to add when using Nvidia GPUs. + + Supported template options: + {id}: OS release ID, e.g. "ubuntu" + {version_id}: OS release version ID, e.g. "20.04" + {version_id_no_dot}: OS release version ID with no dot, e.g. 
"2004" + "nvidia_apt_sources": + "type": "string" + "default": | + deb https://nvidia.github.io/libnvidia-container/{id}{version_id}/$(ARCH) / + deb https://nvidia.github.io/nvidia-container-runtime/{id}{version_id}/$(ARCH) / + deb http://developer.download.nvidia.com/compute/cuda/repos/{id}{version_id_no_dot}/x86_64 / + "description": | + Newline-separated list of APT sources to add when using Nvidia GPUs. + + Supported template options: + {id}: OS release ID, e.g. "ubuntu" + {version_id}: OS release version ID, e.g. "20.04" + {version_id_no_dot}: OS release version ID with no dot, e.g. "2004" + "nvidia_apt_packages": + "type": "string" + "default": "cuda-drivers nvidia-container-runtime" + "description": | + Space-separated list of APT packages to install when using Nvidia GPUs. diff --git a/containerd/lib/charms/layer/basic.py b/containerd/lib/charms/layer/basic.py index 7507203..bbdd074 100644 --- a/containerd/lib/charms/layer/basic.py +++ b/containerd/lib/charms/layer/basic.py @@ -199,7 +199,13 @@ def bootstrap_charm_deps(): # a set so that we can ignore the pre-install packages and let pip # choose the best version in case there are multiple from layer # conflicts) - pkgs = _load_wheelhouse_versions().keys() - set(pre_install_pkgs) + _versions = _load_wheelhouse_versions() + _pkgs = _versions.keys() - set(pre_install_pkgs) + # add back the versions such that each package in pkgs is + # ==. + # This ensures that pip 20.3.4+ will install the packages from the + # wheelhouse without (erroneously) flagging an error. + pkgs = _add_back_versions(_pkgs, _versions) reinstall_flag = '--force-reinstall' if not cfg.get('use_venv', True) and pre_eoan: reinstall_flag = '--ignore-installed' @@ -278,6 +284,55 @@ def _load_wheelhouse_versions(): return versions +def _add_back_versions(pkgs, versions): + """Add back the version strings to each of the packages. + + The versions are LooseVersion() from _load_wheelhouse_versions(). 
This + function strips the ".zip" or ".tar.gz" from the end of the version string + and adds it back to the package in the form of == + + If a package name is not a key in the versions dictionary, then it is + returned in the list unchanged. + + :param pkgs: A list of package names + :type pkgs: List[str] + :param versions: A map of package to LooseVersion + :type versions: Dict[str, LooseVersion] + :returns: A list of (maybe) versioned packages + :rtype: List[str] + """ + def _strip_ext(s): + """Strip an extension (if it exists) from the string + + :param s: the string to strip an extension off if it exists + :type s: str + :returns: string without an extension of .zip or .tar.gz + :rtype: str + """ + for ending in [".zip", ".tar.gz"]: + if s.endswith(ending): + return s[:-len(ending)] + return s + + def _maybe_add_version(pkg): + """Maybe add back the version number to a package if it exists. + + Adds the version number, if the package exists in the lexically + captured `versions` dictionary, in the form ==. Strips + the extension if it exists. + + :param pkg: the package name to (maybe) add the version number to. 
+ :type pkg: str + """ + try: + return "{}=={}".format(pkg, _strip_ext(str(versions[pkg]))) + except KeyError: + pass + return pkg + + return [_maybe_add_version(pkg) for pkg in pkgs] + + def _update_if_newer(pip, pkgs): installed = _load_installed_versions(pip) wheelhouse = _load_wheelhouse_versions() diff --git a/containerd/reactive/containerd.py b/containerd/reactive/containerd.py index 8691575..f971c3d 100644 --- a/containerd/reactive/containerd.py +++ b/containerd/reactive/containerd.py @@ -18,7 +18,8 @@ from charms.reactive import ( set_state, is_state, remove_state, - endpoint_from_flag + endpoint_from_flag, + register_trigger ) from charms.layer import containerd, status @@ -59,10 +60,18 @@ DB = unitdata.kv() CONTAINERD_PACKAGE = 'containerd' -NVIDIA_PACKAGES = [ - 'cuda-drivers', - 'nvidia-container-runtime', -] +register_trigger( + when='config.changed.nvidia_apt_key_urls', + clear_flag='containerd.nvidia.ready' +) +register_trigger( + when='config.changed.nvidia_apt_sources', + clear_flag='containerd.nvidia.ready' +) +register_trigger( + when='config.changed.nvidia_apt_packages', + clear_flag='containerd.nvidia.ready' +) def _check_containerd(): @@ -196,6 +205,22 @@ def populate_host_for_custom_registries(custom_registries): return custom_registries +def insert_docker_io_to_custom_registries(custom_registries): + """ + Ensure the default docker.io registry exists. + + Also gives a way for configuration to override the url for it. + If a docker.io host entry doesn't exist, we'll add one. 
+ """ + if isinstance(custom_registries, list): + if not any(d.get('host') == 'docker.io' for d in custom_registries): + custom_registries.insert(0, { + "host": "docker.io", + "url": "https://registry-1.docker.io" + }) + return custom_registries + + def merge_custom_registries(config_directory, custom_registries, old_custom_registries): """ @@ -210,6 +235,7 @@ def merge_custom_registries(config_directory, custom_registries, registries += json.loads(custom_registries) # json string already converted to python list here registries = populate_host_for_custom_registries(registries) + registries = insert_docker_io_to_custom_registries(registries) old_registries = [] if (old_custom_registries): old_registries += json.loads(old_custom_registries) @@ -246,6 +272,16 @@ def upgrade_charm(): # Re-render config in case the template has changed in the new charm. config_changed() + # Clean up old nvidia sources.list.d files + old_source_files = [ + '/etc/apt/sources.list.d/nvidia-container-runtime.list', + '/etc/apt/sources.list.d/cuda.list' + ] + for source_file in old_source_files: + if os.path.exists(source_file): + os.remove(source_file) + remove_state('containerd.nvidia.ready') + @when_not('containerd.br_netfilter.enabled') def enable_br_netfilter_module(): @@ -345,48 +381,38 @@ def configure_nvidia(): status.maintenance('Installing Nvidia drivers.') dist = host.lsb_release() - release = '{}{}'.format( - dist['DISTRIB_ID'].lower(), - dist['DISTRIB_RELEASE'] - ) + os_release_id = dist['DISTRIB_ID'].lower() + os_release_version_id = dist['DISTRIB_RELEASE'] + os_release_version_id_no_dot = os_release_version_id.replace('.', '') proxies = { "http": config('http_proxy'), "https": config('https_proxy') } - ncr_gpg_key = requests.get( - 'https://nvidia.github.io/nvidia-container-runtime/gpgkey', proxies=proxies).text - import_key(ncr_gpg_key) - with open( - '/etc/apt/sources.list.d/nvidia-container-runtime.list', 'w' - ) as f: - f.write( - 'deb ' - 
'https://nvidia.github.io/libnvidia-container/{}/$(ARCH) /\n' - .format(release) - ) - f.write( - 'deb ' - 'https://nvidia.github.io/nvidia-container-runtime/{}/$(ARCH) /\n' - .format(release) + key_urls = config('nvidia_apt_key_urls').split() + for key_url in key_urls: + formatted_key_url = key_url.format( + id=os_release_id, + version_id=os_release_version_id, + version_id_no_dot=os_release_version_id_no_dot ) + gpg_key = requests.get(formatted_key_url, proxies=proxies).text + import_key(gpg_key) - cuda_gpg_key = requests.get( - 'https://developer.download.nvidia.com/' - 'compute/cuda/repos/{}/x86_64/7fa2af80.pub' - .format(release.replace('.', '')), proxies=proxies - ).text - import_key(cuda_gpg_key) - with open('/etc/apt/sources.list.d/cuda.list', 'w') as f: - f.write( - 'deb ' - 'http://developer.download.nvidia.com/' - 'compute/cuda/repos/{}/x86_64 /\n' - .format(release.replace('.', '')) + sources = config('nvidia_apt_sources').splitlines() + formatted_sources = [ + source.format( + id=os_release_id, + version_id=os_release_version_id, + version_id_no_dot=os_release_version_id_no_dot ) + for source in sources + ] + with open('/etc/apt/sources.list.d/nvidia.list', 'w') as f: + f.write('\n'.join(formatted_sources)) apt_update() - - apt_install(NVIDIA_PACKAGES, fatal=True) + packages = config('nvidia_apt_packages').split() + apt_install(packages, fatal=True) set_state('containerd.nvidia.ready') config_changed() @@ -406,11 +432,11 @@ def purge_containerd(): apt_purge(CONTAINERD_PACKAGE, fatal=True) if is_state('containerd.nvidia.ready'): - apt_purge(NVIDIA_PACKAGES, fatal=True) + nvidia_packages = config('nvidia_apt_packages').split() + apt_purge(nvidia_packages, fatal=True) sources = [ - '/etc/apt/sources.list.d/cuda.list', - '/etc/apt/sources.list.d/nvidia-container-runtime.list' + '/etc/apt/sources.list.d/nvidia.list' ] for f in sources: @@ -450,6 +476,10 @@ def config_changed(): # Create "dumb" context based on Config to avoid triggering config.changed 
context = dict(config()) + if context['config_version'] == "v2": + template_config = "config_v2.toml" + else: + template_config = "config.toml" config_file = 'config.toml' config_directory = '/etc/containerd' @@ -497,7 +527,7 @@ def config_changed(): context['runtime'] = 'runc' render( - config_file, + template_config, os.path.join(config_directory, config_file), context ) diff --git a/containerd/templates/config.toml b/containerd/templates/config.toml index 69064bc..edf1667 100644 --- a/containerd/templates/config.toml +++ b/containerd/templates/config.toml @@ -58,8 +58,6 @@ oom_score = 0 conf_template = "" [plugins.cri.registry] [plugins.cri.registry.mirrors] - [plugins.cri.registry.mirrors."docker.io"] - endpoint = ["https://registry-1.docker.io"] {% if custom_registries -%} {% for registry in custom_registries -%} {% if registry.host -%} @@ -111,3 +109,4 @@ oom_score = 0 mutation_threshold = 100 schedule_delay = "0s" startup_delay = "100ms" + diff --git a/containerd/templates/config_v2.toml b/containerd/templates/config_v2.toml new file mode 100644 index 0000000..489144e --- /dev/null +++ b/containerd/templates/config_v2.toml @@ -0,0 +1,112 @@ +root = "/var/lib/containerd" +state = "/run/containerd" +oom_score = 0 +version = 2 + +[grpc] + address = "/run/containerd/containerd.sock" + uid = 0 + gid = 0 + max_recv_message_size = 16777216 + max_send_message_size = 16777216 + +[debug] + address = "" + uid = 0 + gid = 0 + level = "" + +[metrics] + address = "" + grpc_histogram = false + +[cgroup] + path = "" + +[plugins] + [plugins."io.containerd.monitor.v1.cgroups"] + no_prometheus = false + [plugins."io.containerd.grpc.v1.cri"] + stream_server_address = "127.0.0.1" + stream_server_port = "0" + enable_selinux = false + sandbox_image = "{{ sandbox_image }}" + stats_collect_period = 10 + systemd_cgroup = false + enable_tls_streaming = false + max_container_log_line_size = 16384 + [plugins."io.containerd.grpc.v1.cri".containerd] + no_pivot = false + {% if untrusted %} 
+ [plugins."io.containerd.grpc.v1.cri".containerd.untrusted_workload_runtime] + runtime_type= "io.containerd.{{ untrusted_name }}.v2" + {% endif %} + [plugins."io.containerd.grpc.v1.cri".containerd.runtimes] + [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc] + runtime_type = "io.containerd.runc.v1" + {% if untrusted %} + [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.{{ untrusted_name }}] + runtime_type= "io.containerd.{{ untrusted_name }}.v2" + [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.{{ untrusted_name }}.options] + Runtime = "{{ untrusted_binary }}" + RuntimeRoot = "{{ untrusted_path }}" + {% endif %} + [plugins."io.containerd.grpc.v1.cri".cni] + bin_dir = "/opt/cni/bin" + conf_dir = "/etc/cni/net.d" + conf_template = "" + [plugins."io.containerd.grpc.v1.cri".registry] + [plugins."io.containerd.grpc.v1.cri".registry.mirrors] + [plugins."io.containerd.grpc.v1.cri".registry.mirrors."docker.io"] + endpoint = ["https://registry-1.docker.io"] + {% if custom_registries -%} + {% for registry in custom_registries -%} + {% if registry.host -%} + [plugins."io.containerd.grpc.v1.cri".registry.mirrors."{{ registry.host }}"] + {% if registry.url -%} + endpoint = ["{{ registry.url}}"] + {% endif -%} + {% endif -%} + {% endfor -%} + {% endif -%} + {% if custom_registries %} + [plugins."io.containerd.grpc.v1.cri".registry.auths] + {% for registry in custom_registries %} + {% if registry.username and registry.password %} + [plugins."io.containerd.grpc.v1.cri".registry.auths."{{ registry.url }}"] + username = "{{ registry.username }}" + password = "{{ registry.password }}" + {% endif %} + {% endfor %} + [plugins."io.containerd.grpc.v1.cri".registry.configs] + {% for registry in custom_registries %} + {% if registry.ca or registry.cert or registry.key or registry.insecure_skip_verify %} + [plugins."io.containerd.grpc.v1.cri".registry.configs."{{ registry.url }}".tls] + ca_file = "{{ registry.ca if registry.ca else '' }}" + cert_file = "{{ 
registry.cert if registry.cert else '' }}" + key_file = "{{ registry.key if registry.key else '' }}" + insecure_skip_verify = {{ "true" if registry.insecure_skip_verify else "false" }} + {% endif %} + {% endfor %} + {% endif %} + [plugins."io.containerd.grpc.v1.cri".x509_key_pair_streaming] + tls_cert_file = "" + tls_key_file = "" + [plugins."io.containerd.service.v1.diff-service"] + default = ["walking"] + [plugins."io.containerd.runtime.v1.linux"] + shim = "{{ shim }}" + runtime = "{{ runtime }}" + runtime_root = "" + no_shim = false + shim_debug = false + [plugins."io.containerd.internal.v1.opt"] + path = "/opt/containerd" + [plugins."io.containerd.internal.v1.restart"] + interval = "10s" + [plugins."io.containerd.gc.v1.scheduler"] + pause_threshold = 0.02 + deletion_threshold = 0 + mutation_threshold = 100 + schedule_delay = "0s" + startup_delay = "100ms" diff --git a/containerd/version b/containerd/version index 1dea0b1..20817dd 100644 --- a/containerd/version +++ b/containerd/version @@ -1 +1 @@ -e247aeff \ No newline at end of file +ccfa68be \ No newline at end of file diff --git a/containerd/wheelhouse.txt b/containerd/wheelhouse.txt index 3bd771d..b98ac9b 100644 --- a/containerd/wheelhouse.txt +++ b/containerd/wheelhouse.txt @@ -3,9 +3,11 @@ # even with installing setuptools before upgrading pip ends up with pip seeing # the older setuptools at the system level if include_system_packages is true pip>=18.1,<19.0 -# pin Jinja2 and PyYAML to the last versions supporting python 3.4 for trusty +# pin Jinja2, PyYAML and MarkupSafe to the last versions supporting python 3.5 +# for trusty Jinja2<=2.10.1 PyYAML<=5.2 +MarkupSafe<2.0.0 setuptools<42 setuptools-scm<=1.17.0 charmhelpers>=0.4.0,<1.0.0 diff --git a/containerd/wheelhouse/certifi-2020.12.5.tar.gz b/containerd/wheelhouse/certifi-2020.12.5.tar.gz deleted file mode 100644 index 3023d0a..0000000 Binary files a/containerd/wheelhouse/certifi-2020.12.5.tar.gz and /dev/null differ diff --git 
a/containerd/wheelhouse/certifi-2021.10.8.tar.gz b/containerd/wheelhouse/certifi-2021.10.8.tar.gz new file mode 100644 index 0000000..9e1581b Binary files /dev/null and b/containerd/wheelhouse/certifi-2021.10.8.tar.gz differ diff --git a/containerd/wheelhouse/chardet-4.0.0.tar.gz b/containerd/wheelhouse/chardet-4.0.0.tar.gz deleted file mode 100644 index 6bfc4e3..0000000 Binary files a/containerd/wheelhouse/chardet-4.0.0.tar.gz and /dev/null differ diff --git a/containerd/wheelhouse/charmhelpers-0.20.21.tar.gz b/containerd/wheelhouse/charmhelpers-0.20.21.tar.gz deleted file mode 100644 index ca65d07..0000000 Binary files a/containerd/wheelhouse/charmhelpers-0.20.21.tar.gz and /dev/null differ diff --git a/containerd/wheelhouse/charmhelpers-0.20.23.tar.gz b/containerd/wheelhouse/charmhelpers-0.20.23.tar.gz new file mode 100644 index 0000000..8fbc8ec Binary files /dev/null and b/containerd/wheelhouse/charmhelpers-0.20.23.tar.gz differ diff --git a/containerd/wheelhouse/charset-normalizer-2.0.7.tar.gz b/containerd/wheelhouse/charset-normalizer-2.0.7.tar.gz new file mode 100644 index 0000000..61df022 Binary files /dev/null and b/containerd/wheelhouse/charset-normalizer-2.0.7.tar.gz differ diff --git a/containerd/wheelhouse/idna-2.10.tar.gz b/containerd/wheelhouse/idna-2.10.tar.gz deleted file mode 100644 index e9a9e03..0000000 Binary files a/containerd/wheelhouse/idna-2.10.tar.gz and /dev/null differ diff --git a/containerd/wheelhouse/idna-3.3.tar.gz b/containerd/wheelhouse/idna-3.3.tar.gz new file mode 100644 index 0000000..ff2bcbf Binary files /dev/null and b/containerd/wheelhouse/idna-3.3.tar.gz differ diff --git a/containerd/wheelhouse/pyaml-20.4.0.tar.gz b/containerd/wheelhouse/pyaml-20.4.0.tar.gz deleted file mode 100644 index 0d5fd76..0000000 Binary files a/containerd/wheelhouse/pyaml-20.4.0.tar.gz and /dev/null differ diff --git a/containerd/wheelhouse/pyaml-21.10.1.tar.gz b/containerd/wheelhouse/pyaml-21.10.1.tar.gz new file mode 100644 index 0000000..b19aad3 
Binary files /dev/null and b/containerd/wheelhouse/pyaml-21.10.1.tar.gz differ diff --git a/containerd/wheelhouse/requests-2.25.1.tar.gz b/containerd/wheelhouse/requests-2.25.1.tar.gz deleted file mode 100644 index 9dcfcf2..0000000 Binary files a/containerd/wheelhouse/requests-2.25.1.tar.gz and /dev/null differ diff --git a/kata/wheelhouse/requests-2.26.0.tar.gz b/containerd/wheelhouse/requests-2.26.0.tar.gz similarity index 100% rename from kata/wheelhouse/requests-2.26.0.tar.gz rename to containerd/wheelhouse/requests-2.26.0.tar.gz diff --git a/containerd/wheelhouse/six-1.15.0.tar.gz b/containerd/wheelhouse/six-1.15.0.tar.gz deleted file mode 100644 index 63329e4..0000000 Binary files a/containerd/wheelhouse/six-1.15.0.tar.gz and /dev/null differ diff --git a/kata/wheelhouse/six-1.16.0.tar.gz b/containerd/wheelhouse/six-1.16.0.tar.gz similarity index 100% rename from kata/wheelhouse/six-1.16.0.tar.gz rename to containerd/wheelhouse/six-1.16.0.tar.gz diff --git a/containerd/wheelhouse/urllib3-1.26.4.tar.gz b/containerd/wheelhouse/urllib3-1.26.4.tar.gz deleted file mode 100644 index 4d693e7..0000000 Binary files a/containerd/wheelhouse/urllib3-1.26.4.tar.gz and /dev/null differ diff --git a/containerd/wheelhouse/urllib3-1.26.7.tar.gz b/containerd/wheelhouse/urllib3-1.26.7.tar.gz new file mode 100644 index 0000000..990abe6 Binary files /dev/null and b/containerd/wheelhouse/urllib3-1.26.7.tar.gz differ diff --git a/easyrsa/.build.manifest b/easyrsa/.build.manifest index 3652ef7..2b3616e 100644 --- a/easyrsa/.build.manifest +++ b/easyrsa/.build.manifest @@ -1,37 +1,37 @@ { "layers": [ { - "branch": "refs/heads/master\nrefs/heads/stable", + "branch": "refs/heads/master", "rev": "fcdcea4e5de3e1556c24e6704607862d0ba00a56", "url": "layer:options" }, { - "branch": "refs/heads/master\nrefs/heads/stable", - "rev": "623e69c7b432456fd4364f6e1835424fd6b5425e", + "branch": "refs/heads/master", + "rev": "a3ff62c32c993d80417f6e093e3ef95e42f62083", "url": "layer:basic" }, { - 
"branch": "refs/heads/master\nrefs/heads/stable", + "branch": "refs/heads/master", "rev": "527dd64fc4b9a6b0f8d80a3c2c0b865155050275", "url": "layer:debug" }, { - "branch": "refs/heads/master\nrefs/heads/stable", + "branch": "refs/heads/master", "rev": "cc5bd3f49b2fa5e6c3ab2336763c313ec8bf083f", "url": "layer:leadership" }, { - "branch": "refs/heads/master\nrefs/heads/stable", + "branch": "refs/heads/master", "rev": "a7d7b6423db37a47611310039e6ed1929c0a2eab", "url": "layer:status" }, { - "branch": "refs/heads/stable", + "branch": "refs/heads/master", "rev": "44f635b92624be5882c70ca1544d79f5d8483e24", "url": "easyrsa" }, { - "branch": "refs/heads/master\nrefs/heads/stable", + "branch": "refs/heads/master", "rev": "d9850016d930a6d507b9fd45e2598d327922b140", "url": "interface:tls-certificates" } @@ -42,6 +42,11 @@ "dynamic", "unchecked" ], + ".github/workflows/main.yml": [ + "layer:basic", + "static", + "96a48a981ceb2a96f427a6b5226d2da6d7191981793804055d70a88ca1987473" + ], ".github/workflows/tox.yaml": [ "easyrsa", "static", @@ -52,11 +57,6 @@ "static", "3d3d61b1e6228c5d03ea369331e493d0688f94416a0384c5c0b41194e4297d33" ], - ".travis/profile-update.yaml": [ - "layer:basic", - "static", - "731e20aa59bf61c024d317ad630e478301a9386ccc0afe56e6c1c09db07ac83b" - ], "CONTRIBUTING.md": [ "easyrsa", "static", @@ -360,7 +360,7 @@ "lib/charms/layer/basic.py": [ "layer:basic", "static", - "3126b5754ad39402ee27e64527044ddd231ed1cd137fcedaffb51e63a635f108" + "98b47134770ed6e4c0b2d4aad73cd5bc200bec84aa9c1c4e075fd70c3222a0c9" ], "lib/charms/layer/execd.py": [ "layer:basic", @@ -440,7 +440,7 @@ "wheelhouse.txt": [ "layer:basic", "dynamic", - "7cf3f983dc8f85b0c0ca6d69accdb4f4af842a911625286df09005ed1897d797" + "44b8a3ab6ccaf3a81c8a96526a285462e01964e6090fd40104f3a087bab43c0c" ], "wheelhouse/Jinja2-2.10.1.tar.gz": [ "layer:basic", @@ -448,7 +448,7 @@ "065c4f02ebe7f7cf559e49ee5a95fb800a9e4528727aec6f24402a5374c65013" ], "wheelhouse/MarkupSafe-1.1.1.tar.gz": [ - "__pip__", + "layer:basic", 
"dynamic", "29872e92839765e546828bb7754a68c418d927cd064fd4708fab9fe9c8bb116b" ], @@ -462,10 +462,10 @@ "dynamic", "cacecf0baa674d356641f1d406b8bff1d756d739c46b869a54de515d08e6fc9c" ], - "wheelhouse/charmhelpers-0.20.21.tar.gz": [ + "wheelhouse/charmhelpers-0.20.23.tar.gz": [ "layer:basic", "dynamic", - "37dd06f9548724d38352d1eaf91216df9167066745774118481d40974599715c" + "59a9776594e91cd3e3e000043f8668b4d7b279422dbb17e320f01dc16385b80e" ], "wheelhouse/charms.reactive-1.4.1.tar.gz": [ "layer:basic", @@ -487,10 +487,10 @@ "dynamic", "c0a292bd977ef590379a3f05d7b7f65135487b67470f6281289a94e015650ea1" ], - "wheelhouse/pyaml-20.4.0.tar.gz": [ + "wheelhouse/pyaml-21.10.1.tar.gz": [ "__pip__", "dynamic", - "29a5c2a68660a799103d6949167bd6c7953d031449d08802386372de1db6ad71" + "c6519fee13bf06e3bb3f20cacdea8eba9140385a7c2546df5dbae4887f768383" ], "wheelhouse/setuptools-41.6.0.zip": [ "layer:basic", @@ -502,10 +502,10 @@ "dynamic", "70a4cf5584e966ae92f54a764e6437af992ba42ac4bca7eb37cc5d02b98ec40a" ], - "wheelhouse/six-1.15.0.tar.gz": [ + "wheelhouse/six-1.16.0.tar.gz": [ "__pip__", "dynamic", - "30639c035cdb23534cd4aa2dd52c3bf48f06e5f4a941509c8bafd8ce11080259" + "1e61c37477a1626458e36f7b1d82aa5c9b094fa4802892072e49de9c60c4c926" ], "wheelhouse/wheel-0.33.6.tar.gz": [ "layer:basic", diff --git a/easyrsa/.github/workflows/main.yml b/easyrsa/.github/workflows/main.yml new file mode 100644 index 0000000..565bfaf --- /dev/null +++ b/easyrsa/.github/workflows/main.yml @@ -0,0 +1,50 @@ +name: Test Suite +on: [pull_request] + +jobs: + lint: + name: Lint + runs-on: ubuntu-latest + strategy: + matrix: + python: [3.5, 3.6, 3.7, 3.8, 3.9] + steps: + - name: Check out code + uses: actions/checkout@v2 + - name: Setup Python + uses: actions/setup-python@v2 + with: + python-version: ${{ matrix.python }} + - name: Install Dependencies + run: | + pip install tox + - name: Run lint + run: tox -e flake8 + functional-test: + name: Functional test with LXD + runs-on: ubuntu-latest + timeout-minutes: 
360 + steps: + - name: Check out code + uses: actions/checkout@v2 + - name: Setup Python + uses: actions/setup-python@v2 + with: + python-version: 3.8 + - name: Install Dependencies + run: | + pip install tox + - name: Setup operator environment + uses: charmed-kubernetes/actions-operator@master + - name: Run test + run: tox -e func + - name: Show Status + if: ${{ always() }} + run: | + model=$(juju models --format yaml|grep "^- name:.*zaza"|cut -f2 -d/); + juju status -m "$model" + - name: Show Error Logs + if: ${{ always() }} + run: | + model=$(juju models --format yaml|grep "^- name:.*zaza"|cut -f2 -d/); + juju debug-log -m "$model" --replay --no-tail --level ERROR diff --git a/easyrsa/.travis/profile-update.yaml b/easyrsa/.travis/profile-update.yaml deleted file mode 100644 index 57f96eb..0000000 --- a/easyrsa/.travis/profile-update.yaml +++ /dev/null @@ -1,12 +0,0 @@ -config: {} -description: Default LXD profile - updated -devices: - eth0: - name: eth0 - parent: lxdbr0 - nictype: bridged - type: nic - root: - path: / - pool: default - type: disk diff --git a/easyrsa/lib/charms/layer/basic.py b/easyrsa/lib/charms/layer/basic.py index 7507203..bbdd074 100644 --- a/easyrsa/lib/charms/layer/basic.py +++ b/easyrsa/lib/charms/layer/basic.py @@ -199,7 +199,13 @@ def bootstrap_charm_deps(): # a set so that we can ignore the pre-install packages and let pip # choose the best version in case there are multiple from layer # conflicts) - pkgs = _load_wheelhouse_versions().keys() - set(pre_install_pkgs) + _versions = _load_wheelhouse_versions() + _pkgs = _versions.keys() - set(pre_install_pkgs) + # add back the versions such that each package in pkgs is + # ==. + # This ensures that pip 20.3.4+ will install the packages from the + # wheelhouse without (erroneously) flagging an error. 
+ pkgs = _add_back_versions(_pkgs, _versions) reinstall_flag = '--force-reinstall' if not cfg.get('use_venv', True) and pre_eoan: reinstall_flag = '--ignore-installed' @@ -278,6 +284,55 @@ def _load_wheelhouse_versions(): return versions +def _add_back_versions(pkgs, versions): + """Add back the version strings to each of the packages. + + The versions are LooseVersion() from _load_wheelhouse_versions(). This + function strips the ".zip" or ".tar.gz" from the end of the version string + and adds it back to the package in the form of == + + If a package name is not a key in the versions dictionary, then it is + returned in the list unchanged. + + :param pkgs: A list of package names + :type pkgs: List[str] + :param versions: A map of package to LooseVersion + :type versions: Dict[str, LooseVersion] + :returns: A list of (maybe) versioned packages + :rtype: List[str] + """ + def _strip_ext(s): + """Strip an extension (if it exists) from the string + + :param s: the string to strip an extension off if it exists + :type s: str + :returns: string without an extension of .zip or .tar.gz + :rtype: str + """ + for ending in [".zip", ".tar.gz"]: + if s.endswith(ending): + return s[:-len(ending)] + return s + + def _maybe_add_version(pkg): + """Maybe add back the version number to a package if it exists. + + Adds the version number, if the package exists in the lexically + captured `versions` dictionary, in the form ==. Strips + the extension if it exists. + + :param pkg: the package name to (maybe) add the version number to. 
+ :type pkg: str + """ + try: + return "{}=={}".format(pkg, _strip_ext(str(versions[pkg]))) + except KeyError: + pass + return pkg + + return [_maybe_add_version(pkg) for pkg in pkgs] + + def _update_if_newer(pip, pkgs): installed = _load_installed_versions(pip) wheelhouse = _load_wheelhouse_versions() diff --git a/easyrsa/version b/easyrsa/version index 1dea0b1..20817dd 100644 --- a/easyrsa/version +++ b/easyrsa/version @@ -1 +1 @@ -e247aeff \ No newline at end of file +ccfa68be \ No newline at end of file diff --git a/easyrsa/wheelhouse.txt b/easyrsa/wheelhouse.txt index c2337ba..7197e18 100644 --- a/easyrsa/wheelhouse.txt +++ b/easyrsa/wheelhouse.txt @@ -3,9 +3,11 @@ # even with installing setuptools before upgrading pip ends up with pip seeing # the older setuptools at the system level if include_system_packages is true pip>=18.1,<19.0 -# pin Jinja2 and PyYAML to the last versions supporting python 3.4 for trusty +# pin Jinja2, PyYAML and MarkupSafe to the last versions supporting python 3.5 +# for trusty Jinja2<=2.10.1 PyYAML<=5.2 +MarkupSafe<2.0.0 setuptools<42 setuptools-scm<=1.17.0 charmhelpers>=0.4.0,<1.0.0 diff --git a/easyrsa/wheelhouse/charmhelpers-0.20.21.tar.gz b/easyrsa/wheelhouse/charmhelpers-0.20.21.tar.gz deleted file mode 100644 index ca65d07..0000000 Binary files a/easyrsa/wheelhouse/charmhelpers-0.20.21.tar.gz and /dev/null differ diff --git a/easyrsa/wheelhouse/charmhelpers-0.20.23.tar.gz b/easyrsa/wheelhouse/charmhelpers-0.20.23.tar.gz new file mode 100644 index 0000000..8fbc8ec Binary files /dev/null and b/easyrsa/wheelhouse/charmhelpers-0.20.23.tar.gz differ diff --git a/easyrsa/wheelhouse/pyaml-20.4.0.tar.gz b/easyrsa/wheelhouse/pyaml-20.4.0.tar.gz deleted file mode 100644 index 0d5fd76..0000000 Binary files a/easyrsa/wheelhouse/pyaml-20.4.0.tar.gz and /dev/null differ diff --git a/easyrsa/wheelhouse/pyaml-21.10.1.tar.gz b/easyrsa/wheelhouse/pyaml-21.10.1.tar.gz new file mode 100644 index 0000000..b19aad3 Binary files /dev/null and 
b/easyrsa/wheelhouse/pyaml-21.10.1.tar.gz differ diff --git a/easyrsa/wheelhouse/six-1.15.0.tar.gz b/easyrsa/wheelhouse/six-1.15.0.tar.gz deleted file mode 100644 index 63329e4..0000000 Binary files a/easyrsa/wheelhouse/six-1.15.0.tar.gz and /dev/null differ diff --git a/easyrsa/wheelhouse/six-1.16.0.tar.gz b/easyrsa/wheelhouse/six-1.16.0.tar.gz new file mode 100644 index 0000000..5bf3a27 Binary files /dev/null and b/easyrsa/wheelhouse/six-1.16.0.tar.gz differ diff --git a/etcd/.build.manifest b/etcd/.build.manifest index d4b891e..3669718 100644 --- a/etcd/.build.manifest +++ b/etcd/.build.manifest @@ -1,79 +1,89 @@ { "layers": [ { - "branch": "refs/heads/master\nrefs/heads/stable", + "branch": "refs/heads/master", "rev": "fcdcea4e5de3e1556c24e6704607862d0ba00a56", "url": "layer:options" }, { - "branch": "refs/heads/master\nrefs/heads/stable", - "rev": "623e69c7b432456fd4364f6e1835424fd6b5425e", + "branch": "refs/heads/master", + "rev": "a3ff62c32c993d80417f6e093e3ef95e42f62083", "url": "layer:basic" }, { - "branch": "refs/heads/master\nrefs/heads/stable", + "branch": "refs/heads/master", "rev": "023c67941e18663a4df49f53edba809f43ba5069", "url": "layer:cis-benchmark" }, { - "branch": "refs/heads/master\nrefs/heads/stable", + "branch": "refs/heads/master", "rev": "527dd64fc4b9a6b0f8d80a3c2c0b865155050275", "url": "layer:debug" }, { - "branch": "refs/heads/master\nrefs/heads/stable", + "branch": "refs/heads/master", "rev": "cc5bd3f49b2fa5e6c3ab2336763c313ec8bf083f", "url": "layer:leadership" }, { - "branch": "refs/heads/master\nrefs/heads/stable", + "branch": "refs/heads/master", "rev": "47dfcd4920ef6317850a4837ef0057ab0092a18e", "url": "layer:nagios" }, { - "branch": "refs/heads/master\nrefs/heads/stable", + "branch": "refs/heads/master", "rev": "fb46dec78d390571753d21876bbba689bbbca9e4", "url": "layer:tls-client" }, { - "branch": "refs/heads/master\nrefs/heads/stable", - "rev": "85d7cc4f7180d19df20e264358e920004cec192b", + "branch": "refs/heads/master", + "rev": 
"d3acdf209cbaf5b732e9aba621778a0f56dbaeb9", "url": "layer:snap" }, { - "branch": "refs/heads/master\nrefs/heads/stable", + "branch": "refs/heads/master", "rev": "b60102068c6f0ddbeaf8a308549a3e88cfa35688", "url": "layer:cdk-service-kicker" }, { - "branch": "refs/heads/master\nrefs/heads/stable", + "branch": "refs/heads/master", "rev": "a7d7b6423db37a47611310039e6ed1929c0a2eab", "url": "layer:status" }, { - "branch": "refs/heads/stable", - "rev": "53d38096a6de8d4bcc18a2cb64a94d904c496660", + "branch": "refs/heads/master", + "rev": "77eef0c0a49507b74fc90cec0864fdd85555f982", "url": "etcd" }, { - "branch": "refs/heads/master\nrefs/heads/stable", + "branch": "refs/heads/master", "rev": "2e0e1fdea6d83b55078200aacb537d60013ec5bc", "url": "interface:nrpe-external-master" }, { - "branch": "refs/heads/master\nrefs/heads/stable", + "branch": "refs/heads/master", "rev": "d9850016d930a6d507b9fd45e2598d327922b140", "url": "interface:tls-certificates" }, { - "branch": "refs/heads/master\nrefs/heads/stable", + "branch": "refs/heads/master", "rev": "44f244cbd08b86bf2b68bd71c3fb34c7c070c382", "url": "interface:etcd" }, { - "branch": "refs/heads/master\nrefs/heads/stable", + "branch": "refs/heads/master", "rev": "71b16123e38d9f8e2a38558e4f057f5071e56daa", "url": "interface:etcd-proxy" + }, + { + "branch": "refs/heads/master", + "rev": "e64261e281f012a00d374c6779ec52e488cb8713", + "url": "interface:grafana-dashboard" + }, + { + "branch": "refs/heads/master", + "rev": "3f775242c16d53243c993d7ba0c896169ad1639e", + "url": "interface:prometheus-manual" } ], "signatures": { @@ -82,6 +92,11 @@ "dynamic", "unchecked" ], + ".github/workflows/main.yml": [ + "layer:basic", + "static", + "96a48a981ceb2a96f427a6b5226d2da6d7191981793804055d70a88ca1987473" + ], ".github/workflows/tox.yaml": [ "etcd", "static", @@ -97,11 +112,6 @@ "static", "b6dbe144aa288b8a89caf1119b9835b407b234c9b32a1c81013b12a0593a8be2" ], - ".travis/profile-update.yaml": [ - "layer:basic", - "static", - 
"731e20aa59bf61c024d317ad630e478301a9386ccc0afe56e6c1c09db07ac83b" - ], "CONTRIBUTING.md": [ "etcd", "static", @@ -397,6 +407,31 @@ "dynamic", "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7" ], + "hooks/grafana-relation-broken": [ + "layer:basic", + "dynamic", + "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7" + ], + "hooks/grafana-relation-changed": [ + "layer:basic", + "dynamic", + "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7" + ], + "hooks/grafana-relation-created": [ + "layer:basic", + "dynamic", + "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7" + ], + "hooks/grafana-relation-departed": [ + "layer:basic", + "dynamic", + "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7" + ], + "hooks/grafana-relation-joined": [ + "layer:basic", + "dynamic", + "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7" + ], "hooks/hook.template": [ "layer:basic", "static", @@ -452,6 +487,31 @@ "dynamic", "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7" ], + "hooks/prometheus-relation-broken": [ + "layer:basic", + "dynamic", + "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7" + ], + "hooks/prometheus-relation-changed": [ + "layer:basic", + "dynamic", + "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7" + ], + "hooks/prometheus-relation-created": [ + "layer:basic", + "dynamic", + "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7" + ], + "hooks/prometheus-relation-departed": [ + "layer:basic", + "dynamic", + "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7" + ], + "hooks/prometheus-relation-joined": [ + "layer:basic", + "dynamic", + "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7" + ], "hooks/proxy-relation-broken": [ "layer:basic", "dynamic", @@ -542,6 +602,66 @@ "static", "8ffc1a094807fd36a1d1428b0a07b2428074134d46086066ecd6c0acd9fcd13e" ], + 
"hooks/relations/grafana-dashboard/.gitignore": [ + "interface:grafana-dashboard", + "static", + "5567034242cd31b5fb3a0d7e1f4cee8a2bb7454d4b35d4051f333145b09ff881" + ], + "hooks/relations/grafana-dashboard/LICENSE": [ + "interface:grafana-dashboard", + "static", + "cfc7749b96f63bd31c3c42b5c471bf756814053e847c10f3eb003417bc523d30" + ], + "hooks/relations/grafana-dashboard/README.md": [ + "interface:grafana-dashboard", + "static", + "d46e6c55423b4f0e28f803702632739582f3c0fad5d0427346f210eba8879685" + ], + "hooks/relations/grafana-dashboard/__init__.py": [ + "interface:grafana-dashboard", + "static", + "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855" + ], + "hooks/relations/grafana-dashboard/common.py": [ + "interface:grafana-dashboard", + "static", + "965f19c07d3475d7fe5a21235dc0cf1a27f11da9dad498d0cd1a51260b999aa3" + ], + "hooks/relations/grafana-dashboard/copyright": [ + "interface:grafana-dashboard", + "static", + "ee9809231ae81b9efc2b44b52aab2f6c8e4800319fdce5acad537b0eac556de4" + ], + "hooks/relations/grafana-dashboard/docs/common.md": [ + "interface:grafana-dashboard", + "static", + "ab69cc6e293b66175dfeee09707f8d02659ae5ba5b9aa4c441295a1025db12f7" + ], + "hooks/relations/grafana-dashboard/docs/provides.md": [ + "interface:grafana-dashboard", + "static", + "626b5655ce1e9f7733c86379fe67709e840b760046d899e5d761b034f94d939e" + ], + "hooks/relations/grafana-dashboard/docs/requires.md": [ + "interface:grafana-dashboard", + "static", + "4f78cff5a0395aff8477267e925066bfa93654eaeb4ba812c682f968171cca55" + ], + "hooks/relations/grafana-dashboard/interface.yaml": [ + "interface:grafana-dashboard", + "static", + "97e4c9a33360708668aa0330323fe9e9e5e95fa5a1e02d4f6b8e8dc60e155b52" + ], + "hooks/relations/grafana-dashboard/provides.py": [ + "interface:grafana-dashboard", + "static", + "cd63928094e6d34be92944ce65cb5b01ff9ba2bd9646036d006fa743a3c0fdb5" + ], + "hooks/relations/grafana-dashboard/requires.py": [ + "interface:grafana-dashboard", + "static", + 
"b071b9e66a3206351f563d7a4d160499b13a6af29d80930cb01720b5974e1dd2" + ], "hooks/relations/nrpe-external-master/README.md": [ "interface:nrpe-external-master", "static", @@ -567,6 +687,66 @@ "static", "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855" ], + "hooks/relations/prometheus-manual/.gitignore": [ + "interface:prometheus-manual", + "static", + "5567034242cd31b5fb3a0d7e1f4cee8a2bb7454d4b35d4051f333145b09ff881" + ], + "hooks/relations/prometheus-manual/LICENSE": [ + "interface:prometheus-manual", + "static", + "cfc7749b96f63bd31c3c42b5c471bf756814053e847c10f3eb003417bc523d30" + ], + "hooks/relations/prometheus-manual/README.md": [ + "interface:prometheus-manual", + "static", + "506d4a334ebbe40905c76fc74e4ab5285d836ac28c7d1087b85b5a304960be2e" + ], + "hooks/relations/prometheus-manual/__init__.py": [ + "interface:prometheus-manual", + "static", + "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855" + ], + "hooks/relations/prometheus-manual/common.py": [ + "interface:prometheus-manual", + "static", + "013107b3bc8f148779ada8097db725ac9c3d22c605a5794cb8bae95cace9fa4c" + ], + "hooks/relations/prometheus-manual/copyright": [ + "interface:prometheus-manual", + "static", + "ee9809231ae81b9efc2b44b52aab2f6c8e4800319fdce5acad537b0eac556de4" + ], + "hooks/relations/prometheus-manual/docs/common.md": [ + "interface:prometheus-manual", + "static", + "91b9e9300a2fef2ce1112cdc57a224ee06ab513ea127edc8a59b6ce9c715cd25" + ], + "hooks/relations/prometheus-manual/docs/provides.md": [ + "interface:prometheus-manual", + "static", + "6b226c2587dbf5b304e6466f2b31bbb208512896b2ab057b11b646cf3501e292" + ], + "hooks/relations/prometheus-manual/docs/requires.md": [ + "interface:prometheus-manual", + "static", + "0100bdc38afd892336747eac005260bc9656ffc1a40f9fb0faef824ab07c1021" + ], + "hooks/relations/prometheus-manual/interface.yaml": [ + "interface:prometheus-manual", + "static", + "4a268318ee2adcc8a5a3482d49595d3805f94bf8976bd1ee4a4f7f9db89e472e" + ], + 
"hooks/relations/prometheus-manual/provides.py": [ + "interface:prometheus-manual", + "static", + "232917934637d8905ddcd448ce51c2c30dcb9217e043592be356d510c09190c4" + ], + "hooks/relations/prometheus-manual/requires.py": [ + "interface:prometheus-manual", + "static", + "0492a9f1037f39479f2e607162aa48ca67451e00124541a7d56f7e0a920903e0" + ], "hooks/relations/tls-certificates/.gitignore": [ "interface:tls-certificates", "static", @@ -660,7 +840,7 @@ "layer.yaml": [ "etcd", "dynamic", - "359a37ecaba6aa516c993260ae2978f840e2228f5944249fa7a5ea399963e628" + "c66d59abd20fb4af93d95f2fa5d13ce9eb1693f619c74a2efc5ce2eaa5989f98" ], "lib/charms/layer/__init__.py": [ "layer:basic", @@ -670,7 +850,7 @@ "lib/charms/layer/basic.py": [ "layer:basic", "static", - "3126b5754ad39402ee27e64527044ddd231ed1cd137fcedaffb51e63a635f108" + "98b47134770ed6e4c0b2d4aad73cd5bc200bec84aa9c1c4e075fd70c3222a0c9" ], "lib/charms/layer/execd.py": [ "layer:basic", @@ -690,7 +870,7 @@ "lib/charms/layer/snap.py": [ "layer:snap", "static", - "1a3a2a09bb5f2ea1b557354d09f6968cecb6b4204ded019e704203fb3391f7be" + "f278a3b06a1604e1c59f107d2ff3e9f5705e3c6c7be7a012c1a500d0fc8925df" ], "lib/charms/layer/status.py": [ "layer:status", @@ -720,7 +900,7 @@ "lib/etcd_lib.py": [ "etcd", "static", - "a550f3409eede8c85d1e2bdd86bf32f2ab64b31b6fd321d204aab0f8def78055" + "bffbc6ba8374fbcf7d56b678aee5cabfe935cbbbff6ab1fcaab8da127f25bbf6" ], "lib/etcdctl.py": [ "etcd", @@ -735,7 +915,7 @@ "metadata.yaml": [ "etcd", "dynamic", - "373432b73726cb36c0b719ae91a39888d9cb66db3d17703ca57ba7641c327907" + "5b4f1b35359784fb6228d18051ebe1fc4218d757d36affd15a0acecc41bdcccd" ], "pydocmd.yml": [ "layer:status", @@ -755,7 +935,7 @@ "reactive/etcd.py": [ "etcd", "static", - "e2e941191031b3632c6457e806aca66796755417b871d50c967b9d78c526e8a9" + "82533bea8ce1a7201bd2a6f4b6bd1351370a9c84957d813274e143ea908c2999" ], "reactive/leadership.py": [ "layer:leadership", @@ -817,6 +997,11 @@ "static", 
"3ab6570d48daaa95ef87f28db1d333177fb7942f31e8157b3ac71c1ea319b108" ], + "templates/grafana_dashboard.json.j2": [ + "etcd", + "static", + "4d60e5e6211aa609f271567efa7fcbdc1dc25ca10d41b68fd473916b35f5a0a4" + ], "templates/service-always-restart.systemd-229.conf": [ "etcd", "static", @@ -865,22 +1050,27 @@ "tox.ini": [ "etcd", "static", - "53e1c829a1c652bb9739d79a206af4f1cb2c9605fb9c2bd590da52012301eb09" + "0c893707ff1ee537da640b538dadd1dd9d3cfe8f886c1e3ed165c40ae7c21c4b" + ], + "unit_tests/lib/test_etcd_lib.py": [ + "etcd", + "static", + "74daf7645e3c172106f3aded3995ad32ce7ac32aede9afab52b52f898bf617bb" ], "unit_tests/test_etcdctl.py": [ "etcd", "static", - "755b1f55a504862332219addc124ca36f50940831d7d6a2068aa74b42c618198" + "bc3c259b337fd9064c0ac7ce7f15e56e39d81b8cc186024be5ef46e8e48dea91" ], "version": [ "etcd", "dynamic", - "e769e4fb7e0ce598f5767cab04dbda0b3cd5fce9bea776b97aa6bc80f4cc4999" + "dade3247ceda164d3855d5984b15d394cff71eb8eafa1e202327576145f0ad6e" ], "wheelhouse.txt": [ "etcd", "dynamic", - "8c850ecab7e9c4a34020262a19101996418d65234d6a9a8a2ace0d58076e7095" + "57f8b4334d2be2b03a58c29f42ad8394c179f4ff85153e7feddd4d157644e5e5" ], "wheelhouse/Jinja2-2.10.1.tar.gz": [ "layer:basic", @@ -888,7 +1078,7 @@ "065c4f02ebe7f7cf559e49ee5a95fb800a9e4528727aec6f24402a5374c65013" ], "wheelhouse/MarkupSafe-1.1.1.tar.gz": [ - "__pip__", + "layer:basic", "dynamic", "29872e92839765e546828bb7754a68c418d927cd064fd4708fab9fe9c8bb116b" ], @@ -902,10 +1092,10 @@ "dynamic", "cacecf0baa674d356641f1d406b8bff1d756d739c46b869a54de515d08e6fc9c" ], - "wheelhouse/charmhelpers-0.20.21.tar.gz": [ + "wheelhouse/charmhelpers-0.20.23.tar.gz": [ "layer:basic", "dynamic", - "37dd06f9548724d38352d1eaf91216df9167066745774118481d40974599715c" + "59a9776594e91cd3e3e000043f8668b4d7b279422dbb17e320f01dc16385b80e" ], "wheelhouse/charms.reactive-1.4.1.tar.gz": [ "layer:basic", @@ -932,10 +1122,10 @@ "dynamic", "c0a292bd977ef590379a3f05d7b7f65135487b67470f6281289a94e015650ea1" ], - 
"wheelhouse/pyaml-20.4.0.tar.gz": [ + "wheelhouse/pyaml-21.10.1.tar.gz": [ "__pip__", "dynamic", - "29a5c2a68660a799103d6949167bd6c7953d031449d08802386372de1db6ad71" + "c6519fee13bf06e3bb3f20cacdea8eba9140385a7c2546df5dbae4887f768383" ], "wheelhouse/setuptools-41.6.0.zip": [ "layer:basic", @@ -947,15 +1137,15 @@ "dynamic", "70a4cf5584e966ae92f54a764e6437af992ba42ac4bca7eb37cc5d02b98ec40a" ], - "wheelhouse/six-1.15.0.tar.gz": [ + "wheelhouse/six-1.16.0.tar.gz": [ "__pip__", "dynamic", - "30639c035cdb23534cd4aa2dd52c3bf48f06e5f4a941509c8bafd8ce11080259" + "1e61c37477a1626458e36f7b1d82aa5c9b094fa4802892072e49de9c60c4c926" ], - "wheelhouse/tenacity-7.0.0.tar.gz": [ + "wheelhouse/tenacity-5.0.3.tar.gz": [ "layer:snap", "dynamic", - "5bd16ef5d3b985647fe28dfa6f695d343aa26479a04e8792b9d3c8f49e361ae1" + "24b7f302a1caa1801e58b39ea557129c095966e64e5b1ddad3c93a6cb033e38b" ], "wheelhouse/wheel-0.33.6.tar.gz": [ "layer:basic", diff --git a/etcd/.github/workflows/main.yml b/etcd/.github/workflows/main.yml new file mode 100644 index 0000000..565bfaf --- /dev/null +++ b/etcd/.github/workflows/main.yml @@ -0,0 +1,50 @@ +name: Test Suite +on: [pull_request] + +jobs: + lint: + name: Lint + runs-on: ubuntu-latest + strategy: + matrix: + python: [3.5, 3.6, 3.7, 3.8, 3.9] + steps: + - name: Check out code + uses: actions/checkout@v2 + - name: Setup Python + uses: actions/setup-python@v2 + with: + python-version: ${{ matrix.python }} + - name: Install Dependencies + run: | + pip install tox + - name: Run lint + run: tox -e flake8 + functional-test: + name: Functional test with LXD + runs-on: ubuntu-latest + timeout-minutes: 360 + steps: + - name: Check out code + uses: actions/checkout@v2 + - name: Setup Python + uses: actions/setup-python@v2 + with: + python-version: 3.8 + - name: Install Dependencies + run: | + pip install tox + - name: Setup operator environment + uses: charmed-kubernetes/actions-operator@master + - name: Run test + run: tox -e func + - name: Show Status + if: ${{ 
always() }} + run: | + model=$(juju models --format yaml|grep "^- name:.*zaza"|cut -f2 -d/); + juju status -m "$model" + - name: Show Error Logs + if: ${{ always() }} + run: | + model=$(juju models --format yaml|grep "^- name:.*zaza"|cut -f2 -d/); + juju debug-log -m "$model" --replay --no-tail --level ERROR diff --git a/etcd/.travis/profile-update.yaml b/etcd/.travis/profile-update.yaml deleted file mode 100644 index 57f96eb..0000000 --- a/etcd/.travis/profile-update.yaml +++ /dev/null @@ -1,12 +0,0 @@ -config: {} -description: Default LXD profile - updated -devices: - eth0: - name: eth0 - parent: lxdbr0 - nictype: bridged - type: nic - root: - path: / - pool: default - type: disk diff --git a/kata/hooks/config-changed b/etcd/hooks/grafana-relation-broken similarity index 100% rename from kata/hooks/config-changed rename to etcd/hooks/grafana-relation-broken diff --git a/kata/hooks/containerd-relation-broken b/etcd/hooks/grafana-relation-changed similarity index 100% rename from kata/hooks/containerd-relation-broken rename to etcd/hooks/grafana-relation-changed diff --git a/kata/hooks/containerd-relation-changed b/etcd/hooks/grafana-relation-created similarity index 100% rename from kata/hooks/containerd-relation-changed rename to etcd/hooks/grafana-relation-created diff --git a/kata/hooks/containerd-relation-created b/etcd/hooks/grafana-relation-departed similarity index 100% rename from kata/hooks/containerd-relation-created rename to etcd/hooks/grafana-relation-departed diff --git a/kata/hooks/containerd-relation-departed b/etcd/hooks/grafana-relation-joined similarity index 100% rename from kata/hooks/containerd-relation-departed rename to etcd/hooks/grafana-relation-joined diff --git a/kata/hooks/containerd-relation-joined b/etcd/hooks/prometheus-relation-broken similarity index 100% rename from kata/hooks/containerd-relation-joined rename to etcd/hooks/prometheus-relation-broken diff --git a/kata/hooks/hook.template b/etcd/hooks/prometheus-relation-changed 
old mode 100644 new mode 100755 similarity index 100% rename from kata/hooks/hook.template rename to etcd/hooks/prometheus-relation-changed diff --git a/kata/hooks/install b/etcd/hooks/prometheus-relation-created similarity index 100% rename from kata/hooks/install rename to etcd/hooks/prometheus-relation-created diff --git a/kata/hooks/leader-elected b/etcd/hooks/prometheus-relation-departed similarity index 100% rename from kata/hooks/leader-elected rename to etcd/hooks/prometheus-relation-departed diff --git a/kata/hooks/leader-settings-changed b/etcd/hooks/prometheus-relation-joined similarity index 100% rename from kata/hooks/leader-settings-changed rename to etcd/hooks/prometheus-relation-joined diff --git a/etcd/hooks/relations/grafana-dashboard/.gitignore b/etcd/hooks/relations/grafana-dashboard/.gitignore new file mode 100644 index 0000000..01a6a44 --- /dev/null +++ b/etcd/hooks/relations/grafana-dashboard/.gitignore @@ -0,0 +1,3 @@ +.docs +__pycache__ +*.pyc diff --git a/kata/LICENSE b/etcd/hooks/relations/grafana-dashboard/LICENSE similarity index 99% rename from kata/LICENSE rename to etcd/hooks/relations/grafana-dashboard/LICENSE index 261eeb9..d645695 100644 --- a/kata/LICENSE +++ b/etcd/hooks/relations/grafana-dashboard/LICENSE @@ -1,3 +1,4 @@ + Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ diff --git a/etcd/hooks/relations/grafana-dashboard/README.md b/etcd/hooks/relations/grafana-dashboard/README.md new file mode 100644 index 0000000..b111350 --- /dev/null +++ b/etcd/hooks/relations/grafana-dashboard/README.md @@ -0,0 +1,92 @@ +# Interface grafana-dashboard + +This is a [Juju][] interface layer that enables a charm which provides +dashboards to be imported into Grafana. + +You can download existing [Grafana Dashboards][] or use the [Grafana Dashboard +Reference][] to create your own. 
+ +# Example Usage + +First, you must define the relation endpoint in your charm's `metadata.yaml`: + +```yaml +provides: + grafana: + interface: grafana-dashboard +``` + +Next, you must ensure the interface layer is included in your `layer.yaml`: + +```yaml +includes: + - interface:grafana-dashboard +``` + +Then, in your reactive code, add the following, modifying the dashboard data as +your charm needs: + +```python +import json +from charms.reactive import endpoint_from_flag + + +@when('endpoint.grafana.joined') +def register_grafana_dashboards(): + grafana = endpoint_from_flag('endpoint.grafana.joined') + for dashboard_file in Path('files/grafana').glob('*.json'): + dashboard = json.loads(dashboard_file.read_text()) + grafana.register_dashboard(name=dashboard_file.stem, + dashboard=dashboard) +``` + + + +# Reference + +* [common.md](common.md) + * [ImportRequest](docs/common.md#importrequest) + * [egress_subnets](docs/common.md#importrequest-egress_subnets) + * [ingress_address](docs/common.md#importrequest-ingress_address) + * [is_created](docs/common.md#importrequest-is_created) + * [is_received](docs/common.md#importrequest-is_received) + * [respond](docs/common.md#importrequest-respond) + * [ImportResponse](docs/common.md#importresponse) + * [name](docs/common.md#importresponse-name) +* [provides.md](provides.md) + * [GrafanaDashboardProvides](docs/provides.md#grafanadashboardprovides) + * [all_departed_units](docs/provides.md#grafanadashboardprovides-all_departed_units) + * [all_joined_units](docs/provides.md#grafanadashboardprovides-all_joined_units) + * [all_units](docs/provides.md#grafanadashboardprovides-all_units) + * [endpoint_name](docs/provides.md#grafanadashboardprovides-endpoint_name) + * [failed_imports](docs/provides.md#grafanadashboardprovides-failed_imports) + * [is_joined](docs/provides.md#grafanadashboardprovides-is_joined) + * [joined](docs/provides.md#grafanadashboardprovides-joined) + * 
[manage_flags](docs/provides.md#grafanadashboardprovides-manage_flags) + * [register_dashboard](docs/provides.md#grafanadashboardprovides-register_dashboard) + * [relations](docs/provides.md#grafanadashboardprovides-relations) + * [requests](docs/provides.md#grafanadashboardprovides-requests) + * [responses](docs/provides.md#grafanadashboardprovides-responses) +* [requires.md](requires.md) + * [GrafanaDashboardRequires](docs/requires.md#grafanadashboardrequires) + * [all_departed_units](docs/requires.md#grafanadashboardrequires-all_departed_units) + * [all_joined_units](docs/requires.md#grafanadashboardrequires-all_joined_units) + * [all_requests](docs/requires.md#grafanadashboardrequires-all_requests) + * [all_units](docs/requires.md#grafanadashboardrequires-all_units) + * [endpoint_name](docs/requires.md#grafanadashboardrequires-endpoint_name) + * [is_joined](docs/requires.md#grafanadashboardrequires-is_joined) + * [joined](docs/requires.md#grafanadashboardrequires-joined) + * [manage_flags](docs/requires.md#grafanadashboardrequires-manage_flags) + * [new_requests](docs/requires.md#grafanadashboardrequires-new_requests) + * [relations](docs/requires.md#grafanadashboardrequires-relations) + + + +# Contact Information + +Maintainer: Cory Johns <Cory.Johns@canonical.com> + + +[Juju]: https://jujucharms.com +[Grafana Dashboards]: https://grafana.com/grafana/dashboards +[Grafana Dashboard Reference]: https://grafana.com/docs/reference/dashboard/ diff --git a/kata/hooks/relations/container-runtime/__init__.py b/etcd/hooks/relations/grafana-dashboard/__init__.py similarity index 100% rename from kata/hooks/relations/container-runtime/__init__.py rename to etcd/hooks/relations/grafana-dashboard/__init__.py diff --git a/etcd/hooks/relations/grafana-dashboard/common.py b/etcd/hooks/relations/grafana-dashboard/common.py new file mode 100644 index 0000000..99db2d8 --- /dev/null +++ b/etcd/hooks/relations/grafana-dashboard/common.py @@ -0,0 +1,38 @@ +from charms.reactive 
import BaseRequest, BaseResponse, Field + + +class ImportResponse(BaseResponse): + success = Field(description='Whether or not the import succeeded') + reason = Field(description='If failed, a description of why') + + @property + def name(self): + """ + The name given when the import was requested. + """ + return self.request.name + + +class ImportRequest(BaseRequest): + RESPONSE_CLASS = ImportResponse + + name = Field(description=""" + Name of the dashboard to import. Informational only, so that + you can tell which dashboard request this was, e.g. to check + for success or failure. + """) + + dashboard = Field(description=""" + Data structure defining the dashboard. Must be JSON + serializable. (Note: This should *not* be pre-serialized + JSON.) + """) + + def respond(self, success, reason=None): + """ + Acknowledge this request, and indicate success or failure with an + optional explanation. + """ + # wrap the base respond method to make the success field required and + # positional, as well as to provide a better doc string + super().respond(success=success, reason=reason) diff --git a/etcd/hooks/relations/grafana-dashboard/copyright b/etcd/hooks/relations/grafana-dashboard/copyright new file mode 100644 index 0000000..69768db --- /dev/null +++ b/etcd/hooks/relations/grafana-dashboard/copyright @@ -0,0 +1,16 @@ +Format: http://dep.debian.net/deps/dep5/ + +Files: * +Copyright: Copyright 2019, Canonical Ltd., All Rights Reserved. +License: Apache License 2.0 + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + . + http://www.apache.org/licenses/LICENSE-2.0 + . + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ See the License for the specific language governing permissions and + limitations under the License. diff --git a/etcd/hooks/relations/grafana-dashboard/docs/common.md b/etcd/hooks/relations/grafana-dashboard/docs/common.md new file mode 100644 index 0000000..ab7de1d --- /dev/null +++ b/etcd/hooks/relations/grafana-dashboard/docs/common.md @@ -0,0 +1,50 @@ +# `class ImportRequest(BaseRequest)` + +Base class for requests using the request / response pattern. + +Subclasses **must** set the ``RESPONSE_CLASS`` attribute to a subclass of +the :class:`BaseResponse` which defines the fields that the response will +use. They must also define additional attributes as :class:`Field`s. + +For example:: + + class TLSResponse(BaseResponse): + key = Field('Private key for the cert') + cert = Field('Public cert info') + + + class TLSRequest(BaseRequest): + RESPONSE_CLASS = TLSResponse + + common_name = Field('Common Name (CN) for the cert to be created') + sans = Field('List of Subject Alternative Names (SANs)') + +## `egress_subnets` + +Subnets over which network traffic to the requester will flow. + +## `ingress_address` + +Address to use if a connection to the requester is required. + +## `is_created` + +Whether this request was created by this side of the relation. + +## `is_received` + +Whether this request was received by the other side of the relation. + +## `def respond(self, success, reason=None)` + +Acknowledge this request, and indicate success or failure with an +optional explanation. + +# `class ImportResponse(BaseResponse)` + +Base class for responses using the request / response pattern. + +## `name` + +The name given when the import was requested. 
+ diff --git a/etcd/hooks/relations/grafana-dashboard/docs/provides.md b/etcd/hooks/relations/grafana-dashboard/docs/provides.md new file mode 100644 index 0000000..cc1f3dc --- /dev/null +++ b/etcd/hooks/relations/grafana-dashboard/docs/provides.md @@ -0,0 +1,120 @@ +# `class GrafanaDashboardProvides(RequesterEndpoint)` + +Base class for Endpoints that create requests in the request / response +pattern. + +Subclasses **must** set the ``REQUEST_CLASS`` attribute to a subclass +of :class:`BaseRequest` which defines the fields the request will use. + +## `all_departed_units` + +Collection of all units that were previously part of any relation on +this endpoint but which have since departed. + +This collection is persistent and mutable. The departed units will +be kept until they are explicitly removed, to allow for reasonable +cleanup of units that have left. + +Example: You need to run a command each time a unit departs the relation. + +.. code-block:: python + + @when('endpoint.{endpoint_name}.departed') + def handle_departed_unit(self): + for name, unit in self.all_departed_units.items(): + # run the command to remove `unit` from the cluster + # .. + self.all_departed_units.clear() + clear_flag(self.expand_name('departed')) + +Once a unit is departed, it will no longer show up in +:attr:`all_joined_units`. Note that units are considered departed as +soon as the departed hook is entered, which differs slightly from how +the Juju primitives behave (departing units are still returned from +``related-units`` until after the departed hook is complete). + +This collection is a :class:`KeyList`, so can be used as a mapping to +look up units by their unit name, or iterated or accessed by index. + +## `all_joined_units` + +A list view of all the units of all relations attached to this +:class:`~charms.reactive.endpoints.Endpoint`. 
+ +This is actually a +:class:`~charms.reactive.endpoints.CombinedUnitsView`, so the units +will be in order by relation ID and then unit name, and you can access a +merged view of all the units' data as a single mapping. You should be +very careful when using the merged data collections, however, and +consider carefully what will happen when the endpoint has multiple +relations and multiple remote units on each. It is probably better to +iterate over each unit and handle its data individually. See +:class:`~charms.reactive.endpoints.CombinedUnitsView` for an +explanation of how the merged data collections work. + +Note that, because a given application might be related multiple times +on a given endpoint, units may show up in this collection more than +once. + +## `all_units` + +.. deprecated:: 0.6.1 + Use :attr:`all_joined_units` instead + +## `endpoint_name` + +Relation name of this endpoint. + +## `failed_imports` + +A list of requests that failed to import. + +## `is_joined` + +Whether this endpoint has remote applications attached to it. + +## `joined` + +.. deprecated:: 0.6.3 + Use :attr:`is_joined` instead + +## `def manage_flags(self)` + +Method that subclasses can override to perform any flag management +needed during startup. + +This will be called automatically after the framework-managed automatic +flags have been updated. + +## `def register_dashboard(self, name, dashboard)` + +Request a dashboard to be imported. + +:param name: Name of dashboard. Informational only, so that you can + tell which dashboard request this was, e.g. to check for success or + failure. +:param dashboard: Data structure defining the dashboard. Must be JSON + serializable. (Note: This should *not* be pre-serialized JSON.) + +## `relations` + +Collection of :class:`Relation` instances that are established for +this :class:`Endpoint`. + +This is a :class:`KeyList`, so it can be iterated and indexed as a list, +or you can look up relations by their ID. 
For example:: + + rel0 = endpoint.relations[0] + assert rel0 is endpoint.relations[rel0.relation_id] + assert all(rel is endpoint.relations[rel.relation_id] + for rel in endpoint.relations) + print(', '.join(endpoint.relations.keys())) + +## `requests` + +A list of all requests which have been submitted. + +## `responses` + +A list of all responses which have been received. + diff --git a/etcd/hooks/relations/grafana-dashboard/docs/requires.md b/etcd/hooks/relations/grafana-dashboard/docs/requires.md new file mode 100644 index 0000000..c84f1bc --- /dev/null +++ b/etcd/hooks/relations/grafana-dashboard/docs/requires.md @@ -0,0 +1,109 @@ +# `class GrafanaDashboardRequires(ResponderEndpoint)` + +Base class for Endpoints that respond to requests in the request / response +pattern. + +Subclasses **must** set the ``REQUEST_CLASS`` attribute to a subclass +of :class:`BaseRequest` which defines the fields the request will use. + +## `all_departed_units` + +Collection of all units that were previously part of any relation on +this endpoint but which have since departed. + +This collection is persistent and mutable. The departed units will +be kept until they are explicitly removed, to allow for reasonable +cleanup of units that have left. + +Example: You need to run a command each time a unit departs the relation. + +.. code-block:: python + + @when('endpoint.{endpoint_name}.departed') + def handle_departed_unit(self): + for name, unit in self.all_departed_units.items(): + # run the command to remove `unit` from the cluster + # .. + self.all_departed_units.clear() + clear_flag(self.expand_name('departed')) + +Once a unit is departed, it will no longer show up in +:attr:`all_joined_units`. Note that units are considered departed as +soon as the departed hook is entered, which differs slightly from how +the Juju primitives behave (departing units are still returned from +``related-units`` until after the departed hook is complete). 
+ +This collection is a :class:`KeyList`, so can be used as a mapping to +look up units by their unit name, or iterated or accessed by index. + +## `all_joined_units` + +A list view of all the units of all relations attached to this +:class:`~charms.reactive.endpoints.Endpoint`. + +This is actually a +:class:`~charms.reactive.endpoints.CombinedUnitsView`, so the units +will be in order by relation ID and then unit name, and you can access a +merged view of all the units' data as a single mapping. You should be +very careful when using the merged data collections, however, and +consider carefully what will happen when the endpoint has multiple +relations and multiple remote units on each. It is probably better to +iterate over each unit and handle its data individually. See +:class:`~charms.reactive.endpoints.CombinedUnitsView` for an +explanation of how the merged data collections work. + +Note that, because a given application might be related multiple times +on a given endpoint, units may show up in this collection more than +once. + +## `all_requests` + +A list of all requests, including ones which have been responded to. + +## `all_units` + +.. deprecated:: 0.6.1 + Use :attr:`all_joined_units` instead + +## `endpoint_name` + +Relation name of this endpoint. + +## `is_joined` + +Whether this endpoint has remote applications attached to it. + +## `joined` + +.. deprecated:: 0.6.3 + Use :attr:`is_joined` instead + +## `def manage_flags(self)` + +Method that subclasses can override to perform any flag management +needed during startup. + +This will be called automatically after the framework-managed automatic +flags have been updated. + +## `new_requests` + +A list of requests which have not been responded. + +Requests should be handled by the charm and then responded to by +calling ``request.respond(...)``. + +## `relations` + +Collection of :class:`Relation` instances that are established for +this :class:`Endpoint`. 
+ +This is a :class:`KeyList`, so it can be iterated and indexed as a list, +or you can look up relations by their ID. For example:: + + rel0 = endpoint.relations[0] + assert rel0 is endpoint.relations[rel0.relation_id] + assert all(rel is endpoint.relations[rel.relation_id] + for rel in endpoint.relations) + print(', '.join(endpoint.relations.keys())) + diff --git a/etcd/hooks/relations/grafana-dashboard/interface.yaml b/etcd/hooks/relations/grafana-dashboard/interface.yaml new file mode 100644 index 0000000..0ee9ef8 --- /dev/null +++ b/etcd/hooks/relations/grafana-dashboard/interface.yaml @@ -0,0 +1,6 @@ +name: grafana-dashboard +summary: Interface for importing dashboards into Grafana +version: 1 +maintainer: "Cory Johns " +exclude: + - .docs diff --git a/etcd/hooks/relations/grafana-dashboard/provides.py b/etcd/hooks/relations/grafana-dashboard/provides.py new file mode 100644 index 0000000..670ded9 --- /dev/null +++ b/etcd/hooks/relations/grafana-dashboard/provides.py @@ -0,0 +1,42 @@ +from charms.reactive import ( + toggle_flag, + RequesterEndpoint, +) + +from .common import ImportRequest + + +class GrafanaDashboardProvides(RequesterEndpoint): + REQUEST_CLASS = ImportRequest + + def manage_flags(self): + super().manage_flags() + toggle_flag(self.expand_name('endpoint.{endpoint_name}.failed'), + self.is_joined and self.failed_imports) + + @property + def failed_imports(self): + """ + A list of requests that failed to import. + """ + return [response + for response in self.responses + if not response.success] + + def register_dashboard(self, name, dashboard): + """ + Request a dashboard to be imported. + + :param name: Name of dashboard. Informational only, so that you can + tell which dashboard request this was, e.g. to check for success or + failure. + :param dashboard: Data structure defining the dashboard. Must be JSON + serializable. (Note: This should *not* be pre-serialized JSON.) 
+ """ + # we might be connected to multiple grafanas for some strange + # reason, so just send the dashboard to all of them + for relation in self.relations: + ImportRequest.create_or_update(match_fields=['name'], + relation=relation, + name=name, + dashboard=dashboard) diff --git a/etcd/hooks/relations/grafana-dashboard/requires.py b/etcd/hooks/relations/grafana-dashboard/requires.py new file mode 100644 index 0000000..de696c1 --- /dev/null +++ b/etcd/hooks/relations/grafana-dashboard/requires.py @@ -0,0 +1,15 @@ +from charms.reactive import ( + toggle_flag, + ResponderEndpoint, +) + +from .common import ImportRequest + + +class GrafanaDashboardRequires(ResponderEndpoint): + REQUEST_CLASS = ImportRequest + + def manage_flags(self): + super().manage_flags() + toggle_flag(self.expand_name('endpoint.{endpoint_name}.requests'), + self.is_joined and self.new_requests) diff --git a/etcd/hooks/relations/prometheus-manual/.gitignore b/etcd/hooks/relations/prometheus-manual/.gitignore new file mode 100644 index 0000000..01a6a44 --- /dev/null +++ b/etcd/hooks/relations/prometheus-manual/.gitignore @@ -0,0 +1,3 @@ +.docs +__pycache__ +*.pyc diff --git a/kata/hooks/relations/container-runtime/LICENSE b/etcd/hooks/relations/prometheus-manual/LICENSE similarity index 99% rename from kata/hooks/relations/container-runtime/LICENSE rename to etcd/hooks/relations/prometheus-manual/LICENSE index 261eeb9..d645695 100644 --- a/kata/hooks/relations/container-runtime/LICENSE +++ b/etcd/hooks/relations/prometheus-manual/LICENSE @@ -1,3 +1,4 @@ + Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ diff --git a/etcd/hooks/relations/prometheus-manual/README.md b/etcd/hooks/relations/prometheus-manual/README.md new file mode 100644 index 0000000..4ff5c83 --- /dev/null +++ b/etcd/hooks/relations/prometheus-manual/README.md @@ -0,0 +1,113 @@ +# Interface prometheus-manual + +This is a [Juju][] interface layer that enables a charm which provides manual +or raw metric 
scraper job configuration stanzas for Prometheus 2. + +The format for the job configuration data can be found in the [Prometheus +Configuration Docs][]. The job configuration will be included as an item +under `scrape_configs` largely unchanged, except for two things: + +* To ensure uniqueness, the provided job name will have a UUID appended to it. +* Because the CA cert must be written to disk separately from the config, any + `tls_config` sections will have their `ca_file` field values replaced with + the path to the file where the provided `ca_cert` data is written. + +# Example Usage + +First, you must define the relation endpoint in your charm's `metadata.yaml`: + +```yaml +provides: + prometheus: + interface: prometheus-manual +``` + +Next, you must ensure the interface layer is included in your `layer.yaml`: + +```yaml +includes: + - interface:prometheus-manual +``` + +Then, in your reactive code, add the following, modifying the job data as +your charm needs: + +```python +from charms.reactive import endpoint_from_flag + + +@when('endpoint.prometheus.joined', + 'tls.ca.available') +def register_prometheus_jobs(): + prometheus = endpoint_from_flag('endpoint.prometheus.joined') + tls = endpoint_from_flag('tls.ca.available') + prometheus.register_job(job_name='kubernetes-apiservers', + ca_cert=tls.root_ca_cert, + job_data={ + 'kubernetes_sd_configs': [{'role': 'endpoints'}], + 'scheme': 'https', + 'tls_config': {'ca_file': '__ca_file__'}, # placeholder for saved filename + 'bearer_token': get_token('system:prometheus'), + }) + prometheus.register_job(job_name='kubernetes-nodes', + ca_cert=tls.root_ca_cert, + job_data={ + 'kubernetes_sd_configs': [{'role': 'node'}], + 'scheme': 'https', + 'tls_config': {'ca_file': '__ca_file__'}, # placeholder for saved filename + 'bearer_token': get_token('system:prometheus'), + }) +``` + + + +# Reference + +* [common.md](common.md) + * [JobRequest](docs/common.md#jobrequest) + * 
[egress_subnets](docs/common.md#jobrequest-egress_subnets) + * [fromkeys](docs/common.md#jobrequest-fromkeys) + * [ingress_address](docs/common.md#jobrequest-ingress_address) + * [is_created](docs/common.md#jobrequest-is_created) + * [is_received](docs/common.md#jobrequest-is_received) + * [respond](docs/common.md#jobrequest-respond) + * [to_json](docs/common.md#jobrequest-to_json) + * [JobResponse](docs/common.md#jobresponse) + * [fromkeys](docs/common.md#jobresponse-fromkeys) +* [provides.md](provides.md) + * [PrometheusManualProvides](docs/provides.md#prometheusmanualprovides) + * [all_departed_units](docs/provides.md#prometheusmanualprovides-all_departed_units) + * [all_joined_units](docs/provides.md#prometheusmanualprovides-all_joined_units) + * [all_units](docs/provides.md#prometheusmanualprovides-all_units) + * [endpoint_name](docs/provides.md#prometheusmanualprovides-endpoint_name) + * [is_joined](docs/provides.md#prometheusmanualprovides-is_joined) + * [joined](docs/provides.md#prometheusmanualprovides-joined) + * [manage_flags](docs/provides.md#prometheusmanualprovides-manage_flags) + * [register_job](docs/provides.md#prometheusmanualprovides-register_job) + * [relations](docs/provides.md#prometheusmanualprovides-relations) + * [requests](docs/provides.md#prometheusmanualprovides-requests) + * [responses](docs/provides.md#prometheusmanualprovides-responses) +* [requires.md](requires.md) + * [PrometheusManualRequires](docs/requires.md#prometheusmanualrequires) + * [all_departed_units](docs/requires.md#prometheusmanualrequires-all_departed_units) + * [all_joined_units](docs/requires.md#prometheusmanualrequires-all_joined_units) + * [all_requests](docs/requires.md#prometheusmanualrequires-all_requests) + * [all_units](docs/requires.md#prometheusmanualrequires-all_units) + * [endpoint_name](docs/requires.md#prometheusmanualrequires-endpoint_name) + * [is_joined](docs/requires.md#prometheusmanualrequires-is_joined) + * 
[jobs](docs/requires.md#prometheusmanualrequires-jobs) + * [joined](docs/requires.md#prometheusmanualrequires-joined) + * [manage_flags](docs/requires.md#prometheusmanualrequires-manage_flags) + * [new_jobs](docs/requires.md#prometheusmanualrequires-new_jobs) + * [new_requests](docs/requires.md#prometheusmanualrequires-new_requests) + * [relations](docs/requires.md#prometheusmanualrequires-relations) + + + +# Contact Information + +Maintainer: Cory Johns <Cory.Johns@canonical.com> + + +[Juju]: https://jujucharms.com +[Prometheus Configuration Docs]: https://prometheus.io/docs/prometheus/latest/configuration/configuration/ diff --git a/kata/hooks/relations/untrusted-container-runtime/__init__.py b/etcd/hooks/relations/prometheus-manual/__init__.py similarity index 100% rename from kata/hooks/relations/untrusted-container-runtime/__init__.py rename to etcd/hooks/relations/prometheus-manual/__init__.py diff --git a/etcd/hooks/relations/prometheus-manual/common.py b/etcd/hooks/relations/prometheus-manual/common.py new file mode 100644 index 0000000..530f965 --- /dev/null +++ b/etcd/hooks/relations/prometheus-manual/common.py @@ -0,0 +1,57 @@ +import json +from copy import deepcopy + +from charms.reactive import BaseRequest, BaseResponse, Field + + +class JobResponse(BaseResponse): + success = Field('Whether or not the registration succeeded') + reason = Field('If failed, a description of why') + + +class JobRequest(BaseRequest): + RESPONSE_CLASS = JobResponse + + job_name = Field('Desired name for the job. To ensure uniqueness, the ' + 'the request ID will be appended to the final job name.') + + job_data = Field('Config data for the job.') + + ca_cert = Field('Cert data for the CA used to validate connections.') + + def to_json(self, ca_file=None): + """ + Render the job request to JSON string which can be included directly + into Prometheus config. + + Keys will be sorted in the rendering to ensure a stable ordering for + comparisons to detect changes. 
+ + If `ca_file` is given, it will be used to replace the value of any + `ca_file` fields in the job. The charm should ensure that the + request's `ca_cert` data is writen to that path prior to calling this + method. + """ + job_data = deepcopy(self.job_data) # make a copy we can modify + job_data['job_name'] = '{}-{}'.format(self.job_name, self.request_id) + + if ca_file: + for key, value in job_data.items(): + # update the cert path at the job level + if key == 'tls_config': + value['ca_file'] = str(ca_file) + + # update the cert path at the SD config level + if key.endswith('_sd_configs'): + for sd_config in value: + if 'ca_file' in sd_config.get('tls_config', {}): + sd_config['tls_config']['ca_file'] = str(ca_file) + + return json.dumps(job_data, sort_keys=True) + + def respond(self, success, reason=None): + """ + Acknowledge this request, and indicate success or failure with an + optional explanation. + """ + super().respond(success=success, reason=reason) diff --git a/etcd/hooks/relations/prometheus-manual/copyright b/etcd/hooks/relations/prometheus-manual/copyright new file mode 100644 index 0000000..69768db --- /dev/null +++ b/etcd/hooks/relations/prometheus-manual/copyright @@ -0,0 +1,16 @@ +Format: http://dep.debian.net/deps/dep5/ + +Files: * +Copyright: Copyright 2019, Canonical Ltd., All Rights Reserved. +License: Apache License 2.0 + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + . + http://www.apache.org/licenses/LICENSE-2.0 + . + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/etcd/hooks/relations/prometheus-manual/docs/common.md b/etcd/hooks/relations/prometheus-manual/docs/common.md new file mode 100644 index 0000000..a97d54b --- /dev/null +++ b/etcd/hooks/relations/prometheus-manual/docs/common.md @@ -0,0 +1,62 @@ +# `class JobRequest(BaseRequest)` + +Base class for requests using the request / response pattern. + +Subclasses **must** set the ``RESPONSE_CLASS`` attribute to a subclass of +the :class:`BaseResponse` which defines the fields that the response will +use. They must also define additional attributes as :class:`Field`s. + +For example:: + + class TLSResponse(BaseResponse): + key = Field('Private key for the cert') + cert = Field('Public cert info') + + + class TLSRequest(BaseRequest): + RESPONSE_CLASS = TLSResponse + + common_name = Field('Common Name (CN) for the cert to be created') + sans = Field('List of Subject Alternative Names (SANs)') + +## `egress_subnets` + +Subnets over which network traffic to the requester will flow. + +## `None` + +Returns a new dict with keys from iterable and values equal to value. + +## `ingress_address` + +Address to use if a connection to the requester is required. + +## `is_created` + +Whether this request was created by this side of the relation. + +## `is_received` + +Whether this request was received by the other side of the relation. + +## `def respond(self, success, reason=None)` + +Acknowledge this request, and indicate success or failure with an +optional explanation. + +## `def to_json(self)` + +Render the job request to JSON string which can be included directly +into Prometheus config. + +Keys will be sorted in the rendering to ensure a stable ordering for +comparisons to detect changes. + +# `class JobResponse(BaseResponse)` + +Base class for responses using the request / response pattern. + +## `None` + +Returns a new dict with keys from iterable and values equal to value. 
+ diff --git a/etcd/hooks/relations/prometheus-manual/docs/provides.md b/etcd/hooks/relations/prometheus-manual/docs/provides.md new file mode 100644 index 0000000..439b4e5 --- /dev/null +++ b/etcd/hooks/relations/prometheus-manual/docs/provides.md @@ -0,0 +1,119 @@ +# `class PrometheusManualProvides(RequesterEndpoint)` + +Base class for Endpoints that create requests in the request / response +pattern. + +Subclasses **must** set the ``REQUEST_CLASS`` attribute to a subclass +of :class:`BaseRequest` which defines the fields the request will use. + +## `all_departed_units` + +Collection of all units that were previously part of any relation on +this endpoint but which have since departed. + +This collection is persistent and mutable. The departed units will +be kept until they are explicitly removed, to allow for reasonable +cleanup of units that have left. + +Example: You need to run a command each time a unit departs the relation. + +.. code-block:: python + + @when('endpoint.{endpoint_name}.departed') + def handle_departed_unit(self): + for name, unit in self.all_departed_units.items(): + # run the command to remove `unit` from the cluster + # .. + self.all_departed_units.clear() + clear_flag(self.expand_name('departed')) + +Once a unit is departed, it will no longer show up in +:attr:`all_joined_units`. Note that units are considered departed as +soon as the departed hook is entered, which differs slightly from how +the Juju primitives behave (departing units are still returned from +``related-units`` until after the departed hook is complete). + +This collection is a :class:`KeyList`, so can be used as a mapping to +look up units by their unit name, or iterated or accessed by index. + +## `all_joined_units` + +A list view of all the units of all relations attached to this +:class:`~charms.reactive.endpoints.Endpoint`. 
+ +This is actually a +:class:`~charms.reactive.endpoints.CombinedUnitsView`, so the units +will be in order by relation ID and then unit name, and you can access a +merged view of all the units' data as a single mapping. You should be +very careful when using the merged data collections, however, and +consider carefully what will happen when the endpoint has multiple +relations and multiple remote units on each. It is probably better to +iterate over each unit and handle its data individually. See +:class:`~charms.reactive.endpoints.CombinedUnitsView` for an +explanation of how the merged data collections work. + +Note that, because a given application might be related multiple times +on a given endpoint, units may show up in this collection more than +once. + +## `all_units` + +.. deprecated:: 0.6.1 + Use :attr:`all_joined_units` instead + +## `endpoint_name` + +Relation name of this endpoint. + +## `is_joined` + +Whether this endpoint has remote applications attached to it. + +## `joined` + +.. deprecated:: 0.6.3 + Use :attr:`is_joined` instead + +## `def manage_flags(self)` + +Method that subclasses can override to perform any flag management +needed during startup. + +This will be called automatically after the framework-managed automatic +flags have been updated. + +## `def register_job(self, job_name, job_data, ca_cert=None)` + +Register a manual job. + +The job data should be the (unserialized) data defining the job. + +To ensure uniqueness, a UUID will be added to the job name, and it will +be injected into the job data. + +If a CA cert is given, the value of any ca_file field in the job data +will be replaced with a filename after the CA cert data is written, so +a placeholder value should be used. + +## `relations` + +Collection of :class:`Relation` instances that are established for +this :class:`Endpoint`. + +This is a :class:`KeyList`, so it can be iterated and indexed as a list, +or you can look up relations by their ID. 
For example:: + + rel0 = endpoint.relations[0] + assert rel0 is endpoint.relations[rel0.relation_id] + assert all(rel is endpoint.relations[rel.relation_id] + for rel in endpoint.relations) + print(', '.join(endpoint.relations.keys())) + +## `requests` + +A list of all requests which have been submitted. + +## `responses` + +A list of all responses which have been received. + diff --git a/etcd/hooks/relations/prometheus-manual/docs/requires.md b/etcd/hooks/relations/prometheus-manual/docs/requires.md new file mode 100644 index 0000000..31a7e8f --- /dev/null +++ b/etcd/hooks/relations/prometheus-manual/docs/requires.md @@ -0,0 +1,117 @@ +# `class PrometheusManualRequires(ResponderEndpoint)` + +Base class for Endpoints that respond to requests in the request / response +pattern. + +Subclasses **must** set the ``REQUEST_CLASS`` attribute to a subclass +of :class:`BaseRequest` which defines the fields the request will use. + +## `all_departed_units` + +Collection of all units that were previously part of any relation on +this endpoint but which have since departed. + +This collection is persistent and mutable. The departed units will +be kept until they are explicitly removed, to allow for reasonable +cleanup of units that have left. + +Example: You need to run a command each time a unit departs the relation. + +.. code-block:: python + + @when('endpoint.{endpoint_name}.departed') + def handle_departed_unit(self): + for name, unit in self.all_departed_units.items(): + # run the command to remove `unit` from the cluster + # .. + self.all_departed_units.clear() + clear_flag(self.expand_name('departed')) + +Once a unit is departed, it will no longer show up in +:attr:`all_joined_units`. Note that units are considered departed as +soon as the departed hook is entered, which differs slightly from how +the Juju primitives behave (departing units are still returned from +``related-units`` until after the departed hook is complete). 
+ +This collection is a :class:`KeyList`, so can be used as a mapping to +look up units by their unit name, or iterated or accessed by index. + +## `all_joined_units` + +A list view of all the units of all relations attached to this +:class:`~charms.reactive.endpoints.Endpoint`. + +This is actually a +:class:`~charms.reactive.endpoints.CombinedUnitsView`, so the units +will be in order by relation ID and then unit name, and you can access a +merged view of all the units' data as a single mapping. You should be +very careful when using the merged data collections, however, and +consider carefully what will happen when the endpoint has multiple +relations and multiple remote units on each. It is probably better to +iterate over each unit and handle its data individually. See +:class:`~charms.reactive.endpoints.CombinedUnitsView` for an +explanation of how the merged data collections work. + +Note that, because a given application might be related multiple times +on a given endpoint, units may show up in this collection more than +once. + +## `all_requests` + +A list of all requests, including ones which have been responded to. + +## `all_units` + +.. deprecated:: 0.6.1 + Use :attr:`all_joined_units` instead + +## `endpoint_name` + +Relation name of this endpoint. + +## `is_joined` + +Whether this endpoint has remote applications attached to it. + +## `jobs` + +Return a list of all jobs to be registered. + +## `joined` + +.. deprecated:: 0.6.3 + Use :attr:`is_joined` instead + +## `def manage_flags(self)` + +Method that subclasses can override to perform any flag management +needed during startup. + +This will be called automatically after the framework-managed automatic +flags have been updated. + +## `new_jobs` + +Return a list of new jobs to be registered. + +## `new_requests` + +A list of requests which have not been responded. + +Requests should be handled by the charm and then responded to by +calling ``request.respond(...)``. 
+ +## `relations` + +Collection of :class:`Relation` instances that are established for +this :class:`Endpoint`. + +This is a :class:`KeyList`, so it can be iterated and indexed as a list, +or you can look up relations by their ID. For example:: + + rel0 = endpoint.relations[0] + assert rel0 is endpoint.relations[rel0.relation_id] + assert all(rel is endpoint.relations[rel.relation_id] + for rel in endpoint.relations) + print(', '.join(endpoint.relations.keys())) + diff --git a/etcd/hooks/relations/prometheus-manual/interface.yaml b/etcd/hooks/relations/prometheus-manual/interface.yaml new file mode 100644 index 0000000..5c324c6 --- /dev/null +++ b/etcd/hooks/relations/prometheus-manual/interface.yaml @@ -0,0 +1,6 @@ +name: prometheus-manual +summary: Interface for registering manual job definitions with Prometheus +version: 1 +maintainer: "Cory Johns " +exclude: + - .docs diff --git a/etcd/hooks/relations/prometheus-manual/provides.py b/etcd/hooks/relations/prometheus-manual/provides.py new file mode 100644 index 0000000..884629c --- /dev/null +++ b/etcd/hooks/relations/prometheus-manual/provides.py @@ -0,0 +1,41 @@ +from charms.reactive import ( + toggle_flag, + RequesterEndpoint, +) + +from .common import JobRequest + + +class PrometheusManualProvides(RequesterEndpoint): + REQUEST_CLASS = JobRequest + + def manage_flags(self): + super().manage_flags() + toggle_flag(self.expand_name('endpoint.{endpoint_name}.available'), + self.is_joined and self.requests) + + def register_job(self, job_name, job_data, ca_cert=None, relation=None): + """ + Register a manual job. + + The job data should be the (unserialized) data defining the job. + + To ensure uniqueness, a UUID will be added to the job name, and it will + be injected into the job data. + + If a CA cert is given, the value of any ca_file field in the job data + will be replaced with a filename after the CA cert data is written, so + a placeholder value should be used. 
+ + If a specific relation is not given, the job will be registered with + every related Prometheus. + """ + # we might be connected to multiple prometheuses for some strange + # reason, so just send the job to all of them + relations = [relation] if relation is not None else self.relations + for relation in relations: + JobRequest.create_or_update(match_fields=['job_name'], + relation=relation, + job_name=job_name, + job_data=job_data, + ca_cert=ca_cert) diff --git a/etcd/hooks/relations/prometheus-manual/requires.py b/etcd/hooks/relations/prometheus-manual/requires.py new file mode 100644 index 0000000..a8d1acb --- /dev/null +++ b/etcd/hooks/relations/prometheus-manual/requires.py @@ -0,0 +1,31 @@ +from charms.reactive import ( + toggle_flag, + ResponderEndpoint, +) + +from .common import JobRequest + + +class PrometheusManualRequires(ResponderEndpoint): + REQUEST_CLASS = JobRequest + + def manage_flags(self): + super().manage_flags() + toggle_flag(self.expand_name('endpoint.{endpoint_name}.has_jobs'), + self.is_joined and self.jobs) + toggle_flag(self.expand_name('endpoint.{endpoint_name}.new_jobs'), + self.is_joined and self.new_jobs) + + @property + def jobs(self): + """ + Return a list of all jobs to be registered. + """ + return self.all_requests + + @property + def new_jobs(self): + """ + Return a list of new jobs to be registered. 
+ """ + return self.new_requests diff --git a/etcd/layer.yaml b/etcd/layer.yaml index 0706020..8e820b6 100644 --- a/etcd/layer.yaml +++ b/etcd/layer.yaml @@ -13,6 +13,8 @@ - "layer:status" - "interface:etcd" - "interface:etcd-proxy" +- "interface:grafana-dashboard" +- "interface:prometheus-manual" "exclude": [".travis.yml", "tests", "tox.ini", "test-requirements.txt", "unit_tests", ".tox", "__pycache__", "Makefile", ".coverage"] "options": diff --git a/etcd/lib/charms/layer/basic.py b/etcd/lib/charms/layer/basic.py index 7507203..bbdd074 100644 --- a/etcd/lib/charms/layer/basic.py +++ b/etcd/lib/charms/layer/basic.py @@ -199,7 +199,13 @@ def bootstrap_charm_deps(): # a set so that we can ignore the pre-install packages and let pip # choose the best version in case there are multiple from layer # conflicts) - pkgs = _load_wheelhouse_versions().keys() - set(pre_install_pkgs) + _versions = _load_wheelhouse_versions() + _pkgs = _versions.keys() - set(pre_install_pkgs) + # add back the versions such that each package in pkgs is + # ==. + # This ensures that pip 20.3.4+ will install the packages from the + # wheelhouse without (erroneously) flagging an error. + pkgs = _add_back_versions(_pkgs, _versions) reinstall_flag = '--force-reinstall' if not cfg.get('use_venv', True) and pre_eoan: reinstall_flag = '--ignore-installed' @@ -278,6 +284,55 @@ def _load_wheelhouse_versions(): return versions +def _add_back_versions(pkgs, versions): + """Add back the version strings to each of the packages. + + The versions are LooseVersion() from _load_wheelhouse_versions(). This + function strips the ".zip" or ".tar.gz" from the end of the version string + and adds it back to the package in the form of == + + If a package name is not a key in the versions dictionary, then it is + returned in the list unchanged. 
+ + :param pkgs: A list of package names + :type pkgs: List[str] + :param versions: A map of package to LooseVersion + :type versions: Dict[str, LooseVersion] + :returns: A list of (maybe) versioned packages + :rtype: List[str] + """ + def _strip_ext(s): + """Strip an extension (if it exists) from the string + + :param s: the string to strip an extension off if it exists + :type s: str + :returns: string without an extension of .zip or .tar.gz + :rtype: str + """ + for ending in [".zip", ".tar.gz"]: + if s.endswith(ending): + return s[:-len(ending)] + return s + + def _maybe_add_version(pkg): + """Maybe add back the version number to a package if it exists. + + Adds the version number, if the package exists in the lexically + captured `versions` dictionary, in the form ==. Strips + the extension if it exists. + + :param pkg: the package name to (maybe) add the version number to. + :type pkg: str + """ + try: + return "{}=={}".format(pkg, _strip_ext(str(versions[pkg]))) + except KeyError: + pass + return pkg + + return [_maybe_add_version(pkg) for pkg in pkgs] + + def _update_if_newer(pip, pkgs): installed = _load_installed_versions(pip) wheelhouse = _load_wheelhouse_versions() diff --git a/etcd/lib/charms/layer/snap.py b/etcd/lib/charms/layer/snap.py index 88b8d89..06cc4b1 100644 --- a/etcd/lib/charms/layer/snap.py +++ b/etcd/lib/charms/layer/snap.py @@ -300,7 +300,15 @@ def get_installed_channel(snapname): hookenv.WARNING, ) return - return subprocess.check_output(cmd).decode("utf-8", errors="replace").partition("tracking:")[-1].split()[0] + try: + return subprocess.check_output(cmd).decode("utf-8", errors="replace").partition("tracking:")[-1].split()[0] + except Exception as e: + # If it fails to get the channel information(ex. installed via resource), return nothing. 
+ hookenv.log( + "Cannot get snap tracking (channel): {}".format(e), + hookenv.WARNING, + ) + return def _snap_args( @@ -351,25 +359,28 @@ def _install_store(snapname, **kw): cmd.append(snapname) hookenv.log("Installing {} from store".format(snapname)) - for attempt in tenacity.Retrying( + # Use tenacity decorator for Trusty support (See LP Bug #1934163) + @tenacity.retry( wait=tenacity.wait_fixed(10), # seconds stop=tenacity.stop_after_attempt(3), reraise=True, - ): - with attempt: - try: - out = subprocess.check_output(cmd, stderr=subprocess.STDOUT) - hookenv.log( - 'Installation successful cmd="{}" output="{}"'.format(cmd, out), - level=hookenv.DEBUG, - ) - reactive.clear_flag(get_local_flag(snapname)) - except subprocess.CalledProcessError as cp: - hookenv.log( - 'Installation failed cmd="{}" returncode={} output="{}"'.format(cmd, cp.returncode, cp.output), - level=hookenv.ERROR, - ) - raise + ) + def _run_install(): + try: + out = subprocess.check_output(cmd, stderr=subprocess.STDOUT) + hookenv.log( + 'Installation successful cmd="{}" output="{}"'.format(cmd, out), + level=hookenv.DEBUG, + ) + reactive.clear_flag(get_local_flag(snapname)) + except subprocess.CalledProcessError as cp: + hookenv.log( + 'Installation failed cmd="{}" returncode={} output="{}"'.format(cmd, cp.returncode, cp.output), + level=hookenv.ERROR, + ) + raise + + _run_install() def _refresh_store(snapname, **kw): diff --git a/etcd/lib/etcd_lib.py b/etcd/lib/etcd_lib.py index 12ef193..ec1a5d8 100644 --- a/etcd/lib/etcd_lib.py +++ b/etcd/lib/etcd_lib.py @@ -1,4 +1,12 @@ -from charmhelpers.core.hookenv import network_get, unit_private_ip +from charmhelpers.contrib.templating.jinja import render +from charmhelpers.core.hookenv import ( + network_get, + unit_private_ip, +) + +import json + +GRAFANA_DASHBOARD_FILE = 'grafana_dashboard.json.j2' def get_ingress_addresses(endpoint_name): @@ -66,3 +74,17 @@ def get_bind_address(endpoint_name): return bind_addresses[0]['addresses'][0]['address'] 
return unit_private_ip() + + +def render_grafana_dashboard(datasource): + """Load grafana dashboard json model and insert prometheus datasource. + + :param datasource: name of the 'prometheus' application that will be used + as datasource in grafana dashboard + :return: Grafana dashboard json model as a dict. + """ + datasource = "{} - Juju generated source".format(datasource) + jinja_args = {'variable_start_string': '<<', 'variable_end_string': '>>'} + return json.loads(render(GRAFANA_DASHBOARD_FILE, + {'datasource': datasource}, + jinja_env_args=jinja_args)) diff --git a/etcd/metadata.yaml b/etcd/metadata.yaml index 437060b..b333b27 100644 --- a/etcd/metadata.yaml +++ b/etcd/metadata.yaml @@ -29,6 +29,10 @@ "interface": "etcd" "proxy": "interface": "etcd-proxy" + "prometheus": + "interface": "prometheus-manual" + "grafana": + "interface": "grafana-dashboard" "peers": "cluster": "interface": "etcd" @@ -40,11 +44,17 @@ "core": "type": "file" "filename": "core.snap" - "description": "Snap package of core" + "description": | + Snap package of core + [DEPRECATED] in favor of using a Snap Store Proxy. + See https://ubuntu.com/kubernetes/docs/proxies for more details. "etcd": "type": "file" "filename": "etcd.snap" - "description": "Snap package of etcd" + "description": | + Snap package of etcd + [DEPRECATED] in favor of using a Snap Store Proxy. + See https://ubuntu.com/kubernetes/docs/proxies for more details. 
"storage": "data": "type": "block" diff --git a/etcd/reactive/etcd.py b/etcd/reactive/etcd.py index 4a24d80..30ca1be 100644 --- a/etcd/reactive/etcd.py +++ b/etcd/reactive/etcd.py @@ -11,14 +11,19 @@ from charms.reactive import when_not from charms.reactive import is_state from charms.reactive import set_state from charms.reactive import is_flag_set -from charms.reactive import clear_flag from charms.reactive import remove_state +from charms.reactive import set_flag +from charms.reactive import clear_flag from charms.reactive import hook +from charms.reactive import register_trigger from charms.reactive.helpers import data_changed from charms.templating.jinja2 import render +from charmhelpers.core.hookenv import config from charmhelpers.core.hookenv import log +from charmhelpers.core.hookenv import DEBUG + from charmhelpers.core.hookenv import leader_set from charmhelpers.core.hookenv import leader_get from charmhelpers.core.hookenv import storage_get @@ -36,7 +41,11 @@ from charms.layer import status from etcdctl import EtcdCtl from etcdctl import get_connection_string from etcd_databag import EtcdDatabag -from etcd_lib import get_ingress_address, get_ingress_addresses +from etcd_lib import ( + get_ingress_address, + get_ingress_addresses, + render_grafana_dashboard, +) from shlex import split from subprocess import check_call @@ -44,6 +53,7 @@ from subprocess import check_output from subprocess import CalledProcessError from shutil import copyfile +import json import os import charms.leadership # noqa import socket @@ -66,6 +76,13 @@ import random # default regex in charmhelpers doesn't allow periods, but nagios itself does. 
nrpe.Check.shortname_re = r'[\.A-Za-z0-9-_]+$' +GRAFANA_DASHBOARD_NAME = 'etcd' + +register_trigger(when_not="endpoint.grafana.joined", clear_flag="grafana.configured") +register_trigger(when_not="endpoint.prometheus.joined", + clear_flag="prometheus.configured") +register_trigger(when_not="endpoint.prometheus.joined", clear_flag="grafana.configured") + def get_target_etcd_channel(): """ @@ -821,6 +838,66 @@ def remove_nrpe_config(nagios=None): nrpe_setup.remove_check(shortname=service) +@when('endpoint.prometheus.joined', + 'leadership.is_leader', + 'certificates.ca.available') +def register_prometheus_jobs(): + # This function is not guarded with `when_not("prometheus.configured")` + # to account for possible changes of etcd units IP adresses and for when + # etcd units are added/removed. Repeated calls to `prometheus.register_job()` + # have no effect unless job_data changes. + log('Registering Prometheus metrics collection.') + prometheus = endpoint_from_flag('endpoint.prometheus.joined') + cluster = endpoint_from_flag('cluster.joined') + + peer_ips = cluster.get_db_ingress_addresses() if cluster else [] + peer_ips.append(get_ingress_address('db')) + targets = ["{}:{}".format(ip, config('port')) for ip in peer_ips] + log('Configuring Prometheus scrape targets: {}'.format(targets), DEBUG) + prometheus.register_job(job_name='etcd', + job_data={ + 'scheme': 'https', + 'static_configs': [ + {'targets': targets}, + ] + }) + set_flag('prometheus.configured') + + +@when( + "prometheus.configured", + "endpoint.grafana.joined", + "leadership.is_leader" +) +@when_not("grafana.configured") +def register_grafana_dashboard(): + log("Configuring grafana dashboard", level=hookenv.INFO) + grafana = endpoint_from_flag("endpoint.grafana.joined") + prometheus = endpoint_from_flag('endpoint.prometheus.joined') + + if not prometheus: + log( + "Prometheus relation not available. 
Skipping Grafana" + " configuration.", hookenv.WARNING) + return + + if len(prometheus.relations) > 1: + log( + "Multiple prometheus relations detected. Default Grafana dashboard" + " will configure only with one of them as datasource.", + hookenv.WARNING) + + datasource = prometheus.relations[0].application_name + dashboard = render_grafana_dashboard(datasource) + + log("Rendered Grafana dashboard:\n{}".format(json.dumps(dashboard)), + level=hookenv.DEBUG) + grafana.register_dashboard(name=GRAFANA_DASHBOARD_NAME, + dashboard=dashboard) + log('Grafana dashboard "{}" registered.'.format(GRAFANA_DASHBOARD_NAME)) + set_flag("grafana.configured") + + def volume_is_mounted(volume): ''' Takes a hardware path and returns true/false if it is mounted ''' cmd = ['df', '-t', 'ext4'] diff --git a/etcd/templates/grafana_dashboard.json.j2 b/etcd/templates/grafana_dashboard.json.j2 new file mode 100644 index 0000000..e6f1046 --- /dev/null +++ b/etcd/templates/grafana_dashboard.json.j2 @@ -0,0 +1,1124 @@ +{ + "__inputs": [], + "annotations": { + "list": [] + }, + "description": "etcd sample Grafana dashboard with Prometheus", + "editable": true, + "gnetId": null, + "graphTooltip": 0, + "hideControls": false, + "id": null, + "links": [], + "refresh": false, + "rows": [ + { + "collapse": false, + "height": "250px", + "panels": [ + { + "cacheTimeout": null, + "colorBackground": false, + "colorValue": false, + "colors": [ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + ], + "datasource": "<< datasource >>", + "editable": true, + "error": false, + "format": "none", + "gauge": { + "maxValue": 100, + "minValue": 0, + "show": false, + "thresholdLabels": false, + "thresholdMarkers": true + }, + "id": 28, + "interval": null, + "links": [], + "mappingType": 1, + "mappingTypes": [ + { + "name": "value to text", + "value": 1 + }, + { + "name": "range to text", + "value": 2 + } + ], + "maxDataPoints": 100, + "nullPointMode": "connected", + "nullText": 
null, + "postfix": "", + "postfixFontSize": "50%", + "prefix": "", + "prefixFontSize": "50%", + "rangeMaps": [ + { + "from": "null", + "text": "N/A", + "to": "null" + } + ], + "span": 3, + "sparkline": { + "fillColor": "rgba(31, 118, 189, 0.18)", + "full": false, + "lineColor": "rgb(31, 120, 193)", + "show": false + }, + "tableColumn": "", + "targets": [ + { + "expr": "sum(etcd_server_has_leader)", + "format": "time_series", + "instant": true, + "intervalFactor": 2, + "legendFormat": "", + "metric": "", + "refId": "A", + "step": 20 + } + ], + "thresholds": "", + "title": "Up", + "type": "singlestat", + "valueFontSize": "200%", + "valueMaps": [ + { + "op": "=", + "text": "N/A", + "value": "null" + } + ], + "valueName": "avg" + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "<< datasource >>", + "editable": true, + "error": false, + "fill": 0, + "id": 23, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": false, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 2, + "links": [], + "nullPointMode": "connected", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "span": 5, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "sum(rate(grpc_server_started_total{grpc_type=\"unary\"}[5m]))", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "RPC Rate", + "metric": "grpc_server_started_total", + "refId": "A", + "step": 4 + }, + { + "expr": "sum(rate(grpc_server_handled_total{grpc_type=\"unary\",grpc_code!=\"OK\"}[5m]))", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "RPC Failed Rate", + "metric": "grpc_server_handled_total", + "refId": "B", + "step": 4 + } + ], + "thresholds": [], + "timeFrom": null, + "timeShift": null, + "title": "RPC Rate", + "tooltip": { + "msResolution": false, + "shared": true, + "sort": 0, + 
"value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "ops", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ] + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "<< datasource >>", + "editable": true, + "error": false, + "fill": 0, + "id": 41, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": false, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 2, + "links": [], + "nullPointMode": "connected", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "span": 4, + "stack": true, + "steppedLine": false, + "targets": [ + { + "expr": "sum(grpc_server_started_total{grpc_service=\"etcdserverpb.Watch\",grpc_type=\"bidi_stream\"}) - sum(grpc_server_handled_total{grpc_service=\"etcdserverpb.Watch\",grpc_type=\"bidi_stream\"})", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "Watch Streams", + "metric": "grpc_server_handled_total", + "refId": "A", + "step": 4 + }, + { + "expr": "sum(grpc_server_started_total{grpc_service=\"etcdserverpb.Lease\",grpc_type=\"bidi_stream\"}) - sum(grpc_server_handled_total{grpc_service=\"etcdserverpb.Lease\",grpc_type=\"bidi_stream\"})", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "Lease Streams", + "metric": "grpc_server_handled_total", + "refId": "B", + "step": 4 + } + ], + "thresholds": [], + "timeFrom": null, + "timeShift": null, + "title": "Active Streams", + "tooltip": { + "msResolution": false, + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": 
null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "short", + "label": "", + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ] + } + ], + "repeat": null, + "repeatIteration": null, + "repeatRowId": null, + "showTitle": false, + "title": "Row", + "titleSize": "h6" + }, + { + "collapse": false, + "height": "250px", + "panels": [ + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "<< datasource >>", + "decimals": null, + "editable": true, + "error": false, + "fill": 0, + "grid": {}, + "id": 1, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": false, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 2, + "links": [], + "nullPointMode": "connected", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "span": 4, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "etcd_debugging_mvcc_db_total_size_in_bytes", + "format": "time_series", + "hide": false, + "interval": "", + "intervalFactor": 2, + "legendFormat": "{{instance}} DB Size", + "metric": "", + "refId": "A", + "step": 4 + } + ], + "thresholds": [], + "timeFrom": null, + "timeShift": null, + "title": "DB Size", + "tooltip": { + "msResolution": false, + "shared": true, + "sort": 0, + "value_type": "cumulative" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "bytes", + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ] + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "<< datasource >>", + "editable": true, 
+ "error": false, + "fill": 0, + "grid": {}, + "id": 3, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": false, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 2, + "links": [], + "nullPointMode": "connected", + "percentage": false, + "pointradius": 1, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "span": 4, + "stack": false, + "steppedLine": true, + "targets": [ + { + "expr": "histogram_quantile(0.99, sum(rate(etcd_disk_wal_fsync_duration_seconds_bucket[5m])) by (instance, le))", + "format": "time_series", + "hide": false, + "intervalFactor": 2, + "legendFormat": "{{instance}} WAL fsync", + "metric": "etcd_disk_wal_fsync_duration_seconds_bucket", + "refId": "A", + "step": 4 + }, + { + "expr": "histogram_quantile(0.99, sum(rate(etcd_disk_backend_commit_duration_seconds_bucket[5m])) by (instance, le))", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "{{instance}} DB fsync", + "metric": "etcd_disk_backend_commit_duration_seconds_bucket", + "refId": "B", + "step": 4 + } + ], + "thresholds": [], + "timeFrom": null, + "timeShift": null, + "title": "Disk Sync Duration", + "tooltip": { + "msResolution": false, + "shared": true, + "sort": 0, + "value_type": "cumulative" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "s", + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ] + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "<< datasource >>", + "editable": true, + "error": false, + "fill": 0, + "id": 29, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": false, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 2, + 
"links": [], + "nullPointMode": "connected", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "span": 4, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "process_resident_memory_bytes", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "{{instance}} Resident Memory", + "metric": "process_resident_memory_bytes", + "refId": "A", + "step": 4 + } + ], + "thresholds": [], + "timeFrom": null, + "timeShift": null, + "title": "Memory", + "tooltip": { + "msResolution": false, + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "bytes", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ] + } + ], + "repeat": null, + "repeatIteration": null, + "repeatRowId": null, + "showTitle": false, + "title": "New row", + "titleSize": "h6" + }, + { + "collapse": false, + "height": "250px", + "panels": [ + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "<< datasource >>", + "editable": true, + "error": false, + "fill": 5, + "id": 22, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": false, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 2, + "links": [], + "nullPointMode": "connected", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "span": 3, + "stack": true, + "steppedLine": false, + "targets": [ + { + "expr": "rate(etcd_network_client_grpc_received_bytes_total[5m])", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "{{instance}} Client Traffic In", + 
"metric": "etcd_network_client_grpc_received_bytes_total", + "refId": "A", + "step": 4 + } + ], + "thresholds": [], + "timeFrom": null, + "timeShift": null, + "title": "Client Traffic In", + "tooltip": { + "msResolution": false, + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ] + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "<< datasource >>", + "editable": true, + "error": false, + "fill": 5, + "id": 21, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": false, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 2, + "links": [], + "nullPointMode": "connected", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "span": 3, + "stack": true, + "steppedLine": false, + "targets": [ + { + "expr": "rate(etcd_network_client_grpc_sent_bytes_total[5m])", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "{{instance}} Client Traffic Out", + "metric": "etcd_network_client_grpc_sent_bytes_total", + "refId": "A", + "step": 4 + } + ], + "thresholds": [], + "timeFrom": null, + "timeShift": null, + "title": "Client Traffic Out", + "tooltip": { + "msResolution": false, + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "Bps", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, 
+ "logBase": 1, + "max": null, + "min": null, + "show": true + } + ] + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "<< datasource >>", + "editable": true, + "error": false, + "fill": 0, + "id": 20, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": false, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 2, + "links": [], + "nullPointMode": "connected", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "span": 3, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "sum(rate(etcd_network_peer_received_bytes_total[5m])) by (instance)", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "{{instance}} Peer Traffic In", + "metric": "etcd_network_peer_received_bytes_total", + "refId": "A", + "step": 4 + } + ], + "thresholds": [], + "timeFrom": null, + "timeShift": null, + "title": "Peer Traffic In", + "tooltip": { + "msResolution": false, + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "Bps", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ] + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "<< datasource >>", + "decimals": null, + "editable": true, + "error": false, + "fill": 0, + "grid": {}, + "id": 16, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": false, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 2, + "links": [], + "nullPointMode": "connected", + "percentage": false, + "pointradius": 5, + "points": 
false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "span": 3, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "sum(rate(etcd_network_peer_sent_bytes_total[5m])) by (instance)", + "format": "time_series", + "hide": false, + "interval": "", + "intervalFactor": 2, + "legendFormat": "{{instance}} Peer Traffic Out", + "metric": "etcd_network_peer_sent_bytes_total", + "refId": "A", + "step": 4 + } + ], + "thresholds": [], + "timeFrom": null, + "timeShift": null, + "title": "Peer Traffic Out", + "tooltip": { + "msResolution": false, + "shared": true, + "sort": 0, + "value_type": "cumulative" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "Bps", + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ] + } + ], + "repeat": null, + "repeatIteration": null, + "repeatRowId": null, + "showTitle": false, + "title": "New row", + "titleSize": "h6" + }, + { + "collapse": false, + "height": "250px", + "panels": [ + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "<< datasource >>", + "editable": true, + "error": false, + "fill": 0, + "id": 40, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": false, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 2, + "links": [], + "nullPointMode": "connected", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "span": 6, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "sum(rate(etcd_server_proposals_failed_total[5m]))", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "Proposal Failure Rate", + "metric": "etcd_server_proposals_failed_total", + "refId": "A", + 
"step": 2 + }, + { + "expr": "sum(etcd_server_proposals_pending)", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "Proposal Pending Total", + "metric": "etcd_server_proposals_pending", + "refId": "B", + "step": 2 + }, + { + "expr": "sum(rate(etcd_server_proposals_committed_total[5m]))", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "Proposal Commit Rate", + "metric": "etcd_server_proposals_committed_total", + "refId": "C", + "step": 2 + }, + { + "expr": "sum(rate(etcd_server_proposals_applied_total[5m]))", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "Proposal Apply Rate", + "refId": "D", + "step": 2 + } + ], + "thresholds": [], + "timeFrom": null, + "timeShift": null, + "title": "Raft Proposals", + "tooltip": { + "msResolution": false, + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "short", + "label": "", + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ] + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "<< datasource >>", + "decimals": 0, + "editable": true, + "error": false, + "fill": 0, + "id": 19, + "legend": { + "alignAsTable": false, + "avg": false, + "current": false, + "max": false, + "min": false, + "rightSide": false, + "show": false, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 2, + "links": [], + "nullPointMode": "connected", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "span": 6, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "changes(etcd_server_leader_changes_seen_total[1d])", + "format": "time_series", + 
"intervalFactor": 2, + "legendFormat": "{{instance}} Total Leader Elections Per Day", + "metric": "etcd_server_leader_changes_seen_total", + "refId": "A", + "step": 2 + } + ], + "thresholds": [], + "timeFrom": null, + "timeShift": null, + "title": "Total Leader Elections Per Day", + "tooltip": { + "msResolution": false, + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ] + } + ], + "repeat": null, + "repeatIteration": null, + "repeatRowId": null, + "showTitle": false, + "title": "New row", + "titleSize": "h6" + } + ], + "schemaVersion": 14, + "style": "dark", + "tags": [], + "templating": { + "list": [] + }, + "time": { + "from": "now-15m", + "to": "now" + }, + "timepicker": { + "now": true, + "refresh_intervals": [ + "5s", + "10s", + "30s", + "1m", + "5m", + "15m", + "30m", + "1h", + "2h", + "1d" + ], + "time_options": [ + "5m", + "15m", + "1h", + "6h", + "12h", + "24h", + "2d", + "7d", + "30d" + ] + }, + "timezone": "browser", + "title": "etcd", + "version": 2 +} diff --git a/etcd/tox.ini b/etcd/tox.ini index a89395f..ea08877 100644 --- a/etcd/tox.ini +++ b/etcd/tox.ini @@ -10,11 +10,14 @@ basepython = python3 setenv = PYTHONPATH={toxinidir}:{toxinidir}/lib deps = + jinja2 + netaddr<=0.7.19 pyyaml pytest + pytest-mock pytest-cov flake8 - git+https://github.com/juju-solutions/charms.unit_test/#egg=charms.unit_test + charms.unit_test commands = pytest --cov-report term-missing \ --cov lib --cov-fail-under 33 \ diff --git a/etcd/unit_tests/lib/test_etcd_lib.py b/etcd/unit_tests/lib/test_etcd_lib.py new file mode 100644 index 0000000..174fdad --- /dev/null +++ b/etcd/unit_tests/lib/test_etcd_lib.py @@ -0,0 
+1,19 @@ +from charmhelpers.contrib.templating import jinja + +from etcd_lib import render_grafana_dashboard + + +def test_render_grafana_dashboard(): + """Test loading of Grafana dashboard.""" + datasource = 'prometheus' + raw_template = ('{{"panels": [{{"datasource": "{} - ' + 'Juju generated source"}}]}}'.format(datasource)) + expected_dashboard = { + 'panels': [ + {'datasource': '{} - Juju generated source'.format(datasource)} + ]} + + jinja.render.return_value = raw_template + rendered_dashboard = render_grafana_dashboard(datasource) + + assert rendered_dashboard == expected_dashboard diff --git a/etcd/unit_tests/test_etcdctl.py b/etcd/unit_tests/test_etcdctl.py index 5a36be7..b766fc7 100644 --- a/etcd/unit_tests/test_etcdctl.py +++ b/etcd/unit_tests/test_etcdctl.py @@ -1,5 +1,7 @@ import pytest -from unittest.mock import patch +from unittest.mock import patch, MagicMock + +import reactive.etcd from etcdctl import ( EtcdCtl, @@ -10,13 +12,17 @@ from etcdctl import ( from etcd_databag import EtcdDatabag from reactive.etcd import ( - pre_series_upgrade, - post_series_upgrade, - status, clear_flag, - host, + endpoint_from_flag, force_rejoin_requested, force_rejoin, + GRAFANA_DASHBOARD_NAME, + host, + pre_series_upgrade, + post_series_upgrade, + register_grafana_dashboard, + register_prometheus_jobs, + status, ) @@ -110,6 +116,44 @@ class TestEtcdCtl: 'https://1.1.1.1:1111' ) + @patch('reactive.etcd.render_grafana_dashboard') + def test_register_grafana_dashboard(self, mock_dashboard_render): + """Register grafana dashboard.""" + dashboard_json = {'foo': 'bar'} + mock_dashboard_render.return_value = dashboard_json + grafana = MagicMock() + endpoint_from_flag.return_value = grafana + + register_grafana_dashboard() + + mock_dashboard_render.assert_called_once() + grafana.register_dashboard.assert_called_with( + name=GRAFANA_DASHBOARD_NAME, dashboard=dashboard_json) + reactive.etcd.set_flag.assert_called_with('grafana.configured') + + def 
test_register_prometheus_job(self, mocker): + """Test successful registration of prometheus job.""" + ingress_address = '10.0.0.1' + port = '2379' + targets = ['{}:{}'.format(ingress_address, port)] + prometheus_mock = MagicMock() + etcd_cluster_mock = MagicMock() + job_data = {'scheme': 'https', + 'static_configs': [{'targets': targets}] + } + + etcd_cluster_mock.get_db_ingress_addresses.return_value = [] + endpoint_from_flag.side_effect = [prometheus_mock, etcd_cluster_mock] + mocker.patch.object(reactive.etcd, 'get_ingress_address', + return_value=ingress_address) + reactive.etcd.config.return_value = port + + register_prometheus_jobs() + + prometheus_mock.register_job.assert_called_with(job_name='etcd', + job_data=job_data) + reactive.etcd.set_flag.assert_called_with('prometheus.configured') + def test_series_upgrade(self): assert host.service_pause.call_count == 0 assert host.service_resume.call_count == 0 diff --git a/etcd/version b/etcd/version index 1dea0b1..20817dd 100644 --- a/etcd/version +++ b/etcd/version @@ -1 +1 @@ -e247aeff \ No newline at end of file +ccfa68be \ No newline at end of file diff --git a/etcd/wheelhouse.txt b/etcd/wheelhouse.txt index 15a0aab..055e9cd 100644 --- a/etcd/wheelhouse.txt +++ b/etcd/wheelhouse.txt @@ -3,9 +3,11 @@ # even with installing setuptools before upgrading pip ends up with pip seeing # the older setuptools at the system level if include_system_packages is true pip>=18.1,<19.0 -# pin Jinja2 and PyYAML to the last versions supporting python 3.4 for trusty +# pin Jinja2, PyYAML and MarkupSafe to the last versions supporting python 3.5 +# for trusty Jinja2<=2.10.1 PyYAML<=5.2 +MarkupSafe<2.0.0 setuptools<42 setuptools-scm<=1.17.0 charmhelpers>=0.4.0,<1.0.0 @@ -15,7 +17,10 @@ wheel<0.34 # netaddr<=0.7.19 # overridden by etcd # layer:snap -tenacity +# Newer versions of tenacity rely on `typing` which is in stdlib in +# python3.5 but not python3.4. 
We want to continue to support +# python3.4 (Trusty) +tenacity<5.0.4 # etcd charms.templating.jinja2>=1.0.0,<2.0.0 diff --git a/etcd/wheelhouse/charmhelpers-0.20.21.tar.gz b/etcd/wheelhouse/charmhelpers-0.20.21.tar.gz deleted file mode 100644 index ca65d07..0000000 Binary files a/etcd/wheelhouse/charmhelpers-0.20.21.tar.gz and /dev/null differ diff --git a/etcd/wheelhouse/charmhelpers-0.20.23.tar.gz b/etcd/wheelhouse/charmhelpers-0.20.23.tar.gz new file mode 100644 index 0000000..8fbc8ec Binary files /dev/null and b/etcd/wheelhouse/charmhelpers-0.20.23.tar.gz differ diff --git a/etcd/wheelhouse/pyaml-20.4.0.tar.gz b/etcd/wheelhouse/pyaml-20.4.0.tar.gz deleted file mode 100644 index 0d5fd76..0000000 Binary files a/etcd/wheelhouse/pyaml-20.4.0.tar.gz and /dev/null differ diff --git a/etcd/wheelhouse/pyaml-21.10.1.tar.gz b/etcd/wheelhouse/pyaml-21.10.1.tar.gz new file mode 100644 index 0000000..b19aad3 Binary files /dev/null and b/etcd/wheelhouse/pyaml-21.10.1.tar.gz differ diff --git a/etcd/wheelhouse/six-1.15.0.tar.gz b/etcd/wheelhouse/six-1.15.0.tar.gz deleted file mode 100644 index 63329e4..0000000 Binary files a/etcd/wheelhouse/six-1.15.0.tar.gz and /dev/null differ diff --git a/etcd/wheelhouse/six-1.16.0.tar.gz b/etcd/wheelhouse/six-1.16.0.tar.gz new file mode 100644 index 0000000..5bf3a27 Binary files /dev/null and b/etcd/wheelhouse/six-1.16.0.tar.gz differ diff --git a/etcd/wheelhouse/tenacity-5.0.3.tar.gz b/etcd/wheelhouse/tenacity-5.0.3.tar.gz new file mode 100644 index 0000000..c7d05ba Binary files /dev/null and b/etcd/wheelhouse/tenacity-5.0.3.tar.gz differ diff --git a/etcd/wheelhouse/tenacity-7.0.0.tar.gz b/etcd/wheelhouse/tenacity-7.0.0.tar.gz deleted file mode 100644 index 2050c4d..0000000 Binary files a/etcd/wheelhouse/tenacity-7.0.0.tar.gz and /dev/null differ diff --git a/kata/.build.manifest b/flannel/.build.manifest similarity index 50% rename from kata/.build.manifest rename to flannel/.build.manifest index 729b0c1..f81fea0 100644 --- 
a/kata/.build.manifest +++ b/flannel/.build.manifest @@ -1,39 +1,54 @@ { "layers": [ { - "branch": "refs/heads/master\nrefs/heads/stable", + "branch": "refs/heads/master", "rev": "fcdcea4e5de3e1556c24e6704607862d0ba00a56", "url": "layer:options" }, { - "branch": "refs/heads/stable", - "rev": "0d10732a6e14ea2f940a35ab61425a97c5db6a16", + "branch": "refs/heads/master", + "rev": "a3ff62c32c993d80417f6e093e3ef95e42f62083", "url": "layer:basic" }, { - "branch": "refs/heads/master\nrefs/heads/stable", + "branch": "refs/heads/master", "rev": "527dd64fc4b9a6b0f8d80a3c2c0b865155050275", "url": "layer:debug" }, { - "branch": "refs/heads/master\nrefs/heads/stable", + "branch": "refs/heads/master", + "rev": "47dfcd4920ef6317850a4837ef0057ab0092a18e", + "url": "layer:nagios" + }, + { + "branch": "refs/heads/master", "rev": "a7d7b6423db37a47611310039e6ed1929c0a2eab", "url": "layer:status" }, { - "branch": "refs/heads/stable", - "rev": "b2fa345285b14fe339084fd35865973ca05eefbf", - "url": "kata" + "branch": "refs/heads/master", + "rev": "bbeabfee52c4442cdaf3a34e5e35530a3bd71156", + "url": "layer:kubernetes-common" }, { - "branch": "refs/heads/master\nrefs/heads/stable", - "rev": "6f927f10b97f45c566481cf57a29d433f17373e1", - "url": "interface:container-runtime" + "branch": "refs/heads/master", + "rev": "a0b41eeb5837bc087a7c0d32b8e23682566cb2ad", + "url": "flannel" }, { - "branch": "refs/heads/master\nrefs/heads/stable", - "rev": "b59ce0c44bc52c789175750ce18b42f76c9a4578", - "url": "interface:untrusted-container-runtime" + "branch": "refs/heads/master", + "rev": "44f244cbd08b86bf2b68bd71c3fb34c7c070c382", + "url": "interface:etcd" + }, + { + "branch": "refs/heads/master", + "rev": "88b1e8fad78d06efdbf512cd75eaa0bb308eb1c1", + "url": "interface:kubernetes-cni" + }, + { + "branch": "refs/heads/master", + "rev": "2e0e1fdea6d83b55078200aacb537d60013ec5bc", + "url": "interface:nrpe-external-master" } ], "signatures": { @@ -42,30 +57,35 @@ "dynamic", "unchecked" ], - ".gitignore": [ - 
"kata", + ".github/workflows/main.yml": [ + "layer:kubernetes-common", "static", - "589384c900fb8e573ae6939a9efa0813087ea526761ba661d96aa2526a494eef" + "d4f8fec0456cb2fc05993253a995983488a76fbbef10c2ee40649e83d6c9e078" + ], + ".github/workflows/tests.yaml": [ + "flannel", + "static", + "5476786d9ace5356136858f2cfcfcf8dcfdf2add3be89a0de7175d5c726203ff" + ], + ".gitignore": [ + "flannel", + "static", + "eec008c35119baa5e06882e52f99a510b5773931f1ca829a80d99e8ca751669f" ], ".travis.yml": [ - "kata", + "flannel", "static", - "714ed5453bd5a053676efb64370194a7c130f426ec11acba7d1509d558dc979c" - ], - ".travis/profile-update.yaml": [ - "layer:basic", - "static", - "731e20aa59bf61c024d317ad630e478301a9386ccc0afe56e6c1c09db07ac83b" + "c2bd1b88f26c88b883696cca155c28671359a256ed48b90a9ea724b376f2a829" ], "CONTRIBUTING.md": [ - "kata", + "flannel", "static", - "c44755a6800e330bd939b7a27a4bb75adaef3a1ccdc15df62cb5533a3ea6252f" + "1e1138fc9658719db34ae11a62f017b6a02bad466011f306cd62667c9c49fdd7" ], "LICENSE": [ - "kata", + "flannel", "static", - "c71d239df91726fc519c6eb72d318ec65820627232b2f796219e87dcf35d0ab4" + "58d1e17ffe5109a7ae296caafcadfdbe6a7d176f0bc4ab01e12a689b0499d8bd" ], "Makefile": [ "layer:basic", @@ -73,9 +93,9 @@ "b7ab3a34e5faf79b96a8632039a0ad0aa87f2a9b5f0ba604e007cafb22190301" ], "README.md": [ - "kata", + "flannel", "static", - "ac3b4f06b6e4a23f80a12f898645c4d4c2daedf961e72a2d851cf9c4b37d538a" + "365e1cde559f36067414a90405953571c74613697de8ff8d9d8b2ff0ffb0d3db" ], "actions.yaml": [ "layer:debug", @@ -97,21 +117,41 @@ "static", "e959bf29da4c5edff28b2602c24113c4df9e25cdc9f2aa3b5d46c8577b2a40cc" ], - "copyright": [ - "layer:status", + "build-flannel-resources.sh": [ + "flannel", "static", - "7c0e36e618a8544faaaa3f8e0533c2f1f4a18bcacbdd8b99b537742e6b587d58" + "995fe25171d34a787cef1189d8df5e1f3575041a6f89162ec928d56f60b5917d" + ], + "config.yaml": [ + "flannel", + "dynamic", + "56168ff734eedffe5b838c2f60fc797fb4f247c3a734549885b474ddf0c71423" + ], + "copyright": [ + 
"flannel", + "static", + "9c53958dbdcd6526c71fbe4d6eb5c1d03980e39b1e4259525dea16e91f00d68e" ], "copyright.layer-basic": [ "layer:basic", "static", "f6740d66fd60b60f2533d9fcb53907078d1e20920a0219afce7182e2a1c97629" ], + "copyright.layer-nagios": [ + "layer:nagios", + "static", + "47b2363574909e748bcc471d9004780ac084b301c154905654b5b6f088474749" + ], "copyright.layer-options": [ "layer:options", "static", "f6740d66fd60b60f2533d9fcb53907078d1e20920a0219afce7182e2a1c97629" ], + "copyright.layer-status": [ + "layer:status", + "static", + "7c0e36e618a8544faaaa3f8e0533c2f1f4a18bcacbdd8b99b537742e6b587d58" + ], "debug-scripts/charm-unitdata": [ "layer:debug", "static", @@ -157,32 +197,57 @@ "static", "975dec9f8c938196e102e954a80226bda293407c4e5ae857c118bf692154702a" ], + "hooks/cni-relation-broken": [ + "layer:basic", + "dynamic", + "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7" + ], + "hooks/cni-relation-changed": [ + "layer:basic", + "dynamic", + "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7" + ], + "hooks/cni-relation-created": [ + "layer:basic", + "dynamic", + "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7" + ], + "hooks/cni-relation-departed": [ + "layer:basic", + "dynamic", + "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7" + ], + "hooks/cni-relation-joined": [ + "layer:basic", + "dynamic", + "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7" + ], "hooks/config-changed": [ "layer:basic", "dynamic", "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7" ], - "hooks/containerd-relation-broken": [ + "hooks/etcd-relation-broken": [ "layer:basic", "dynamic", "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7" ], - "hooks/containerd-relation-changed": [ + "hooks/etcd-relation-changed": [ "layer:basic", "dynamic", "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7" ], - "hooks/containerd-relation-created": [ + "hooks/etcd-relation-created": 
[ "layer:basic", "dynamic", "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7" ], - "hooks/containerd-relation-departed": [ + "hooks/etcd-relation-departed": [ "layer:basic", "dynamic", "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7" ], - "hooks/containerd-relation-joined": [ + "hooks/etcd-relation-joined": [ "layer:basic", "dynamic", "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7" @@ -207,6 +272,31 @@ "dynamic", "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7" ], + "hooks/nrpe-external-master-relation-broken": [ + "layer:basic", + "dynamic", + "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7" + ], + "hooks/nrpe-external-master-relation-changed": [ + "layer:basic", + "dynamic", + "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7" + ], + "hooks/nrpe-external-master-relation-created": [ + "layer:basic", + "dynamic", + "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7" + ], + "hooks/nrpe-external-master-relation-departed": [ + "layer:basic", + "dynamic", + "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7" + ], + "hooks/nrpe-external-master-relation-joined": [ + "layer:basic", + "dynamic", + "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7" + ], "hooks/post-series-upgrade": [ "layer:basic", "dynamic", @@ -217,75 +307,105 @@ "dynamic", "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7" ], - "hooks/relations/container-runtime/.gitignore": [ - "interface:container-runtime", + "hooks/relations/etcd/.gitignore": [ + "interface:etcd", "static", - "a2ebfecdb6c1b58267fbe97e6e2ac02c2b963df7673fc1047270f0f0cff16732" + "cf237c7aff44efbe6e502e645c3e06da03a69d7bdeb43392108ef3348143417e" ], - "hooks/relations/container-runtime/LICENSE": [ - "interface:container-runtime", + "hooks/relations/etcd/README.md": [ + "interface:etcd", "static", - "c71d239df91726fc519c6eb72d318ec65820627232b2f796219e87dcf35d0ab4" + 
"93873d073f5f5302d352e09321aaf87458556e9730f89e1c682699c1d0db2386" ], - "hooks/relations/container-runtime/README.md": [ - "interface:container-runtime", - "static", - "44273265818229d2c858c3af0e0eee3a7df05aaa9ab20d28c3872190d4b48611" - ], - "hooks/relations/container-runtime/__init__.py": [ - "interface:container-runtime", + "hooks/relations/etcd/__init__.py": [ + "interface:etcd", "static", "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855" ], - "hooks/relations/container-runtime/interface.yaml": [ - "interface:container-runtime", + "hooks/relations/etcd/interface.yaml": [ + "interface:etcd", "static", - "e5343dcb11a6817a6050df4ea1c463eeaa0dd4777098566d4e27b056775426c6" + "ba9f723b57a434f7efb2c06abec4167cd412c16da5f496a477dd7691e9a715be" ], - "hooks/relations/container-runtime/provides.py": [ - "interface:container-runtime", + "hooks/relations/etcd/peers.py": [ + "interface:etcd", "static", - "4e818da222f507604179a828629787a1250083c847277f6b5b8e028cfbbb6d06" + "99419c3d139fb5bb90021e0482f9e7ac2cfb776fb7af79b46209c6a75b36e834" ], - "hooks/relations/container-runtime/requires.py": [ - "interface:container-runtime", + "hooks/relations/etcd/provides.py": [ + "interface:etcd", "static", - "95285168b02f1f70be15c03098833a85e60fa1658ed72a46acd42e8e85ded761" + "3db1f644ab669e2dec59d59b61de63b721bc05b38fe646e525fff8f0d60982f9" ], - "hooks/relations/untrusted-container-runtime/.gitignore": [ - "interface:untrusted-container-runtime", + "hooks/relations/etcd/requires.py": [ + "interface:etcd", "static", - "a2ebfecdb6c1b58267fbe97e6e2ac02c2b963df7673fc1047270f0f0cff16732" + "8ffc1a094807fd36a1d1428b0a07b2428074134d46086066ecd6c0acd9fcd13e" ], - "hooks/relations/untrusted-container-runtime/LICENSE": [ - "interface:untrusted-container-runtime", + "hooks/relations/kubernetes-cni/.github/workflows/tests.yaml": [ + "interface:kubernetes-cni", "static", - "c71d239df91726fc519c6eb72d318ec65820627232b2f796219e87dcf35d0ab4" + 
"d0015cd49675976ff87832f5ef7ea20ffca961786379c72bb6acdbdeddd9137c" ], - "hooks/relations/untrusted-container-runtime/README.md": [ - "interface:untrusted-container-runtime", + "hooks/relations/kubernetes-cni/.gitignore": [ + "interface:kubernetes-cni", "static", - "e3dc7db9ee98b716cb9a3a281fad88ca313bc11888a0da2f4b63c4306d91b64f" + "0594213ebf9c6ef87827b30405ee67d847f73f4185a865e0e5e9c0be9d29eabe" ], - "hooks/relations/untrusted-container-runtime/__init__.py": [ - "interface:untrusted-container-runtime", + "hooks/relations/kubernetes-cni/README.md": [ + "interface:kubernetes-cni", "static", "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855" ], - "hooks/relations/untrusted-container-runtime/interface.yaml": [ - "interface:untrusted-container-runtime", + "hooks/relations/kubernetes-cni/__init__.py": [ + "interface:kubernetes-cni", "static", - "1fcb0305295206dc2b9926bf1870cae2c6cd8eee6eef72b6060c85e4f2109a45" + "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855" ], - "hooks/relations/untrusted-container-runtime/provides.py": [ - "interface:untrusted-container-runtime", + "hooks/relations/kubernetes-cni/interface.yaml": [ + "interface:kubernetes-cni", "static", - "05a52be7ad18df5cac9fb5dcc27c2ab24fe12e65fa809e0ea4d395dbcb36e6f2" + "03affdaf7e879adfdf8c434aa31d40faa6d2872faa7dfd93a5d3a1ebae02487d" ], - "hooks/relations/untrusted-container-runtime/requires.py": [ - "interface:untrusted-container-runtime", + "hooks/relations/kubernetes-cni/provides.py": [ + "interface:kubernetes-cni", "static", - "958e03e254ee27bee761a6af3e032a273204b356dc51438489cde726b1a6e060" + "e436e187f2bab6e73add2b897cd43a2f000fde4726e40b772b66f27786c85dee" + ], + "hooks/relations/kubernetes-cni/requires.py": [ + "interface:kubernetes-cni", + "static", + "45398af27246eaf2005115bd3f270b78fc830d4345b02cc0c4d438711b7cd9fe" + ], + "hooks/relations/kubernetes-cni/tox.ini": [ + "interface:kubernetes-cni", + "static", + 
"f08626c9b65362031edb07f96f15f101bc3dda075abc64f54d1c83efd2c05e39" + ], + "hooks/relations/nrpe-external-master/README.md": [ + "interface:nrpe-external-master", + "static", + "d8ed3bc7334f6581b12b6091923f58e6f5ef62075a095a4e78fb8f434a948636" + ], + "hooks/relations/nrpe-external-master/__init__.py": [ + "interface:nrpe-external-master", + "static", + "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855" + ], + "hooks/relations/nrpe-external-master/interface.yaml": [ + "interface:nrpe-external-master", + "static", + "894f24ba56148044dae5b7febf874b427d199239bcbe1f2f55c3db06bb77b5f0" + ], + "hooks/relations/nrpe-external-master/provides.py": [ + "interface:nrpe-external-master", + "static", + "e6ba708d05b227b139a86be59c83ed95a2bad030bc81e5819167ba5e1e67ecd4" + ], + "hooks/relations/nrpe-external-master/requires.py": [ + "interface:nrpe-external-master", + "static", + "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855" ], "hooks/start": [ "layer:basic", @@ -297,31 +417,6 @@ "dynamic", "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7" ], - "hooks/untrusted-relation-broken": [ - "layer:basic", - "dynamic", - "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7" - ], - "hooks/untrusted-relation-changed": [ - "layer:basic", - "dynamic", - "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7" - ], - "hooks/untrusted-relation-created": [ - "layer:basic", - "dynamic", - "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7" - ], - "hooks/untrusted-relation-departed": [ - "layer:basic", - "dynamic", - "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7" - ], - "hooks/untrusted-relation-joined": [ - "layer:basic", - "dynamic", - "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7" - ], "hooks/update-status": [ "layer:basic", "dynamic", @@ -333,14 +428,19 @@ "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7" ], "icon.svg": [ - "kata", + "flannel", 
"static", - "d20624e9389af6506a8d8a69ac9bba4d41709601b624c0875fd7d6717b395088" + "bb6bcf05faa5952b889c356c9ffca6fd5082657efac85626713249ae218f763b" ], "layer.yaml": [ - "kata", + "flannel", "dynamic", - "599574e1d3dda3bf1d63047ac0b152caffcf22058e2f61370a37c8bb89317e4c" + "3e018cc6317096a1482ca753551a00c05e8ead7c2ab61809e740ab84f9ac0e3d" + ], + "lib/charms/flannel/common.py": [ + "flannel", + "static", + "e6f58d426cf7547eb9ab2169bea3628f048513ca77c7f9dfea50d8b452ec0e9f" ], "lib/charms/layer/__init__.py": [ "layer:basic", @@ -350,13 +450,23 @@ "lib/charms/layer/basic.py": [ "layer:basic", "static", - "3126b5754ad39402ee27e64527044ddd231ed1cd137fcedaffb51e63a635f108" + "98b47134770ed6e4c0b2d4aad73cd5bc200bec84aa9c1c4e075fd70c3222a0c9" ], "lib/charms/layer/execd.py": [ "layer:basic", "static", "fda8bd491032db1db8ddaf4e99e7cc878c6fb5432efe1f91cadb5b34765d076d" ], + "lib/charms/layer/kubernetes_common.py": [ + "layer:kubernetes-common", + "static", + "29cedffd490e6295273d195a7c9bace2fcdf149826e7427f2af9698f7f75055b" + ], + "lib/charms/layer/nagios.py": [ + "layer:nagios", + "static", + "0246710bdbea844356007a64409907d93e6e94a289d83266e8b7c5d921fb3a6c" + ], "lib/charms/layer/options.py": [ "layer:options", "static", @@ -378,9 +488,9 @@ "c990f55c8e879793a62ed8464ee3d7e0d7d2225fdecaf17af24b0df0e2daa8c1" ], "metadata.yaml": [ - "kata", + "flannel", "dynamic", - "883f95d6180166d507365b3374b733fde27e0eb988d9532e88bb66e002c3fd68" + "009fb9e888c9b434913f153901ef4d419d56b8d94e3a1ca241e1417f48a3c822" ], "pydocmd.yml": [ "layer:status", @@ -392,10 +502,10 @@ "static", "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855" ], - "reactive/kata.py": [ - "kata", + "reactive/flannel.py": [ + "flannel", "static", - "7863484c83034271ea1f7a645c9f904405047db1be0fd7857f80008f47e073bf" + "a13f33c694500f7bd00265f9db82492b2009e469295f9dca706dbb939702d795" ], "reactive/status.py": [ "layer:status", @@ -407,30 +517,75 @@ "static", 
"a00f75d80849e5b4fc5ad2e7536f947c25b1a4044b341caa8ee87a92d3a4c804" ], - "tests/conftest.py": [ - "kata", + "templates/10-flannel.conflist": [ + "flannel", + "static", + "257223dfc7fde23c0adb75f21484cdb4f35dfc2b34bd905f09931dff8038c651" + ], + "templates/cdk.auth-webhook-secret.yaml": [ + "layer:kubernetes-common", + "static", + "efaf34c12a5c961fa7843199070945ba05717b3656a0f3acc3327f45334bcaec" + ], + "templates/flannel.service": [ + "flannel", + "static", + "c22a91a5da6db0079717143ae95d4bbe95734c9d04f87d12ddd6ae1e3a5d9bd7" + ], + "tests/data/bundle.yaml": [ + "flannel", + "static", + "ff7247c127db371fa12d510ab470a0d82070e62e2a7087e3cc84021e9c6a0a5a" + ], + "tests/functional/conftest.py": [ + "layer:kubernetes-common", "static", "fd53e0c38b4dda0c18096167889cd0d85b98b0a13225f9f8853261241e94078c" ], - "tests/test_kata_reactive.py": [ - "kata", + "tests/functional/test_k8s_common.py": [ + "layer:kubernetes-common", "static", - "24d714d03b6f2c2faa67ecdbd7d102f700087973eb5c98d7b9c8e5542d61541c" + "680a53724154771dd78422bbaf24b151788d86dd07960712c5d9e0d758499b50" + ], + "tests/integration/conftest.py": [ + "flannel", + "static", + "92e2e5f765bbc9b6b6f394bac2899878b5e3e78615692dcd6fef218381ef8f20" + ], + "tests/integration/test_flannel_integration.py": [ + "flannel", + "static", + "841fc0d23642fa78e623dc1ceb6765676144205f981d7d3d384acaf8203ee6ef" + ], + "tests/unit/conftest.py": [ + "flannel", + "static", + "fd53e0c38b4dda0c18096167889cd0d85b98b0a13225f9f8853261241e94078c" + ], + "tests/unit/test_flannel.py": [ + "flannel", + "static", + "a017d5b4edb16c9e94a0b017905b7ff74f953298bab0fb5a38d4bdaa3090c230" + ], + "tests/unit/test_k8s_common.py": [ + "layer:kubernetes-common", + "static", + "da9bcea8e75160311a4055c1cbf577b497ddd45dc00223c5f1667598f94d9be4" ], "tox.ini": [ - "kata", + "flannel", "static", - "b04898a3c4de3bf48ca4363751048ec83ed185bc27af7d956ae799d88d3827ab" + "3c97b60f08edb8f03cddc1779cc8f57472169f0170dd5a0c98169c0b9953bab6" ], "version": [ - "kata", + 
"flannel", "dynamic", - "f6c325fd13ee5c726bc2e631996963198f2cfbaa50599b4962b151630fa86cf4" + "ee92bae3de0e84508e2008c42996c64f7c7728c2eafcb21d2efa1b534b1e2939" ], "wheelhouse.txt": [ - "kata", + "flannel", "dynamic", - "425000e4406bf00f663cf41789c409e7980e4bd4a1b557b0470770502f71ed09" + "c02d05375f2be2cb514cab90f7ef4e9b688e372cc42d3f29bf4e0a9ad27be62f" ], "wheelhouse/Jinja2-2.10.1.tar.gz": [ "layer:basic", @@ -452,30 +607,25 @@ "dynamic", "cacecf0baa674d356641f1d406b8bff1d756d739c46b869a54de515d08e6fc9c" ], - "wheelhouse/certifi-2021.5.30.tar.gz": [ - "__pip__", - "dynamic", - "2bbf76fd432960138b3ef6dda3dde0544f27cbf8546c458e60baf371917ba9ee" - ], - "wheelhouse/charmhelpers-0.20.22.tar.gz": [ + "wheelhouse/charmhelpers-0.20.23.tar.gz": [ "layer:basic", "dynamic", - "b7550108118ce4f87488343384441797777d0da746e1346ed4e6361b4eab0ddb" + "59a9776594e91cd3e3e000043f8668b4d7b279422dbb17e320f01dc16385b80e" ], "wheelhouse/charms.reactive-1.4.1.tar.gz": [ "layer:basic", "dynamic", "bba21b4fd40b26c240c9ef2aa10c6fdf73592031c68591da4e7ccc46ca9cb616" ], - "wheelhouse/charset-normalizer-2.0.3.tar.gz": [ - "__pip__", + "wheelhouse/charms.templating.jinja2-1.0.2.tar.gz": [ + "flannel", "dynamic", - "c46c3ace2d744cfbdebceaa3c19ae691f53ae621b39fd7570f59d14fb7f2fd12" + "8193c6a1d40bdb66fe272c359b4e4780501c658acfaf2b1118c4230927815fe2" ], - "wheelhouse/idna-3.2.tar.gz": [ - "__pip__", + "wheelhouse/dnspython-1.16.0.zip": [ + "flannel", "dynamic", - "467fbad99067910785144ce333826c71fb0e63a425657295239737f7ecd125f3" + "36c5e8e38d4369a08b6780b7f27d790a292b2b08eea01607865bf0936c558e01" ], "wheelhouse/netaddr-0.7.19.tar.gz": [ "layer:basic", @@ -492,15 +642,15 @@ "dynamic", "c0a292bd977ef590379a3f05d7b7f65135487b67470f6281289a94e015650ea1" ], - "wheelhouse/pyaml-20.4.0.tar.gz": [ + "wheelhouse/pyaml-21.10.1.tar.gz": [ "__pip__", "dynamic", - "29a5c2a68660a799103d6949167bd6c7953d031449d08802386372de1db6ad71" + "c6519fee13bf06e3bb3f20cacdea8eba9140385a7c2546df5dbae4887f768383" ], - 
"wheelhouse/requests-2.26.0.tar.gz": [ - "kata", + "wheelhouse/python-etcd-0.4.5.tar.gz": [ + "__pip__", "dynamic", - "b8aa58f8cf793ffd8782d3d8cb19e66ef36f7aba4353eec859e74678b01b07a7" + "f1b5ebb825a3e8190494f5ce1509fde9069f2754838ed90402a8c11e1f52b8cb" ], "wheelhouse/setuptools-41.6.0.zip": [ "layer:basic", @@ -517,10 +667,10 @@ "dynamic", "1e61c37477a1626458e36f7b1d82aa5c9b094fa4802892072e49de9c60c4c926" ], - "wheelhouse/urllib3-1.26.6.tar.gz": [ + "wheelhouse/urllib3-1.26.7.tar.gz": [ "__pip__", "dynamic", - "f57b4c16c62fa2760b7e3d97c35b255512fb6b59a259730f36ba32ce9f8e342f" + "4987c65554f7a2dbf30c18fd48778ef124af6fab771a377103da0585e2336ece" ], "wheelhouse/wheel-0.33.6.tar.gz": [ "layer:basic", diff --git a/flannel/.github/workflows/main.yml b/flannel/.github/workflows/main.yml new file mode 100644 index 0000000..6768aef --- /dev/null +++ b/flannel/.github/workflows/main.yml @@ -0,0 +1,22 @@ +name: Test Suite +on: [pull_request] + +jobs: + tests: + name: Lint, Unit, & Func Tests + runs-on: ubuntu-latest + strategy: + matrix: + python: [3.6, 3.7, 3.8, 3.9] + steps: + - name: Check out code + uses: actions/checkout@v2 + - name: Setup Python + uses: actions/setup-python@v2 + with: + python-version: ${{ matrix.python }} + - name: Install Dependencies + run: | + pip install tox + - name: Run lint + run: tox diff --git a/flannel/.github/workflows/tests.yaml b/flannel/.github/workflows/tests.yaml new file mode 100644 index 0000000..f6436d9 --- /dev/null +++ b/flannel/.github/workflows/tests.yaml @@ -0,0 +1,42 @@ +name: Run tests with Tox + +on: [push] + +jobs: + unit-tests: + name: Lint, Unit Tests + runs-on: ubuntu-latest + strategy: + matrix: + python: [3.5, 3.6, 3.7, 3.8, 3.9] + steps: + - uses: actions/checkout@v2 + - name: Setup Python + uses: actions/setup-python@v2 + with: + python-version: ${{ matrix.python }} + - name: Install Tox + run: pip install tox + - name: Run Tox + run: tox # Run tox using the version of Python in `PATH` + + integration-tests: + name: 
Integration test with LXD + runs-on: ubuntu-latest + steps: + - name: Check out code + uses: actions/checkout@v2 + - name: Setup Python + uses: actions/setup-python@v2 + with: + python-version: 3.9 + - name: Setup operator environment + uses: charmed-kubernetes/actions-operator@master + with: + provider: lxd + - name: Install docker + run: sudo snap install docker + - name: Build flannel resources + run: ARCH=amd64 sudo ./build-flannel-resources.sh + - name: Run integration test + run: tox -e integration diff --git a/kata/.gitignore b/flannel/.gitignore similarity index 52% rename from kata/.gitignore rename to flannel/.gitignore index 8003a31..b9d40ba 100644 --- a/kata/.gitignore +++ b/flannel/.gitignore @@ -1,4 +1,4 @@ -__pycache__/ -.coverage .tox/ -.venv/ +__pycache__/ +*.pyc +*.tar.gz diff --git a/calico/hooks/relations/kubernetes-cni/.travis.yml b/flannel/.travis.yml similarity index 100% rename from calico/hooks/relations/kubernetes-cni/.travis.yml rename to flannel/.travis.yml diff --git a/kata/CONTRIBUTING.md b/flannel/CONTRIBUTING.md similarity index 87% rename from kata/CONTRIBUTING.md rename to flannel/CONTRIBUTING.md index 7a8f252..0213ca4 100644 --- a/kata/CONTRIBUTING.md +++ b/flannel/CONTRIBUTING.md @@ -24,7 +24,7 @@ We have adopted the Ubuntu code of Conduct. You can read this in full [here](htt To contribute code to this project, please use the following workflow: -1. [Submit a bug][bug] to explain the need for and track the change. +1. [Submit a bug](https://bugs.launchpad.net/charm-flannel/+filebug) to explain the need for and track the change. 2. Create a branch on your fork of the repo with your changes, including a unit test covering the new or modified code. 3. Submit a PR. The PR description should include a link to the bug on Launchpad. 4. Update the Launchpad bug to include a link to the PR and the `review-needed` tag. 
@@ -34,8 +34,4 @@ To contribute code to this project, please use the following workflow: ## Documentation Documentation for this charm is currently maintained as part of the Charmed Kubernetes docs. -See [this page][docs] - - -[bug]: https://bugs.launchpad.net/charm-kata/+filebug -[docs]: https://github.com/charmed-kubernetes/kubernetes-docs/blob/master/pages/k8s/charm-kata.md \ No newline at end of file +See [this page](https://github.com/charmed-kubernetes/kubernetes-docs/blob/master/pages/k8s/charm-flannel.md) diff --git a/kata/hooks/relations/untrusted-container-runtime/LICENSE b/flannel/LICENSE similarity index 99% rename from kata/hooks/relations/untrusted-container-runtime/LICENSE rename to flannel/LICENSE index 261eeb9..7a4a3ea 100644 --- a/kata/hooks/relations/untrusted-container-runtime/LICENSE +++ b/flannel/LICENSE @@ -1,3 +1,4 @@ + Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ @@ -198,4 +199,4 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and - limitations under the License. + limitations under the License. \ No newline at end of file diff --git a/kata/Makefile b/flannel/Makefile similarity index 100% rename from kata/Makefile rename to flannel/Makefile diff --git a/flannel/README.md b/flannel/README.md new file mode 100644 index 0000000..f87452d --- /dev/null +++ b/flannel/README.md @@ -0,0 +1,25 @@ +# Flannel Charm + +Flannel is a virtual network that gives a subnet to each host for use with +container runtimes. + +This charm will deploy flannel as a background service, and configure CNI for +use with flannel, on any principal charm that implements the +[`kubernetes-cni`](https://github.com/juju-solutions/interface-kubernetes-cni) interface. + +This charm is maintained along with the components of Charmed Kubernetes. 
For full information, +please visit the [official Charmed Kubernetes docs](https://www.ubuntu.com/kubernetes/docs/charm-flannel). + +# Developers + +## Building the charm + +``` +charm build -o +``` + +## Building the flannel resources + +``` +./build-flannel-resources.sh +``` \ No newline at end of file diff --git a/kata/actions.yaml b/flannel/actions.yaml similarity index 100% rename from kata/actions.yaml rename to flannel/actions.yaml diff --git a/kata/actions/debug b/flannel/actions/debug similarity index 100% rename from kata/actions/debug rename to flannel/actions/debug diff --git a/kata/bin/charm-env b/flannel/bin/charm-env similarity index 100% rename from kata/bin/charm-env rename to flannel/bin/charm-env diff --git a/kata/bin/layer_option b/flannel/bin/layer_option similarity index 100% rename from kata/bin/layer_option rename to flannel/bin/layer_option diff --git a/flannel/build-flannel-resources.sh b/flannel/build-flannel-resources.sh new file mode 100755 index 0000000..403d5aa --- /dev/null +++ b/flannel/build-flannel-resources.sh @@ -0,0 +1,55 @@ +#!/usr/bin/env bash +set -eux + +FLANNEL_VERSION=${FLANNEL_VERSION:-"v0.11.0"} +ETCD_VERSION=${ETCD_VERSION:-"v2.3.7"} + +ARCH=${ARCH:-"amd64 arm64 s390x"} + +build_script_commit="$(git show --oneline -q)" +temp_dir="$(readlink -f build-flannel-resources.tmp)" +rm -rf "$temp_dir" +mkdir "$temp_dir" +(cd "$temp_dir" + git clone https://github.com/coreos/flannel.git flannel \ + --branch "$FLANNEL_VERSION" \ + --depth 1 + + git clone https://github.com/coreos/etcd.git etcd \ + --branch "$ETCD_VERSION" \ + --depth 1 + + # Grab the user id and group id of this current user. 
+ GROUP_ID=$(id -g) + USER_ID=$(id -u) + + for arch in $ARCH; do + echo "Building flannel $FLANNEL_VERSION for $arch" + (cd flannel + ARCH=$arch make dist/flanneld-$arch + ) + + echo "Building etcd $ETCD_VERSION for $arch" + docker run \ + --rm \ + -e GOOS=linux \ + -e GOARCH="$arch" \ + -v $temp_dir/etcd:/etcd \ + golang:1.15 \ + /bin/bash -c "cd /etcd && ./build && chown -R ${USER_ID}:${GROUP_ID} /etcd" + + rm -rf contents + mkdir contents + (cd contents + echo "flannel-$arch $FLANNEL_VERSION" >> BUILD_INFO + echo "etcdctl version $ETCD_VERSION" >> BUILD_INFO + echo "built $(date)" >> BUILD_INFO + echo "build script commit: $build_script_commit" >> BUILD_INFO + cp "$temp_dir"/etcd/bin/etcdctl . + cp "$temp_dir"/flannel/dist/flanneld-$arch ./flanneld + tar -caf "$temp_dir/flannel-$arch.tar.gz" . + ) + done +) +mv "$temp_dir"/flannel-*.tar.gz . +rm -rf "$temp_dir" diff --git a/flannel/config.yaml b/flannel/config.yaml new file mode 100644 index 0000000..9f042fb --- /dev/null +++ b/flannel/config.yaml @@ -0,0 +1,38 @@ +"options": + "nagios_context": + "default": "juju" + "type": "string" + "description": | + Used by the nrpe subordinate charms. + A string that will be prepended to instance name to set the host name + in nagios. So for instance the hostname would be something like: + juju-myservice-0 + If you're running multiple environments with the same services in them + this allows you to differentiate between them. + "nagios_servicegroups": + "default": "" + "type": "string" + "description": | + A comma-separated list of nagios servicegroups. + If left empty, the nagios_context will be used as the servicegroup + "iface": + "type": "string" + "default": "" + "description": | + The interface to bind flannel overlay networking. The default value is + the interface bound to the cni endpoint. 
+ "cidr": + "type": "string" + "default": "10.1.0.0/16" + "description": | + Network CIDR to assign to Flannel + "port": + "type": "int" + "default": !!int "0" + "description": | + Network port to use for Flannel + "vni": + "type": "int" + "default": !!int "0" + "description": | + VXLAN network id to assign to Flannel diff --git a/flannel/copyright b/flannel/copyright new file mode 100644 index 0000000..1276da9 --- /dev/null +++ b/flannel/copyright @@ -0,0 +1,13 @@ +Copyright 2016 Canonical LTD + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. diff --git a/kata/copyright.layer-basic b/flannel/copyright.layer-basic similarity index 100% rename from kata/copyright.layer-basic rename to flannel/copyright.layer-basic diff --git a/flannel/copyright.layer-nagios b/flannel/copyright.layer-nagios new file mode 100644 index 0000000..c80db95 --- /dev/null +++ b/flannel/copyright.layer-nagios @@ -0,0 +1,16 @@ +Format: http://dep.debian.net/deps/dep5/ + +Files: * +Copyright: Copyright 2016, Canonical Ltd. +License: GPL-3 + This program is free software: you can redistribute it and/or modify + it under the terms of the GNU General Public License version 3, as + published by the Free Software Foundation. + . + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranties of + MERCHANTABILITY, SATISFACTORY QUALITY, or FITNESS FOR A PARTICULAR + PURPOSE. See the GNU General Public License for more details. + . 
+ You should have received a copy of the GNU General Public License + along with this program. If not, see . diff --git a/kata/copyright.layer-options b/flannel/copyright.layer-options similarity index 100% rename from kata/copyright.layer-options rename to flannel/copyright.layer-options diff --git a/kata/copyright b/flannel/copyright.layer-status similarity index 100% rename from kata/copyright rename to flannel/copyright.layer-status diff --git a/kata/debug-scripts/charm-unitdata b/flannel/debug-scripts/charm-unitdata similarity index 100% rename from kata/debug-scripts/charm-unitdata rename to flannel/debug-scripts/charm-unitdata diff --git a/kata/debug-scripts/filesystem b/flannel/debug-scripts/filesystem similarity index 100% rename from kata/debug-scripts/filesystem rename to flannel/debug-scripts/filesystem diff --git a/kata/debug-scripts/juju-logs b/flannel/debug-scripts/juju-logs similarity index 100% rename from kata/debug-scripts/juju-logs rename to flannel/debug-scripts/juju-logs diff --git a/kata/debug-scripts/juju-network-get b/flannel/debug-scripts/juju-network-get similarity index 100% rename from kata/debug-scripts/juju-network-get rename to flannel/debug-scripts/juju-network-get diff --git a/kata/debug-scripts/network b/flannel/debug-scripts/network similarity index 100% rename from kata/debug-scripts/network rename to flannel/debug-scripts/network diff --git a/kata/debug-scripts/packages b/flannel/debug-scripts/packages similarity index 100% rename from kata/debug-scripts/packages rename to flannel/debug-scripts/packages diff --git a/kata/debug-scripts/sysctl b/flannel/debug-scripts/sysctl similarity index 100% rename from kata/debug-scripts/sysctl rename to flannel/debug-scripts/sysctl diff --git a/kata/debug-scripts/systemd b/flannel/debug-scripts/systemd similarity index 100% rename from kata/debug-scripts/systemd rename to flannel/debug-scripts/systemd diff --git a/kata/docs/status.md b/flannel/docs/status.md similarity index 100% rename 
from kata/docs/status.md rename to flannel/docs/status.md diff --git a/kata/hooks/post-series-upgrade b/flannel/hooks/cni-relation-broken similarity index 100% rename from kata/hooks/post-series-upgrade rename to flannel/hooks/cni-relation-broken diff --git a/kata/hooks/pre-series-upgrade b/flannel/hooks/cni-relation-changed similarity index 100% rename from kata/hooks/pre-series-upgrade rename to flannel/hooks/cni-relation-changed diff --git a/kata/hooks/start b/flannel/hooks/cni-relation-created similarity index 100% rename from kata/hooks/start rename to flannel/hooks/cni-relation-created diff --git a/kata/hooks/stop b/flannel/hooks/cni-relation-departed similarity index 100% rename from kata/hooks/stop rename to flannel/hooks/cni-relation-departed diff --git a/kata/hooks/untrusted-relation-broken b/flannel/hooks/cni-relation-joined similarity index 100% rename from kata/hooks/untrusted-relation-broken rename to flannel/hooks/cni-relation-joined diff --git a/kata/hooks/untrusted-relation-changed b/flannel/hooks/config-changed similarity index 100% rename from kata/hooks/untrusted-relation-changed rename to flannel/hooks/config-changed diff --git a/kata/hooks/untrusted-relation-created b/flannel/hooks/etcd-relation-broken similarity index 100% rename from kata/hooks/untrusted-relation-created rename to flannel/hooks/etcd-relation-broken diff --git a/kata/hooks/untrusted-relation-departed b/flannel/hooks/etcd-relation-changed similarity index 100% rename from kata/hooks/untrusted-relation-departed rename to flannel/hooks/etcd-relation-changed diff --git a/kata/hooks/untrusted-relation-joined b/flannel/hooks/etcd-relation-created similarity index 100% rename from kata/hooks/untrusted-relation-joined rename to flannel/hooks/etcd-relation-created diff --git a/kata/hooks/update-status b/flannel/hooks/etcd-relation-departed similarity index 100% rename from kata/hooks/update-status rename to flannel/hooks/etcd-relation-departed diff --git a/kata/hooks/upgrade-charm 
b/flannel/hooks/etcd-relation-joined similarity index 100% rename from kata/hooks/upgrade-charm rename to flannel/hooks/etcd-relation-joined diff --git a/kubernetes-master/hooks/cluster-dns-relation-broken b/flannel/hooks/hook.template old mode 100755 new mode 100644 similarity index 100% rename from kubernetes-master/hooks/cluster-dns-relation-broken rename to flannel/hooks/hook.template diff --git a/kubernetes-master/hooks/cluster-dns-relation-changed b/flannel/hooks/install similarity index 100% rename from kubernetes-master/hooks/cluster-dns-relation-changed rename to flannel/hooks/install diff --git a/kubernetes-master/hooks/cluster-dns-relation-created b/flannel/hooks/leader-elected similarity index 100% rename from kubernetes-master/hooks/cluster-dns-relation-created rename to flannel/hooks/leader-elected diff --git a/kubernetes-master/hooks/cluster-dns-relation-departed b/flannel/hooks/leader-settings-changed similarity index 100% rename from kubernetes-master/hooks/cluster-dns-relation-departed rename to flannel/hooks/leader-settings-changed diff --git a/kubernetes-master/hooks/cluster-dns-relation-joined b/flannel/hooks/nrpe-external-master-relation-broken similarity index 100% rename from kubernetes-master/hooks/cluster-dns-relation-joined rename to flannel/hooks/nrpe-external-master-relation-broken diff --git a/kubernetes-worker/hooks/kube-dns-relation-broken b/flannel/hooks/nrpe-external-master-relation-changed similarity index 100% rename from kubernetes-worker/hooks/kube-dns-relation-broken rename to flannel/hooks/nrpe-external-master-relation-changed diff --git a/kubernetes-worker/hooks/kube-dns-relation-changed b/flannel/hooks/nrpe-external-master-relation-created similarity index 100% rename from kubernetes-worker/hooks/kube-dns-relation-changed rename to flannel/hooks/nrpe-external-master-relation-created diff --git a/kubernetes-worker/hooks/kube-dns-relation-created b/flannel/hooks/nrpe-external-master-relation-departed similarity index 100% 
rename from kubernetes-worker/hooks/kube-dns-relation-created rename to flannel/hooks/nrpe-external-master-relation-departed diff --git a/kubernetes-worker/hooks/kube-dns-relation-departed b/flannel/hooks/nrpe-external-master-relation-joined similarity index 100% rename from kubernetes-worker/hooks/kube-dns-relation-departed rename to flannel/hooks/nrpe-external-master-relation-joined diff --git a/kubernetes-worker/hooks/kube-dns-relation-joined b/flannel/hooks/post-series-upgrade similarity index 100% rename from kubernetes-worker/hooks/kube-dns-relation-joined rename to flannel/hooks/post-series-upgrade diff --git a/flannel/hooks/pre-series-upgrade b/flannel/hooks/pre-series-upgrade new file mode 100755 index 0000000..9858c6b --- /dev/null +++ b/flannel/hooks/pre-series-upgrade @@ -0,0 +1,22 @@ +#!/usr/bin/env python3 + +# Load modules from $JUJU_CHARM_DIR/lib +import sys +sys.path.append('lib') + +from charms.layer import basic # noqa +basic.bootstrap_charm_deps() + +from charmhelpers.core import hookenv # noqa +hookenv.atstart(basic.init_config_states) +hookenv.atexit(basic.clear_config_states) + + +# This will load and run the appropriate @hook and other decorated +# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive, +# and $JUJU_CHARM_DIR/hooks/relations. +# +# See https://jujucharms.com/docs/stable/authors-charm-building +# for more information on this pattern. +from charms.reactive import main # noqa +main() diff --git a/flannel/hooks/relations/etcd/.gitignore b/flannel/hooks/relations/etcd/.gitignore new file mode 100644 index 0000000..e43b0f9 --- /dev/null +++ b/flannel/hooks/relations/etcd/.gitignore @@ -0,0 +1 @@ +.DS_Store diff --git a/flannel/hooks/relations/etcd/README.md b/flannel/hooks/relations/etcd/README.md new file mode 100644 index 0000000..9ed51dd --- /dev/null +++ b/flannel/hooks/relations/etcd/README.md @@ -0,0 +1,89 @@ +# Overview + +This interface layer handles the communication with Etcd via the `etcd` +interface. 
+ +# Usage + +## Requires + +This interface layer will set the following states, as appropriate: + + * `{relation_name}.connected` The relation is established, but Etcd may not + yet have provided any connection or service information. + + * `{relation_name}.available` Etcd has provided its connection string + information, and is ready to serve as a KV store. + The provided information can be accessed via the following methods: + * `etcd.get_connection_string()` + * `etcd.get_version()` + * `{relation_name}.tls.available` Etcd has provided the connection string + information, and the tls client credentials to communicate with it. + The client credentials can be accessed via: + * `{relation_name}.get_client_credentials()` returning a dictionary of + the client certificate, key and CA. + * `{relation_name}.save_client_credentials(key, cert, ca)` is a convenience + method to save the client certificate, key and CA to files of your + choosing. + + +For example, a common application for this is configuring an application's +backend key/value storage, like Docker. + +```python +@when('etcd.available', 'docker.available') +def swarm_etcd_cluster_setup(etcd): + con_string = etcd.connection_string().replace('http', 'etcd') + opts = {} + opts['connection_string'] = con_string + render('docker-compose.yml', 'files/swarm/docker-compose.yml', opts) + +``` + + +## Provides + +A charm providing this interface is providing the Etcd rest api service. + +This interface layer will set the following states, as appropriate: + + * `{relation_name}.connected` One or more clients of any type have + been related. 
The charm should call the following methods to provide the + appropriate information to the clients: + + * `{relation_name}.set_connection_string(string, version)` + * `{relation_name}.set_client_credentials(key, cert, ca)` + +Example: + +```python +@when('db.connected') +def send_connection_details(db): + cert = leader_get('client_certificate') + key = leader_get('client_key') + ca = leader_get('certificate_authority') + # Set the key, cert, and ca on the db relation + db.set_client_credentials(key, cert, ca) + + port = hookenv.config().get('port') + # Get all the peers participating in the cluster relation. + addresses = cluster.get_peer_addresses() + connections = [] + for address in addresses: + connections.append('http://{0}:{1}'.format(address, port)) + # Set the connection string on the db relation. + db.set_connection_string(','.join(connections)) +``` + + +# Contact Information + +### Maintainer +- Charles Butler + + +# Etcd + +- [Etcd](https://coreos.com/etcd/) home page +- [Etcd bug trackers](https://github.com/coreos/etcd/issues) +- [Etcd Juju Charm](http://jujucharms.com/?text=etcd) diff --git a/kata/reactive/__init__.py b/flannel/hooks/relations/etcd/__init__.py similarity index 100% rename from kata/reactive/__init__.py rename to flannel/hooks/relations/etcd/__init__.py diff --git a/flannel/hooks/relations/etcd/interface.yaml b/flannel/hooks/relations/etcd/interface.yaml new file mode 100644 index 0000000..929b1d5 --- /dev/null +++ b/flannel/hooks/relations/etcd/interface.yaml @@ -0,0 +1,4 @@ +name: etcd +summary: Interface for relating to ETCD +version: 2 +maintainer: "Charles Butler " diff --git a/flannel/hooks/relations/etcd/peers.py b/flannel/hooks/relations/etcd/peers.py new file mode 100644 index 0000000..90980d1 --- /dev/null +++ b/flannel/hooks/relations/etcd/peers.py @@ -0,0 +1,70 @@ +#!/usr/bin/python +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from charms.reactive import RelationBase +from charms.reactive import hook +from charms.reactive import scopes + + +class EtcdPeer(RelationBase): + '''This class handles peer relation communication by setting states that + the reactive code can respond to. ''' + + scope = scopes.UNIT + + @hook('{peers:etcd}-relation-joined') + def peer_joined(self): + '''A new peer has joined, set the state on the unit so we can track + when they are departed. ''' + conv = self.conversation() + conv.set_state('{relation_name}.joined') + + @hook('{peers:etcd}-relation-departed') + def peers_going_away(self): + '''Trigger a state on the unit that it is leaving. We can use this + state in conjunction with the joined state to determine which unit to + unregister from the etcd cluster. ''' + conv = self.conversation() + conv.remove_state('{relation_name}.joined') + conv.set_state('{relation_name}.departing') + + def dismiss(self): + '''Remove the departing state from all other units in the conversation, + and we can resume normal operation. + ''' + for conv in self.conversations(): + conv.remove_state('{relation_name}.departing') + + def get_peers(self): + '''Return a list of names for the peers participating in this + conversation scope. ''' + peers = [] + # Iterate over all the conversations of this type. 
+ for conversation in self.conversations(): + peers.append(conversation.scope) + return peers + + def set_db_ingress_address(self, address): + '''Set the ingress address belonging to the db relation.''' + for conversation in self.conversations(): + conversation.set_remote('db-ingress-address', address) + + def get_db_ingress_addresses(self): + '''Return a list of db ingress addresses''' + addresses = [] + # Iterate over all the conversations of this type. + for conversation in self.conversations(): + address = conversation.get_remote('db-ingress-address') + if address: + addresses.append(address) + return addresses diff --git a/flannel/hooks/relations/etcd/provides.py b/flannel/hooks/relations/etcd/provides.py new file mode 100644 index 0000000..3cfc174 --- /dev/null +++ b/flannel/hooks/relations/etcd/provides.py @@ -0,0 +1,47 @@ +#!/usr/bin/python +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from charms.reactive import RelationBase +from charms.reactive import hook +from charms.reactive import scopes + + +class EtcdProvider(RelationBase): + scope = scopes.GLOBAL + + @hook('{provides:etcd}-relation-{joined,changed}') + def joined_or_changed(self): + ''' Set the connected state from the provides side of the relation. ''' + self.set_state('{relation_name}.connected') + + @hook('{provides:etcd}-relation-{broken,departed}') + def broken_or_departed(self): + '''Remove connected state from the provides side of the relation. 
''' + conv = self.conversation() + if len(conv.units) == 1: + conv.remove_state('{relation_name}.connected') + + def set_client_credentials(self, key, cert, ca): + ''' Set the client credentials on the global conversation for this + relation. ''' + self.set_remote('client_key', key) + self.set_remote('client_ca', ca) + self.set_remote('client_cert', cert) + + def set_connection_string(self, connection_string, version=''): + ''' Set the connection string on the global conversation for this + relation. ''' + # Note: Version added as a late-dependency for 2 => 3 migration + # If no version is specified, consumers should presume etcd 2.x + self.set_remote('connection_string', connection_string) + self.set_remote('version', version) diff --git a/flannel/hooks/relations/etcd/requires.py b/flannel/hooks/relations/etcd/requires.py new file mode 100644 index 0000000..435532f --- /dev/null +++ b/flannel/hooks/relations/etcd/requires.py @@ -0,0 +1,80 @@ +#!/usr/bin/python +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import os + +from charms.reactive import RelationBase +from charms.reactive import hook +from charms.reactive import scopes + + +class EtcdClient(RelationBase): + scope = scopes.GLOBAL + + @hook('{requires:etcd}-relation-{joined,changed}') + def changed(self): + ''' Indicate the relation is connected, and if the relation data is + set it is also available. 
''' + self.set_state('{relation_name}.connected') + + if self.get_connection_string(): + self.set_state('{relation_name}.available') + # Get the ca, key, cert from the relation data. + cert = self.get_client_credentials() + # The tls state depends on the existence of the ca, key and cert. + if cert['client_cert'] and cert['client_key'] and cert['client_ca']: # noqa + self.set_state('{relation_name}.tls.available') + + @hook('{requires:etcd}-relation-{broken, departed}') + def broken(self): + ''' Indicate the relation is no longer available and not connected. ''' + self.remove_state('{relation_name}.available') + self.remove_state('{relation_name}.connected') + self.remove_state('{relation_name}.tls.available') + + def connection_string(self): + ''' This method is deprecated but ensures backward compatibility + @see get_connection_string(self). ''' + return self.get_connection_string() + + def get_connection_string(self): + ''' Return the connection string, if available, or None. ''' + return self.get_remote('connection_string') + + def get_version(self): + ''' Return the version of the etcd protocol being used, or None. ''' + return self.get_remote('version') + + def get_client_credentials(self): + ''' Return a dict with the client certificate, ca and key to + communicate with etcd using tls. ''' + return {'client_cert': self.get_remote('client_cert'), + 'client_key': self.get_remote('client_key'), + 'client_ca': self.get_remote('client_ca')} + + def save_client_credentials(self, key, cert, ca): + ''' Save all the client certificates for etcd to local files. 
''' + self._save_remote_data('client_cert', cert) + self._save_remote_data('client_key', key) + self._save_remote_data('client_ca', ca) + + def _save_remote_data(self, key, path): + ''' Save the remote data to a file indicated by path creating the + parent directory if needed.''' + value = self.get_remote(key) + if value: + parent = os.path.dirname(path) + if not os.path.isdir(parent): + os.makedirs(parent) + with open(path, 'w') as stream: + stream.write(value) diff --git a/flannel/hooks/relations/kubernetes-cni/.github/workflows/tests.yaml b/flannel/hooks/relations/kubernetes-cni/.github/workflows/tests.yaml new file mode 100644 index 0000000..9801450 --- /dev/null +++ b/flannel/hooks/relations/kubernetes-cni/.github/workflows/tests.yaml @@ -0,0 +1,24 @@ +name: Test Suite for K8s Service Interface + +on: + - pull_request + +jobs: + lint-and-unit-tests: + name: Lint & Unit tests + runs-on: ubuntu-latest + strategy: + matrix: + python: [3.6, 3.7, 3.8, 3.9] + steps: + - name: Check out code + uses: actions/checkout@v2 + - name: Setup Python + uses: actions/setup-python@v2 + with: + python-version: ${{ matrix.python }} + - name: Install Tox + run: pip install tox + - name: Run lint & unit tests + run: tox + diff --git a/flannel/hooks/relations/kubernetes-cni/.gitignore b/flannel/hooks/relations/kubernetes-cni/.gitignore new file mode 100644 index 0000000..8d150f3 --- /dev/null +++ b/flannel/hooks/relations/kubernetes-cni/.gitignore @@ -0,0 +1,4 @@ +.DS_Store +.tox +__pycache__ +*.pyc diff --git a/kubernetes-worker/hooks/relations/kube-dns/__init__.py b/flannel/hooks/relations/kubernetes-cni/README.md similarity index 100% rename from kubernetes-worker/hooks/relations/kube-dns/__init__.py rename to flannel/hooks/relations/kubernetes-cni/README.md diff --git a/flannel/hooks/relations/kubernetes-cni/__init__.py b/flannel/hooks/relations/kubernetes-cni/__init__.py new file mode 100644 index 0000000..e69de29 diff --git 
a/flannel/hooks/relations/kubernetes-cni/interface.yaml b/flannel/hooks/relations/kubernetes-cni/interface.yaml new file mode 100644 index 0000000..7e3c123 --- /dev/null +++ b/flannel/hooks/relations/kubernetes-cni/interface.yaml @@ -0,0 +1,6 @@ +name: kubernetes-cni +summary: Interface for relating various CNI implementations +version: 0 +maintainer: "George Kraft " +ignore: +- tests diff --git a/flannel/hooks/relations/kubernetes-cni/provides.py b/flannel/hooks/relations/kubernetes-cni/provides.py new file mode 100644 index 0000000..9095c19 --- /dev/null +++ b/flannel/hooks/relations/kubernetes-cni/provides.py @@ -0,0 +1,89 @@ +#!/usr/bin/python + +from charmhelpers.core import hookenv +from charmhelpers.core.host import file_hash +from charms.layer.kubernetes_common import kubeclientconfig_path +from charms.reactive import Endpoint +from charms.reactive import toggle_flag, is_flag_set, clear_flag, set_flag + + +class CNIPluginProvider(Endpoint): + def manage_flags(self): + toggle_flag(self.expand_name("{endpoint_name}.connected"), self.is_joined) + toggle_flag( + self.expand_name("{endpoint_name}.available"), self.config_available() + ) + if is_flag_set(self.expand_name("endpoint.{endpoint_name}.changed")): + clear_flag(self.expand_name("{endpoint_name}.configured")) + clear_flag(self.expand_name("endpoint.{endpoint_name}.changed")) + + def set_config(self, is_master): + """Relays a dict of kubernetes configuration information.""" + for relation in self.relations: + relation.to_publish_raw.update({"is_master": is_master}) + set_flag(self.expand_name("{endpoint_name}.configured")) + + def config_available(self): + """Ensures all config from the CNI plugin is available.""" + goal_state = hookenv.goal_state() + related_apps = [ + app + for app in goal_state.get("relations", {}).get(self.endpoint_name, "") + if "/" not in app + ] + if not related_apps: + return False + configs = self.get_configs() + return all( + "cidr" in config and "cni-conf-file" in config + for 
config in [configs.get(related_app, {}) for related_app in related_apps] + ) + + def get_config(self, default=None): + """Get CNI config for one related application. + + If default is specified, and there is a related application with a + matching name, then that application is chosen. Otherwise, the + application is chosen alphabetically. + + Whichever application is chosen, that application's CNI config is + returned. + """ + configs = self.get_configs() + if not configs: + return {} + elif default and default not in configs: + msg = "relation not found for default CNI %s, ignoring" % default + hookenv.log(msg, level="WARN") + return self.get_config() + elif default: + return configs.get(default, {}) + else: + return configs.get(sorted(configs)[0], {}) + + def get_configs(self): + """Get CNI configs for all related applications. + + This returns a mapping of application names to CNI configs. Here's an + example return value: + { + 'flannel': { + 'cidr': '10.1.0.0/16', + 'cni-conf-file': '10-flannel.conflist' + }, + 'calico': { + 'cidr': '192.168.0.0/16', + 'cni-conf-file': '10-calico.conflist' + } + } + """ + return { + relation.application_name: relation.joined_units.received_raw + for relation in self.relations + if relation.application_name + } + + def notify_kubeconfig_changed(self): + kubeconfig_hash = file_hash(kubeclientconfig_path) + for relation in self.relations: + relation.to_publish_raw.update({"kubeconfig-hash": kubeconfig_hash}) diff --git a/flannel/hooks/relations/kubernetes-cni/requires.py b/flannel/hooks/relations/kubernetes-cni/requires.py new file mode 100644 index 0000000..2067826 --- /dev/null +++ b/flannel/hooks/relations/kubernetes-cni/requires.py @@ -0,0 +1,54 @@ +#!/usr/bin/python + +from charmhelpers.core import unitdata +from charms.reactive import Endpoint +from charms.reactive import when_any, when_not +from charms.reactive import set_state, remove_state + +db = unitdata.kv() + + +class CNIPluginClient(Endpoint): + def 
class CNIPluginClient(Endpoint):
    """'Requires' side of the kubernetes-cni interface, used by CNI plugin
    charms (e.g. flannel) to receive cluster info and publish CNI config."""

    def manage_flags(self):
        # Track the kubeconfig hash published by the provides side so the
        # charm can react when the control plane's kubeconfig changes.
        kubeconfig_hash = self.get_config().get("kubeconfig-hash")
        kubeconfig_hash_key = self.expand_name("{endpoint_name}.kubeconfig-hash")
        if kubeconfig_hash:
            set_state(self.expand_name("{endpoint_name}.kubeconfig.available"))
            if kubeconfig_hash != db.get(kubeconfig_hash_key):
                # ".changed" is the signal consumers clear once they have
                # re-rendered their config; remember the new hash locally.
                set_state(self.expand_name("{endpoint_name}.kubeconfig.changed"))
                db.set(kubeconfig_hash_key, kubeconfig_hash)

    @when_any("endpoint.{endpoint_name}.joined", "endpoint.{endpoint_name}.changed")
    def changed(self):
        """Indicate the relation is connected, and if the relation data is
        set it is also available."""
        set_state(self.expand_name("{endpoint_name}.connected"))
        config = self.get_config()
        # "is_master" arrives as the *string* "True"/"False" (raw relation
        # data); anything else means the remote side has not published yet.
        # NOTE(review): assumes indexing a missing key does not raise —
        # confirm received_raw's KeyError semantics in charms.reactive.
        if config["is_master"] == "True":
            set_state(self.expand_name("{endpoint_name}.is-master"))
            set_state(self.expand_name("{endpoint_name}.configured"))
        elif config["is_master"] == "False":
            set_state(self.expand_name("{endpoint_name}.is-worker"))
            set_state(self.expand_name("{endpoint_name}.configured"))
        else:
            remove_state(self.expand_name("{endpoint_name}.configured"))
        remove_state(self.expand_name("endpoint.{endpoint_name}.changed"))

    @when_not("endpoint.{endpoint_name}.joined")
    def broken(self):
        """Indicate the relation is no longer available and not connected."""
        remove_state(self.expand_name("{endpoint_name}.connected"))
        remove_state(self.expand_name("{endpoint_name}.is-master"))
        remove_state(self.expand_name("{endpoint_name}.is-worker"))
        remove_state(self.expand_name("{endpoint_name}.configured"))

    def get_config(self):
        """Get the kubernetes configuration information."""
        return self.all_joined_units.received_raw

    def set_config(self, cidr, cni_conf_file):
        """Sets the CNI configuration information."""
        for relation in self.relations:
            relation.to_publish_raw.update(
                {"cidr": cidr, "cni-conf-file": cni_conf_file}
            )
b/flannel/hooks/relations/kubernetes-cni/tox.ini new file mode 100644 index 0000000..69ab91a --- /dev/null +++ b/flannel/hooks/relations/kubernetes-cni/tox.ini @@ -0,0 +1,27 @@ +[tox] +skipsdist = True +envlist = lint,py3 + +[testenv] +basepython = python3 +setenv = + PYTHONPATH={toxinidir}:{toxinidir}/lib + PYTHONBREAKPOINT=ipdb.set_trace +deps = + pyyaml + pytest + flake8 + black + ipdb + charms.unit_test +commands = pytest --tb native -s {posargs} + +[testenv:lint] +envdir = {toxworkdir}/py3 +commands = + flake8 {toxinidir} + black --check {toxinidir} + +[flake8] +exclude=.tox +max-line-length = 88 diff --git a/flannel/hooks/relations/nrpe-external-master/README.md b/flannel/hooks/relations/nrpe-external-master/README.md new file mode 100644 index 0000000..e33deb8 --- /dev/null +++ b/flannel/hooks/relations/nrpe-external-master/README.md @@ -0,0 +1,66 @@ +# nrpe-external-master interface + +Use this interface to register nagios checks in your charm layers. + +## Purpose + +This interface is designed to interoperate with the +[nrpe-external-master](https://jujucharms.com/nrpe-external-master) subordinate charm. + +## How to use in your layers + +The event handler for `nrpe-external-master.available` is called with an object +through which you can register your own custom nagios checks, when a relation +is established with `nrpe-external-master:nrpe-external-master`. + +This object provides a method, + +_add_check_(args, name=_check_name_, description=_description_, context=_context_, unit=_unit_) + +which is called to register a nagios plugin check for your service. + +All arguments are required. + +*args* is a list of nagios plugin command line arguments, starting with the path to the plugin executable. 
+ +*name* is the name of the check registered in nagios + +*description* is some text that describes what the check is for and what it does + +*context* is the nagios context name, something that identifies your application + +*unit* is `hookenv.local_unit()` + +The nrpe subordinate installs `check_http`, so you can use it like this: + +``` +@when('nrpe-external-master.available') +def setup_nagios(nagios): + config = hookenv.config() + unit_name = hookenv.local_unit() + nagios.add_check(['/usr/lib/nagios/plugins/check_http', + '-I', '127.0.0.1', '-p', str(config['port']), + '-e', " 200 OK", '-u', '/publickey'], + name="check_http", + description="Verify my awesome service is responding", + context=config["nagios_context"], + unit=unit_name, + ) +``` +If your `nagios.add_check` defines a custom plugin, you will also need to restart the `nagios-nrpe-server` service. + +Consult the nagios documentation for more information on [how to write your own +plugins](https://assets.nagios.com/downloads/nagioscore/docs/nagioscore/4/en/pluginapi.html) +or [find one](https://www.nagios.org/projects/nagios-plugins/) that does what you need. + +## Example deployment + +``` +$ juju deploy your-awesome-charm +$ juju deploy nrpe-external-master --config site-nagios.yaml +$ juju add-relation your-awesome-charm nrpe-external-master +``` + +where `site-nagios.yaml` has the necessary configuration settings for the +subordinate to connect to nagios. 
class NrpeExternalMasterProvides(RelationBase):
    """Provides side of the nrpe-external-master interface.

    Lets a principal charm register Nagios NRPE checks by writing check and
    service definition files that the nrpe subordinate picks up.
    """

    scope = scopes.GLOBAL

    @hook('{provides:nrpe-external-master}-relation-{joined,changed}')
    def changed_nrpe(self):
        # Relation established or data updated: checks may be registered.
        self.set_state('{relation_name}.available')

    @hook('{provides:nrpe-external-master}-relation-{broken,departed}')
    def broken_nrpe(self):
        self.remove_state('{relation_name}.available')

    def add_check(self, args, name=None, description=None, context=None,
                  servicegroups=None, unit=None):
        """Register a Nagios plugin check.

        :param args: plugin command line as a list, starting with the
            plugin executable path.
        :param name: check name registered in Nagios.
        :param description: human-readable description for the service.
        :param context: Nagios context; falls back to the remote
            nagios_host_context when published.
        :param servicegroups: servicegroups entry; defaults to context.
        :param unit: unit name; defaults to hookenv.local_unit().
        """
        # Files written so far, tracked so removed() can clean them up.
        nagios_files = self.get_local('nagios.check.files', [])

        if not unit:
            unit = hookenv.local_unit()
        # Nagios object names cannot contain "/".
        unit = unit.replace('/', '-')
        context = self.get_remote('nagios_host_context', context)
        host_name = self.get_remote('nagios_hostname',
                                    '%s-%s' % (context, unit))

        check_tmpl = """
#---------------------------------------------------
# This file is Juju managed
#---------------------------------------------------
command[%(check_name)s]=%(check_args)s
"""
        service_tmpl = """
#---------------------------------------------------
# This file is Juju managed
#---------------------------------------------------
define service {
    use active-service
    host_name %(host_name)s
    service_description %(description)s
    check_command check_nrpe!%(check_name)s
    servicegroups %(servicegroups)s
}
"""
        # NOTE(review): name defaults to None, which would produce
        # "check_None.cfg" — callers appear expected to always pass name.
        check_filename = "/etc/nagios/nrpe.d/check_%s.cfg" % (name)
        with open(check_filename, "w") as fh:
            fh.write(check_tmpl % {
                'check_args': ' '.join(args),
                'check_name': name,
            })
        nagios_files.append(check_filename)

        service_filename = "/var/lib/nagios/export/service__%s_%s.cfg" % (
            unit, name)
        with open(service_filename, "w") as fh:
            fh.write(service_tmpl % {
                'servicegroups': servicegroups or context,
                'context': context,
                'description': description,
                'check_name': name,
                'host_name': host_name,
                'unit_name': unit,
            })
        nagios_files.append(service_filename)

        self.set_local('nagios.check.files', nagios_files)

    def removed(self):
        """Delete every file previously written by add_check (best-effort)."""
        files = self.get_local('nagios.check.files', [])
        for f in files:
            try:
                os.unlink(f)
            except Exception as e:
                # Best-effort cleanup: log and continue with remaining files.
                hookenv.log("failed to remove %s: %s" % (f, e))
        self.set_local('nagios.check.files', [])
        self.remove_state('{relation_name}.removed')

    def added(self):
        self.updated()

    def updated(self):
        # Publish a fresh timestamp so the remote side sees a data change
        # and re-reads the exported check/service definitions.
        relation_info = {
            'timestamp': datetime.datetime.now().isoformat(),
        }
        self.set_remote(**relation_info)
#!/usr/bin/env python3
# Juju "stop" hook entry point — standard layer:basic hook boilerplate shared
# by every hook in this charm.

# Load modules from $JUJU_CHARM_DIR/lib
import sys
sys.path.append('lib')

# Bootstrap must run before importing charmhelpers: it installs/activates the
# wheelhouse dependencies (and virtualenv, if configured) that charmhelpers
# and the reactive framework are loaded from.
from charms.layer import basic  # noqa
basic.bootstrap_charm_deps()

from charmhelpers.core import hookenv  # noqa
# Maintain config.* states around the hook invocation.
hookenv.atstart(basic.init_config_states)
hookenv.atexit(basic.clear_config_states)


# This will load and run the appropriate @hook and other decorated
# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive,
# and $JUJU_CHARM_DIR/hooks/relations.
#
# See https://jujucharms.com/docs/stable/authors-charm-building
# for more information on this pattern.
from charms.reactive import main  # noqa
main()
+from charms.reactive import main # noqa +main() diff --git a/flannel/hooks/upgrade-charm b/flannel/hooks/upgrade-charm new file mode 100755 index 0000000..9858c6b --- /dev/null +++ b/flannel/hooks/upgrade-charm @@ -0,0 +1,22 @@ +#!/usr/bin/env python3 + +# Load modules from $JUJU_CHARM_DIR/lib +import sys +sys.path.append('lib') + +from charms.layer import basic # noqa +basic.bootstrap_charm_deps() + +from charmhelpers.core import hookenv # noqa +hookenv.atstart(basic.init_config_states) +hookenv.atexit(basic.clear_config_states) + + +# This will load and run the appropriate @hook and other decorated +# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive, +# and $JUJU_CHARM_DIR/hooks/relations. +# +# See https://jujucharms.com/docs/stable/authors-charm-building +# for more information on this pattern. +from charms.reactive import main # noqa +main() diff --git a/flannel/icon.svg b/flannel/icon.svg new file mode 100644 index 0000000..cf93867 --- /dev/null +++ b/flannel/icon.svg @@ -0,0 +1,357 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + image/svg+xml + + + + + + + + + + + + + + + + + + diff --git a/kata/layer.yaml b/flannel/layer.yaml similarity index 54% rename from kata/layer.yaml rename to flannel/layer.yaml index 7dfb411..0e3a3c0 100644 --- a/kata/layer.yaml +++ b/flannel/layer.yaml @@ -1,20 +1,26 @@ "includes": - "layer:options" - "layer:basic" +- "interface:nrpe-external-master" +- "interface:etcd" +- "interface:kubernetes-cni" - "layer:debug" -- "interface:container-runtime" -- "interface:untrusted-container-runtime" +- "layer:nagios" - "layer:status" +- "layer:kubernetes-common" "exclude": [".travis.yml", "tests", "tox.ini", "test-requirements.txt", "unit_tests"] "options": "basic": - "packages": [] - "python_packages": [] "use_venv": !!bool "true" + "packages": + - "net-tools" + "python_packages": [] "include_system_packages": !!bool 
"false" "debug": {} + "nagios": {} "status": "patch-hookenv": !!bool "true" - "kata": {} -"repo": "https://github.com/charmed-kubernetes/charm-kata" -"is": "kata" + "kubernetes-common": {} + "flannel": {} +"repo": "https://github.com/juju-solutions/charm-flannel.git" +"is": "flannel" diff --git a/flannel/lib/charms/flannel/common.py b/flannel/lib/charms/flannel/common.py new file mode 100644 index 0000000..6b7c44e --- /dev/null +++ b/flannel/lib/charms/flannel/common.py @@ -0,0 +1,30 @@ +from time import sleep + + +def retry(times, delay_secs): + """ Decorator for retrying a method call. + Args: + times: How many times should we retry before giving up + delay_secs: Delay in secs + Returns: A callable that would return the last call outcome + """ + + def retry_decorator(func): + """ Decorator to wrap the function provided. + Args: + func: Provided function should return either True od False + Returns: A callable that would return the last call outcome + """ + def _wrapped(*args, **kwargs): + res = func(*args, **kwargs) + attempt = 0 + while not res and attempt < times: + sleep(delay_secs) + res = func(*args, **kwargs) + if res: + break + attempt += 1 + return res + return _wrapped + + return retry_decorator diff --git a/kata/lib/charms/layer/__init__.py b/flannel/lib/charms/layer/__init__.py similarity index 100% rename from kata/lib/charms/layer/__init__.py rename to flannel/lib/charms/layer/__init__.py diff --git a/kata/lib/charms/layer/basic.py b/flannel/lib/charms/layer/basic.py similarity index 88% rename from kata/lib/charms/layer/basic.py rename to flannel/lib/charms/layer/basic.py index 7507203..bbdd074 100644 --- a/kata/lib/charms/layer/basic.py +++ b/flannel/lib/charms/layer/basic.py @@ -199,7 +199,13 @@ def bootstrap_charm_deps(): # a set so that we can ignore the pre-install packages and let pip # choose the best version in case there are multiple from layer # conflicts) - pkgs = _load_wheelhouse_versions().keys() - set(pre_install_pkgs) + _versions = 
_load_wheelhouse_versions() + _pkgs = _versions.keys() - set(pre_install_pkgs) + # add back the versions such that each package in pkgs is + # ==. + # This ensures that pip 20.3.4+ will install the packages from the + # wheelhouse without (erroneously) flagging an error. + pkgs = _add_back_versions(_pkgs, _versions) reinstall_flag = '--force-reinstall' if not cfg.get('use_venv', True) and pre_eoan: reinstall_flag = '--ignore-installed' @@ -278,6 +284,55 @@ def _load_wheelhouse_versions(): return versions +def _add_back_versions(pkgs, versions): + """Add back the version strings to each of the packages. + + The versions are LooseVersion() from _load_wheelhouse_versions(). This + function strips the ".zip" or ".tar.gz" from the end of the version string + and adds it back to the package in the form of == + + If a package name is not a key in the versions dictionary, then it is + returned in the list unchanged. + + :param pkgs: A list of package names + :type pkgs: List[str] + :param versions: A map of package to LooseVersion + :type versions: Dict[str, LooseVersion] + :returns: A list of (maybe) versioned packages + :rtype: List[str] + """ + def _strip_ext(s): + """Strip an extension (if it exists) from the string + + :param s: the string to strip an extension off if it exists + :type s: str + :returns: string without an extension of .zip or .tar.gz + :rtype: str + """ + for ending in [".zip", ".tar.gz"]: + if s.endswith(ending): + return s[:-len(ending)] + return s + + def _maybe_add_version(pkg): + """Maybe add back the version number to a package if it exists. + + Adds the version number, if the package exists in the lexically + captured `versions` dictionary, in the form ==. Strips + the extension if it exists. + + :param pkg: the package name to (maybe) add the version number to. 
+ :type pkg: str + """ + try: + return "{}=={}".format(pkg, _strip_ext(str(versions[pkg]))) + except KeyError: + pass + return pkg + + return [_maybe_add_version(pkg) for pkg in pkgs] + + def _update_if_newer(pip, pkgs): installed = _load_installed_versions(pip) wheelhouse = _load_wheelhouse_versions() diff --git a/kata/lib/charms/layer/execd.py b/flannel/lib/charms/layer/execd.py similarity index 100% rename from kata/lib/charms/layer/execd.py rename to flannel/lib/charms/layer/execd.py diff --git a/flannel/lib/charms/layer/kubernetes_common.py b/flannel/lib/charms/layer/kubernetes_common.py new file mode 100644 index 0000000..fb14ad2 --- /dev/null +++ b/flannel/lib/charms/layer/kubernetes_common.py @@ -0,0 +1,924 @@ +#!/usr/bin/env python + +# Copyright 2015 The Kubernetes Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
def get_version(bin_name):
    """Get the version of an installed Kubernetes binary.

    Runs ``<bin_name> --version`` and extracts the first three integers
    found in its output.

    :param str bin_name: Name of binary
    :return: 3-tuple version (maj, min, patch)

    Example::

        >>> `get_version('kubelet')
        (1, 6, 0)

    """
    # Keep the .split() form so bin_name may carry extra tokens.
    command = "{} --version".format(bin_name).split()
    raw = subprocess.check_output(command).decode("utf-8")
    numbers = re.findall("[0-9]+", raw)
    return tuple(map(int, numbers[:3]))
def retry(times, delay_secs):
    """Decorator for retrying a method call.

    The wrapped function is invoked once and then, while it returns a
    falsy value, retried up to ``times`` additional attempts with a
    ``delay_secs`` pause before each retry.

    Args:
        times: How many times should we retry before giving up
        delay_secs: Delay in secs

    Returns: A callable that would return the last call outcome
    """

    def retry_decorator(func):
        """Wrap the provided function, which should return True or False."""

        def _wrapped(*args, **kwargs):
            result = func(*args, **kwargs)
            attempts_left = times
            while attempts_left and not result:
                sleep(delay_secs)
                result = func(*args, **kwargs)
                attempts_left -= 1
            return result

        return _wrapped

    return retry_decorator
def check_resources_for_upgrade_needed(checksum_prefix, snap_resources):
    """Return True when any snap resource differs from the checksum recorded
    in the unit database, i.e. an upgrade of the snaps is required."""
    hookenv.status_set("maintenance", "Checking resources")

    def _resource_changed(resource):
        # Compare the stored checksum against a freshly computed one.
        key = get_resource_checksum_db_key(checksum_prefix, resource)
        return calculate_resource_checksum(resource) != db.get(key)

    # any() short-circuits on the first changed resource, like the
    # early-return loop it replaces.
    return any(_resource_changed(resource) for resource in snap_resources)
def service_start(service_name):
    """Start the given system service.

    :param str service_name: name of the service to start

    Bug fix: this previously called ``host.service_stop`` — despite its
    name and the "Starting" log line — which stopped the very service it
    was asked to start. It now calls ``host.service_start``.
    """
    hookenv.log("Starting {0} service.".format(service_name))
    host.service_start(service_name)
def kubectl_success(*args):
    """Runs kubectl with the given args. Returns True if successful, False if
    not."""
    try:
        kubectl(*args)
    except CalledProcessError:
        # Non-zero exit from kubectl is reported as failure, not raised.
        return False
    return True
def get_node_name():
    """Return the node name this unit registers with Kubernetes.

    AWS registers nodes by their FQDN; every other provider uses the plain
    hostname. The active cloud integration endpoint (if any) overrides a
    cloud-provider value supplied via kubelet-extra-args.
    """
    # Operator-supplied default from the kubelet extra args.
    cloud_provider = parse_extra_args("kubelet-extra-args").get("cloud-provider", "")
    # First matching ready endpoint wins, mirroring the original if/elif chain.
    endpoint_providers = (
        ("endpoint.aws.ready", "aws"),
        ("endpoint.gcp.ready", "gce"),
        ("endpoint.openstack.ready", "openstack"),
        ("endpoint.vsphere.ready", "vsphere"),
        ("endpoint.azure.ready", "azure"),
    )
    for flag, provider in endpoint_providers:
        if is_state(flag):
            cloud_provider = provider
            break
    return getfqdn().lower() if cloud_provider == "aws" else gethostname().lower()
def create_kubeconfig(
    kubeconfig,
    server,
    ca,
    key=None,
    certificate=None,
    user="ubuntu",
    context="juju-context",
    cluster="juju-cluster",
    password=None,
    token=None,
    keystone=False,
    aws_iam_cluster_id=None,
):
    """Create a configuration for Kubernetes based on path using the supplied
    arguments for values of the Kubernetes server, CA, key, certificate, user
    context and cluster.

    The file is first written to ``<kubeconfig>.new`` via a sequence of
    ``kubectl config`` invocations and only renamed over the destination
    when the content actually differs, so watchers of the kubeconfig file
    are not spuriously triggered.

    Exactly one authentication mechanism must be provided: a key/certificate
    pair, a password, or a token (token and password are mutually
    exclusive). Raises ValueError otherwise.
    """
    if not key and not certificate and not password and not token:
        raise ValueError("Missing authentication mechanism.")
    elif key and not certificate:
        raise ValueError("Missing certificate.")
    elif not key and certificate:
        raise ValueError("Missing key.")
    elif token and password:
        # token and password are mutually exclusive. Error early if both are
        # present. The developer has requested an impossible situation.
        # see: kubectl config set-credentials --help
        raise ValueError("Token and Password are mutually exclusive.")

    old_kubeconfig = Path(kubeconfig)
    new_kubeconfig = Path(str(kubeconfig) + ".new")

    # Create the config file with the address of the master server.
    cmd = (
        "kubectl config --kubeconfig={0} set-cluster {1} "
        "--server={2} --certificate-authority={3} --embed-certs=true"
    )
    check_call(split(cmd.format(new_kubeconfig, cluster, server, ca)))
    # Delete old users
    cmd = "kubectl config --kubeconfig={0} unset users"
    check_call(split(cmd.format(new_kubeconfig)))
    # Create the credentials using the client flags.
    cmd = "kubectl config --kubeconfig={0} " "set-credentials {1} ".format(
        new_kubeconfig, user
    )

    if key and certificate:
        cmd = (
            "{0} --client-key={1} --client-certificate={2} "
            "--embed-certs=true".format(cmd, key, certificate)
        )
    if password:
        cmd = "{0} --username={1} --password={2}".format(cmd, user, password)
    # This is mutually exclusive from password. They will not work together.
    if token:
        cmd = "{0} --token={1}".format(cmd, token)
    check_call(split(cmd))
    # Create a default context with the cluster.
    cmd = "kubectl config --kubeconfig={0} set-context {1} " "--cluster={2} --user={3}"
    check_call(split(cmd.format(new_kubeconfig, context, cluster, user)))
    # Make the config use this new context.
    cmd = "kubectl config --kubeconfig={0} use-context {1}"
    check_call(split(cmd.format(new_kubeconfig, context)))
    if keystone:
        # create keystone user
        cmd = "kubectl config --kubeconfig={0} " "set-credentials keystone-user".format(
            new_kubeconfig
        )
        check_call(split(cmd))
        # create keystone context
        cmd = (
            "kubectl config --kubeconfig={0} "
            "set-context --cluster={1} "
            "--user=keystone-user keystone".format(new_kubeconfig, cluster)
        )
        check_call(split(cmd))
        # use keystone context
        cmd = "kubectl config --kubeconfig={0} " "use-context keystone".format(
            new_kubeconfig
        )
        check_call(split(cmd))
        # manually add exec command until kubectl can do it for us
        # (kubectl writes the empty credential as "user: {}"; splice the
        # exec-based keystone auth block in its place)
        with open(new_kubeconfig, "r") as f:
            content = f.read()
        content = content.replace(
            """- name: keystone-user
  user: {}""",
            """- name: keystone-user
  user:
    exec:
      command: "/snap/bin/client-keystone-auth"
      apiVersion: "client.authentication.k8s.io/v1beta1"
""",
        )
        with open(new_kubeconfig, "w") as f:
            f.write(content)
    if aws_iam_cluster_id:
        # create aws-iam context
        cmd = (
            "kubectl config --kubeconfig={0} "
            "set-context --cluster={1} "
            "--user=aws-iam-user aws-iam-authenticator"
        )
        check_call(split(cmd.format(new_kubeconfig, cluster)))

        # append a user for aws-iam
        cmd = (
            "kubectl --kubeconfig={0} config set-credentials "
            "aws-iam-user --exec-command=aws-iam-authenticator "
            '--exec-arg="token" --exec-arg="-i" --exec-arg="{1}" '
            '--exec-arg="-r" --exec-arg="<>" '
            "--exec-api-version=client.authentication.k8s.io/v1alpha1"
        )
        check_call(split(cmd.format(new_kubeconfig, aws_iam_cluster_id)))

        # not going to use aws-iam context by default since we don't have
        # the desired arn. This will make the config not usable if copied.

        # cmd = 'kubectl config --kubeconfig={0} ' \
        #     'use-context aws-iam-authenticator'.format(new_kubeconfig)
        # check_call(split(cmd))
    # Only replace the live kubeconfig when the content actually changed.
    if old_kubeconfig.exists():
        changed = new_kubeconfig.read_text() != old_kubeconfig.read_text()
    else:
        changed = True
    if changed:
        new_kubeconfig.rename(old_kubeconfig)
+ db.set(prev_args_key, {}) + + +def _snap_common_path(component): + return Path("/var/snap/{}/common".format(component)) + + +def cloud_config_path(component): + return _snap_common_path(component) / "cloud-config.conf" + + +def _gcp_creds_path(component): + return _snap_common_path(component) / "gcp-creds.json" + + +def _daemon_env_path(component): + return _snap_common_path(component) / "environment" + + +def _cloud_endpoint_ca_path(component): + return _snap_common_path(component) / "cloud-endpoint-ca.crt" + + +def encryption_config_path(): + apiserver_snap_common_path = _snap_common_path("kube-apiserver") + encryption_conf_dir = apiserver_snap_common_path / "encryption" + return encryption_conf_dir / "encryption_config.yaml" + + +def write_gcp_snap_config(component): + # gcp requires additional credentials setup + gcp = endpoint_from_flag("endpoint.gcp.ready") + creds_path = _gcp_creds_path(component) + with creds_path.open("w") as fp: + os.fchmod(fp.fileno(), 0o600) + fp.write(gcp.credentials) + + # create a cloud-config file that sets token-url to nil to make the + # services use the creds env var instead of the metadata server, as + # well as making the cluster multizone + comp_cloud_config_path = cloud_config_path(component) + comp_cloud_config_path.write_text( + "[Global]\n" "token-url = nil\n" "multizone = true\n" + ) + + daemon_env_path = _daemon_env_path(component) + if daemon_env_path.exists(): + daemon_env = daemon_env_path.read_text() + if not daemon_env.endswith("\n"): + daemon_env += "\n" + else: + daemon_env = "" + if gcp_creds_env_key not in daemon_env: + daemon_env += "{}={}\n".format(gcp_creds_env_key, creds_path) + daemon_env_path.parent.mkdir(parents=True, exist_ok=True) + daemon_env_path.write_text(daemon_env) + + +def generate_openstack_cloud_config(): + # openstack requires additional credentials setup + openstack = endpoint_from_flag("endpoint.openstack.ready") + + lines = [ + "[Global]", + "auth-url = {}".format(openstack.auth_url), + 
"region = {}".format(openstack.region), + "username = {}".format(openstack.username), + "password = {}".format(openstack.password), + "tenant-name = {}".format(openstack.project_name), + "domain-name = {}".format(openstack.user_domain_name), + "tenant-domain-name = {}".format(openstack.project_domain_name), + ] + if openstack.endpoint_tls_ca: + lines.append("ca-file = /etc/config/endpoint-ca.cert") + + lines.extend( + [ + "", + "[LoadBalancer]", + ] + ) + + if openstack.has_octavia in (True, None): + # Newer integrator charm will detect whether underlying OpenStack has + # Octavia enabled so we can set this intelligently. If we're still + # related to an older integrator, though, default to assuming Octavia + # is available. + lines.append("use-octavia = true") + else: + lines.append("use-octavia = false") + lines.append("lb-provider = haproxy") + if openstack.subnet_id: + lines.append("subnet-id = {}".format(openstack.subnet_id)) + if openstack.floating_network_id: + lines.append("floating-network-id = {}".format(openstack.floating_network_id)) + if openstack.lb_method: + lines.append("lb-method = {}".format(openstack.lb_method)) + if openstack.manage_security_groups: + lines.append( + "manage-security-groups = {}".format(openstack.manage_security_groups) + ) + if any( + [openstack.bs_version, openstack.trust_device_path, openstack.ignore_volume_az] + ): + lines.append("") + lines.append("[BlockStorage]") + if openstack.bs_version is not None: + lines.append("bs-version = {}".format(openstack.bs_version)) + if openstack.trust_device_path is not None: + lines.append("trust-device-path = {}".format(openstack.trust_device_path)) + if openstack.ignore_volume_az is not None: + lines.append("ignore-volume-az = {}".format(openstack.ignore_volume_az)) + return "\n".join(lines) + "\n" + + +def write_azure_snap_config(component): + azure = endpoint_from_flag("endpoint.azure.ready") + comp_cloud_config_path = cloud_config_path(component) + comp_cloud_config_path.write_text( 
+ json.dumps( + { + "useInstanceMetadata": True, + "useManagedIdentityExtension": azure.managed_identity, + "subscriptionId": azure.subscription_id, + "resourceGroup": azure.resource_group, + "location": azure.resource_group_location, + "vnetName": azure.vnet_name, + "vnetResourceGroup": azure.vnet_resource_group, + "subnetName": azure.subnet_name, + "securityGroupName": azure.security_group_name, + "loadBalancerSku": "standard", + "securityGroupResourceGroup": azure.security_group_resource_group, + "aadClientId": azure.aad_client_id, + "aadClientSecret": azure.aad_client_secret, + "tenantId": azure.tenant_id, + } + ) + ) + + +def configure_kube_proxy( + configure_prefix, api_servers, cluster_cidr, bind_address=None +): + kube_proxy_opts = {} + kube_proxy_opts["cluster-cidr"] = cluster_cidr + kube_proxy_opts["kubeconfig"] = kubeproxyconfig_path + kube_proxy_opts["logtostderr"] = "true" + kube_proxy_opts["v"] = "0" + num_apis = len(api_servers) + kube_proxy_opts["master"] = api_servers[get_unit_number() % num_apis] + kube_proxy_opts["hostname-override"] = get_node_name() + if bind_address: + kube_proxy_opts["bind-address"] = bind_address + elif is_ipv6(cluster_cidr): + kube_proxy_opts["bind-address"] = "::" + + if host.is_container(): + kube_proxy_opts["conntrack-max-per-core"] = "0" + + if is_dual_stack(cluster_cidr): + kube_proxy_opts["feature-gates"] = "IPv6DualStack=true" + + configure_kubernetes_service( + configure_prefix, "kube-proxy", kube_proxy_opts, "proxy-extra-args" + ) + + +def get_unit_number(): + return int(hookenv.local_unit().split("/")[1]) + + +def cluster_cidr(): + """Return the cluster CIDR provided by the CNI""" + cni = endpoint_from_flag("cni.available") + if not cni: + return None + config = hookenv.config() + if "default-cni" in config: + # master + default_cni = config["default-cni"] + else: + # worker + kube_control = endpoint_from_flag("kube-control.dns.available") + if not kube_control: + return None + default_cni = 
kube_control.get_default_cni() + return cni.get_config(default=default_cni)["cidr"] + + +def is_dual_stack(cidrs): + """Detect IPv4/IPv6 dual stack from CIDRs""" + return {net.version for net in get_networks(cidrs)} == {4, 6} + + +def is_ipv4(cidrs): + """Detect IPv6 from CIDRs""" + return get_ipv4_network(cidrs) is not None + + +def is_ipv6(cidrs): + """Detect IPv6 from CIDRs""" + return get_ipv6_network(cidrs) is not None + + +def is_ipv6_preferred(cidrs): + """Detect if IPv6 is preffered from CIDRs""" + return get_networks(cidrs)[0].version == 6 + + +def get_networks(cidrs): + """Convert a comma-separated list of CIDRs to a list of networks.""" + if not cidrs: + return [] + return [ipaddress.ip_interface(cidr).network for cidr in cidrs.split(",")] + + +def get_ipv4_network(cidrs): + """Get the IPv4 network from the given CIDRs or None""" + return {net.version: net for net in get_networks(cidrs)}.get(4) + + +def get_ipv6_network(cidrs): + """Get the IPv6 network from the given CIDRs or None""" + return {net.version: net for net in get_networks(cidrs)}.get(6) + + +def enable_ipv6_forwarding(): + """Enable net.ipv6.conf.all.forwarding in sysctl if it is not already.""" + check_call(["sysctl", "net.ipv6.conf.all.forwarding=1"]) + + +def get_bind_addrs(ipv4=True, ipv6=True): + """Get all global-scoped addresses that we might bind to.""" + try: + output = check_output(["ip", "-br", "addr", "show", "scope", "global"]) + except CalledProcessError: + # stderr will have any details, and go to the log + hookenv.log("Unable to determine global addresses", hookenv.ERROR) + return [] + + ignore_interfaces = ("lxdbr", "flannel", "cni", "virbr", "docker") + accept_versions = set() + if ipv4: + accept_versions.add(4) + if ipv6: + accept_versions.add(6) + + addrs = [] + for line in output.decode("utf8").splitlines(): + intf, state, *intf_addrs = line.split() + if state != "UP" or any( + intf.startswith(prefix) for prefix in ignore_interfaces + ): + continue + for addr in 
intf_addrs: + ip_addr = ipaddress.ip_interface(addr).ip + if ip_addr.version in accept_versions: + addrs.append(str(ip_addr)) + return addrs + + +class InvalidVMwareHost(Exception): + pass + + +def _get_vmware_uuid(): + serial_id_file = "/sys/class/dmi/id/product_serial" + # The serial id from VMWare VMs comes in following format: + # VMware-42 28 13 f5 d4 20 71 61-5d b0 7b 96 44 0c cf 54 + try: + with open(serial_id_file, "r") as f: + serial_string = f.read().strip() + if "VMware-" not in serial_string: + hookenv.log( + "Unable to find VMware ID in " + "product_serial: {}".format(serial_string) + ) + raise InvalidVMwareHost + serial_string = ( + serial_string.split("VMware-")[1].replace(" ", "").replace("-", "") + ) + uuid = "%s-%s-%s-%s-%s" % ( + serial_string[0:8], + serial_string[8:12], + serial_string[12:16], + serial_string[16:20], + serial_string[20:32], + ) + except IOError as err: + hookenv.log("Unable to read UUID from sysfs: {}".format(err)) + uuid = "UNKNOWN" + + return uuid + + +def token_generator(length=32): + """Generate a random token for use in account tokens. + + param: length - the length of the token to generate + """ + alpha = string.ascii_letters + string.digits + token = "".join(random.SystemRandom().choice(alpha) for _ in range(length)) + return token + + +def get_secret_names(): + """Return a dict of 'username: secret_id' for Charmed Kubernetes users.""" + try: + output = kubectl( + "get", + "secrets", + "-n", + AUTH_SECRET_NS, + "--field-selector", + "type={}".format(AUTH_SECRET_TYPE), + "-o", + "json", + ).decode("UTF-8") + except (CalledProcessError, FileNotFoundError): + # The api server may not be up, or we may be trying to run kubelet before + # the snap is installed. Send back an empty dict. 
+ hookenv.log("Unable to get existing secrets", level=hookenv.WARNING) + return {} + + secrets = json.loads(output) + secret_names = {} + if "items" in secrets: + for secret in secrets["items"]: + try: + secret_id = secret["metadata"]["name"] + username_b64 = secret["data"]["username"].encode("UTF-8") + except (KeyError, TypeError): + # CK secrets will have populated 'data', but not all secrets do + continue + secret_names[b64decode(username_b64).decode("UTF-8")] = secret_id + return secret_names + + +def generate_rfc1123(length=10): + """Generate a random string compliant with RFC 1123. + + https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#dns-subdomain-names + + param: length - the length of the string to generate + """ + length = 253 if length > 253 else length + valid_chars = string.ascii_lowercase + string.digits + rand_str = "".join(random.SystemRandom().choice(valid_chars) for _ in range(length)) + return rand_str + + +def create_secret(token, username, user, groups=None): + secrets = get_secret_names() + if username in secrets: + # Use existing secret ID if one exists for our username + secret_id = secrets[username] + else: + # secret IDs must be unique and rfc1123 compliant + sani_name = re.sub("[^0-9a-z.-]+", "-", user.lower()) + secret_id = "auth-{}-{}".format(sani_name, generate_rfc1123(10)) + + # The authenticator expects tokens to be in the form user::token + token_delim = "::" + if token_delim not in token: + token = "{}::{}".format(user, token) + + context = { + "type": AUTH_SECRET_TYPE, + "secret_name": secret_id, + "secret_namespace": AUTH_SECRET_NS, + "user": b64encode(user.encode("UTF-8")).decode("utf-8"), + "username": b64encode(username.encode("UTF-8")).decode("utf-8"), + "password": b64encode(token.encode("UTF-8")).decode("utf-8"), + "groups": b64encode(groups.encode("UTF-8")).decode("utf-8") if groups else "", + } + with tempfile.NamedTemporaryFile() as tmp_manifest: + render("cdk.auth-webhook-secret.yaml", 
tmp_manifest.name, context=context) + + if kubectl_manifest("apply", tmp_manifest.name): + hookenv.log("Created secret for {}".format(username)) + return True + else: + hookenv.log("WARN: Unable to create secret for {}".format(username)) + return False + + +def get_secret_password(username): + """Get the password for the given user from the secret that CK created.""" + try: + output = kubectl( + "get", + "secrets", + "-n", + AUTH_SECRET_NS, + "--field-selector", + "type={}".format(AUTH_SECRET_TYPE), + "-o", + "json", + ).decode("UTF-8") + except CalledProcessError: + # NB: apiserver probably isn't up. This can happen on boostrap or upgrade + # while trying to build kubeconfig files. If we need the 'admin' token during + # this time, pull it directly out of the kubeconfig file if possible. + token = None + if username == "admin": + admin_kubeconfig = Path("/root/.kube/config") + if admin_kubeconfig.exists(): + data = yaml.safe_load(admin_kubeconfig.read_text()) + try: + token = data["users"][0]["user"]["token"] + except (KeyError, IndexError, TypeError): + pass + return token + except FileNotFoundError: + # New deployments may ask for a token before the kubectl snap is installed. + # Give them nothing! 
+ return None + + secrets = json.loads(output) + if "items" in secrets: + for secret in secrets["items"]: + try: + data_b64 = secret["data"] + password_b64 = data_b64["password"].encode("UTF-8") + username_b64 = data_b64["username"].encode("UTF-8") + except (KeyError, TypeError): + # CK authn secrets will have populated 'data', but not all secrets do + continue + + password = b64decode(password_b64).decode("UTF-8") + secret_user = b64decode(username_b64).decode("UTF-8") + if username == secret_user: + return password + return None diff --git a/flannel/lib/charms/layer/nagios.py b/flannel/lib/charms/layer/nagios.py new file mode 100644 index 0000000..f6ad998 --- /dev/null +++ b/flannel/lib/charms/layer/nagios.py @@ -0,0 +1,60 @@ +from pathlib import Path + +NAGIOS_PLUGINS_DIR = '/usr/lib/nagios/plugins' + + +def install_nagios_plugin_from_text(text, plugin_name): + """ Install a nagios plugin. + + Args: + text: Plugin source code (str) + plugin_name: Name of the plugin in nagios + + Returns: Full path to installed plugin + """ + dest_path = Path(NAGIOS_PLUGINS_DIR) / plugin_name + if dest_path.exists(): + # we could complain here, test the files are the same contents, or + # just bail. Idempotency is a big deal in Juju, so I'd like to be + # ok with being called with the same file multiple times, but we + # certainly want to catch the case where multiple layers are using + # the same filename for their nagios checks. + dest = dest_path.read_text() + if dest == text: + # same file + return dest_path + # different file contents! + # maybe someone changed options or something so we need to write + # it again + + dest_path.write_text(text) + dest_path.chmod(0o755) + + return dest_path + + +def install_nagios_plugin_from_file(source_file_path, plugin_name): + """ Install a nagios plugin. 
+ + Args: + source_file_path: Path to plugin source file + plugin_name: Name of the plugin in nagios + + Returns: Full path to installed plugin + """ + + return install_nagios_plugin_from_text(Path(source_file_path).read_text(), + plugin_name) + + +def remove_nagios_plugin(plugin_name): + """ Remove a nagios plugin. + + Args: + plugin_name: Name of the plugin in nagios + + Returns: None + """ + dest_path = Path(NAGIOS_PLUGINS_DIR) / plugin_name + if dest_path.exists(): + dest_path.unlink() diff --git a/kata/lib/charms/layer/options.py b/flannel/lib/charms/layer/options.py similarity index 100% rename from kata/lib/charms/layer/options.py rename to flannel/lib/charms/layer/options.py diff --git a/kata/lib/charms/layer/status.py b/flannel/lib/charms/layer/status.py similarity index 100% rename from kata/lib/charms/layer/status.py rename to flannel/lib/charms/layer/status.py diff --git a/kata/lib/debug_script.py b/flannel/lib/debug_script.py similarity index 100% rename from kata/lib/debug_script.py rename to flannel/lib/debug_script.py diff --git a/kata/make_docs b/flannel/make_docs similarity index 100% rename from kata/make_docs rename to flannel/make_docs diff --git a/flannel/metadata.yaml b/flannel/metadata.yaml new file mode 100644 index 0000000..972ed5d --- /dev/null +++ b/flannel/metadata.yaml @@ -0,0 +1,42 @@ +"name": "flannel" +"summary": "A charm that provides a robust Software Defined Network" +"maintainers": +- "Tim Van Steenburgh " +- "George Kraft " +- "Rye Terrell " +- "Konstantinos Tsakalozos " +- "Charles Butler " +"description": | + it is a generic overlay network that can be used as a simple alternative + to existing software defined networking solutions +"tags": +- "misc" +- "networking" +"series": +- "focal" +- "bionic" +- "xenial" +"requires": + "etcd": + "interface": "etcd" + "cni": + "interface": "kubernetes-cni" + "scope": "container" +"provides": + "nrpe-external-master": + "interface": "nrpe-external-master" + "scope": "container" 
+"resources": + "flannel-amd64": + "type": "file" + "filename": "flannel.tar.gz" + "description": "A tarball packaged release of flannel for amd64" + "flannel-arm64": + "type": "file" + "filename": "flannel.tar.gz" + "description": "A tarball packaged release of flannel for arm64" + "flannel-s390x": + "type": "file" + "filename": "flannel.tar.gz" + "description": "A tarball packaged release of flannel for s390x" +"subordinate": !!bool "true" diff --git a/kata/pydocmd.yml b/flannel/pydocmd.yml similarity index 100% rename from kata/pydocmd.yml rename to flannel/pydocmd.yml diff --git a/flannel/reactive/__init__.py b/flannel/reactive/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/flannel/reactive/flannel.py b/flannel/reactive/flannel.py new file mode 100644 index 0000000..388ca72 --- /dev/null +++ b/flannel/reactive/flannel.py @@ -0,0 +1,359 @@ +import os +import json +from shlex import split +from subprocess import check_output, check_call, CalledProcessError, STDOUT + +from charms.flannel.common import retry + +from charms.reactive import set_state, remove_state, when, when_not, hook +from charms.reactive import when_any +from charms.templating.jinja2 import render +from charmhelpers.core.host import service_start, service_stop, service_restart +from charmhelpers.core.host import service_running, service +from charmhelpers.core.hookenv import log, resource_get +from charmhelpers.core.hookenv import config, application_version_set +from charmhelpers.core.hookenv import network_get +from charmhelpers.contrib.charmsupport import nrpe +from charms.reactive.helpers import data_changed + +from charms.layer import status + + +ETCD_PATH = '/etc/ssl/flannel' +ETCD_KEY_PATH = os.path.join(ETCD_PATH, 'client-key.pem') +ETCD_CERT_PATH = os.path.join(ETCD_PATH, 'client-cert.pem') +ETCD_CA_PATH = os.path.join(ETCD_PATH, 'client-ca.pem') + + +@when_not('flannel.binaries.installed') +def install_flannel_binaries(): + ''' Unpack the Flannel binaries. 
''' + try: + resource_name = 'flannel-{}'.format(arch()) + archive = resource_get(resource_name) + except Exception: + message = 'Error fetching the flannel resource.' + log(message) + status.blocked(message) + return + if not archive: + message = 'Missing flannel resource.' + log(message) + status.blocked(message) + return + filesize = os.stat(archive).st_size + if filesize < 1000000: + message = 'Incomplete flannel resource' + log(message) + status.blocked(message) + return + status.maintenance('Unpacking flannel resource.') + charm_dir = os.getenv('CHARM_DIR') + unpack_path = os.path.join(charm_dir, 'files', 'flannel') + os.makedirs(unpack_path, exist_ok=True) + cmd = ['tar', 'xfz', archive, '-C', unpack_path] + log(cmd) + check_call(cmd) + apps = [ + {'name': 'flanneld', 'path': '/usr/local/bin'}, + {'name': 'etcdctl', 'path': '/usr/local/bin'} + ] + for app in apps: + unpacked = os.path.join(unpack_path, app['name']) + app_path = os.path.join(app['path'], app['name']) + install = ['install', '-v', '-D', unpacked, app_path] + check_call(install) + set_state('flannel.binaries.installed') + + +@when('cni.is-worker') +@when_not('flannel.cni.configured') +def configure_cni(cni): + ''' Set up the flannel cni configuration file. ''' + render('10-flannel.conflist', '/etc/cni/net.d/10-flannel.conflist', {}) + set_state('flannel.cni.configured') + + +@when('etcd.tls.available') +@when_not('flannel.etcd.credentials.installed') +def install_etcd_credentials(etcd): + ''' Install the etcd credential files. 
''' + etcd.save_client_credentials(ETCD_KEY_PATH, ETCD_CERT_PATH, ETCD_CA_PATH) + set_state('flannel.etcd.credentials.installed') + + +def default_route_interface(): + ''' Returns the network interface of the system's default route ''' + default_interface = None + cmd = ['route'] + output = check_output(cmd).decode('utf8') + for line in output.split('\n'): + if 'default' in line: + default_interface = line.split(' ')[-1] + return default_interface + + +def get_bind_address_interface(): + ''' Returns a non-fan bind-address interface for the cni endpoint. + Falls back to default_route_interface() if bind-address is not available. + ''' + try: + data = network_get('cni') + except NotImplementedError: + # Juju < 2.1 + return default_route_interface() + + if 'bind-addresses' not in data: + # Juju < 2.3 + return default_route_interface() + + for bind_address in data['bind-addresses']: + if bind_address['interfacename'].startswith('fan-'): + continue + return bind_address['interfacename'] + + # If we made it here, we didn't find a non-fan CNI bind-address, which is + # unexpected. Let's log a message and play it safe. + log('Could not find a non-fan bind-address. Using fallback interface.') + return default_route_interface() + + +@when('flannel.binaries.installed', 'flannel.etcd.credentials.installed', + 'etcd.tls.available') +@when_not('flannel.service.installed') +def install_flannel_service(etcd): + ''' Install the flannel service. 
''' + status.maintenance('Installing flannel service.') + # keep track of our etcd conn string and cert info so we can detect when it + # changes later + data_changed('flannel_etcd_connections', etcd.get_connection_string()) + data_changed('flannel_etcd_client_cert', etcd.get_client_credentials()) + iface = config('iface') or get_bind_address_interface() + context = {'iface': iface, + 'connection_string': etcd.get_connection_string(), + 'cert_path': ETCD_PATH} + render('flannel.service', '/lib/systemd/system/flannel.service', context) + service('enable', 'flannel') + set_state('flannel.service.installed') + remove_state('flannel.service.started') + + +@when('config.changed.iface') +def reconfigure_flannel_service(): + ''' Handle interface configuration change. ''' + remove_state('flannel.service.installed') + + +@when('etcd.available', 'flannel.service.installed') +def etcd_changed(etcd): + if data_changed('flannel_etcd_connections', etcd.get_connection_string()): + remove_state('flannel.service.installed') + if data_changed('flannel_etcd_client_cert', etcd.get_client_credentials()): + etcd.save_client_credentials(ETCD_KEY_PATH, + ETCD_CERT_PATH, + ETCD_CA_PATH) + remove_state('flannel.service.installed') + + +@when('flannel.binaries.installed', 'flannel.etcd.credentials.installed', + 'etcd.available') +@when_not('flannel.network.configured') +def invoke_configure_network(etcd): + ''' invoke network configuration and adjust states ''' + status.maintenance('Negotiating flannel network subnet.') + if configure_network(etcd): + set_state('flannel.network.configured') + remove_state('flannel.service.started') + else: + status.waiting('Waiting on etcd.') + + +@retry(times=3, delay_secs=20) +def configure_network(etcd): + ''' Store initial flannel data in etcd. + + Returns True if the operation completed successfully. 
+ + ''' + flannel_config = { + 'Network': config('cidr'), + 'Backend': { + 'Type': 'vxlan' + } + } + + vni = config('vni') + if vni: + flannel_config['Backend']['VNI'] = vni + + port = config('port') + if port: + flannel_config['Backend']['Port'] = port + + data = json.dumps(flannel_config) + cmd = "etcdctl " + cmd += "--endpoint '{0}' ".format(etcd.get_connection_string()) + cmd += "--cert-file {0} ".format(ETCD_CERT_PATH) + cmd += "--key-file {0} ".format(ETCD_KEY_PATH) + cmd += "--ca-file {0} ".format(ETCD_CA_PATH) + cmd += "set /coreos.com/network/config '{0}'".format(data) + try: + check_call(split(cmd)) + return True + + except CalledProcessError: + log('Unexpected error configuring network. Assuming etcd not' + ' ready. Will retry in 20s') + return False + + +@when_any('config.changed.cidr', 'config.changed.port', 'config.changed.vni') +def reconfigure_network(): + ''' Trigger the network configuration method. ''' + remove_state('flannel.network.configured') + + +@when('flannel.binaries.installed', 'flannel.service.installed', + 'flannel.network.configured') +@when_not('flannel.service.started') +def start_flannel_service(): + ''' Start the flannel service. ''' + status.maintenance('Starting flannel service.') + if service_running('flannel'): + service_restart('flannel') + else: + service_start('flannel') + set_state('flannel.service.started') + + +@when('cni.connected', 'flannel.service.started') +@when_any('flannel.cni.configured', 'cni.is-master') +@when_not('flannel.cni.available') +def set_available(cni): + ''' Indicate to the CNI provider that we're ready. 
''' + cni.set_config(cidr=config('cidr'), cni_conf_file='10-flannel.conflist') + set_state('flannel.cni.available') + + +@when('flannel.binaries.installed') +@when_not('flannel.version.set') +def set_flannel_version(): + ''' Surface the currently deployed version of flannel to Juju ''' + cmd = 'flanneld -version' + version = check_output(split(cmd), stderr=STDOUT).decode('utf-8') + if version: + application_version_set(version.split('v')[-1].strip()) + set_state('flannel.version.set') + + +@when('nrpe-external-master.available') +@when_not('nrpe-external-master.initial-config') +def initial_nrpe_config(nagios=None): + set_state('nrpe-external-master.initial-config') + update_nrpe_config(nagios) + + +@when('flannel.service.started') +@when('nrpe-external-master.available') +@when_any('config.changed.nagios_context', + 'config.changed.nagios_servicegroups') +def update_nrpe_config(unused=None): + # List of systemd services that will be checked + services = ('flannel',) + + # The current nrpe-external-master interface doesn't handle a lot of logic, + # use the charm-helpers code for now. + hostname = nrpe.get_nagios_hostname() + current_unit = nrpe.get_nagios_unit_name() + nrpe_setup = nrpe.NRPE(hostname=hostname, primary=False) + nrpe.add_init_service_checks(nrpe_setup, services, current_unit) + nrpe_setup.write() + + +@when('flannel.service.started') +@when('flannel.cni.available') +def ready(): + ''' Indicate that flannel is active. 
''' + try: + status.active('Flannel subnet ' + get_flannel_subnet()) + except FlannelSubnetNotFound: + status.waiting('Waiting for Flannel') + + +@when_not('etcd.connected') +def halt_execution(): + ''' send a clear message to the user that we are waiting on etcd ''' + status.blocked('Waiting for etcd relation.') + + +@hook('upgrade-charm') +def reset_states_and_redeploy(): + ''' Remove state and redeploy ''' + remove_state('flannel.cni.available') + remove_state('flannel.binaries.installed') + remove_state('flannel.service.started') + remove_state('flannel.version.set') + remove_state('flannel.network.configured') + remove_state('flannel.service.installed') + remove_state('flannel.cni.configured') + try: + log('Deleting /etc/cni/net.d/10-flannel.conf') + os.remove('/etc/cni/net.d/10-flannel.conf') + except FileNotFoundError as e: + log(str(e)) + + +@hook('pre-series-upgrade') +def pre_series_upgrade(): + status.blocked('Series upgrade in progress') + + +@hook('stop') +def cleanup_deployment(): + ''' Terminate services, and remove the deployed bins ''' + service_stop('flannel') + down = 'ip link set flannel.1 down' + delete = 'ip link delete flannel.1' + try: + check_call(split(down)) + check_call(split(delete)) + except CalledProcessError: + log('Unable to remove iface flannel.1') + log('Potential indication that cleanup is not possible') + files = ['/usr/local/bin/flanneld', + '/lib/systemd/system/flannel', + '/lib/systemd/system/flannel.service', + '/run/flannel/subnet.env', + '/usr/local/bin/flanneld', + '/usr/local/bin/etcdctl', + '/etc/cni/net.d/10-flannel.conflist', + ETCD_KEY_PATH, + ETCD_CERT_PATH, + ETCD_CA_PATH] + for f in files: + if os.path.exists(f): + log('Removing {}'.format(f)) + os.remove(f) + + +def get_flannel_subnet(): + ''' Returns the flannel subnet reserved for this unit ''' + try: + with open('/run/flannel/subnet.env') as f: + raw_data = dict(line.strip().split('=') for line in f) + return raw_data['FLANNEL_SUBNET'] + except 
FileNotFoundError as e: + raise FlannelSubnetNotFound() from e + + +def arch(): + '''Return the package architecture as a string.''' + # Get the package architecture for this system. + architecture = check_output(['dpkg', '--print-architecture']).rstrip() + # Convert the binary result into a string. + architecture = architecture.decode('utf-8') + return architecture + + +class FlannelSubnetNotFound(Exception): + pass diff --git a/kata/reactive/status.py b/flannel/reactive/status.py similarity index 100% rename from kata/reactive/status.py rename to flannel/reactive/status.py diff --git a/kata/requirements.txt b/flannel/requirements.txt similarity index 100% rename from kata/requirements.txt rename to flannel/requirements.txt diff --git a/kata/revision b/flannel/revision similarity index 100% rename from kata/revision rename to flannel/revision diff --git a/flannel/templates/10-flannel.conflist b/flannel/templates/10-flannel.conflist new file mode 100644 index 0000000..b9669c9 --- /dev/null +++ b/flannel/templates/10-flannel.conflist @@ -0,0 +1,18 @@ +{ + "name": "CDK-flannel-network", + "cniVersion": "0.3.1", + "plugins": [ + { + "type": "flannel", + "delegate": { + "hairpinMode": true, + "isDefaultGateway": true + } + }, + { + "type": "portmap", + "capabilities": {"portMappings": true}, + "snat": true + } + ] +} diff --git a/flannel/templates/cdk.auth-webhook-secret.yaml b/flannel/templates/cdk.auth-webhook-secret.yaml new file mode 100644 index 0000000..a12c402 --- /dev/null +++ b/flannel/templates/cdk.auth-webhook-secret.yaml @@ -0,0 +1,13 @@ +# Manifest for CK secrets that auth-webhook expects +--- +apiVersion: v1 +kind: Secret +metadata: + name: {{ secret_name }} + namespace: {{ secret_namespace }} +type: {{ type }} +data: + uid: {{ user }} + username: {{ username }} + password: {{ password }} + groups: '{{ groups }}' diff --git a/flannel/templates/flannel.service b/flannel/templates/flannel.service new file mode 100644 index 0000000..40a4fdd --- /dev/null +++ 
b/flannel/templates/flannel.service @@ -0,0 +1,14 @@ +[Unit] +Description=Flannel Overlay Network +Documentation=https://github.com/coreos/flannel +Wants=network-online.target +After=network.target network-online.target + +[Service] +ExecStart=/usr/local/bin/flanneld -iface={{ iface }} -etcd-endpoints={{ connection_string }} -etcd-certfile={{ cert_path }}/client-cert.pem -etcd-keyfile={{ cert_path }}/client-key.pem -etcd-cafile={{ cert_path }}/client-ca.pem --ip-masq +TimeoutStartSec=0 +Restart=on-failure +LimitNOFILE=655536 + +[Install] +WantedBy=multi-user.target diff --git a/flannel/tests/data/bundle.yaml b/flannel/tests/data/bundle.yaml new file mode 100644 index 0000000..96145d6 --- /dev/null +++ b/flannel/tests/data/bundle.yaml @@ -0,0 +1,76 @@ +description: A minimal Kubernetes cluster with two machines with virtual networks provided by Flannel. +series: {{ series }} +machines: + '0': + constraints: cores=4 mem=4G root-disk=16G + series: {{ series }} + '1': + constraints: cores=4 mem=4G root-disk=16G + series: {{ series }} +applications: + containerd: + charm: cs:~containers/containerd + channel: edge + easyrsa: + charm: cs:~containers/easyrsa + channel: edge + num_units: 1 + to: + - '1' + etcd: + charm: cs:~containers/etcd + channel: edge + num_units: 1 + options: + channel: 3.4/stable + to: + - '0' + flannel: + charm: {{ master_charm }} + # This is currently not working due to https://github.com/juju/python-libjuju/issues/223 + # resources: + # {{ flannel_resource_name }}: {{ flannel_resource }} + kubernetes-master: + charm: cs:~containers/kubernetes-master + channel: edge + constraints: cores=4 mem=4G root-disk=16G + expose: true + num_units: 1 + options: + channel: 1.21/stable + to: + - '0' + kubernetes-worker: + charm: cs:~containers/kubernetes-worker + channel: edge + constraints: cores=4 mem=4G root-disk=16G + expose: true + num_units: 1 + options: + channel: 1.21/stable + to: + - '1' + +relations: +- - kubernetes-master:kube-api-endpoint + - 
kubernetes-worker:kube-api-endpoint +- - kubernetes-master:kube-control + - kubernetes-worker:kube-control +- - kubernetes-master:certificates + - easyrsa:client +- - kubernetes-master:etcd + - etcd:db +- - kubernetes-worker:certificates + - easyrsa:client +- - etcd:certificates + - easyrsa:client +- - flannel:etcd + - etcd:db +- - flannel:cni + - kubernetes-master:cni +- - flannel:cni + - kubernetes-worker:cni +- - containerd:containerd + - kubernetes-worker:container-runtime +- - containerd:containerd + - kubernetes-master:container-runtime diff --git a/flannel/tests/functional/conftest.py b/flannel/tests/functional/conftest.py new file mode 100644 index 0000000..a92e249 --- /dev/null +++ b/flannel/tests/functional/conftest.py @@ -0,0 +1,4 @@ +import charms.unit_test + + +charms.unit_test.patch_reactive() diff --git a/flannel/tests/functional/test_k8s_common.py b/flannel/tests/functional/test_k8s_common.py new file mode 100644 index 0000000..4b867e6 --- /dev/null +++ b/flannel/tests/functional/test_k8s_common.py @@ -0,0 +1,90 @@ +from functools import partial + +import pytest +from unittest import mock +from charms.layer import kubernetes_common + + +class TestCreateKubeConfig: + @pytest.fixture(autouse=True) + def _files(self, tmp_path): + self.cfg_file = tmp_path / "config" + self.ca_file = tmp_path / "ca.crt" + self.ca_file.write_text("foo") + self.ckc = partial( + kubernetes_common.create_kubeconfig, + self.cfg_file, + "server", + self.ca_file, + ) + + def test_guard_clauses(self): + with pytest.raises(ValueError): + self.ckc() + assert not self.cfg_file.exists() + with pytest.raises(ValueError): + self.ckc(token="token", password="password") + assert not self.cfg_file.exists() + with pytest.raises(ValueError): + self.ckc(key="key") + assert not self.cfg_file.exists() + + def test_file_creation(self): + self.ckc(password="password") + assert self.cfg_file.exists() + cfg_data_1 = self.cfg_file.read_text() + assert cfg_data_1 + + def test_idempotency(self): + 
self.ckc(password="password") + cfg_data_1 = self.cfg_file.read_text() + self.ckc(password="password") + cfg_data_2 = self.cfg_file.read_text() + # Verify that calling w/ the same data keeps the same file contents. + assert cfg_data_2 == cfg_data_1 + + def test_efficient_updates(self): + self.ckc(password="old_password") + cfg_stat_1 = self.cfg_file.stat() + self.ckc(password="old_password") + cfg_stat_2 = self.cfg_file.stat() + self.ckc(password="new_password") + cfg_stat_3 = self.cfg_file.stat() + # Verify that calling with the same data doesn't + # modify the file at all, but that new data does + assert cfg_stat_1.st_mtime == cfg_stat_2.st_mtime < cfg_stat_3.st_mtime + + def test_aws_iam(self): + self.ckc(password="password", aws_iam_cluster_id="aws-cluster") + assert self.cfg_file.exists() + cfg_data_1 = self.cfg_file.read_text() + assert "aws-cluster" in cfg_data_1 + + def test_keystone(self): + self.ckc(password="password", keystone=True) + assert self.cfg_file.exists() + cfg_data_1 = self.cfg_file.read_text() + assert "keystone-user" in cfg_data_1 + assert "exec" in cfg_data_1 + + def test_atomic_updates(self): + self.ckc(password="old_password") + with self.cfg_file.open("rt") as f: + # Perform a write in the middle of reading + self.ckc(password="new_password") + # Read data from existing FH after new data was written + cfg_data_1 = f.read() + # Read updated data + cfg_data_2 = self.cfg_file.read_text() + # Verify that the in-progress read didn't get any of the new data + assert cfg_data_1 != cfg_data_2 + assert "old_password" in cfg_data_1 + assert "new_password" in cfg_data_2 + + @mock.patch("charmhelpers.core.hookenv.network_get", autospec=True) + def test_get_ingress_address(self, network_get): + network_get.return_value = {"ingress-addresses": ["1.2.3.4", "5.6.7.8"]} + ingress = kubernetes_common.get_ingress_address("endpoint-name") + assert ingress == "1.2.3.4" + ingress = kubernetes_common.get_ingress_address("endpoint-name", ["1.2.3.4"]) + assert 
ingress == "5.6.7.8" diff --git a/flannel/tests/integration/conftest.py b/flannel/tests/integration/conftest.py new file mode 100644 index 0000000..331a19b --- /dev/null +++ b/flannel/tests/integration/conftest.py @@ -0,0 +1,28 @@ +from pathlib import Path + +import pytest + + +def pytest_addoption(parser): + parser.addoption( + "--flannel-version", nargs="?", type=str, default="amd64", + choices=["amd64", "arm64", "s390x"], + help="The version of flannel resource. [amd64/arm64/s390x]" + ) + parser.addoption( + "--flannel-resource", nargs="?", type=Path, + default=Path(__file__).parent.resolve()/".."/".."/"flannel-amd64.tar.gz", + help="The path to the flannel resource. It can be compiled with " + "`./build-flannel-resources.sh`, see README.md for more information." + ) + + +@pytest.fixture() +def flannel_resource(pytestconfig): + version = pytestconfig.getoption("--flannel-version") + path = pytestconfig.getoption("--flannel-resource") + if not path.exists(): + raise FileNotFoundError("Missing resource, please provide via" + "--flannel-resource option or at {}".format(path)) + + return f"flannel-{version}={path}" # noqa: E999 diff --git a/flannel/tests/integration/test_flannel_integration.py b/flannel/tests/integration/test_flannel_integration.py new file mode 100644 index 0000000..b58f062 --- /dev/null +++ b/flannel/tests/integration/test_flannel_integration.py @@ -0,0 +1,135 @@ +import json +import logging +import re +from ipaddress import ip_address, ip_network +from time import sleep + +import pytest +from kubernetes import client +from kubernetes.config import load_kube_config_from_dict + +log = logging.getLogger(__name__) + + +def _get_flannel_subnet_ip(unit): + """Get subnet IP address.""" + subnet = re.findall(r"[0-9]+(?:\.[0-9]+){3}", unit.workload_status_message)[0] + return ip_address(subnet) + + +async def _get_kubeconfig(model): + """Get kubeconfig from kubernetes-master.""" + unit = model.applications["kubernetes-master"].units[0] + action = await 
unit.run_action("get-kubeconfig") + output = await action.wait() # wait for result + return json.loads(output.data.get("results", {}).get("kubeconfig", "{}")) + + +async def _create_test_pod(model): + """Create tests pod and return spec.""" + # load kubernetes config + kubeconfig = await _get_kubeconfig(model) + load_kube_config_from_dict(kubeconfig) + + api = client.CoreV1Api() + pod_manifest = { + "apiVersion": "v1", + "kind": "Pod", + "metadata": {"name": "test"}, + "spec": { + "containers": [ + {"image": "busybox", "name": "test", "args": ["echo", "\"test\""]} + ] + } + } + resp = api.create_namespaced_pod(body=pod_manifest, namespace="default") + # wait for pod not to be in pending + i = 0 + while resp.status.phase == "Pending" and i < 30: + i += 1 + sleep(10) + resp = api.read_namespaced_pod("test", namespace="default") + + api.delete_namespaced_pod("test", namespace="default") + return resp + + +async def validate_flannel_cidr_network(ops_test): + """Validate network CIDR assign to Flannel.""" + flannel = ops_test.model.applications["flannel"] + flannel_config = await flannel.get_config() + cidr_network = ip_network(flannel_config.get("cidr", {}).get("value")) + + for unit in flannel.units: + assert unit.workload_status == "active" + assert _get_flannel_subnet_ip(unit) in cidr_network + + # create test pod + resp = await _create_test_pod(ops_test.model) + assert ip_address(resp.status.pod_ip) in cidr_network, \ + "the new pod does not get the ip address in the cidr network" + + +@pytest.mark.abort_on_fail +async def test_build_and_deploy(ops_test, flannel_resource): + """Build and deploy Flannel in bundle.""" + flannel_charm = await ops_test.build_charm(".") + + # Work around libjuju not handling local file resources by manually + # pre-deploying the charm w/ resource via the CLI. 
See + # https://github.com/juju/python-libjuju/issues/223 + rc, stdout, stderr = await ops_test.run( + "juju", + "deploy", + "-m", ops_test.model_full_name, + flannel_charm, + "--resource", flannel_resource, + ) + assert rc == 0, f"Failed to deploy with resource: {stderr or stdout}" # noqa: E999 + + bundle = ops_test.render_bundle( + "tests/data/bundle.yaml", + master_charm=flannel_charm, + series="focal", + # flannel_resource_name=flannel_resource_name, # This doesn't work currently + # flannel_resource=flannel_resource, # This doesn't work currently + ) + await ops_test.model.deploy(bundle) + + # This configuration is needed due testing on top of LXD containers. + # https://bugs.launchpad.net/charm-kubernetes-worker/+bug/1903566 + await ops_test.model.applications["kubernetes-worker"].set_config({ + "kubelet-extra-config": "{protectKernelDefaults: false}" + }) + + await ops_test.model.wait_for_idle(wait_for_active=True, timeout=60 * 60, + idle_period=60) + + +async def test_status_messages(ops_test): + """Validate that the status messages are correct.""" + await validate_flannel_cidr_network(ops_test) + + +async def test_change_cidr_network(ops_test): + """Test configuration change.""" + flannel = ops_test.model.applications["flannel"] + await flannel.set_config({"cidr": "10.2.0.0/16"}) + rc, stdout, stderr = await ops_test.run( + "juju", "run", "-m", ops_test.model_full_name, "--application", "flannel", + "--", "hooks/config-changed" + ) + assert rc == 0, f"Failed to run hook with resource: {stderr or stdout}" + + # note (rgildein): There is need to restart kubernetes-worker machine. 
+ # https://bugs.launchpad.net/charm-flannel/+bug/1932551 + k8s_worker = ops_test.model.applications["kubernetes-worker"].units[0] + rc, stdout, stderr = await ops_test.run( + "juju", "ssh", "-m", ops_test.model_full_name, f"{k8s_worker.name}", + "--", "sudo reboot now" + ) + assert rc in [0, 255], (f"Failed to restart kubernetes-worker with " + f"resource: {stderr or stdout}") + + await ops_test.model.wait_for_idle(wait_for_active=True, idle_period=60) + await validate_flannel_cidr_network(ops_test) diff --git a/flannel/tests/unit/conftest.py b/flannel/tests/unit/conftest.py new file mode 100644 index 0000000..a92e249 --- /dev/null +++ b/flannel/tests/unit/conftest.py @@ -0,0 +1,4 @@ +import charms.unit_test + + +charms.unit_test.patch_reactive() diff --git a/flannel/tests/unit/test_flannel.py b/flannel/tests/unit/test_flannel.py new file mode 100644 index 0000000..62e1b08 --- /dev/null +++ b/flannel/tests/unit/test_flannel.py @@ -0,0 +1,21 @@ +from unittest.mock import MagicMock +from reactive import flannel +from charmhelpers.core import hookenv +from charms.reactive import set_state + + +def test_set_available(): + cni = MagicMock() + hookenv.config.return_value = '192.168.0.0/16' + flannel.set_available(cni) + cni.set_config.assert_called_once_with( + cidr='192.168.0.0/16', + cni_conf_file='10-flannel.conflist' + ) + set_state.assert_called_once_with('flannel.cni.available') + + +def test_series_upgrade(): + assert flannel.status.blocked.call_count == 0 + flannel.pre_series_upgrade() + assert flannel.status.blocked.call_count == 1 diff --git a/flannel/tests/unit/test_k8s_common.py b/flannel/tests/unit/test_k8s_common.py new file mode 100644 index 0000000..0dcad31 --- /dev/null +++ b/flannel/tests/unit/test_k8s_common.py @@ -0,0 +1,122 @@ +import json +import string +from subprocess import CalledProcessError +from unittest.mock import Mock + +from charms.layer import kubernetes_common as kc + + +def test_token_generator(): + alphanum = string.ascii_letters + 
string.digits + token = kc.token_generator(10) + assert len(token) == 10 + unknown_chars = set(token) - set(alphanum) + assert not unknown_chars + + +def test_get_secret_names(monkeypatch): + monkeypatch.setattr(kc, "kubectl", Mock()) + kc.kubectl.side_effect = [ + CalledProcessError(1, "none"), + FileNotFoundError, + "{}".encode("utf8"), + json.dumps( + { + "items": [ + { + "metadata": {"name": "secret-id"}, + "data": {"username": "dXNlcg=="}, + }, + ], + } + ).encode("utf8"), + ] + assert kc.get_secret_names() == {} + assert kc.get_secret_names() == {} + assert kc.get_secret_names() == {} + assert kc.get_secret_names() == {"user": "secret-id"} + + +def test_generate_rfc1123(): + alphanum = string.ascii_letters + string.digits + token = kc.generate_rfc1123(1000) + assert len(token) == 253 + unknown_chars = set(token) - set(alphanum) + assert not unknown_chars + + +def test_create_secret(monkeypatch): + monkeypatch.setattr(kc, "render", Mock()) + monkeypatch.setattr(kc, "kubectl_manifest", Mock()) + monkeypatch.setattr(kc, "get_secret_names", Mock()) + monkeypatch.setattr(kc, "generate_rfc1123", Mock()) + kc.kubectl_manifest.side_effect = [True, False] + kc.get_secret_names.side_effect = [{"username": "secret-id"}, {}] + kc.generate_rfc1123.return_value = "foo" + assert kc.create_secret("token", "username", "user", "groups") + assert kc.render.call_args[1]["context"] == { + "groups": "Z3JvdXBz", + "password": "dXNlcjo6dG9rZW4=", + "secret_name": "secret-id", + "secret_namespace": "kube-system", + "type": "juju.is/token-auth", + "user": "dXNlcg==", + "username": "dXNlcm5hbWU=", + } + assert not kc.create_secret("token", "username", "user", "groups") + assert kc.render.call_args[1]["context"] == { + "groups": "Z3JvdXBz", + "password": "dXNlcjo6dG9rZW4=", + "secret_name": "auth-user-foo", + "secret_namespace": "kube-system", + "type": "juju.is/token-auth", + "user": "dXNlcg==", + "username": "dXNlcm5hbWU=", + } + + +def test_get_secret_password(monkeypatch): + 
monkeypatch.setattr(kc, "kubectl", Mock()) + monkeypatch.setattr(kc, "Path", Mock()) + monkeypatch.setattr(kc, "yaml", Mock()) + kc.kubectl.side_effect = [ + CalledProcessError(1, "none"), + CalledProcessError(1, "none"), + CalledProcessError(1, "none"), + CalledProcessError(1, "none"), + CalledProcessError(1, "none"), + CalledProcessError(1, "none"), + FileNotFoundError, + json.dumps({}).encode("utf8"), + json.dumps({"items": []}).encode("utf8"), + json.dumps({"items": []}).encode("utf8"), + json.dumps({"items": [{}]}).encode("utf8"), + json.dumps({"items": [{"data": {}}]}).encode("utf8"), + json.dumps( + {"items": [{"data": {"username": "Ym9i", "password": "c2VjcmV0"}}]} + ).encode("utf8"), + json.dumps( + {"items": [{"data": {"username": "dXNlcm5hbWU=", "password": "c2VjcmV0"}}]} + ).encode("utf8"), + ] + kc.yaml.safe_load.side_effect = [ + {}, + {"users": None}, + {"users": []}, + {"users": [{"user": {}}]}, + {"users": [{"user": {"token": "secret"}}]}, + ] + assert kc.get_secret_password("username") is None + assert kc.get_secret_password("admin") is None + assert kc.get_secret_password("admin") is None + assert kc.get_secret_password("admin") is None + assert kc.get_secret_password("admin") is None + assert kc.get_secret_password("admin") == "secret" + assert kc.get_secret_password("username") is None + assert kc.get_secret_password("username") is None + assert kc.get_secret_password("username") is None + assert kc.get_secret_password("username") is None + assert kc.get_secret_password("username") is None + assert kc.get_secret_password("username") is None + assert kc.get_secret_password("username") is None + assert kc.get_secret_password("username") == "secret" diff --git a/flannel/tox.ini b/flannel/tox.ini new file mode 100644 index 0000000..6427219 --- /dev/null +++ b/flannel/tox.ini @@ -0,0 +1,37 @@ +[tox] +skipsdist = True +envlist = lint,unit + +[flake8] +max-line-length = 88 + +[tox:travis] +3.5: lint,unit +3.6: lint,unit +3.7: lint,unit + +[testenv] 
+basepython = python3 +setenv = + PYTHONPATH={toxinidir}:{toxinidir}/lib + PYTHONBREAKPOINT=ipdb.set_trace + +[testenv:unit] +deps = + pyyaml + pytest + ipdb + git+https://github.com/juju-solutions/charms.unit_test/#egg=charms.unit_test +commands = pytest --tb native -s {posargs} {toxinidir}/tests/unit + +[testenv:lint] +deps = flake8 +commands = flake8 {toxinidir}/lib {toxinidir}/reactive {toxinidir}/tests + +[testenv:integration] +deps = + pytest + pytest-operator + kubernetes + ipdb +commands = pytest --tb native --show-capture=no --log-cli-level=INFO -s {posargs} {toxinidir}/tests/integration diff --git a/flannel/version b/flannel/version new file mode 100644 index 0000000..20817dd --- /dev/null +++ b/flannel/version @@ -0,0 +1 @@ +ccfa68be \ No newline at end of file diff --git a/kata/wheelhouse.txt b/flannel/wheelhouse.txt similarity index 86% rename from kata/wheelhouse.txt rename to flannel/wheelhouse.txt index 4413da7..5e9d1bf 100644 --- a/kata/wheelhouse.txt +++ b/flannel/wheelhouse.txt @@ -16,6 +16,8 @@ wheel<0.34 # pin netaddr to avoid pulling importlib-resources netaddr<=0.7.19 -# kata -requests +# flannel +charms.templating.jinja2>=1.0.0,<2.0.0 +python-etcd>=0.4.0,<1.0.0 +dnspython<2.0.0 diff --git a/flannel/wheelhouse/dnspython-1.16.0.zip b/flannel/wheelhouse/dnspython-1.16.0.zip new file mode 100644 index 0000000..98fd10a Binary files /dev/null and b/flannel/wheelhouse/dnspython-1.16.0.zip differ diff --git a/kata/wheelhouse/setuptools-41.6.0.zip b/flannel/wheelhouse/setuptools-41.6.0.zip similarity index 100% rename from kata/wheelhouse/setuptools-41.6.0.zip rename to flannel/wheelhouse/setuptools-41.6.0.zip diff --git a/kata/.travis.yml b/kata/.travis.yml deleted file mode 100644 index 694ddcb..0000000 --- a/kata/.travis.yml +++ /dev/null @@ -1,11 +0,0 @@ -dist: bionic -language: python -python: - - "3.5" - - "3.6" - - "3.7" - - "3.8" -install: - - pip install tox-travis -script: - - tox diff --git a/kata/.travis/profile-update.yaml 
b/kata/.travis/profile-update.yaml deleted file mode 100644 index 57f96eb..0000000 --- a/kata/.travis/profile-update.yaml +++ /dev/null @@ -1,12 +0,0 @@ -config: {} -description: Default LXD profile - updated -devices: - eth0: - name: eth0 - parent: lxdbr0 - nictype: bridged - type: nic - root: - path: / - pool: default - type: disk diff --git a/kata/README.md b/kata/README.md deleted file mode 100644 index 39cb53b..0000000 --- a/kata/README.md +++ /dev/null @@ -1,7 +0,0 @@ -# Charm for Kata Containers - -This subordinate charm deploys the [Kata](https://katacontainers.io/) -untrusted container runtime within a running Juju charm model. - -This charm is maintained along with the components of Charmed Kubernetes. -For full information, please visit the official [Charmed Kubernetes docs](https://ubuntu.com/kubernetes/docs/charm-kata). diff --git a/kata/hooks/relations/container-runtime/.gitignore b/kata/hooks/relations/container-runtime/.gitignore deleted file mode 100644 index 894a44c..0000000 --- a/kata/hooks/relations/container-runtime/.gitignore +++ /dev/null @@ -1,104 +0,0 @@ -# Byte-compiled / optimized / DLL files -__pycache__/ -*.py[cod] -*$py.class - -# C extensions -*.so - -# Distribution / packaging -.Python -build/ -develop-eggs/ -dist/ -downloads/ -eggs/ -.eggs/ -lib/ -lib64/ -parts/ -sdist/ -var/ -wheels/ -*.egg-info/ -.installed.cfg -*.egg -MANIFEST - -# PyInstaller -# Usually these files are written by a python script from a template -# before PyInstaller builds the exe, so as to inject date/other infos into it. 
-*.manifest -*.spec - -# Installer logs -pip-log.txt -pip-delete-this-directory.txt - -# Unit test / coverage reports -htmlcov/ -.tox/ -.coverage -.coverage.* -.cache -nosetests.xml -coverage.xml -*.cover -.hypothesis/ -.pytest_cache/ - -# Translations -*.mo -*.pot - -# Django stuff: -*.log -local_settings.py -db.sqlite3 - -# Flask stuff: -instance/ -.webassets-cache - -# Scrapy stuff: -.scrapy - -# Sphinx documentation -docs/_build/ - -# PyBuilder -target/ - -# Jupyter Notebook -.ipynb_checkpoints - -# pyenv -.python-version - -# celery beat schedule file -celerybeat-schedule - -# SageMath parsed files -*.sage.py - -# Environments -.env -.venv -env/ -venv/ -ENV/ -env.bak/ -venv.bak/ - -# Spyder project settings -.spyderproject -.spyproject - -# Rope project settings -.ropeproject - -# mkdocs documentation -/site - -# mypy -.mypy_cache/ diff --git a/kata/hooks/relations/container-runtime/README.md b/kata/hooks/relations/container-runtime/README.md deleted file mode 100644 index 4620013..0000000 --- a/kata/hooks/relations/container-runtime/README.md +++ /dev/null @@ -1,45 +0,0 @@ -# interface-container-runtime - -## Overview - -This interface handles communication between subordinate charms, that provide a container runtime and charms requiring a container runtime. - -## Usage - -### Provides - -The providing side of the container interface provides a place for a container runtime to connect to. - -Your charm should respond to the `endpoint.{endpoint_name}.available` state, -which indicates that there is a container runtime connected. - -A trivial example of handling this interface would be: - -```python -@when('endpoint.containerd.joined') -def update_kubelet_config(containerd): - endpoint = endpoint_from_flag('endpoint.containerd.joined') - config = endpoint.get_config() - kubelet.config['container-runtime'] = \ - config['runtime'] -``` - -### Requires - -The requiring side of the container interface requires a place for a container runtime to connect to. 
- -Your charm should set `{endpoint_name}.available` state, -which indicates that the container is runtime connected. - -A trivial example of handling this interface would be: - -```python -@when('endpoint.containerd.joined') -def pubish_config(): - endpoint = endpoint_from_flag('endpoint.containerd.joined') - endpoint.set_config( - socket='unix:///var/run/containerd/containerd.sock', - runtime='remote', - nvidia_enabled=False - ) -``` diff --git a/kata/hooks/relations/container-runtime/interface.yaml b/kata/hooks/relations/container-runtime/interface.yaml deleted file mode 100644 index 294be1e..0000000 --- a/kata/hooks/relations/container-runtime/interface.yaml +++ /dev/null @@ -1,4 +0,0 @@ -name: container-runtime -summary: Interface for relating to container runtimes -version: 1 -maintainer: "Joe Borg " diff --git a/kata/hooks/relations/container-runtime/provides.py b/kata/hooks/relations/container-runtime/provides.py deleted file mode 100644 index a9768a8..0000000 --- a/kata/hooks/relations/container-runtime/provides.py +++ /dev/null @@ -1,55 +0,0 @@ -from charms.reactive import ( - Endpoint, - toggle_flag -) - - -class ContainerRuntimeProvides(Endpoint): - def manage_flags(self): - toggle_flag(self.expand_name('endpoint.{endpoint_name}.available'), - self.is_joined) - - def _get_config(self, key): - """ - Get the published configuration for a given key. - - :param key: String dict key - :return: String value for given key - """ - return self.all_joined_units.received.get(key) - - def get_nvidia_enabled(self): - """ - Get the published nvidia config. - - :return: String - """ - return self._get_config(key='nvidia_enabled') - - def get_runtime(self): - """ - Get the published runtime config. - - :return: String - """ - return self._get_config(key='runtime') - - def get_socket(self): - """ - Get the published socket config. 
- - :return: String - """ - return self._get_config(key='socket') - - def set_config(self, sandbox_image=None): - """ - Set the configuration to be published. - - :param sandbox_image: String to optionally override the sandbox image - :return: None - """ - for relation in self.relations: - relation.to_publish.update({ - 'sandbox_image': sandbox_image - }) diff --git a/kata/hooks/relations/container-runtime/requires.py b/kata/hooks/relations/container-runtime/requires.py deleted file mode 100644 index c461b68..0000000 --- a/kata/hooks/relations/container-runtime/requires.py +++ /dev/null @@ -1,61 +0,0 @@ -from charms.reactive import ( - Endpoint, - clear_flag, - data_changed, - is_data_changed, - toggle_flag -) - - -class ContainerRuntimeRequires(Endpoint): - def manage_flags(self): - toggle_flag(self.expand_name('endpoint.{endpoint_name}.available'), - self.is_joined) - toggle_flag(self.expand_name('endpoint.{endpoint_name}.reconfigure'), - self.is_joined and self._config_changed()) - - def _config_changed(self): - """ - Determine if our received data has changed. - - :return: Boolean - """ - # NB: this call should match whatever we're tracking in handle_remote_config - return is_data_changed('containerd.remote_config', - [self.get_sandbox_image()]) - - def handle_remote_config(self): - """ - Keep track of received data so we can know if it changes. - - :return: None - """ - clear_flag(self.expand_name('endpoint.{endpoint_name}.reconfigure')) - # Presently, we only care about one piece of remote config. Expand - # the list as needed. - data_changed('containerd.remote_config', - [self.get_sandbox_image()]) - - def get_sandbox_image(self): - """ - Get the sandbox image URI if a remote has published one. - - :return: String: remotely configured sandbox image - """ - return self.all_joined_units.received.get('sandbox_image') - - def set_config(self, socket, runtime, nvidia_enabled): - """ - Set the configuration to be published. 
- - :param socket: String uri to runtime socket - :param runtime: String runtime executable - :param nvidia_enabled: Boolean nvidia runtime enabled - :return: None - """ - for relation in self.relations: - relation.to_publish.update({ - 'socket': socket, - 'runtime': runtime, - 'nvidia_enabled': nvidia_enabled - }) diff --git a/kata/hooks/relations/untrusted-container-runtime/.gitignore b/kata/hooks/relations/untrusted-container-runtime/.gitignore deleted file mode 100644 index 894a44c..0000000 --- a/kata/hooks/relations/untrusted-container-runtime/.gitignore +++ /dev/null @@ -1,104 +0,0 @@ -# Byte-compiled / optimized / DLL files -__pycache__/ -*.py[cod] -*$py.class - -# C extensions -*.so - -# Distribution / packaging -.Python -build/ -develop-eggs/ -dist/ -downloads/ -eggs/ -.eggs/ -lib/ -lib64/ -parts/ -sdist/ -var/ -wheels/ -*.egg-info/ -.installed.cfg -*.egg -MANIFEST - -# PyInstaller -# Usually these files are written by a python script from a template -# before PyInstaller builds the exe, so as to inject date/other infos into it. 
-*.manifest -*.spec - -# Installer logs -pip-log.txt -pip-delete-this-directory.txt - -# Unit test / coverage reports -htmlcov/ -.tox/ -.coverage -.coverage.* -.cache -nosetests.xml -coverage.xml -*.cover -.hypothesis/ -.pytest_cache/ - -# Translations -*.mo -*.pot - -# Django stuff: -*.log -local_settings.py -db.sqlite3 - -# Flask stuff: -instance/ -.webassets-cache - -# Scrapy stuff: -.scrapy - -# Sphinx documentation -docs/_build/ - -# PyBuilder -target/ - -# Jupyter Notebook -.ipynb_checkpoints - -# pyenv -.python-version - -# celery beat schedule file -celerybeat-schedule - -# SageMath parsed files -*.sage.py - -# Environments -.env -.venv -env/ -venv/ -ENV/ -env.bak/ -venv.bak/ - -# Spyder project settings -.spyderproject -.spyproject - -# Rope project settings -.ropeproject - -# mkdocs documentation -/site - -# mypy -.mypy_cache/ diff --git a/kata/hooks/relations/untrusted-container-runtime/README.md b/kata/hooks/relations/untrusted-container-runtime/README.md deleted file mode 100644 index 135dca5..0000000 --- a/kata/hooks/relations/untrusted-container-runtime/README.md +++ /dev/null @@ -1,54 +0,0 @@ -# interface-untrusted-container-runtime - -## Overview - -This interface handles communication between subordinate container runtimes -and this subordinate untrusted container runtime, such as `containerd` and -`kata-containers`. - -## Usage - -### Provides - -The providing side of the container interface provides a place for an -untrusted container runtime to connect to. - -Your charm should respond to the `endpoint.{endpoint_name}.available` state, -which indicates that there is an untrusted container runtime connected. 
- -A trivial example of handling this interface would be: - -```python -@when('endpoint.containerd.joined') -def update_kubelet_config(containerd): - endpoint = endpoint_from_flag('endpoint.containerd.joined') - config = endpoint.get_config() - - render( - 'config.toml', - { - 'runtime_name': config['name'], - 'runtime_binary': config['binary_path'] - } - ) -``` - -### Requires - -The requiring side of the untrusted container interface requires a place for -an untrusted container runtime to connect to. - -Your charm should set `{endpoint_name}.available` state, -which indicates that the container is runtime connected. - -A trivial example of handling this interface would be: - -```python -@when('endpoint.containerd.joined') -def pubish_config(): - endpoint = endpoint_from_flag('endpoint.containerd.joined') - endpoint.set_config( - 'name': 'kata', - 'binary_path': '/usr/bin/kata-runtime' - ) -``` diff --git a/kata/hooks/relations/untrusted-container-runtime/interface.yaml b/kata/hooks/relations/untrusted-container-runtime/interface.yaml deleted file mode 100644 index d0d7dbc..0000000 --- a/kata/hooks/relations/untrusted-container-runtime/interface.yaml +++ /dev/null @@ -1,4 +0,0 @@ -name: untrusted-container-runtime -summary: Interface for relating to untrusted container runtimes -version: 1 -maintainer: "Joe Borg " diff --git a/kata/hooks/relations/untrusted-container-runtime/provides.py b/kata/hooks/relations/untrusted-container-runtime/provides.py deleted file mode 100644 index 09deb26..0000000 --- a/kata/hooks/relations/untrusted-container-runtime/provides.py +++ /dev/null @@ -1,28 +0,0 @@ -from charms.reactive import ( - Endpoint, - set_flag, - clear_flag -) - -from charms.reactive import ( - when, - when_not -) - - -class ContainerRuntimeProvides(Endpoint): - @when('endpoint.{endpoint_name}.joined') - def joined(self): - set_flag(self.expand_name('endpoint.{endpoint_name}.available')) - - @when_not('endpoint.{endpoint_name}.joined') - def broken(self): - 
clear_flag(self.expand_name('endpoint.{endpoint_name}.available')) - - def get_config(self): - """ - Get the configuration published. - - :return: Dictionary configuration - """ - return self.all_joined_units.received diff --git a/kata/hooks/relations/untrusted-container-runtime/requires.py b/kata/hooks/relations/untrusted-container-runtime/requires.py deleted file mode 100644 index f717ba6..0000000 --- a/kata/hooks/relations/untrusted-container-runtime/requires.py +++ /dev/null @@ -1,34 +0,0 @@ -from charms.reactive import ( - Endpoint, - set_flag, - clear_flag -) - -from charms.reactive import ( - when, - when_not -) - - -class ContainerRuntimeRequires(Endpoint): - @when('endpoint.{endpoint_name}.changed') - def changed(self): - set_flag(self.expand_name('endpoint.{endpoint_name}.available')) - - @when_not('endpoint.{endpoint_name}.joined') - def broken(self): - clear_flag(self.expand_name('endpoint.{endpoint_name}.available')) - - def set_config(self, name, binary_path): - """ - Set the configuration to be published. 
- - :param name: String name of runtime - :param binary_path: String runtime executable - :return: None - """ - for relation in self.relations: - relation.to_publish.update({ - 'name': name, - 'binary_path': binary_path - }) diff --git a/kata/icon.svg b/kata/icon.svg deleted file mode 100644 index 96a5d0c..0000000 --- a/kata/icon.svg +++ /dev/null @@ -1,279 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - image/svg+xml - - - - - - - - - - - diff --git a/kata/metadata.yaml b/kata/metadata.yaml deleted file mode 100644 index 767db6d..0000000 --- a/kata/metadata.yaml +++ /dev/null @@ -1,29 +0,0 @@ -"name": "kata" -"summary": "Kata untrusted container runtime subordinate" -"maintainers": -- "Joe Borg " -"description": | - Kata Containers is an open source community working to build a secure - container runtime with lightweight virtual machines that feel and perform - like containers, but provide stronger workload isolation using hardware - virtualization technology as a second layer of defense. 
-"tags": -- "misc" -- "containers" -"series": -- "focal" -- "bionic" -"requires": - "containerd": - "interface": "container-runtime" - "scope": "container" - "untrusted": - "interface": "untrusted-container-runtime" - "scope": "container" -"resources": - "kata-archive": - "type": "file" - "filename": "kata-archive.tar.gz" - "description": "Offline archive of kata" - -"subordinate": !!bool "true" diff --git a/kata/reactive/kata.py b/kata/reactive/kata.py deleted file mode 100644 index e11c45f..0000000 --- a/kata/reactive/kata.py +++ /dev/null @@ -1,136 +0,0 @@ -import os -import requests - -from subprocess import ( - check_call, - check_output -) - -from charmhelpers.core import host - -from charms.reactive import ( - when, - when_not, - set_state, - remove_state, - endpoint_from_flag, - hook, -) - -from charmhelpers.fetch import ( - apt_install, - apt_update, - apt_purge, - apt_autoremove, - import_key -) - -from charmhelpers.core.hookenv import ( - resource_get -) - -from charms.layer import status - - -KATA_PACKAGES = [ - 'kata-runtime', - 'kata-proxy', - 'kata-shim' -] - - -@when_not('kata.installed') -@when_not('endpoint.untrusted.departed') -def install_kata(): - """ - Install the Kata container runtime. 
- - :returns: None - """ - dist = host.lsb_release() - release = '{}_{}'.format( - dist['DISTRIB_ID'], - dist['DISTRIB_RELEASE'] - ) - - arch = check_output(['arch']).decode().strip() - - archive = resource_get('kata-archive') - - if not archive or os.path.getsize(archive) == 0: - status.maintenance('Installing Kata via apt') - gpg_key = requests.get( - 'http://download.opensuse.org/repositories/home:/katacontainers:/' - 'releases:/{}:/master/x{}/Release.key'.format(arch, release)).text - import_key(gpg_key) - - with open('/etc/apt/sources.list.d/kata-containers.list', 'w') as f: - f.write( - 'deb http://download.opensuse.org/repositories/home:/' - 'katacontainers:/releases:/{}:/master/x{}/ /' - .format(arch, release) - ) - - apt_update() - apt_install(KATA_PACKAGES) - - else: - status.maintenance('Installing Kata via resource') - unpack = '/tmp/kata-debs' - - if not os.path.isdir(unpack): - os.makedirs(unpack, exist_ok=True) - - check_call(['tar', '-xvf', archive, '-C', unpack]) - check_call('apt-get install -y {}/*.deb'.format(unpack), shell=True) - - status.active('Kata runtime available') - set_state('kata.installed') - - -@when('endpoint.untrusted.departed') -def purge_kata(): - """ - Purge Kata containers. - - :return: None - """ - status.maintenance('Purging Kata') - - apt_purge(KATA_PACKAGES, fatal=False) - - source = '/etc/apt/sources.list.d/kata-containers.list' - if os.path.isfile(source): - os.remove(source) - - apt_autoremove() - - remove_state('kata.installed') - - -@when('kata.installed') -@when('endpoint.untrusted.joined') -@when_not('endpoint.untrusted.departed') -def publish_config(): - """ - Pass configuration over the interface. 
- - :return: None - """ - endpoint = endpoint_from_flag('endpoint.untrusted.joined') - endpoint.set_config( - name='kata', - binary_path='/usr/bin/kata-runtime' - ) - - -@hook('pre-series-upgrade') -def pre_series_upgrade(): - """Set status during series upgrade.""" - status.blocked('Series upgrade in progress') - - -@hook('post-series-upgrade') -def post_series_upgrade(): - """Reset status to active after series upgrade.""" - status.active('Kata runtime available') diff --git a/kata/tests/test_kata_reactive.py b/kata/tests/test_kata_reactive.py deleted file mode 100644 index 421b0dd..0000000 --- a/kata/tests/test_kata_reactive.py +++ /dev/null @@ -1,35 +0,0 @@ -from reactive import kata - - -def test_packages_list(): - """Assert KATA_PACKAGES is a list of strings.""" - assert isinstance(kata.KATA_PACKAGES, list) - for item in kata.KATA_PACKAGES: - assert isinstance(item, str) - - -def test_install_kata(): - """Assert install_kata is a method.""" - assert callable(kata.install_kata) - - -def test_purge_kata(): - """Assert purge_kata is a method.""" - assert callable(kata.purge_kata) - - -def test_publist_config(): - """Assert publish_config is a method.""" - assert callable(kata.publish_config) - - -def test_series_upgrade(): - """Assert status is set during series upgrade.""" - assert kata.status.blocked.call_count == 0 - assert kata.status.active.call_count == 0 - kata.pre_series_upgrade() - assert kata.status.blocked.call_count == 1 - assert kata.status.active.call_count == 0 - kata.post_series_upgrade() - assert kata.status.blocked.call_count == 1 - assert kata.status.active.call_count == 1 diff --git a/kata/tox.ini b/kata/tox.ini deleted file mode 100644 index 0c9eaec..0000000 --- a/kata/tox.ini +++ /dev/null @@ -1,34 +0,0 @@ -[flake8] -max-line-length = 120 -ignore = D100 - -[tox] -skipsdist = True -envlist = lint,py3 - -[tox:travis] -3.5: lint,py3 -3.6: lint, py3 -3.7: lint, py3 -3.8: lint, py3 - -[testenv] -basepython = python3 -setenv = - 
PYTHONPATH={toxinidir} -deps = - pyyaml - pytest - pytest-cov - flake8 - flake8-docstrings - requests - git+https://github.com/juju-solutions/charms.unit_test/#egg=charms.unit_test -commands = - pytest --cov-report term-missing \ - --cov reactive.kata --cov-fail-under 30 \ - --tb native -s {posargs} - -[testenv:lint] -envdir = {toxworkdir}/py3 -commands = flake8 {toxinidir}/reactive {toxinidir}/tests diff --git a/kata/version b/kata/version deleted file mode 100644 index 91808cc..0000000 --- a/kata/version +++ /dev/null @@ -1 +0,0 @@ -0ea81f0c \ No newline at end of file diff --git a/kata/wheelhouse/Jinja2-2.10.1.tar.gz b/kata/wheelhouse/Jinja2-2.10.1.tar.gz deleted file mode 100644 index ffd1054..0000000 Binary files a/kata/wheelhouse/Jinja2-2.10.1.tar.gz and /dev/null differ diff --git a/kata/wheelhouse/MarkupSafe-1.1.1.tar.gz b/kata/wheelhouse/MarkupSafe-1.1.1.tar.gz deleted file mode 100644 index a6dad8e..0000000 Binary files a/kata/wheelhouse/MarkupSafe-1.1.1.tar.gz and /dev/null differ diff --git a/kata/wheelhouse/PyYAML-5.2.tar.gz b/kata/wheelhouse/PyYAML-5.2.tar.gz deleted file mode 100644 index 666d12a..0000000 Binary files a/kata/wheelhouse/PyYAML-5.2.tar.gz and /dev/null differ diff --git a/kata/wheelhouse/Tempita-0.5.2.tar.gz b/kata/wheelhouse/Tempita-0.5.2.tar.gz deleted file mode 100644 index 755befc..0000000 Binary files a/kata/wheelhouse/Tempita-0.5.2.tar.gz and /dev/null differ diff --git a/kata/wheelhouse/certifi-2021.5.30.tar.gz b/kata/wheelhouse/certifi-2021.5.30.tar.gz deleted file mode 100644 index 4eb2a5f..0000000 Binary files a/kata/wheelhouse/certifi-2021.5.30.tar.gz and /dev/null differ diff --git a/kata/wheelhouse/charmhelpers-0.20.22.tar.gz b/kata/wheelhouse/charmhelpers-0.20.22.tar.gz deleted file mode 100644 index bd5d222..0000000 Binary files a/kata/wheelhouse/charmhelpers-0.20.22.tar.gz and /dev/null differ diff --git a/kata/wheelhouse/charms.reactive-1.4.1.tar.gz b/kata/wheelhouse/charms.reactive-1.4.1.tar.gz deleted file mode 
100644 index 03bc1fe..0000000 Binary files a/kata/wheelhouse/charms.reactive-1.4.1.tar.gz and /dev/null differ diff --git a/kata/wheelhouse/charset-normalizer-2.0.3.tar.gz b/kata/wheelhouse/charset-normalizer-2.0.3.tar.gz deleted file mode 100644 index 804b4cc..0000000 Binary files a/kata/wheelhouse/charset-normalizer-2.0.3.tar.gz and /dev/null differ diff --git a/kata/wheelhouse/idna-3.2.tar.gz b/kata/wheelhouse/idna-3.2.tar.gz deleted file mode 100644 index 6febfb2..0000000 Binary files a/kata/wheelhouse/idna-3.2.tar.gz and /dev/null differ diff --git a/kata/wheelhouse/netaddr-0.7.19.tar.gz b/kata/wheelhouse/netaddr-0.7.19.tar.gz deleted file mode 100644 index cc31d9d..0000000 Binary files a/kata/wheelhouse/netaddr-0.7.19.tar.gz and /dev/null differ diff --git a/kata/wheelhouse/pbr-5.6.0.tar.gz b/kata/wheelhouse/pbr-5.6.0.tar.gz deleted file mode 100644 index 0d5c965..0000000 Binary files a/kata/wheelhouse/pbr-5.6.0.tar.gz and /dev/null differ diff --git a/kata/wheelhouse/pip-18.1.tar.gz b/kata/wheelhouse/pip-18.1.tar.gz deleted file mode 100644 index a18192d..0000000 Binary files a/kata/wheelhouse/pip-18.1.tar.gz and /dev/null differ diff --git a/kata/wheelhouse/pyaml-20.4.0.tar.gz b/kata/wheelhouse/pyaml-20.4.0.tar.gz deleted file mode 100644 index 0d5fd76..0000000 Binary files a/kata/wheelhouse/pyaml-20.4.0.tar.gz and /dev/null differ diff --git a/kata/wheelhouse/setuptools_scm-1.17.0.tar.gz b/kata/wheelhouse/setuptools_scm-1.17.0.tar.gz deleted file mode 100644 index 43b16c7..0000000 Binary files a/kata/wheelhouse/setuptools_scm-1.17.0.tar.gz and /dev/null differ diff --git a/kata/wheelhouse/urllib3-1.26.6.tar.gz b/kata/wheelhouse/urllib3-1.26.6.tar.gz deleted file mode 100644 index 5b90623..0000000 Binary files a/kata/wheelhouse/urllib3-1.26.6.tar.gz and /dev/null differ diff --git a/kata/wheelhouse/wheel-0.33.6.tar.gz b/kata/wheelhouse/wheel-0.33.6.tar.gz deleted file mode 100644 index c922c4e..0000000 Binary files a/kata/wheelhouse/wheel-0.33.6.tar.gz and 
/dev/null differ diff --git a/keepalived/.build.manifest b/keepalived/.build.manifest index 07c063f..cf1dfc6 100644 --- a/keepalived/.build.manifest +++ b/keepalived/.build.manifest @@ -6,8 +6,8 @@ "url": "layer:options" }, { - "branch": "refs/heads/stable", - "rev": "0d10732a6e14ea2f940a35ab61425a97c5db6a16", + "branch": "refs/heads/master\nrefs/heads/stable", + "rev": "a3ff62c32c993d80417f6e093e3ef95e42f62083", "url": "layer:basic" }, { @@ -17,7 +17,7 @@ }, { "branch": "refs/heads/stable", - "rev": "348a4a770068a42afec8230ea167346689baafd2", + "rev": "53a31f0c00b5625162024e47b0788d2f640b817c", "url": "keepalived" }, { @@ -42,16 +42,16 @@ "dynamic", "unchecked" ], + ".github/workflows/main.yml": [ + "layer:basic", + "static", + "96a48a981ceb2a96f427a6b5226d2da6d7191981793804055d70a88ca1987473" + ], ".gitignore": [ "layer:status", "static", "315971ad9cc5d6ada2391f0940e1800149b211a18be3c7a8f396735d7978702b" ], - ".travis/profile-update.yaml": [ - "layer:basic", - "static", - "731e20aa59bf61c024d317ad630e478301a9386ccc0afe56e6c1c09db07ac83b" - ], "LICENSE": [ "layer:status", "static", @@ -395,7 +395,7 @@ "lib/charms/layer/basic.py": [ "layer:basic", "static", - "3126b5754ad39402ee27e64527044ddd231ed1cd137fcedaffb51e63a635f108" + "98b47134770ed6e4c0b2d4aad73cd5bc200bec84aa9c1c4e075fd70c3222a0c9" ], "lib/charms/layer/execd.py": [ "layer:basic", @@ -465,7 +465,7 @@ "version": [ "keepalived", "dynamic", - "a3bff55840dcf7d1866186038c890d78b43261e77ecf1e0f6378daf4a7fe3e21" + "84c2c1461c9a1c34c65d7e620e19234566d6e8bd5eb279ee556d0750321b2046" ], "wheelhouse.txt": [ "layer:basic", @@ -492,10 +492,10 @@ "dynamic", "cacecf0baa674d356641f1d406b8bff1d756d739c46b869a54de515d08e6fc9c" ], - "wheelhouse/charmhelpers-0.20.22.tar.gz": [ + "wheelhouse/charmhelpers-0.20.23.tar.gz": [ "layer:basic", "dynamic", - "b7550108118ce4f87488343384441797777d0da746e1346ed4e6361b4eab0ddb" + "59a9776594e91cd3e3e000043f8668b4d7b279422dbb17e320f01dc16385b80e" ], 
"wheelhouse/charms.reactive-1.4.1.tar.gz": [ "layer:basic", @@ -517,10 +517,10 @@ "dynamic", "c0a292bd977ef590379a3f05d7b7f65135487b67470f6281289a94e015650ea1" ], - "wheelhouse/pyaml-20.4.0.tar.gz": [ + "wheelhouse/pyaml-21.10.1.tar.gz": [ "__pip__", "dynamic", - "29a5c2a68660a799103d6949167bd6c7953d031449d08802386372de1db6ad71" + "c6519fee13bf06e3bb3f20cacdea8eba9140385a7c2546df5dbae4887f768383" ], "wheelhouse/setuptools-41.6.0.zip": [ "layer:basic", diff --git a/keepalived/.github/workflows/main.yml b/keepalived/.github/workflows/main.yml new file mode 100644 index 0000000..565bfaf --- /dev/null +++ b/keepalived/.github/workflows/main.yml @@ -0,0 +1,50 @@ +name: Test Suite +on: [pull_request] + +jobs: + lint: + name: Lint + runs-on: ubuntu-latest + strategy: + matrix: + python: [3.5, 3.6, 3.7, 3.8, 3.9] + steps: + - name: Check out code + uses: actions/checkout@v2 + - name: Setup Python + uses: actions/setup-python@v2 + with: + python-version: ${{ matrix.python }} + - name: Install Dependencies + run: | + pip install tox + - name: Run lint + run: tox -e flake8 + functional-test: + name: Functional test with LXD + runs-on: ubuntu-latest + timeout-minutes: 360 + steps: + - name: Check out code + uses: actions/checkout@v2 + - name: Setup Python + uses: actions/setup-python@v2 + with: + python-version: 3.8 + - name: Install Dependencies + run: | + pip install tox + - name: Setup operator environment + uses: charmed-kubernetes/actions-operator@master + - name: Run test + run: tox -e func + - name: Show Status + if: ${{ always() }} + run: | + model=$(juju models --format yaml|grep "^- name:.*zaza"|cut -f2 -d/); + juju status -m "$model" + - name: Show Error Logs + if: ${{ always() }} + run: | + model=$(juju models --format yaml|grep "^- name:.*zaza"|cut -f2 -d/); + juju debug-log -m "$model" --replay --no-tail --level ERROR diff --git a/keepalived/.travis/profile-update.yaml b/keepalived/.travis/profile-update.yaml deleted file mode 100644 index 57f96eb..0000000 --- 
a/keepalived/.travis/profile-update.yaml +++ /dev/null @@ -1,12 +0,0 @@ -config: {} -description: Default LXD profile - updated -devices: - eth0: - name: eth0 - parent: lxdbr0 - nictype: bridged - type: nic - root: - path: / - pool: default - type: disk diff --git a/keepalived/lib/charms/layer/basic.py b/keepalived/lib/charms/layer/basic.py index 7507203..bbdd074 100644 --- a/keepalived/lib/charms/layer/basic.py +++ b/keepalived/lib/charms/layer/basic.py @@ -199,7 +199,13 @@ def bootstrap_charm_deps(): # a set so that we can ignore the pre-install packages and let pip # choose the best version in case there are multiple from layer # conflicts) - pkgs = _load_wheelhouse_versions().keys() - set(pre_install_pkgs) + _versions = _load_wheelhouse_versions() + _pkgs = _versions.keys() - set(pre_install_pkgs) + # add back the versions such that each package in pkgs is + # ==. + # This ensures that pip 20.3.4+ will install the packages from the + # wheelhouse without (erroneously) flagging an error. + pkgs = _add_back_versions(_pkgs, _versions) reinstall_flag = '--force-reinstall' if not cfg.get('use_venv', True) and pre_eoan: reinstall_flag = '--ignore-installed' @@ -278,6 +284,55 @@ def _load_wheelhouse_versions(): return versions +def _add_back_versions(pkgs, versions): + """Add back the version strings to each of the packages. + + The versions are LooseVersion() from _load_wheelhouse_versions(). This + function strips the ".zip" or ".tar.gz" from the end of the version string + and adds it back to the package in the form of == + + If a package name is not a key in the versions dictionary, then it is + returned in the list unchanged. 
+ + :param pkgs: A list of package names + :type pkgs: List[str] + :param versions: A map of package to LooseVersion + :type versions: Dict[str, LooseVersion] + :returns: A list of (maybe) versioned packages + :rtype: List[str] + """ + def _strip_ext(s): + """Strip an extension (if it exists) from the string + + :param s: the string to strip an extension off if it exists + :type s: str + :returns: string without an extension of .zip or .tar.gz + :rtype: str + """ + for ending in [".zip", ".tar.gz"]: + if s.endswith(ending): + return s[:-len(ending)] + return s + + def _maybe_add_version(pkg): + """Maybe add back the version number to a package if it exists. + + Adds the version number, if the package exists in the lexically + captured `versions` dictionary, in the form ==. Strips + the extension if it exists. + + :param pkg: the package name to (maybe) add the version number to. + :type pkg: str + """ + try: + return "{}=={}".format(pkg, _strip_ext(str(versions[pkg]))) + except KeyError: + pass + return pkg + + return [_maybe_add_version(pkg) for pkg in pkgs] + + def _update_if_newer(pip, pkgs): installed = _load_installed_versions(pip) wheelhouse = _load_wheelhouse_versions() diff --git a/keepalived/version b/keepalived/version index 91808cc..20817dd 100644 --- a/keepalived/version +++ b/keepalived/version @@ -1 +1 @@ -0ea81f0c \ No newline at end of file +ccfa68be \ No newline at end of file diff --git a/keepalived/wheelhouse/charmhelpers-0.20.22.tar.gz b/keepalived/wheelhouse/charmhelpers-0.20.22.tar.gz deleted file mode 100644 index bd5d222..0000000 Binary files a/keepalived/wheelhouse/charmhelpers-0.20.22.tar.gz and /dev/null differ diff --git a/keepalived/wheelhouse/charmhelpers-0.20.23.tar.gz b/keepalived/wheelhouse/charmhelpers-0.20.23.tar.gz new file mode 100644 index 0000000..8fbc8ec Binary files /dev/null and b/keepalived/wheelhouse/charmhelpers-0.20.23.tar.gz differ diff --git a/keepalived/wheelhouse/pyaml-20.4.0.tar.gz 
b/keepalived/wheelhouse/pyaml-20.4.0.tar.gz deleted file mode 100644 index 0d5fd76..0000000 Binary files a/keepalived/wheelhouse/pyaml-20.4.0.tar.gz and /dev/null differ diff --git a/keepalived/wheelhouse/pyaml-21.10.1.tar.gz b/keepalived/wheelhouse/pyaml-21.10.1.tar.gz new file mode 100644 index 0000000..b19aad3 Binary files /dev/null and b/keepalived/wheelhouse/pyaml-21.10.1.tar.gz differ diff --git a/kubeapi-load-balancer/.build.manifest b/kubeapi-load-balancer/.build.manifest index 0359a9e..ccd1c4b 100644 --- a/kubeapi-load-balancer/.build.manifest +++ b/kubeapi-load-balancer/.build.manifest @@ -1,87 +1,92 @@ { "layers": [ { - "branch": "refs/heads/master\nrefs/heads/stable", + "branch": "refs/heads/master", "rev": "f491ebe32b503c9712d2f8cd602dcce18f4aab46", "url": "layer:metrics" }, { - "branch": "refs/heads/master\nrefs/heads/stable", + "branch": "refs/heads/master", "rev": "fcdcea4e5de3e1556c24e6704607862d0ba00a56", "url": "layer:options" }, { - "branch": "refs/heads/master\nrefs/heads/stable", - "rev": "623e69c7b432456fd4364f6e1835424fd6b5425e", + "branch": "refs/heads/master", + "rev": "a3ff62c32c993d80417f6e093e3ef95e42f62083", "url": "layer:basic" }, { - "branch": "refs/heads/master\nrefs/heads/stable", + "branch": "refs/heads/master", "rev": "47dfcd4920ef6317850a4837ef0057ab0092a18e", "url": "layer:nagios" }, { - "branch": "refs/heads/master\nrefs/heads/stable", + "branch": "refs/heads/master", "rev": "a7d7b6423db37a47611310039e6ed1929c0a2eab", "url": "layer:status" }, { - "branch": "refs/heads/master\nrefs/heads/stable", + "branch": "refs/heads/master", "rev": "76bddfb640ab8767fc7e4a4b73a4a4e781948f34", "url": "layer:apt" }, { - "branch": "refs/heads/master\nrefs/heads/stable", + "branch": "refs/heads/master", "rev": "672d27695b512e50f51777b1eb63c5ff157b3d9e", "url": "layer:nginx" }, { - "branch": "refs/heads/master\nrefs/heads/stable", + "branch": "refs/heads/master", "rev": "527dd64fc4b9a6b0f8d80a3c2c0b865155050275", "url": "layer:debug" }, { - 
"branch": "refs/heads/master\nrefs/heads/stable", + "branch": "refs/heads/master", "rev": "fb46dec78d390571753d21876bbba689bbbca9e4", "url": "layer:tls-client" }, { - "branch": "refs/heads/master\nrefs/heads/stable", - "rev": "763297a075b3654f261af20c84b940d87f55354e", + "branch": "refs/heads/master", + "rev": "bbeabfee52c4442cdaf3a34e5e35530a3bd71156", "url": "layer:kubernetes-common" }, { - "branch": "refs/heads/master\nrefs/heads/stable", + "branch": "refs/heads/master", "rev": "a8f88f16bb7771807a0f7fdb17ee16b0e310fc2b", "url": "layer:hacluster" }, { - "branch": "refs/heads/stable", - "rev": "74da66505e2e8470cd47ed0c1d56fcec843da87b", + "branch": "refs/heads/master", + "rev": "cc5bd3f49b2fa5e6c3ab2336763c313ec8bf083f", + "url": "layer:leadership" + }, + { + "branch": "refs/heads/master", + "rev": "64ca102a51286f1b4c0e8e5820912c7affc5806f", "url": "kubeapi-load-balancer" }, { - "branch": "refs/heads/master\nrefs/heads/stable", + "branch": "refs/heads/master", "rev": "2e0e1fdea6d83b55078200aacb537d60013ec5bc", "url": "interface:nrpe-external-master" }, { - "branch": "refs/heads/master\nrefs/heads/stable", + "branch": "refs/heads/master", "rev": "632131b1f122daf6fb601fd4c9f1e4dbb1a92e09", "url": "interface:http" }, { - "branch": "refs/heads/master\nrefs/heads/stable", + "branch": "refs/heads/master", "rev": "d9850016d930a6d507b9fd45e2598d327922b140", "url": "interface:tls-certificates" }, { - "branch": "refs/heads/master\nrefs/heads/stable", - "rev": "6c611a3c61909fda411f7a79af53908ec7bef2c8", + "branch": "refs/heads/master", + "rev": "8125a7baecccf9b0869e515b92300dde3a86f31b", "url": "interface:hacluster" }, { - "branch": "refs/heads/master\nrefs/heads/stable", + "branch": "refs/heads/master", "rev": "5021f8a23f6e6e4cc449d2d02f2d8cb99763ec27", "url": "interface:public-address" } @@ -92,26 +97,16 @@ "dynamic", "unchecked" ], - ".github/workflows/build.yml": [ + ".github/workflows/main.yml": [ "kubeapi-load-balancer", "static", - 
"f6bb08b7b2cffefc0cacdee5bb1c356f30782dbc6be5591f2db186fcd446d43f" - ], - ".github/workflows/tox.yaml": [ - "kubeapi-load-balancer", - "static", - "c323f9ca1fe5bf1369f80d8958be49ad8fd2f6635528865017c357591d31542e" + "1ab5c85a6d4c418917573011f6da399d86be327eaec650a8c9e3d1e3159c44e1" ], ".gitignore": [ "kubeapi-load-balancer", "static", "3437c2cd90de443f44766939172b82e750e19fd474df499ffe003bb807e8cef4" ], - ".travis/profile-update.yaml": [ - "layer:basic", - "static", - "731e20aa59bf61c024d317ad630e478301a9386ccc0afe56e6c1c09db07ac83b" - ], "AUTHORS": [ "layer:nginx", "static", @@ -128,9 +123,9 @@ "58d1e17ffe5109a7ae296caafcadfdbe6a7d176f0bc4ab01e12a689b0499d8bd" ], "Makefile": [ - "kubeapi-load-balancer", + "layer:basic", "static", - "49ced5fd917cecc5aa65c83ffa2a829de8e02e7c0fb8c0e88163064e7b93f8af" + "b7ab3a34e5faf79b96a8632039a0ad0aa87f2a9b5f0ba604e007cafb22190301" ], "README.md": [ "kubeapi-load-balancer", @@ -177,6 +172,11 @@ "static", "f6740d66fd60b60f2533d9fcb53907078d1e20920a0219afce7182e2a1c97629" ], + "copyright.layer-leadership": [ + "layer:leadership", + "static", + "8ce407829378fc0f72ce44c7f624e4951c7ccb3db1cfb949bee026b701728cc9" + ], "copyright.layer-metrics": [ "layer:metrics", "static", @@ -347,6 +347,31 @@ "dynamic", "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7" ], + "hooks/lb-consumers-relation-broken": [ + "layer:basic", + "dynamic", + "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7" + ], + "hooks/lb-consumers-relation-changed": [ + "layer:basic", + "dynamic", + "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7" + ], + "hooks/lb-consumers-relation-created": [ + "layer:basic", + "dynamic", + "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7" + ], + "hooks/lb-consumers-relation-departed": [ + "layer:basic", + "dynamic", + "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7" + ], + "hooks/lb-consumers-relation-joined": [ + "layer:basic", + "dynamic", + 
"2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7" + ], "hooks/leader-elected": [ "layer:basic", "dynamic", @@ -432,11 +457,6 @@ "static", "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855" ], - "hooks/relations/hacluster/common.py": [ - "interface:hacluster", - "static", - "cd9f765e2c3ff64a592c8e144a36783e48c1033413cbece2c4f579195cb7ff5e" - ], "hooks/relations/hacluster/copyright": [ "interface:hacluster", "static", @@ -445,17 +465,27 @@ "hooks/relations/hacluster/interface.yaml": [ "interface:hacluster", "static", - "51bcf4e36b973600d567cf96783bdee3eaa6e164275f70b69e2e47e3468c8c8b" + "5f4e6c8d7b2884bdceeee422821f4db7163dbfa7994d86cb405ffef2c3dea43c" + ], + "hooks/relations/hacluster/interface_hacluster/__init__.py": [ + "interface:hacluster", + "static", + "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855" + ], + "hooks/relations/hacluster/interface_hacluster/common.py": [ + "interface:hacluster", + "static", + "abcc0d2940d142976ccfa3fa7518227549ee13041292af7ad61101a7d0c02f7e" ], "hooks/relations/hacluster/requires.py": [ "interface:hacluster", "static", - "eb752e55844ffbfddf9a98e80ac282ff832ab667c1a33b743940babbd048bb17" + "68cf3ed22af30e42f34fc70ca484e8e4eeaedac6410bd3f228677cc791e6f46c" ], "hooks/relations/hacluster/test-requirements.txt": [ "interface:hacluster", "static", - "2c37d84ada8578ba5ed44f99f10470710c91d370052a867541f31b5c6a357b07" + "63756e4b1c67bc161cee0d30d460dbb83911b2c064dc1c55454a30c1ab877616" ], "hooks/relations/http/.gitignore": [ "interface:http", @@ -655,7 +685,7 @@ "layer.yaml": [ "kubeapi-load-balancer", "dynamic", - "98380972be2b81b4b27449ff197b8bccdfd9c427df3e8792a5fd530365e84d92" + "d7bac049bb8874aaab83bbe0339f1c1a4e726f27e548fa9705a0c890db70d5b2" ], "lib/.gitkeep": [ "layer:nginx", @@ -675,7 +705,7 @@ "lib/charms/layer/basic.py": [ "layer:basic", "static", - "3126b5754ad39402ee27e64527044ddd231ed1cd137fcedaffb51e63a635f108" + 
"98b47134770ed6e4c0b2d4aad73cd5bc200bec84aa9c1c4e075fd70c3222a0c9" ], "lib/charms/layer/execd.py": [ "layer:basic", @@ -690,7 +720,7 @@ "lib/charms/layer/kubernetes_common.py": [ "layer:kubernetes-common", "static", - "826650823a9af745e8a57defba66d1f2fe1c735f0fe64d282cf528ca65272101" + "29cedffd490e6295273d195a7c9bace2fcdf149826e7427f2af9698f7f75055b" ], "lib/charms/layer/nagios.py": [ "layer:nagios", @@ -717,6 +747,11 @@ "static", "34531c3980777b661b913d77c432fc371ed10425473c2eb365b1dd5540c2ec6e" ], + "lib/charms/leadership.py": [ + "layer:leadership", + "static", + "20ffcbbc08147506759726ad51567420659ffb8a2e0121079240b8706658e332" + ], "lib/debug_script.py": [ "layer:debug", "static", @@ -735,7 +770,7 @@ "metadata.yaml": [ "kubeapi-load-balancer", "dynamic", - "7224029776479946a04ca0237cadd1e16bcc3fa7e138d7732e30e0af07d0cd73" + "d7ed0ef2446de4644bc2840f2290ba72faa5136474d36817d4e4633495964c33" ], "metrics.yaml": [ "kubeapi-load-balancer", @@ -753,7 +788,7 @@ "19689509a5fb9bfc90ed1e873122ac0a90f22533b7f40055c38fdd587fe297de" ], "reactive/__init__.py": [ - "layer:basic", + "layer:leadership", "static", "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855" ], @@ -767,10 +802,15 @@ "static", "0b34980232eec9866c85b55070db7e72a04689f92b338207c5839531abd0eadc" ], + "reactive/leadership.py": [ + "layer:leadership", + "static", + "e2b233cf861adc3b2d9e9c062134ce2f104953f03283cdddd88f49efee652e8f" + ], "reactive/load_balancer.py": [ "kubeapi-load-balancer", "static", - "77a41c7fb062e3091abc6e2a57a648722d17ca11a01c88c33069eed7d413296a" + "a461c5478bd62fbe35c1cff1b867d5c876bf4d87a7d10dabe3ab4298c1dbb120" ], "reactive/nginx.py": [ "layer:nginx", @@ -792,21 +832,6 @@ "static", "a00f75d80849e5b4fc5ad2e7536f947c25b1a4044b341caa8ee87a92d3a4c804" ], - "script/bootstrap": [ - "kubeapi-load-balancer", - "static", - "e0c77e16a79dcb31cb6378687e3465151a74fd8e6dd2083a662fb8c1fe5168e2" - ], - "script/build": [ - "kubeapi-load-balancer", - "static", - 
"e78cab1bead2e3c8f7970558f4d08a81f6cc59e5c2903e997644f7e51e7a3633" - ], - "script/upload": [ - "kubeapi-load-balancer", - "static", - "8a13f3dade7374df2250ac04dc82fb3a39a328412ed384721576852a54a34114" - ], "templates/.gitkeep": [ "layer:nginx", "static", @@ -815,37 +840,72 @@ "templates/apilb.conf": [ "kubeapi-load-balancer", "static", - "ec8fc3d9cb4ff7ec8499ad6900e813cfbb2fbe7b802944d3aa0ce1d12963be52" + "bce0d30720ebe4e2173047e699e2ddc75f2a4e1e3e53966cdfddc9723de80d75" + ], + "templates/cdk.auth-webhook-secret.yaml": [ + "layer:kubernetes-common", + "static", + "efaf34c12a5c961fa7843199070945ba05717b3656a0f3acc3327f45334bcaec" ], "templates/vhost.conf.ex": [ "layer:nginx", "static", "f68c366c35a8487acb78da6f1086eeee33a3eccdbe5a524509039c0c41ad5d5a" ], - "tests/conftest.py": [ + "tests/data/bundle.yaml": [ + "kubeapi-load-balancer", + "static", + "48c2cfe2d03a11f33b63e789465db7f5509bb85a44194d523d3db0bd1c51add3" + ], + "tests/functional/conftest.py": [ + "layer:kubernetes-common", + "static", + "fd53e0c38b4dda0c18096167889cd0d85b98b0a13225f9f8853261241e94078c" + ], + "tests/functional/test_k8s_common.py": [ + "layer:kubernetes-common", + "static", + "680a53724154771dd78422bbaf24b151788d86dd07960712c5d9e0d758499b50" + ], + "tests/integration/test_kubeapi-load-balancer_integration.py": [ + "kubeapi-load-balancer", + "static", + "3188d793533cbc95314eceaf613168160413da84828fbe11e09dd50afef1209e" + ], + "tests/unit/conftest.py": [ "kubeapi-load-balancer", "static", "6b67fae874cf23514acce521237850807e1b45f5ddaac1777237392e66b8ad53" ], - "tests/test_kubeapi_load_balancer.py": [ + "tests/unit/test_k8s_common.py": [ + "layer:kubernetes-common", + "static", + "da9bcea8e75160311a4055c1cbf577b497ddd45dc00223c5f1667598f94d9be4" + ], + "tests/unit/test_kubeapi_load_balancer.py": [ "kubeapi-load-balancer", "static", "8c31c2541800259eab3461d0295ed0c76d763596b2a99a5ecdd683d65402517f" ], + "tests/validate-wheelhouse.sh": [ + "kubeapi-load-balancer", + "static", + 
"1c74bea041866cf4bd75763190d3c512e1d63a19b04e35178a64b8c517bb3231" + ], "tox.ini": [ "kubeapi-load-balancer", "static", - "85b2e7b5880fe8cc3f0fbbfb3496c2a8718c775aee7b8002929a596d35927073" + "21bc06f83720144cb194ddd8725cf356e0beebcaf87c8a0b7c2a269ffb82ae35" ], "version": [ "kubeapi-load-balancer", "dynamic", - "f7b6b97993cc32152f2c110a487f9eac0896218e2292a13c252976d9548e3435" + "41dbfa1b715c748c7de3b265c51d85f0ada5d4f48184f02c963c1d39e36fd8c9" ], "wheelhouse.txt": [ - "layer:nginx", + "kubeapi-load-balancer", "dynamic", - "27c996e4c9738557fed60f48dc535fbec68415f08303743d23d1ed51675a361d" + "3c1119359719500a5c3fabab7215289529e4dc8ad65826278846ef11ef78ed19" ], "wheelhouse/Jinja2-2.10.1.tar.gz": [ "layer:basic", @@ -853,7 +913,7 @@ "065c4f02ebe7f7cf559e49ee5a95fb800a9e4528727aec6f24402a5374c65013" ], "wheelhouse/MarkupSafe-1.1.1.tar.gz": [ - "__pip__", + "layer:basic", "dynamic", "29872e92839765e546828bb7754a68c418d927cd064fd4708fab9fe9c8bb116b" ], @@ -867,21 +927,51 @@ "dynamic", "cacecf0baa674d356641f1d406b8bff1d756d739c46b869a54de515d08e6fc9c" ], - "wheelhouse/charmhelpers-0.20.21.tar.gz": [ + "wheelhouse/cached-property-1.5.2.tar.gz": [ + "__pip__", + "dynamic", + "9fa5755838eecbb2d234c3aa390bd80fbd3ac6b6869109bfc1b499f7bd89a130" + ], + "wheelhouse/charmhelpers-0.20.23.tar.gz": [ "layer:basic", "dynamic", - "37dd06f9548724d38352d1eaf91216df9167066745774118481d40974599715c" + "59a9776594e91cd3e3e000043f8668b4d7b279422dbb17e320f01dc16385b80e" ], "wheelhouse/charms.reactive-1.4.1.tar.gz": [ "layer:basic", "dynamic", "bba21b4fd40b26c240c9ef2aa10c6fdf73592031c68591da4e7ccc46ca9cb616" ], + "wheelhouse/loadbalancer_interface-1.1.1.tar.gz": [ + "kubeapi-load-balancer", + "dynamic", + "c71d50bb66286d6e15a5f2975c0a316a3cd43c2042428258c96d1b4b95e5706b" + ], + "wheelhouse/marshmallow-3.14.0.tar.gz": [ + "__pip__", + "dynamic", + "bba1a940985c052c5cc7849f97da196ebc81f3b85ec10c56ef1f3228aa9cbe74" + ], + "wheelhouse/marshmallow-enum-1.5.1.tar.gz": [ + "__pip__", + "dynamic", + 
"38e697e11f45a8e64b4a1e664000897c659b60aa57bfa18d44e226a9920b6e58" + ], "wheelhouse/netaddr-0.7.19.tar.gz": [ "layer:basic", "dynamic", "38aeec7cdd035081d3a4c306394b19d677623bf76fa0913f6695127c7753aefd" ], + "wheelhouse/ops-1.2.0.tar.gz": [ + "__pip__", + "dynamic", + "3deb00ad7952b203502290a79bf8c8ce9b70e4f34fec3307fd45133c97a45824" + ], + "wheelhouse/ops_reactive_interface-1.0.1.tar.gz": [ + "__pip__", + "dynamic", + "9ed351c42fc187299c23125975aa3dfee9f6aaae0c9d49bce8904ac079255dba" + ], "wheelhouse/pbr-5.6.0.tar.gz": [ "__pip__", "dynamic", @@ -892,10 +982,10 @@ "dynamic", "c0a292bd977ef590379a3f05d7b7f65135487b67470f6281289a94e015650ea1" ], - "wheelhouse/pyaml-20.4.0.tar.gz": [ + "wheelhouse/pyaml-21.10.1.tar.gz": [ "__pip__", "dynamic", - "29a5c2a68660a799103d6949167bd6c7953d031449d08802386372de1db6ad71" + "c6519fee13bf06e3bb3f20cacdea8eba9140385a7c2546df5dbae4887f768383" ], "wheelhouse/setuptools-41.6.0.zip": [ "layer:basic", @@ -907,10 +997,10 @@ "dynamic", "70a4cf5584e966ae92f54a764e6437af992ba42ac4bca7eb37cc5d02b98ec40a" ], - "wheelhouse/six-1.15.0.tar.gz": [ + "wheelhouse/six-1.16.0.tar.gz": [ "__pip__", "dynamic", - "30639c035cdb23534cd4aa2dd52c3bf48f06e5f4a941509c8bafd8ce11080259" + "1e61c37477a1626458e36f7b1d82aa5c9b094fa4802892072e49de9c60c4c926" ], "wheelhouse/toml-0.10.2.tar.gz": [ "layer:nginx", diff --git a/kubeapi-load-balancer/.github/workflows/build.yml b/kubeapi-load-balancer/.github/workflows/build.yml deleted file mode 100644 index eb64988..0000000 --- a/kubeapi-load-balancer/.github/workflows/build.yml +++ /dev/null @@ -1,16 +0,0 @@ -name: Builds kubeapi-load-balancer charm -on: [push, pull_request] - -jobs: - build: - name: Build charm - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v2 - - name: Setup Python 3.8 - uses: actions/setup-python@v2 - with: - python-version: '3.8' - - name: Run build - run: | - make charm diff --git a/kubeapi-load-balancer/.github/workflows/main.yml b/kubeapi-load-balancer/.github/workflows/main.yml 
new file mode 100644 index 0000000..fb217c9 --- /dev/null +++ b/kubeapi-load-balancer/.github/workflows/main.yml @@ -0,0 +1,47 @@ +name: Test Suite +on: [pull_request] + +jobs: + lint-unit-wheelhouse: + name: Lint, Unit, Wheelhouse + runs-on: ubuntu-latest + strategy: + matrix: + python: [3.6, 3.7, 3.8, 3.9] + steps: + - name: Check out code + uses: actions/checkout@v2 + - name: Setup Python + uses: actions/setup-python@v2 + with: + python-version: ${{ matrix.python }} + - name: Install Dependencies + run: | + pip install tox + sudo snap install charm --classic + - name: Lint + run: tox -vve lint + - name: Unit Tests + run: tox -vve unit + - name: Validate Wheelhouse + run: tox -vve validate-wheelhouse + integration-test: + name: Integration test with VMWare + runs-on: self-hosted + timeout-minutes: 360 + steps: + - name: Check out code + uses: actions/checkout@v2 + - name: Setup Python + uses: actions/setup-python@v2 + with: + python-version: 3.8 + - name: Setup operator environment + uses: charmed-kubernetes/actions-operator@master + with: + provider: vsphere + credentials-yaml: ${{ secrets.CREDENTIALS_YAML }} + clouds-yaml: ${{ secrets.CLOUDS_YAML }} + bootstrap-options: "--model-default datastore=vsanDatastore --model-default primary-network=VLAN_2764" + - name: Run test + run: tox -e integration diff --git a/kubeapi-load-balancer/.github/workflows/tox.yaml b/kubeapi-load-balancer/.github/workflows/tox.yaml deleted file mode 100644 index b07172d..0000000 --- a/kubeapi-load-balancer/.github/workflows/tox.yaml +++ /dev/null @@ -1,22 +0,0 @@ -name: Run tests with Tox - -on: [push] - -jobs: - build: - - runs-on: ubuntu-latest - strategy: - matrix: - python: [3.5, 3.6, 3.7, 3.8] - - steps: - - uses: actions/checkout@v2 - - name: Setup Python - uses: actions/setup-python@v1 - with: - python-version: ${{ matrix.python }} - - name: Install Tox and any other packages - run: pip install tox - - name: Run Tox - run: tox -e py # Run tox using the version of Python in 
`PATH` diff --git a/kubeapi-load-balancer/.travis/profile-update.yaml b/kubeapi-load-balancer/.travis/profile-update.yaml deleted file mode 100644 index 57f96eb..0000000 --- a/kubeapi-load-balancer/.travis/profile-update.yaml +++ /dev/null @@ -1,12 +0,0 @@ -config: {} -description: Default LXD profile - updated -devices: - eth0: - name: eth0 - parent: lxdbr0 - nictype: bridged - type: nic - root: - path: / - pool: default - type: disk diff --git a/kubeapi-load-balancer/Makefile b/kubeapi-load-balancer/Makefile index 36d42c8..a1ad3a5 100644 --- a/kubeapi-load-balancer/Makefile +++ b/kubeapi-load-balancer/Makefile @@ -1,18 +1,24 @@ -CHANNEL ?= unpublished -CHARM := kubeapi-load-balancer +#!/usr/bin/make -setup-env: - bash script/bootstrap +all: lint unit_test -charm: setup-env - bash script/build -upload: -ifndef NAMESPACE - $(error NAMESPACE is not set) -endif +.PHONY: clean +clean: + @rm -rf .tox - env CHARM=$(CHARM) NAMESPACE=$(NAMESPACE) CHANNEL=$(CHANNEL) bash script/upload +.PHONY: apt_prereqs +apt_prereqs: + @# Need tox, but don't install the apt version unless we have to (don't want to conflict with pip) + @which tox >/dev/null || (sudo apt-get install -y python-pip && sudo pip install tox) -.phony: charm upload setup-env -all: charm +.PHONY: lint +lint: apt_prereqs + @tox --notest + @PATH=.tox/py34/bin:.tox/py35/bin flake8 $(wildcard hooks reactive lib unit_tests tests) + @charm proof + +.PHONY: unit_test +unit_test: apt_prereqs + @echo Starting tests... + tox diff --git a/kubeapi-load-balancer/copyright.layer-leadership b/kubeapi-load-balancer/copyright.layer-leadership new file mode 100644 index 0000000..08b983f --- /dev/null +++ b/kubeapi-load-balancer/copyright.layer-leadership @@ -0,0 +1,15 @@ +Copyright 2015-2016 Canonical Ltd. + +This file is part of the Leadership Layer for Juju. 
+ +This program is free software: you can redistribute it and/or modify +it under the terms of the GNU General Public License version 3, as +published by the Free Software Foundation. + +This program is distributed in the hope that it will be useful, but +WITHOUT ANY WARRANTY; without even the implied warranties of +MERCHANTABILITY, SATISFACTORY QUALITY, or FITNESS FOR A PARTICULAR +PURPOSE. See the GNU General Public License for more details. + +You should have received a copy of the GNU General Public License +along with this program. If not, see . diff --git a/kubeapi-load-balancer/hooks/lb-consumers-relation-broken b/kubeapi-load-balancer/hooks/lb-consumers-relation-broken new file mode 100755 index 0000000..9858c6b --- /dev/null +++ b/kubeapi-load-balancer/hooks/lb-consumers-relation-broken @@ -0,0 +1,22 @@ +#!/usr/bin/env python3 + +# Load modules from $JUJU_CHARM_DIR/lib +import sys +sys.path.append('lib') + +from charms.layer import basic # noqa +basic.bootstrap_charm_deps() + +from charmhelpers.core import hookenv # noqa +hookenv.atstart(basic.init_config_states) +hookenv.atexit(basic.clear_config_states) + + +# This will load and run the appropriate @hook and other decorated +# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive, +# and $JUJU_CHARM_DIR/hooks/relations. +# +# See https://jujucharms.com/docs/stable/authors-charm-building +# for more information on this pattern. 
+from charms.reactive import main # noqa +main() diff --git a/kubeapi-load-balancer/hooks/lb-consumers-relation-changed b/kubeapi-load-balancer/hooks/lb-consumers-relation-changed new file mode 100755 index 0000000..9858c6b --- /dev/null +++ b/kubeapi-load-balancer/hooks/lb-consumers-relation-changed @@ -0,0 +1,22 @@ +#!/usr/bin/env python3 + +# Load modules from $JUJU_CHARM_DIR/lib +import sys +sys.path.append('lib') + +from charms.layer import basic # noqa +basic.bootstrap_charm_deps() + +from charmhelpers.core import hookenv # noqa +hookenv.atstart(basic.init_config_states) +hookenv.atexit(basic.clear_config_states) + + +# This will load and run the appropriate @hook and other decorated +# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive, +# and $JUJU_CHARM_DIR/hooks/relations. +# +# See https://jujucharms.com/docs/stable/authors-charm-building +# for more information on this pattern. +from charms.reactive import main # noqa +main() diff --git a/kubeapi-load-balancer/hooks/lb-consumers-relation-created b/kubeapi-load-balancer/hooks/lb-consumers-relation-created new file mode 100755 index 0000000..9858c6b --- /dev/null +++ b/kubeapi-load-balancer/hooks/lb-consumers-relation-created @@ -0,0 +1,22 @@ +#!/usr/bin/env python3 + +# Load modules from $JUJU_CHARM_DIR/lib +import sys +sys.path.append('lib') + +from charms.layer import basic # noqa +basic.bootstrap_charm_deps() + +from charmhelpers.core import hookenv # noqa +hookenv.atstart(basic.init_config_states) +hookenv.atexit(basic.clear_config_states) + + +# This will load and run the appropriate @hook and other decorated +# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive, +# and $JUJU_CHARM_DIR/hooks/relations. +# +# See https://jujucharms.com/docs/stable/authors-charm-building +# for more information on this pattern. 
+from charms.reactive import main # noqa +main() diff --git a/kubeapi-load-balancer/hooks/lb-consumers-relation-departed b/kubeapi-load-balancer/hooks/lb-consumers-relation-departed new file mode 100755 index 0000000..9858c6b --- /dev/null +++ b/kubeapi-load-balancer/hooks/lb-consumers-relation-departed @@ -0,0 +1,22 @@ +#!/usr/bin/env python3 + +# Load modules from $JUJU_CHARM_DIR/lib +import sys +sys.path.append('lib') + +from charms.layer import basic # noqa +basic.bootstrap_charm_deps() + +from charmhelpers.core import hookenv # noqa +hookenv.atstart(basic.init_config_states) +hookenv.atexit(basic.clear_config_states) + + +# This will load and run the appropriate @hook and other decorated +# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive, +# and $JUJU_CHARM_DIR/hooks/relations. +# +# See https://jujucharms.com/docs/stable/authors-charm-building +# for more information on this pattern. +from charms.reactive import main # noqa +main() diff --git a/kubeapi-load-balancer/hooks/lb-consumers-relation-joined b/kubeapi-load-balancer/hooks/lb-consumers-relation-joined new file mode 100755 index 0000000..9858c6b --- /dev/null +++ b/kubeapi-load-balancer/hooks/lb-consumers-relation-joined @@ -0,0 +1,22 @@ +#!/usr/bin/env python3 + +# Load modules from $JUJU_CHARM_DIR/lib +import sys +sys.path.append('lib') + +from charms.layer import basic # noqa +basic.bootstrap_charm_deps() + +from charmhelpers.core import hookenv # noqa +hookenv.atstart(basic.init_config_states) +hookenv.atexit(basic.clear_config_states) + + +# This will load and run the appropriate @hook and other decorated +# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive, +# and $JUJU_CHARM_DIR/hooks/relations. +# +# See https://jujucharms.com/docs/stable/authors-charm-building +# for more information on this pattern. 
+from charms.reactive import main # noqa +main() diff --git a/kubeapi-load-balancer/hooks/relations/hacluster/interface.yaml b/kubeapi-load-balancer/hooks/relations/hacluster/interface.yaml index edd0c90..f03f3d7 100644 --- a/kubeapi-load-balancer/hooks/relations/hacluster/interface.yaml +++ b/kubeapi-load-balancer/hooks/relations/hacluster/interface.yaml @@ -11,3 +11,6 @@ ignore: - 'tox.ini' - 'unit_tests' - '.zuul.yaml' + - 'setup.cfg' + - 'setup.py' + - '**/ops_ha_interface.py' diff --git a/kubeapi-load-balancer/hooks/relations/hacluster/interface_hacluster/__init__.py b/kubeapi-load-balancer/hooks/relations/hacluster/interface_hacluster/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/kubernetes-master/hooks/relations/hacluster/common.py b/kubeapi-load-balancer/hooks/relations/hacluster/interface_hacluster/common.py similarity index 72% rename from kubernetes-master/hooks/relations/hacluster/common.py rename to kubeapi-load-balancer/hooks/relations/hacluster/interface_hacluster/common.py index d896510..e4b13ff 100644 --- a/kubernetes-master/hooks/relations/hacluster/common.py +++ b/kubeapi-load-balancer/hooks/relations/hacluster/interface_hacluster/common.py @@ -13,9 +13,292 @@ import hashlib import ipaddress +import json from six import string_types +class ResourceManagement(): + + def data_changed(self, data_id, data, hash_type='md5'): + raise NotImplementedError + + def get_local(self, key, default=None, scope=None): + raise NotImplementedError + + def set_local(self, key=None, value=None, data=None, scope=None, **kwdata): + raise NotImplementedError + + def set_remote(self, key=None, value=None, data=None, scope=None, + **kwdata): + raise NotImplementedError + + def is_clustered(self): + """Has the hacluster charm set clustered? + + The hacluster charm sets cluster=True when it determines it is ready. + Check the relation data for clustered and force a boolean return. 
+ + :returns: boolean + """ + clustered_values = self.get_remote_all('clustered') + if clustered_values: + # There is only ever one subordinate hacluster unit + clustered = clustered_values[0] + # Future versions of hacluster will return a bool + # Current versions return a string + if type(clustered) is bool: + return clustered + elif (clustered is not None and + (clustered.lower() == 'true' or + clustered.lower() == 'yes')): + return True + return False + + def bind_on(self, iface=None, mcastport=None): + relation_data = {} + if iface: + relation_data['corosync_bindiface'] = iface + if mcastport: + relation_data['corosync_mcastport'] = mcastport + + if relation_data and self.data_changed('hacluster-bind_on', + relation_data): + self.set_local(**relation_data) + self.set_remote(**relation_data) + + def manage_resources(self, crm): + """ + Request for the hacluster to manage the resources defined in the + crm object. + + res = CRM() + res.primitive('res_neutron_haproxy', 'lsb:haproxy', + op='monitor interval="5s"') + res.init_services('haproxy') + res.clone('cl_nova_haproxy', 'res_neutron_haproxy') + + hacluster.manage_resources(crm) + + :param crm: CRM() instance - Config object for Pacemaker resources + :returns: None + """ + relation_data = { + 'json_{}'.format(k): json.dumps(v, sort_keys=True) + for k, v in crm.items() if v + } + if self.data_changed('hacluster-manage_resources', relation_data): + self.set_local(**relation_data) + self.set_remote(**relation_data) + + def bind_resources(self, iface=None, mcastport=None): + """Inform the ha subordinate about each service it should manage. 
The + child class specifies the services via self.ha_resources + + :param iface: string - Network interface to bind to + :param mcastport: int - Multicast port corosync should use for cluster + management traffic + """ + if mcastport is None: + mcastport = 4440 + resources_dict = self.get_local('resources') + self.bind_on(iface=iface, mcastport=mcastport) + if resources_dict: + resources = CRM(**resources_dict) + self.manage_resources(resources) + + def delete_resource(self, resource_name): + resource_dict = self.get_local('resources') + if resource_dict: + resources = CRM(**resource_dict) + else: + resources = CRM() + resources.add_delete_resource(resource_name) + self.set_local(resources=resources) + + def add_vip(self, name, vip, iface=None, netmask=None): + """Add a VirtualIP object for each user specified vip to self.resources + + :param name: string - Name of service + :param vip: string - Virtual IP to be managed + :param iface: string - Network interface to bind vip to + :param netmask: string - Netmask for vip + :returns: None + """ + resource_dict = self.get_local('resources') + if resource_dict: + resources = CRM(**resource_dict) + else: + resources = CRM() + resources.add( + VirtualIP( + name, + vip, + nic=iface, + cidr=netmask,)) + + # Vip Group + group = 'grp_{}_vips'.format(name) + vip_res_group_members = [] + if resource_dict: + vip_resources = resource_dict.get('resources') + if vip_resources: + for vip_res in vip_resources: + if 'vip' in vip_res: + vip_res_group_members.append(vip_res) + resources.group(group, + *sorted(vip_res_group_members)) + + self.set_local(resources=resources) + + def remove_vip(self, name, vip, iface=None): + """Remove a virtual IP + + :param name: string - Name of service + :param vip: string - Virtual IP + :param iface: string - Network interface vip bound to + """ + if iface: + nic_name = iface + else: + nic_name = hashlib.sha1(vip.encode('UTF-8')).hexdigest()[:7] + self.delete_resource('res_{}_{}_vip'.format(name, 
nic_name)) + + def add_init_service(self, name, service, clone=True): + """Add a InitService object for haproxy to self.resources + + :param name: string - Name of service + :param service: string - Name service uses in init system + :returns: None + """ + resource_dict = self.get_local('resources') + if resource_dict: + resources = CRM(**resource_dict) + else: + resources = CRM() + resources.add( + InitService(name, service, clone)) + self.set_local(resources=resources) + + def remove_init_service(self, name, service): + """Remove an init service + + :param name: string - Name of service + :param service: string - Name of service used in init system + """ + res_key = 'res_{}_{}'.format( + name.replace('-', '_'), + service.replace('-', '_')) + self.delete_resource(res_key) + + def add_systemd_service(self, name, service, clone=True): + """Add a SystemdService object to self.resources + + :param name: string - Name of service + :param service: string - Name service uses in systemd + :returns: None + """ + resource_dict = self.get_local('resources') + if resource_dict: + resources = CRM(**resource_dict) + else: + resources = CRM() + resources.add( + SystemdService(name, service, clone)) + self.set_local(resources=resources) + + def remove_systemd_service(self, name, service): + """Remove a systemd service + + :param name: string - Name of service + :param service: string - Name of service used in systemd + """ + res_key = 'res_{}_{}'.format( + name.replace('-', '_'), + service.replace('-', '_')) + self.delete_resource(res_key) + + def add_dnsha(self, name, ip, fqdn, endpoint_type): + """Add a DNS entry to self.resources + + :param name: string - Name of service + :param ip: string - IP address dns entry should resolve to + :param fqdn: string - The DNS entry name + :param endpoint_type: string - Public, private, internal etc + :returns: None + """ + resource_dict = self.get_local('resources') + if resource_dict: + resources = CRM(**resource_dict) + else: + resources 
= CRM() + resources.add( + DNSEntry(name, ip, fqdn, endpoint_type)) + + # DNS Group + group = 'grp_{}_hostnames'.format(name) + dns_res_group_members = [] + if resource_dict: + dns_resources = resource_dict.get('resources') + if dns_resources: + for dns_res in dns_resources: + if 'hostname' in dns_res: + dns_res_group_members.append(dns_res) + resources.group(group, + *sorted(dns_res_group_members)) + + self.set_local(resources=resources) + + def remove_dnsha(self, name, endpoint_type): + """Remove a DNS entry + + :param name: string - Name of service + :param endpoint_type: string - Public, private, internal etc + :returns: None + """ + res_key = 'res_{}_{}_hostname'.format( + self.service_name.replace('-', '_'), + self.endpoint_type) + self.delete_resource(res_key) + + def add_colocation(self, name, score, colo_resources, node_attribute=None): + """Add a colocation directive + + :param name: string - Name of colocation directive + :param score: string - ALWAYS, INFINITY, NEVER, NEGATIVE_INFINITY}. See + CRM.colocation for more details + :param colo_resources: List[string] - List of resource names to + colocate + :param node_attribute: Colocate resources on a set of nodes with this + attribute and not necessarily on the same node. 
+ """ + node_config = {} + if node_attribute: + node_config = { + 'node_attribute': node_attribute} + resource_dict = self.get_local('resources') + if resource_dict: + resources = CRM(**resource_dict) + else: + resources = CRM() + resources.colocation( + name, + score, + *colo_resources, + **node_config) + self.set_local(resources=resources) + + def remove_colocation(self, name): + """Remove a colocation directive + + :param name: string - Name of colocation directive + """ + self.delete_resource(name) + + def get_remote_all(self, key, default=None): + """Return a list of all values presented by remote units for key""" + raise NotImplementedError + + class CRM(dict): """ Configuration object for Pacemaker resources for the HACluster diff --git a/kubeapi-load-balancer/hooks/relations/hacluster/requires.py b/kubeapi-load-balancer/hooks/relations/hacluster/requires.py index 9b72d97..395a658 100644 --- a/kubeapi-load-balancer/hooks/relations/hacluster/requires.py +++ b/kubeapi-load-balancer/hooks/relations/hacluster/requires.py @@ -11,18 +11,15 @@ # See the License for the specific language governing permissions and # limitations under the License. -import json -import hashlib - -import relations.hacluster.common +import relations.hacluster.interface_hacluster.common as common from charms.reactive import hook from charms.reactive import RelationBase from charms.reactive import scopes -from charms.reactive.helpers import data_changed +from charms.reactive.helpers import data_changed as rh_data_changed from charmhelpers.core import hookenv -class HAClusterRequires(RelationBase): +class HAClusterRequires(RelationBase, common.ResourceManagement): # The hacluster charm is a subordinate charm and really only works # for a single service to the HA Cluster relation, therefore set the # expected scope to be GLOBAL. 
@@ -44,232 +41,8 @@ class HAClusterRequires(RelationBase): self.remove_state('{relation_name}.available') self.remove_state('{relation_name}.connected') - def is_clustered(self): - """Has the hacluster charm set clustered? - - The hacluster charm sets cluster=True when it determines it is ready. - Check the relation data for clustered and force a boolean return. - - :returns: boolean - """ - clustered_values = self.get_remote_all('clustered') - if clustered_values: - # There is only ever one subordinate hacluster unit - clustered = clustered_values[0] - # Future versions of hacluster will return a bool - # Current versions return a string - if type(clustered) is bool: - return clustered - elif (clustered is not None and - (clustered.lower() == 'true' or - clustered.lower() == 'yes')): - return True - return False - - def bind_on(self, iface=None, mcastport=None): - relation_data = {} - if iface: - relation_data['corosync_bindiface'] = iface - if mcastport: - relation_data['corosync_mcastport'] = mcastport - - if relation_data and data_changed('hacluster-bind_on', relation_data): - self.set_local(**relation_data) - self.set_remote(**relation_data) - - def manage_resources(self, crm): - """ - Request for the hacluster to manage the resources defined in the - crm object. - - res = CRM() - res.primitive('res_neutron_haproxy', 'lsb:haproxy', - op='monitor interval="5s"') - res.init_services('haproxy') - res.clone('cl_nova_haproxy', 'res_neutron_haproxy') - - hacluster.manage_resources(crm) - - :param crm: CRM() instance - Config object for Pacemaker resources - :returns: None - """ - relation_data = { - 'json_{}'.format(k): json.dumps(v, sort_keys=True) - for k, v in crm.items() if v - } - if data_changed('hacluster-manage_resources', relation_data): - self.set_local(**relation_data) - self.set_remote(**relation_data) - - def bind_resources(self, iface=None, mcastport=None): - """Inform the ha subordinate about each service it should manage. 
The - child class specifies the services via self.ha_resources - - :param iface: string - Network interface to bind to - :param mcastport: int - Multicast port corosync should use for cluster - management traffic - """ - if mcastport is None: - mcastport = 4440 - resources_dict = self.get_local('resources') - self.bind_on(iface=iface, mcastport=mcastport) - if resources_dict: - resources = relations.hacluster.common.CRM(**resources_dict) - self.manage_resources(resources) - - def delete_resource(self, resource_name): - resource_dict = self.get_local('resources') - if resource_dict: - resources = relations.hacluster.common.CRM(**resource_dict) - else: - resources = relations.hacluster.common.CRM() - resources.add_delete_resource(resource_name) - self.set_local(resources=resources) - - def add_vip(self, name, vip, iface=None, netmask=None): - """Add a VirtualIP object for each user specified vip to self.resources - - :param name: string - Name of service - :param vip: string - Virtual IP to be managed - :param iface: string - Network interface to bind vip to - :param netmask: string - Netmask for vip - :returns: None - """ - resource_dict = self.get_local('resources') - if resource_dict: - resources = relations.hacluster.common.CRM(**resource_dict) - else: - resources = relations.hacluster.common.CRM() - resources.add( - relations.hacluster.common.VirtualIP( - name, - vip, - nic=iface, - cidr=netmask,)) - - # Vip Group - group = 'grp_{}_vips'.format(name) - vip_res_group_members = [] - if resource_dict: - vip_resources = resource_dict.get('resources') - if vip_resources: - for vip_res in vip_resources: - if 'vip' in vip_res: - vip_res_group_members.append(vip_res) - resources.group(group, - *sorted(vip_res_group_members)) - - self.set_local(resources=resources) - - def remove_vip(self, name, vip, iface=None): - """Remove a virtual IP - - :param name: string - Name of service - :param vip: string - Virtual IP - :param iface: string - Network interface vip bound to - 
""" - if iface: - nic_name = iface - else: - nic_name = hashlib.sha1(vip.encode('UTF-8')).hexdigest()[:7] - self.delete_resource('res_{}_{}_vip'.format(name, nic_name)) - - def add_init_service(self, name, service, clone=True): - """Add a InitService object for haproxy to self.resources - - :param name: string - Name of service - :param service: string - Name service uses in init system - :returns: None - """ - resource_dict = self.get_local('resources') - if resource_dict: - resources = relations.hacluster.common.CRM(**resource_dict) - else: - resources = relations.hacluster.common.CRM() - resources.add( - relations.hacluster.common.InitService(name, service, clone)) - self.set_local(resources=resources) - - def remove_init_service(self, name, service): - """Remove an init service - - :param name: string - Name of service - :param service: string - Name of service used in init system - """ - res_key = 'res_{}_{}'.format( - name.replace('-', '_'), - service.replace('-', '_')) - self.delete_resource(res_key) - - def add_systemd_service(self, name, service, clone=True): - """Add a SystemdService object to self.resources - - :param name: string - Name of service - :param service: string - Name service uses in systemd - :returns: None - """ - resource_dict = self.get_local('resources') - if resource_dict: - resources = relations.hacluster.common.CRM(**resource_dict) - else: - resources = relations.hacluster.common.CRM() - resources.add( - relations.hacluster.common.SystemdService(name, service, clone)) - self.set_local(resources=resources) - - def remove_systemd_service(self, name, service): - """Remove a systemd service - - :param name: string - Name of service - :param service: string - Name of service used in systemd - """ - res_key = 'res_{}_{}'.format( - name.replace('-', '_'), - service.replace('-', '_')) - self.delete_resource(res_key) - - def add_dnsha(self, name, ip, fqdn, endpoint_type): - """Add a DNS entry to self.resources - - :param name: string - Name of 
service - :param ip: string - IP address dns entry should resolve to - :param fqdn: string - The DNS entry name - :param endpoint_type: string - Public, private, internal etc - :returns: None - """ - resource_dict = self.get_local('resources') - if resource_dict: - resources = relations.hacluster.common.CRM(**resource_dict) - else: - resources = relations.hacluster.common.CRM() - resources.add( - relations.hacluster.common.DNSEntry(name, ip, fqdn, endpoint_type)) - - # DNS Group - group = 'grp_{}_hostnames'.format(name) - dns_res_group_members = [] - if resource_dict: - dns_resources = resource_dict.get('resources') - if dns_resources: - for dns_res in dns_resources: - if 'hostname' in dns_res: - dns_res_group_members.append(dns_res) - resources.group(group, - *sorted(dns_res_group_members)) - - self.set_local(resources=resources) - - def remove_dnsha(self, name, endpoint_type): - """Remove a DNS entry - - :param name: string - Name of service - :param endpoint_type: string - Public, private, internal etc - :returns: None - """ - res_key = 'res_{}_{}_hostname'.format( - self.service_name.replace('-', '_'), - self.endpoint_type) - self.delete_resource(res_key) + def data_changed(self, data_id, data, hash_type='md5'): + return rh_data_changed(data_id, data, hash_type) def get_remote_all(self, key, default=None): """Return a list of all values presented by remote units for key""" diff --git a/kubeapi-load-balancer/hooks/relations/hacluster/test-requirements.txt b/kubeapi-load-balancer/hooks/relations/hacluster/test-requirements.txt index 6da7df2..12452e5 100644 --- a/kubeapi-load-balancer/hooks/relations/hacluster/test-requirements.txt +++ b/kubeapi-load-balancer/hooks/relations/hacluster/test-requirements.txt @@ -4,3 +4,4 @@ stestr>=2.2.0 charms.reactive coverage>=3.6 netifaces +git+https://github.com/canonical/operator.git#egg=ops diff --git a/kubeapi-load-balancer/layer.yaml b/kubeapi-load-balancer/layer.yaml index 59f9e9c..0f3a5b3 100644 --- 
a/kubeapi-load-balancer/layer.yaml +++ b/kubeapi-load-balancer/layer.yaml @@ -15,6 +15,7 @@ - "layer:tls-client" - "layer:hacluster" - "interface:public-address" +- "layer:leadership" "exclude": [".travis.yml", "tests", "tox.ini", "test-requirements.txt", "unit_tests"] "options": "apt": @@ -42,6 +43,7 @@ "nginx": {} "debug": {} "kubernetes-common": {} + "leadership": {} "kubeapi-load-balancer": {} "repo": "https://github.com/kubernetes/kubernetes.git" "is": "kubeapi-load-balancer" diff --git a/kubeapi-load-balancer/lib/charms/layer/basic.py b/kubeapi-load-balancer/lib/charms/layer/basic.py index 7507203..bbdd074 100644 --- a/kubeapi-load-balancer/lib/charms/layer/basic.py +++ b/kubeapi-load-balancer/lib/charms/layer/basic.py @@ -199,7 +199,13 @@ def bootstrap_charm_deps(): # a set so that we can ignore the pre-install packages and let pip # choose the best version in case there are multiple from layer # conflicts) - pkgs = _load_wheelhouse_versions().keys() - set(pre_install_pkgs) + _versions = _load_wheelhouse_versions() + _pkgs = _versions.keys() - set(pre_install_pkgs) + # add back the versions such that each package in pkgs is + # ==. + # This ensures that pip 20.3.4+ will install the packages from the + # wheelhouse without (erroneously) flagging an error. + pkgs = _add_back_versions(_pkgs, _versions) reinstall_flag = '--force-reinstall' if not cfg.get('use_venv', True) and pre_eoan: reinstall_flag = '--ignore-installed' @@ -278,6 +284,55 @@ def _load_wheelhouse_versions(): return versions +def _add_back_versions(pkgs, versions): + """Add back the version strings to each of the packages. + + The versions are LooseVersion() from _load_wheelhouse_versions(). This + function strips the ".zip" or ".tar.gz" from the end of the version string + and adds it back to the package in the form of == + + If a package name is not a key in the versions dictionary, then it is + returned in the list unchanged. 
+ + :param pkgs: A list of package names + :type pkgs: List[str] + :param versions: A map of package to LooseVersion + :type versions: Dict[str, LooseVersion] + :returns: A list of (maybe) versioned packages + :rtype: List[str] + """ + def _strip_ext(s): + """Strip an extension (if it exists) from the string + + :param s: the string to strip an extension off if it exists + :type s: str + :returns: string without an extension of .zip or .tar.gz + :rtype: str + """ + for ending in [".zip", ".tar.gz"]: + if s.endswith(ending): + return s[:-len(ending)] + return s + + def _maybe_add_version(pkg): + """Maybe add back the version number to a package if it exists. + + Adds the version number, if the package exists in the lexically + captured `versions` dictionary, in the form ==. Strips + the extension if it exists. + + :param pkg: the package name to (maybe) add the version number to. + :type pkg: str + """ + try: + return "{}=={}".format(pkg, _strip_ext(str(versions[pkg]))) + except KeyError: + pass + return pkg + + return [_maybe_add_version(pkg) for pkg in pkgs] + + def _update_if_newer(pip, pkgs): installed = _load_installed_versions(pip) wheelhouse = _load_wheelhouse_versions() diff --git a/kubeapi-load-balancer/lib/charms/layer/kubernetes_common.py b/kubeapi-load-balancer/lib/charms/layer/kubernetes_common.py index 0ac309f..fb14ad2 100644 --- a/kubeapi-load-balancer/lib/charms/layer/kubernetes_common.py +++ b/kubeapi-load-balancer/lib/charms/layer/kubernetes_common.py @@ -21,7 +21,12 @@ import subprocess import hashlib import json import traceback +import random +import string +import tempfile +import yaml +from base64 import b64decode, b64encode from pathlib import Path from subprocess import check_output, check_call from socket import gethostname, getfqdn @@ -29,19 +34,23 @@ from shlex import split from subprocess import CalledProcessError from charmhelpers.core import hookenv, unitdata from charmhelpers.core import host +from charmhelpers.core.templating import 
render from charms.reactive import endpoint_from_flag, is_state from time import sleep +AUTH_SECRET_NS = "kube-system" +AUTH_SECRET_TYPE = "juju.is/token-auth" + db = unitdata.kv() -kubeclientconfig_path = '/root/.kube/config' -gcp_creds_env_key = 'GOOGLE_APPLICATION_CREDENTIALS' -kubeproxyconfig_path = '/root/cdk/kubeproxyconfig' -certs_dir = Path('/root/cdk') -ca_crt_path = certs_dir / 'ca.crt' -server_crt_path = certs_dir / 'server.crt' -server_key_path = certs_dir / 'server.key' -client_crt_path = certs_dir / 'client.crt' -client_key_path = certs_dir / 'client.key' +kubeclientconfig_path = "/root/.kube/config" +gcp_creds_env_key = "GOOGLE_APPLICATION_CREDENTIALS" +kubeproxyconfig_path = "/root/cdk/kubeproxyconfig" +certs_dir = Path("/root/cdk") +ca_crt_path = certs_dir / "ca.crt" +server_crt_path = certs_dir / "server.crt" +server_key_path = certs_dir / "server.key" +client_crt_path = certs_dir / "client.crt" +client_key_path = certs_dir / "client.key" def get_version(bin_name): @@ -56,13 +65,13 @@ def get_version(bin_name): (1, 6, 0) """ - cmd = '{} --version'.format(bin_name).split() - version_string = subprocess.check_output(cmd).decode('utf-8') + cmd = "{} --version".format(bin_name).split() + version_string = subprocess.check_output(cmd).decode("utf-8") return tuple(int(q) for q in re.findall("[0-9]+", version_string)[:3]) def retry(times, delay_secs): - """ Decorator for retrying a method call. + """Decorator for retrying a method call. Args: times: How many times should we retry before giving up @@ -72,7 +81,7 @@ def retry(times, delay_secs): """ def retry_decorator(func): - """ Decorator to wrap the function provided. + """Decorator to wrap the function provided. 
Args: func: Provided function should return either True od False @@ -80,6 +89,7 @@ def retry(times, delay_secs): Returns: A callable that would return the last call outcome """ + def _wrapped(*args, **kwargs): res = func(*args, **kwargs) attempt = 0 @@ -90,36 +100,37 @@ def retry(times, delay_secs): break attempt += 1 return res + return _wrapped return retry_decorator def calculate_resource_checksum(resource): - ''' Calculate a checksum for a resource ''' + """Calculate a checksum for a resource""" md5 = hashlib.md5() path = hookenv.resource_get(resource) if path: - with open(path, 'rb') as f: + with open(path, "rb") as f: data = f.read() md5.update(data) return md5.hexdigest() def get_resource_checksum_db_key(checksum_prefix, resource): - ''' Convert a resource name to a resource checksum database key. ''' + """Convert a resource name to a resource checksum database key.""" return checksum_prefix + resource def migrate_resource_checksums(checksum_prefix, snap_resources): - ''' Migrate resource checksums from the old schema to the new one ''' + """Migrate resource checksums from the old schema to the new one""" for resource in snap_resources: new_key = get_resource_checksum_db_key(checksum_prefix, resource) if not db.get(new_key): path = hookenv.resource_get(resource) if path: # old key from charms.reactive.helpers.any_file_changed - old_key = 'reactive.files_changed.' + path + old_key = "reactive.files_changed." 
+ path old_checksum = db.get(old_key) db.set(new_key, old_checksum) else: @@ -131,7 +142,7 @@ def migrate_resource_checksums(checksum_prefix, snap_resources): def check_resources_for_upgrade_needed(checksum_prefix, snap_resources): - hookenv.status_set('maintenance', 'Checking resources') + hookenv.status_set("maintenance", "Checking resources") for resource in snap_resources: key = get_resource_checksum_db_key(checksum_prefix, resource) old_checksum = db.get(key) @@ -148,25 +159,31 @@ def calculate_and_store_resource_checksums(checksum_prefix, snap_resources): db.set(key, checksum) -def get_ingress_address(endpoint_name): +def get_ingress_address(endpoint_name, ignore_addresses=None): try: network_info = hookenv.network_get(endpoint_name) except NotImplementedError: network_info = {} - if not network_info or 'ingress-addresses' not in network_info: + if not network_info or "ingress-addresses" not in network_info: # if they don't have ingress-addresses they are running a juju that # doesn't support spaces, so just return the private address - return hookenv.unit_get('private-address') + return hookenv.unit_get("private-address") - addresses = network_info['ingress-addresses'] + addresses = network_info["ingress-addresses"] + + if ignore_addresses: + hookenv.log("ingress-addresses before filtering: {}".format(addresses)) + iter_filter = filter(lambda item: item not in ignore_addresses, addresses) + addresses = list(iter_filter) + hookenv.log("ingress-addresses after filtering: {}".format(addresses)) # Need to prefer non-fan IP addresses due to various issues, e.g. # https://bugs.launchpad.net/charm-gcp-integrator/+bug/1822997 # Fan typically likes to use IPs in the 240.0.0.0/4 block, so we'll # prioritize those last. Not technically correct, but good enough. 
try: - sort_key = lambda a: int(a.partition('.')[0]) >= 240 # noqa: E731 + sort_key = lambda a: int(a.partition(".")[0]) >= 240 # noqa: E731 addresses = sorted(addresses, key=sort_key) except Exception: hookenv.log(traceback.format_exc()) @@ -180,10 +197,10 @@ def get_ingress_address6(endpoint_name): except NotImplementedError: network_info = {} - if not network_info or 'ingress-addresses' not in network_info: + if not network_info or "ingress-addresses" not in network_info: return None - addresses = network_info['ingress-addresses'] + addresses = network_info["ingress-addresses"] for addr in addresses: ip_addr = ipaddress.ip_interface(addr).ip @@ -194,35 +211,35 @@ def get_ingress_address6(endpoint_name): def service_restart(service_name): - hookenv.status_set('maintenance', 'Restarting {0} service'.format( - service_name)) + hookenv.status_set("maintenance", "Restarting {0} service".format(service_name)) host.service_restart(service_name) def service_start(service_name): - hookenv.log('Starting {0} service.'.format(service_name)) + hookenv.log("Starting {0} service.".format(service_name)) host.service_stop(service_name) def service_stop(service_name): - hookenv.log('Stopping {0} service.'.format(service_name)) + hookenv.log("Stopping {0} service.".format(service_name)) host.service_stop(service_name) def arch(): - '''Return the package architecture as a string. Raise an exception if the - architecture is not supported by kubernetes.''' + """Return the package architecture as a string. Raise an exception if the + architecture is not supported by kubernetes.""" # Get the package architecture for this system. - architecture = check_output(['dpkg', '--print-architecture']).rstrip() + architecture = check_output(["dpkg", "--print-architecture"]).rstrip() # Convert the binary result into a string. 
- architecture = architecture.decode('utf-8') + architecture = architecture.decode("utf-8") return architecture def get_service_ip(service, namespace="kube-system", errors_fatal=True): try: - output = kubectl('get', 'service', '--namespace', namespace, service, - '--output', 'json') + output = kubectl( + "get", "service", "--namespace", namespace, service, "--output", "json" + ) except CalledProcessError: if errors_fatal: raise @@ -230,20 +247,20 @@ def get_service_ip(service, namespace="kube-system", errors_fatal=True): return None else: svc = json.loads(output.decode()) - return svc['spec']['clusterIP'] + return svc["spec"]["clusterIP"] def kubectl(*args): - ''' Run a kubectl cli command with a config file. Returns stdout and throws - an error if the command fails. ''' - command = ['kubectl', '--kubeconfig=' + kubeclientconfig_path] + list(args) - hookenv.log('Executing {}'.format(command)) + """Run a kubectl cli command with a config file. Returns stdout and throws + an error if the command fails.""" + command = ["kubectl", "--kubeconfig=" + kubeclientconfig_path] + list(args) + hookenv.log("Executing {}".format(command)) return check_output(command) def kubectl_success(*args): - ''' Runs kubectl with the given args. Returns True if successful, False if - not. ''' + """Runs kubectl with the given args. 
Returns True if successful, False if + not.""" try: kubectl(*args) return True @@ -252,75 +269,97 @@ def kubectl_success(*args): def kubectl_manifest(operation, manifest): - ''' Wrap the kubectl creation command when using filepath resources + """Wrap the kubectl creation command when using filepath resources :param operation - one of get, create, delete, replace :param manifest - filepath to the manifest - ''' + """ # Deletions are a special case - if operation == 'delete': + if operation == "delete": # Ensure we immediately remove requested resources with --now - return kubectl_success(operation, '-f', manifest, '--now') + return kubectl_success(operation, "-f", manifest, "--now") else: # Guard against an error re-creating the same manifest multiple times - if operation == 'create': + if operation == "create": # If we already have the definition, its probably safe to assume # creation was true. - if kubectl_success('get', '-f', manifest): - hookenv.log('Skipping definition for {}'.format(manifest)) + if kubectl_success("get", "-f", manifest): + hookenv.log("Skipping definition for {}".format(manifest)) return True # Execute the requested command that did not match any of the special # cases above - return kubectl_success(operation, '-f', manifest) + return kubectl_success(operation, "-f", manifest) def get_node_name(): - kubelet_extra_args = parse_extra_args('kubelet-extra-args') - cloud_provider = kubelet_extra_args.get('cloud-provider', '') - if is_state('endpoint.aws.ready'): - cloud_provider = 'aws' - elif is_state('endpoint.gcp.ready'): - cloud_provider = 'gce' - elif is_state('endpoint.openstack.ready'): - cloud_provider = 'openstack' - elif is_state('endpoint.vsphere.ready'): - cloud_provider = 'vsphere' - elif is_state('endpoint.azure.ready'): - cloud_provider = 'azure' - if cloud_provider == 'aws': + kubelet_extra_args = parse_extra_args("kubelet-extra-args") + cloud_provider = kubelet_extra_args.get("cloud-provider", "") + if 
is_state("endpoint.aws.ready"): + cloud_provider = "aws" + elif is_state("endpoint.gcp.ready"): + cloud_provider = "gce" + elif is_state("endpoint.openstack.ready"): + cloud_provider = "openstack" + elif is_state("endpoint.vsphere.ready"): + cloud_provider = "vsphere" + elif is_state("endpoint.azure.ready"): + cloud_provider = "azure" + if cloud_provider == "aws": return getfqdn().lower() else: return gethostname().lower() -def create_kubeconfig(kubeconfig, server, ca, key=None, certificate=None, - user='ubuntu', context='juju-context', - cluster='juju-cluster', password=None, token=None, - keystone=False, aws_iam_cluster_id=None): - '''Create a configuration for Kubernetes based on path using the supplied +def create_kubeconfig( + kubeconfig, + server, + ca, + key=None, + certificate=None, + user="ubuntu", + context="juju-context", + cluster="juju-cluster", + password=None, + token=None, + keystone=False, + aws_iam_cluster_id=None, +): + """Create a configuration for Kubernetes based on path using the supplied arguments for values of the Kubernetes server, CA, key, certificate, user - context and cluster.''' + context and cluster.""" if not key and not certificate and not password and not token: - raise ValueError('Missing authentication mechanism.') + raise ValueError("Missing authentication mechanism.") + elif key and not certificate: + raise ValueError("Missing certificate.") + elif not key and certificate: + raise ValueError("Missing key.") + elif token and password: + # token and password are mutually exclusive. Error early if both are + # present. The developer has requested an impossible situation. + # see: kubectl config set-credentials --help + raise ValueError("Token and Password are mutually exclusive.") + + old_kubeconfig = Path(kubeconfig) + new_kubeconfig = Path(str(kubeconfig) + ".new") - # token and password are mutually exclusive. Error early if both are - # present. The developer has requested an impossible situation. 
- # see: kubectl config set-credentials --help - if token and password: - raise ValueError('Token and Password are mutually exclusive.') # Create the config file with the address of the master server. - cmd = 'kubectl config --kubeconfig={0} set-cluster {1} ' \ - '--server={2} --certificate-authority={3} --embed-certs=true' - check_call(split(cmd.format(kubeconfig, cluster, server, ca))) + cmd = ( + "kubectl config --kubeconfig={0} set-cluster {1} " + "--server={2} --certificate-authority={3} --embed-certs=true" + ) + check_call(split(cmd.format(new_kubeconfig, cluster, server, ca))) # Delete old users - cmd = 'kubectl config --kubeconfig={0} unset users' - check_call(split(cmd.format(kubeconfig))) + cmd = "kubectl config --kubeconfig={0} unset users" + check_call(split(cmd.format(new_kubeconfig))) # Create the credentials using the client flags. - cmd = 'kubectl config --kubeconfig={0} ' \ - 'set-credentials {1} '.format(kubeconfig, user) + cmd = "kubectl config --kubeconfig={0} " "set-credentials {1} ".format( + new_kubeconfig, user + ) if key and certificate: - cmd = '{0} --client-key={1} --client-certificate={2} '\ - '--embed-certs=true'.format(cmd, key, certificate) + cmd = ( + "{0} --client-key={1} --client-certificate={2} " + "--embed-certs=true".format(cmd, key, certificate) + ) if password: cmd = "{0} --username={1} --password={2}".format(cmd, user, password) # This is mutually exclusive from password. They will not work together. @@ -328,71 +367,87 @@ def create_kubeconfig(kubeconfig, server, ca, key=None, certificate=None, cmd = "{0} --token={1}".format(cmd, token) check_call(split(cmd)) # Create a default context with the cluster. 
- cmd = 'kubectl config --kubeconfig={0} set-context {1} ' \ - '--cluster={2} --user={3}' - check_call(split(cmd.format(kubeconfig, context, cluster, user))) + cmd = "kubectl config --kubeconfig={0} set-context {1} " "--cluster={2} --user={3}" + check_call(split(cmd.format(new_kubeconfig, context, cluster, user))) # Make the config use this new context. - cmd = 'kubectl config --kubeconfig={0} use-context {1}' - check_call(split(cmd.format(kubeconfig, context))) + cmd = "kubectl config --kubeconfig={0} use-context {1}" + check_call(split(cmd.format(new_kubeconfig, context))) if keystone: # create keystone user - cmd = 'kubectl config --kubeconfig={0} ' \ - 'set-credentials keystone-user'.format(kubeconfig) + cmd = "kubectl config --kubeconfig={0} " "set-credentials keystone-user".format( + new_kubeconfig + ) check_call(split(cmd)) # create keystone context - cmd = 'kubectl config --kubeconfig={0} ' \ - 'set-context --cluster={1} ' \ - '--user=keystone-user keystone'.format(kubeconfig, cluster) + cmd = ( + "kubectl config --kubeconfig={0} " + "set-context --cluster={1} " + "--user=keystone-user keystone".format(new_kubeconfig, cluster) + ) check_call(split(cmd)) # use keystone context - cmd = 'kubectl config --kubeconfig={0} ' \ - 'use-context keystone'.format(kubeconfig) + cmd = "kubectl config --kubeconfig={0} " "use-context keystone".format( + new_kubeconfig + ) check_call(split(cmd)) # manually add exec command until kubectl can do it for us - with open(kubeconfig, "r") as f: + with open(new_kubeconfig, "r") as f: content = f.read() - content = content.replace("""- name: keystone-user - user: {}""", """- name: keystone-user + content = content.replace( + """- name: keystone-user + user: {}""", + """- name: keystone-user user: exec: command: "/snap/bin/client-keystone-auth" apiVersion: "client.authentication.k8s.io/v1beta1" -""") - with open(kubeconfig, "w") as f: +""", + ) + with open(new_kubeconfig, "w") as f: f.write(content) if aws_iam_cluster_id: # create 
aws-iam context - cmd = 'kubectl config --kubeconfig={0} ' \ - 'set-context --cluster={1} ' \ - '--user=aws-iam-user aws-iam-authenticator' - check_call(split(cmd.format(kubeconfig, cluster))) + cmd = ( + "kubectl config --kubeconfig={0} " + "set-context --cluster={1} " + "--user=aws-iam-user aws-iam-authenticator" + ) + check_call(split(cmd.format(new_kubeconfig, cluster))) # append a user for aws-iam - cmd = 'kubectl --kubeconfig={0} config set-credentials ' \ - 'aws-iam-user --exec-command=aws-iam-authenticator ' \ - '--exec-arg="token" --exec-arg="-i" --exec-arg="{1}" ' \ - '--exec-arg="-r" --exec-arg="<>" ' \ - '--exec-api-version=client.authentication.k8s.io/v1alpha1' - check_call(split(cmd.format(kubeconfig, aws_iam_cluster_id))) + cmd = ( + "kubectl --kubeconfig={0} config set-credentials " + "aws-iam-user --exec-command=aws-iam-authenticator " + '--exec-arg="token" --exec-arg="-i" --exec-arg="{1}" ' + '--exec-arg="-r" --exec-arg="<>" ' + "--exec-api-version=client.authentication.k8s.io/v1alpha1" + ) + check_call(split(cmd.format(new_kubeconfig, aws_iam_cluster_id))) # not going to use aws-iam context by default since we don't have # the desired arn. This will make the config not usable if copied. 
# cmd = 'kubectl config --kubeconfig={0} ' \ - # 'use-context aws-iam-authenticator'.format(kubeconfig) + # 'use-context aws-iam-authenticator'.format(new_kubeconfig) # check_call(split(cmd)) + if old_kubeconfig.exists(): + changed = new_kubeconfig.read_text() != old_kubeconfig.read_text() + else: + changed = True + if changed: + new_kubeconfig.rename(old_kubeconfig) def parse_extra_args(config_key): - elements = hookenv.config().get(config_key, '').split() + elements = hookenv.config().get(config_key, "").split() args = {} for element in elements: - if '=' in element: - key, _, value = element.partition('=') + if "=" in element: + key, _, value = element.partition("=") args[key] = value else: - args[element] = 'true' + args[element] = "true" return args @@ -411,7 +466,7 @@ def configure_kubernetes_service(key, service, base_args, extra_args_key): # CIS benchmark action may inject kv config to pass failing tests. Merge # these after the func args as they should take precedence. - cis_args_key = 'cis-' + service + cis_args_key = "cis-" + service cis_args = db.get(cis_args_key) or {} args.update(cis_args) @@ -419,16 +474,16 @@ def configure_kubernetes_service(key, service, base_args, extra_args_key): # construct an arg string for use by 'snap set'. 
args = {k: v for k, v in args.items() if v is not None} args = ['--%s="%s"' % arg for arg in args.items()] - args = ' '.join(args) + args = " ".join(args) snap_opts = {} for arg in prev_snap_args: # remove previous args by setting to null - snap_opts[arg] = 'null' - snap_opts['args'] = args - snap_opts = ['%s=%s' % opt for opt in snap_opts.items()] + snap_opts[arg] = "null" + snap_opts["args"] = args + snap_opts = ["%s=%s" % opt for opt in snap_opts.items()] - cmd = ['snap', 'set', service] + snap_opts + cmd = ["snap", "set", service] + snap_opts check_call(cmd) # Now that we've started doing snap configuration through the "args" @@ -437,36 +492,36 @@ def configure_kubernetes_service(key, service, base_args, extra_args_key): def _snap_common_path(component): - return Path('/var/snap/{}/common'.format(component)) + return Path("/var/snap/{}/common".format(component)) def cloud_config_path(component): - return _snap_common_path(component) / 'cloud-config.conf' + return _snap_common_path(component) / "cloud-config.conf" def _gcp_creds_path(component): - return _snap_common_path(component) / 'gcp-creds.json' + return _snap_common_path(component) / "gcp-creds.json" def _daemon_env_path(component): - return _snap_common_path(component) / 'environment' + return _snap_common_path(component) / "environment" def _cloud_endpoint_ca_path(component): - return _snap_common_path(component) / 'cloud-endpoint-ca.crt' + return _snap_common_path(component) / "cloud-endpoint-ca.crt" def encryption_config_path(): - apiserver_snap_common_path = _snap_common_path('kube-apiserver') - encryption_conf_dir = apiserver_snap_common_path / 'encryption' - return encryption_conf_dir / 'encryption_config.yaml' + apiserver_snap_common_path = _snap_common_path("kube-apiserver") + encryption_conf_dir = apiserver_snap_common_path / "encryption" + return encryption_conf_dir / "encryption_config.yaml" def write_gcp_snap_config(component): # gcp requires additional credentials setup - gcp = 
endpoint_from_flag('endpoint.gcp.ready') + gcp = endpoint_from_flag("endpoint.gcp.ready") creds_path = _gcp_creds_path(component) - with creds_path.open('w') as fp: + with creds_path.open("w") as fp: os.fchmod(fp.fileno(), 0o600) fp.write(gcp.credentials) @@ -474,197 +529,206 @@ def write_gcp_snap_config(component): # services use the creds env var instead of the metadata server, as # well as making the cluster multizone comp_cloud_config_path = cloud_config_path(component) - comp_cloud_config_path.write_text('[Global]\n' - 'token-url = nil\n' - 'multizone = true\n') + comp_cloud_config_path.write_text( + "[Global]\n" "token-url = nil\n" "multizone = true\n" + ) daemon_env_path = _daemon_env_path(component) if daemon_env_path.exists(): daemon_env = daemon_env_path.read_text() - if not daemon_env.endswith('\n'): - daemon_env += '\n' + if not daemon_env.endswith("\n"): + daemon_env += "\n" else: - daemon_env = '' + daemon_env = "" if gcp_creds_env_key not in daemon_env: - daemon_env += '{}={}\n'.format(gcp_creds_env_key, creds_path) + daemon_env += "{}={}\n".format(gcp_creds_env_key, creds_path) daemon_env_path.parent.mkdir(parents=True, exist_ok=True) daemon_env_path.write_text(daemon_env) def generate_openstack_cloud_config(): # openstack requires additional credentials setup - openstack = endpoint_from_flag('endpoint.openstack.ready') + openstack = endpoint_from_flag("endpoint.openstack.ready") lines = [ - '[Global]', - 'auth-url = {}'.format(openstack.auth_url), - 'region = {}'.format(openstack.region), - 'username = {}'.format(openstack.username), - 'password = {}'.format(openstack.password), - 'tenant-name = {}'.format(openstack.project_name), - 'domain-name = {}'.format(openstack.user_domain_name), - 'tenant-domain-name = {}'.format(openstack.project_domain_name), + "[Global]", + "auth-url = {}".format(openstack.auth_url), + "region = {}".format(openstack.region), + "username = {}".format(openstack.username), + "password = {}".format(openstack.password), + 
"tenant-name = {}".format(openstack.project_name), + "domain-name = {}".format(openstack.user_domain_name), + "tenant-domain-name = {}".format(openstack.project_domain_name), ] if openstack.endpoint_tls_ca: - lines.append('ca-file = /etc/config/endpoint-ca.cert') + lines.append("ca-file = /etc/config/endpoint-ca.cert") - lines.extend([ - '', - '[LoadBalancer]', - ]) + lines.extend( + [ + "", + "[LoadBalancer]", + ] + ) if openstack.has_octavia in (True, None): # Newer integrator charm will detect whether underlying OpenStack has # Octavia enabled so we can set this intelligently. If we're still # related to an older integrator, though, default to assuming Octavia # is available. - lines.append('use-octavia = true') + lines.append("use-octavia = true") else: - lines.append('use-octavia = false') - lines.append('lb-provider = haproxy') + lines.append("use-octavia = false") + lines.append("lb-provider = haproxy") if openstack.subnet_id: - lines.append('subnet-id = {}'.format(openstack.subnet_id)) + lines.append("subnet-id = {}".format(openstack.subnet_id)) if openstack.floating_network_id: - lines.append('floating-network-id = {}'.format( - openstack.floating_network_id)) + lines.append("floating-network-id = {}".format(openstack.floating_network_id)) if openstack.lb_method: - lines.append('lb-method = {}'.format( - openstack.lb_method)) + lines.append("lb-method = {}".format(openstack.lb_method)) if openstack.manage_security_groups: - lines.append('manage-security-groups = {}'.format( - openstack.manage_security_groups)) - if any([openstack.bs_version, - openstack.trust_device_path, - openstack.ignore_volume_az]): - lines.append('') - lines.append('[BlockStorage]') + lines.append( + "manage-security-groups = {}".format(openstack.manage_security_groups) + ) + if any( + [openstack.bs_version, openstack.trust_device_path, openstack.ignore_volume_az] + ): + lines.append("") + lines.append("[BlockStorage]") if openstack.bs_version is not None: - lines.append('bs-version = 
{}'.format(openstack.bs_version)) + lines.append("bs-version = {}".format(openstack.bs_version)) if openstack.trust_device_path is not None: - lines.append('trust-device-path = {}'.format( - openstack.trust_device_path)) + lines.append("trust-device-path = {}".format(openstack.trust_device_path)) if openstack.ignore_volume_az is not None: - lines.append('ignore-volume-az = {}'.format( - openstack.ignore_volume_az)) - return '\n'.join(lines) + '\n' + lines.append("ignore-volume-az = {}".format(openstack.ignore_volume_az)) + return "\n".join(lines) + "\n" def write_azure_snap_config(component): - azure = endpoint_from_flag('endpoint.azure.ready') + azure = endpoint_from_flag("endpoint.azure.ready") comp_cloud_config_path = cloud_config_path(component) - comp_cloud_config_path.write_text(json.dumps({ - 'useInstanceMetadata': True, - 'useManagedIdentityExtension': True, - 'subscriptionId': azure.subscription_id, - 'resourceGroup': azure.resource_group, - 'location': azure.resource_group_location, - 'vnetName': azure.vnet_name, - 'vnetResourceGroup': azure.vnet_resource_group, - 'subnetName': azure.subnet_name, - 'securityGroupName': azure.security_group_name, - 'loadBalancerSku': 'standard' - })) + comp_cloud_config_path.write_text( + json.dumps( + { + "useInstanceMetadata": True, + "useManagedIdentityExtension": azure.managed_identity, + "subscriptionId": azure.subscription_id, + "resourceGroup": azure.resource_group, + "location": azure.resource_group_location, + "vnetName": azure.vnet_name, + "vnetResourceGroup": azure.vnet_resource_group, + "subnetName": azure.subnet_name, + "securityGroupName": azure.security_group_name, + "loadBalancerSku": "standard", + "securityGroupResourceGroup": azure.security_group_resource_group, + "aadClientId": azure.aad_client_id, + "aadClientSecret": azure.aad_client_secret, + "tenantId": azure.tenant_id, + } + ) + ) -def configure_kube_proxy(configure_prefix, api_servers, cluster_cidr, - bind_address=None): +def configure_kube_proxy( 
+ configure_prefix, api_servers, cluster_cidr, bind_address=None +): kube_proxy_opts = {} - kube_proxy_opts['cluster-cidr'] = cluster_cidr - kube_proxy_opts['kubeconfig'] = kubeproxyconfig_path - kube_proxy_opts['logtostderr'] = 'true' - kube_proxy_opts['v'] = '0' + kube_proxy_opts["cluster-cidr"] = cluster_cidr + kube_proxy_opts["kubeconfig"] = kubeproxyconfig_path + kube_proxy_opts["logtostderr"] = "true" + kube_proxy_opts["v"] = "0" num_apis = len(api_servers) - kube_proxy_opts['master'] = api_servers[get_unit_number() % num_apis] - kube_proxy_opts['hostname-override'] = get_node_name() + kube_proxy_opts["master"] = api_servers[get_unit_number() % num_apis] + kube_proxy_opts["hostname-override"] = get_node_name() if bind_address: - kube_proxy_opts['bind-address'] = bind_address + kube_proxy_opts["bind-address"] = bind_address elif is_ipv6(cluster_cidr): - kube_proxy_opts['bind-address'] = '::' + kube_proxy_opts["bind-address"] = "::" if host.is_container(): - kube_proxy_opts['conntrack-max-per-core'] = '0' + kube_proxy_opts["conntrack-max-per-core"] = "0" if is_dual_stack(cluster_cidr): - kube_proxy_opts['feature-gates'] = "IPv6DualStack=true" + kube_proxy_opts["feature-gates"] = "IPv6DualStack=true" - configure_kubernetes_service(configure_prefix, 'kube-proxy', - kube_proxy_opts, 'proxy-extra-args') + configure_kubernetes_service( + configure_prefix, "kube-proxy", kube_proxy_opts, "proxy-extra-args" + ) def get_unit_number(): - return int(hookenv.local_unit().split('/')[1]) + return int(hookenv.local_unit().split("/")[1]) def cluster_cidr(): - '''Return the cluster CIDR provided by the CNI''' - cni = endpoint_from_flag('cni.available') + """Return the cluster CIDR provided by the CNI""" + cni = endpoint_from_flag("cni.available") if not cni: return None config = hookenv.config() - if 'default-cni' in config: + if "default-cni" in config: # master - default_cni = config['default-cni'] + default_cni = config["default-cni"] else: # worker - kube_control = 
endpoint_from_flag('kube-control.dns.available') + kube_control = endpoint_from_flag("kube-control.dns.available") if not kube_control: return None default_cni = kube_control.get_default_cni() - return cni.get_config(default=default_cni)['cidr'] + return cni.get_config(default=default_cni)["cidr"] def is_dual_stack(cidrs): - '''Detect IPv4/IPv6 dual stack from CIDRs''' + """Detect IPv4/IPv6 dual stack from CIDRs""" return {net.version for net in get_networks(cidrs)} == {4, 6} def is_ipv4(cidrs): - '''Detect IPv6 from CIDRs''' + """Detect IPv6 from CIDRs""" return get_ipv4_network(cidrs) is not None def is_ipv6(cidrs): - '''Detect IPv6 from CIDRs''' + """Detect IPv6 from CIDRs""" return get_ipv6_network(cidrs) is not None def is_ipv6_preferred(cidrs): - '''Detect if IPv6 is preffered from CIDRs''' + """Detect if IPv6 is preffered from CIDRs""" return get_networks(cidrs)[0].version == 6 def get_networks(cidrs): - '''Convert a comma-separated list of CIDRs to a list of networks.''' + """Convert a comma-separated list of CIDRs to a list of networks.""" if not cidrs: return [] - return [ipaddress.ip_interface(cidr).network for cidr in cidrs.split(',')] + return [ipaddress.ip_interface(cidr).network for cidr in cidrs.split(",")] def get_ipv4_network(cidrs): - '''Get the IPv4 network from the given CIDRs or None''' + """Get the IPv4 network from the given CIDRs or None""" return {net.version: net for net in get_networks(cidrs)}.get(4) def get_ipv6_network(cidrs): - '''Get the IPv6 network from the given CIDRs or None''' + """Get the IPv6 network from the given CIDRs or None""" return {net.version: net for net in get_networks(cidrs)}.get(6) def enable_ipv6_forwarding(): - '''Enable net.ipv6.conf.all.forwarding in sysctl if it is not already.''' - check_call(['sysctl', 'net.ipv6.conf.all.forwarding=1']) + """Enable net.ipv6.conf.all.forwarding in sysctl if it is not already.""" + check_call(["sysctl", "net.ipv6.conf.all.forwarding=1"]) def get_bind_addrs(ipv4=True, 
ipv6=True): - '''Get all global-scoped addresses that we might bind to.''' + """Get all global-scoped addresses that we might bind to.""" try: output = check_output(["ip", "-br", "addr", "show", "scope", "global"]) except CalledProcessError: # stderr will have any details, and go to the log - hookenv.log('Unable to determine global addresses', hookenv.ERROR) + hookenv.log("Unable to determine global addresses", hookenv.ERROR) return [] - ignore_interfaces = ('lxdbr', 'flannel', 'cni', 'virbr', 'docker') + ignore_interfaces = ("lxdbr", "flannel", "cni", "virbr", "docker") accept_versions = set() if ipv4: accept_versions.add(4) @@ -672,10 +736,11 @@ def get_bind_addrs(ipv4=True, ipv6=True): accept_versions.add(6) addrs = [] - for line in output.decode('utf8').splitlines(): + for line in output.decode("utf8").splitlines(): intf, state, *intf_addrs = line.split() - if state != 'UP' or any(intf.startswith(prefix) - for prefix in ignore_interfaces): + if state != "UP" or any( + intf.startswith(prefix) for prefix in ignore_interfaces + ): continue for addr in intf_addrs: ip_addr = ipaddress.ip_interface(addr).ip @@ -689,24 +754,171 @@ class InvalidVMwareHost(Exception): def _get_vmware_uuid(): - serial_id_file = '/sys/class/dmi/id/product_serial' + serial_id_file = "/sys/class/dmi/id/product_serial" # The serial id from VMWare VMs comes in following format: # VMware-42 28 13 f5 d4 20 71 61-5d b0 7b 96 44 0c cf 54 try: - with open(serial_id_file, 'r') as f: + with open(serial_id_file, "r") as f: serial_string = f.read().strip() if "VMware-" not in serial_string: - hookenv.log("Unable to find VMware ID in " - "product_serial: {}".format(serial_string)) + hookenv.log( + "Unable to find VMware ID in " + "product_serial: {}".format(serial_string) + ) raise InvalidVMwareHost - serial_string = serial_string.split( - "VMware-")[1].replace(" ", "").replace("-", "") + serial_string = ( + serial_string.split("VMware-")[1].replace(" ", "").replace("-", "") + ) uuid = "%s-%s-%s-%s-%s" 
% ( - serial_string[0:8], serial_string[8:12], serial_string[12:16], - serial_string[16:20], serial_string[20:32]) + serial_string[0:8], + serial_string[8:12], + serial_string[12:16], + serial_string[16:20], + serial_string[20:32], + ) except IOError as err: hookenv.log("Unable to read UUID from sysfs: {}".format(err)) - uuid = 'UNKNOWN' + uuid = "UNKNOWN" return uuid + +def token_generator(length=32): + """Generate a random token for use in account tokens. + + param: length - the length of the token to generate + """ + alpha = string.ascii_letters + string.digits + token = "".join(random.SystemRandom().choice(alpha) for _ in range(length)) + return token + + +def get_secret_names(): + """Return a dict of 'username: secret_id' for Charmed Kubernetes users.""" + try: + output = kubectl( + "get", + "secrets", + "-n", + AUTH_SECRET_NS, + "--field-selector", + "type={}".format(AUTH_SECRET_TYPE), + "-o", + "json", + ).decode("UTF-8") + except (CalledProcessError, FileNotFoundError): + # The api server may not be up, or we may be trying to run kubelet before + # the snap is installed. Send back an empty dict. + hookenv.log("Unable to get existing secrets", level=hookenv.WARNING) + return {} + + secrets = json.loads(output) + secret_names = {} + if "items" in secrets: + for secret in secrets["items"]: + try: + secret_id = secret["metadata"]["name"] + username_b64 = secret["data"]["username"].encode("UTF-8") + except (KeyError, TypeError): + # CK secrets will have populated 'data', but not all secrets do + continue + secret_names[b64decode(username_b64).decode("UTF-8")] = secret_id + return secret_names + + +def generate_rfc1123(length=10): + """Generate a random string compliant with RFC 1123. 
+ + https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#dns-subdomain-names + + param: length - the length of the string to generate + """ + length = 253 if length > 253 else length + valid_chars = string.ascii_lowercase + string.digits + rand_str = "".join(random.SystemRandom().choice(valid_chars) for _ in range(length)) + return rand_str + + +def create_secret(token, username, user, groups=None): + secrets = get_secret_names() + if username in secrets: + # Use existing secret ID if one exists for our username + secret_id = secrets[username] + else: + # secret IDs must be unique and rfc1123 compliant + sani_name = re.sub("[^0-9a-z.-]+", "-", user.lower()) + secret_id = "auth-{}-{}".format(sani_name, generate_rfc1123(10)) + + # The authenticator expects tokens to be in the form user::token + token_delim = "::" + if token_delim not in token: + token = "{}::{}".format(user, token) + + context = { + "type": AUTH_SECRET_TYPE, + "secret_name": secret_id, + "secret_namespace": AUTH_SECRET_NS, + "user": b64encode(user.encode("UTF-8")).decode("utf-8"), + "username": b64encode(username.encode("UTF-8")).decode("utf-8"), + "password": b64encode(token.encode("UTF-8")).decode("utf-8"), + "groups": b64encode(groups.encode("UTF-8")).decode("utf-8") if groups else "", + } + with tempfile.NamedTemporaryFile() as tmp_manifest: + render("cdk.auth-webhook-secret.yaml", tmp_manifest.name, context=context) + + if kubectl_manifest("apply", tmp_manifest.name): + hookenv.log("Created secret for {}".format(username)) + return True + else: + hookenv.log("WARN: Unable to create secret for {}".format(username)) + return False + + +def get_secret_password(username): + """Get the password for the given user from the secret that CK created.""" + try: + output = kubectl( + "get", + "secrets", + "-n", + AUTH_SECRET_NS, + "--field-selector", + "type={}".format(AUTH_SECRET_TYPE), + "-o", + "json", + ).decode("UTF-8") + except CalledProcessError: + # NB: apiserver probably isn't 
up. This can happen on boostrap or upgrade + # while trying to build kubeconfig files. If we need the 'admin' token during + # this time, pull it directly out of the kubeconfig file if possible. + token = None + if username == "admin": + admin_kubeconfig = Path("/root/.kube/config") + if admin_kubeconfig.exists(): + data = yaml.safe_load(admin_kubeconfig.read_text()) + try: + token = data["users"][0]["user"]["token"] + except (KeyError, IndexError, TypeError): + pass + return token + except FileNotFoundError: + # New deployments may ask for a token before the kubectl snap is installed. + # Give them nothing! + return None + + secrets = json.loads(output) + if "items" in secrets: + for secret in secrets["items"]: + try: + data_b64 = secret["data"] + password_b64 = data_b64["password"].encode("UTF-8") + username_b64 = data_b64["username"].encode("UTF-8") + except (KeyError, TypeError): + # CK authn secrets will have populated 'data', but not all secrets do + continue + + password = b64decode(password_b64).decode("UTF-8") + secret_user = b64decode(username_b64).decode("UTF-8") + if username == secret_user: + return password + return None diff --git a/kubeapi-load-balancer/lib/charms/leadership.py b/kubeapi-load-balancer/lib/charms/leadership.py new file mode 100644 index 0000000..d2a95fa --- /dev/null +++ b/kubeapi-load-balancer/lib/charms/leadership.py @@ -0,0 +1,68 @@ +# Copyright 2015-2016 Canonical Ltd. +# +# This file is part of the Leadership Layer for Juju. +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License version 3, as +# published by the Free Software Foundation. +# +# This program is distributed in the hope that it will be useful, but +# WITHOUT ANY WARRANTY; without even the implied warranties of +# MERCHANTABILITY, SATISFACTORY QUALITY, or FITNESS FOR A PARTICULAR +# PURPOSE. See the GNU General Public License for more details. 
+# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . + +from charmhelpers.core import hookenv +from charmhelpers.core import unitdata + +from charms import reactive +from charms.reactive import not_unless + + +__all__ = ['leader_get', 'leader_set'] + + +@not_unless('leadership.is_leader') +def leader_set(*args, **kw): + '''Change leadership settings, per charmhelpers.core.hookenv.leader_set. + + Settings may either be passed in as a single dictionary, or using + keyword arguments. All values must be strings. + + The leadership.set.{key} reactive state will be set while the + leadership hook environment setting remains set. + + Changed leadership settings will set the leadership.changed.{key} + and leadership.changed states. These states will remain set until + the following hook. + + These state changes take effect immediately on the leader, and + in future hooks run on non-leaders. In this way both leaders and + non-leaders can share handlers, waiting on these states. 
+ ''' + if args: + if len(args) > 1: + raise TypeError('leader_set() takes 1 positional argument but ' + '{} were given'.format(len(args))) + else: + settings = dict(args[0]) + else: + settings = {} + settings.update(kw) + previous = unitdata.kv().getrange('leadership.settings.', strip=True) + + for key, value in settings.items(): + if value != previous.get(key): + reactive.set_state('leadership.changed.{}'.format(key)) + reactive.set_state('leadership.changed') + reactive.helpers.toggle_state('leadership.set.{}'.format(key), + value is not None) + hookenv.leader_set(settings) + unitdata.kv().update(settings, prefix='leadership.settings.') + + +def leader_get(attribute=None): + '''Return leadership settings, per charmhelpers.core.hookenv.leader_get.''' + return hookenv.leader_get(attribute) diff --git a/kubeapi-load-balancer/metadata.yaml b/kubeapi-load-balancer/metadata.yaml index 4f96233..44b6e89 100644 --- a/kubeapi-load-balancer/metadata.yaml +++ b/kubeapi-load-balancer/metadata.yaml @@ -33,4 +33,6 @@ "interface": "http" "loadbalancer": "interface": "public-address" + "lb-consumers": + "interface": "loadbalancer" "subordinate": !!bool "false" diff --git a/kubeapi-load-balancer/reactive/leadership.py b/kubeapi-load-balancer/reactive/leadership.py new file mode 100644 index 0000000..29c6f3a --- /dev/null +++ b/kubeapi-load-balancer/reactive/leadership.py @@ -0,0 +1,68 @@ +# Copyright 2015-2016 Canonical Ltd. +# +# This file is part of the Leadership Layer for Juju. +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License version 3, as +# published by the Free Software Foundation. +# +# This program is distributed in the hope that it will be useful, but +# WITHOUT ANY WARRANTY; without even the implied warranties of +# MERCHANTABILITY, SATISFACTORY QUALITY, or FITNESS FOR A PARTICULAR +# PURPOSE. See the GNU General Public License for more details. 
+# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . + +from charmhelpers.core import hookenv +from charmhelpers.core import unitdata + +from charms import reactive +from charms.leadership import leader_get, leader_set + + +__all__ = ['leader_get', 'leader_set'] # Backwards compatibility + + +def initialize_leadership_state(): + '''Initialize leadership.* states from the hook environment. + + Invoked by hookenv.atstart() so states are available in + @hook decorated handlers. + ''' + is_leader = hookenv.is_leader() + if is_leader: + hookenv.log('Initializing Leadership Layer (is leader)') + else: + hookenv.log('Initializing Leadership Layer (is follower)') + + reactive.helpers.toggle_state('leadership.is_leader', is_leader) + + previous = unitdata.kv().getrange('leadership.settings.', strip=True) + current = hookenv.leader_get() + + # Handle deletions. + for key in set(previous.keys()) - set(current.keys()): + current[key] = None + + any_changed = False + for key, value in current.items(): + reactive.helpers.toggle_state('leadership.changed.{}'.format(key), + value != previous.get(key)) + if value != previous.get(key): + any_changed = True + reactive.helpers.toggle_state('leadership.set.{}'.format(key), + value is not None) + reactive.helpers.toggle_state('leadership.changed', any_changed) + + unitdata.kv().update(current, prefix='leadership.settings.') + + +# Per https://github.com/juju-solutions/charms.reactive/issues/33, +# this module may be imported multiple times so ensure the +# initialization hook is only registered once. I have to piggy back +# onto the namespace of a module imported before reactive discovery +# to do this. 
+if not hasattr(reactive, '_leadership_registered'): + hookenv.atstart(initialize_leadership_state) + reactive._leadership_registered = True diff --git a/kubeapi-load-balancer/reactive/load_balancer.py b/kubeapi-load-balancer/reactive/load_balancer.py index e6c35ac..e6a0b69 100644 --- a/kubeapi-load-balancer/reactive/load_balancer.py +++ b/kubeapi-load-balancer/reactive/load_balancer.py @@ -14,6 +14,7 @@ # See the License for the specific language governing permissions and # limitations under the License. +import itertools import os import socket import subprocess @@ -23,7 +24,7 @@ from pathlib import Path from charms.reactive import when, when_any, when_not from charms.reactive import set_flag, is_state from charms.reactive import hook -from charms.reactive import clear_flag, endpoint_from_flag +from charms.reactive import clear_flag, endpoint_from_flag, endpoint_from_name from charmhelpers.core import hookenv from charmhelpers.core import host from charmhelpers.contrib.charmsupport import nrpe @@ -65,11 +66,10 @@ server_crt_path = cert_dir / 'server.crt' server_key_path = cert_dir / 'server.key' -@when('certificates.available', 'website.available') +@when('certificates.available') def request_server_certificates(): '''Send the data that is required to create a server certificate for this server.''' - website = endpoint_from_flag('website.available') # Use the public ip of this unit as the Common Name for the certificate. common_name = hookenv.unit_public_ip() @@ -80,7 +80,7 @@ def request_server_certificates(): # The CN field is checked as a hostname, so if it's an IP, it # won't match unless also included in the SANs as an IP field. 
common_name, - kubernetes_common.get_ingress_address(website.endpoint_name), + kubernetes_common.get_ingress_address('website'), socket.gethostname(), socket.getfqdn(), ] + bind_ips @@ -107,15 +107,6 @@ def request_server_certificates(): key_path=server_key_path) -@when('config.changed.extra_sans', 'certificates.available', - 'website.available') -def update_certificate(): - # Using the config.changed.extra_sans flag to catch changes. - # IP changes will take ~5 minutes or so to propagate, but - # it will update. - request_server_certificates() - - @when('certificates.server.cert.available', 'nginx.available') @when_any('tls_client.certs.changed', @@ -148,40 +139,62 @@ def maybe_write_apilb_logrotate_config(): fp.write(apilb_nginx) -@when('nginx.available', 'apiserver.available', +@when('nginx.available', 'tls_client.certs.saved') +@when_any('endpoint.lb-consumers.joined', + 'apiserver.available') @when_not('upgrade.series.in-progress') def install_load_balancer(): ''' Create the default vhost template for load balancing ''' - apiserver = endpoint_from_flag('apiserver.available') - # Do both the key and certificate exist? - if server_crt_path.exists() and server_key_path.exists(): - # At this point the cert and key exist, and they are owned by root. - chown = ['chown', 'www-data:www-data', str(server_crt_path)] + apiserver = endpoint_from_name('apiserver') + lb_consumers = endpoint_from_name('lb-consumers') - # Change the owner to www-data so the nginx process can read the cert. - subprocess.call(chown) - chown = ['chown', 'www-data:www-data', str(server_key_path)] + if not (server_crt_path.exists() and server_key_path.exists()): + hookenv.log('Skipping due to missing cert') + return + if not (apiserver.services() or lb_consumers.all_requests): + hookenv.log('Skipping due to requests not ready') + return - # Change the owner to www-data so the nginx process can read the key. 
- subprocess.call(chown) + # At this point the cert and key exist, and they are owned by root. + chown = ['chown', 'www-data:www-data', str(server_crt_path)] - port = hookenv.config('port') - hookenv.open_port(port) - services = apiserver.services() - nginx.configure_site( - 'apilb', - 'apilb.conf', - server_name='_', - services=services, - port=port, - server_certificate=str(server_crt_path), - server_key=str(server_key_path), - proxy_read_timeout=hookenv.config('proxy_read_timeout') - ) + # Change the owner to www-data so the nginx process can read the cert. + subprocess.call(chown) + chown = ['chown', 'www-data:www-data', str(server_key_path)] - maybe_write_apilb_logrotate_config() - status.active('Loadbalancer ready.') + # Change the owner to www-data so the nginx process can read the key. + subprocess.call(chown) + + servers = {} + if apiserver and apiserver.services(): + servers[hookenv.config('port')] = { + (h['hostname'], h['port']) + for service in apiserver.services() + for h in service['hosts'] + } + for request in lb_consumers.all_requests: + for server_port in request.port_mapping.keys(): + service = servers.setdefault(server_port, set()) + service.update( + (backend, backend_port) + for backend, backend_port in itertools.product( + request.backends, request.port_mapping.values() + ) + ) + nginx.configure_site( + 'apilb', + 'apilb.conf', + servers=servers, + server_certificate=str(server_crt_path), + server_key=str(server_key_path), + proxy_read_timeout=hookenv.config('proxy_read_timeout') + ) + + maybe_write_apilb_logrotate_config() + for listen_port in servers.keys(): + hookenv.open_port(listen_port) + status.active('Loadbalancer ready.') @hook('upgrade-charm') @@ -218,12 +231,7 @@ def set_nginx_version(): hookenv.application_version_set(version.rstrip()) -@when('website.available') -def provide_application_details(): - ''' re-use the nginx layer website relation to relay the hostname/port - to any consuming kubernetes-workers, or other units that 
require the - kubernetes API ''' - website = endpoint_from_flag('website.available') +def _get_lb_address(): hacluster = endpoint_from_flag('ha.connected') forced_lb_ips = hookenv.config('loadbalancer-ips').split() address = None @@ -240,12 +248,73 @@ def provide_application_details(): address = vips elif dns_record: address = dns_record - if address: - website.configure(port=hookenv.config('port'), - private_address=address, - hostname=address) + return address + + +def _get_lb_port(prefer_private=True): + lb_consumers = endpoint_from_name('lb-consumers') + + # prefer a port from the newer, more explicit relations + public = filter(lambda r: r.public, lb_consumers.all_requests) + private = filter(lambda r: not r.public, lb_consumers.all_requests) + lb_reqs = (private, public) if prefer_private else (public, private) + for lb_req in itertools.chain(*lb_reqs): + return list(lb_req.port_mapping)[0] + + # fall back to the config + return hookenv.config('port') + + +@when('endpoint.lb-consumers.joined', + 'leadership.is_leader') +def provide_lb_consumers(): + '''Respond to any LB requests via the lb-consumers relation. + + This is used in favor for the more complex two relation setup using the + website and loadbalancer relations going forward. 
+ ''' + lb_consumers = endpoint_from_name('lb-consumers') + lb_address = _get_lb_address() + for request in lb_consumers.all_requests: + response = request.response + if request.protocol not in (request.protocols.tcp, + request.protocols.http, + request.protocols.https): + response.error_type = response.error_types.unsupported + response.error_fields = { + 'protocol': 'Protocol must be one of: tcp, http, https' + } + lb_consumers.send_response(request) + continue + if lb_address: + private_address = lb_address + public_address = lb_address + else: + network_info = hookenv.network_get('lb-consumers', + str(request.relation.id)) + private_address = network_info['ingress-addresses'][0] + public_address = hookenv.unit_get('public-address') + if request.public: + response.address = public_address + else: + response.address = private_address + lb_consumers.send_response(request) + + +@when('website.available') +def provide_application_details(): + ''' re-use the nginx layer website relation to relay the hostname/port + to any consuming kubernetes-workers, or other units that require the + kubernetes API ''' + website = endpoint_from_flag('website.available') + lb_address = _get_lb_address() + lb_port = _get_lb_port(prefer_private=True) + if lb_address: + website.configure(port=lb_port, + private_address=lb_address, + hostname=lb_address) else: - website.configure(port=hookenv.config('port')) + website.configure(port=lb_port) @when('loadbalancer.available') @@ -253,26 +322,11 @@ def provide_loadbalancing(): '''Send the public address and port to the public-address interface, so the subordinates can get the public address of this loadbalancer.''' loadbalancer = endpoint_from_flag('loadbalancer.available') - hacluster = endpoint_from_flag('ha.connected') - forced_lb_ips = hookenv.config('loadbalancer-ips').split() - if forced_lb_ips: - address = forced_lb_ips - elif hacluster: - # in the hacluster world, we dump the vip or the dns - # on every unit's data. 
This is because the - # kubernetes-master charm just grabs the first - # one it sees and uses that ip/dns. - vips = hookenv.config('ha-cluster-vip').split() - dns_record = hookenv.config('ha-cluster-dns') - if vips: - address = vips - elif dns_record: - address = dns_record - else: - address = hookenv.unit_get('public-address') - else: + address = _get_lb_address() + lb_port = _get_lb_port(prefer_private=False) + if not address: address = hookenv.unit_get('public-address') - loadbalancer.set_address_port(address, hookenv.config('port')) + loadbalancer.set_address_port(address, lb_port) @when('nrpe-external-master.available') diff --git a/kubeapi-load-balancer/script/bootstrap b/kubeapi-load-balancer/script/bootstrap deleted file mode 100644 index c883e4c..0000000 --- a/kubeapi-load-balancer/script/bootstrap +++ /dev/null @@ -1,7 +0,0 @@ -#!/bin/bash - -set -x - -sudo apt update -sudo snap install charm --classic -sudo snap install yq diff --git a/kubeapi-load-balancer/script/build b/kubeapi-load-balancer/script/build deleted file mode 100644 index 6bbbc48..0000000 --- a/kubeapi-load-balancer/script/build +++ /dev/null @@ -1,7 +0,0 @@ -#!/bin/bash -set -x - -export PATH=/snap/bin:$PATH -: "${CHARM_BUILD_DIR:=/tmp/charms}" - -charm build -r --force -o "$CHARM_BUILD_DIR" diff --git a/kubeapi-load-balancer/script/upload b/kubeapi-load-balancer/script/upload deleted file mode 100644 index b8bd049..0000000 --- a/kubeapi-load-balancer/script/upload +++ /dev/null @@ -1,19 +0,0 @@ -#!/bin/bash -set -x - -export PATH=/snap/bin:$PATH - -: "${CHARM_BUILD_DIR:=/tmp/charms}" - -charm whoami -RET=$? -if ((RET > 0)); then - echo "Not logged into charmstore" - exit 1 -fi - -URL=$(charm push "$CHARM_BUILD_DIR"/builds/"$CHARM"/. 
cs:~"$NAMESPACE"/"$CHARM" | yq r - url) - -if [ "$CHANNEL" != unpublished ]; then - charm release "$URL" --channel "$CHANNEL" -fi diff --git a/kubeapi-load-balancer/templates/apilb.conf b/kubeapi-load-balancer/templates/apilb.conf index 17bc61f..dc6e56c 100644 --- a/kubeapi-load-balancer/templates/apilb.conf +++ b/kubeapi-load-balancer/templates/apilb.conf @@ -1,16 +1,14 @@ -{% for app in services -%} -upstream target_service { - {% for host in app['hosts'] -%} - server {{ host['hostname'] }}:{{ host['port'] }}; - {% endfor %} +{% for server_port, backends in servers.items() -%} +upstream upstream_{{ server_port }} { + {%- for backend, backend_port in backends %} + server {{ backend }}:{{ backend_port }}; + {%- endfor %} } -{% endfor %} - server { - listen {{ port }} ssl http2; - listen [::]:{{ port }} ssl http2 ipv6only=on; - server_name {{ server_name }}; + listen {{ server_port }} ssl http2; + listen [::]:{{ server_port }} ssl http2 ipv6only=on; + server_name server_{{ server_port }}; access_log /var/log/nginx.access.log; error_log /var/log/nginx.error.log; @@ -36,7 +34,8 @@ server { add_header X-Stream-Protocol-Version $upstream_http_x_stream_protocol_version; - proxy_pass https://target_service; + proxy_pass https://upstream_{{ server_port }}; proxy_read_timeout {{ proxy_read_timeout }}; } } +{%- endfor %} diff --git a/kubeapi-load-balancer/templates/cdk.auth-webhook-secret.yaml b/kubeapi-load-balancer/templates/cdk.auth-webhook-secret.yaml new file mode 100644 index 0000000..a12c402 --- /dev/null +++ b/kubeapi-load-balancer/templates/cdk.auth-webhook-secret.yaml @@ -0,0 +1,13 @@ +# Manifest for CK secrets that auth-webhook expects +--- +apiVersion: v1 +kind: Secret +metadata: + name: {{ secret_name }} + namespace: {{ secret_namespace }} +type: {{ type }} +data: + uid: {{ user }} + username: {{ username }} + password: {{ password }} + groups: '{{ groups }}' diff --git a/kubeapi-load-balancer/tests/data/bundle.yaml b/kubeapi-load-balancer/tests/data/bundle.yaml 
new file mode 100644 index 0000000..be515f5 --- /dev/null +++ b/kubeapi-load-balancer/tests/data/bundle.yaml @@ -0,0 +1,107 @@ +description: A minimal two-machine Kubernetes cluster, appropriate for development. +series: focal +machines: + '0': + constraints: cores=2 mem=4G root-disk=16G + series: focal + '1': + constraints: cores=4 mem=4G root-disk=16G + series: focal +services: + containerd: + charm: cs:~containers/containerd + channel: edge + resources: {} + easyrsa: + charm: cs:~containers/easyrsa + channel: edge + num_units: 1 + resources: + easyrsa: 5 + to: + - '1' + etcd: + charm: cs:~containers/etcd + channel: edge + num_units: 1 + options: + channel: 3.4/stable + resources: + core: 0 + etcd: 3 + snapshot: 0 + to: + - '0' + flannel: + charm: cs:~containers/flannel + channel: edge + resources: + flannel-amd64: 653 + flannel-arm64: 650 + flannel-s390x: 637 + kubernetes-master: + charm: cs:~containers/kubernetes-master + channel: edge + constraints: cores=2 mem=4G root-disk=16G + expose: true + num_units: 1 + options: + channel: 1.20/stable + resources: + cdk-addons: 0 + core: 0 + kube-apiserver: 0 + kube-controller-manager: 0 + kube-proxy: 0 + kube-scheduler: 0 + kubectl: 0 + to: + - '0' + kubernetes-worker: + charm: cs:~containers/kubernetes-worker + channel: edge + constraints: cores=4 mem=4G root-disk=16G + num_units: 1 + options: + channel: 1.20/stable + resources: + cni-amd64: 690 + cni-arm64: 681 + cni-s390x: 693 + core: 0 + kube-proxy: 0 + kubectl: 0 + kubelet: 0 + to: + - '1' + kubeapi-load-balancer: + charm: {{k8s_lb_charm}} + num_units: 1 + expose: true +relations: +- - kubeapi-load-balancer:lb-consumers + - kubernetes-master:loadbalancer-internal +- - kubeapi-load-balancer:lb-consumers + - kubernetes-master:loadbalancer-external +- - kubernetes-master:kube-control + - kubernetes-worker:kube-control +- - kubernetes-master:certificates + - easyrsa:client +- - kubeapi-load-balancer:certificates + - easyrsa:client +- - kubernetes-master:etcd + - 
etcd:db +- - kubernetes-worker:certificates + - easyrsa:client +- - etcd:certificates + - easyrsa:client +- - flannel:etcd + - etcd:db +- - flannel:cni + - kubernetes-master:cni +- - flannel:cni + - kubernetes-worker:cni +- - containerd:containerd + - kubernetes-worker:container-runtime +- - containerd:containerd + - kubernetes-master:container-runtime diff --git a/kubeapi-load-balancer/tests/functional/conftest.py b/kubeapi-load-balancer/tests/functional/conftest.py new file mode 100644 index 0000000..a92e249 --- /dev/null +++ b/kubeapi-load-balancer/tests/functional/conftest.py @@ -0,0 +1,4 @@ +import charms.unit_test + + +charms.unit_test.patch_reactive() diff --git a/kubeapi-load-balancer/tests/functional/test_k8s_common.py b/kubeapi-load-balancer/tests/functional/test_k8s_common.py new file mode 100644 index 0000000..4b867e6 --- /dev/null +++ b/kubeapi-load-balancer/tests/functional/test_k8s_common.py @@ -0,0 +1,90 @@ +from functools import partial + +import pytest +from unittest import mock +from charms.layer import kubernetes_common + + +class TestCreateKubeConfig: + @pytest.fixture(autouse=True) + def _files(self, tmp_path): + self.cfg_file = tmp_path / "config" + self.ca_file = tmp_path / "ca.crt" + self.ca_file.write_text("foo") + self.ckc = partial( + kubernetes_common.create_kubeconfig, + self.cfg_file, + "server", + self.ca_file, + ) + + def test_guard_clauses(self): + with pytest.raises(ValueError): + self.ckc() + assert not self.cfg_file.exists() + with pytest.raises(ValueError): + self.ckc(token="token", password="password") + assert not self.cfg_file.exists() + with pytest.raises(ValueError): + self.ckc(key="key") + assert not self.cfg_file.exists() + + def test_file_creation(self): + self.ckc(password="password") + assert self.cfg_file.exists() + cfg_data_1 = self.cfg_file.read_text() + assert cfg_data_1 + + def test_idempotency(self): + self.ckc(password="password") + cfg_data_1 = self.cfg_file.read_text() + self.ckc(password="password") + 
cfg_data_2 = self.cfg_file.read_text() + # Verify that calling w/ the same data keeps the same file contents. + assert cfg_data_2 == cfg_data_1 + + def test_efficient_updates(self): + self.ckc(password="old_password") + cfg_stat_1 = self.cfg_file.stat() + self.ckc(password="old_password") + cfg_stat_2 = self.cfg_file.stat() + self.ckc(password="new_password") + cfg_stat_3 = self.cfg_file.stat() + # Verify that calling with the same data doesn't + # modify the file at all, but that new data does + assert cfg_stat_1.st_mtime == cfg_stat_2.st_mtime < cfg_stat_3.st_mtime + + def test_aws_iam(self): + self.ckc(password="password", aws_iam_cluster_id="aws-cluster") + assert self.cfg_file.exists() + cfg_data_1 = self.cfg_file.read_text() + assert "aws-cluster" in cfg_data_1 + + def test_keystone(self): + self.ckc(password="password", keystone=True) + assert self.cfg_file.exists() + cfg_data_1 = self.cfg_file.read_text() + assert "keystone-user" in cfg_data_1 + assert "exec" in cfg_data_1 + + def test_atomic_updates(self): + self.ckc(password="old_password") + with self.cfg_file.open("rt") as f: + # Perform a write in the middle of reading + self.ckc(password="new_password") + # Read data from existing FH after new data was written + cfg_data_1 = f.read() + # Read updated data + cfg_data_2 = self.cfg_file.read_text() + # Verify that the in-progress read didn't get any of the new data + assert cfg_data_1 != cfg_data_2 + assert "old_password" in cfg_data_1 + assert "new_password" in cfg_data_2 + + @mock.patch("charmhelpers.core.hookenv.network_get", autospec=True) + def test_get_ingress_address(self, network_get): + network_get.return_value = {"ingress-addresses": ["1.2.3.4", "5.6.7.8"]} + ingress = kubernetes_common.get_ingress_address("endpoint-name") + assert ingress == "1.2.3.4" + ingress = kubernetes_common.get_ingress_address("endpoint-name", ["1.2.3.4"]) + assert ingress == "5.6.7.8" diff --git 
a/kubeapi-load-balancer/tests/integration/test_kubeapi-load-balancer_integration.py b/kubeapi-load-balancer/tests/integration/test_kubeapi-load-balancer_integration.py new file mode 100644 index 0000000..554ed94 --- /dev/null +++ b/kubeapi-load-balancer/tests/integration/test_kubeapi-load-balancer_integration.py @@ -0,0 +1,41 @@ +import logging + +import pytest + + +log = logging.getLogger(__name__) + + +def _check_status_messages(ops_test): + """ Validate that the status messages are correct. """ + expected_messages = { + "kubernetes-master": "Kubernetes master running.", + "kubernetes-worker": "Kubernetes worker running.", + "kubeapi-load-balancer": "Loadbalancer ready.", + } + for app, message in expected_messages.items(): + for unit in ops_test.model.applications[app].units: + assert unit.workload_status_message == message + + +@pytest.mark.abort_on_fail +async def test_build_and_deploy(ops_test): + bundle = ops_test.render_bundle( + "tests/data/bundle.yaml", k8s_lb_charm=await ops_test.build_charm(".") + ) + await ops_test.model.deploy(bundle) + await ops_test.model.wait_for_idle(wait_for_active=True, timeout=60 * 60) + _check_status_messages(ops_test) + + +async def test_kube_api_endpoint(ops_test): + """ Validate that using the old MITM-style relation works""" + master = ops_test.model.applications["kubernetes-master"] + worker = ops_test.model.applications["kubernetes-worker"] + await master.remove_relation("loadbalancer-internal", "kubeapi-load-balancer") + await master.remove_relation("loadbalancer-external", "kubeapi-load-balancer") + await master.add_relation("kube-api-endpoint", "kubeapi-load-balancer") + await master.add_relation("loadbalancer", "kubeapi-load-balancer") + await worker.add_relation("kube-api-endpoint", "kubeapi-load-balancer") + await ops_test.model.wait_for_idle(wait_for_active=True, timeout=30 * 60) + _check_status_messages(ops_test) diff --git a/kubeapi-load-balancer/tests/conftest.py b/kubeapi-load-balancer/tests/unit/conftest.py 
similarity index 100% rename from kubeapi-load-balancer/tests/conftest.py rename to kubeapi-load-balancer/tests/unit/conftest.py diff --git a/kubeapi-load-balancer/tests/unit/test_k8s_common.py b/kubeapi-load-balancer/tests/unit/test_k8s_common.py new file mode 100644 index 0000000..0dcad31 --- /dev/null +++ b/kubeapi-load-balancer/tests/unit/test_k8s_common.py @@ -0,0 +1,122 @@ +import json +import string +from subprocess import CalledProcessError +from unittest.mock import Mock + +from charms.layer import kubernetes_common as kc + + +def test_token_generator(): + alphanum = string.ascii_letters + string.digits + token = kc.token_generator(10) + assert len(token) == 10 + unknown_chars = set(token) - set(alphanum) + assert not unknown_chars + + +def test_get_secret_names(monkeypatch): + monkeypatch.setattr(kc, "kubectl", Mock()) + kc.kubectl.side_effect = [ + CalledProcessError(1, "none"), + FileNotFoundError, + "{}".encode("utf8"), + json.dumps( + { + "items": [ + { + "metadata": {"name": "secret-id"}, + "data": {"username": "dXNlcg=="}, + }, + ], + } + ).encode("utf8"), + ] + assert kc.get_secret_names() == {} + assert kc.get_secret_names() == {} + assert kc.get_secret_names() == {} + assert kc.get_secret_names() == {"user": "secret-id"} + + +def test_generate_rfc1123(): + alphanum = string.ascii_letters + string.digits + token = kc.generate_rfc1123(1000) + assert len(token) == 253 + unknown_chars = set(token) - set(alphanum) + assert not unknown_chars + + +def test_create_secret(monkeypatch): + monkeypatch.setattr(kc, "render", Mock()) + monkeypatch.setattr(kc, "kubectl_manifest", Mock()) + monkeypatch.setattr(kc, "get_secret_names", Mock()) + monkeypatch.setattr(kc, "generate_rfc1123", Mock()) + kc.kubectl_manifest.side_effect = [True, False] + kc.get_secret_names.side_effect = [{"username": "secret-id"}, {}] + kc.generate_rfc1123.return_value = "foo" + assert kc.create_secret("token", "username", "user", "groups") + assert kc.render.call_args[1]["context"] == 
{ + "groups": "Z3JvdXBz", + "password": "dXNlcjo6dG9rZW4=", + "secret_name": "secret-id", + "secret_namespace": "kube-system", + "type": "juju.is/token-auth", + "user": "dXNlcg==", + "username": "dXNlcm5hbWU=", + } + assert not kc.create_secret("token", "username", "user", "groups") + assert kc.render.call_args[1]["context"] == { + "groups": "Z3JvdXBz", + "password": "dXNlcjo6dG9rZW4=", + "secret_name": "auth-user-foo", + "secret_namespace": "kube-system", + "type": "juju.is/token-auth", + "user": "dXNlcg==", + "username": "dXNlcm5hbWU=", + } + + +def test_get_secret_password(monkeypatch): + monkeypatch.setattr(kc, "kubectl", Mock()) + monkeypatch.setattr(kc, "Path", Mock()) + monkeypatch.setattr(kc, "yaml", Mock()) + kc.kubectl.side_effect = [ + CalledProcessError(1, "none"), + CalledProcessError(1, "none"), + CalledProcessError(1, "none"), + CalledProcessError(1, "none"), + CalledProcessError(1, "none"), + CalledProcessError(1, "none"), + FileNotFoundError, + json.dumps({}).encode("utf8"), + json.dumps({"items": []}).encode("utf8"), + json.dumps({"items": []}).encode("utf8"), + json.dumps({"items": [{}]}).encode("utf8"), + json.dumps({"items": [{"data": {}}]}).encode("utf8"), + json.dumps( + {"items": [{"data": {"username": "Ym9i", "password": "c2VjcmV0"}}]} + ).encode("utf8"), + json.dumps( + {"items": [{"data": {"username": "dXNlcm5hbWU=", "password": "c2VjcmV0"}}]} + ).encode("utf8"), + ] + kc.yaml.safe_load.side_effect = [ + {}, + {"users": None}, + {"users": []}, + {"users": [{"user": {}}]}, + {"users": [{"user": {"token": "secret"}}]}, + ] + assert kc.get_secret_password("username") is None + assert kc.get_secret_password("admin") is None + assert kc.get_secret_password("admin") is None + assert kc.get_secret_password("admin") is None + assert kc.get_secret_password("admin") is None + assert kc.get_secret_password("admin") == "secret" + assert kc.get_secret_password("username") is None + assert kc.get_secret_password("username") is None + assert 
kc.get_secret_password("username") is None + assert kc.get_secret_password("username") is None + assert kc.get_secret_password("username") is None + assert kc.get_secret_password("username") is None + assert kc.get_secret_password("username") is None + assert kc.get_secret_password("username") == "secret" diff --git a/kubeapi-load-balancer/tests/test_kubeapi_load_balancer.py b/kubeapi-load-balancer/tests/unit/test_kubeapi_load_balancer.py similarity index 100% rename from kubeapi-load-balancer/tests/test_kubeapi_load_balancer.py rename to kubeapi-load-balancer/tests/unit/test_kubeapi_load_balancer.py diff --git a/kubeapi-load-balancer/tests/validate-wheelhouse.sh b/kubeapi-load-balancer/tests/validate-wheelhouse.sh new file mode 100755 index 0000000..329dfca --- /dev/null +++ b/kubeapi-load-balancer/tests/validate-wheelhouse.sh @@ -0,0 +1,8 @@ +#!/bin/bash + +build_dir="$(mktemp -d)" +function cleanup { rm -rf "$build_dir"; } +trap cleanup EXIT + +charm build . --build-dir "$build_dir" +pip install -f "$build_dir/kubeapi-load-balancer/wheelhouse" --no-index --no-cache-dir "$build_dir"/kubeapi-load-balancer/wheelhouse/* diff --git a/kubeapi-load-balancer/tox.ini b/kubeapi-load-balancer/tox.ini index 76fa574..8eee556 100644 --- a/kubeapi-load-balancer/tox.ini +++ b/kubeapi-load-balancer/tox.ini @@ -1,18 +1,34 @@ [tox] skipsdist = True -envlist = lint,py3 +envlist = lint,unit [testenv] basepython = python3 setenv = PYTHONPATH={toxinidir}:{toxinidir}/lib + +[testenv:unit] deps = pytest - flake8 ipdb git+https://github.com/juju-solutions/charms.unit_test/#egg=charms.unit_test -commands = pytest --tb native -s {posargs} +commands = pytest --tb native -s --show-capture=no --log-cli-level=INFO {posargs} {toxinidir}/tests/unit + +[testenv:integration] +deps = + pytest + pytest-operator + ipdb +commands = pytest --tb native --show-capture=no --log-cli-level=INFO -s {posargs} {toxinidir}/tests/integration [testenv:lint] -envdir = {toxworkdir}/py3 -commands = flake8 
{toxinidir}/lib {toxinidir}/reactive {toxinidir}/tests +deps = + flake8 +commands = flake8 {toxinidir}/reactive {toxinidir}/tests + +[testenv:validate-wheelhouse] +allowlist_externals = {toxinidir}/tests/validate-wheelhouse.sh +commands = {toxinidir}/tests/validate-wheelhouse.sh + +[flake8] +max-line-length = 88 diff --git a/kubeapi-load-balancer/version b/kubeapi-load-balancer/version index 1dea0b1..20817dd 100644 --- a/kubeapi-load-balancer/version +++ b/kubeapi-load-balancer/version @@ -1 +1 @@ -e247aeff \ No newline at end of file +ccfa68be \ No newline at end of file diff --git a/kubeapi-load-balancer/wheelhouse.txt b/kubeapi-load-balancer/wheelhouse.txt index f9c2fab..b1fa19e 100644 --- a/kubeapi-load-balancer/wheelhouse.txt +++ b/kubeapi-load-balancer/wheelhouse.txt @@ -3,9 +3,11 @@ # even with installing setuptools before upgrading pip ends up with pip seeing # the older setuptools at the system level if include_system_packages is true pip>=18.1,<19.0 -# pin Jinja2 and PyYAML to the last versions supporting python 3.4 for trusty +# pin Jinja2, PyYAML and MarkupSafe to the last versions supporting python 3.5 +# for trusty Jinja2<=2.10.1 PyYAML<=5.2 +MarkupSafe<2.0.0 setuptools<42 setuptools-scm<=1.17.0 charmhelpers>=0.4.0,<1.0.0 @@ -17,3 +19,6 @@ netaddr<=0.7.19 # layer:nginx toml +# kubeapi-load-balancer +loadbalancer-interface + diff --git a/kubeapi-load-balancer/wheelhouse/cached-property-1.5.2.tar.gz b/kubeapi-load-balancer/wheelhouse/cached-property-1.5.2.tar.gz new file mode 100644 index 0000000..501f2c0 Binary files /dev/null and b/kubeapi-load-balancer/wheelhouse/cached-property-1.5.2.tar.gz differ diff --git a/kubeapi-load-balancer/wheelhouse/charmhelpers-0.20.21.tar.gz b/kubeapi-load-balancer/wheelhouse/charmhelpers-0.20.21.tar.gz deleted file mode 100644 index ca65d07..0000000 Binary files a/kubeapi-load-balancer/wheelhouse/charmhelpers-0.20.21.tar.gz and /dev/null differ diff --git a/kubeapi-load-balancer/wheelhouse/charmhelpers-0.20.23.tar.gz 
b/kubeapi-load-balancer/wheelhouse/charmhelpers-0.20.23.tar.gz new file mode 100644 index 0000000..8fbc8ec Binary files /dev/null and b/kubeapi-load-balancer/wheelhouse/charmhelpers-0.20.23.tar.gz differ diff --git a/kubeapi-load-balancer/wheelhouse/loadbalancer_interface-1.1.1.tar.gz b/kubeapi-load-balancer/wheelhouse/loadbalancer_interface-1.1.1.tar.gz new file mode 100644 index 0000000..265133c Binary files /dev/null and b/kubeapi-load-balancer/wheelhouse/loadbalancer_interface-1.1.1.tar.gz differ diff --git a/kubeapi-load-balancer/wheelhouse/marshmallow-3.14.0.tar.gz b/kubeapi-load-balancer/wheelhouse/marshmallow-3.14.0.tar.gz new file mode 100644 index 0000000..14e4efc Binary files /dev/null and b/kubeapi-load-balancer/wheelhouse/marshmallow-3.14.0.tar.gz differ diff --git a/kubeapi-load-balancer/wheelhouse/marshmallow-enum-1.5.1.tar.gz b/kubeapi-load-balancer/wheelhouse/marshmallow-enum-1.5.1.tar.gz new file mode 100644 index 0000000..642941a Binary files /dev/null and b/kubeapi-load-balancer/wheelhouse/marshmallow-enum-1.5.1.tar.gz differ diff --git a/kubeapi-load-balancer/wheelhouse/ops-1.2.0.tar.gz b/kubeapi-load-balancer/wheelhouse/ops-1.2.0.tar.gz new file mode 100644 index 0000000..2cb4358 Binary files /dev/null and b/kubeapi-load-balancer/wheelhouse/ops-1.2.0.tar.gz differ diff --git a/kubeapi-load-balancer/wheelhouse/ops_reactive_interface-1.0.1.tar.gz b/kubeapi-load-balancer/wheelhouse/ops_reactive_interface-1.0.1.tar.gz new file mode 100644 index 0000000..14f5ded Binary files /dev/null and b/kubeapi-load-balancer/wheelhouse/ops_reactive_interface-1.0.1.tar.gz differ diff --git a/kubeapi-load-balancer/wheelhouse/pyaml-20.4.0.tar.gz b/kubeapi-load-balancer/wheelhouse/pyaml-20.4.0.tar.gz deleted file mode 100644 index 0d5fd76..0000000 Binary files a/kubeapi-load-balancer/wheelhouse/pyaml-20.4.0.tar.gz and /dev/null differ diff --git a/kubeapi-load-balancer/wheelhouse/pyaml-21.10.1.tar.gz b/kubeapi-load-balancer/wheelhouse/pyaml-21.10.1.tar.gz new file 
mode 100644 index 0000000..b19aad3 Binary files /dev/null and b/kubeapi-load-balancer/wheelhouse/pyaml-21.10.1.tar.gz differ diff --git a/kubeapi-load-balancer/wheelhouse/six-1.15.0.tar.gz b/kubeapi-load-balancer/wheelhouse/six-1.15.0.tar.gz deleted file mode 100644 index 63329e4..0000000 Binary files a/kubeapi-load-balancer/wheelhouse/six-1.15.0.tar.gz and /dev/null differ diff --git a/kubeapi-load-balancer/wheelhouse/six-1.16.0.tar.gz b/kubeapi-load-balancer/wheelhouse/six-1.16.0.tar.gz new file mode 100644 index 0000000..5bf3a27 Binary files /dev/null and b/kubeapi-load-balancer/wheelhouse/six-1.16.0.tar.gz differ diff --git a/kubernetes-master/.build.manifest b/kubernetes-master/.build.manifest index bd4370c..0b538db 100644 --- a/kubernetes-master/.build.manifest +++ b/kubernetes-master/.build.manifest @@ -1,212 +1,212 @@ { "layers": [ { - "branch": "refs/heads/master\nrefs/heads/stable", + "branch": "refs/heads/master", "rev": "fcdcea4e5de3e1556c24e6704607862d0ba00a56", "url": "layer:options" }, { - "branch": "refs/heads/master\nrefs/heads/stable", - "rev": "623e69c7b432456fd4364f6e1835424fd6b5425e", + "branch": "refs/heads/master", + "rev": "a3ff62c32c993d80417f6e093e3ef95e42f62083", "url": "layer:basic" }, { - "branch": "refs/heads/master\nrefs/heads/stable", - "rev": "85d7cc4f7180d19df20e264358e920004cec192b", + "branch": "refs/heads/master", + "rev": "d3acdf209cbaf5b732e9aba621778a0f56dbaeb9", "url": "layer:snap" }, { - "branch": "refs/heads/master\nrefs/heads/stable", + "branch": "refs/heads/master", "rev": "527dd64fc4b9a6b0f8d80a3c2c0b865155050275", "url": "layer:debug" }, { - "branch": "refs/heads/master\nrefs/heads/stable", + "branch": "refs/heads/master", "rev": "fb46dec78d390571753d21876bbba689bbbca9e4", "url": "layer:tls-client" }, { - "branch": "refs/heads/master\nrefs/heads/stable", + "branch": "refs/heads/master", "rev": "cc5bd3f49b2fa5e6c3ab2336763c313ec8bf083f", "url": "layer:leadership" }, { - "branch": "refs/heads/master\nrefs/heads/stable", 
+ "branch": "refs/heads/master", "rev": "f491ebe32b503c9712d2f8cd602dcce18f4aab46", "url": "layer:metrics" }, { - "branch": "refs/heads/master\nrefs/heads/stable", + "branch": "refs/heads/master", "rev": "47dfcd4920ef6317850a4837ef0057ab0092a18e", "url": "layer:nagios" }, { - "branch": "refs/heads/master\nrefs/heads/stable", + "branch": "refs/heads/master", "rev": "b60102068c6f0ddbeaf8a308549a3e88cfa35688", "url": "layer:cdk-service-kicker" }, { - "branch": "refs/heads/master\nrefs/heads/stable", + "branch": "refs/heads/master", "rev": "023c67941e18663a4df49f53edba809f43ba5069", "url": "layer:cis-benchmark" }, { - "branch": "refs/heads/master\nrefs/heads/stable", + "branch": "refs/heads/master", "rev": "fa27fc93e0b08000963e83a6bfe49812d890dfcf", "url": "layer:coordinator" }, { - "branch": "refs/heads/master\nrefs/heads/stable", - "rev": "763297a075b3654f261af20c84b940d87f55354e", + "branch": "refs/heads/master", + "rev": "bbeabfee52c4442cdaf3a34e5e35530a3bd71156", "url": "layer:kubernetes-common" }, { - "branch": "refs/heads/master\nrefs/heads/stable", + "branch": "refs/heads/master", "rev": "809f06c6f6521be59e21859eaebeccd13f4d8c28", "url": "layer:kubernetes-master-worker-base" }, { - "branch": "refs/heads/master\nrefs/heads/stable", - "rev": "60f82079cd9b312d17cb67bf797b6a23d27398f3", + "branch": "refs/heads/master", + "rev": "e22c18b133070ce354cebbda864a5aa8a4b60398", "url": "layer:vault-kv" }, { - "branch": "refs/heads/master\nrefs/heads/stable", + "branch": "refs/heads/master", "rev": "a7d7b6423db37a47611310039e6ed1929c0a2eab", "url": "layer:status" }, { - "branch": "refs/heads/master\nrefs/heads/stable", + "branch": "refs/heads/master", "rev": "76bddfb640ab8767fc7e4a4b73a4a4e781948f34", "url": "layer:apt" }, { - "branch": "refs/heads/master\nrefs/heads/stable", + "branch": "refs/heads/master", "rev": "2c4c16cd9e4254494d79aac1d17eacf1620d1b0f", "url": "layer:vaultlocker" }, { - "branch": "refs/heads/master\nrefs/heads/stable", + "branch": "refs/heads/master", 
"rev": "a8f88f16bb7771807a0f7fdb17ee16b0e310fc2b", "url": "layer:hacluster" }, { - "branch": "refs/heads/stable", - "rev": "7946456765a3774e1cab44d124e50cbaa294cf1c", + "branch": "refs/heads/master", + "rev": "31070c6182c76824c1541c571df334454f4fabaf", "url": "kubernetes-master" }, { - "branch": "refs/heads/master\nrefs/heads/stable", + "branch": "refs/heads/master", "rev": "d9850016d930a6d507b9fd45e2598d327922b140", "url": "interface:tls-certificates" }, { - "branch": "refs/heads/master\nrefs/heads/stable", + "branch": "refs/heads/master", "rev": "2e0e1fdea6d83b55078200aacb537d60013ec5bc", "url": "interface:nrpe-external-master" }, { - "branch": "refs/heads/master\nrefs/heads/stable", + "branch": "refs/heads/master", "rev": "6f927f10b97f45c566481cf57a29d433f17373e1", "url": "interface:container-runtime" }, { - "branch": "refs/heads/master\nrefs/heads/stable", + "branch": "refs/heads/master", "rev": "dceab99ac3739cc7265e386287f100f1bfebc47f", "url": "interface:vault-kv" }, { - "branch": "refs/heads/master\nrefs/heads/stable", - "rev": "6c611a3c61909fda411f7a79af53908ec7bef2c8", + "branch": "refs/heads/master", + "rev": "8125a7baecccf9b0869e515b92300dde3a86f31b", "url": "interface:hacluster" }, { - "branch": "refs/heads/master\nrefs/heads/stable", + "branch": "refs/heads/master", "rev": "c1061a29297084fa53c2474ba371671186ff3389", "url": "interface:ceph-admin" }, { - "branch": "(HEAD detached at e247aeff)", - "rev": "e247aeff0147756f5c70813d966b3865d0435d20", + "branch": "(HEAD detached at ccfa68be)", + "rev": "ccfa68bef24ab3e7f9f1e85c082390de1d62707d", "url": "interface:ceph-client" }, { - "branch": "refs/heads/master\nrefs/heads/stable", + "branch": "refs/heads/master", "rev": "44f244cbd08b86bf2b68bd71c3fb34c7c070c382", "url": "interface:etcd" }, { - "branch": "refs/heads/master\nrefs/heads/stable", + "branch": "refs/heads/master", "rev": "632131b1f122daf6fb601fd4c9f1e4dbb1a92e09", "url": "interface:http" }, { - "branch": "refs/heads/master\nrefs/heads/stable", - 
"rev": "b941b3b542d78ad15aa40937b26c7bf727e1b39b", + "branch": "refs/heads/master", + "rev": "88b1e8fad78d06efdbf512cd75eaa0bb308eb1c1", "url": "interface:kubernetes-cni" }, { - "branch": "refs/heads/master\nrefs/heads/stable", + "branch": "refs/heads/master", "rev": "9bc32742b7720a755ada9526424e5d80092e1536", "url": "interface:kube-dns" }, { - "branch": "refs/heads/master\nrefs/heads/stable", - "rev": "2236a52be495a45b8f492bae37bbba50e468ef42", + "branch": "refs/heads/master", + "rev": "534310f5bca8edde02cadaf6ac42231cea0b040b", "url": "interface:kube-control" }, { - "branch": "refs/heads/master\nrefs/heads/stable", + "branch": "refs/heads/master", "rev": "8e486e329dd12f70c4220874c795c0f0280d99ae", "url": "interface:kube-masters" }, { - "branch": "refs/heads/master\nrefs/heads/stable", + "branch": "refs/heads/master", "rev": "5021f8a23f6e6e4cc449d2d02f2d8cb99763ec27", "url": "interface:public-address" }, { - "branch": "refs/heads/master\nrefs/heads/stable", + "branch": "refs/heads/master", "rev": "d8d8c7ef17c99ad53383f3cabf4cf5c8191d16f7", "url": "interface:aws-integration" }, { - "branch": "refs/heads/master\nrefs/heads/stable", + "branch": "refs/heads/master", "rev": "d8f093cb2930edf5f93678253dca2da70b73b4fb", "url": "interface:gcp-integration" }, { - "branch": "refs/heads/master\nrefs/heads/stable", + "branch": "refs/heads/master", "rev": "0d7a994f04b9e92ed847829ce8349b1a9c672e47", "url": "interface:openstack-integration" }, { - "branch": "refs/heads/master\nrefs/heads/stable", + "branch": "refs/heads/master", "rev": "d5caea55ced6785f391215ee457c3a964eaf3f4b", "url": "interface:vsphere-integration" }, { - "branch": "refs/heads/master\nrefs/heads/stable", - "rev": "45b79107f7bd5f14b3b956d1f45f659a567b0999", + "branch": "refs/heads/master", + "rev": "8d2202e433d7c188de4df2fd4bddb355193e93ac", "url": "interface:azure-integration" }, { - "branch": "refs/heads/master\nrefs/heads/stable", + "branch": "refs/heads/master", "rev": 
"53e93b8820899f2251d207ed5d5c3b212ceb64de", "url": "interface:keystone-credentials" }, { - "branch": "refs/heads/master\nrefs/heads/stable", + "branch": "refs/heads/master", "rev": "3f775242c16d53243c993d7ba0c896169ad1639e", "url": "interface:prometheus-manual" }, { - "branch": "refs/heads/master\nrefs/heads/stable", + "branch": "refs/heads/master", "rev": "e64261e281f012a00d374c6779ec52e488cb8713", "url": "interface:grafana-dashboard" }, { - "branch": "refs/heads/master\nrefs/heads/stable", + "branch": "refs/heads/master", "rev": "e9a8c168b81b687790119dd6df2e7a4c1f729c41", "url": "interface:aws-iam" } @@ -220,7 +220,7 @@ ".github/workflows/main.yml": [ "kubernetes-master", "static", - "26cb9b176329c7e49f6dea57523397f362c2591cbb409bbe099b04ecca0d2401" + "ac395c5924c9a8f884b7db92f4bef24e3dd8fd8b4d084976a0994adbc020a349" ], ".gitignore": [ "kubernetes-master", @@ -232,11 +232,6 @@ "static", "b6dbe144aa288b8a89caf1119b9835b407b234c9b32a1c81013b12a0593a8be2" ], - ".travis/profile-update.yaml": [ - "layer:basic", - "static", - "731e20aa59bf61c024d317ad630e478301a9386ccc0afe56e6c1c09db07ac83b" - ], "CONTRIBUTING.md": [ "kubernetes-master", "static", @@ -260,7 +255,7 @@ "actions.yaml": [ "kubernetes-master", "dynamic", - "18322aaa6c607fb92176d0706335ec94260e4d79d525291b91beba5b689d599d" + "2c09f607e00ff7b65d316088676af7d8df8bf7cf2c86f1f32590d57fe048837a" ], "actions/apply-manifest": [ "kubernetes-master", @@ -272,11 +267,6 @@ "static", "fd3c1b8ba478b7f933605897ace8ae9f3ee102d9992f46f1e36d95eb1b094b84" ], - "actions/create-rbd-pv": [ - "kubernetes-master", - "static", - "b962c4e9472c8bc2fb3c86eacdb109293e2b251ae1c80cee29f19549032b73b3" - ], "actions/debug": [ "layer:debug", "static", @@ -320,22 +310,22 @@ "actions/user-create": [ "kubernetes-master", "static", - "227d2b783e97fa61bfd33ee5e49487bea1abdaf01d835c6247bddef4ec28c2b7" + "2ffaf34bef1888ea11e0dba1df82e1fcf10dbe4a9a5c59e407772310f094f2a2" ], "actions/user-delete": [ "kubernetes-master", "static", - 
"227d2b783e97fa61bfd33ee5e49487bea1abdaf01d835c6247bddef4ec28c2b7" + "2ffaf34bef1888ea11e0dba1df82e1fcf10dbe4a9a5c59e407772310f094f2a2" ], "actions/user-list": [ "kubernetes-master", "static", - "227d2b783e97fa61bfd33ee5e49487bea1abdaf01d835c6247bddef4ec28c2b7" + "2ffaf34bef1888ea11e0dba1df82e1fcf10dbe4a9a5c59e407772310f094f2a2" ], "actions/user_actions.py": [ "kubernetes-master", "static", - "227d2b783e97fa61bfd33ee5e49487bea1abdaf01d835c6247bddef4ec28c2b7" + "2ffaf34bef1888ea11e0dba1df82e1fcf10dbe4a9a5c59e407772310f094f2a2" ], "bin/charm-env": [ "layer:basic", @@ -350,7 +340,7 @@ "config.yaml": [ "kubernetes-master", "dynamic", - "9bca6cd4212e06f367e3d0b13bfbf86d2e9f6c2e5bc50af19eb3293c6c337f04" + "dbd9cf8913f4a35f21e1eb2f12c8c4a6777b653513c478b66d3c00725f9addc1" ], "copyright": [ "kubernetes-master", @@ -472,6 +462,16 @@ "static", "ebf7f23ef6e39fb8e664bac2e9429e32aaeb673b4a51751724b835c007e85d3b" ], + "docs/README": [ + "kubernetes-master", + "static", + "ea099038f01227b2907a915aa9e93d9ed73d85f9b446edcbe079c8a8de21e0cf" + ], + "docs/index.md": [ + "kubernetes-master", + "static", + "86a64daf0d25da6fb4d3bead5110a35c3ac302f5e5642b6a8bb748fd9ae9871b" + ], "docs/status.md": [ "layer:status", "static", @@ -647,31 +647,6 @@ "dynamic", "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7" ], - "hooks/cluster-dns-relation-broken": [ - "layer:basic", - "dynamic", - "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7" - ], - "hooks/cluster-dns-relation-changed": [ - "layer:basic", - "dynamic", - "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7" - ], - "hooks/cluster-dns-relation-created": [ - "layer:basic", - "dynamic", - "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7" - ], - "hooks/cluster-dns-relation-departed": [ - "layer:basic", - "dynamic", - "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7" - ], - "hooks/cluster-dns-relation-joined": [ - "layer:basic", - "dynamic", - 
"2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7" - ], "hooks/cni-relation-broken": [ "layer:basic", "dynamic", @@ -1002,6 +977,56 @@ "static", "e5138d13492aa9a90379e8fce4a85c612481e7bc27a49958edbbfcaaf06f03a6" ], + "hooks/loadbalancer-external-relation-broken": [ + "layer:basic", + "dynamic", + "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7" + ], + "hooks/loadbalancer-external-relation-changed": [ + "layer:basic", + "dynamic", + "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7" + ], + "hooks/loadbalancer-external-relation-created": [ + "layer:basic", + "dynamic", + "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7" + ], + "hooks/loadbalancer-external-relation-departed": [ + "layer:basic", + "dynamic", + "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7" + ], + "hooks/loadbalancer-external-relation-joined": [ + "layer:basic", + "dynamic", + "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7" + ], + "hooks/loadbalancer-internal-relation-broken": [ + "layer:basic", + "dynamic", + "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7" + ], + "hooks/loadbalancer-internal-relation-changed": [ + "layer:basic", + "dynamic", + "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7" + ], + "hooks/loadbalancer-internal-relation-created": [ + "layer:basic", + "dynamic", + "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7" + ], + "hooks/loadbalancer-internal-relation-departed": [ + "layer:basic", + "dynamic", + "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7" + ], + "hooks/loadbalancer-internal-relation-joined": [ + "layer:basic", + "dynamic", + "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7" + ], "hooks/loadbalancer-relation-broken": [ "layer:basic", "dynamic", @@ -1250,7 +1275,7 @@ "hooks/relations/azure-integration/provides.py": [ "interface:azure-integration", "static", - 
"a3a1de7f79c5f2cc37f2dff450d8e9b2ce36c63c0328bb6bedd2ade7519a7442" + "33af701c7abd51e869de945c1f032749136c66560bb604e8e72521dc9d7e495b" ], "hooks/relations/azure-integration/pydocmd.yml": [ "interface:azure-integration", @@ -1260,7 +1285,7 @@ "hooks/relations/azure-integration/requires.py": [ "interface:azure-integration", "static", - "112bfa057cdcf91a812dea080330e9323f4d7e4b1bcacfd69b3ad95dd2274cbb" + "2e60fecf8bc65d84124742d0833afc90d2e839f5dfa2923e8d1849063c51f47a" ], "hooks/relations/ceph-admin/.gitignore": [ "interface:ceph-admin", @@ -1310,7 +1335,7 @@ "hooks/relations/ceph-client/lib/base_requires.py": [ "interface:ceph-client", "static", - "1e7ac024219e39ac3840a913891b17c2e32d69c2a74bad4464b4e67ef5bd80c0" + "105fd680689b85516e0768da7e114dd5fc3b5fb7970ab7bb6d00122c81f7b3e1" ], "hooks/relations/ceph-client/provides.py": [ "interface:ceph-client", @@ -1532,11 +1557,6 @@ "static", "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855" ], - "hooks/relations/hacluster/common.py": [ - "interface:hacluster", - "static", - "cd9f765e2c3ff64a592c8e144a36783e48c1033413cbece2c4f579195cb7ff5e" - ], "hooks/relations/hacluster/copyright": [ "interface:hacluster", "static", @@ -1545,17 +1565,27 @@ "hooks/relations/hacluster/interface.yaml": [ "interface:hacluster", "static", - "51bcf4e36b973600d567cf96783bdee3eaa6e164275f70b69e2e47e3468c8c8b" + "5f4e6c8d7b2884bdceeee422821f4db7163dbfa7994d86cb405ffef2c3dea43c" + ], + "hooks/relations/hacluster/interface_hacluster/__init__.py": [ + "interface:hacluster", + "static", + "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855" + ], + "hooks/relations/hacluster/interface_hacluster/common.py": [ + "interface:hacluster", + "static", + "abcc0d2940d142976ccfa3fa7518227549ee13041292af7ad61101a7d0c02f7e" ], "hooks/relations/hacluster/requires.py": [ "interface:hacluster", "static", - "eb752e55844ffbfddf9a98e80ac282ff832ab667c1a33b743940babbd048bb17" + 
"68cf3ed22af30e42f34fc70ca484e8e4eeaedac6410bd3f228677cc791e6f46c" ], "hooks/relations/hacluster/test-requirements.txt": [ "interface:hacluster", "static", - "2c37d84ada8578ba5ed44f99f10470710c91d370052a867541f31b5c6a357b07" + "63756e4b1c67bc161cee0d30d460dbb83911b2c064dc1c55454a30c1ab877616" ], "hooks/relations/http/.gitignore": [ "interface:http", @@ -1660,12 +1690,12 @@ "hooks/relations/kube-control/provides.py": [ "interface:kube-control", "static", - "5dffb8504d0993ad756b0631fd82ef465dc9127641b448bea76596fc6f3e55c4" + "08e090bb3ad51e5825590ad0dee077288648b171764480afc20205c740fa15be" ], "hooks/relations/kube-control/requires.py": [ "interface:kube-control", "static", - "496ed9b2d4f6fef2e1e26b53b8f8c97e67b9a96b4fcfcb40ef671d2469b983e3" + "a064ad0b75081439faeda7fb948934bfd86a7ab2079d25b2ad47aa5fa32c2a6f" ], "hooks/relations/kube-dns/README.md": [ "interface:kube-dns", @@ -1712,15 +1742,15 @@ "static", "2237030141571ef6acb1934a724f1620164bb2ddf08450aab23d14b0dc7b84b7" ], + "hooks/relations/kubernetes-cni/.github/workflows/tests.yaml": [ + "interface:kubernetes-cni", + "static", + "d0015cd49675976ff87832f5ef7ea20ffca961786379c72bb6acdbdeddd9137c" + ], "hooks/relations/kubernetes-cni/.gitignore": [ "interface:kubernetes-cni", "static", - "cf237c7aff44efbe6e502e645c3e06da03a69d7bdeb43392108ef3348143417e" - ], - "hooks/relations/kubernetes-cni/.travis.yml": [ - "interface:kubernetes-cni", - "static", - "c2bd1b88f26c88b883696cca155c28671359a256ed48b90a9ea724b376f2a829" + "0594213ebf9c6ef87827b30405ee67d847f73f4185a865e0e5e9c0be9d29eabe" ], "hooks/relations/kubernetes-cni/README.md": [ "interface:kubernetes-cni", @@ -1740,12 +1770,12 @@ "hooks/relations/kubernetes-cni/provides.py": [ "interface:kubernetes-cni", "static", - "4c3fc3f06a42a2f67fc03c4bc1b4c617021dc1ebb7111527ce6d9cd523b0c40e" + "e436e187f2bab6e73add2b897cd43a2f000fde4726e40b772b66f27786c85dee" ], "hooks/relations/kubernetes-cni/requires.py": [ "interface:kubernetes-cni", "static", - 
"c5fdd7a0eae100833ae6c79474f931803466cd5b206cf8f456cd6f2716d1d2fa" + "45398af27246eaf2005115bd3f270b78fc830d4345b02cc0c4d438711b7cd9fe" ], "hooks/relations/nrpe-external-master/README.md": [ "interface:nrpe-external-master", @@ -2175,7 +2205,7 @@ "lib/charms/layer/basic.py": [ "layer:basic", "static", - "3126b5754ad39402ee27e64527044ddd231ed1cd137fcedaffb51e63a635f108" + "98b47134770ed6e4c0b2d4aad73cd5bc200bec84aa9c1c4e075fd70c3222a0c9" ], "lib/charms/layer/execd.py": [ "layer:basic", @@ -2190,12 +2220,12 @@ "lib/charms/layer/kubernetes_common.py": [ "layer:kubernetes-common", "static", - "826650823a9af745e8a57defba66d1f2fe1c735f0fe64d282cf528ca65272101" + "29cedffd490e6295273d195a7c9bace2fcdf149826e7427f2af9698f7f75055b" ], "lib/charms/layer/kubernetes_master.py": [ "kubernetes-master", "static", - "e270581b23946f18907a178ffd68145629b1df804234acf48f12bf9bed62a173" + "b84b823a2198833b2dcaae1b1a04c7f64cfb24838fa0d3e43e2ef106be6e8af0" ], "lib/charms/layer/nagios.py": [ "layer:nagios", @@ -2210,7 +2240,7 @@ "lib/charms/layer/snap.py": [ "layer:snap", "static", - "1a3a2a09bb5f2ea1b557354d09f6968cecb6b4204ded019e704203fb3391f7be" + "f278a3b06a1604e1c59f107d2ff3e9f5705e3c6c7be7a012c1a500d0fc8925df" ], "lib/charms/layer/status.py": [ "layer:status", @@ -2225,7 +2255,7 @@ "lib/charms/layer/vault_kv.py": [ "layer:vault-kv", "static", - "f34f0ae1d6b8f5d2811b1f4d6cd8edc4cdbe6e0aa5d3e9a31bbd8ba69e146fd8" + "bd902b2cbe0be6cab87e991066aa2d31a2aac429de2e168686469cd8a3ad6a46" ], "lib/charms/layer/vaultlocker.py": [ "layer:vaultlocker", @@ -2255,7 +2285,7 @@ "metadata.yaml": [ "kubernetes-master", "dynamic", - "44ac6a138bbab869dc02bc9888bf2e934c9c52af2a90a04ba4c7a4f46ee70bdc" + "246c439bfb9fb05310ff536411c37c644fe013506d2648a259d801a3f899bd43" ], "metrics.yaml": [ "kubernetes-master", @@ -2300,7 +2330,7 @@ "reactive/kubernetes_master.py": [ "kubernetes-master", "static", - "3e15e7d009b721b0dd6294f7fab4a8f59b4b4a217d6256960956c5c62842807a" + 
"59cc8fa74c76d0e33185e4733c569853871a090184227a2f1f09bd03bc9ad170" ], "reactive/kubernetes_master_worker_base.py": [ "layer:kubernetes-master-worker-base", @@ -2357,16 +2387,16 @@ "static", "c2d3977fa89d453f0f13a8a823621c44bb642ec7392d8b7462b631864f665029" ], + "templates/cdk.auth-webhook-secret.yaml": [ + "layer:kubernetes-common", + "static", + "efaf34c12a5c961fa7843199070945ba05717b3656a0f3acc3327f45334bcaec" + ], "templates/cdk.master.auth-webhook-conf.yaml": [ "kubernetes-master", "static", "11df8c0c1a4157e7a552b864188df1dcdc99153a8b359667b640937251bad678" ], - "templates/cdk.master.auth-webhook-secret.yaml": [ - "kubernetes-master", - "static", - "efaf34c12a5c961fa7843199070945ba05717b3656a0f3acc3327f45334bcaec" - ], "templates/cdk.master.auth-webhook.logrotate": [ "kubernetes-master", "static", @@ -2375,12 +2405,12 @@ "templates/cdk.master.auth-webhook.py": [ "kubernetes-master", "static", - "88c215f457d93edf2870b626ff5d0f9d8bb8d8df59507c42db68cf734f80f5c7" + "1e4958fa0273d5754e6fa9703ee4256bdc31ac0fc7d46ce85277e17f4b8c738d" ], "templates/cdk.master.auth-webhook.service": [ "kubernetes-master", "static", - "db47a820795f2c288e0bffa775b7bc28df9b9157157d8ff5dc8ac69f7911f057" + "960c8d38e928ef3f1c9f29958226656c6a339a4834b30664b032c10f98eb7f62" ], "templates/cdk.master.leader.file-watcher.path": [ "kubernetes-master", @@ -2482,11 +2512,6 @@ "static", "abb77f196e008fc636c254c89672bb889ca34a91103972c11a5e2e59aa608400" ], - "templates/rbd-persistent-volume.yaml": [ - "kubernetes-master", - "static", - "bdee575ef92912dda50d2e82aafab359168aac32a78de2bd9131bcb554669966" - ], "templates/service-always-restart.systemd-229.conf": [ "kubernetes-master", "static", @@ -2517,6 +2542,26 @@ "static", "7428fcfb91731d37be14a0f8d4c5923cc95a28bd28579c5a013928ab147b0beb" ], + "tests/functional/conftest.py": [ + "layer:kubernetes-common", + "static", + "fd53e0c38b4dda0c18096167889cd0d85b98b0a13225f9f8853261241e94078c" + ], + "tests/functional/test_k8s_common.py": [ + 
"layer:kubernetes-common", + "static", + "680a53724154771dd78422bbaf24b151788d86dd07960712c5d9e0d758499b50" + ], + "tests/unit/conftest.py": [ + "layer:kubernetes-common", + "static", + "fd53e0c38b4dda0c18096167889cd0d85b98b0a13225f9f8853261241e94078c" + ], + "tests/unit/test_k8s_common.py": [ + "layer:kubernetes-common", + "static", + "da9bcea8e75160311a4055c1cbf577b497ddd45dc00223c5f1667598f94d9be4" + ], "tox.ini": [ "layer:vaultlocker", "static", @@ -2525,17 +2570,12 @@ "version": [ "kubernetes-master", "dynamic", - "b14065491445a78e202c652a03eeb39145fa129707d518a602bd1f66517791a0" + "8b6f22cae011f81db35e9721985b8a6310f5d247c83e98368afd0c1ac7dce419" ], "wheelhouse.txt": [ "kubernetes-master", "dynamic", - "5e9ee16ba4e28c49c5c745ae19b6a149a01cca6c22c47feed0954c10f935ca7e" - ], - "wheelhouse/Flask-1.1.2.tar.gz": [ - "__pip__", - "dynamic", - "4efa1ae2d7c9865af48986de8aeb8504bf32c7f3d6fdc9353d34b21f4b127060" + "9308d42bcacaf5feb0e156848796f550fac48780743f72d41fd2cf75e877b67e" ], "wheelhouse/Jinja2-2.10.1.tar.gz": [ "layer:basic", @@ -2543,7 +2583,7 @@ "065c4f02ebe7f7cf559e49ee5a95fb800a9e4528727aec6f24402a5374c65013" ], "wheelhouse/MarkupSafe-1.1.1.tar.gz": [ - "__pip__", + "layer:basic", "dynamic", "29872e92839765e546828bb7754a68c418d927cd064fd4708fab9fe9c8bb116b" ], @@ -2557,65 +2597,110 @@ "dynamic", "cacecf0baa674d356641f1d406b8bff1d756d739c46b869a54de515d08e6fc9c" ], - "wheelhouse/Werkzeug-1.0.1.tar.gz": [ - "__pip__", + "wheelhouse/aiohttp-3.7.4.post0.tar.gz": [ + "kubernetes-master", "dynamic", - "6c80b1e5ad3665290ea39320b91e1be1e0d5f60652b964a3070216de83d2e47c" + "493d3299ebe5f5a7c66b9819eacdcfbbaaf1a8e84911ddffcdc48888497afecf" ], - "wheelhouse/certifi-2020.12.5.tar.gz": [ + "wheelhouse/async-timeout-3.0.1.tar.gz": [ "__pip__", "dynamic", - "1a4995114262bffbc2413b159f2a1a480c969de6e6eb13ee966d470af86af59c" + "0c3c816a028d47f659d6ff5c745cb2acf1f966da1fe5c19c77a70282b25f4c5f" + ], + "wheelhouse/attrs-21.2.0.tar.gz": [ + "__pip__", + "dynamic", + 
"ef6aaac3ca6cd92904cdd0d83f629a15f18053ec84e6432106f7a4d04ae4f5fb" + ], + "wheelhouse/cached-property-1.5.2.tar.gz": [ + "__pip__", + "dynamic", + "9fa5755838eecbb2d234c3aa390bd80fbd3ac6b6869109bfc1b499f7bd89a130" + ], + "wheelhouse/certifi-2021.10.8.tar.gz": [ + "__pip__", + "dynamic", + "78884e7c1d4b00ce3cea67b44566851c4343c120abd683433ce934a68ea58872" ], "wheelhouse/chardet-4.0.0.tar.gz": [ "__pip__", "dynamic", "0d6f53a15db4120f2b08c94f11e7d93d2c911ee118b6b30a04ec3ee8310179fa" ], - "wheelhouse/charmhelpers-0.20.21.tar.gz": [ + "wheelhouse/charmhelpers-0.20.23.tar.gz": [ "layer:basic", "dynamic", - "37dd06f9548724d38352d1eaf91216df9167066745774118481d40974599715c" + "59a9776594e91cd3e3e000043f8668b4d7b279422dbb17e320f01dc16385b80e" ], "wheelhouse/charms.reactive-1.4.1.tar.gz": [ "layer:basic", "dynamic", "bba21b4fd40b26c240c9ef2aa10c6fdf73592031c68591da4e7ccc46ca9cb616" ], - "wheelhouse/click-7.1.2.tar.gz": [ + "wheelhouse/charset-normalizer-2.0.7.tar.gz": [ "__pip__", "dynamic", - "d2b5255c7c6349bc1bd1e59e08cd12acbbd63ce649f2588755783aa94dfb6b1a" + "e019de665e2bcf9c2b64e2e5aa025fa991da8720daa3c1138cadd2fd1856aed0" ], "wheelhouse/gunicorn-20.1.0.tar.gz": [ "kubernetes-master", "dynamic", "e0a968b5ba15f8a328fdfd7ab1fcb5af4470c28aaf7e55df02a99bc13138e6e8" ], - "wheelhouse/hvac-0.10.10.tar.gz": [ + "wheelhouse/hvac-0.11.2.tar.gz": [ "layer:vault-kv", "dynamic", - "80888c009c7e310a34d480ce45fb33a44b479cb9b8a3f3c467b6ffcfff0569f4" + "f905c59d32d88d3f67571fe5a8a78de4659e04798ad809de439f667247d13626" ], - "wheelhouse/idna-2.10.tar.gz": [ + "wheelhouse/idna-3.3.tar.gz": [ "__pip__", "dynamic", - "b307872f855b18632ce0c21c5e45be78c0ea7ae4c15c828c20788b26921eb3f6" + "9d643ff0a55b762d5cdb124b8eaa99c66322e2157b69160bc32796e824360e6d" ], - "wheelhouse/itsdangerous-1.1.0.tar.gz": [ + "wheelhouse/idna-ssl-1.1.0.tar.gz": [ "__pip__", "dynamic", - "321b033d07f2a4136d3ec762eac9f16a10ccd60f53c0c91af90217ace7ba1f19" + 
"a933e3bb13da54383f9e8f35dc4f9cb9eb9b3b78c6b36f311254d6d0d92c6c7c" + ], + "wheelhouse/loadbalancer_interface-1.1.1.tar.gz": [ + "kubernetes-master", + "dynamic", + "c71d50bb66286d6e15a5f2975c0a316a3cd43c2042428258c96d1b4b95e5706b" + ], + "wheelhouse/marshmallow-3.14.0.tar.gz": [ + "__pip__", + "dynamic", + "bba1a940985c052c5cc7849f97da196ebc81f3b85ec10c56ef1f3228aa9cbe74" + ], + "wheelhouse/marshmallow-enum-1.5.1.tar.gz": [ + "__pip__", + "dynamic", + "38e697e11f45a8e64b4a1e664000897c659b60aa57bfa18d44e226a9920b6e58" + ], + "wheelhouse/multidict-5.2.0.tar.gz": [ + "__pip__", + "dynamic", + "0dd1c93edb444b33ba2274b66f63def8a327d607c6c790772f448a53b6ea59ce" ], "wheelhouse/netaddr-0.7.19.tar.gz": [ "layer:basic", "dynamic", "38aeec7cdd035081d3a4c306394b19d677623bf76fa0913f6695127c7753aefd" ], - "wheelhouse/netifaces-0.10.9.tar.gz": [ + "wheelhouse/netifaces-0.11.0.tar.gz": [ "layer:vault-kv", "dynamic", - "2dee9ffdd16292878336a58d04a20f0ffe95555465fee7c9bd23b3490ef2abf3" + "043a79146eb2907edf439899f262b3dfe41717d34124298ed281139a8b93ca32" + ], + "wheelhouse/ops-1.2.0.tar.gz": [ + "__pip__", + "dynamic", + "3deb00ad7952b203502290a79bf8c8ce9b70e4f34fec3307fd45133c97a45824" + ], + "wheelhouse/ops_reactive_interface-1.0.1.tar.gz": [ + "__pip__", + "dynamic", + "9ed351c42fc187299c23125975aa3dfee9f6aaae0c9d49bce8904ac079255dba" ], "wheelhouse/pbr-5.6.0.tar.gz": [ "__pip__", @@ -2632,15 +2717,15 @@ "dynamic", "0c9ccb99ab76025f2f0bbecf341d4656e9c1351db8cc8a03ccd62e318ab4b5c6" ], - "wheelhouse/pyaml-20.4.0.tar.gz": [ + "wheelhouse/pyaml-21.10.1.tar.gz": [ "__pip__", "dynamic", - "29a5c2a68660a799103d6949167bd6c7953d031449d08802386372de1db6ad71" + "c6519fee13bf06e3bb3f20cacdea8eba9140385a7c2546df5dbae4887f768383" ], - "wheelhouse/requests-2.25.1.tar.gz": [ + "wheelhouse/requests-2.26.0.tar.gz": [ "__pip__", "dynamic", - "27973dd4a904a4f13b263a19c866c13b92a39ed1c964655f025f3f8d3d75b804" + "b8aa58f8cf793ffd8782d3d8cb19e66ef36f7aba4353eec859e74678b01b07a7" ], 
"wheelhouse/setuptools-41.6.0.zip": [ "layer:basic", @@ -2652,25 +2737,35 @@ "dynamic", "70a4cf5584e966ae92f54a764e6437af992ba42ac4bca7eb37cc5d02b98ec40a" ], - "wheelhouse/six-1.15.0.tar.gz": [ + "wheelhouse/six-1.16.0.tar.gz": [ "__pip__", "dynamic", - "30639c035cdb23534cd4aa2dd52c3bf48f06e5f4a941509c8bafd8ce11080259" + "1e61c37477a1626458e36f7b1d82aa5c9b094fa4802892072e49de9c60c4c926" ], - "wheelhouse/tenacity-7.0.0.tar.gz": [ + "wheelhouse/tenacity-5.0.3.tar.gz": [ "layer:snap", "dynamic", - "5bd16ef5d3b985647fe28dfa6f695d343aa26479a04e8792b9d3c8f49e361ae1" + "24b7f302a1caa1801e58b39ea557129c095966e64e5b1ddad3c93a6cb033e38b" ], - "wheelhouse/urllib3-1.26.4.tar.gz": [ + "wheelhouse/typing_extensions-3.10.0.2.tar.gz": [ "__pip__", "dynamic", - "e7b021f7241115872f92f43c6508082facffbd1c048e3c6e2bb9c2a157e28937" + "49f75d16ff11f1cd258e1b988ccff82a3ca5570217d7ad8c5f48205dd99a677e" + ], + "wheelhouse/urllib3-1.26.7.tar.gz": [ + "__pip__", + "dynamic", + "4987c65554f7a2dbf30c18fd48778ef124af6fab771a377103da0585e2336ece" ], "wheelhouse/wheel-0.33.6.tar.gz": [ "layer:basic", "dynamic", "10c9da68765315ed98850f8e048347c3eb06dd81822dc2ab1d4fde9dc9702646" + ], + "wheelhouse/yarl-1.7.0.tar.gz": [ + "__pip__", + "dynamic", + "8e7ebaf62e19c2feb097ffb7c94deb0f0c9fab52590784c8cd679d30ab009162" ] } } \ No newline at end of file diff --git a/kubernetes-master/.github/workflows/main.yml b/kubernetes-master/.github/workflows/main.yml index ded79fa..27f47a8 100644 --- a/kubernetes-master/.github/workflows/main.yml +++ b/kubernetes-master/.github/workflows/main.yml @@ -2,8 +2,8 @@ name: Test Suite on: [pull_request] jobs: - lint: - name: Lint + lint-unit-wheelhouse: + name: Lint, Unit, Wheelhouse runs-on: ubuntu-latest strategy: matrix: @@ -18,26 +18,13 @@ jobs: - name: Install Dependencies run: | pip install tox - - name: Run lint + sudo snap install charm --classic + - name: Lint run: tox -vve lint - unit-test: - name: Unit Tests - runs-on: ubuntu-latest - strategy: - matrix: - 
python: [3.6, 3.7, 3.8, 3.9] - steps: - - name: Check out code - uses: actions/checkout@v2 - - name: Setup Python - uses: actions/setup-python@v2 - with: - python-version: ${{ matrix.python }} - - name: Install Dependencies - run: | - pip install tox - - name: Run test - run: tox -vve unit + - name: Unit Tests + run: tox -vve unit + - name: Validate Wheelhouse + run: tox -vve validate-wheelhouse integration-test: name: Integration test with VMWare runs-on: self-hosted @@ -49,8 +36,12 @@ jobs: uses: actions/setup-python@v2 with: python-version: 3.8 - - name: Install Dependencies - run: | - pip install tox + - name: Setup operator environment + uses: charmed-kubernetes/actions-operator@main + with: + provider: vsphere + credentials-yaml: ${{ secrets.CREDENTIALS_YAML }} + clouds-yaml: ${{ secrets.CLOUDS_YAML }} + bootstrap-options: "--model-default datastore=vsanDatastore --model-default primary-network=VLAN_2764" - name: Run test run: tox -e integration diff --git a/kubernetes-master/.travis/profile-update.yaml b/kubernetes-master/.travis/profile-update.yaml deleted file mode 100644 index 57f96eb..0000000 --- a/kubernetes-master/.travis/profile-update.yaml +++ /dev/null @@ -1,12 +0,0 @@ -config: {} -description: Default LXD profile - updated -devices: - eth0: - name: eth0 - parent: lxdbr0 - nictype: bridged - type: nic - root: - path: / - pool: default - type: disk diff --git a/kubernetes-master/actions.yaml b/kubernetes-master/actions.yaml index 8abf0cb..3dfa89f 100644 --- a/kubernetes-master/actions.yaml +++ b/kubernetes-master/actions.yaml @@ -38,34 +38,6 @@ "restart": "description": "Restart the Kubernetes master services on demand." -"create-rbd-pv": - "description": "Create RADOS Block Device (RDB) volume in Ceph and creates PersistentVolume.\ - \ Note this is deprecated on Kubernetes >= 1.10 in favor of CSI, where PersistentVolumes\ - \ are created dynamically to back PersistentVolumeClaims." 
- "params": - "name": - "type": "string" - "description": "Name the persistent volume." - "minLength": !!int "1" - "size": - "type": "integer" - "description": "Size in MB of the RBD volume." - "minimum": !!int "1" - "mode": - "type": "string" - "default": "ReadWriteOnce" - "description": "Access mode for the persistent volume." - "filesystem": - "type": "string" - "default": "xfs" - "description": "File system type to format the volume." - "skip-size-check": - "type": "boolean" - "default": !!bool "false" - "description": "Allow creation of overprovisioned RBD." - "required": - - "name" - - "size" "namespace-list": "description": "List existing k8s namespaces" "namespace-create": diff --git a/kubernetes-master/actions/create-rbd-pv b/kubernetes-master/actions/create-rbd-pv deleted file mode 100755 index 22a0c88..0000000 --- a/kubernetes-master/actions/create-rbd-pv +++ /dev/null @@ -1,330 +0,0 @@ -#!/usr/local/sbin/charm-env python3 - -# Copyright 2015 The Kubernetes Authors. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -from charms.reactive import is_state -from charmhelpers.core.templating import render -from charmhelpers.core.hookenv import action_get -from charmhelpers.core.hookenv import action_set -from charmhelpers.core.hookenv import action_fail -from subprocess import check_call -from subprocess import check_output -from subprocess import CalledProcessError -from tempfile import TemporaryDirectory -import json -import re -import os -import sys - -from charms.layer.kubernetes_master import install_ceph_common - - -os.environ['PATH'] += os.pathsep + os.path.join(os.sep, 'snap', 'bin') - - -def main(): - """Control logic to enlist Ceph RBD volumes as PersistentVolumes in - Kubernetes. This will invoke the validation steps, and only execute if - this script thinks the environment is 'sane' enough to provision volumes. - - :return: None - """ - # k8s >= 1.10 uses CSI and doesn't directly create persistent volumes. - if get_version('kube-apiserver') >= (1, 10): - print('This action is deprecated in favor of CSI creation of') - print('persistent volumes in Kubernetes >= 1.10. Just create the PVC') - print('and a PV will be created for you.') - action_fail('Deprecated, just create PVC.') - return - - # validate relationship pre-reqs before additional steps can be taken. 
- if not validate_relation(): - print('Failed ceph relationship check') - action_fail('Failed ceph relationship check') - return - - if not is_ceph_healthy(): - print('Ceph was not healthy.') - action_fail('Ceph was not healthy.') - return - - install_ceph_common() - - context = {} - - context['RBD_NAME'] = action_get_or_default('name').strip() - context['RBD_SIZE'] = action_get_or_default('size') - context['RBD_FS'] = action_get_or_default('filesystem').strip() - context['PV_MODE'] = action_get_or_default('mode').strip() - - # Ensure we're not exceeding available space in the pool - if not validate_space(context['RBD_SIZE']): - return - - # Ensure our parameters match - param_validation = validate_parameters(context['RBD_NAME'], - context['RBD_FS'], - context['PV_MODE']) - if not param_validation == 0: - return - - if not validate_unique_volume_name(context['RBD_NAME']): - action_fail('Volume name collision detected. Volume creation aborted.') - return - - context['monitors'] = get_monitors() - - # Invoke creation and format the mount device - create_rbd_volume(context['RBD_NAME'], - context['RBD_SIZE'], - context['RBD_FS']) - - # Create a temporary workspace to render our persistentVolume template, and - # enlist the RDB based PV we've just created - with TemporaryDirectory() as active_working_path: - temp_template = '{}/pv.yaml'.format(active_working_path) - render('rbd-persistent-volume.yaml', temp_template, context) - - cmd = ['kubectl', 'create', '-f', temp_template] - debug_command(cmd) - check_call(cmd) - - -def get_version(bin_name): - """Get the version of an installed Kubernetes binary. 
- - :param str bin_name: Name of binary - :return: 3-tuple version (maj, min, patch) - - Example:: - - >>> `get_version('kubelet') - (1, 6, 0) - - """ - cmd = '{} --version'.format(bin_name).split() - version_string = check_output(cmd).decode('utf-8') - return tuple(int(q) for q in re.findall("[0-9]+", version_string)[:3]) - - -def action_get_or_default(key): - ''' Convenience method to manage defaults since actions dont appear to - properly support defaults ''' - - value = action_get(key) - if value: - return value - elif key == 'filesystem': - return 'xfs' - elif key == 'size': - return 0 - elif key == 'mode': - return "ReadWriteOnce" - elif key == 'skip-size-check': - return False - else: - return '' - - -def create_rbd_volume(name, size, filesystem): - ''' Create the RBD volume in Ceph. Then mount it locally to format it for - the requested filesystem. - - :param name - The name of the RBD volume - :param size - The size in MB of the volume - :param filesystem - The type of filesystem to format the block device - ''' - - # Create the rbd volume - # $ rbd create foo --size 50 --image-feature layering - command = ['rbd', 'create', '--size', '{}'.format(size), '--image-feature', - 'layering', name] - debug_command(command) - check_call(command) - - # Lift the validation sequence to determine if we actually created the - # rbd volume - if validate_unique_volume_name(name): - # we failed to create the RBD volume. whoops - action_fail('RBD Volume not listed after creation.') - print('Ceph RBD volume {} not found in rbd list'.format(name)) - # hack, needs love if we're killing the process thread this deep in - # the call stack. 
- sys.exit(0) - - mount = ['rbd', 'map', name] - debug_command(mount) - device_path = check_output(mount).strip() - - try: - format_command = ['mkfs.{}'.format(filesystem), device_path] - debug_command(format_command) - check_call(format_command) - unmount = ['rbd', 'unmap', name] - debug_command(unmount) - check_call(unmount) - except CalledProcessError: - print('Failed to format filesystem and unmount. RBD created but not' - ' enlisted.') - action_fail('Failed to format filesystem and unmount.' - ' RDB created but not enlisted.') - - -def is_ceph_healthy(): - ''' Probe the remote ceph cluster for health status ''' - command = ['ceph', 'health'] - debug_command(command) - health_output = check_output(command) - if b'HEALTH_OK' in health_output: - return True - else: - return False - - -def get_monitors(): - ''' Parse the monitors out of /etc/ceph/ceph.conf ''' - found_hosts = [] - # This is kind of hacky. We should be piping this in from juju relations - with open('/etc/ceph/ceph.conf', 'r') as ceph_conf: - for line in ceph_conf.readlines(): - if 'mon host' in line: - # strip out the key definition - hosts = line.lstrip('mon host = ').split(' ') - for host in hosts: - found_hosts.append(host) - return found_hosts - - -def get_available_space(): - ''' Determine the space available in the RBD pool. Throw an exception if - the RBD pool ('rbd') isn't found. ''' - command = 'ceph df -f json'.split() - debug_command(command) - out = check_output(command).decode('utf-8') - data = json.loads(out) - for pool in data['pools']: - if pool['name'] == 'rbd': - return int(pool['stats']['max_avail'] / (1024 * 1024)) - raise UnknownAvailableSpaceException('Unable to determine available space.') # noqa - - -def validate_unique_volume_name(name): - ''' Poll the CEPH-MON services to determine if we have a unique rbd volume - name to use. If there is naming collisions, block the request for volume - provisioning. 
- - :param name - The name of the RBD volume - ''' - - command = ['rbd', 'list'] - debug_command(command) - raw_out = check_output(command) - - # Split the output on newlines - # output spec: - # $ rbd list - # foo - # foobar - volume_list = raw_out.decode('utf-8').splitlines() - - for volume in volume_list: - if volume.strip() == name: - return False - - return True - - -def validate_relation(): - ''' Determine if we are related to ceph. If we are not, we should - note this in the action output and fail this action run. We are relying - on specific files in specific paths to be placed in order for this function - to work. This method verifies those files are placed. ''' - - # TODO: Validate that the ceph-common package is installed - if not is_state('ceph-storage.available'): - message = 'Failed to detect connected ceph-mon' - print(message) - action_set({'pre-req.ceph-relation': message}) - return False - - if not os.path.isfile('/etc/ceph/ceph.conf'): - message = 'No Ceph configuration found in /etc/ceph/ceph.conf' - print(message) - action_set({'pre-req.ceph-configuration': message}) - return False - - # TODO: Validate ceph key - - return True - - -def validate_space(size): - if action_get_or_default('skip-size-check'): - return True - available_space = get_available_space() - if available_space < size: - msg = 'Unable to allocate RBD of size {}MB, only {}MB are available.' - action_fail(msg.format(size, available_space)) - return False - return True - - -def validate_parameters(name, fs, mode): - ''' Validate the user inputs to ensure they conform to what the - action expects. This method will check the naming characters used - for the rbd volume, ensure they have selected a fstype we are expecting - and the mode against our whitelist ''' - name_regex = '^[a-zA-z0-9][a-zA-Z0-9|-]' - - fs_whitelist = ['xfs', 'ext4'] - - # see http://kubernetes.io/docs/user-guide/persistent-volumes/#access-modes - # for supported operations on RBD volumes. 
- mode_whitelist = ['ReadWriteOnce', 'ReadOnlyMany'] - - fails = 0 - - if not re.match(name_regex, name): - message = 'Validation failed for RBD volume-name' - action_fail(message) - fails = fails + 1 - action_set({'validation.name': message}) - - if fs not in fs_whitelist: - message = 'Validation failed for file system' - action_fail(message) - fails = fails + 1 - action_set({'validation.filesystem': message}) - - if mode not in mode_whitelist: - message = "Validation failed for mode" - action_fail(message) - fails = fails + 1 - action_set({'validation.mode': message}) - - return fails - - -def debug_command(cmd): - ''' Print a debug statement of the command invoked ''' - print("Invoking {}".format(cmd)) - - -class UnknownAvailableSpaceException(Exception): - pass - - -if __name__ == '__main__': - main() diff --git a/kubernetes-master/actions/user-create b/kubernetes-master/actions/user-create index 3e6828c..7e7785a 100755 --- a/kubernetes-master/actions/user-create +++ b/kubernetes-master/actions/user-create @@ -32,7 +32,7 @@ def protect_resources(name): def user_list(): """Return a dict of 'username: secret_id' for Charmed Kubernetes users.""" - secrets = layer.kubernetes_master.get_secret_names() + secrets = layer.kubernetes_common.get_secret_names() action_set({"users": ", ".join(list(secrets))}) return secrets @@ -57,7 +57,7 @@ def user_create(): # TODO: make the token format less magical so it doesn't get out of # sync with the function that creates secrets in k8s-master.py. 
token = "{}::{}".format(user, layer.kubernetes_master.token_generator()) - if not layer.kubernetes_master.create_secret(token, user, user, groups): + if not layer.kubernetes_common.create_secret(token, user, user, groups): action_fail("Failed to create secret for: {}".format(user)) return diff --git a/kubernetes-master/actions/user-delete b/kubernetes-master/actions/user-delete index 3e6828c..7e7785a 100755 --- a/kubernetes-master/actions/user-delete +++ b/kubernetes-master/actions/user-delete @@ -32,7 +32,7 @@ def protect_resources(name): def user_list(): """Return a dict of 'username: secret_id' for Charmed Kubernetes users.""" - secrets = layer.kubernetes_master.get_secret_names() + secrets = layer.kubernetes_common.get_secret_names() action_set({"users": ", ".join(list(secrets))}) return secrets @@ -57,7 +57,7 @@ def user_create(): # TODO: make the token format less magical so it doesn't get out of # sync with the function that creates secrets in k8s-master.py. token = "{}::{}".format(user, layer.kubernetes_master.token_generator()) - if not layer.kubernetes_master.create_secret(token, user, user, groups): + if not layer.kubernetes_common.create_secret(token, user, user, groups): action_fail("Failed to create secret for: {}".format(user)) return diff --git a/kubernetes-master/actions/user-list b/kubernetes-master/actions/user-list index 3e6828c..7e7785a 100755 --- a/kubernetes-master/actions/user-list +++ b/kubernetes-master/actions/user-list @@ -32,7 +32,7 @@ def protect_resources(name): def user_list(): """Return a dict of 'username: secret_id' for Charmed Kubernetes users.""" - secrets = layer.kubernetes_master.get_secret_names() + secrets = layer.kubernetes_common.get_secret_names() action_set({"users": ", ".join(list(secrets))}) return secrets @@ -57,7 +57,7 @@ def user_create(): # TODO: make the token format less magical so it doesn't get out of # sync with the function that creates secrets in k8s-master.py. 
token = "{}::{}".format(user, layer.kubernetes_master.token_generator()) - if not layer.kubernetes_master.create_secret(token, user, user, groups): + if not layer.kubernetes_common.create_secret(token, user, user, groups): action_fail("Failed to create secret for: {}".format(user)) return diff --git a/kubernetes-master/actions/user_actions.py b/kubernetes-master/actions/user_actions.py index 3e6828c..7e7785a 100755 --- a/kubernetes-master/actions/user_actions.py +++ b/kubernetes-master/actions/user_actions.py @@ -32,7 +32,7 @@ def protect_resources(name): def user_list(): """Return a dict of 'username: secret_id' for Charmed Kubernetes users.""" - secrets = layer.kubernetes_master.get_secret_names() + secrets = layer.kubernetes_common.get_secret_names() action_set({"users": ", ".join(list(secrets))}) return secrets @@ -57,7 +57,7 @@ def user_create(): # TODO: make the token format less magical so it doesn't get out of # sync with the function that creates secrets in k8s-master.py. token = "{}::{}".format(user, layer.kubernetes_master.token_generator()) - if not layer.kubernetes_master.create_secret(token, user, user, groups): + if not layer.kubernetes_common.create_secret(token, user, user, groups): action_fail("Failed to create secret for: {}".format(user)) return diff --git a/kubernetes-master/config.yaml b/kubernetes-master/config.yaml index ba7cf05..e776521 100644 --- a/kubernetes-master/config.yaml +++ b/kubernetes-master/config.yaml @@ -174,15 +174,6 @@ Audit webhook config passed to kube-apiserver via --audit-webhook-config-file. For more info, please refer to the upstream documentation at https://kubernetes.io/docs/tasks/debug-application-cluster/audit/ - "addons-registry": - "type": "string" - "default": "" - "description": | - Specify the docker registry to use when applying addons. - - DEPRECATED in 1.15: Use the broader 'image-registry' config option instead. 
If both - options are set, 'addons-registry' will be used to configure the cdk-addons snap until - v1.17 is released. After that, the 'addons-registry' option will have no effect. "image-registry": "type": "string" "default": "rocks.canonical.com:443/cdk" @@ -221,8 +212,10 @@ "service-cidr": "type": "string" "default": "10.152.183.0/24" - "description": "CIDR to user for Kubernetes services. Cannot be changed after\ - \ deployment." + "description": | + CIDR to use for Kubernetes services. After deployment it is + only possible to increase the size of the IP range. It is not possible to + change or shrink the address range after deployment. "allow-privileged": "type": "string" "default": "auto" @@ -243,7 +236,7 @@ will not be loaded. "channel": "type": "string" - "default": "1.21/stable" + "default": "1.23/edge" "description": | Snap channel to install Kubernetes master services from "client_password": @@ -418,26 +411,6 @@ kubernetes-worker:kube-api-endpoint. If using the kubeapi-load-balancer, see the loadbalancer-ips configuration variable on the kubeapi-load-balancer charm. "default": "" - "monitoring-storage": - "type": "string" - "description": | - Configuration to set up volume for influxdb/grafana. - e.g - influxdb: - hostPath: - path: /influxdb - type: Directory - grafana: - hostPath: - path: /grafana - type: Directory - - DEPRECATED: this option has no effect on Kubernetes 1.18 and above. - "default": | - influxdb: - emptyDir: {} - grafana: - emptyDir: {} "default-cni": "type": "string" "description": | @@ -463,3 +436,13 @@ respond with appropriate authentication details. For more info, please refer to the upstream documentation at https://kubernetes.io/docs/reference/access-authn-authz/authentication/#webhook-token-authentication + "pod-security-policy": + "type": "string" + "default": "" + "description": |- + Default RBAC pod security policy [0] and privileged cluster roles formatted + as a YAML file as a string. 
+ A good example of a PSP policy can be found here [1]. + + [0] https://kubernetes.io/docs/concepts/policy/pod-security-policy/ + [1] https://github.com/kubernetes/examples/blob/master/staging/podsecuritypolicy/rbac/policies.yaml diff --git a/kubernetes-master/docs/README b/kubernetes-master/docs/README new file mode 100644 index 0000000..9973bb8 --- /dev/null +++ b/kubernetes-master/docs/README @@ -0,0 +1 @@ +This docs directory is currently experimental. Please do not make changes to the docs here as any edits may be lost diff --git a/kubernetes-master/docs/index.md b/kubernetes-master/docs/index.md new file mode 100644 index 0000000..f644122 --- /dev/null +++ b/kubernetes-master/docs/index.md @@ -0,0 +1,832 @@ + + +This charm is an encapsulation of the Kubernetes master processes and the +operations to run on any cloud for the entire lifecycle of the cluster. + +This charm is built from other charm layers using the Juju reactive framework. +The other layers focus on specific subset of operations making this layer +specific to operations of Kubernetes master processes. + +# Deployment + +This charm is not fully functional when deployed by itself. It requires other +charms to model a complete Kubernetes cluster. A Kubernetes cluster needs a +distributed key value store such as [Etcd](https://coreos.com/etcd/) and the +kubernetes-worker charm which delivers the Kubernetes node services. A cluster +requires a Software Defined Network (SDN), a Container Runtime such as +[containerd](https://jaas.ai/u/containers/containerd), and Transport Layer +Security (TLS) so the components in a cluster communicate securely. + +Please take a look at the [Charmed Kubernetes]( https://jaas.ai/charmed-kubernetes) +or the [Kubernetes core](https://jaas.ai/kubernetes-core) bundles for +examples of complete models of Kubernetes clusters. 
+ +# Resources + +The kubernetes-master charm takes advantage of the [Juju Resources](https://jaas.ai/docs/juju-resources) +feature to deliver the Kubernetes software. + +In deployments on public clouds the Charm Store provides the resource to the +charm automatically with no user intervention. Some environments with strict +firewall rules may not be able to contact the Charm Store. In these network +restricted environments the resource can be uploaded to the model by the Juju +operator. + +#### Snap Refresh + +The kubernetes resources used by this charm are snap packages. When not +specified during deployment, these resources come from the public store. By +default, the `snapd` daemon will refresh all snaps installed from the store +four (4) times per day. A charm configuration option is provided for operators +to control this refresh frequency. + +>NOTE: this is a global configuration option and will affect the refresh +time for all snaps installed on a system. + +Examples: + +```sh +## refresh kubernetes-master snaps every tuesday +juju config kubernetes-master snapd_refresh="tue" + +## refresh snaps at 11pm on the last (5th) friday of the month +juju config kubernetes-master snapd_refresh="fri5,23:00" + +## delay the refresh as long as possible +juju config kubernetes-master snapd_refresh="max" + +## use the system default refresh timer +juju config kubernetes-master snapd_refresh="" +``` + +For more information, see the [snap documentation](/kubernetes/docs/snap-refresh). + +## Configuration + +This charm supports some configuration options to set up a Kubernetes cluster +that works in your environment, detailed in the section below. + +For some specific Kubernetes service configuration tasks, please also see the +section on [configuring K8s services](#k8s-services). 
+ + + + + + +| name | type | Default | Description | +|------|--------|--------------|-------------------------------------------| +| allow-privileged | string | auto | [See notes](#allow-privileged-description) | +| api-extra-args | string | | [See notes](#api-extra-args-description) | +| audit-policy | string | [See notes](#audit-policy-default) | Audit policy passed to kube-apiserver via --audit-policy-file. For more info, please refer to the upstream documentation at https://kubernetes.io/docs/tasks/debug-application-cluster/audit/ | +| audit-webhook-config | string | | Audit webhook config passed to kube-apiserver via --audit-webhook-config-file. For more info, please refer to the upstream documentation at https://kubernetes.io/docs/tasks/debug-application-cluster/audit/ | +| authorization-mode | string | AlwaysAllow | Comma separated authorization modes. Allowed values are "RBAC", "Node", "Webhook", "ABAC", "AlwaysDeny" and "AlwaysAllow". | +| channel | string | 1.17/stable | Snap channel to install Kubernetes master services from | +| client_password | string | | Password to be used for admin user (leave empty for random password). | +| controller-manager-extra-args | string | | [See notes](#controller-manager-extra-args-description) | +| dashboard-auth | string | auto | [See notes](#dashboard-auth-description) | +| default-storage | string | auto | The storage class to make the default storage class. Allowed values are "auto", "none", "ceph-xfs", "ceph-ext4". Note: Only works in Kubernetes >= 1.10 | +| dns-provider | string | auto | [See notes](#dns-provider-description) | +| dns_domain | string | cluster.local | The local domain for cluster dns | +| enable-dashboard-addons | boolean | True | Deploy the Kubernetes Dashboard and Heapster addons | +| enable-keystone-authorization | boolean | False | If true and the Keystone charm is related, users will authorize against the Keystone server. 
Note that if related, users will always authenticate against Keystone. | +| enable-metrics | boolean | True | If true the metrics server for Kubernetes will be deployed onto the cluster. | +| enable-nvidia-plugin | string | auto | Load the nvidia device plugin daemonset. Supported values are "auto" and "false". When "auto", the daemonset will be loaded only if GPUs are detected. When "false" the nvidia device plugin will not be loaded. | +| extra_packages | string | | Space separated list of extra deb packages to install. | +| extra_sans | string | | Space-separated list of extra SAN entries to add to the x509 certificate created for the master nodes. | +| ha-cluster-dns | string | | DNS entry to use with the HA Cluster subordinate charm. Mutually exclusive with ha-cluster-vip. | +| ha-cluster-vip | string | | Virtual IP for the charm to use with the HA Cluster subordinate charm Mutually exclusive with ha-cluster-dns. Multiple virtual IPs are separated by spaces. | +| image-registry | string | [See notes](#image-registry-default) | Container image registry to use for CDK. This includes addons like the Kubernetes dashboard, metrics server, ingress, and dns along with non-addon images including the pause container and default backend image. | +| install_keys | string | | [See notes](#install_keys-description) | +| install_sources | string | | [See notes](#install_sources-description) | +| keystone-policy | string | [See notes](#keystone-policy-default) | Policy for Keystone authorization. This is used when a Keystone charm is related to kubernetes-master in order to provide authorization for Keystone users on the Kubernetes cluster. | +| keystone-ssl-ca | string | | Keystone certificate authority encoded in base64 for securing communications to Keystone. 
For example: `juju config kubernetes-master keystone-ssl-ca=$(base64 /path/to/ca.crt)` | +| loadbalancer-ips | string | | [See notes](#loadbalancer-ips-description) | +| nagios_context | string | juju | [See notes](#nagios_context-description) | +| nagios_servicegroups | string | | A comma-separated list of nagios servicegroups. If left empty, the nagios_context will be used as the servicegroup | +| package_status | string | install | The status of service-affecting packages will be set to this value in the dpkg database. Valid values are "install" and "hold". | +| proxy-extra-args | string | | [See notes](#proxy-extra-args-description) | +| require-manual-upgrade | boolean | True | When true, master nodes will not be upgraded until the user triggers it manually by running the upgrade action. | +| scheduler-extra-args | string | | [See notes](#scheduler-extra-args-description) | +| service-cidr | string | 10.152.183.0/24 | CIDR to user for Kubernetes services. Cannot be changed after deployment. | +| snap_proxy | string | | DEPRECATED. Use snap-http-proxy and snap-https-proxy model configuration settings. HTTP/HTTPS web proxy for Snappy to use when accessing the snap store. | +| snap_proxy_url | string | | DEPRECATED. Use snap-store-proxy model configuration setting. The address of a Snap Store Proxy to use for snaps e.g. http://snap-proxy.example.com | +| snapd_refresh | string | max | [See notes](#snapd_refresh-description) | +| storage-backend | string | auto | The storage backend for kube-apiserver persistence. Can be "etcd2", "etcd3", or "auto". Auto mode will select etcd3 on new installations, or etcd2 on upgrades. | +| sysctl | string | [See notes](#sysctl-default) | [See notes](#sysctl-description) | + +--- + +### allow-privileged + + + +**Description:** + +Allow kube-apiserver to run in privileged mode. Supported values are +"true", "false", and "auto". If "true", kube-apiserver will run in +privileged mode by default. 
If "false", kube-apiserver will never run in +privileged mode. If "auto", kube-apiserver will not run in privileged +mode by default, but will switch to privileged mode if gpu hardware is +detected on a worker node. + +[Back to table](#table-allow-privileged) + + +### api-extra-args + + + +**Description:** + +Space separated list of flags and key=value pairs that will be passed as arguments to +kube-apiserver. For example a value like this: + +``` + runtime-config=batch/v2alpha1=true profiling=true +``` + +will result in kube-apiserver being run with the following options: + --runtime-config=batch/v2alpha1=true --profiling=true + +[Back to table](#table-api-extra-args) + + +### audit-policy + + + +**Default:** + +``` +apiVersion: audit.k8s.io/v1beta1 +kind: Policy +rules: +# Don't log read-only requests from the apiserver +- level: None + users: ["system:apiserver"] + verbs: ["get", "list", "watch"] +# Don't log kube-proxy watches +- level: None + users: ["system:kube-proxy"] + verbs: ["watch"] + resources: + - resources: ["endpoints", "services"] +# Don't log nodes getting their own status +- level: None + userGroups: ["system:nodes"] + verbs: ["get"] + resources: + - resources: ["nodes"] +# Don't log kube-controller-manager and kube-scheduler getting endpoints +- level: None + users: ["system:unsecured"] + namespaces: ["kube-system"] + verbs: ["get"] + resources: + - resources: ["endpoints"] +# Log everything else at the Request level. +- level: Request + omitStages: + - RequestReceived + +``` + + +[Back to table](#table-audit-policy) + + +### controller-manager-extra-args + + + +**Description:** + +Space separated list of flags and key=value pairs that will be passed as arguments to +kube-controller-manager. 
For example a value like this: + +``` + runtime-config=batch/v2alpha1=true profiling=true +``` + +will result in kube-controller-manager being run with the following options: + --runtime-config=batch/v2alpha1=true --profiling=true + +[Back to table](#table-controller-manager-extra-args) + + +### dashboard-auth + + + +**Description:** + +Method of authentication for the Kubernetes dashboard. Allowed values are "auto", +"basic", and "token". If set to "auto", basic auth is used unless Keystone is +related to kubernetes-master, in which case token auth is used. + +[Back to table](#table-dashboard-auth) + + +### dns-provider + + + +**Description:** + +DNS provider addon to use. Can be "auto", "core-dns", "kube-dns", or +"none". + +CoreDNS is only supported on Kubernetes 1.14+. + +When set to "auto", the behavior is as follows: +- New deployments of Kubernetes 1.14+ will use CoreDNS +- New deployments of Kubernetes 1.13 or older will use KubeDNS +- Upgraded deployments will continue to use whichever provider was +previously used. + +[Back to table](#table-dns-provider) + + +### image-registry + + + +**Default:** + +``` +rocks.canonical.com:443/cdk +``` + + +[Back to table](#table-image-registry) + + +### install_keys + + + +**Description:** + +List of signing keys for install_sources package sources, per charmhelpers standard format (a yaml list of strings encoded as a string). The keys should be the full ASCII armoured GPG public keys. While GPG key ids are also supported and looked up on a keyserver, operators should be aware that this mechanism is insecure. null can be used if a standard package signing key is used that will already be installed on the machine, and for PPA sources where the package signing key is securely retrieved from Launchpad. + +[Back to table](#table-install_keys) + + +### install_sources + + + +**Description:** + +List of extra apt sources, per charm-helpers standard format (a yaml list of strings encoded as a string). 
Each source may be either a line that can be added directly to sources.list(5), or in the form ppa:/ for adding Personal Package Archives, or a distribution component to enable. + +[Back to table](#table-install_sources) + + +### keystone-policy + + + +**Default:** + +``` +apiVersion: v1 +kind: ConfigMap +metadata: + name: k8s-auth-policy + namespace: kube-system + labels: + k8s-app: k8s-keystone-auth +data: + policies: | + [ + { + "resource": { + "verbs": ["get", "list", "watch"], + "resources": ["*"], + "version": "*", + "namespace": "*" + }, + "match": [ + { + "type": "role", + "values": ["k8s-viewers"] + }, + { + "type": "project", + "values": ["k8s"] + } + ] + }, + { + "resource": { + "verbs": ["*"], + "resources": ["*"], + "version": "*", + "namespace": "default" + }, + "match": [ + { + "type": "role", + "values": ["k8s-users"] + }, + { + "type": "project", + "values": ["k8s"] + } + ] + }, + { + "resource": { + "verbs": ["*"], + "resources": ["*"], + "version": "*", + "namespace": "*" + }, + "match": [ + { + "type": "role", + "values": ["k8s-admins"] + }, + { + "type": "project", + "values": ["k8s"] + } + ] + } + ] + +``` + + +[Back to table](#table-keystone-policy) + + +### loadbalancer-ips + + + +**Description:** + +Space separated list of IP addresses of loadbalancers in front of the control plane. +These can be either virtual IP addresses that have been floated in front of the control +plane or the IP of a loadbalancer appliance such as an F5. Workers will alternate IP +addresses from this list to distribute load - for example If you have 2 IPs and 4 workers, +each IP will be used by 2 workers. Note that this will only work if kubeapi-load-balancer +is not in use and there is a relation between kubernetes-master:kube-api-endpoint and +kubernetes-worker:kube-api-endpoint. If using the kubeapi-load-balancer, see the +loadbalancer-ips configuration variable on the kubeapi-load-balancer charm. 
+ +[Back to table](#table-loadbalancer-ips) + + +### nagios_context + + + +**Description:** + +Used by the nrpe subordinate charms. +A string that will be prepended to instance name to set the host name +in nagios. So for instance the hostname would be something like: + +``` + juju-myservice-0 +``` + +If you're running multiple environments with the same services in them +this allows you to differentiate between them. + +[Back to table](#table-nagios_context) + + +### proxy-extra-args + + + +**Description:** + +Space separated list of flags and key=value pairs that will be passed as arguments to +kube-proxy. For example a value like this: + +``` + runtime-config=batch/v2alpha1=true profiling=true +``` + +will result in kube-proxy being run with the following options: + --runtime-config=batch/v2alpha1=true --profiling=true + +[Back to table](#table-proxy-extra-args) + + +### scheduler-extra-args + + + +**Description:** + +Space separated list of flags and key=value pairs that will be passed as arguments to +kube-scheduler. For example a value like this: + +``` + runtime-config=batch/v2alpha1=true profiling=true +``` + +will result in kube-scheduler being run with the following options: + --runtime-config=batch/v2alpha1=true --profiling=true + +[Back to table](#table-scheduler-extra-args) + + +### snapd_refresh + + + +**Description:** + +How often snapd handles updates for installed snaps. Setting an empty +string will check 4x per day. Set to "max" to delay the refresh as long +as possible. 
You may also set a custom string as described in the +'refresh.timer' section here: + https://forum.snapcraft.io/t/system-options/87 + +[Back to table](#table-snapd_refresh) + + +### sysctl + + + +**Default:** + +``` +{ net.ipv4.conf.all.forwarding : 1, net.ipv4.neigh.default.gc_thresh1 : 128, net.ipv4.neigh.default.gc_thresh2 : 28672, net.ipv4.neigh.default.gc_thresh3 : 32768, net.ipv6.neigh.default.gc_thresh1 : 128, net.ipv6.neigh.default.gc_thresh2 : 28672, net.ipv6.neigh.default.gc_thresh3 : 32768, fs.inotify.max_user_instances : 8192, fs.inotify.max_user_watches: 1048576 } +``` + + +[Back to table](#table-sysctl) + + + +**Description:** + +YAML formatted associative array of sysctl values, e.g.: +'{kernel.pid_max : 4194303 }'. Note that kube-proxy handles +the conntrack settings. The proper way to alter them is to +use the proxy-extra-args config to set them, e.g.: + +``` + juju config kubernetes-master proxy-extra-args="conntrack-min=1000000 conntrack-max-per-core=250000" + juju config kubernetes-worker proxy-extra-args="conntrack-min=1000000 conntrack-max-per-core=250000" +``` + +The proxy-extra-args conntrack-min and conntrack-max-per-core can be set to 0 to ignore +kube-proxy's settings and use the sysctl settings instead. Note the fundamental difference between +the setting of conntrack-max-per-core vs nf_conntrack_max. + +[Back to table](#table-sysctl) + + + + + + +# Configuring K8s services + +**Charmed Kubernetes** ships with sensible, tested default configurations to +ensure a reliable Kubernetes experience, but of course these can be changed to +reflect the purpose and resources of your cluster. +The configuration section above details all available configuration options, +this section deals with specific, commonly used settings. +You may wish to also read the [Addons page][] for information on the extra +services installed with **Charmed Kubernetes**. 
+ + +## IPVS (IP Virtual Server) + +IPVS implements transport-layer load balancing as part of the Linux kernel, and +can be used by the `kube-proxy` service to handle service routing. By default +`kube-proxy` uses a solution based on iptables, but this can cause a lot of +overhead in systems with large numbers of nodes. There is more information on +this in the upstream Kubernetes [IPVS deep dive][] documentation. + +IPVS is an extra option for kube-proxy, and can be enabled by changing the +configuration: + +``` +juju config kubernetes-master proxy-extra-args="proxy-mode=ipvs" +``` + +It is also necessary to change this configuration option on the worker: + +``` +juju config kubernetes-worker proxy-extra-args="proxy-mode=ipvs" +``` + +## Admission controls + +As with other aspects of the Kubernetes API, admission controls can be +enabled by adding extra values to the charm's +[api-extra-args](#api-extra-args-description) configuration. + +For admission controls, it may be useful to refer to the +[Kubernetes blog][blog-admission] for more information on the options, but +for example, to add the `PodSecurityPolicy` admission controller: + +1. Check any current config settings for `api-extra-args` (there are none by default): + ```bash + juju config kubernetes-master api-extra-args + ``` +2. Append the desired config option to the previous output and apply: + ```bash + juju config kubernetes-master api-extra-args="enable-admission-plugins=PodSecurityPolicy" + ``` + +Note that prior to Kubernetes 1.16 (kubernetes-master revision 778), the config +setting was `admission-control`, rather than `enable-admission-plugins`. + + +## Adding SANs and certificate regeneration + +As explained in the [Certificates and trust overview][certs-and-trust], the +[`extra_sans`](#table-extra_sans) configuration settings can be used to add +SANs and regenerate x509 certificate(s) for the API server running on the +Kubernetes master node(s), and for the load balancer. 
When this configuration is +changed, the master node(s) will regenerate its certificate and restart the API +server to update the certificate used for communication. Note: This is +disruptive and restarts the API server. + +The process is the same for both the `kubernetes-master` and the +`kubeapi-load-balancer`. The configuration option takes a space-separated list +of extra entries: + +```bash +juju config kubernetes-master extra_sans="master.mydomain.com lb.mydomain.com" +juju config kubeapi-load-balancer extra_sans="master.mydomain.com lb.mydomain.com" +``` +To clear the entries out of the certificate, use an empty string: + +```bash +juju config kubernetes-master extra_sans="" +juju config kubeapi-load-balancer extra_sans="" +``` + +## DNS for the cluster + +The DNS add-on allows pods to have DNS names in addition to IP addresses. +The Kubernetes cluster DNS server (based on the SkyDNS library) supports +forward lookups (A records), service lookups (SRV records) and reverse IP +address lookups (PTR records). More information about the DNS can be obtained +from the [Kubernetes DNS admin guide](http://kubernetes.io/docs/admin/dns/). + +# Actions + + + + +You can run an action with the following + +```bash +juju run-action kubernetes-master ACTION [parameters] [--wait] +``` +
+
+
+ apply-manifest +
+
+
+

+ Apply JSON formatted Kubernetes manifest to cluster +

+
+
+
+
+
+

+ This action has the following parameters: +

+
+
json
+

+ The content of the manifest to deploy in JSON format +

+

+ Default: +


+
+
+
+
+
+
+ cis-benchmark +
+
+
+

+ Run the CIS Kubernetes Benchmark against snap-based components. +

+
+
+
+
+
+

+ This action has the following parameters: +

+
+
apply
+

+ Apply remediations to address benchmark failures. The default, 'none', will not attempt to fix any reported failures. Set to 'conservative' to resolve simple failures. Set to 'dangerous' to attempt to resolve all failures. Note: Applying any remediation may result in an unusable cluster. +

+

+ Default: none +


+
config
+

+ Archive containing configuration files to use when running kube-bench. The default value is known to be compatible with snap components. When using a custom URL, append '#<hash_type>=<checksum>' to verify the archive integrity when downloaded. +

+

+ Default: https://github.com/charmed-kubernetes/kube-bench-config/archive/cis-1.5.zip#sha1=cb8e78712ee5bfeab87d0ed7c139a83e88915530 +


+
release
+

+ Set the kube-bench release to run. If set to 'upstream', the action will compile and use a local kube-bench binary built from the master branch of the upstream repository: https://github.com/aquasecurity/kube-bench This value may also be set to an accessible archive containing a pre-built kube-bench binary, for example: https://github.com/aquasecurity/kube-bench/releases/download/v0.0.34/kube-bench_0.0.34_linux_amd64.tar.gz#sha256=f96d1fcfb84b18324f1299db074d41ef324a25be5b944e79619ad1a079fca077 +

+

+ Default: https://github.com/aquasecurity/kube-bench/releases/download/v0.2.3/kube-bench_0.2.3_linux_amd64.tar.gz#sha256=429a1db271689aafec009434ded1dea07a6685fee85a1deea638097c8512d548 +


+
+
+
+
+
+
+ debug +
+
+
+

+ Collect debug data +

+
+
+
+
+
+
+ get-kubeconfig +
+
+
+

+ Retrieve Kubernetes cluster config, including credentials +

+
+
+
+
+
+
+ namespace-create +
+
+
+

+ Create new namespace +

+
+
+
+
+
+

+ This action has the following parameters: +

+
+
name
+

+ Namespace name eg. staging +

+

+ Default: +


+
+
+
+
+
+
+ namespace-delete +
+
+
+

+ Delete namespace +

+
+
+
+
+
+

+ This action has the following parameters: +

+
+
name
+

+ Namespace name eg. staging +

+

+ Default: +


+
+
+
+
+
+
+ namespace-list +
+
+
+

+ List existing k8s namespaces +

+
+
+
+
+
+
+ restart +
+
+
+

+ Restart the Kubernetes master services on demand. +

+
+
+
+
+
+
+ upgrade +
+
+
+

+ Upgrade the kubernetes snaps +

+
+
+
+
+
+

+ This action has the following parameters: +

+
+
fix-cluster-name
+

+ If using the OpenStack cloud provider, whether to fix the cluster name sent to it to include the cluster tag. This fixes an issue with load balancers conflicting with other clusters in the same project but will cause new load balancers to be created which will require manual intervention to resolve. +

+

+ Default: True +


+
+
+
+ + + + + + +# More information + +- [Kubernetes github project](https://github.com/kubernetes/kubernetes) +- [Kubernetes issue tracker](https://github.com/kubernetes/kubernetes/issues) +- [Kubernetes documentation](http://kubernetes.io/docs/) +- [Kubernetes releases](https://github.com/kubernetes/kubernetes/releases) + + +[IPVS deep dive]: https://kubernetes.io/blog/2018/07/09/ipvs-based-in-cluster-load-balancing-deep-dive/ +[blog-admission]: https://kubernetes.io/blog/2019/03/21/a-guide-to-kubernetes-admission-controllers/ +[Addons page]: /kubernetes/docs/cdk-addons +[certs-and-trust]: /kubernetes/docs/certs-and-trust diff --git a/kubernetes-master/hooks/loadbalancer-external-relation-broken b/kubernetes-master/hooks/loadbalancer-external-relation-broken new file mode 100755 index 0000000..9858c6b --- /dev/null +++ b/kubernetes-master/hooks/loadbalancer-external-relation-broken @@ -0,0 +1,22 @@ +#!/usr/bin/env python3 + +# Load modules from $JUJU_CHARM_DIR/lib +import sys +sys.path.append('lib') + +from charms.layer import basic # noqa +basic.bootstrap_charm_deps() + +from charmhelpers.core import hookenv # noqa +hookenv.atstart(basic.init_config_states) +hookenv.atexit(basic.clear_config_states) + + +# This will load and run the appropriate @hook and other decorated +# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive, +# and $JUJU_CHARM_DIR/hooks/relations. +# +# See https://jujucharms.com/docs/stable/authors-charm-building +# for more information on this pattern. 
+from charms.reactive import main # noqa +main() diff --git a/kubernetes-master/hooks/loadbalancer-external-relation-changed b/kubernetes-master/hooks/loadbalancer-external-relation-changed new file mode 100755 index 0000000..9858c6b --- /dev/null +++ b/kubernetes-master/hooks/loadbalancer-external-relation-changed @@ -0,0 +1,22 @@ +#!/usr/bin/env python3 + +# Load modules from $JUJU_CHARM_DIR/lib +import sys +sys.path.append('lib') + +from charms.layer import basic # noqa +basic.bootstrap_charm_deps() + +from charmhelpers.core import hookenv # noqa +hookenv.atstart(basic.init_config_states) +hookenv.atexit(basic.clear_config_states) + + +# This will load and run the appropriate @hook and other decorated +# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive, +# and $JUJU_CHARM_DIR/hooks/relations. +# +# See https://jujucharms.com/docs/stable/authors-charm-building +# for more information on this pattern. +from charms.reactive import main # noqa +main() diff --git a/kubernetes-master/hooks/loadbalancer-external-relation-created b/kubernetes-master/hooks/loadbalancer-external-relation-created new file mode 100755 index 0000000..9858c6b --- /dev/null +++ b/kubernetes-master/hooks/loadbalancer-external-relation-created @@ -0,0 +1,22 @@ +#!/usr/bin/env python3 + +# Load modules from $JUJU_CHARM_DIR/lib +import sys +sys.path.append('lib') + +from charms.layer import basic # noqa +basic.bootstrap_charm_deps() + +from charmhelpers.core import hookenv # noqa +hookenv.atstart(basic.init_config_states) +hookenv.atexit(basic.clear_config_states) + + +# This will load and run the appropriate @hook and other decorated +# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive, +# and $JUJU_CHARM_DIR/hooks/relations. +# +# See https://jujucharms.com/docs/stable/authors-charm-building +# for more information on this pattern. 
+from charms.reactive import main # noqa +main() diff --git a/kubernetes-master/hooks/loadbalancer-external-relation-departed b/kubernetes-master/hooks/loadbalancer-external-relation-departed new file mode 100755 index 0000000..9858c6b --- /dev/null +++ b/kubernetes-master/hooks/loadbalancer-external-relation-departed @@ -0,0 +1,22 @@ +#!/usr/bin/env python3 + +# Load modules from $JUJU_CHARM_DIR/lib +import sys +sys.path.append('lib') + +from charms.layer import basic # noqa +basic.bootstrap_charm_deps() + +from charmhelpers.core import hookenv # noqa +hookenv.atstart(basic.init_config_states) +hookenv.atexit(basic.clear_config_states) + + +# This will load and run the appropriate @hook and other decorated +# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive, +# and $JUJU_CHARM_DIR/hooks/relations. +# +# See https://jujucharms.com/docs/stable/authors-charm-building +# for more information on this pattern. +from charms.reactive import main # noqa +main() diff --git a/kubernetes-master/hooks/loadbalancer-external-relation-joined b/kubernetes-master/hooks/loadbalancer-external-relation-joined new file mode 100755 index 0000000..9858c6b --- /dev/null +++ b/kubernetes-master/hooks/loadbalancer-external-relation-joined @@ -0,0 +1,22 @@ +#!/usr/bin/env python3 + +# Load modules from $JUJU_CHARM_DIR/lib +import sys +sys.path.append('lib') + +from charms.layer import basic # noqa +basic.bootstrap_charm_deps() + +from charmhelpers.core import hookenv # noqa +hookenv.atstart(basic.init_config_states) +hookenv.atexit(basic.clear_config_states) + + +# This will load and run the appropriate @hook and other decorated +# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive, +# and $JUJU_CHARM_DIR/hooks/relations. +# +# See https://jujucharms.com/docs/stable/authors-charm-building +# for more information on this pattern. 
+from charms.reactive import main # noqa +main() diff --git a/kubernetes-master/hooks/loadbalancer-internal-relation-broken b/kubernetes-master/hooks/loadbalancer-internal-relation-broken new file mode 100755 index 0000000..9858c6b --- /dev/null +++ b/kubernetes-master/hooks/loadbalancer-internal-relation-broken @@ -0,0 +1,22 @@ +#!/usr/bin/env python3 + +# Load modules from $JUJU_CHARM_DIR/lib +import sys +sys.path.append('lib') + +from charms.layer import basic # noqa +basic.bootstrap_charm_deps() + +from charmhelpers.core import hookenv # noqa +hookenv.atstart(basic.init_config_states) +hookenv.atexit(basic.clear_config_states) + + +# This will load and run the appropriate @hook and other decorated +# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive, +# and $JUJU_CHARM_DIR/hooks/relations. +# +# See https://jujucharms.com/docs/stable/authors-charm-building +# for more information on this pattern. +from charms.reactive import main # noqa +main() diff --git a/kubernetes-master/hooks/loadbalancer-internal-relation-changed b/kubernetes-master/hooks/loadbalancer-internal-relation-changed new file mode 100755 index 0000000..9858c6b --- /dev/null +++ b/kubernetes-master/hooks/loadbalancer-internal-relation-changed @@ -0,0 +1,22 @@ +#!/usr/bin/env python3 + +# Load modules from $JUJU_CHARM_DIR/lib +import sys +sys.path.append('lib') + +from charms.layer import basic # noqa +basic.bootstrap_charm_deps() + +from charmhelpers.core import hookenv # noqa +hookenv.atstart(basic.init_config_states) +hookenv.atexit(basic.clear_config_states) + + +# This will load and run the appropriate @hook and other decorated +# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive, +# and $JUJU_CHARM_DIR/hooks/relations. +# +# See https://jujucharms.com/docs/stable/authors-charm-building +# for more information on this pattern. 
+from charms.reactive import main # noqa +main() diff --git a/kubernetes-master/hooks/loadbalancer-internal-relation-created b/kubernetes-master/hooks/loadbalancer-internal-relation-created new file mode 100755 index 0000000..9858c6b --- /dev/null +++ b/kubernetes-master/hooks/loadbalancer-internal-relation-created @@ -0,0 +1,22 @@ +#!/usr/bin/env python3 + +# Load modules from $JUJU_CHARM_DIR/lib +import sys +sys.path.append('lib') + +from charms.layer import basic # noqa +basic.bootstrap_charm_deps() + +from charmhelpers.core import hookenv # noqa +hookenv.atstart(basic.init_config_states) +hookenv.atexit(basic.clear_config_states) + + +# This will load and run the appropriate @hook and other decorated +# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive, +# and $JUJU_CHARM_DIR/hooks/relations. +# +# See https://jujucharms.com/docs/stable/authors-charm-building +# for more information on this pattern. +from charms.reactive import main # noqa +main() diff --git a/kubernetes-master/hooks/loadbalancer-internal-relation-departed b/kubernetes-master/hooks/loadbalancer-internal-relation-departed new file mode 100755 index 0000000..9858c6b --- /dev/null +++ b/kubernetes-master/hooks/loadbalancer-internal-relation-departed @@ -0,0 +1,22 @@ +#!/usr/bin/env python3 + +# Load modules from $JUJU_CHARM_DIR/lib +import sys +sys.path.append('lib') + +from charms.layer import basic # noqa +basic.bootstrap_charm_deps() + +from charmhelpers.core import hookenv # noqa +hookenv.atstart(basic.init_config_states) +hookenv.atexit(basic.clear_config_states) + + +# This will load and run the appropriate @hook and other decorated +# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive, +# and $JUJU_CHARM_DIR/hooks/relations. +# +# See https://jujucharms.com/docs/stable/authors-charm-building +# for more information on this pattern. 
+from charms.reactive import main # noqa +main() diff --git a/kubernetes-master/hooks/loadbalancer-internal-relation-joined b/kubernetes-master/hooks/loadbalancer-internal-relation-joined new file mode 100755 index 0000000..9858c6b --- /dev/null +++ b/kubernetes-master/hooks/loadbalancer-internal-relation-joined @@ -0,0 +1,22 @@ +#!/usr/bin/env python3 + +# Load modules from $JUJU_CHARM_DIR/lib +import sys +sys.path.append('lib') + +from charms.layer import basic # noqa +basic.bootstrap_charm_deps() + +from charmhelpers.core import hookenv # noqa +hookenv.atstart(basic.init_config_states) +hookenv.atexit(basic.clear_config_states) + + +# This will load and run the appropriate @hook and other decorated +# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive, +# and $JUJU_CHARM_DIR/hooks/relations. +# +# See https://jujucharms.com/docs/stable/authors-charm-building +# for more information on this pattern. +from charms.reactive import main # noqa +main() diff --git a/kubernetes-master/hooks/relations/azure-integration/provides.py b/kubernetes-master/hooks/relations/azure-integration/provides.py index e0d596e..5ff7d3a 100644 --- a/kubernetes-master/hooks/relations/azure-integration/provides.py +++ b/kubernetes-master/hooks/relations/azure-integration/provides.py @@ -136,13 +136,21 @@ class IntegrationRequest: def send_additional_metadata(self, resource_group_location, vnet_name, vnet_resource_group, - subnet_name, security_group_name): + subnet_name, security_group_name, + security_group_resource_group, + use_managed_identity=True, aad_client=None, + aad_secret=None, tenant_id=None): self._to_publish.update({ 'resource-group-location': resource_group_location, 'vnet-name': vnet_name, 'vnet-resource-group': vnet_resource_group, 'subnet-name': subnet_name, 'security-group-name': security_group_name, + 'security-group-resource-group': security_group_resource_group, + 'use-managed-identity': use_managed_identity, + 'aad-client': aad_client, + 
'aad-client-secret': aad_secret, + 'tenant-id': tenant_id }) @property diff --git a/kubernetes-master/hooks/relations/azure-integration/requires.py b/kubernetes-master/hooks/relations/azure-integration/requires.py index 62f2b01..600d69e 100644 --- a/kubernetes-master/hooks/relations/azure-integration/requires.py +++ b/kubernetes-master/hooks/relations/azure-integration/requires.py @@ -211,8 +211,24 @@ class AzureIntegrationRequires(Endpoint): return requested and requested == completed @property - def credentials(self): - return self._received['credentials'] + def security_group_resource_group(self): + return self._received['security-group-resource-group'] + + @property + def managed_identity(self): + return self._received['use-managed-identity'] + + @property + def aad_client_id(self): + return self._received['aad-client'] + + @property + def aad_client_secret(self): + return self._received['aad-client-secret'] + + @property + def tenant_id(self): + return self._received['tenant-id'] def _request(self, keyvals): alphabet = string.ascii_letters + string.digits diff --git a/kubernetes-master/hooks/relations/ceph-client/lib/base_requires.py b/kubernetes-master/hooks/relations/ceph-client/lib/base_requires.py index c442c85..6c8bb70 100644 --- a/kubernetes-master/hooks/relations/ceph-client/lib/base_requires.py +++ b/kubernetes-master/hooks/relations/ceph-client/lib/base_requires.py @@ -16,7 +16,11 @@ import json import charms.reactive as reactive -from charmhelpers.core.hookenv import log +from charmhelpers.core.hookenv import ( + application_name, + local_unit, + log, +) from charmhelpers.contrib.network.ip import format_ipv6_addr from charmhelpers.contrib.storage.linux.ceph import ( @@ -285,6 +289,9 @@ class CephRequires(reactive.Endpoint): for relation in self.relations: relation.to_publish['broker_req'] = json.loads( request.request) + relation.to_publish_raw[ + 'application-name'] = application_name() + relation.to_publish_raw['unit-name'] = local_unit() def 
get_current_request(self): broker_reqs = [] diff --git a/kubernetes-master/hooks/relations/hacluster/interface.yaml b/kubernetes-master/hooks/relations/hacluster/interface.yaml index edd0c90..f03f3d7 100644 --- a/kubernetes-master/hooks/relations/hacluster/interface.yaml +++ b/kubernetes-master/hooks/relations/hacluster/interface.yaml @@ -11,3 +11,6 @@ ignore: - 'tox.ini' - 'unit_tests' - '.zuul.yaml' + - 'setup.cfg' + - 'setup.py' + - '**/ops_ha_interface.py' diff --git a/kubernetes-master/hooks/relations/hacluster/interface_hacluster/__init__.py b/kubernetes-master/hooks/relations/hacluster/interface_hacluster/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/kubeapi-load-balancer/hooks/relations/hacluster/common.py b/kubernetes-master/hooks/relations/hacluster/interface_hacluster/common.py similarity index 72% rename from kubeapi-load-balancer/hooks/relations/hacluster/common.py rename to kubernetes-master/hooks/relations/hacluster/interface_hacluster/common.py index d896510..e4b13ff 100644 --- a/kubeapi-load-balancer/hooks/relations/hacluster/common.py +++ b/kubernetes-master/hooks/relations/hacluster/interface_hacluster/common.py @@ -13,9 +13,292 @@ import hashlib import ipaddress +import json from six import string_types +class ResourceManagement(): + + def data_changed(self, data_id, data, hash_type='md5'): + raise NotImplementedError + + def get_local(self, key, default=None, scope=None): + raise NotImplementedError + + def set_local(self, key=None, value=None, data=None, scope=None, **kwdata): + raise NotImplementedError + + def set_remote(self, key=None, value=None, data=None, scope=None, + **kwdata): + raise NotImplementedError + + def is_clustered(self): + """Has the hacluster charm set clustered? + + The hacluster charm sets cluster=True when it determines it is ready. + Check the relation data for clustered and force a boolean return. 
+ + :returns: boolean + """ + clustered_values = self.get_remote_all('clustered') + if clustered_values: + # There is only ever one subordinate hacluster unit + clustered = clustered_values[0] + # Future versions of hacluster will return a bool + # Current versions return a string + if type(clustered) is bool: + return clustered + elif (clustered is not None and + (clustered.lower() == 'true' or + clustered.lower() == 'yes')): + return True + return False + + def bind_on(self, iface=None, mcastport=None): + relation_data = {} + if iface: + relation_data['corosync_bindiface'] = iface + if mcastport: + relation_data['corosync_mcastport'] = mcastport + + if relation_data and self.data_changed('hacluster-bind_on', + relation_data): + self.set_local(**relation_data) + self.set_remote(**relation_data) + + def manage_resources(self, crm): + """ + Request for the hacluster to manage the resources defined in the + crm object. + + res = CRM() + res.primitive('res_neutron_haproxy', 'lsb:haproxy', + op='monitor interval="5s"') + res.init_services('haproxy') + res.clone('cl_nova_haproxy', 'res_neutron_haproxy') + + hacluster.manage_resources(crm) + + :param crm: CRM() instance - Config object for Pacemaker resources + :returns: None + """ + relation_data = { + 'json_{}'.format(k): json.dumps(v, sort_keys=True) + for k, v in crm.items() if v + } + if self.data_changed('hacluster-manage_resources', relation_data): + self.set_local(**relation_data) + self.set_remote(**relation_data) + + def bind_resources(self, iface=None, mcastport=None): + """Inform the ha subordinate about each service it should manage. 
The + child class specifies the services via self.ha_resources + + :param iface: string - Network interface to bind to + :param mcastport: int - Multicast port corosync should use for cluster + management traffic + """ + if mcastport is None: + mcastport = 4440 + resources_dict = self.get_local('resources') + self.bind_on(iface=iface, mcastport=mcastport) + if resources_dict: + resources = CRM(**resources_dict) + self.manage_resources(resources) + + def delete_resource(self, resource_name): + resource_dict = self.get_local('resources') + if resource_dict: + resources = CRM(**resource_dict) + else: + resources = CRM() + resources.add_delete_resource(resource_name) + self.set_local(resources=resources) + + def add_vip(self, name, vip, iface=None, netmask=None): + """Add a VirtualIP object for each user specified vip to self.resources + + :param name: string - Name of service + :param vip: string - Virtual IP to be managed + :param iface: string - Network interface to bind vip to + :param netmask: string - Netmask for vip + :returns: None + """ + resource_dict = self.get_local('resources') + if resource_dict: + resources = CRM(**resource_dict) + else: + resources = CRM() + resources.add( + VirtualIP( + name, + vip, + nic=iface, + cidr=netmask,)) + + # Vip Group + group = 'grp_{}_vips'.format(name) + vip_res_group_members = [] + if resource_dict: + vip_resources = resource_dict.get('resources') + if vip_resources: + for vip_res in vip_resources: + if 'vip' in vip_res: + vip_res_group_members.append(vip_res) + resources.group(group, + *sorted(vip_res_group_members)) + + self.set_local(resources=resources) + + def remove_vip(self, name, vip, iface=None): + """Remove a virtual IP + + :param name: string - Name of service + :param vip: string - Virtual IP + :param iface: string - Network interface vip bound to + """ + if iface: + nic_name = iface + else: + nic_name = hashlib.sha1(vip.encode('UTF-8')).hexdigest()[:7] + self.delete_resource('res_{}_{}_vip'.format(name, 
nic_name)) + + def add_init_service(self, name, service, clone=True): + """Add a InitService object for haproxy to self.resources + + :param name: string - Name of service + :param service: string - Name service uses in init system + :returns: None + """ + resource_dict = self.get_local('resources') + if resource_dict: + resources = CRM(**resource_dict) + else: + resources = CRM() + resources.add( + InitService(name, service, clone)) + self.set_local(resources=resources) + + def remove_init_service(self, name, service): + """Remove an init service + + :param name: string - Name of service + :param service: string - Name of service used in init system + """ + res_key = 'res_{}_{}'.format( + name.replace('-', '_'), + service.replace('-', '_')) + self.delete_resource(res_key) + + def add_systemd_service(self, name, service, clone=True): + """Add a SystemdService object to self.resources + + :param name: string - Name of service + :param service: string - Name service uses in systemd + :returns: None + """ + resource_dict = self.get_local('resources') + if resource_dict: + resources = CRM(**resource_dict) + else: + resources = CRM() + resources.add( + SystemdService(name, service, clone)) + self.set_local(resources=resources) + + def remove_systemd_service(self, name, service): + """Remove a systemd service + + :param name: string - Name of service + :param service: string - Name of service used in systemd + """ + res_key = 'res_{}_{}'.format( + name.replace('-', '_'), + service.replace('-', '_')) + self.delete_resource(res_key) + + def add_dnsha(self, name, ip, fqdn, endpoint_type): + """Add a DNS entry to self.resources + + :param name: string - Name of service + :param ip: string - IP address dns entry should resolve to + :param fqdn: string - The DNS entry name + :param endpoint_type: string - Public, private, internal etc + :returns: None + """ + resource_dict = self.get_local('resources') + if resource_dict: + resources = CRM(**resource_dict) + else: + resources 
= CRM() + resources.add( + DNSEntry(name, ip, fqdn, endpoint_type)) + + # DNS Group + group = 'grp_{}_hostnames'.format(name) + dns_res_group_members = [] + if resource_dict: + dns_resources = resource_dict.get('resources') + if dns_resources: + for dns_res in dns_resources: + if 'hostname' in dns_res: + dns_res_group_members.append(dns_res) + resources.group(group, + *sorted(dns_res_group_members)) + + self.set_local(resources=resources) + + def remove_dnsha(self, name, endpoint_type): + """Remove a DNS entry + + :param name: string - Name of service + :param endpoint_type: string - Public, private, internal etc + :returns: None + """ + res_key = 'res_{}_{}_hostname'.format( + self.service_name.replace('-', '_'), + self.endpoint_type) + self.delete_resource(res_key) + + def add_colocation(self, name, score, colo_resources, node_attribute=None): + """Add a colocation directive + + :param name: string - Name of colocation directive + :param score: string - ALWAYS, INFINITY, NEVER, NEGATIVE_INFINITY}. See + CRM.colocation for more details + :param colo_resources: List[string] - List of resource names to + colocate + :param node_attribute: Colocate resources on a set of nodes with this + attribute and not necessarily on the same node. 
+ """ + node_config = {} + if node_attribute: + node_config = { + 'node_attribute': node_attribute} + resource_dict = self.get_local('resources') + if resource_dict: + resources = CRM(**resource_dict) + else: + resources = CRM() + resources.colocation( + name, + score, + *colo_resources, + **node_config) + self.set_local(resources=resources) + + def remove_colocation(self, name): + """Remove a colocation directive + + :param name: string - Name of colocation directive + """ + self.delete_resource(name) + + def get_remote_all(self, key, default=None): + """Return a list of all values presented by remote units for key""" + raise NotImplementedError + + class CRM(dict): """ Configuration object for Pacemaker resources for the HACluster diff --git a/kubernetes-master/hooks/relations/hacluster/requires.py b/kubernetes-master/hooks/relations/hacluster/requires.py index 9b72d97..395a658 100644 --- a/kubernetes-master/hooks/relations/hacluster/requires.py +++ b/kubernetes-master/hooks/relations/hacluster/requires.py @@ -11,18 +11,15 @@ # See the License for the specific language governing permissions and # limitations under the License. -import json -import hashlib - -import relations.hacluster.common +import relations.hacluster.interface_hacluster.common as common from charms.reactive import hook from charms.reactive import RelationBase from charms.reactive import scopes -from charms.reactive.helpers import data_changed +from charms.reactive.helpers import data_changed as rh_data_changed from charmhelpers.core import hookenv -class HAClusterRequires(RelationBase): +class HAClusterRequires(RelationBase, common.ResourceManagement): # The hacluster charm is a subordinate charm and really only works # for a single service to the HA Cluster relation, therefore set the # expected scope to be GLOBAL. 
@@ -44,232 +41,8 @@ class HAClusterRequires(RelationBase): self.remove_state('{relation_name}.available') self.remove_state('{relation_name}.connected') - def is_clustered(self): - """Has the hacluster charm set clustered? - - The hacluster charm sets cluster=True when it determines it is ready. - Check the relation data for clustered and force a boolean return. - - :returns: boolean - """ - clustered_values = self.get_remote_all('clustered') - if clustered_values: - # There is only ever one subordinate hacluster unit - clustered = clustered_values[0] - # Future versions of hacluster will return a bool - # Current versions return a string - if type(clustered) is bool: - return clustered - elif (clustered is not None and - (clustered.lower() == 'true' or - clustered.lower() == 'yes')): - return True - return False - - def bind_on(self, iface=None, mcastport=None): - relation_data = {} - if iface: - relation_data['corosync_bindiface'] = iface - if mcastport: - relation_data['corosync_mcastport'] = mcastport - - if relation_data and data_changed('hacluster-bind_on', relation_data): - self.set_local(**relation_data) - self.set_remote(**relation_data) - - def manage_resources(self, crm): - """ - Request for the hacluster to manage the resources defined in the - crm object. - - res = CRM() - res.primitive('res_neutron_haproxy', 'lsb:haproxy', - op='monitor interval="5s"') - res.init_services('haproxy') - res.clone('cl_nova_haproxy', 'res_neutron_haproxy') - - hacluster.manage_resources(crm) - - :param crm: CRM() instance - Config object for Pacemaker resources - :returns: None - """ - relation_data = { - 'json_{}'.format(k): json.dumps(v, sort_keys=True) - for k, v in crm.items() if v - } - if data_changed('hacluster-manage_resources', relation_data): - self.set_local(**relation_data) - self.set_remote(**relation_data) - - def bind_resources(self, iface=None, mcastport=None): - """Inform the ha subordinate about each service it should manage. 
The - child class specifies the services via self.ha_resources - - :param iface: string - Network interface to bind to - :param mcastport: int - Multicast port corosync should use for cluster - management traffic - """ - if mcastport is None: - mcastport = 4440 - resources_dict = self.get_local('resources') - self.bind_on(iface=iface, mcastport=mcastport) - if resources_dict: - resources = relations.hacluster.common.CRM(**resources_dict) - self.manage_resources(resources) - - def delete_resource(self, resource_name): - resource_dict = self.get_local('resources') - if resource_dict: - resources = relations.hacluster.common.CRM(**resource_dict) - else: - resources = relations.hacluster.common.CRM() - resources.add_delete_resource(resource_name) - self.set_local(resources=resources) - - def add_vip(self, name, vip, iface=None, netmask=None): - """Add a VirtualIP object for each user specified vip to self.resources - - :param name: string - Name of service - :param vip: string - Virtual IP to be managed - :param iface: string - Network interface to bind vip to - :param netmask: string - Netmask for vip - :returns: None - """ - resource_dict = self.get_local('resources') - if resource_dict: - resources = relations.hacluster.common.CRM(**resource_dict) - else: - resources = relations.hacluster.common.CRM() - resources.add( - relations.hacluster.common.VirtualIP( - name, - vip, - nic=iface, - cidr=netmask,)) - - # Vip Group - group = 'grp_{}_vips'.format(name) - vip_res_group_members = [] - if resource_dict: - vip_resources = resource_dict.get('resources') - if vip_resources: - for vip_res in vip_resources: - if 'vip' in vip_res: - vip_res_group_members.append(vip_res) - resources.group(group, - *sorted(vip_res_group_members)) - - self.set_local(resources=resources) - - def remove_vip(self, name, vip, iface=None): - """Remove a virtual IP - - :param name: string - Name of service - :param vip: string - Virtual IP - :param iface: string - Network interface vip bound to - 
""" - if iface: - nic_name = iface - else: - nic_name = hashlib.sha1(vip.encode('UTF-8')).hexdigest()[:7] - self.delete_resource('res_{}_{}_vip'.format(name, nic_name)) - - def add_init_service(self, name, service, clone=True): - """Add a InitService object for haproxy to self.resources - - :param name: string - Name of service - :param service: string - Name service uses in init system - :returns: None - """ - resource_dict = self.get_local('resources') - if resource_dict: - resources = relations.hacluster.common.CRM(**resource_dict) - else: - resources = relations.hacluster.common.CRM() - resources.add( - relations.hacluster.common.InitService(name, service, clone)) - self.set_local(resources=resources) - - def remove_init_service(self, name, service): - """Remove an init service - - :param name: string - Name of service - :param service: string - Name of service used in init system - """ - res_key = 'res_{}_{}'.format( - name.replace('-', '_'), - service.replace('-', '_')) - self.delete_resource(res_key) - - def add_systemd_service(self, name, service, clone=True): - """Add a SystemdService object to self.resources - - :param name: string - Name of service - :param service: string - Name service uses in systemd - :returns: None - """ - resource_dict = self.get_local('resources') - if resource_dict: - resources = relations.hacluster.common.CRM(**resource_dict) - else: - resources = relations.hacluster.common.CRM() - resources.add( - relations.hacluster.common.SystemdService(name, service, clone)) - self.set_local(resources=resources) - - def remove_systemd_service(self, name, service): - """Remove a systemd service - - :param name: string - Name of service - :param service: string - Name of service used in systemd - """ - res_key = 'res_{}_{}'.format( - name.replace('-', '_'), - service.replace('-', '_')) - self.delete_resource(res_key) - - def add_dnsha(self, name, ip, fqdn, endpoint_type): - """Add a DNS entry to self.resources - - :param name: string - Name of 
service - :param ip: string - IP address dns entry should resolve to - :param fqdn: string - The DNS entry name - :param endpoint_type: string - Public, private, internal etc - :returns: None - """ - resource_dict = self.get_local('resources') - if resource_dict: - resources = relations.hacluster.common.CRM(**resource_dict) - else: - resources = relations.hacluster.common.CRM() - resources.add( - relations.hacluster.common.DNSEntry(name, ip, fqdn, endpoint_type)) - - # DNS Group - group = 'grp_{}_hostnames'.format(name) - dns_res_group_members = [] - if resource_dict: - dns_resources = resource_dict.get('resources') - if dns_resources: - for dns_res in dns_resources: - if 'hostname' in dns_res: - dns_res_group_members.append(dns_res) - resources.group(group, - *sorted(dns_res_group_members)) - - self.set_local(resources=resources) - - def remove_dnsha(self, name, endpoint_type): - """Remove a DNS entry - - :param name: string - Name of service - :param endpoint_type: string - Public, private, internal etc - :returns: None - """ - res_key = 'res_{}_{}_hostname'.format( - self.service_name.replace('-', '_'), - self.endpoint_type) - self.delete_resource(res_key) + def data_changed(self, data_id, data, hash_type='md5'): + return rh_data_changed(data_id, data, hash_type) def get_remote_all(self, key, default=None): """Return a list of all values presented by remote units for key""" diff --git a/kubernetes-master/hooks/relations/hacluster/test-requirements.txt b/kubernetes-master/hooks/relations/hacluster/test-requirements.txt index 6da7df2..12452e5 100644 --- a/kubernetes-master/hooks/relations/hacluster/test-requirements.txt +++ b/kubernetes-master/hooks/relations/hacluster/test-requirements.txt @@ -4,3 +4,4 @@ stestr>=2.2.0 charms.reactive coverage>=3.6 netifaces +git+https://github.com/canonical/operator.git#egg=ops diff --git a/kubernetes-master/hooks/relations/kube-control/provides.py b/kubernetes-master/hooks/relations/kube-control/provides.py index 
9d3a829..918ace1 100644 --- a/kubernetes-master/hooks/relations/kube-control/provides.py +++ b/kubernetes-master/hooks/relations/kube-control/provides.py @@ -150,3 +150,11 @@ class KubeControlProvider(Endpoint): """ for relation in self.relations: relation.to_publish['default-cni'] = default_cni + + def set_api_endpoints(self, endpoints): + """ + Send the list of API endpoint URLs to which workers should connect. + """ + endpoints = sorted(endpoints) + for relation in self.relations: + relation.to_publish['api-endpoints'] = endpoints diff --git a/kubernetes-master/hooks/relations/kube-control/requires.py b/kubernetes-master/hooks/relations/kube-control/requires.py index 72ce1f6..a0c3b0d 100644 --- a/kubernetes-master/hooks/relations/kube-control/requires.py +++ b/kubernetes-master/hooks/relations/kube-control/requires.py @@ -48,6 +48,9 @@ class KubeControlRequirer(Endpoint): toggle_flag( self.expand_name('{endpoint_name}.default_cni.available'), self.is_joined and self.get_default_cni() is not None) + toggle_flag( + self.expand_name('{endpoint_name}.api_endpoints.available'), + self.is_joined and self.get_api_endpoints()) def get_auth_credentials(self, user): """ @@ -147,3 +150,12 @@ class KubeControlRequirer(Endpoint): Default CNI network to use. """ return self.all_joined_units.received['default-cni'] + + def get_api_endpoints(self): + """ + Returns a list of API endpoint URLs. 
+ """ + endpoints = set() + for unit in self.all_joined_units: + endpoints.update(unit.received['api-endpoints'] or []) + return sorted(endpoints) diff --git a/kubernetes-master/hooks/relations/kubernetes-cni/.github/workflows/tests.yaml b/kubernetes-master/hooks/relations/kubernetes-cni/.github/workflows/tests.yaml new file mode 100644 index 0000000..9801450 --- /dev/null +++ b/kubernetes-master/hooks/relations/kubernetes-cni/.github/workflows/tests.yaml @@ -0,0 +1,24 @@ +name: Test Suite for K8s Service Interface + +on: + - pull_request + +jobs: + lint-and-unit-tests: + name: Lint & Unit tests + runs-on: ubuntu-latest + strategy: + matrix: + python: [3.6, 3.7, 3.8, 3.9] + steps: + - name: Check out code + uses: actions/checkout@v2 + - name: Setup Python + uses: actions/setup-python@v2 + with: + python-version: ${{ matrix.python }} + - name: Install Tox + run: pip install tox + - name: Run lint & unit tests + run: tox + diff --git a/kubernetes-master/hooks/relations/kubernetes-cni/.gitignore b/kubernetes-master/hooks/relations/kubernetes-cni/.gitignore index e43b0f9..8d150f3 100644 --- a/kubernetes-master/hooks/relations/kubernetes-cni/.gitignore +++ b/kubernetes-master/hooks/relations/kubernetes-cni/.gitignore @@ -1 +1,4 @@ .DS_Store +.tox +__pycache__ +*.pyc diff --git a/kubernetes-master/hooks/relations/kubernetes-cni/.travis.yml b/kubernetes-master/hooks/relations/kubernetes-cni/.travis.yml deleted file mode 100644 index d2be8be..0000000 --- a/kubernetes-master/hooks/relations/kubernetes-cni/.travis.yml +++ /dev/null @@ -1,9 +0,0 @@ -language: python -python: - - "3.5" - - "3.6" - - "3.7" -install: - - pip install tox-travis -script: - - tox diff --git a/kubernetes-master/hooks/relations/kubernetes-cni/provides.py b/kubernetes-master/hooks/relations/kubernetes-cni/provides.py index 0b4aada..9095c19 100644 --- a/kubernetes-master/hooks/relations/kubernetes-cni/provides.py +++ b/kubernetes-master/hooks/relations/kubernetes-cni/provides.py @@ -1,48 +1,46 @@ 
#!/usr/bin/python from charmhelpers.core import hookenv +from charmhelpers.core.host import file_hash +from charms.layer.kubernetes_common import kubeclientconfig_path from charms.reactive import Endpoint from charms.reactive import toggle_flag, is_flag_set, clear_flag, set_flag class CNIPluginProvider(Endpoint): def manage_flags(self): - toggle_flag(self.expand_name('{endpoint_name}.connected'), - self.is_joined) - toggle_flag(self.expand_name('{endpoint_name}.available'), - self.config_available()) - if is_flag_set(self.expand_name('endpoint.{endpoint_name}.changed')): - clear_flag(self.expand_name('{endpoint_name}.configured')) - clear_flag(self.expand_name('endpoint.{endpoint_name}.changed')) + toggle_flag(self.expand_name("{endpoint_name}.connected"), self.is_joined) + toggle_flag( + self.expand_name("{endpoint_name}.available"), self.config_available() + ) + if is_flag_set(self.expand_name("endpoint.{endpoint_name}.changed")): + clear_flag(self.expand_name("{endpoint_name}.configured")) + clear_flag(self.expand_name("endpoint.{endpoint_name}.changed")) - def set_config(self, is_master, kubeconfig_path): - ''' Relays a dict of kubernetes configuration information. ''' + def set_config(self, is_master): + """Relays a dict of kubernetes configuration information.""" for relation in self.relations: - relation.to_publish_raw.update({ - 'is_master': is_master, - 'kubeconfig_path': kubeconfig_path - }) - set_flag(self.expand_name('{endpoint_name}.configured')) + relation.to_publish_raw.update({"is_master": is_master}) + set_flag(self.expand_name("{endpoint_name}.configured")) def config_available(self): - ''' Ensures all config from the CNI plugin is available. 
''' + """Ensures all config from the CNI plugin is available.""" goal_state = hookenv.goal_state() related_apps = [ - app for app in goal_state.get('relations', {}).get(self.endpoint_name, '') - if '/' not in app + app + for app in goal_state.get("relations", {}).get(self.endpoint_name, "") + if "/" not in app ] if not related_apps: return False configs = self.get_configs() return all( - 'cidr' in config and 'cni-conf-file' in config - for config in [ - configs.get(related_app, {}) for related_app in related_apps - ] + "cidr" in config and "cni-conf-file" in config + for config in [configs.get(related_app, {}) for related_app in related_apps] ) def get_config(self, default=None): - ''' Get CNI config for one related application. + """Get CNI config for one related application. If default is specified, and there is a related application with a matching name, then that application is chosen. Otherwise, the @@ -50,13 +48,13 @@ class CNIPluginProvider(Endpoint): Whichever application is chosen, that application's CNI config is returned. - ''' + """ configs = self.get_configs() if not configs: return {} elif default and default not in configs: - msg = 'relation not found for default CNI %s, ignoring' % default - hookenv.log(msg, level='WARN') + msg = "relation not found for default CNI %s, ignoring" % default + hookenv.log(msg, level="WARN") return self.get_config() elif default: return configs.get(default, {}) @@ -64,7 +62,7 @@ class CNIPluginProvider(Endpoint): return configs.get(sorted(configs)[0], {}) def get_configs(self): - ''' Get CNI configs for all related applications. + """Get CNI configs for all related applications. This returns a mapping of application names to CNI configs. 
Here's an example return value: @@ -78,8 +76,14 @@ class CNIPluginProvider(Endpoint): 'cni-conf-file': '10-calico.conflist' } } - ''' + """ return { relation.application_name: relation.joined_units.received_raw - for relation in self.relations if relation.application_name + for relation in self.relations + if relation.application_name } + + def notify_kubeconfig_changed(self): + kubeconfig_hash = file_hash(kubeclientconfig_path) + for relation in self.relations: + relation.to_publish_raw.update({"kubeconfig-hash": kubeconfig_hash}) diff --git a/kubernetes-master/hooks/relations/kubernetes-cni/requires.py b/kubernetes-master/hooks/relations/kubernetes-cni/requires.py index 039b912..2067826 100644 --- a/kubernetes-master/hooks/relations/kubernetes-cni/requires.py +++ b/kubernetes-master/hooks/relations/kubernetes-cni/requires.py @@ -1,45 +1,54 @@ #!/usr/bin/python +from charmhelpers.core import unitdata from charms.reactive import Endpoint from charms.reactive import when_any, when_not from charms.reactive import set_state, remove_state +db = unitdata.kv() + class CNIPluginClient(Endpoint): + def manage_flags(self): + kubeconfig_hash = self.get_config().get("kubeconfig-hash") + kubeconfig_hash_key = self.expand_name("{endpoint_name}.kubeconfig-hash") + if kubeconfig_hash: + set_state(self.expand_name("{endpoint_name}.kubeconfig.available")) + if kubeconfig_hash != db.get(kubeconfig_hash_key): + set_state(self.expand_name("{endpoint_name}.kubeconfig.changed")) + db.set(kubeconfig_hash_key, kubeconfig_hash) - @when_any('endpoint.{endpoint_name}.joined', - 'endpoint.{endpoint_name}.changed') + @when_any("endpoint.{endpoint_name}.joined", "endpoint.{endpoint_name}.changed") def changed(self): - ''' Indicate the relation is connected, and if the relation data is - set it is also available. 
''' - set_state(self.expand_name('{endpoint_name}.connected')) + """Indicate the relation is connected, and if the relation data is + set it is also available.""" + set_state(self.expand_name("{endpoint_name}.connected")) config = self.get_config() - if config['is_master'] == 'True': - set_state(self.expand_name('{endpoint_name}.is-master')) - set_state(self.expand_name('{endpoint_name}.configured')) - elif config['is_master'] == 'False': - set_state(self.expand_name('{endpoint_name}.is-worker')) - set_state(self.expand_name('{endpoint_name}.configured')) + if config["is_master"] == "True": + set_state(self.expand_name("{endpoint_name}.is-master")) + set_state(self.expand_name("{endpoint_name}.configured")) + elif config["is_master"] == "False": + set_state(self.expand_name("{endpoint_name}.is-worker")) + set_state(self.expand_name("{endpoint_name}.configured")) else: - remove_state(self.expand_name('{endpoint_name}.configured')) - remove_state(self.expand_name('endpoint.{endpoint_name}.changed')) + remove_state(self.expand_name("{endpoint_name}.configured")) + remove_state(self.expand_name("endpoint.{endpoint_name}.changed")) - @when_not('endpoint.{endpoint_name}.joined') + @when_not("endpoint.{endpoint_name}.joined") def broken(self): - ''' Indicate the relation is no longer available and not connected. ''' - remove_state(self.expand_name('{endpoint_name}.connected')) - remove_state(self.expand_name('{endpoint_name}.is-master')) - remove_state(self.expand_name('{endpoint_name}.is-worker')) - remove_state(self.expand_name('{endpoint_name}.configured')) + """Indicate the relation is no longer available and not connected.""" + remove_state(self.expand_name("{endpoint_name}.connected")) + remove_state(self.expand_name("{endpoint_name}.is-master")) + remove_state(self.expand_name("{endpoint_name}.is-worker")) + remove_state(self.expand_name("{endpoint_name}.configured")) def get_config(self): - ''' Get the kubernetes configuration information. 
''' + """Get the kubernetes configuration information.""" return self.all_joined_units.received_raw def set_config(self, cidr, cni_conf_file): - ''' Sets the CNI configuration information. ''' + """Sets the CNI configuration information.""" for relation in self.relations: - relation.to_publish_raw.update({ - 'cidr': cidr, - 'cni-conf-file': cni_conf_file - }) + relation.to_publish_raw.update( + {"cidr": cidr, "cni-conf-file": cni_conf_file} + ) diff --git a/kubernetes-master/lib/charms/layer/basic.py b/kubernetes-master/lib/charms/layer/basic.py index 7507203..bbdd074 100644 --- a/kubernetes-master/lib/charms/layer/basic.py +++ b/kubernetes-master/lib/charms/layer/basic.py @@ -199,7 +199,13 @@ def bootstrap_charm_deps(): # a set so that we can ignore the pre-install packages and let pip # choose the best version in case there are multiple from layer # conflicts) - pkgs = _load_wheelhouse_versions().keys() - set(pre_install_pkgs) + _versions = _load_wheelhouse_versions() + _pkgs = _versions.keys() - set(pre_install_pkgs) + # add back the versions such that each package in pkgs is + # ==. + # This ensures that pip 20.3.4+ will install the packages from the + # wheelhouse without (erroneously) flagging an error. + pkgs = _add_back_versions(_pkgs, _versions) reinstall_flag = '--force-reinstall' if not cfg.get('use_venv', True) and pre_eoan: reinstall_flag = '--ignore-installed' @@ -278,6 +284,55 @@ def _load_wheelhouse_versions(): return versions +def _add_back_versions(pkgs, versions): + """Add back the version strings to each of the packages. + + The versions are LooseVersion() from _load_wheelhouse_versions(). This + function strips the ".zip" or ".tar.gz" from the end of the version string + and adds it back to the package in the form of == + + If a package name is not a key in the versions dictionary, then it is + returned in the list unchanged. 
+ + :param pkgs: A list of package names + :type pkgs: List[str] + :param versions: A map of package to LooseVersion + :type versions: Dict[str, LooseVersion] + :returns: A list of (maybe) versioned packages + :rtype: List[str] + """ + def _strip_ext(s): + """Strip an extension (if it exists) from the string + + :param s: the string to strip an extension off if it exists + :type s: str + :returns: string without an extension of .zip or .tar.gz + :rtype: str + """ + for ending in [".zip", ".tar.gz"]: + if s.endswith(ending): + return s[:-len(ending)] + return s + + def _maybe_add_version(pkg): + """Maybe add back the version number to a package if it exists. + + Adds the version number, if the package exists in the lexically + captured `versions` dictionary, in the form ==. Strips + the extension if it exists. + + :param pkg: the package name to (maybe) add the version number to. + :type pkg: str + """ + try: + return "{}=={}".format(pkg, _strip_ext(str(versions[pkg]))) + except KeyError: + pass + return pkg + + return [_maybe_add_version(pkg) for pkg in pkgs] + + def _update_if_newer(pip, pkgs): installed = _load_installed_versions(pip) wheelhouse = _load_wheelhouse_versions() diff --git a/kubernetes-master/lib/charms/layer/kubernetes_common.py b/kubernetes-master/lib/charms/layer/kubernetes_common.py index 0ac309f..fb14ad2 100644 --- a/kubernetes-master/lib/charms/layer/kubernetes_common.py +++ b/kubernetes-master/lib/charms/layer/kubernetes_common.py @@ -21,7 +21,12 @@ import subprocess import hashlib import json import traceback +import random +import string +import tempfile +import yaml +from base64 import b64decode, b64encode from pathlib import Path from subprocess import check_output, check_call from socket import gethostname, getfqdn @@ -29,19 +34,23 @@ from shlex import split from subprocess import CalledProcessError from charmhelpers.core import hookenv, unitdata from charmhelpers.core import host +from charmhelpers.core.templating import render from 
charms.reactive import endpoint_from_flag, is_state from time import sleep +AUTH_SECRET_NS = "kube-system" +AUTH_SECRET_TYPE = "juju.is/token-auth" + db = unitdata.kv() -kubeclientconfig_path = '/root/.kube/config' -gcp_creds_env_key = 'GOOGLE_APPLICATION_CREDENTIALS' -kubeproxyconfig_path = '/root/cdk/kubeproxyconfig' -certs_dir = Path('/root/cdk') -ca_crt_path = certs_dir / 'ca.crt' -server_crt_path = certs_dir / 'server.crt' -server_key_path = certs_dir / 'server.key' -client_crt_path = certs_dir / 'client.crt' -client_key_path = certs_dir / 'client.key' +kubeclientconfig_path = "/root/.kube/config" +gcp_creds_env_key = "GOOGLE_APPLICATION_CREDENTIALS" +kubeproxyconfig_path = "/root/cdk/kubeproxyconfig" +certs_dir = Path("/root/cdk") +ca_crt_path = certs_dir / "ca.crt" +server_crt_path = certs_dir / "server.crt" +server_key_path = certs_dir / "server.key" +client_crt_path = certs_dir / "client.crt" +client_key_path = certs_dir / "client.key" def get_version(bin_name): @@ -56,13 +65,13 @@ def get_version(bin_name): (1, 6, 0) """ - cmd = '{} --version'.format(bin_name).split() - version_string = subprocess.check_output(cmd).decode('utf-8') + cmd = "{} --version".format(bin_name).split() + version_string = subprocess.check_output(cmd).decode("utf-8") return tuple(int(q) for q in re.findall("[0-9]+", version_string)[:3]) def retry(times, delay_secs): - """ Decorator for retrying a method call. + """Decorator for retrying a method call. Args: times: How many times should we retry before giving up @@ -72,7 +81,7 @@ def retry(times, delay_secs): """ def retry_decorator(func): - """ Decorator to wrap the function provided. + """Decorator to wrap the function provided. 
Args: func: Provided function should return either True od False @@ -80,6 +89,7 @@ def retry(times, delay_secs): Returns: A callable that would return the last call outcome """ + def _wrapped(*args, **kwargs): res = func(*args, **kwargs) attempt = 0 @@ -90,36 +100,37 @@ def retry(times, delay_secs): break attempt += 1 return res + return _wrapped return retry_decorator def calculate_resource_checksum(resource): - ''' Calculate a checksum for a resource ''' + """Calculate a checksum for a resource""" md5 = hashlib.md5() path = hookenv.resource_get(resource) if path: - with open(path, 'rb') as f: + with open(path, "rb") as f: data = f.read() md5.update(data) return md5.hexdigest() def get_resource_checksum_db_key(checksum_prefix, resource): - ''' Convert a resource name to a resource checksum database key. ''' + """Convert a resource name to a resource checksum database key.""" return checksum_prefix + resource def migrate_resource_checksums(checksum_prefix, snap_resources): - ''' Migrate resource checksums from the old schema to the new one ''' + """Migrate resource checksums from the old schema to the new one""" for resource in snap_resources: new_key = get_resource_checksum_db_key(checksum_prefix, resource) if not db.get(new_key): path = hookenv.resource_get(resource) if path: # old key from charms.reactive.helpers.any_file_changed - old_key = 'reactive.files_changed.' + path + old_key = "reactive.files_changed." 
+ path old_checksum = db.get(old_key) db.set(new_key, old_checksum) else: @@ -131,7 +142,7 @@ def migrate_resource_checksums(checksum_prefix, snap_resources): def check_resources_for_upgrade_needed(checksum_prefix, snap_resources): - hookenv.status_set('maintenance', 'Checking resources') + hookenv.status_set("maintenance", "Checking resources") for resource in snap_resources: key = get_resource_checksum_db_key(checksum_prefix, resource) old_checksum = db.get(key) @@ -148,25 +159,31 @@ def calculate_and_store_resource_checksums(checksum_prefix, snap_resources): db.set(key, checksum) -def get_ingress_address(endpoint_name): +def get_ingress_address(endpoint_name, ignore_addresses=None): try: network_info = hookenv.network_get(endpoint_name) except NotImplementedError: network_info = {} - if not network_info or 'ingress-addresses' not in network_info: + if not network_info or "ingress-addresses" not in network_info: # if they don't have ingress-addresses they are running a juju that # doesn't support spaces, so just return the private address - return hookenv.unit_get('private-address') + return hookenv.unit_get("private-address") - addresses = network_info['ingress-addresses'] + addresses = network_info["ingress-addresses"] + + if ignore_addresses: + hookenv.log("ingress-addresses before filtering: {}".format(addresses)) + iter_filter = filter(lambda item: item not in ignore_addresses, addresses) + addresses = list(iter_filter) + hookenv.log("ingress-addresses after filtering: {}".format(addresses)) # Need to prefer non-fan IP addresses due to various issues, e.g. # https://bugs.launchpad.net/charm-gcp-integrator/+bug/1822997 # Fan typically likes to use IPs in the 240.0.0.0/4 block, so we'll # prioritize those last. Not technically correct, but good enough. 
try: - sort_key = lambda a: int(a.partition('.')[0]) >= 240 # noqa: E731 + sort_key = lambda a: int(a.partition(".")[0]) >= 240 # noqa: E731 addresses = sorted(addresses, key=sort_key) except Exception: hookenv.log(traceback.format_exc()) @@ -180,10 +197,10 @@ def get_ingress_address6(endpoint_name): except NotImplementedError: network_info = {} - if not network_info or 'ingress-addresses' not in network_info: + if not network_info or "ingress-addresses" not in network_info: return None - addresses = network_info['ingress-addresses'] + addresses = network_info["ingress-addresses"] for addr in addresses: ip_addr = ipaddress.ip_interface(addr).ip @@ -194,35 +211,35 @@ def get_ingress_address6(endpoint_name): def service_restart(service_name): - hookenv.status_set('maintenance', 'Restarting {0} service'.format( - service_name)) + hookenv.status_set("maintenance", "Restarting {0} service".format(service_name)) host.service_restart(service_name) def service_start(service_name): - hookenv.log('Starting {0} service.'.format(service_name)) + hookenv.log("Starting {0} service.".format(service_name)) host.service_stop(service_name) def service_stop(service_name): - hookenv.log('Stopping {0} service.'.format(service_name)) + hookenv.log("Stopping {0} service.".format(service_name)) host.service_stop(service_name) def arch(): - '''Return the package architecture as a string. Raise an exception if the - architecture is not supported by kubernetes.''' + """Return the package architecture as a string. Raise an exception if the + architecture is not supported by kubernetes.""" # Get the package architecture for this system. - architecture = check_output(['dpkg', '--print-architecture']).rstrip() + architecture = check_output(["dpkg", "--print-architecture"]).rstrip() # Convert the binary result into a string. 
- architecture = architecture.decode('utf-8') + architecture = architecture.decode("utf-8") return architecture def get_service_ip(service, namespace="kube-system", errors_fatal=True): try: - output = kubectl('get', 'service', '--namespace', namespace, service, - '--output', 'json') + output = kubectl( + "get", "service", "--namespace", namespace, service, "--output", "json" + ) except CalledProcessError: if errors_fatal: raise @@ -230,20 +247,20 @@ def get_service_ip(service, namespace="kube-system", errors_fatal=True): return None else: svc = json.loads(output.decode()) - return svc['spec']['clusterIP'] + return svc["spec"]["clusterIP"] def kubectl(*args): - ''' Run a kubectl cli command with a config file. Returns stdout and throws - an error if the command fails. ''' - command = ['kubectl', '--kubeconfig=' + kubeclientconfig_path] + list(args) - hookenv.log('Executing {}'.format(command)) + """Run a kubectl cli command with a config file. Returns stdout and throws + an error if the command fails.""" + command = ["kubectl", "--kubeconfig=" + kubeclientconfig_path] + list(args) + hookenv.log("Executing {}".format(command)) return check_output(command) def kubectl_success(*args): - ''' Runs kubectl with the given args. Returns True if successful, False if - not. ''' + """Runs kubectl with the given args. 
Returns True if successful, False if + not.""" try: kubectl(*args) return True @@ -252,75 +269,97 @@ def kubectl_success(*args): def kubectl_manifest(operation, manifest): - ''' Wrap the kubectl creation command when using filepath resources + """Wrap the kubectl creation command when using filepath resources :param operation - one of get, create, delete, replace :param manifest - filepath to the manifest - ''' + """ # Deletions are a special case - if operation == 'delete': + if operation == "delete": # Ensure we immediately remove requested resources with --now - return kubectl_success(operation, '-f', manifest, '--now') + return kubectl_success(operation, "-f", manifest, "--now") else: # Guard against an error re-creating the same manifest multiple times - if operation == 'create': + if operation == "create": # If we already have the definition, its probably safe to assume # creation was true. - if kubectl_success('get', '-f', manifest): - hookenv.log('Skipping definition for {}'.format(manifest)) + if kubectl_success("get", "-f", manifest): + hookenv.log("Skipping definition for {}".format(manifest)) return True # Execute the requested command that did not match any of the special # cases above - return kubectl_success(operation, '-f', manifest) + return kubectl_success(operation, "-f", manifest) def get_node_name(): - kubelet_extra_args = parse_extra_args('kubelet-extra-args') - cloud_provider = kubelet_extra_args.get('cloud-provider', '') - if is_state('endpoint.aws.ready'): - cloud_provider = 'aws' - elif is_state('endpoint.gcp.ready'): - cloud_provider = 'gce' - elif is_state('endpoint.openstack.ready'): - cloud_provider = 'openstack' - elif is_state('endpoint.vsphere.ready'): - cloud_provider = 'vsphere' - elif is_state('endpoint.azure.ready'): - cloud_provider = 'azure' - if cloud_provider == 'aws': + kubelet_extra_args = parse_extra_args("kubelet-extra-args") + cloud_provider = kubelet_extra_args.get("cloud-provider", "") + if 
is_state("endpoint.aws.ready"): + cloud_provider = "aws" + elif is_state("endpoint.gcp.ready"): + cloud_provider = "gce" + elif is_state("endpoint.openstack.ready"): + cloud_provider = "openstack" + elif is_state("endpoint.vsphere.ready"): + cloud_provider = "vsphere" + elif is_state("endpoint.azure.ready"): + cloud_provider = "azure" + if cloud_provider == "aws": return getfqdn().lower() else: return gethostname().lower() -def create_kubeconfig(kubeconfig, server, ca, key=None, certificate=None, - user='ubuntu', context='juju-context', - cluster='juju-cluster', password=None, token=None, - keystone=False, aws_iam_cluster_id=None): - '''Create a configuration for Kubernetes based on path using the supplied +def create_kubeconfig( + kubeconfig, + server, + ca, + key=None, + certificate=None, + user="ubuntu", + context="juju-context", + cluster="juju-cluster", + password=None, + token=None, + keystone=False, + aws_iam_cluster_id=None, +): + """Create a configuration for Kubernetes based on path using the supplied arguments for values of the Kubernetes server, CA, key, certificate, user - context and cluster.''' + context and cluster.""" if not key and not certificate and not password and not token: - raise ValueError('Missing authentication mechanism.') + raise ValueError("Missing authentication mechanism.") + elif key and not certificate: + raise ValueError("Missing certificate.") + elif not key and certificate: + raise ValueError("Missing key.") + elif token and password: + # token and password are mutually exclusive. Error early if both are + # present. The developer has requested an impossible situation. + # see: kubectl config set-credentials --help + raise ValueError("Token and Password are mutually exclusive.") + + old_kubeconfig = Path(kubeconfig) + new_kubeconfig = Path(str(kubeconfig) + ".new") - # token and password are mutually exclusive. Error early if both are - # present. The developer has requested an impossible situation. 
- # see: kubectl config set-credentials --help - if token and password: - raise ValueError('Token and Password are mutually exclusive.') # Create the config file with the address of the master server. - cmd = 'kubectl config --kubeconfig={0} set-cluster {1} ' \ - '--server={2} --certificate-authority={3} --embed-certs=true' - check_call(split(cmd.format(kubeconfig, cluster, server, ca))) + cmd = ( + "kubectl config --kubeconfig={0} set-cluster {1} " + "--server={2} --certificate-authority={3} --embed-certs=true" + ) + check_call(split(cmd.format(new_kubeconfig, cluster, server, ca))) # Delete old users - cmd = 'kubectl config --kubeconfig={0} unset users' - check_call(split(cmd.format(kubeconfig))) + cmd = "kubectl config --kubeconfig={0} unset users" + check_call(split(cmd.format(new_kubeconfig))) # Create the credentials using the client flags. - cmd = 'kubectl config --kubeconfig={0} ' \ - 'set-credentials {1} '.format(kubeconfig, user) + cmd = "kubectl config --kubeconfig={0} " "set-credentials {1} ".format( + new_kubeconfig, user + ) if key and certificate: - cmd = '{0} --client-key={1} --client-certificate={2} '\ - '--embed-certs=true'.format(cmd, key, certificate) + cmd = ( + "{0} --client-key={1} --client-certificate={2} " + "--embed-certs=true".format(cmd, key, certificate) + ) if password: cmd = "{0} --username={1} --password={2}".format(cmd, user, password) # This is mutually exclusive from password. They will not work together. @@ -328,71 +367,87 @@ def create_kubeconfig(kubeconfig, server, ca, key=None, certificate=None, cmd = "{0} --token={1}".format(cmd, token) check_call(split(cmd)) # Create a default context with the cluster. 
- cmd = 'kubectl config --kubeconfig={0} set-context {1} ' \ - '--cluster={2} --user={3}' - check_call(split(cmd.format(kubeconfig, context, cluster, user))) + cmd = "kubectl config --kubeconfig={0} set-context {1} " "--cluster={2} --user={3}" + check_call(split(cmd.format(new_kubeconfig, context, cluster, user))) # Make the config use this new context. - cmd = 'kubectl config --kubeconfig={0} use-context {1}' - check_call(split(cmd.format(kubeconfig, context))) + cmd = "kubectl config --kubeconfig={0} use-context {1}" + check_call(split(cmd.format(new_kubeconfig, context))) if keystone: # create keystone user - cmd = 'kubectl config --kubeconfig={0} ' \ - 'set-credentials keystone-user'.format(kubeconfig) + cmd = "kubectl config --kubeconfig={0} " "set-credentials keystone-user".format( + new_kubeconfig + ) check_call(split(cmd)) # create keystone context - cmd = 'kubectl config --kubeconfig={0} ' \ - 'set-context --cluster={1} ' \ - '--user=keystone-user keystone'.format(kubeconfig, cluster) + cmd = ( + "kubectl config --kubeconfig={0} " + "set-context --cluster={1} " + "--user=keystone-user keystone".format(new_kubeconfig, cluster) + ) check_call(split(cmd)) # use keystone context - cmd = 'kubectl config --kubeconfig={0} ' \ - 'use-context keystone'.format(kubeconfig) + cmd = "kubectl config --kubeconfig={0} " "use-context keystone".format( + new_kubeconfig + ) check_call(split(cmd)) # manually add exec command until kubectl can do it for us - with open(kubeconfig, "r") as f: + with open(new_kubeconfig, "r") as f: content = f.read() - content = content.replace("""- name: keystone-user - user: {}""", """- name: keystone-user + content = content.replace( + """- name: keystone-user + user: {}""", + """- name: keystone-user user: exec: command: "/snap/bin/client-keystone-auth" apiVersion: "client.authentication.k8s.io/v1beta1" -""") - with open(kubeconfig, "w") as f: +""", + ) + with open(new_kubeconfig, "w") as f: f.write(content) if aws_iam_cluster_id: # create 
aws-iam context - cmd = 'kubectl config --kubeconfig={0} ' \ - 'set-context --cluster={1} ' \ - '--user=aws-iam-user aws-iam-authenticator' - check_call(split(cmd.format(kubeconfig, cluster))) + cmd = ( + "kubectl config --kubeconfig={0} " + "set-context --cluster={1} " + "--user=aws-iam-user aws-iam-authenticator" + ) + check_call(split(cmd.format(new_kubeconfig, cluster))) # append a user for aws-iam - cmd = 'kubectl --kubeconfig={0} config set-credentials ' \ - 'aws-iam-user --exec-command=aws-iam-authenticator ' \ - '--exec-arg="token" --exec-arg="-i" --exec-arg="{1}" ' \ - '--exec-arg="-r" --exec-arg="<>" ' \ - '--exec-api-version=client.authentication.k8s.io/v1alpha1' - check_call(split(cmd.format(kubeconfig, aws_iam_cluster_id))) + cmd = ( + "kubectl --kubeconfig={0} config set-credentials " + "aws-iam-user --exec-command=aws-iam-authenticator " + '--exec-arg="token" --exec-arg="-i" --exec-arg="{1}" ' + '--exec-arg="-r" --exec-arg="<>" ' + "--exec-api-version=client.authentication.k8s.io/v1alpha1" + ) + check_call(split(cmd.format(new_kubeconfig, aws_iam_cluster_id))) # not going to use aws-iam context by default since we don't have # the desired arn. This will make the config not usable if copied. 
# cmd = 'kubectl config --kubeconfig={0} ' \ - # 'use-context aws-iam-authenticator'.format(kubeconfig) + # 'use-context aws-iam-authenticator'.format(new_kubeconfig) # check_call(split(cmd)) + if old_kubeconfig.exists(): + changed = new_kubeconfig.read_text() != old_kubeconfig.read_text() + else: + changed = True + if changed: + new_kubeconfig.rename(old_kubeconfig) def parse_extra_args(config_key): - elements = hookenv.config().get(config_key, '').split() + elements = hookenv.config().get(config_key, "").split() args = {} for element in elements: - if '=' in element: - key, _, value = element.partition('=') + if "=" in element: + key, _, value = element.partition("=") args[key] = value else: - args[element] = 'true' + args[element] = "true" return args @@ -411,7 +466,7 @@ def configure_kubernetes_service(key, service, base_args, extra_args_key): # CIS benchmark action may inject kv config to pass failing tests. Merge # these after the func args as they should take precedence. - cis_args_key = 'cis-' + service + cis_args_key = "cis-" + service cis_args = db.get(cis_args_key) or {} args.update(cis_args) @@ -419,16 +474,16 @@ def configure_kubernetes_service(key, service, base_args, extra_args_key): # construct an arg string for use by 'snap set'. 
args = {k: v for k, v in args.items() if v is not None} args = ['--%s="%s"' % arg for arg in args.items()] - args = ' '.join(args) + args = " ".join(args) snap_opts = {} for arg in prev_snap_args: # remove previous args by setting to null - snap_opts[arg] = 'null' - snap_opts['args'] = args - snap_opts = ['%s=%s' % opt for opt in snap_opts.items()] + snap_opts[arg] = "null" + snap_opts["args"] = args + snap_opts = ["%s=%s" % opt for opt in snap_opts.items()] - cmd = ['snap', 'set', service] + snap_opts + cmd = ["snap", "set", service] + snap_opts check_call(cmd) # Now that we've started doing snap configuration through the "args" @@ -437,36 +492,36 @@ def configure_kubernetes_service(key, service, base_args, extra_args_key): def _snap_common_path(component): - return Path('/var/snap/{}/common'.format(component)) + return Path("/var/snap/{}/common".format(component)) def cloud_config_path(component): - return _snap_common_path(component) / 'cloud-config.conf' + return _snap_common_path(component) / "cloud-config.conf" def _gcp_creds_path(component): - return _snap_common_path(component) / 'gcp-creds.json' + return _snap_common_path(component) / "gcp-creds.json" def _daemon_env_path(component): - return _snap_common_path(component) / 'environment' + return _snap_common_path(component) / "environment" def _cloud_endpoint_ca_path(component): - return _snap_common_path(component) / 'cloud-endpoint-ca.crt' + return _snap_common_path(component) / "cloud-endpoint-ca.crt" def encryption_config_path(): - apiserver_snap_common_path = _snap_common_path('kube-apiserver') - encryption_conf_dir = apiserver_snap_common_path / 'encryption' - return encryption_conf_dir / 'encryption_config.yaml' + apiserver_snap_common_path = _snap_common_path("kube-apiserver") + encryption_conf_dir = apiserver_snap_common_path / "encryption" + return encryption_conf_dir / "encryption_config.yaml" def write_gcp_snap_config(component): # gcp requires additional credentials setup - gcp = 
endpoint_from_flag('endpoint.gcp.ready') + gcp = endpoint_from_flag("endpoint.gcp.ready") creds_path = _gcp_creds_path(component) - with creds_path.open('w') as fp: + with creds_path.open("w") as fp: os.fchmod(fp.fileno(), 0o600) fp.write(gcp.credentials) @@ -474,197 +529,206 @@ def write_gcp_snap_config(component): # services use the creds env var instead of the metadata server, as # well as making the cluster multizone comp_cloud_config_path = cloud_config_path(component) - comp_cloud_config_path.write_text('[Global]\n' - 'token-url = nil\n' - 'multizone = true\n') + comp_cloud_config_path.write_text( + "[Global]\n" "token-url = nil\n" "multizone = true\n" + ) daemon_env_path = _daemon_env_path(component) if daemon_env_path.exists(): daemon_env = daemon_env_path.read_text() - if not daemon_env.endswith('\n'): - daemon_env += '\n' + if not daemon_env.endswith("\n"): + daemon_env += "\n" else: - daemon_env = '' + daemon_env = "" if gcp_creds_env_key not in daemon_env: - daemon_env += '{}={}\n'.format(gcp_creds_env_key, creds_path) + daemon_env += "{}={}\n".format(gcp_creds_env_key, creds_path) daemon_env_path.parent.mkdir(parents=True, exist_ok=True) daemon_env_path.write_text(daemon_env) def generate_openstack_cloud_config(): # openstack requires additional credentials setup - openstack = endpoint_from_flag('endpoint.openstack.ready') + openstack = endpoint_from_flag("endpoint.openstack.ready") lines = [ - '[Global]', - 'auth-url = {}'.format(openstack.auth_url), - 'region = {}'.format(openstack.region), - 'username = {}'.format(openstack.username), - 'password = {}'.format(openstack.password), - 'tenant-name = {}'.format(openstack.project_name), - 'domain-name = {}'.format(openstack.user_domain_name), - 'tenant-domain-name = {}'.format(openstack.project_domain_name), + "[Global]", + "auth-url = {}".format(openstack.auth_url), + "region = {}".format(openstack.region), + "username = {}".format(openstack.username), + "password = {}".format(openstack.password), + 
"tenant-name = {}".format(openstack.project_name), + "domain-name = {}".format(openstack.user_domain_name), + "tenant-domain-name = {}".format(openstack.project_domain_name), ] if openstack.endpoint_tls_ca: - lines.append('ca-file = /etc/config/endpoint-ca.cert') + lines.append("ca-file = /etc/config/endpoint-ca.cert") - lines.extend([ - '', - '[LoadBalancer]', - ]) + lines.extend( + [ + "", + "[LoadBalancer]", + ] + ) if openstack.has_octavia in (True, None): # Newer integrator charm will detect whether underlying OpenStack has # Octavia enabled so we can set this intelligently. If we're still # related to an older integrator, though, default to assuming Octavia # is available. - lines.append('use-octavia = true') + lines.append("use-octavia = true") else: - lines.append('use-octavia = false') - lines.append('lb-provider = haproxy') + lines.append("use-octavia = false") + lines.append("lb-provider = haproxy") if openstack.subnet_id: - lines.append('subnet-id = {}'.format(openstack.subnet_id)) + lines.append("subnet-id = {}".format(openstack.subnet_id)) if openstack.floating_network_id: - lines.append('floating-network-id = {}'.format( - openstack.floating_network_id)) + lines.append("floating-network-id = {}".format(openstack.floating_network_id)) if openstack.lb_method: - lines.append('lb-method = {}'.format( - openstack.lb_method)) + lines.append("lb-method = {}".format(openstack.lb_method)) if openstack.manage_security_groups: - lines.append('manage-security-groups = {}'.format( - openstack.manage_security_groups)) - if any([openstack.bs_version, - openstack.trust_device_path, - openstack.ignore_volume_az]): - lines.append('') - lines.append('[BlockStorage]') + lines.append( + "manage-security-groups = {}".format(openstack.manage_security_groups) + ) + if any( + [openstack.bs_version, openstack.trust_device_path, openstack.ignore_volume_az] + ): + lines.append("") + lines.append("[BlockStorage]") if openstack.bs_version is not None: - lines.append('bs-version = 
{}'.format(openstack.bs_version)) + lines.append("bs-version = {}".format(openstack.bs_version)) if openstack.trust_device_path is not None: - lines.append('trust-device-path = {}'.format( - openstack.trust_device_path)) + lines.append("trust-device-path = {}".format(openstack.trust_device_path)) if openstack.ignore_volume_az is not None: - lines.append('ignore-volume-az = {}'.format( - openstack.ignore_volume_az)) - return '\n'.join(lines) + '\n' + lines.append("ignore-volume-az = {}".format(openstack.ignore_volume_az)) + return "\n".join(lines) + "\n" def write_azure_snap_config(component): - azure = endpoint_from_flag('endpoint.azure.ready') + azure = endpoint_from_flag("endpoint.azure.ready") comp_cloud_config_path = cloud_config_path(component) - comp_cloud_config_path.write_text(json.dumps({ - 'useInstanceMetadata': True, - 'useManagedIdentityExtension': True, - 'subscriptionId': azure.subscription_id, - 'resourceGroup': azure.resource_group, - 'location': azure.resource_group_location, - 'vnetName': azure.vnet_name, - 'vnetResourceGroup': azure.vnet_resource_group, - 'subnetName': azure.subnet_name, - 'securityGroupName': azure.security_group_name, - 'loadBalancerSku': 'standard' - })) + comp_cloud_config_path.write_text( + json.dumps( + { + "useInstanceMetadata": True, + "useManagedIdentityExtension": azure.managed_identity, + "subscriptionId": azure.subscription_id, + "resourceGroup": azure.resource_group, + "location": azure.resource_group_location, + "vnetName": azure.vnet_name, + "vnetResourceGroup": azure.vnet_resource_group, + "subnetName": azure.subnet_name, + "securityGroupName": azure.security_group_name, + "loadBalancerSku": "standard", + "securityGroupResourceGroup": azure.security_group_resource_group, + "aadClientId": azure.aad_client_id, + "aadClientSecret": azure.aad_client_secret, + "tenantId": azure.tenant_id, + } + ) + ) -def configure_kube_proxy(configure_prefix, api_servers, cluster_cidr, - bind_address=None): +def configure_kube_proxy( 
+ configure_prefix, api_servers, cluster_cidr, bind_address=None +): kube_proxy_opts = {} - kube_proxy_opts['cluster-cidr'] = cluster_cidr - kube_proxy_opts['kubeconfig'] = kubeproxyconfig_path - kube_proxy_opts['logtostderr'] = 'true' - kube_proxy_opts['v'] = '0' + kube_proxy_opts["cluster-cidr"] = cluster_cidr + kube_proxy_opts["kubeconfig"] = kubeproxyconfig_path + kube_proxy_opts["logtostderr"] = "true" + kube_proxy_opts["v"] = "0" num_apis = len(api_servers) - kube_proxy_opts['master'] = api_servers[get_unit_number() % num_apis] - kube_proxy_opts['hostname-override'] = get_node_name() + kube_proxy_opts["master"] = api_servers[get_unit_number() % num_apis] + kube_proxy_opts["hostname-override"] = get_node_name() if bind_address: - kube_proxy_opts['bind-address'] = bind_address + kube_proxy_opts["bind-address"] = bind_address elif is_ipv6(cluster_cidr): - kube_proxy_opts['bind-address'] = '::' + kube_proxy_opts["bind-address"] = "::" if host.is_container(): - kube_proxy_opts['conntrack-max-per-core'] = '0' + kube_proxy_opts["conntrack-max-per-core"] = "0" if is_dual_stack(cluster_cidr): - kube_proxy_opts['feature-gates'] = "IPv6DualStack=true" + kube_proxy_opts["feature-gates"] = "IPv6DualStack=true" - configure_kubernetes_service(configure_prefix, 'kube-proxy', - kube_proxy_opts, 'proxy-extra-args') + configure_kubernetes_service( + configure_prefix, "kube-proxy", kube_proxy_opts, "proxy-extra-args" + ) def get_unit_number(): - return int(hookenv.local_unit().split('/')[1]) + return int(hookenv.local_unit().split("/")[1]) def cluster_cidr(): - '''Return the cluster CIDR provided by the CNI''' - cni = endpoint_from_flag('cni.available') + """Return the cluster CIDR provided by the CNI""" + cni = endpoint_from_flag("cni.available") if not cni: return None config = hookenv.config() - if 'default-cni' in config: + if "default-cni" in config: # master - default_cni = config['default-cni'] + default_cni = config["default-cni"] else: # worker - kube_control = 
endpoint_from_flag('kube-control.dns.available') + kube_control = endpoint_from_flag("kube-control.dns.available") if not kube_control: return None default_cni = kube_control.get_default_cni() - return cni.get_config(default=default_cni)['cidr'] + return cni.get_config(default=default_cni)["cidr"] def is_dual_stack(cidrs): - '''Detect IPv4/IPv6 dual stack from CIDRs''' + """Detect IPv4/IPv6 dual stack from CIDRs""" return {net.version for net in get_networks(cidrs)} == {4, 6} def is_ipv4(cidrs): - '''Detect IPv6 from CIDRs''' + """Detect IPv6 from CIDRs""" return get_ipv4_network(cidrs) is not None def is_ipv6(cidrs): - '''Detect IPv6 from CIDRs''' + """Detect IPv6 from CIDRs""" return get_ipv6_network(cidrs) is not None def is_ipv6_preferred(cidrs): - '''Detect if IPv6 is preffered from CIDRs''' + """Detect if IPv6 is preffered from CIDRs""" return get_networks(cidrs)[0].version == 6 def get_networks(cidrs): - '''Convert a comma-separated list of CIDRs to a list of networks.''' + """Convert a comma-separated list of CIDRs to a list of networks.""" if not cidrs: return [] - return [ipaddress.ip_interface(cidr).network for cidr in cidrs.split(',')] + return [ipaddress.ip_interface(cidr).network for cidr in cidrs.split(",")] def get_ipv4_network(cidrs): - '''Get the IPv4 network from the given CIDRs or None''' + """Get the IPv4 network from the given CIDRs or None""" return {net.version: net for net in get_networks(cidrs)}.get(4) def get_ipv6_network(cidrs): - '''Get the IPv6 network from the given CIDRs or None''' + """Get the IPv6 network from the given CIDRs or None""" return {net.version: net for net in get_networks(cidrs)}.get(6) def enable_ipv6_forwarding(): - '''Enable net.ipv6.conf.all.forwarding in sysctl if it is not already.''' - check_call(['sysctl', 'net.ipv6.conf.all.forwarding=1']) + """Enable net.ipv6.conf.all.forwarding in sysctl if it is not already.""" + check_call(["sysctl", "net.ipv6.conf.all.forwarding=1"]) def get_bind_addrs(ipv4=True, 
ipv6=True): - '''Get all global-scoped addresses that we might bind to.''' + """Get all global-scoped addresses that we might bind to.""" try: output = check_output(["ip", "-br", "addr", "show", "scope", "global"]) except CalledProcessError: # stderr will have any details, and go to the log - hookenv.log('Unable to determine global addresses', hookenv.ERROR) + hookenv.log("Unable to determine global addresses", hookenv.ERROR) return [] - ignore_interfaces = ('lxdbr', 'flannel', 'cni', 'virbr', 'docker') + ignore_interfaces = ("lxdbr", "flannel", "cni", "virbr", "docker") accept_versions = set() if ipv4: accept_versions.add(4) @@ -672,10 +736,11 @@ def get_bind_addrs(ipv4=True, ipv6=True): accept_versions.add(6) addrs = [] - for line in output.decode('utf8').splitlines(): + for line in output.decode("utf8").splitlines(): intf, state, *intf_addrs = line.split() - if state != 'UP' or any(intf.startswith(prefix) - for prefix in ignore_interfaces): + if state != "UP" or any( + intf.startswith(prefix) for prefix in ignore_interfaces + ): continue for addr in intf_addrs: ip_addr = ipaddress.ip_interface(addr).ip @@ -689,24 +754,171 @@ class InvalidVMwareHost(Exception): def _get_vmware_uuid(): - serial_id_file = '/sys/class/dmi/id/product_serial' + serial_id_file = "/sys/class/dmi/id/product_serial" # The serial id from VMWare VMs comes in following format: # VMware-42 28 13 f5 d4 20 71 61-5d b0 7b 96 44 0c cf 54 try: - with open(serial_id_file, 'r') as f: + with open(serial_id_file, "r") as f: serial_string = f.read().strip() if "VMware-" not in serial_string: - hookenv.log("Unable to find VMware ID in " - "product_serial: {}".format(serial_string)) + hookenv.log( + "Unable to find VMware ID in " + "product_serial: {}".format(serial_string) + ) raise InvalidVMwareHost - serial_string = serial_string.split( - "VMware-")[1].replace(" ", "").replace("-", "") + serial_string = ( + serial_string.split("VMware-")[1].replace(" ", "").replace("-", "") + ) uuid = "%s-%s-%s-%s-%s" 
% ( - serial_string[0:8], serial_string[8:12], serial_string[12:16], - serial_string[16:20], serial_string[20:32]) + serial_string[0:8], + serial_string[8:12], + serial_string[12:16], + serial_string[16:20], + serial_string[20:32], + ) except IOError as err: hookenv.log("Unable to read UUID from sysfs: {}".format(err)) - uuid = 'UNKNOWN' + uuid = "UNKNOWN" return uuid + +def token_generator(length=32): + """Generate a random token for use in account tokens. + + param: length - the length of the token to generate + """ + alpha = string.ascii_letters + string.digits + token = "".join(random.SystemRandom().choice(alpha) for _ in range(length)) + return token + + +def get_secret_names(): + """Return a dict of 'username: secret_id' for Charmed Kubernetes users.""" + try: + output = kubectl( + "get", + "secrets", + "-n", + AUTH_SECRET_NS, + "--field-selector", + "type={}".format(AUTH_SECRET_TYPE), + "-o", + "json", + ).decode("UTF-8") + except (CalledProcessError, FileNotFoundError): + # The api server may not be up, or we may be trying to run kubelet before + # the snap is installed. Send back an empty dict. + hookenv.log("Unable to get existing secrets", level=hookenv.WARNING) + return {} + + secrets = json.loads(output) + secret_names = {} + if "items" in secrets: + for secret in secrets["items"]: + try: + secret_id = secret["metadata"]["name"] + username_b64 = secret["data"]["username"].encode("UTF-8") + except (KeyError, TypeError): + # CK secrets will have populated 'data', but not all secrets do + continue + secret_names[b64decode(username_b64).decode("UTF-8")] = secret_id + return secret_names + + +def generate_rfc1123(length=10): + """Generate a random string compliant with RFC 1123. 
+ + https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#dns-subdomain-names + + param: length - the length of the string to generate + """ + length = 253 if length > 253 else length + valid_chars = string.ascii_lowercase + string.digits + rand_str = "".join(random.SystemRandom().choice(valid_chars) for _ in range(length)) + return rand_str + + +def create_secret(token, username, user, groups=None): + secrets = get_secret_names() + if username in secrets: + # Use existing secret ID if one exists for our username + secret_id = secrets[username] + else: + # secret IDs must be unique and rfc1123 compliant + sani_name = re.sub("[^0-9a-z.-]+", "-", user.lower()) + secret_id = "auth-{}-{}".format(sani_name, generate_rfc1123(10)) + + # The authenticator expects tokens to be in the form user::token + token_delim = "::" + if token_delim not in token: + token = "{}::{}".format(user, token) + + context = { + "type": AUTH_SECRET_TYPE, + "secret_name": secret_id, + "secret_namespace": AUTH_SECRET_NS, + "user": b64encode(user.encode("UTF-8")).decode("utf-8"), + "username": b64encode(username.encode("UTF-8")).decode("utf-8"), + "password": b64encode(token.encode("UTF-8")).decode("utf-8"), + "groups": b64encode(groups.encode("UTF-8")).decode("utf-8") if groups else "", + } + with tempfile.NamedTemporaryFile() as tmp_manifest: + render("cdk.auth-webhook-secret.yaml", tmp_manifest.name, context=context) + + if kubectl_manifest("apply", tmp_manifest.name): + hookenv.log("Created secret for {}".format(username)) + return True + else: + hookenv.log("WARN: Unable to create secret for {}".format(username)) + return False + + +def get_secret_password(username): + """Get the password for the given user from the secret that CK created.""" + try: + output = kubectl( + "get", + "secrets", + "-n", + AUTH_SECRET_NS, + "--field-selector", + "type={}".format(AUTH_SECRET_TYPE), + "-o", + "json", + ).decode("UTF-8") + except CalledProcessError: + # NB: apiserver probably isn't 
up. This can happen on boostrap or upgrade + # while trying to build kubeconfig files. If we need the 'admin' token during + # this time, pull it directly out of the kubeconfig file if possible. + token = None + if username == "admin": + admin_kubeconfig = Path("/root/.kube/config") + if admin_kubeconfig.exists(): + data = yaml.safe_load(admin_kubeconfig.read_text()) + try: + token = data["users"][0]["user"]["token"] + except (KeyError, IndexError, TypeError): + pass + return token + except FileNotFoundError: + # New deployments may ask for a token before the kubectl snap is installed. + # Give them nothing! + return None + + secrets = json.loads(output) + if "items" in secrets: + for secret in secrets["items"]: + try: + data_b64 = secret["data"] + password_b64 = data_b64["password"].encode("UTF-8") + username_b64 = data_b64["username"].encode("UTF-8") + except (KeyError, TypeError): + # CK authn secrets will have populated 'data', but not all secrets do + continue + + password = b64decode(password_b64).decode("UTF-8") + secret_user = b64decode(username_b64).decode("UTF-8") + if username == secret_user: + return password + return None diff --git a/kubernetes-master/lib/charms/layer/kubernetes_master.py b/kubernetes-master/lib/charms/layer/kubernetes_master.py index 89a783e..9b72c34 100644 --- a/kubernetes-master/lib/charms/layer/kubernetes_master.py +++ b/kubernetes-master/lib/charms/layer/kubernetes_master.py @@ -1,29 +1,28 @@ import csv import json import random -import re import socket import string -import tempfile -from base64 import b64decode, b64encode from pathlib import Path import ipaddress from subprocess import check_output, CalledProcessError, TimeoutExpired +from time import sleep from yaml import safe_load +from charmhelpers.core import host from charmhelpers.core import hookenv from charmhelpers.core.templating import render from charmhelpers.core import unitdata from charmhelpers.fetch import apt_install -from charms.reactive import 
endpoint_from_flag, is_flag_set +from charms.reactive import endpoint_from_flag, endpoint_from_name, is_flag_set from charms.layer import kubernetes_common +from charms.layer.kubernetes_common import AUTH_SECRET_NS, create_secret AUTH_BACKUP_EXT = "pre-secrets" AUTH_BASIC_FILE = "/root/cdk/basic_auth.csv" -AUTH_SECRET_NS = "kube-system" -AUTH_SECRET_TYPE = "juju.is/token-auth" AUTH_TOKENS_FILE = "/root/cdk/known_tokens.csv" +EXTERNAL_API_PORT = 443 STANDARD_API_PORT = 6443 CEPH_CONF_DIR = Path("/etc/ceph") CEPH_CONF = CEPH_CONF_DIR / "ceph.conf" @@ -32,10 +31,9 @@ CEPH_KEYRING = CEPH_CONF_DIR / "ceph.client.admin.keyring" db = unitdata.kv() -def get_external_lb_endpoints(): +def get_endpoints_from_config(): """ - Return a list of any external API load-balancer endpoints that have - been manually configured. + Return a list of any manually configured API endpoints. """ ha_connected = is_flag_set("ha.connected") forced_lb_ips = hookenv.config("loadbalancer-ips").split() @@ -54,41 +52,120 @@ def get_external_lb_endpoints(): return [] -def get_lb_endpoints(): +def get_local_api_endpoint(): """ - Return all load-balancer endpoints, whether from manual config or via - relation. - """ - external_lb_endpoints = get_external_lb_endpoints() - loadbalancer = endpoint_from_flag("loadbalancer.available") + Return the local address & port for self-access. - if external_lb_endpoints: - return external_lb_endpoints - elif loadbalancer: + Returns a list with a single tuple to match the other functions below. + """ + return [("127.0.0.1", STANDARD_API_PORT)] + + +def get_internal_api_endpoints(relation=None): + """ + Determine the best API endpoints for an internal client to connect to. + + If a relation is given, it will try to take that into account. + + May return an empty list if an endpoint is expected but not yet available. 
+ """ + try: + goal_state = hookenv.goal_state() + except NotImplementedError: + goal_state = {} + goal_state.setdefault("relations", {}) + + # Config takes precedence. + endpoints_from_config = get_endpoints_from_config() + if endpoints_from_config: + return endpoints_from_config + + # If the internal LB relation is attached, use that or nothing. If it's + # not attached but the external LB relation is, use that or nothing. + for lb_type in ("internal", "external"): + lb_endpoint = "loadbalancer-" + lb_type + request_name = "api-server-" + lb_type + api_port = EXTERNAL_API_PORT if lb_type == "external" else STANDARD_API_PORT + if lb_endpoint in goal_state["relations"]: + lb_provider = endpoint_from_name(lb_endpoint) + lb_response = lb_provider.get_response(request_name) + if not lb_response or lb_response.error: + return [] + return [(lb_response.address, api_port)] + + # Support the older loadbalancer relation (public-address interface). + if "loadbalancer" in goal_state["relations"]: + loadbalancer = endpoint_from_name("loadbalancer") lb_addresses = loadbalancer.get_addresses_ports() return [(host.get("public-address"), host.get("port")) for host in lb_addresses] - else: - return [] + + # No LBs of any kind, so fall back to ingress-address. + if not relation: + kube_control = endpoint_from_name("kube-control") + if not kube_control.relations: + return [] + relation = kube_control.relations[0] + ingress_address = hookenv.ingress_address( + relation.relation_id, hookenv.local_unit() + ) + return [(ingress_address, STANDARD_API_PORT)] -def get_api_endpoint(relation=None): +def get_external_api_endpoints(): """ - Determine the best endpoint for a client to connect to. + Determine the best API endpoints for an external client to connect to. - If a relation is given, it will take that into account when choosing an - endpoint. + May return an empty list if an endpoint is expected but not yet available. 
""" - endpoints = get_lb_endpoints() - if endpoints: - # select a single endpoint based on our local unit number - return endpoints[kubernetes_common.get_unit_number() % len(endpoints)] - elif relation: - ingress_address = hookenv.ingress_address( - relation.relation_id, hookenv.local_unit() - ) - return (ingress_address, STANDARD_API_PORT) - else: - return (hookenv.unit_public_ip(), STANDARD_API_PORT) + try: + goal_state = hookenv.goal_state() + except NotImplementedError: + goal_state = {} + goal_state.setdefault("relations", {}) + + # Config takes precedence. + endpoints_from_config = get_endpoints_from_config() + if endpoints_from_config: + return endpoints_from_config + + # If the external LB relation is attached, use that or nothing. If it's + # not attached but the internal LB relation is, use that or nothing. + for lb_type in ("external", "internal"): + lb_endpoint = "loadbalancer-" + lb_type + lb_name = "api-server-" + lb_type + api_port = EXTERNAL_API_PORT if lb_type == "external" else STANDARD_API_PORT + if lb_endpoint in goal_state["relations"]: + lb_provider = endpoint_from_name(lb_endpoint) + lb_response = lb_provider.get_response(lb_name) + if not lb_response or lb_response.error: + return [] + return [(lb_response.address, api_port)] + + # Support the older loadbalancer relation (public-address interface). + if "loadbalancer" in goal_state["relations"]: + loadbalancer = endpoint_from_name("loadbalancer") + lb_addresses = loadbalancer.get_addresses_ports() + return [(host.get("public-address"), host.get("port")) for host in lb_addresses] + + # No LBs of any kind, so fall back to public-address. + return [(hookenv.unit_public_ip(), STANDARD_API_PORT)] + + +def get_api_urls(endpoints): + """ + Convert a list of API server endpoints to URLs. + """ + return ["https://{0}:{1}".format(*endpoint) for endpoint in endpoints] + + +def get_api_url(endpoints): + """ + Choose an API endpoint from the list and build a URL from it. 
+ """ + if not endpoints: + return None + urls = get_api_urls(endpoints) + return urls[kubernetes_common.get_unit_number() % len(urls)] def install_ceph_common(): @@ -189,19 +266,6 @@ def migrate_auth_file(filename): return True -def generate_rfc1123(length=10): - """Generate a random string compliant with RFC 1123. - - https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#dns-subdomain-names - - param: length - the length of the string to generate - """ - length = 253 if length > 253 else length - valid_chars = string.ascii_lowercase + string.digits - rand_str = "".join(random.SystemRandom().choice(valid_chars) for _ in range(length)) - return rand_str - - def token_generator(length=32): """Generate a random token for use in account tokens. @@ -248,43 +312,6 @@ def create_known_token(token, username, user, groups=None): ) -def create_secret(token, username, user, groups=None): - secrets = get_secret_names() - if username in secrets: - # Use existing secret ID if one exists for our username - secret_id = secrets[username] - else: - # secret IDs must be unique and rfc1123 compliant - sani_name = re.sub("[^0-9a-z.-]+", "-", user.lower()) - secret_id = "auth-{}-{}".format(sani_name, generate_rfc1123(10)) - - # The authenticator expects tokens to be in the form user::token - token_delim = "::" - if token_delim not in token: - token = "{}::{}".format(user, token) - - context = { - "type": AUTH_SECRET_TYPE, - "secret_name": secret_id, - "secret_namespace": AUTH_SECRET_NS, - "user": b64encode(user.encode("UTF-8")).decode("utf-8"), - "username": b64encode(username.encode("UTF-8")).decode("utf-8"), - "password": b64encode(token.encode("UTF-8")).decode("utf-8"), - "groups": b64encode(groups.encode("UTF-8")).decode("utf-8") if groups else "", - } - with tempfile.NamedTemporaryFile() as tmp_manifest: - render( - "cdk.master.auth-webhook-secret.yaml", tmp_manifest.name, context=context - ) - - if kubernetes_common.kubectl_manifest("apply", tmp_manifest.name): 
- hookenv.log("Created secret for {}".format(username)) - return True - else: - hookenv.log("WARN: Unable to create secret for {}".format(username)) - return False - - def delete_secret(secret_id): """Delete a given secret id.""" # If this fails, it's most likely because we're trying to delete a secret @@ -312,90 +339,6 @@ def get_csv_password(csv_fname, user): return None -def get_secret_names(): - """Return a dict of 'username: secret_id' for Charmed Kubernetes users.""" - try: - output = kubernetes_common.kubectl( - "get", - "secrets", - "-n", - AUTH_SECRET_NS, - "--field-selector", - "type={}".format(AUTH_SECRET_TYPE), - "-o", - "json", - ).decode("UTF-8") - except (CalledProcessError, FileNotFoundError): - # The api server may not be up, or we may be trying to run kubelet before - # the snap is installed. Send back an empty dict. - hookenv.log("Unable to get existing secrets", level=hookenv.WARNING) - return {} - - secrets = json.loads(output) - secret_names = {} - if "items" in secrets: - for secret in secrets["items"]: - try: - secret_id = secret["metadata"]["name"] - username_b64 = secret["data"]["username"].encode("UTF-8") - except (KeyError, TypeError): - # CK secrets will have populated 'data', but not all secrets do - continue - secret_names[b64decode(username_b64).decode("UTF-8")] = secret_id - return secret_names - - -def get_secret_password(username): - """Get the password for the given user from the secret that CK created.""" - try: - output = kubernetes_common.kubectl( - "get", - "secrets", - "-n", - AUTH_SECRET_NS, - "--field-selector", - "type={}".format(AUTH_SECRET_TYPE), - "-o", - "json", - ).decode("UTF-8") - except CalledProcessError: - # NB: apiserver probably isn't up. This can happen on boostrap or upgrade - # while trying to build kubeconfig files. If we need the 'admin' token during - # this time, pull it directly out of the kubeconfig file if possible. 
- token = None - if username == "admin": - admin_kubeconfig = Path("/root/.kube/config") - if admin_kubeconfig.exists(): - with admin_kubeconfig.open("r") as f: - data = safe_load(f) - try: - token = data["users"][0]["user"]["token"] - except (KeyError, ValueError): - pass - return token - except FileNotFoundError: - # New deployments may ask for a token before the kubectl snap is installed. - # Give them nothing! - return None - - secrets = json.loads(output) - if "items" in secrets: - for secret in secrets["items"]: - try: - data_b64 = secret["data"] - password_b64 = data_b64["password"].encode("UTF-8") - username_b64 = data_b64["username"].encode("UTF-8") - except (KeyError, TypeError): - # CK authn secrets will have populated 'data', but not all secrets do - continue - - password = b64decode(password_b64).decode("UTF-8") - secret_user = b64decode(username_b64).decode("UTF-8") - if username == secret_user: - return password - return None - - try: ipaddress.IPv4Network.subnet_of except AttributeError: @@ -443,7 +386,7 @@ def is_service_cidr_expansion(): def service_cidr(): - """ Return the charm's service-cidr config""" + """Return the charm's service-cidr config""" frozen_cidr = db.get("kubernetes-master.service-cidr") return frozen_cidr or hookenv.config("service-cidr") @@ -506,3 +449,31 @@ def get_snap_revs(snaps): ) rev_info[s] = snap_rev return rev_info + + +def check_service(service, attempts=6, delay=10): + """Check if a given service is up, giving it a bit of time to come up if needed. + + Returns True if the service is running, False if not, or raises a ValueError if + the service is unknown. Will automatically handle translating master component + names (e.g., kube-apiserver) to service names (snap.kube-apiserver.daemon). 
+ """ + for pattern in ("{}", "snap.{}", "snap.{}.daemon", "snap.kube-{}.daemon"): + if host.service("is-enabled", pattern.format(service)): + service = pattern.format(service) + break + else: + raise ValueError("Unknown service: {}".format(service)) + # Give each service up to a minute to become active; this is especially + # needed now that controller-mgr/scheduler/proxy need the apiserver + # to validate their token against a k8s secret. + attempt = 0 + while attempt < attempts: + hookenv.log( + "Checking if {} is active ({} / {})".format(service, attempt, attempts) + ) + if host.service_running(service): + return True + sleep(delay) + attempt += 1 + return False diff --git a/kubernetes-master/lib/charms/layer/snap.py b/kubernetes-master/lib/charms/layer/snap.py index 88b8d89..06cc4b1 100644 --- a/kubernetes-master/lib/charms/layer/snap.py +++ b/kubernetes-master/lib/charms/layer/snap.py @@ -300,7 +300,15 @@ def get_installed_channel(snapname): hookenv.WARNING, ) return - return subprocess.check_output(cmd).decode("utf-8", errors="replace").partition("tracking:")[-1].split()[0] + try: + return subprocess.check_output(cmd).decode("utf-8", errors="replace").partition("tracking:")[-1].split()[0] + except Exception as e: + # If it fails to get the channel information(ex. installed via resource), return nothing. 
+ hookenv.log( + "Cannot get snap tracking (channel): {}".format(e), + hookenv.WARNING, + ) + return def _snap_args( @@ -351,25 +359,28 @@ def _install_store(snapname, **kw): cmd.append(snapname) hookenv.log("Installing {} from store".format(snapname)) - for attempt in tenacity.Retrying( + # Use tenacity decorator for Trusty support (See LP Bug #1934163) + @tenacity.retry( wait=tenacity.wait_fixed(10), # seconds stop=tenacity.stop_after_attempt(3), reraise=True, - ): - with attempt: - try: - out = subprocess.check_output(cmd, stderr=subprocess.STDOUT) - hookenv.log( - 'Installation successful cmd="{}" output="{}"'.format(cmd, out), - level=hookenv.DEBUG, - ) - reactive.clear_flag(get_local_flag(snapname)) - except subprocess.CalledProcessError as cp: - hookenv.log( - 'Installation failed cmd="{}" returncode={} output="{}"'.format(cmd, cp.returncode, cp.output), - level=hookenv.ERROR, - ) - raise + ) + def _run_install(): + try: + out = subprocess.check_output(cmd, stderr=subprocess.STDOUT) + hookenv.log( + 'Installation successful cmd="{}" output="{}"'.format(cmd, out), + level=hookenv.DEBUG, + ) + reactive.clear_flag(get_local_flag(snapname)) + except subprocess.CalledProcessError as cp: + hookenv.log( + 'Installation failed cmd="{}" returncode={} output="{}"'.format(cmd, cp.returncode, cp.output), + level=hookenv.ERROR, + ) + raise + + _run_install() def _refresh_store(snapname, **kw): diff --git a/kubernetes-master/lib/charms/layer/vault_kv.py b/kubernetes-master/lib/charms/layer/vault_kv.py index 8ca023c..fc30c19 100644 --- a/kubernetes-master/lib/charms/layer/vault_kv.py +++ b/kubernetes-master/lib/charms/layer/vault_kv.py @@ -248,7 +248,13 @@ def _get_secret_id(vault): # being told to rotate the secret ID, or we might not have fetched # one yet vault_url = vault.vault_url - secret_id = retrieve_secret_id(vault_url, token) + try: + secret_id = retrieve_secret_id(vault_url, token) + except (requests.exceptions.ConnectionError, + hvac.exceptions.VaultDown, + 
hvac.exceptions.VaultNotInitialized, + hvac.exceptions.BadGateway) as e: + raise VaultNotReady() from e unitdata.kv().set('layer.vault-kv.secret_id', secret_id) # have to flush immediately because if we don't and hit some error # elsewhere, it could get us into a state where we have forgotten the diff --git a/kubernetes-master/metadata.yaml b/kubernetes-master/metadata.yaml index 2446cb1..5fa8e5f 100644 --- a/kubernetes-master/metadata.yaml +++ b/kubernetes-master/metadata.yaml @@ -34,6 +34,8 @@ "etcd": "interface": "etcd" "loadbalancer": + # Use of this relation is strongly discouraged in favor of the more + # explicit loadbalancer-internal / loadbalancer-external relations. "interface": "public-address" "ceph-storage": "interface": "ceph-admin" @@ -53,6 +55,14 @@ "interface": "keystone-credentials" "dns-provider": "interface": "kube-dns" + "loadbalancer-internal": + # Indicates that the LB should not be public and should use internal + # networks if available. Intended for control plane and other internal use. + "interface": "loadbalancer" + "loadbalancer-external": + # Indicates that the LB should be public facing. Intended for clients which + # must reach the API server via external networks. + "interface": "loadbalancer" "provides": "nrpe-external-master": "interface": "nrpe-external-master" @@ -61,12 +71,14 @@ "interface": "container-runtime" "scope": "container" "kube-api-endpoint": + # Use of this relation is strongly discouraged as the API endpoints will be + # provided via the kube-control relation. However, it can be used to + # override those endpoints if you need to inject a reverse proxy between + # the master and workers using a charm which only supports the old MITM + # style relations. Note, though, that since this reverse proxy will not be + # visible to the master, it will not be used in any of the client or + # component kube config files. "interface": "http" - "cluster-dns": - # kube-dns is deprecated. 
Its functionality has been rolled into the - # kube-control interface. The cluster-dns relation will be removed in - # a future release. - "interface": "kube-dns" "kube-control": "interface": "kube-control" "cni": @@ -88,29 +100,50 @@ "core": "type": "file" "filename": "core.snap" - "description": "core snap" + "description": | + core snap + [DEPRECATED] in favor of using a Snap Store Proxy. + See https://ubuntu.com/kubernetes/docs/proxies for more details. "kubectl": "type": "file" "filename": "kubectl.snap" - "description": "kubectl snap" + "description": | + kubectl snap + [DEPRECATED] in favor of using a Snap Store Proxy. + See https://ubuntu.com/kubernetes/docs/proxies for more details. "kube-apiserver": "type": "file" "filename": "kube-apiserver.snap" - "description": "kube-apiserver snap" + "description": | + kube-apiserver snap + [DEPRECATED] in favor of using a Snap Store Proxy. + See https://ubuntu.com/kubernetes/docs/proxies for more details. "kube-controller-manager": "type": "file" "filename": "kube-controller-manager.snap" - "description": "kube-controller-manager snap" + "description": | + kube-controller-manager snap + [DEPRECATED] in favor of using a Snap Store Proxy. + See https://ubuntu.com/kubernetes/docs/proxies for more details. "kube-scheduler": "type": "file" "filename": "kube-scheduler.snap" - "description": "kube-scheduler snap" + "description": | + kube-scheduler snap + [DEPRECATED] in favor of using a Snap Store Proxy. + See https://ubuntu.com/kubernetes/docs/proxies for more details. "cdk-addons": "type": "file" "filename": "cdk-addons.snap" - "description": "CDK addons snap" + "description": | + CDK addons snap + [DEPRECATED] in favor of using a Snap Store Proxy. + See https://ubuntu.com/kubernetes/docs/proxies for more details. "kube-proxy": "type": "file" "filename": "kube-proxy.snap" - "description": "kube-proxy snap" + "description": | + kube-proxy snap + [DEPRECATED] in favor of using a Snap Store Proxy. 
+ See https://ubuntu.com/kubernetes/docs/proxies for more details. "subordinate": !!bool "false" diff --git a/kubernetes-master/reactive/kubernetes_master.py b/kubernetes-master/reactive/kubernetes_master.py index 448e191..c916cca 100644 --- a/kubernetes-master/reactive/kubernetes_master.py +++ b/kubernetes-master/reactive/kubernetes_master.py @@ -17,18 +17,17 @@ import base64 import os import re -import shutil import socket import json import traceback import yaml +from itertools import filterfalse from shutil import move, copyfile from pathlib import Path from subprocess import check_call from subprocess import check_output from subprocess import CalledProcessError -from time import sleep from urllib.request import Request, urlopen import charms.coordinator @@ -37,8 +36,8 @@ from charms.leadership import leader_get, leader_set from charms.reactive import hook from charms.reactive import remove_state, clear_flag from charms.reactive import set_state, set_flag -from charms.reactive import is_state, is_flag_set, get_unset_flags, all_flags_set -from charms.reactive import endpoint_from_flag +from charms.reactive import is_state, is_flag_set, get_unset_flags +from charms.reactive import endpoint_from_flag, endpoint_from_name from charms.reactive import when, when_any, when_not, when_none from charms.reactive import register_trigger from charms.reactive import data_changed, any_file_changed @@ -184,6 +183,10 @@ register_trigger( register_trigger( when="kube-control.requests.changed", clear_flag="authentication.setup" ) +register_trigger( + when_not="kubernetes-master.apiserver.configured", + clear_flag="kubernetes-master.apiserver.running", +) def set_upgrade_needed(forced=False): @@ -238,7 +241,6 @@ def check_for_upgrade_needed(): # to old ceph on Kubernetes 1.10 or 1.11 remove_state("kubernetes-master.ceph.configured") - migrate_from_pre_snaps() maybe_install_kube_proxy() update_certificates() switch_auth_mode(forced=True) @@ -371,58 +373,6 @@ def add_rbac_roles(): 
ftokens.write("{}".format(line)) -def rename_file_idempotent(source, destination): - if os.path.isfile(source): - os.rename(source, destination) - - -def migrate_from_pre_snaps(): - # remove old states - remove_state("kubernetes.components.installed") - remove_state("kubernetes.dashboard.available") - remove_state("kube-dns.available") - remove_state("kubernetes-master.app_version.set") - - # disable old services - pre_snap_services = ["kube-apiserver", "kube-controller-manager", "kube-scheduler"] - for service in pre_snap_services: - service_stop(service) - - # rename auth files - os.makedirs("/root/cdk", exist_ok=True) - rename_file_idempotent( - "/etc/kubernetes/serviceaccount.key", "/root/cdk/serviceaccount.key" - ) - rename_file_idempotent("/srv/kubernetes/basic_auth.csv", "/root/cdk/basic_auth.csv") - rename_file_idempotent( - "/srv/kubernetes/known_tokens.csv", "/root/cdk/known_tokens.csv" - ) - - # cleanup old files - files = [ - "/lib/systemd/system/kube-apiserver.service", - "/lib/systemd/system/kube-controller-manager.service", - "/lib/systemd/system/kube-scheduler.service", - "/etc/default/kube-defaults", - "/etc/default/kube-apiserver.defaults", - "/etc/default/kube-controller-manager.defaults", - "/etc/default/kube-scheduler.defaults", - "/home/ubuntu/kubectl", - "/usr/local/bin/kubectl", - "/usr/local/bin/kube-apiserver", - "/usr/local/bin/kube-controller-manager", - "/usr/local/bin/kube-scheduler", - "/etc/kubernetes", - ] - for file in files: - if os.path.isdir(file): - hookenv.log("Removing directory: " + file) - shutil.rmtree(file) - elif os.path.isfile(file): - hookenv.log("Removing file: " + file) - os.remove(file) - - @when("kubernetes-master.upgrade-specified") def do_upgrade(): install_snaps() @@ -600,7 +550,7 @@ def storage_backend_changed(): def configure_cni(cni): """Set master configuration on the CNI relation. 
This lets the CNI subordinate know that we're the master so it can respond accordingly.""" - cni.set_config(is_master=True, kubeconfig_path="") + cni.set_config(is_master=True) @when("leadership.is_leader") @@ -733,7 +683,7 @@ def get_keys_from_leader(keys, overwrite_local=False): @when("kubernetes-master.snaps.installed") def set_app_version(): - """ Declare the application version to juju """ + """Declare the application version to juju""" version = check_output(["kube-apiserver", "--version"]) hookenv.application_version_set(version.split(b" v")[-1].rstrip()) @@ -756,7 +706,7 @@ def check_vault_pending(): @hookenv.atexit def set_final_status(): - """ Set the final status of the charm as we leave hook execution """ + """Set the final status of the charm as we leave hook execution""" try: goal_state = hookenv.goal_state() except NotImplementedError: @@ -767,7 +717,10 @@ def set_final_status(): return if not is_flag_set("certificates.available"): - hookenv.status_set("blocked", "Missing relation to certificate authority.") + if "certificates" in goal_state.get("relations", {}): + hookenv.status_set("waiting", "Waiting for certificates authority.") + else: + hookenv.status_set("blocked", "Missing relation to certificate authority.") return if is_flag_set("kubernetes-master.secure-storage.failed"): @@ -780,7 +733,7 @@ def set_final_status(): elif is_flag_set("kubernetes-master.secure-storage.created"): if not encryption_config_path().exists(): hookenv.status_set( - "blocked", "VaultLocker containing encryption config " "unavailable" + "blocked", "VaultLocker containing encryption config unavailable" ) return @@ -800,13 +753,17 @@ def set_final_status(): hookenv.status_set("waiting", "Waiting for cloud integration") return - if not is_state("kube-api-endpoint.available"): - if "kube-api-endpoint" in goal_state.get("relations", {}): - status = "waiting" - else: - status = "blocked" - hookenv.status_set(status, "Waiting for kube-api-endpoint relation") - return + if 
"kube-api-endpoint" in goal_state.get("relations", {}): + if not is_state("kube-api-endpoint.available"): + hookenv.status_set("waiting", "Waiting for kube-api-endpoint relation") + return + + for lb_endpoint in ("loadbalancer-internal", "loadbalancer-external"): + if lb_endpoint in goal_state.get("relations", {}): + lb_provider = endpoint_from_name(lb_endpoint) + if not lb_provider.has_response: + hookenv.status_set("waiting", "Waiting for " + lb_endpoint) + return if not is_state("kube-control.connected"): if "kube-control" in goal_state.get("relations", {}): @@ -841,7 +798,7 @@ def set_final_status(): if is_state("kubernetes-master.vault-kv.pending"): hookenv.status_set( - "waiting", "Waiting for encryption info from Vault " "to secure secrets" + "waiting", "Waiting for encryption info from Vault to secure secrets" ) return @@ -851,10 +808,45 @@ def set_final_status(): ) return - auth_setup = is_flag_set("authentication.setup") - webhook_tokens_setup = is_flag_set("kubernetes-master.auth-webhook-tokens.setup") - if auth_setup and not webhook_tokens_setup: - hookenv.status_set("waiting", "Failed to setup auth-webhook tokens; will retry") + if not is_state("etcd.available"): + if "etcd" in goal_state.get("relations", {}): + status = "waiting" + else: + status = "blocked" + hookenv.status_set(status, "Waiting for etcd") + return + + if not is_state("cni.available"): + if "cni" in goal_state.get("relations", {}): + status = "waiting" + else: + status = "blocked" + hookenv.status_set(status, "Waiting for CNI plugins to become available") + return + + if not is_state("tls_client.certs.saved"): + hookenv.status_set("waiting", "Waiting for certificates") + return + + if not is_flag_set("kubernetes-master.auth-webhook-service.started"): + hookenv.status_set("waiting", "Waiting for auth-webhook service to start") + return + + if not is_flag_set("kubernetes-master.apiserver.configured"): + hookenv.status_set("waiting", "Waiting for API server to be configured") + return + 
+ if not is_flag_set("kubernetes-master.apiserver.running"): + hookenv.status_set("waiting", "Waiting for API server to start") + return + + authentication_setup = is_state("authentication.setup") + if not authentication_setup: + hookenv.status_set("waiting", "Waiting on crypto keys.") + return + + if not is_flag_set("kubernetes-master.auth-webhook-tokens.setup"): + hookenv.status_set("waiting", "Waiting for auth-webhook tokens") return if is_state("kubernetes-master.components.started"): @@ -867,20 +859,13 @@ def set_final_status(): else: # if we don't have components starting, we're waiting for that and # shouldn't fall through to Kubernetes master running. - if is_state("cni.available"): - hookenv.status_set("maintenance", "Waiting for master components to start") - else: - hookenv.status_set("waiting", "Waiting for CNI plugins to become available") + hookenv.status_set("maintenance", "Waiting for master components to start") return # Note that after this point, kubernetes-master.components.started is # always True. - is_leader = is_state("leadership.is_leader") - authentication_setup = is_state("authentication.setup") - if not is_leader and not authentication_setup: - hookenv.status_set("waiting", "Waiting on leader's crypto keys.") - return + is_leader = is_state("leadership.is_leader") addons_configured = is_state("cdk-addons.configured") if is_leader and not addons_configured: hookenv.status_set("waiting", "Waiting to retry addon deployment") @@ -951,27 +936,7 @@ def master_services_down(): """Ensure master services are up and running. Return: list of failing services""" - failing_services = [] - for service in master_services: - daemon = "snap.{}.daemon".format(service) - - # Give each service up to a minute to become active; this is especially - # needed now that controller-mgr/scheduler/proxy need the apiserver - # to validate their token against a k8s secret. 
- attempt = 0 - delay = 10 - times = 6 - while attempt < times: - hookenv.log( - "Checking if {} is active ({} / {})".format(daemon, attempt, times) - ) - if host.service_running(daemon): - break - sleep(delay) - attempt += 1 - else: - failing_services.append(service) - return failing_services + return list(filterfalse(kubernetes_master.check_service, master_services)) def add_systemd_file_limit(): @@ -1068,7 +1033,9 @@ def register_auth_webhook(): context = { "api_ver": "v1beta1", "charm_dir": hookenv.charm_dir(), - "host": get_ingress_address("kube-api-endpoint"), + "host": get_ingress_address( + "kube-api-endpoint", ignore_addresses=[hookenv.config("ha-cluster-vip")] + ), "pidfile": "auth-webhook.pid", "port": 5000, "root_dir": auth_webhook_root, @@ -1143,7 +1110,7 @@ def register_auth_webhook(): @when( - "kubernetes-master.apiserver.configured", + "kubernetes-master.apiserver.running", "kubernetes-master.auth-webhook-service.started", "authentication.setup", ) @@ -1299,9 +1266,23 @@ def etcd_data_change(etcd): @when("kube-control.connected") @when("cdk-addons.configured") def send_cluster_dns_detail(kube_control): - """ Send cluster DNS info """ + """Send cluster DNS info""" dns_provider = endpoint_from_flag("dns-provider.available") - if dns_provider: + try: + goal_state_rels = hookenv.goal_state().get("relations", {}) + except NotImplementedError: + goal_state_rels = {} + dns_provider_missing = not dns_provider and "dns-provider" not in goal_state_rels + dns_provider_pending = not dns_provider and "dns-provider" in goal_state_rels + try: + dns_disabled_cfg = get_dns_provider() == "none" + except InvalidDnsProvider: + dns_disabled_cfg = False + if dns_provider_missing and dns_disabled_cfg: + kube_control.set_dns(None, None, None, False) + elif dns_provider_pending: + pass + elif dns_provider: details = dns_provider.details() kube_control.set_dns( details["port"], details["domain"], details["sdn-ip"], True @@ -1312,16 +1293,14 @@ def 
send_cluster_dns_detail(kube_control): except InvalidDnsProvider: hookenv.log(traceback.format_exc()) return - dns_enabled = dns_provider != "none" dns_domain = hookenv.config("dns_domain") dns_ip = None - if dns_enabled: - try: - dns_ip = kubernetes_master.get_dns_ip() - except CalledProcessError: - hookenv.log("DNS addon service not ready yet") - return - kube_control.set_dns(53, dns_domain, dns_ip, dns_enabled) + try: + dns_ip = kubernetes_master.get_dns_ip() + except CalledProcessError: + hookenv.log("DNS addon service not ready yet") + return + kube_control.set_dns(53, dns_domain, dns_ip, True) def create_tokens_and_sign_auth_requests(): @@ -1397,25 +1376,64 @@ def create_tokens_and_sign_auth_requests(): @when("kube-api-endpoint.available") def push_service_data(): """Send configuration to the load balancer, and close access to the - public interface""" + public interface. + """ kube_api = endpoint_from_flag("kube-api-endpoint.available") - external_endpoints = kubernetes_master.get_external_lb_endpoints() - if external_endpoints: - addresses = [e[0] for e in external_endpoints] + endpoints = kubernetes_master.get_endpoints_from_config() + if endpoints: + addresses = [e[0] for e in endpoints] kube_api.configure(kubernetes_master.STANDARD_API_PORT, addresses, addresses) else: - # no external addresses configured, so rely on the interface layer + # no manually configured LBs, so rely on the interface layer # to use the ingress address for each relation kube_api.configure(kubernetes_master.STANDARD_API_PORT) -@when("certificates.available", "kube-api-endpoint.available", "cni.available") +@when("leadership.is_leader") +@when_any( + "endpoint.loadbalancer-internal.available", + "endpoint.loadbalancer-external.available", +) +def request_load_balancers(): + """Request LBs from the related provider(s).""" + for lb_type in ("internal", "external"): + lb_provider = endpoint_from_name("loadbalancer-" + lb_type) + if not lb_provider.is_available: + continue + req = 
lb_provider.get_request("api-server-" + lb_type) + req.protocol = req.protocols.tcp + ext_api_port = kubernetes_master.EXTERNAL_API_PORT + int_api_port = kubernetes_master.STANDARD_API_PORT + api_port = ext_api_port if lb_type == "external" else int_api_port + req.port_mapping = {api_port: int_api_port} + req.public = lb_type == "external" + if not req.health_checks: + req.add_health_check( + protocol=req.protocols.http, + port=int_api_port, + path="/livez", + ) + lb_provider.send_request(req) + + +@when("kube-control.connected") +def send_api_urls(): + kube_control = endpoint_from_name("kube-control") + if not hasattr(kube_control, "set_api_endpoints"): + # built with an old version of the kube-control interface + # the old kube-api-endpoint relation must be used instead + return + endpoints = kubernetes_master.get_internal_api_endpoints() + if not endpoints: + return + kube_control.set_api_endpoints(kubernetes_master.get_api_urls(endpoints)) + + +@when("certificates.available", "cni.available") def send_data(): """Send the data that is required to create a server certificate for this server.""" - kube_api_endpoint = endpoint_from_flag("kube-api-endpoint.available") - # Use the public ip of this unit as the Common Name for the certificate. common_name = hookenv.unit_public_ip() @@ -1430,7 +1448,10 @@ def send_data(): # Get ingress address (this is probably already covered by bind_ips, # but list it explicitly as well just in case it's not). - ingress_ip = get_ingress_address(kube_api_endpoint.endpoint_name) + old_ingress_ip = get_ingress_address("kube-api-endpoint") + new_ingress_ip = get_ingress_address("kube-control") + + local_endpoint = kubernetes_master.get_local_api_endpoint()[0][0] domain = hookenv.config("dns_domain") # Create SANs that the tls layer will add to the server cert. @@ -1439,7 +1460,9 @@ def send_data(): # The CN field is checked as a hostname, so if it's an IP, it # won't match unless also included in the SANs as an IP field. 
common_name, - ingress_ip, + local_endpoint, + old_ingress_ip, + new_ingress_ip, socket.gethostname(), socket.getfqdn(), "kubernetes", @@ -1452,8 +1475,8 @@ def send_data(): + bind_ips ) - lb_addrs = [e[0] for e in kubernetes_master.get_lb_endpoints()] - sans.extend(lb_addrs) + sans.extend(e[0] for e in kubernetes_master.get_internal_api_endpoints()) + sans.extend(e[0] for e in kubernetes_master.get_external_api_endpoints()) # maybe they have extra names they want as SANs extra_sans = hookenv.config("extra_sans") @@ -1514,7 +1537,7 @@ def reconfigure_cdk_addons(): ) @when_not("upgrade.series.in-progress") def configure_cdk_addons(): - """ Configure CDK addons """ + """Configure CDK addons""" remove_state("cdk-addons.reconfigure") remove_state("cdk-addons.configured") remove_state("kubernetes-master.aws.changed") @@ -1527,13 +1550,7 @@ def configure_cdk_addons(): and load_gpu_plugin == "auto" and is_state("kubernetes-master.gpu.enabled") ) - # addons-registry is deprecated in 1.15, but it should take precedence - # when configuring the cdk-addons snap until 1.17 is released. 
- registry = hookenv.config("addons-registry") - if registry and get_version("kube-apiserver") < (1, 17): - hookenv.log("addons-registry is deprecated; " "use image-registry instead") - else: - registry = hookenv.config("image-registry") + registry = hookenv.config("image-registry") dbEnabled = str(hookenv.config("enable-dashboard-addons")).lower() try: dnsProvider = get_dns_provider() @@ -1625,7 +1642,6 @@ def configure_cdk_addons(): "enable-azure=" + enable_azure, "enable-gcp=" + enable_gcp, "enable-openstack=" + enable_openstack, - "monitorstorage=" + hookenv.config("monitoring-storage"), "cluster-tag=" + cluster_tag, ] if openstack: @@ -1827,8 +1843,15 @@ def switch_auth_mode(forced=False): @when_not("kubernetes-master.pod-security-policy.applied") def create_pod_security_policy_resources(): pod_security_policy_path = "/root/cdk/pod-security-policy.yaml" - - render("rbac-pod-security-policy.yaml", pod_security_policy_path, {}) + pod_security_policy = hookenv.config("pod-security-policy") + if pod_security_policy: + hookenv.log("Using configuration defined on pod-security-policy option") + write_file_with_autogenerated_header( + pod_security_policy_path, pod_security_policy + ) + else: + hookenv.log("Using the default rbac-pod-security-policy template") + render("rbac-pod-security-policy.yaml", pod_security_policy_path, {}) hookenv.log("Creating pod security policy resources.") if kubectl_manifest("apply", pod_security_policy_path): @@ -2042,13 +2065,15 @@ def shutdown(): def build_kubeconfig(): """Gather the relevant data for Kubernetes configuration objects and create a config object with that information.""" - local_address = get_ingress_address("kube-api-endpoint") - local_server = "https://{0}:{1}".format(local_address, 6443) - public_address, public_port = kubernetes_master.get_api_endpoint() - public_server = "https://{0}:{1}".format(public_address, public_port) + local_endpoint = kubernetes_master.get_local_api_endpoint() + internal_endpoints = 
kubernetes_master.get_internal_api_endpoints() + external_endpoints = kubernetes_master.get_external_api_endpoints() # Do we have everything we need? - if ca_crt_path.exists(): + if ca_crt_path.exists() and internal_endpoints and external_endpoints: + local_url = kubernetes_master.get_api_url(local_endpoint) + internal_url = kubernetes_master.get_api_url(internal_endpoints) + external_url = kubernetes_master.get_api_url(external_endpoints) client_pass = get_token("admin") if not client_pass: # If we made it this far without a password, we're bootstrapping a new @@ -2092,7 +2117,7 @@ def build_kubeconfig(): if ks: create_kubeconfig( kubeconfig_path, - public_server, + external_url, ca_crt_path, user="admin", token=client_pass, @@ -2102,7 +2127,7 @@ def build_kubeconfig(): else: create_kubeconfig( kubeconfig_path, - public_server, + external_url, ca_crt_path, user="admin", token=client_pass, @@ -2113,19 +2138,30 @@ def build_kubeconfig(): cmd = ["chown", "ubuntu:ubuntu", kubeconfig_path] check_call(cmd) - # make a kubeconfig for root (same location on k8s-masters and workers) + # make a kubeconfig for root / the charm create_kubeconfig( kubeclientconfig_path, - local_server, + local_url, ca_crt_path, user="admin", token=client_pass, ) + # Create kubernetes configuration in the default location for ubuntu. 
+ create_kubeconfig( + "/home/ubuntu/.kube/config", + internal_url, + ca_crt_path, + user="admin", + token=client_pass, + ) + # Make the config dir readable by the ubuntu user + check_call(["chown", "-R", "ubuntu:ubuntu", "/home/ubuntu/.kube"]) + # make a kubeconfig for cdk-addons create_kubeconfig( cdk_addons_kubectl_config_path, - local_server, + local_url, ca_crt_path, user="admin", token=client_pass, @@ -2136,7 +2172,7 @@ def build_kubeconfig(): if proxy_token: create_kubeconfig( kubeproxyconfig_path, - local_server, + local_url, ca_crt_path, token=proxy_token, user="kube-proxy", @@ -2145,7 +2181,7 @@ def build_kubeconfig(): if controller_manager_token: create_kubeconfig( kubecontrollermanagerconfig_path, - local_server, + local_url, ca_crt_path, token=controller_manager_token, user="kube-controller-manager", @@ -2154,12 +2190,16 @@ def build_kubeconfig(): if scheduler_token: create_kubeconfig( kubeschedulerconfig_path, - local_server, + local_url, ca_crt_path, token=scheduler_token, user="kube-scheduler", ) + cni = endpoint_from_name("cni") + if cni: + cni.notify_kubeconfig_changed() + def handle_etcd_relation(reldata): """Save the client credentials and set appropriate daemon flags when @@ -2221,8 +2261,14 @@ def configure_apiserver(): # Handle static options for now api_opts["service-cluster-ip-range"] = service_cidr + # Enable StreamingProxyRedirects to work around `kubectl exec` failures + # when passing through kubeapi-load-balancer. This feature will be removed + # in k8s 1.24. 
+ # https://bugs.launchpad.net/bugs/1940527 + feature_gates = ["StreamingProxyRedirects=true"] if kubernetes_common.is_dual_stack(cluster_cidr): - api_opts["feature-gates"] = "IPv6DualStack=true" + feature_gates.append("IPv6DualStack=true") + api_opts["feature-gates"] = ",".join(feature_gates) api_opts["min-request-timeout"] = "300" api_opts["v"] = "4" api_opts["tls-cert-file"] = str(server_crt_path) @@ -2230,7 +2276,6 @@ def configure_apiserver(): api_opts["kubelet-certificate-authority"] = str(ca_crt_path) api_opts["kubelet-client-certificate"] = str(client_crt_path) api_opts["kubelet-client-key"] = str(client_key_path) - api_opts["kubelet-https"] = "true" api_opts["logtostderr"] = "true" api_opts["storage-backend"] = getStorageBackend() api_opts["insecure-port"] = "0" @@ -2387,6 +2432,15 @@ def configure_apiserver(): set_flag("kubernetes-master.had-service-cidr-expanded") set_flag("kubernetes-master.apiserver.configured") + if kubernetes_master.check_service("kube-apiserver"): + set_flag("kubernetes-master.apiserver.running") + + +@when("kubernetes-master.apiserver.configured") +@when_not("kubernetes-master.apiserver.running") +def check_apiserver(): + if kubernetes_master.check_service("kube-apiserver"): + set_flag("kubernetes-master.apiserver.running") @when( @@ -2585,7 +2639,7 @@ def get_token(username): migrated. Otherwise, fetch it from the 'known_tokens.csv' file. """ if is_flag_set("kubernetes-master.token-auth.migrated"): - return kubernetes_master.get_secret_password(username) + return kubernetes_common.get_secret_password(username) else: return kubernetes_master.get_csv_password("known_tokens.csv", username) @@ -2678,8 +2732,8 @@ def poke_network_unavailable(): discussion about refactoring the affected code but nothing has happened in a while. 
""" - local_address = get_ingress_address("kube-api-endpoint") - local_server = "https://{0}:{1}".format(local_address, 6443) + internal_endpoints = kubernetes_master.get_internal_api_endpoints() + internal_url = kubernetes_master.get_api_url(internal_endpoints) client_token = get_token("admin") http_header = ("Authorization", "Bearer {}".format(client_token)) @@ -2699,7 +2753,7 @@ def poke_network_unavailable(): for node in nodes: node_name = node["metadata"]["name"] - url = "{}/api/v1/nodes/{}/status".format(local_server, node_name) + url = "{}/api/v1/nodes/{}/status".format(internal_url, node_name) req = Request(url) req.add_header(*http_header) with urlopen(req) as response: @@ -3148,6 +3202,11 @@ def _write_encryption_config(): ) +@when_any("config.changed.pod-security-policy") +def pod_security_policy_config_changed(): + clear_flag("kubernetes-master.pod-security-policy.applied") + + @when_any("config.changed.ha-cluster-vip", "config.changed.ha-cluster-dns") def haconfig_changed(): clear_flag("hacluster-configured") @@ -3161,11 +3220,13 @@ def configure_hacluster(): add_service_to_hacluster(service, daemon) # get a new cert - if all_flags_set("certificates.available", "kube-api-endpoint.available"): + if is_flag_set("certificates.available"): send_data() # update workers - if is_state("kube-api-endpoint.available"): + if is_flag_set("kube-control.connected"): + send_api_urls() + if is_flag_set("kube-api-endpoint.available"): push_service_data() set_flag("hacluster-configured") @@ -3179,10 +3240,12 @@ def remove_hacluster(): remove_service_from_hacluster(service, daemon) # get a new cert - if all_flags_set("certificates.available", "kube-api-endpoint.available"): + if is_flag_set("certificates.available"): send_data() # update workers - if is_state("kube-api-endpoint.available"): + if is_flag_set("kube-control.connected"): + send_api_urls() + if is_flag_set("kube-api-endpoint.available"): push_service_data() clear_flag("hacluster-configured") @@ -3352,7 
+3415,10 @@ def register_prometheus_jobs(): monitoring_token = get_token("system:monitoring") for relation in prometheus.relations: - address, port = kubernetes_master.get_api_endpoint(relation) + endpoints = kubernetes_master.get_internal_api_endpoints(relation) + if not endpoints: + continue + address, port = endpoints[0] templates_dir = Path("templates") for job_file in Path("templates/prometheus").glob("*.yaml.j2"): diff --git a/kubernetes-master/templates/cdk.auth-webhook-secret.yaml b/kubernetes-master/templates/cdk.auth-webhook-secret.yaml new file mode 100644 index 0000000..a12c402 --- /dev/null +++ b/kubernetes-master/templates/cdk.auth-webhook-secret.yaml @@ -0,0 +1,13 @@ +# Manifest for CK secrets that auth-webhook expects +--- +apiVersion: v1 +kind: Secret +metadata: + name: {{ secret_name }} + namespace: {{ secret_namespace }} +type: {{ type }} +data: + uid: {{ user }} + username: {{ username }} + password: {{ password }} + groups: '{{ groups }}' diff --git a/kubernetes-master/templates/cdk.master.auth-webhook.py b/kubernetes-master/templates/cdk.master.auth-webhook.py index 4cb0cd6..394aa7f 100644 --- a/kubernetes-master/templates/cdk.master.auth-webhook.py +++ b/kubernetes-master/templates/cdk.master.auth-webhook.py @@ -3,29 +3,75 @@ import csv import json import logging -import requests +import aiohttp +import asyncio +import signal from base64 import b64decode from copy import deepcopy -from flask import Flask, request, jsonify from pathlib import Path -from subprocess import check_call, check_output, CalledProcessError, TimeoutExpired -from yaml import safe_load -app = Flask(__name__) +from yaml import safe_load, YAMLError -def kubectl(*args): - '''Run a kubectl cli command with a config file. 
+AWS_IAM_ENDPOINT = '{{ aws_iam_endpoint if aws_iam_endpoint }}' +KEYSTONE_ENDPOINT = '{{ keystone_endpoint if keystone_endpoint }}' +CUSTOM_AUTHN_ENDPOINT = '{{ custom_authn_endpoint if custom_authn_endpoint }}' - Returns stdout and throws an error if the command fails. +app = aiohttp.web.Application() +routes = aiohttp.web.RouteTableDef() + +# Disable the gunicorn arbiter's SIGCHLD handler in this worker. The handler +# gets inherited by worker processes where it appears to serve no useful +# function. It also makes it impossible for workers to make subprocess calls +# safely, so, disable it. +# https://bugs.launchpad.net/charm-kubernetes-master/+bug/1938470 +signal.signal(signal.SIGCHLD, signal.SIG_DFL) + + +async def run(*args, timeout=10, **kwargs): + '''Run a CLI command. + + Returns retcode, stdout, and stderr (already decoded). + + If the process times out, the exit code will be 124 and stdout and stderr + will be empty. + + NOTE: + In Python 3.8+, the default process child watcher, ThreadedChildWatcher, + appears to have a race condition where it frequently attempts to wait for + the child process PID before it's visible, leading to a spurious warning + in the log about "Unknown child process", and a 255 exit code regardless + of what the child process actually exits with. The stdout and stderr will + still be available, however. + ''' + args = [str(arg) for arg in args] + kwargs.update( + stdout=asyncio.subprocess.PIPE, + stderr=asyncio.subprocess.PIPE, + ) + + async def _run(): + proc = await asyncio.create_subprocess_exec(*args, **kwargs) + stdout, stderr = await proc.communicate() + return proc.returncode, stdout.decode('utf8'), stderr.decode('utf8') + + try: + return await asyncio.wait_for(_run(), timeout=timeout) + except asyncio.TimeoutError: + app.logger.exception('Command timed out: {}'.format(' '.join(args))) + return 124, '', '' + + +async def kubectl(*args): + '''Run a kubectl CLI command with a config file. 
+ + Returns retcode, stdout, and stderr. ''' # Try to use our service account kubeconfig; fall back to root if needed kubectl_cmd = Path('/snap/bin/kubectl') if not kubectl_cmd.is_file(): # Fall back to anywhere on the path if the snap isn't available kubectl_cmd = 'kubectl' - kubeconfig = '/root/.kube/config' - command = [str(kubectl_cmd), '--kubeconfig={}'.format(kubeconfig)] + list(args) - return check_output(command, timeout=10) + return await run(kubectl_cmd, '--kubeconfig=/root/.kube/config', *args) def log_secret(text, obj, hide=True): @@ -46,7 +92,7 @@ def log_secret(text, obj, hide=True): app.logger.debug('{}: {}'.format(text, log_obj)) -def check_token(token_review): +async def check_token(token_review): '''Populate user info if token is found in auth-related files.''' app.logger.info('Checking token') token_to_check = token_review['spec']['token'] @@ -54,28 +100,41 @@ def check_token(token_review): # If we have an admin token, short-circuit all other checks. This prevents us # from leaking our admin token to other authn services. admin_kubeconfig = Path('/root/.kube/config') - if admin_kubeconfig.exists(): - with admin_kubeconfig.open('r') as f: - data = safe_load(f) - try: - admin_token = data['users'][0]['user']['token'] - except (KeyError, ValueError): - # No admin kubeconfig; this is weird since we should always have an - # admin kubeconfig, but we shouldn't fail here in case there's - # something in known_tokens that should be validated. - pass - else: - if token_to_check == admin_token: - # We have a valid admin - token_review['status'] = { - 'authenticated': True, - 'user': { - 'username': 'admin', - 'uid': 'admin', - 'groups': ['system:masters'] - } - } - return True + data = None + try: + try: + data = safe_load(admin_kubeconfig.read_text()) + except Exception: + # Retry loading the file once, in case the charm was in the + # middle of rewriting it. 
See lp:1837930 for more info, but + # even without it being rewritten on every hook, there will + # always be a race condition to consider. + await asyncio.sleep(0.5) + data = safe_load(admin_kubeconfig.read_text()) + except YAMLError as e: + # we don't want to use logger.exception() or str(e) because it + # can leak tokens into the log + app.logger.error('Invalid kube config file: %s', type(e).__name__) + except Exception: + if not admin_kubeconfig.exists(): + app.logger.error('Missing kube config file') + elif data is None: + app.logger.error('Empty kube config file') + else: + app.logger.exception('Invalid kube config file') + else: + admin_token = data['users'][0]['user']['token'] + if token_to_check == admin_token: + # We have a valid admin + token_review['status'] = { + 'authenticated': True, + 'user': { + 'username': 'admin', + 'uid': 'admin', + 'groups': ['system:masters'] + } + } + return True # No admin? We're probably in an upgrade. Check an existing known_tokens.csv. csv_fields = ['token', 'username', 'user', 'groups'] @@ -102,119 +161,85 @@ def check_token(token_review): return False -def check_secrets(token_review): +async def check_secrets(token_review): '''Populate user info if token is found in k8s secrets.''' # Only check secrets if kube-apiserver is up - try: - output = check_call(['systemctl', 'is-active', 'snap.kube-apiserver.daemon']) - except CalledProcessError: - app.logger.info('Skipping secret check: kube-apiserver is not ready') - return False + app.logger.info('Checking secret') + token = token_review['spec']['token'] + + if token in app['secrets']: + token_review['status'] = { + 'authenticated': True, + 'user': app['secrets'][token], + } + return True else: - app.logger.info('Checking secret') - - token_to_check = token_review['spec']['token'] - try: - output = kubectl( - 'get', 'secrets', '-n', 'kube-system', '-o', 'json').decode('UTF-8') - except (CalledProcessError, TimeoutExpired) as e: - app.logger.info('Unable to load secrets: 
{}.'.format(e)) return False - secrets = json.loads(output) - if 'items' in secrets: - for secret in secrets['items']: - try: - data_b64 = secret['data'] - password_b64 = data_b64['password'].encode('UTF-8') - username_b64 = data_b64['username'].encode('UTF-8') - except (KeyError, TypeError): - # CK secrets will have populated 'data', but not all secrets do - continue - password = b64decode(password_b64).decode('UTF-8') - if token_to_check == password: - groups_b64 = data_b64['groups'].encode('UTF-8') \ - if 'groups' in data_b64 else b'' - - # NB: CK creates k8s secrets with the 'password' field set as - # uid::token. Split the decoded password so we can send a 'uid' back. - # If there is no delimiter, set uid == username. - # TODO: make the delimeter less magical so it doesn't get out of - # sync with the function that creates secrets in k8s-master.py. - username = uid = b64decode(username_b64).decode('UTF-8') - pw_delim = '::' - if pw_delim in password: - uid = password.rsplit(pw_delim, 1)[0] - groups = b64decode(groups_b64).decode('UTF-8').split(',') - token_review['status'] = { - 'authenticated': True, - 'user': { - 'username': username, - 'uid': uid, - 'groups': groups, - } - } - return True - return False - - -def check_aws_iam(token_review): +async def check_aws_iam(token_review): '''Check the request with an AWS IAM authn server.''' app.logger.info('Checking AWS IAM') # URL comes from /root/cdk/aws-iam-webhook.yaml - url = '{{ aws_iam_endpoint }}' - app.logger.debug('Forwarding to: {}'.format(url)) + app.logger.debug('Forwarding to: {}'.format(AWS_IAM_ENDPOINT)) - return forward_request(token_review, url) + return await forward_request(token_review, AWS_IAM_ENDPOINT) -def check_keystone(token_review): +async def check_keystone(token_review): '''Check the request with a Keystone authn server.''' app.logger.info('Checking Keystone') # URL comes from /root/cdk/keystone/webhook.yaml - url = '{{ keystone_endpoint }}' - app.logger.debug('Forwarding to: 
{}'.format(url)) + app.logger.debug('Forwarding to: {}'.format(KEYSTONE_ENDPOINT)) - return forward_request(token_review, url) + return await forward_request(token_review, KEYSTONE_ENDPOINT) -def check_custom(token_review): +async def check_custom(token_review): '''Check the request with a user-specified authn server.''' app.logger.info('Checking Custom Endpoint') # User will set the URL in k8s-master config - url = '{{ custom_authn_endpoint }}' - app.logger.debug('Forwarding to: {}'.format(url)) + app.logger.debug('Forwarding to: {}'.format(CUSTOM_AUTHN_ENDPOINT)) - return forward_request(token_review, url) + return await forward_request(token_review, CUSTOM_AUTHN_ENDPOINT) -def forward_request(json_req, url): +async def forward_request(json_req, url): '''Forward a JSON TokenReview request to a url. Returns True if the request is authenticated; False if the response is either invalid or authn has been denied. ''' timeout = 10 + resp_text = '' try: - try: - r = requests.post(url, json=json_req, timeout=timeout) - except requests.exceptions.SSLError: - app.logger.debug('SSLError with server; skipping cert validation') - r = requests.post(url, json=json_req, verify=False, timeout=timeout) - except Exception as e: - app.logger.debug('Failed to contact server: {}'.format(e)) + async with aiohttp.ClientSession() as session: + try: + async with session.post(url, json=json_req, timeout=timeout) as resp: + resp_text = await resp.text() + except aiohttp.ClientSSLError: + app.logger.debug('SSLError with server; skipping cert validation') + async with session.post(url, + json=json_req, + verify_ssl=False, + timeout=timeout) as resp: + resp_text = await resp.text() + except asyncio.TimeoutError: + app.logger.error('Timed out contacting server') + return False + except Exception: + app.logger.exception('Failed to contact server') return False # Check if the response is valid try: - resp = json.loads(r.text) + resp = json.loads(resp_text) 'authenticated' in resp['status'] except 
(KeyError, TypeError, ValueError): - log_secret(text='Invalid response from server', obj=r.text) + log_secret(text='Invalid response from server', obj=resp_text) return False # NB: When a forwarded request is authenticated, set the 'status' field to @@ -226,8 +251,21 @@ def forward_request(json_req, url): return False -@app.route('/{{ api_ver }}', methods=['POST']) -def webhook(): +def ack(req, **kwargs): + # Successful checks will set auth and user data in the 'req' dict + log_secret(text='ACK', obj=req) + return aiohttp.web.json_response(req, **kwargs) + + +def nak(req, **kwargs): + # Force unauthenticated, just in case + req.setdefault('status', {})['authenticated'] = False + log_secret(text='NAK', obj=req) + return aiohttp.web.json_response(req, **kwargs) + + +@routes.post('/{{ api_ver }}') +async def webhook(request): '''Listen on /$api_version for POST requests. For a POSTed TokenReview object, check every known authentication mechanism @@ -240,12 +278,15 @@ def webhook(): TokenReview object with 'authenticated: True' and user attributes if a token is found; otherwise, a TokenReview object with 'authenticated: False' ''' - # Log to gunicorn - glogger = logging.getLogger('gunicorn.error') - app.logger.handlers = glogger.handlers - app.logger.setLevel(glogger.level) + try: + req = await request.json() + except json.JSONDecodeError: + app.logger.debug('Unable to parse request') + return nak({}, status=400) + + # Make the request unauthenticated by deafult + req['status'] = {'authenticated': False} - req = request.json try: valid = True if (req['kind'] == 'TokenReview' and req['spec']['token']) else False @@ -256,31 +297,128 @@ def webhook(): log_secret(text='REQ', obj=req) else: log_secret(text='Invalid request', obj=req) - return '' # flask needs to return something that isn't None + return nak({}, status=400) - # Make the request unauthenticated by deafult - req['status'] = {'authenticated': False} + if await check_token(req): + return ack(req) - if ( - 
check_token(req) - or check_secrets(req) - {%- if aws_iam_endpoint %} - or check_aws_iam(req) - {%- endif %} - {%- if keystone_endpoint %} - or check_keystone(req) - {%- endif %} - {%- if custom_authn_endpoint %} - or check_custom(req) - {%- endif %} - ): - # Successful checks will set auth and user data in the 'req' dict - log_secret(text='ACK', obj=req) - else: - log_secret(text='NAK', obj=req) + if not app['secrets']: + # If secrets aren't yet available, none of the system accounts will be + # functional and thus neither will the cluster, so there's no point to + # going any further. Additionally, we don't want to accidentally leak + # system account tokens to external auth endpoints. + app.logger.warning('Secrets not yet available; aborting') + return nak(req) - return jsonify(req) + if await check_secrets(req): + return ack(req) + + if AWS_IAM_ENDPOINT and await check_aws_iam(req): + return ack(req) + + if KEYSTONE_ENDPOINT and await check_keystone(req): + return ack(req) + + if CUSTOM_AUTHN_ENDPOINT and await check_custom(req): + return ack(req) + + return nak(req) + + +@routes.post('/slow-test') +async def slow_test(request): + app.logger.debug('Slow request started') + await asyncio.sleep(5) + app.logger.debug('Slow request finished') + return aiohttp.web.json_response({'status': {'authenticated': False}}) + + +async def refresh_secrets(app): + app.logger.info('Refreshing secrets') + retcode, stdout, stderr = await run( + 'systemctl', 'is-active', 'snap.kube-apiserver.daemon' + ) + # See note in run() docstring above about exit 255. + if retcode not in (0, 255) or stdout.strip() != 'active': + app.logger.info('Skipping secret refresh: kube-apiserver is not ready ' + '({}, {})'.format(retcode, stdout.strip())) + return + + retcode, stdout, stderr = await kubectl( + 'get', 'secrets', '-n', 'kube-system', '-o', 'json' + ) + # See note in run() docstring above about exit 255. 
+ if retcode not in (0, 255) or stderr: + app.logger.warning('Unable to load secrets ({}): {}'.format(retcode, stderr)) + return + + try: + secrets = json.loads(stdout) + except json.JSONDecodeError: + app.logger.exception('Unable to parse secrets') + return + + new_secrets = {} + for secret in secrets.get('items', []): + try: + data_b64 = secret['data'] + username_b64 = data_b64['username'].encode('UTF-8') + password_b64 = data_b64['password'].encode('UTF-8') + groups_b64 = data_b64.get('groups', '').encode('UTF-8') + except (KeyError, TypeError): + # CK secrets will have populated 'data', but not all secrets do + continue + + username = uid = b64decode(username_b64).decode('UTF-8') + password = b64decode(password_b64).decode('UTF-8') + groups = b64decode(groups_b64).decode('UTF-8').split(',') + + # NB: CK creates k8s secrets with the 'password' field set as + # uid::token. Split the decoded password so we can send a 'uid' back. + # If there is no delimiter, set uid == username. + # TODO: make the delimeter less magical so it doesn't get out of + # sync with the function that creates secrets in k8s-master.py. 
+ pw_delim = '::' + if pw_delim in password: + uid = password.rsplit(pw_delim, 1)[0] + new_secrets[password] = { + 'username': username, + 'uid': uid, + 'groups': groups, + } + app['secrets'] = new_secrets + + +async def startup(app): + # Log to gunicorn + glogger = logging.getLogger('gunicorn.error') + app.logger.handlers = glogger.handlers + app.logger.setLevel(glogger.level) + + async def _task(): + while True: + try: + await refresh_secrets(app) + await asyncio.sleep(60) + except asyncio.CancelledError: + break + except Exception: + app.logger.exception('Failed to get secrets') + + app['secrets'] = {} + app['secrets_task'] = asyncio.ensure_future(_task()) + + +async def cleanup(app): + task = app.get('secrets_task') + task.cancel() + await task + + +app.add_routes(routes) +app.on_startup.append(startup) +app.on_cleanup.append(cleanup) if __name__ == '__main__': - app.run() + aiohttp.web.run_app(app) diff --git a/kubernetes-master/templates/cdk.master.auth-webhook.service b/kubernetes-master/templates/cdk.master.auth-webhook.service index a7bf0ed..d6e1515 100644 --- a/kubernetes-master/templates/cdk.master.auth-webhook.service +++ b/kubernetes-master/templates/cdk.master.auth-webhook.service @@ -1,6 +1,7 @@ [Unit] Description=CDK master auth webhook After=network.target +StartLimitIntervalSec=0 [Service] User=root @@ -15,8 +16,10 @@ ExecStart={{ charm_dir }}/../.venv/bin/gunicorn \ --log-level debug \ --pid {{ pidfile }} \ --workers {{ num_workers }} \ + --worker-class aiohttp.worker.GunicornWebWorker \ auth-webhook:app Restart=always +RestartSec=10 [Install] WantedBy=multi-user.target diff --git a/kubernetes-master/templates/rbd-persistent-volume.yaml b/kubernetes-master/templates/rbd-persistent-volume.yaml deleted file mode 100644 index 84248e5..0000000 --- a/kubernetes-master/templates/rbd-persistent-volume.yaml +++ /dev/null @@ -1,25 +0,0 @@ -# JUJU Internal Template used to enlist RBD volumes from the -# `create-rbd-pv` action. 
This is a temporary file on disk to enlist resources. -apiVersion: v1 -kind: PersistentVolume -metadata: - name: {{ RBD_NAME }} -spec: - capacity: - storage: {{ RBD_SIZE }}M - accessModes: - - {{ PV_MODE }} - storageClassName: "rbd" - rbd: - monitors: - {% for host in monitors %} - - {{ host }} - {% endfor %} - pool: rbd - image: {{ RBD_NAME }} - user: admin - secretRef: - name: ceph-secret - fsType: {{ RBD_FS }} - readOnly: false - # persistentVolumeReclaimPolicy: Recycle diff --git a/kubernetes-master/tests/functional/conftest.py b/kubernetes-master/tests/functional/conftest.py new file mode 100644 index 0000000..a92e249 --- /dev/null +++ b/kubernetes-master/tests/functional/conftest.py @@ -0,0 +1,4 @@ +import charms.unit_test + + +charms.unit_test.patch_reactive() diff --git a/kubernetes-master/tests/functional/test_k8s_common.py b/kubernetes-master/tests/functional/test_k8s_common.py new file mode 100644 index 0000000..4b867e6 --- /dev/null +++ b/kubernetes-master/tests/functional/test_k8s_common.py @@ -0,0 +1,90 @@ +from functools import partial + +import pytest +from unittest import mock +from charms.layer import kubernetes_common + + +class TestCreateKubeConfig: + @pytest.fixture(autouse=True) + def _files(self, tmp_path): + self.cfg_file = tmp_path / "config" + self.ca_file = tmp_path / "ca.crt" + self.ca_file.write_text("foo") + self.ckc = partial( + kubernetes_common.create_kubeconfig, + self.cfg_file, + "server", + self.ca_file, + ) + + def test_guard_clauses(self): + with pytest.raises(ValueError): + self.ckc() + assert not self.cfg_file.exists() + with pytest.raises(ValueError): + self.ckc(token="token", password="password") + assert not self.cfg_file.exists() + with pytest.raises(ValueError): + self.ckc(key="key") + assert not self.cfg_file.exists() + + def test_file_creation(self): + self.ckc(password="password") + assert self.cfg_file.exists() + cfg_data_1 = self.cfg_file.read_text() + assert cfg_data_1 + + def test_idempotency(self): + 
self.ckc(password="password") + cfg_data_1 = self.cfg_file.read_text() + self.ckc(password="password") + cfg_data_2 = self.cfg_file.read_text() + # Verify that calling w/ the same data keeps the same file contents. + assert cfg_data_2 == cfg_data_1 + + def test_efficient_updates(self): + self.ckc(password="old_password") + cfg_stat_1 = self.cfg_file.stat() + self.ckc(password="old_password") + cfg_stat_2 = self.cfg_file.stat() + self.ckc(password="new_password") + cfg_stat_3 = self.cfg_file.stat() + # Verify that calling with the same data doesn't + # modify the file at all, but that new data does + assert cfg_stat_1.st_mtime == cfg_stat_2.st_mtime < cfg_stat_3.st_mtime + + def test_aws_iam(self): + self.ckc(password="password", aws_iam_cluster_id="aws-cluster") + assert self.cfg_file.exists() + cfg_data_1 = self.cfg_file.read_text() + assert "aws-cluster" in cfg_data_1 + + def test_keystone(self): + self.ckc(password="password", keystone=True) + assert self.cfg_file.exists() + cfg_data_1 = self.cfg_file.read_text() + assert "keystone-user" in cfg_data_1 + assert "exec" in cfg_data_1 + + def test_atomic_updates(self): + self.ckc(password="old_password") + with self.cfg_file.open("rt") as f: + # Perform a write in the middle of reading + self.ckc(password="new_password") + # Read data from existing FH after new data was written + cfg_data_1 = f.read() + # Read updated data + cfg_data_2 = self.cfg_file.read_text() + # Verify that the in-progress read didn't get any of the new data + assert cfg_data_1 != cfg_data_2 + assert "old_password" in cfg_data_1 + assert "new_password" in cfg_data_2 + + @mock.patch("charmhelpers.core.hookenv.network_get", autospec=True) + def test_get_ingress_address(self, network_get): + network_get.return_value = {"ingress-addresses": ["1.2.3.4", "5.6.7.8"]} + ingress = kubernetes_common.get_ingress_address("endpoint-name") + assert ingress == "1.2.3.4" + ingress = kubernetes_common.get_ingress_address("endpoint-name", ["1.2.3.4"]) + assert 
ingress == "5.6.7.8" diff --git a/kubernetes-master/tests/unit/conftest.py b/kubernetes-master/tests/unit/conftest.py new file mode 100644 index 0000000..a92e249 --- /dev/null +++ b/kubernetes-master/tests/unit/conftest.py @@ -0,0 +1,4 @@ +import charms.unit_test + + +charms.unit_test.patch_reactive() diff --git a/kubernetes-master/tests/unit/test_k8s_common.py b/kubernetes-master/tests/unit/test_k8s_common.py new file mode 100644 index 0000000..0dcad31 --- /dev/null +++ b/kubernetes-master/tests/unit/test_k8s_common.py @@ -0,0 +1,122 @@ +import json +import string +from subprocess import CalledProcessError +from unittest.mock import Mock + +from charms.layer import kubernetes_common as kc + + +def test_token_generator(): + alphanum = string.ascii_letters + string.digits + token = kc.token_generator(10) + assert len(token) == 10 + unknown_chars = set(token) - set(alphanum) + assert not unknown_chars + + +def test_get_secret_names(monkeypatch): + monkeypatch.setattr(kc, "kubectl", Mock()) + kc.kubectl.side_effect = [ + CalledProcessError(1, "none"), + FileNotFoundError, + "{}".encode("utf8"), + json.dumps( + { + "items": [ + { + "metadata": {"name": "secret-id"}, + "data": {"username": "dXNlcg=="}, + }, + ], + } + ).encode("utf8"), + ] + assert kc.get_secret_names() == {} + assert kc.get_secret_names() == {} + assert kc.get_secret_names() == {} + assert kc.get_secret_names() == {"user": "secret-id"} + + +def test_generate_rfc1123(): + alphanum = string.ascii_letters + string.digits + token = kc.generate_rfc1123(1000) + assert len(token) == 253 + unknown_chars = set(token) - set(alphanum) + assert not unknown_chars + + +def test_create_secret(monkeypatch): + monkeypatch.setattr(kc, "render", Mock()) + monkeypatch.setattr(kc, "kubectl_manifest", Mock()) + monkeypatch.setattr(kc, "get_secret_names", Mock()) + monkeypatch.setattr(kc, "generate_rfc1123", Mock()) + kc.kubectl_manifest.side_effect = [True, False] + kc.get_secret_names.side_effect = [{"username": 
"secret-id"}, {}] + kc.generate_rfc1123.return_value = "foo" + assert kc.create_secret("token", "username", "user", "groups") + assert kc.render.call_args[1]["context"] == { + "groups": "Z3JvdXBz", + "password": "dXNlcjo6dG9rZW4=", + "secret_name": "secret-id", + "secret_namespace": "kube-system", + "type": "juju.is/token-auth", + "user": "dXNlcg==", + "username": "dXNlcm5hbWU=", + } + assert not kc.create_secret("token", "username", "user", "groups") + assert kc.render.call_args[1]["context"] == { + "groups": "Z3JvdXBz", + "password": "dXNlcjo6dG9rZW4=", + "secret_name": "auth-user-foo", + "secret_namespace": "kube-system", + "type": "juju.is/token-auth", + "user": "dXNlcg==", + "username": "dXNlcm5hbWU=", + } + + +def test_get_secret_password(monkeypatch): + monkeypatch.setattr(kc, "kubectl", Mock()) + monkeypatch.setattr(kc, "Path", Mock()) + monkeypatch.setattr(kc, "yaml", Mock()) + kc.kubectl.side_effect = [ + CalledProcessError(1, "none"), + CalledProcessError(1, "none"), + CalledProcessError(1, "none"), + CalledProcessError(1, "none"), + CalledProcessError(1, "none"), + CalledProcessError(1, "none"), + FileNotFoundError, + json.dumps({}).encode("utf8"), + json.dumps({"items": []}).encode("utf8"), + json.dumps({"items": []}).encode("utf8"), + json.dumps({"items": [{}]}).encode("utf8"), + json.dumps({"items": [{"data": {}}]}).encode("utf8"), + json.dumps( + {"items": [{"data": {"username": "Ym9i", "password": "c2VjcmV0"}}]} + ).encode("utf8"), + json.dumps( + {"items": [{"data": {"username": "dXNlcm5hbWU=", "password": "c2VjcmV0"}}]} + ).encode("utf8"), + ] + kc.yaml.safe_load.side_effect = [ + {}, + {"users": None}, + {"users": []}, + {"users": [{"user": {}}]}, + {"users": [{"user": {"token": "secret"}}]}, + ] + assert kc.get_secret_password("username") is None + assert kc.get_secret_password("admin") is None + assert kc.get_secret_password("admin") is None + assert kc.get_secret_password("admin") is None + assert kc.get_secret_password("admin") is None + 
assert kc.get_secret_password("admin") == "secret" + assert kc.get_secret_password("username") is None + assert kc.get_secret_password("username") is None + assert kc.get_secret_password("username") is None + assert kc.get_secret_password("username") is None + assert kc.get_secret_password("username") is None + assert kc.get_secret_password("username") is None + assert kc.get_secret_password("username") is None + assert kc.get_secret_password("username") == "secret" diff --git a/kubernetes-master/version b/kubernetes-master/version index 1dea0b1..20817dd 100644 --- a/kubernetes-master/version +++ b/kubernetes-master/version @@ -1 +1 @@ -e247aeff \ No newline at end of file +ccfa68be \ No newline at end of file diff --git a/kubernetes-master/wheelhouse.txt b/kubernetes-master/wheelhouse.txt index dacf089..05d3d30 100644 --- a/kubernetes-master/wheelhouse.txt +++ b/kubernetes-master/wheelhouse.txt @@ -3,9 +3,11 @@ # even with installing setuptools before upgrading pip ends up with pip seeing # the older setuptools at the system level if include_system_packages is true pip>=18.1,<19.0 -# pin Jinja2 and PyYAML to the last versions supporting python 3.4 for trusty +# pin Jinja2, PyYAML and MarkupSafe to the last versions supporting python 3.5 +# for trusty Jinja2<=2.10.1 PyYAML<=5.2 +MarkupSafe<2.0.0 setuptools<42 setuptools-scm<=1.17.0 charmhelpers>=0.4.0,<1.0.0 @@ -15,7 +17,10 @@ wheel<0.34 netaddr<=0.7.19 # layer:snap -tenacity +# Newer versions of tenacity rely on `typing` which is in stdlib in +# python3.5 but not python3.4. 
We want to continue to support +# python3.4 (Trusty) +tenacity<5.0.4 # layer:vault-kv hvac @@ -24,6 +29,7 @@ netifaces psutil # kubernetes-master -flask>=1.0.0,<2.0.0 +aiohttp>=3.7.4,<4.0.0 gunicorn>=20.0.0,<21.0.0 +loadbalancer-interface diff --git a/kubernetes-master/wheelhouse/Flask-1.1.2.tar.gz b/kubernetes-master/wheelhouse/Flask-1.1.2.tar.gz deleted file mode 100644 index e264330..0000000 Binary files a/kubernetes-master/wheelhouse/Flask-1.1.2.tar.gz and /dev/null differ diff --git a/kubernetes-master/wheelhouse/Werkzeug-1.0.1.tar.gz b/kubernetes-master/wheelhouse/Werkzeug-1.0.1.tar.gz deleted file mode 100644 index e92c86e..0000000 Binary files a/kubernetes-master/wheelhouse/Werkzeug-1.0.1.tar.gz and /dev/null differ diff --git a/kubernetes-master/wheelhouse/aiohttp-3.7.4.post0.tar.gz b/kubernetes-master/wheelhouse/aiohttp-3.7.4.post0.tar.gz new file mode 100644 index 0000000..bb0fc48 Binary files /dev/null and b/kubernetes-master/wheelhouse/aiohttp-3.7.4.post0.tar.gz differ diff --git a/kubernetes-master/wheelhouse/async-timeout-3.0.1.tar.gz b/kubernetes-master/wheelhouse/async-timeout-3.0.1.tar.gz new file mode 100644 index 0000000..dfed0e0 Binary files /dev/null and b/kubernetes-master/wheelhouse/async-timeout-3.0.1.tar.gz differ diff --git a/kubernetes-master/wheelhouse/attrs-21.2.0.tar.gz b/kubernetes-master/wheelhouse/attrs-21.2.0.tar.gz new file mode 100644 index 0000000..c028019 Binary files /dev/null and b/kubernetes-master/wheelhouse/attrs-21.2.0.tar.gz differ diff --git a/kubernetes-master/wheelhouse/cached-property-1.5.2.tar.gz b/kubernetes-master/wheelhouse/cached-property-1.5.2.tar.gz new file mode 100644 index 0000000..501f2c0 Binary files /dev/null and b/kubernetes-master/wheelhouse/cached-property-1.5.2.tar.gz differ diff --git a/kubernetes-master/wheelhouse/certifi-2020.12.5.tar.gz b/kubernetes-master/wheelhouse/certifi-2020.12.5.tar.gz deleted file mode 100644 index 3023d0a..0000000 Binary files 
a/kubernetes-master/wheelhouse/certifi-2020.12.5.tar.gz and /dev/null differ diff --git a/kubernetes-master/wheelhouse/certifi-2021.10.8.tar.gz b/kubernetes-master/wheelhouse/certifi-2021.10.8.tar.gz new file mode 100644 index 0000000..9e1581b Binary files /dev/null and b/kubernetes-master/wheelhouse/certifi-2021.10.8.tar.gz differ diff --git a/kubernetes-master/wheelhouse/charmhelpers-0.20.21.tar.gz b/kubernetes-master/wheelhouse/charmhelpers-0.20.21.tar.gz deleted file mode 100644 index ca65d07..0000000 Binary files a/kubernetes-master/wheelhouse/charmhelpers-0.20.21.tar.gz and /dev/null differ diff --git a/kubernetes-master/wheelhouse/charmhelpers-0.20.23.tar.gz b/kubernetes-master/wheelhouse/charmhelpers-0.20.23.tar.gz new file mode 100644 index 0000000..8fbc8ec Binary files /dev/null and b/kubernetes-master/wheelhouse/charmhelpers-0.20.23.tar.gz differ diff --git a/kubernetes-master/wheelhouse/charset-normalizer-2.0.7.tar.gz b/kubernetes-master/wheelhouse/charset-normalizer-2.0.7.tar.gz new file mode 100644 index 0000000..61df022 Binary files /dev/null and b/kubernetes-master/wheelhouse/charset-normalizer-2.0.7.tar.gz differ diff --git a/kubernetes-master/wheelhouse/click-7.1.2.tar.gz b/kubernetes-master/wheelhouse/click-7.1.2.tar.gz deleted file mode 100644 index 698411c..0000000 Binary files a/kubernetes-master/wheelhouse/click-7.1.2.tar.gz and /dev/null differ diff --git a/kubernetes-master/wheelhouse/hvac-0.10.10.tar.gz b/kubernetes-master/wheelhouse/hvac-0.10.10.tar.gz deleted file mode 100644 index 59d8fc7..0000000 Binary files a/kubernetes-master/wheelhouse/hvac-0.10.10.tar.gz and /dev/null differ diff --git a/kubernetes-master/wheelhouse/hvac-0.11.2.tar.gz b/kubernetes-master/wheelhouse/hvac-0.11.2.tar.gz new file mode 100644 index 0000000..6aa6982 Binary files /dev/null and b/kubernetes-master/wheelhouse/hvac-0.11.2.tar.gz differ diff --git a/kubernetes-master/wheelhouse/idna-2.10.tar.gz b/kubernetes-master/wheelhouse/idna-2.10.tar.gz deleted file 
mode 100644 index e9a9e03..0000000 Binary files a/kubernetes-master/wheelhouse/idna-2.10.tar.gz and /dev/null differ diff --git a/kubernetes-master/wheelhouse/idna-3.3.tar.gz b/kubernetes-master/wheelhouse/idna-3.3.tar.gz new file mode 100644 index 0000000..ff2bcbf Binary files /dev/null and b/kubernetes-master/wheelhouse/idna-3.3.tar.gz differ diff --git a/kubernetes-master/wheelhouse/idna-ssl-1.1.0.tar.gz b/kubernetes-master/wheelhouse/idna-ssl-1.1.0.tar.gz new file mode 100644 index 0000000..2380177 Binary files /dev/null and b/kubernetes-master/wheelhouse/idna-ssl-1.1.0.tar.gz differ diff --git a/kubernetes-master/wheelhouse/itsdangerous-1.1.0.tar.gz b/kubernetes-master/wheelhouse/itsdangerous-1.1.0.tar.gz deleted file mode 100644 index 13644ac..0000000 Binary files a/kubernetes-master/wheelhouse/itsdangerous-1.1.0.tar.gz and /dev/null differ diff --git a/kubernetes-master/wheelhouse/loadbalancer_interface-1.1.1.tar.gz b/kubernetes-master/wheelhouse/loadbalancer_interface-1.1.1.tar.gz new file mode 100644 index 0000000..265133c Binary files /dev/null and b/kubernetes-master/wheelhouse/loadbalancer_interface-1.1.1.tar.gz differ diff --git a/kubernetes-master/wheelhouse/marshmallow-3.14.0.tar.gz b/kubernetes-master/wheelhouse/marshmallow-3.14.0.tar.gz new file mode 100644 index 0000000..14e4efc Binary files /dev/null and b/kubernetes-master/wheelhouse/marshmallow-3.14.0.tar.gz differ diff --git a/kubernetes-master/wheelhouse/marshmallow-enum-1.5.1.tar.gz b/kubernetes-master/wheelhouse/marshmallow-enum-1.5.1.tar.gz new file mode 100644 index 0000000..642941a Binary files /dev/null and b/kubernetes-master/wheelhouse/marshmallow-enum-1.5.1.tar.gz differ diff --git a/kubernetes-master/wheelhouse/multidict-5.2.0.tar.gz b/kubernetes-master/wheelhouse/multidict-5.2.0.tar.gz new file mode 100644 index 0000000..9563429 Binary files /dev/null and b/kubernetes-master/wheelhouse/multidict-5.2.0.tar.gz differ diff --git a/kubernetes-master/wheelhouse/netifaces-0.10.9.tar.gz 
b/kubernetes-master/wheelhouse/netifaces-0.10.9.tar.gz deleted file mode 100644 index 97ea2e7..0000000 Binary files a/kubernetes-master/wheelhouse/netifaces-0.10.9.tar.gz and /dev/null differ diff --git a/kubernetes-master/wheelhouse/netifaces-0.11.0.tar.gz b/kubernetes-master/wheelhouse/netifaces-0.11.0.tar.gz new file mode 100644 index 0000000..3a35596 Binary files /dev/null and b/kubernetes-master/wheelhouse/netifaces-0.11.0.tar.gz differ diff --git a/kubernetes-master/wheelhouse/ops-1.2.0.tar.gz b/kubernetes-master/wheelhouse/ops-1.2.0.tar.gz new file mode 100644 index 0000000..2cb4358 Binary files /dev/null and b/kubernetes-master/wheelhouse/ops-1.2.0.tar.gz differ diff --git a/kubernetes-master/wheelhouse/ops_reactive_interface-1.0.1.tar.gz b/kubernetes-master/wheelhouse/ops_reactive_interface-1.0.1.tar.gz new file mode 100644 index 0000000..14f5ded Binary files /dev/null and b/kubernetes-master/wheelhouse/ops_reactive_interface-1.0.1.tar.gz differ diff --git a/kubernetes-master/wheelhouse/pyaml-20.4.0.tar.gz b/kubernetes-master/wheelhouse/pyaml-20.4.0.tar.gz deleted file mode 100644 index 0d5fd76..0000000 Binary files a/kubernetes-master/wheelhouse/pyaml-20.4.0.tar.gz and /dev/null differ diff --git a/kubernetes-master/wheelhouse/pyaml-21.10.1.tar.gz b/kubernetes-master/wheelhouse/pyaml-21.10.1.tar.gz new file mode 100644 index 0000000..b19aad3 Binary files /dev/null and b/kubernetes-master/wheelhouse/pyaml-21.10.1.tar.gz differ diff --git a/kubernetes-master/wheelhouse/requests-2.25.1.tar.gz b/kubernetes-master/wheelhouse/requests-2.25.1.tar.gz deleted file mode 100644 index 9dcfcf2..0000000 Binary files a/kubernetes-master/wheelhouse/requests-2.25.1.tar.gz and /dev/null differ diff --git a/kubernetes-master/wheelhouse/requests-2.26.0.tar.gz b/kubernetes-master/wheelhouse/requests-2.26.0.tar.gz new file mode 100644 index 0000000..101dc79 Binary files /dev/null and b/kubernetes-master/wheelhouse/requests-2.26.0.tar.gz differ diff --git 
a/kubernetes-master/wheelhouse/six-1.15.0.tar.gz b/kubernetes-master/wheelhouse/six-1.15.0.tar.gz deleted file mode 100644 index 63329e4..0000000 Binary files a/kubernetes-master/wheelhouse/six-1.15.0.tar.gz and /dev/null differ diff --git a/kubernetes-master/wheelhouse/six-1.16.0.tar.gz b/kubernetes-master/wheelhouse/six-1.16.0.tar.gz new file mode 100644 index 0000000..5bf3a27 Binary files /dev/null and b/kubernetes-master/wheelhouse/six-1.16.0.tar.gz differ diff --git a/kubernetes-master/wheelhouse/tenacity-5.0.3.tar.gz b/kubernetes-master/wheelhouse/tenacity-5.0.3.tar.gz new file mode 100644 index 0000000..c7d05ba Binary files /dev/null and b/kubernetes-master/wheelhouse/tenacity-5.0.3.tar.gz differ diff --git a/kubernetes-master/wheelhouse/tenacity-7.0.0.tar.gz b/kubernetes-master/wheelhouse/tenacity-7.0.0.tar.gz deleted file mode 100644 index 2050c4d..0000000 Binary files a/kubernetes-master/wheelhouse/tenacity-7.0.0.tar.gz and /dev/null differ diff --git a/kubernetes-master/wheelhouse/typing_extensions-3.10.0.2.tar.gz b/kubernetes-master/wheelhouse/typing_extensions-3.10.0.2.tar.gz new file mode 100644 index 0000000..dad7a2c Binary files /dev/null and b/kubernetes-master/wheelhouse/typing_extensions-3.10.0.2.tar.gz differ diff --git a/kubernetes-master/wheelhouse/urllib3-1.26.4.tar.gz b/kubernetes-master/wheelhouse/urllib3-1.26.4.tar.gz deleted file mode 100644 index 4d693e7..0000000 Binary files a/kubernetes-master/wheelhouse/urllib3-1.26.4.tar.gz and /dev/null differ diff --git a/kubernetes-master/wheelhouse/urllib3-1.26.7.tar.gz b/kubernetes-master/wheelhouse/urllib3-1.26.7.tar.gz new file mode 100644 index 0000000..990abe6 Binary files /dev/null and b/kubernetes-master/wheelhouse/urllib3-1.26.7.tar.gz differ diff --git a/kubernetes-master/wheelhouse/yarl-1.7.0.tar.gz b/kubernetes-master/wheelhouse/yarl-1.7.0.tar.gz new file mode 100644 index 0000000..3acaed4 Binary files /dev/null and b/kubernetes-master/wheelhouse/yarl-1.7.0.tar.gz differ diff --git 
a/kubernetes-worker/.build.manifest b/kubernetes-worker/.build.manifest index 4bf868d..6f82c60 100644 --- a/kubernetes-worker/.build.manifest +++ b/kubernetes-worker/.build.manifest @@ -1,137 +1,132 @@ { "layers": [ { - "branch": "refs/heads/master\nrefs/heads/stable", + "branch": "refs/heads/master", "rev": "fcdcea4e5de3e1556c24e6704607862d0ba00a56", "url": "layer:options" }, { - "branch": "refs/heads/master\nrefs/heads/stable", - "rev": "623e69c7b432456fd4364f6e1835424fd6b5425e", + "branch": "refs/heads/master", + "rev": "a3ff62c32c993d80417f6e093e3ef95e42f62083", "url": "layer:basic" }, { - "branch": "refs/heads/master\nrefs/heads/stable", + "branch": "refs/heads/master", "rev": "527dd64fc4b9a6b0f8d80a3c2c0b865155050275", "url": "layer:debug" }, { - "branch": "refs/heads/master\nrefs/heads/stable", - "rev": "85d7cc4f7180d19df20e264358e920004cec192b", + "branch": "refs/heads/master", + "rev": "d3acdf209cbaf5b732e9aba621778a0f56dbaeb9", "url": "layer:snap" }, { - "branch": "refs/heads/master\nrefs/heads/stable", + "branch": "refs/heads/master", "rev": "cc5bd3f49b2fa5e6c3ab2336763c313ec8bf083f", "url": "layer:leadership" }, { - "branch": "refs/heads/master\nrefs/heads/stable", + "branch": "refs/heads/master", "rev": "f491ebe32b503c9712d2f8cd602dcce18f4aab46", "url": "layer:metrics" }, { - "branch": "refs/heads/master\nrefs/heads/stable", + "branch": "refs/heads/master", "rev": "47dfcd4920ef6317850a4837ef0057ab0092a18e", "url": "layer:nagios" }, { - "branch": "refs/heads/master\nrefs/heads/stable", + "branch": "refs/heads/master", "rev": "fb46dec78d390571753d21876bbba689bbbca9e4", "url": "layer:tls-client" }, { - "branch": "refs/heads/master\nrefs/heads/stable", + "branch": "refs/heads/master", "rev": "b60102068c6f0ddbeaf8a308549a3e88cfa35688", "url": "layer:cdk-service-kicker" }, { - "branch": "refs/heads/master\nrefs/heads/stable", + "branch": "refs/heads/master", "rev": "023c67941e18663a4df49f53edba809f43ba5069", "url": "layer:cis-benchmark" }, { - "branch": 
"refs/heads/master\nrefs/heads/stable", + "branch": "refs/heads/master", "rev": "fa27fc93e0b08000963e83a6bfe49812d890dfcf", "url": "layer:coordinator" }, { - "branch": "refs/heads/master\nrefs/heads/stable", - "rev": "763297a075b3654f261af20c84b940d87f55354e", + "branch": "refs/heads/master", + "rev": "bbeabfee52c4442cdaf3a34e5e35530a3bd71156", "url": "layer:kubernetes-common" }, { - "branch": "refs/heads/master\nrefs/heads/stable", + "branch": "refs/heads/master", "rev": "809f06c6f6521be59e21859eaebeccd13f4d8c28", "url": "layer:kubernetes-master-worker-base" }, { - "branch": "refs/heads/stable", - "rev": "39ba9cb410333cb3b5693e83407a865fef96e45f", + "branch": "refs/heads/master", + "rev": "c753ea9346c2503a85464d11740bce60ff8f6a66", "url": "kubernetes-worker" }, { - "branch": "refs/heads/master\nrefs/heads/stable", + "branch": "refs/heads/master", "rev": "2e0e1fdea6d83b55078200aacb537d60013ec5bc", "url": "interface:nrpe-external-master" }, { - "branch": "refs/heads/master\nrefs/heads/stable", + "branch": "refs/heads/master", "rev": "d9850016d930a6d507b9fd45e2598d327922b140", "url": "interface:tls-certificates" }, { - "branch": "refs/heads/master\nrefs/heads/stable", + "branch": "refs/heads/master", "rev": "6f927f10b97f45c566481cf57a29d433f17373e1", "url": "interface:container-runtime" }, { - "branch": "refs/heads/master\nrefs/heads/stable", + "branch": "refs/heads/master", "rev": "632131b1f122daf6fb601fd4c9f1e4dbb1a92e09", "url": "interface:http" }, { - "branch": "refs/heads/master\nrefs/heads/stable", - "rev": "b941b3b542d78ad15aa40937b26c7bf727e1b39b", + "branch": "refs/heads/master", + "rev": "88b1e8fad78d06efdbf512cd75eaa0bb308eb1c1", "url": "interface:kubernetes-cni" }, { - "branch": "refs/heads/master\nrefs/heads/stable", - "rev": "9bc32742b7720a755ada9526424e5d80092e1536", - "url": "interface:kube-dns" - }, - { - "branch": "refs/heads/master\nrefs/heads/stable", - "rev": "2236a52be495a45b8f492bae37bbba50e468ef42", + "branch": "refs/heads/master", + "rev": 
"534310f5bca8edde02cadaf6ac42231cea0b040b", "url": "interface:kube-control" }, { - "branch": "refs/heads/master\nrefs/heads/stable", + "branch": "refs/heads/master", "rev": "d8d8c7ef17c99ad53383f3cabf4cf5c8191d16f7", "url": "interface:aws-integration" }, { - "branch": "refs/heads/master\nrefs/heads/stable", + "branch": "refs/heads/master", "rev": "d8f093cb2930edf5f93678253dca2da70b73b4fb", "url": "interface:gcp-integration" }, { - "branch": "refs/heads/master\nrefs/heads/stable", + "branch": "refs/heads/master", "rev": "0d7a994f04b9e92ed847829ce8349b1a9c672e47", "url": "interface:openstack-integration" }, { - "branch": "refs/heads/master\nrefs/heads/stable", + "branch": "refs/heads/master", "rev": "d5caea55ced6785f391215ee457c3a964eaf3f4b", "url": "interface:vsphere-integration" }, { - "branch": "refs/heads/master\nrefs/heads/stable", - "rev": "45b79107f7bd5f14b3b956d1f45f659a567b0999", + "branch": "refs/heads/master", + "rev": "8d2202e433d7c188de4df2fd4bddb355193e93ac", "url": "interface:azure-integration" }, { - "branch": "refs/heads/master\nrefs/heads/stable", + "branch": "refs/heads/master", "rev": "d5a2526fec9c3e8581f18b56e84a86871583e080", "url": "interface:mount" } @@ -142,10 +137,10 @@ "dynamic", "unchecked" ], - ".github/workflows/build.yml": [ + ".github/workflows/main.yml": [ "kubernetes-worker", "static", - "beab3a0e5eefddd3b3f3c11892725e819343b689b13a1872eaac1eb2d9ea083b" + "ac49b46a293a04f1a25c86769b054ec4cee88de9bf0354f26627fb99f044a711" ], ".gitignore": [ "kubernetes-worker", @@ -153,14 +148,9 @@ "e028ad966843fa4e09963c008d1200117caf1a42163c70795d9c55406f801d8c" ], ".travis.yml": [ - "kubernetes-worker", + "layer:cis-benchmark", "static", - "c2bd1b88f26c88b883696cca155c28671359a256ed48b90a9ea724b376f2a829" - ], - ".travis/profile-update.yaml": [ - "layer:basic", - "static", - "731e20aa59bf61c024d317ad630e478301a9386ccc0afe56e6c1c09db07ac83b" + "b6dbe144aa288b8a89caf1119b9835b407b234c9b32a1c81013b12a0593a8be2" ], "CONTRIBUTING.md": [ 
"kubernetes-worker", @@ -178,9 +168,9 @@ "f02fd85a4171482f6bb1d6f87fe0704d3a2da93eca04afe39a0310a00c409902" ], "Makefile": [ - "kubernetes-worker", + "layer:basic", "static", - "b000b0f022a76c3d3ee955c3d467af17ed9a38235677dfd9193c1b0a843d4050" + "b7ab3a34e5faf79b96a8632039a0ad0aa87f2a9b5f0ba604e007cafb22190301" ], "README.md": [ "kubernetes-worker", @@ -190,7 +180,7 @@ "actions.yaml": [ "kubernetes-worker", "dynamic", - "fc18958b027e694ba41fcb598ba7374e38be9b72526e2dae73473df5f630d489" + "e43fb57f67cffdf43f1af01d53154f4a21b5905b448dd0780a87059c97625b8e" ], "actions/cis-benchmark": [ "layer:cis-benchmark", @@ -212,11 +202,6 @@ "static", "ba4a19dc800ff6381367ad2bd84b0ad0c06180a77834c762750a4bdedb9ff366" ], - "actions/registry": [ - "kubernetes-worker", - "static", - "178024c8442ad2d6ffd6f09b4f0278792a74933ac7caa611f897de9e5b04473b" - ], "actions/resume": [ "kubernetes-worker", "static", @@ -245,7 +230,7 @@ "config.yaml": [ "kubernetes-worker", "dynamic", - "b7e5bf623ac413025b9e868845c59e4b8b437506df3f12e41a43ecea751761e7" + "bd2f6fe7b40e230014051cf982f0d1cdb8418555a006c3210f48f353675339bb" ], "copyright": [ "kubernetes-worker", @@ -627,31 +612,6 @@ "dynamic", "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7" ], - "hooks/kube-dns-relation-broken": [ - "layer:basic", - "dynamic", - "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7" - ], - "hooks/kube-dns-relation-changed": [ - "layer:basic", - "dynamic", - "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7" - ], - "hooks/kube-dns-relation-created": [ - "layer:basic", - "dynamic", - "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7" - ], - "hooks/kube-dns-relation-departed": [ - "layer:basic", - "dynamic", - "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7" - ], - "hooks/kube-dns-relation-joined": [ - "layer:basic", - "dynamic", - "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7" - ], "hooks/leader-elected": [ 
"layer:coordinator", "static", @@ -855,7 +815,7 @@ "hooks/relations/azure-integration/provides.py": [ "interface:azure-integration", "static", - "a3a1de7f79c5f2cc37f2dff450d8e9b2ce36c63c0328bb6bedd2ade7519a7442" + "33af701c7abd51e869de945c1f032749136c66560bb604e8e72521dc9d7e495b" ], "hooks/relations/azure-integration/pydocmd.yml": [ "interface:azure-integration", @@ -865,7 +825,7 @@ "hooks/relations/azure-integration/requires.py": [ "interface:azure-integration", "static", - "112bfa057cdcf91a812dea080330e9323f4d7e4b1bcacfd69b3ad95dd2274cbb" + "2e60fecf8bc65d84124742d0833afc90d2e839f5dfa2923e8d1849063c51f47a" ], "hooks/relations/container-runtime/.gitignore": [ "interface:container-runtime", @@ -1020,47 +980,22 @@ "hooks/relations/kube-control/provides.py": [ "interface:kube-control", "static", - "5dffb8504d0993ad756b0631fd82ef465dc9127641b448bea76596fc6f3e55c4" + "08e090bb3ad51e5825590ad0dee077288648b171764480afc20205c740fa15be" ], "hooks/relations/kube-control/requires.py": [ "interface:kube-control", "static", - "496ed9b2d4f6fef2e1e26b53b8f8c97e67b9a96b4fcfcb40ef671d2469b983e3" + "a064ad0b75081439faeda7fb948934bfd86a7ab2079d25b2ad47aa5fa32c2a6f" ], - "hooks/relations/kube-dns/README.md": [ - "interface:kube-dns", + "hooks/relations/kubernetes-cni/.github/workflows/tests.yaml": [ + "interface:kubernetes-cni", "static", - "f02265c0931c5582cbad911050ee1578c370e4ecaffdbf56d11505f97ce44fee" - ], - "hooks/relations/kube-dns/__init__.py": [ - "interface:kube-dns", - "static", - "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855" - ], - "hooks/relations/kube-dns/interface.yaml": [ - "interface:kube-dns", - "static", - "e4ca8faafe4cce43eed862d35346780df4cba4eb243baaf5aecd891514deb26d" - ], - "hooks/relations/kube-dns/provides.py": [ - "interface:kube-dns", - "static", - "f0ea4f0610779a70860d5257f0760f62ea2ec682c5f005ba5afff92c9824aa36" - ], - "hooks/relations/kube-dns/requires.py": [ - "interface:kube-dns", - "static", - 
"38b819b7ee98c3c38142d2cc8122dedd9d8c0f34767c5cc11392a564f38db370" + "d0015cd49675976ff87832f5ef7ea20ffca961786379c72bb6acdbdeddd9137c" ], "hooks/relations/kubernetes-cni/.gitignore": [ "interface:kubernetes-cni", "static", - "cf237c7aff44efbe6e502e645c3e06da03a69d7bdeb43392108ef3348143417e" - ], - "hooks/relations/kubernetes-cni/.travis.yml": [ - "interface:kubernetes-cni", - "static", - "c2bd1b88f26c88b883696cca155c28671359a256ed48b90a9ea724b376f2a829" + "0594213ebf9c6ef87827b30405ee67d847f73f4185a865e0e5e9c0be9d29eabe" ], "hooks/relations/kubernetes-cni/README.md": [ "interface:kubernetes-cni", @@ -1080,12 +1015,12 @@ "hooks/relations/kubernetes-cni/provides.py": [ "interface:kubernetes-cni", "static", - "4c3fc3f06a42a2f67fc03c4bc1b4c617021dc1ebb7111527ce6d9cd523b0c40e" + "e436e187f2bab6e73add2b897cd43a2f000fde4726e40b772b66f27786c85dee" ], "hooks/relations/kubernetes-cni/requires.py": [ "interface:kubernetes-cni", "static", - "c5fdd7a0eae100833ae6c79474f931803466cd5b206cf8f456cd6f2716d1d2fa" + "45398af27246eaf2005115bd3f270b78fc830d4345b02cc0c4d438711b7cd9fe" ], "hooks/relations/mount/.gitignore": [ "interface:mount", @@ -1385,7 +1320,7 @@ "layer.yaml": [ "kubernetes-worker", "dynamic", - "cd6bd7d1a594dbc399906e13d9e0af3b05234bbf92ef7e83c786b465fd652e43" + "9ad8956799dcd00d6ab47306e99ea638f469dddf8f3f0a75f3436e3424ec056b" ], "lib/charms/coordinator.py": [ "layer:coordinator", @@ -1400,7 +1335,7 @@ "lib/charms/layer/basic.py": [ "layer:basic", "static", - "3126b5754ad39402ee27e64527044ddd231ed1cd137fcedaffb51e63a635f108" + "98b47134770ed6e4c0b2d4aad73cd5bc200bec84aa9c1c4e075fd70c3222a0c9" ], "lib/charms/layer/execd.py": [ "layer:basic", @@ -1410,7 +1345,7 @@ "lib/charms/layer/kubernetes_common.py": [ "layer:kubernetes-common", "static", - "826650823a9af745e8a57defba66d1f2fe1c735f0fe64d282cf528ca65272101" + "29cedffd490e6295273d195a7c9bace2fcdf149826e7427f2af9698f7f75055b" ], "lib/charms/layer/nagios.py": [ "layer:nagios", @@ -1425,7 +1360,7 @@ 
"lib/charms/layer/snap.py": [ "layer:snap", "static", - "1a3a2a09bb5f2ea1b557354d09f6968cecb6b4204ded019e704203fb3391f7be" + "f278a3b06a1604e1c59f107d2ff3e9f5705e3c6c7be7a012c1a500d0fc8925df" ], "lib/charms/layer/tls_client.py": [ "layer:tls-client", @@ -1450,7 +1385,7 @@ "metadata.yaml": [ "kubernetes-worker", "dynamic", - "087372af1fc0a5a22415a0517ee159e1ccae2c4ee67005ba8ac67db6eb0979af" + "1aa8d365447c86fe0ab73979d0390728e771ffe7cdace09b97de4119d5ee2e69" ], "metrics.yaml": [ "kubernetes-worker", @@ -1480,7 +1415,7 @@ "reactive/kubernetes_worker.py": [ "kubernetes-worker", "static", - "75969472e7f0647befc465b3c2cd7010f239d9d51d1f61b491162353d6c326b3" + "384ce90f41b0abe38c5cafcbf8b485ad7cb31f093c16db56eb1d3405208499bc" ], "reactive/leadership.py": [ "layer:leadership", @@ -1497,31 +1432,11 @@ "static", "08e850e401d2004523dca6b5e6bc47c33d558bf575dd55969491e11cd3ed98c8" ], - "registry-configmap.yaml": [ - "kubernetes-worker", - "static", - "1558fde27b806faefe57d0f6bf2a2a28fd5909501be6776fcb4b3e1242471fea" - ], "requirements.txt": [ "layer:basic", "static", "a00f75d80849e5b4fc5ad2e7536f947c25b1a4044b341caa8ee87a92d3a4c804" ], - "script/bootstrap": [ - "kubernetes-worker", - "static", - "1985d9a07e8d764351530f6eb1b81bef6a4c035dc75422c03f4672ceaf1a4c18" - ], - "script/build": [ - "kubernetes-worker", - "static", - "e78cab1bead2e3c8f7970558f4d08a81f6cc59e5c2903e997644f7e51e7a3633" - ], - "script/upload": [ - "kubernetes-worker", - "static", - "aa13345e5f6873df26fb1705d1a1d51584fb32805329ac2d7a11f8ad7cbf4569" - ], "setup.py": [ "layer:snap", "static", @@ -1537,6 +1452,11 @@ "static", "c2d3977fa89d453f0f13a8a823621c44bb642ec7392d8b7462b631864f665029" ], + "templates/cdk.auth-webhook-secret.yaml": [ + "layer:kubernetes-common", + "static", + "efaf34c12a5c961fa7843199070945ba05717b3656a0f3acc3327f45334bcaec" + ], "templates/default-http-backend.yaml": [ "kubernetes-worker", "static", @@ -1545,7 +1465,7 @@ "templates/ingress-daemon-set.yaml": [ "kubernetes-worker", "static", 
- "e3fd0181d69058134f6afbd18374be0822e518f8c169d052d041f2a342974c09" + "c3b7e7d95c8a4cd0079145be797346b77a64f3b48b808cddbfdc33787b48d316" ], "templates/microbot-example.yaml": [ "kubernetes-worker", @@ -1555,32 +1475,47 @@ "templates/nagios_plugin.py": [ "kubernetes-worker", "static", - "636ca61f46749a762e165e9b13fc3da8823133af5c98b90dd33e7365171b84a4" + "8b425bb29ed41ee1b1c2fddc7acf5f24f5c6a0cf7432c86cf8486434032fcb14" ], "templates/nfs-provisioner.yaml": [ "kubernetes-worker", "static", "e3ee7c995c9a3624daffdc9a09467e9e274b38a4bb6c3851d928bf7bf1151fac" ], - "templates/registry.yaml": [ - "kubernetes-worker", + "tests/functional/conftest.py": [ + "layer:kubernetes-common", "static", - "75ef1f3d765a94e8b0c19a5e63ebf5df6788f91dbbecc4bf587695fecd63da87" + "fd53e0c38b4dda0c18096167889cd0d85b98b0a13225f9f8853261241e94078c" + ], + "tests/functional/test_k8s_common.py": [ + "layer:kubernetes-common", + "static", + "680a53724154771dd78422bbaf24b151788d86dd07960712c5d9e0d758499b50" + ], + "tests/unit/conftest.py": [ + "layer:kubernetes-common", + "static", + "fd53e0c38b4dda0c18096167889cd0d85b98b0a13225f9f8853261241e94078c" + ], + "tests/unit/test_k8s_common.py": [ + "layer:kubernetes-common", + "static", + "da9bcea8e75160311a4055c1cbf577b497ddd45dc00223c5f1667598f94d9be4" ], "tox.ini": [ - "layer:snap", + "layer:kubernetes-common", "static", - "4db933f2c03cda5e330db64806f0a06f6f6bc608e0db88c9c74b7171fc054ad1" + "70b0bf95842031247336c8734981b72d991d64d108f19697429abd80c8739877" ], "version": [ "kubernetes-worker", "dynamic", - "941f7f7d1878f1ab5754c308ddd1eba3c788e87c6350872cbc1be0863de33549" + "fd5e95ef69d8cf8fc1f89f04cfe6d6f8a97cf271f75e20f285b9e1efbdc4ba79" ], "wheelhouse.txt": [ "kubernetes-worker", "dynamic", - "6994129978e2e9b71815776a834a53cda7889390700c33c5478d5546d083dc3b" + "4062748728aabda25a3630f994d7801b1578a0a98dd5614b77251cd3e2ca8f57" ], "wheelhouse/Jinja2-2.10.1.tar.gz": [ "layer:basic", @@ -1588,7 +1523,7 @@ 
"065c4f02ebe7f7cf559e49ee5a95fb800a9e4528727aec6f24402a5374c65013" ], "wheelhouse/MarkupSafe-1.1.1.tar.gz": [ - "__pip__", + "layer:basic", "dynamic", "29872e92839765e546828bb7754a68c418d927cd064fd4708fab9fe9c8bb116b" ], @@ -1602,10 +1537,10 @@ "dynamic", "cacecf0baa674d356641f1d406b8bff1d756d739c46b869a54de515d08e6fc9c" ], - "wheelhouse/charmhelpers-0.20.21.tar.gz": [ + "wheelhouse/charmhelpers-0.20.23.tar.gz": [ "layer:basic", "dynamic", - "37dd06f9548724d38352d1eaf91216df9167066745774118481d40974599715c" + "59a9776594e91cd3e3e000043f8668b4d7b279422dbb17e320f01dc16385b80e" ], "wheelhouse/charms.reactive-1.4.1.tar.gz": [ "layer:basic", @@ -1632,10 +1567,10 @@ "dynamic", "c0a292bd977ef590379a3f05d7b7f65135487b67470f6281289a94e015650ea1" ], - "wheelhouse/pyaml-20.4.0.tar.gz": [ + "wheelhouse/pyaml-21.10.1.tar.gz": [ "__pip__", "dynamic", - "29a5c2a68660a799103d6949167bd6c7953d031449d08802386372de1db6ad71" + "c6519fee13bf06e3bb3f20cacdea8eba9140385a7c2546df5dbae4887f768383" ], "wheelhouse/setuptools-41.6.0.zip": [ "layer:basic", @@ -1647,15 +1582,15 @@ "dynamic", "70a4cf5584e966ae92f54a764e6437af992ba42ac4bca7eb37cc5d02b98ec40a" ], - "wheelhouse/six-1.15.0.tar.gz": [ + "wheelhouse/six-1.16.0.tar.gz": [ "__pip__", "dynamic", - "30639c035cdb23534cd4aa2dd52c3bf48f06e5f4a941509c8bafd8ce11080259" + "1e61c37477a1626458e36f7b1d82aa5c9b094fa4802892072e49de9c60c4c926" ], - "wheelhouse/tenacity-7.0.0.tar.gz": [ + "wheelhouse/tenacity-5.0.3.tar.gz": [ "layer:snap", "dynamic", - "5bd16ef5d3b985647fe28dfa6f695d343aa26479a04e8792b9d3c8f49e361ae1" + "24b7f302a1caa1801e58b39ea557129c095966e64e5b1ddad3c93a6cb033e38b" ], "wheelhouse/wheel-0.33.6.tar.gz": [ "layer:basic", diff --git a/kubernetes-worker/.github/workflows/build.yml b/kubernetes-worker/.github/workflows/build.yml deleted file mode 100644 index f3a6191..0000000 --- a/kubernetes-worker/.github/workflows/build.yml +++ /dev/null @@ -1,16 +0,0 @@ -name: Builds kubernetes-worker charm -on: [push, pull_request] - -jobs: - build: 
- name: Build charm - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v2 - - name: Setup Python 3.8 - uses: actions/setup-python@v2 - with: - python-version: '3.8' - - name: Run build - run: | - make charm diff --git a/kubernetes-worker/.github/workflows/main.yml b/kubernetes-worker/.github/workflows/main.yml new file mode 100644 index 0000000..6998dbc --- /dev/null +++ b/kubernetes-worker/.github/workflows/main.yml @@ -0,0 +1,60 @@ +name: Test Suite +on: [pull_request] + +jobs: + lint: + name: Lint + runs-on: ubuntu-latest + strategy: + matrix: + python: [3.6, 3.7, 3.8, 3.9] + steps: + - name: Check out code + uses: actions/checkout@v2 + - name: Setup Python + uses: actions/setup-python@v2 + with: + python-version: ${{ matrix.python }} + - name: Install Dependencies + run: | + pip install tox + - name: Run lint + run: tox -vve lint + unit-test: + name: Unit Tests + runs-on: ubuntu-latest + strategy: + matrix: + python: [3.6, 3.7, 3.8, 3.9] + steps: + - name: Check out code + uses: actions/checkout@v2 + - name: Setup Python + uses: actions/setup-python@v2 + with: + python-version: ${{ matrix.python }} + - name: Install Dependencies + run: | + pip install tox + - name: Run test + run: tox -vve unit + integration-test: + name: Integration test with VMWare + runs-on: self-hosted + timeout-minutes: 360 + steps: + - name: Check out code + uses: actions/checkout@v2 + - name: Setup Python + uses: actions/setup-python@v2 + with: + python-version: 3.8 + - name: Setup operator environment + uses: charmed-kubernetes/actions-operator@master + with: + provider: vsphere + credentials-yaml: ${{ secrets.CREDENTIALS_YAML }} + clouds-yaml: ${{ secrets.CLOUDS_YAML }} + bootstrap-options: "--model-default datastore=vsanDatastore --model-default primary-network=VLAN_2764" + - name: Run test + run: tox -e integration diff --git a/kubernetes-worker/.travis.yml b/kubernetes-worker/.travis.yml index d2be8be..66d8e1f 100644 --- a/kubernetes-worker/.travis.yml +++ 
b/kubernetes-worker/.travis.yml @@ -1,8 +1,6 @@ language: python python: - "3.5" - - "3.6" - - "3.7" install: - pip install tox-travis script: diff --git a/kubernetes-worker/.travis/profile-update.yaml b/kubernetes-worker/.travis/profile-update.yaml deleted file mode 100644 index 57f96eb..0000000 --- a/kubernetes-worker/.travis/profile-update.yaml +++ /dev/null @@ -1,12 +0,0 @@ -config: {} -description: Default LXD profile - updated -devices: - eth0: - name: eth0 - parent: lxdbr0 - nictype: bridged - type: nic - root: - path: / - pool: default - type: disk diff --git a/kubernetes-worker/Makefile b/kubernetes-worker/Makefile index 3f0429f..a1ad3a5 100644 --- a/kubernetes-worker/Makefile +++ b/kubernetes-worker/Makefile @@ -1,18 +1,24 @@ -CHANNEL ?= unpublished -CHARM := kubernetes-worker +#!/usr/bin/make -setup-env: - bash script/bootstrap +all: lint unit_test -charm: setup-env - bash script/build -upload: -ifndef NAMESPACE - $(error NAMESPACE is not set) -endif +.PHONY: clean +clean: + @rm -rf .tox - env CHARM=$(CHARM) NAMESPACE=$(NAMESPACE) CHANNEL=$(CHANNEL) bash script/upload +.PHONY: apt_prereqs +apt_prereqs: + @# Need tox, but don't install the apt version unless we have to (don't want to conflict with pip) + @which tox >/dev/null || (sudo apt-get install -y python-pip && sudo pip install tox) -.phony: charm upload setup-env -all: charm +.PHONY: lint +lint: apt_prereqs + @tox --notest + @PATH=.tox/py34/bin:.tox/py35/bin flake8 $(wildcard hooks reactive lib unit_tests tests) + @charm proof + +.PHONY: unit_test +unit_test: apt_prereqs + @echo Starting tests... + tox diff --git a/kubernetes-worker/actions.yaml b/kubernetes-worker/actions.yaml index 5f774ac..f07e4ff 100644 --- a/kubernetes-worker/actions.yaml +++ b/kubernetes-worker/actions.yaml @@ -73,36 +73,3 @@ "description": "Number of microbots to launch in Kubernetes." "upgrade": "description": "Upgrade the kubernetes snaps" -"registry": - "description": | - Create a private Docker registry. 
- DEPRECATED: See https://ubuntu.com/kubernetes/docs/docker-registry - "params": - "htpasswd": - "type": "string" - "description": "base64 encoded htpasswd file used for authentication." - "htpasswd-plain": - "type": "string" - "description": "base64 encoded plaintext version of the htpasswd file, needed\ - \ by docker daemons to authenticate to the registry." - "tlscert": - "type": "string" - "description": "base64 encoded TLS certificate for the registry. Common Name\ - \ must match the domain name of the registry." - "tlskey": - "type": "string" - "description": "base64 encoded TLS key for the registry." - "domain": - "type": "string" - "description": "The domain name for the registry. Must match the Common Name\ - \ of the certificate." - "ingress": - "type": "boolean" - "default": !!bool "false" - "description": "Create an Ingress resource for the registry (or delete resource\ - \ object if \"delete\" is True)" - "delete": - "type": "boolean" - "default": !!bool "false" - "description": "Remove a registry replication controller, service, and ingress\ - \ if True." diff --git a/kubernetes-worker/actions/registry b/kubernetes-worker/actions/registry deleted file mode 100755 index eb95d05..0000000 --- a/kubernetes-worker/actions/registry +++ /dev/null @@ -1,152 +0,0 @@ -#!/usr/local/sbin/charm-env python3 -# -# For a usage examples, see README.md -# -# TODO -# -# - make the action idempotent (i.e. 
if you run it multiple times, the first -# run will create/delete the registry, and the reset will be a no-op and won't -# error out) -# -# - take only a plain authentication file, and create the encrypted version in -# the action -# -# - validate the parameters (make sure tlscert is a certificate, that tlskey is a -# proper key, etc) -# -# - when https://bugs.launchpad.net/juju/+bug/1661015 is fixed, handle the -# base64 encoding the parameters in the action itself - -import os -import sys - -from base64 import b64encode - -from charmhelpers.core.hookenv import action_get -from charmhelpers.core.hookenv import action_set -from charms.templating.jinja2 import render -from charms.reactive import endpoint_from_flag -from subprocess import call, check_output - -os.environ['PATH'] += os.pathsep + os.path.join(os.sep, 'snap', 'bin') - -deletion = action_get('delete') - -context = {} - -arch = check_output(['dpkg', '--print-architecture']).rstrip() -context['arch'] = arch.decode('utf-8') - -# This action was deprecated in 1.17. -action_set({ - 'notice': - ('DEPRECATED: See https://ubuntu.com/kubernetes/docs/docker-registry ' - 'for supported container registry options.') -}) - -# These config options must be defined in the case of a creation -param_error = False -for param in ('tlscert', 'tlskey', 'domain', 'htpasswd', 'htpasswd-plain'): - value = action_get(param) - if not value and not deletion: - key = "registry-create-parameter-{}".format(param) - error = "failure, parameter {} is required".format(param) - action_set({key: error}) - param_error = True - - context[param] = value - -# Create the dockercfg template variable -dockercfg = '{"%s": {"auth": "%s", "email": "root@localhost"}}' % \ - (context['domain'], context['htpasswd-plain']) -context['dockercfg'] = b64encode(dockercfg.encode()).decode('ASCII') - -if param_error: - sys.exit(0) - -# This one is either true or false, no need to check if it has a "good" value. 
-context['ingress'] = action_get('ingress') - -# Declare a kubectl template when invoking kubectl -kubectl = ['kubectl', '--kubeconfig=/root/.kube/config'] - -# Remove deployment if requested -if deletion: - resources = ['svc/kube-registry', 'rc/kube-registry-v0', 'secrets/registry-tls-data', - 'secrets/registry-auth-data', 'secrets/registry-access'] - - if action_get('ingress'): - resources.append('ing/registry-ing') - - delete_command = kubectl + ['delete', '--ignore-not-found=true'] + resources - delete_response = call(delete_command) - if delete_response == 0: - action_set({'registry-delete': 'success'}) - else: - action_set({'registry-delete': 'failure'}) - - sys.exit(0) - -kube_control = endpoint_from_flag('kube-control.registry_location.available') -if kube_control: - registry_location = kube_control.get_registry_location() - context['registry'] = registry_location - -# Creation request -render('registry.yaml', '/root/cdk/addons/registry.yaml', - context) - -create_command = kubectl + ['create', '-f', - '/root/cdk/addons/registry.yaml'] - -create_response = call(create_command) - -if create_response == 0: - action_set({'registry-create': 'success'}) - - # Create a ConfigMap if it doesn't exist yet, else patch it. - # A ConfigMap is needed to change the default value for nginx' client_max_body_size. - # The default is 1MB, and this is the maximum size of images that can be - # pushed on the registry. 1MB images aren't useful, so we bump this value to 1024MB. 
- cm_name = 'nginx-load-balancer-conf' - check_cm_command = kubectl + ['get', 'cm', cm_name] - check_cm_response = call(check_cm_command) - - if check_cm_response == 0: - # There is an existing ConfigMap, patch it - patch = '{"data":{"body-size":"1024m"}}' - patch_cm_command = kubectl + ['patch', 'cm', cm_name, '-p', patch] - patch_cm_response = call(patch_cm_command) - - if patch_cm_response == 0: - action_set({'configmap-patch': 'success'}) - else: - action_set({'configmap-patch': 'failure'}) - - else: - # No existing ConfigMap, create it - render('registry-configmap.yaml', '/root/cdk/addons/registry-configmap.yaml', - context) - create_cm_command = kubectl + ['create', '-f', '/root/cdk/addons/registry-configmap.yaml'] - create_cm_response = call(create_cm_command) - - if create_cm_response == 0: - action_set({'configmap-create': 'success'}) - else: - action_set({'configmap-create': 'failure'}) - - # Patch the "default" serviceaccount with an imagePullSecret. - # This will allow the docker daemons to authenticate to our private - # registry automatically - patch = '{"imagePullSecrets":[{"name":"registry-access"}]}' - patch_sa_command = kubectl + ['patch', 'sa', 'default', '-p', patch] - patch_sa_response = call(patch_sa_command) - - if patch_sa_response == 0: - action_set({'serviceaccount-patch': 'success'}) - else: - action_set({'serviceaccount-patch': 'failure'}) - - -else: - action_set({'registry-create': 'failure'}) diff --git a/kubernetes-worker/config.yaml b/kubernetes-worker/config.yaml index 69008f3..82c578b 100644 --- a/kubernetes-worker/config.yaml +++ b/kubernetes-worker/config.yaml @@ -97,14 +97,9 @@ "description": | Labels can be used to organize and to select subsets of nodes in the cluster. Declare node labels in key=value format, separated by spaces. - "allow-privileged": - "type": "string" - "default": "true" - "description": | - This option is now deprecated and has no effect. 
"channel": "type": "string" - "default": "1.21/stable" + "default": "1.23/edge" "description": | Snap channel to install Kubernetes worker services from "require-manual-upgrade": diff --git a/kubernetes-worker/hooks/relations/azure-integration/provides.py b/kubernetes-worker/hooks/relations/azure-integration/provides.py index e0d596e..5ff7d3a 100644 --- a/kubernetes-worker/hooks/relations/azure-integration/provides.py +++ b/kubernetes-worker/hooks/relations/azure-integration/provides.py @@ -136,13 +136,21 @@ class IntegrationRequest: def send_additional_metadata(self, resource_group_location, vnet_name, vnet_resource_group, - subnet_name, security_group_name): + subnet_name, security_group_name, + security_group_resource_group, + use_managed_identity=True, aad_client=None, + aad_secret=None, tenant_id=None): self._to_publish.update({ 'resource-group-location': resource_group_location, 'vnet-name': vnet_name, 'vnet-resource-group': vnet_resource_group, 'subnet-name': subnet_name, 'security-group-name': security_group_name, + 'security-group-resource-group': security_group_resource_group, + 'use-managed-identity': use_managed_identity, + 'aad-client': aad_client, + 'aad-client-secret': aad_secret, + 'tenant-id': tenant_id }) @property diff --git a/kubernetes-worker/hooks/relations/azure-integration/requires.py b/kubernetes-worker/hooks/relations/azure-integration/requires.py index 62f2b01..600d69e 100644 --- a/kubernetes-worker/hooks/relations/azure-integration/requires.py +++ b/kubernetes-worker/hooks/relations/azure-integration/requires.py @@ -211,8 +211,24 @@ class AzureIntegrationRequires(Endpoint): return requested and requested == completed @property - def credentials(self): - return self._received['credentials'] + def security_group_resource_group(self): + return self._received['security-group-resource-group'] + + @property + def managed_identity(self): + return self._received['use-managed-identity'] + + @property + def aad_client_id(self): + return 
self._received['aad-client'] + + @property + def aad_client_secret(self): + return self._received['aad-client-secret'] + + @property + def tenant_id(self): + return self._received['tenant-id'] def _request(self, keyvals): alphabet = string.ascii_letters + string.digits diff --git a/kubernetes-worker/hooks/relations/kube-control/provides.py b/kubernetes-worker/hooks/relations/kube-control/provides.py index 9d3a829..918ace1 100644 --- a/kubernetes-worker/hooks/relations/kube-control/provides.py +++ b/kubernetes-worker/hooks/relations/kube-control/provides.py @@ -150,3 +150,11 @@ class KubeControlProvider(Endpoint): """ for relation in self.relations: relation.to_publish['default-cni'] = default_cni + + def set_api_endpoints(self, endpoints): + """ + Send the list of API endpoint URLs to which workers should connect. + """ + endpoints = sorted(endpoints) + for relation in self.relations: + relation.to_publish['api-endpoints'] = endpoints diff --git a/kubernetes-worker/hooks/relations/kube-control/requires.py b/kubernetes-worker/hooks/relations/kube-control/requires.py index 72ce1f6..a0c3b0d 100644 --- a/kubernetes-worker/hooks/relations/kube-control/requires.py +++ b/kubernetes-worker/hooks/relations/kube-control/requires.py @@ -48,6 +48,9 @@ class KubeControlRequirer(Endpoint): toggle_flag( self.expand_name('{endpoint_name}.default_cni.available'), self.is_joined and self.get_default_cni() is not None) + toggle_flag( + self.expand_name('{endpoint_name}.api_endpoints.available'), + self.is_joined and self.get_api_endpoints()) def get_auth_credentials(self, user): """ @@ -147,3 +150,12 @@ class KubeControlRequirer(Endpoint): Default CNI network to use. """ return self.all_joined_units.received['default-cni'] + + def get_api_endpoints(self): + """ + Returns a list of API endpoint URLs. 
+ """ + endpoints = set() + for unit in self.all_joined_units: + endpoints.update(unit.received['api-endpoints'] or []) + return sorted(endpoints) diff --git a/kubernetes-worker/hooks/relations/kube-dns/README.md b/kubernetes-worker/hooks/relations/kube-dns/README.md deleted file mode 100644 index 15ce8bb..0000000 --- a/kubernetes-worker/hooks/relations/kube-dns/README.md +++ /dev/null @@ -1,21 +0,0 @@ -# Kube-DNS - -This interface allows a DNS provider, such as CoreDNS, to provide name -resolution for a Kubernetes cluster. - -(Note: this interface was previously used by the Kubernetes Master charm to -communicate the DNS provider info to the Kubernetes Worker charm, but that -usage was folded into the `kube-control` interface.) - - -# Provides - -The provider should look for the `{endpoint_name}.connected` flag and call -the `set_dns_info` method with the `domain`, `sdn_ip`, and `port` info (note: -these must be provided as keyword arguments). - -# Requires - -The requirer should look for the `{endpoint_name}.available` flag and call the -`details` method, which will return a dictionary with the `domain`, `sdn-ip`, -and `port` keys. diff --git a/kubernetes-worker/hooks/relations/kube-dns/interface.yaml b/kubernetes-worker/hooks/relations/kube-dns/interface.yaml deleted file mode 100644 index 2de32b0..0000000 --- a/kubernetes-worker/hooks/relations/kube-dns/interface.yaml +++ /dev/null @@ -1,4 +0,0 @@ -name: kube-dns -summary: provides the kubernetes dns settings -version: 1 -maintainer: "Charles Butler " diff --git a/kubernetes-worker/hooks/relations/kube-dns/provides.py b/kubernetes-worker/hooks/relations/kube-dns/provides.py deleted file mode 100644 index a7199c3..0000000 --- a/kubernetes-worker/hooks/relations/kube-dns/provides.py +++ /dev/null @@ -1,29 +0,0 @@ -#!/usr/bin/python -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from charms.reactive import Endpoint, toggle_flag - - -class KubeDNSProvider(Endpoint): - def manage_flags(self): - toggle_flag(self.expand_name('{endpoint_name}.connected'), - self.is_joined) - - def set_dns_info(self, *, domain, sdn_ip, port): - '''Set the domain, sdn_ip, and port of the DNS provider.''' - for relation in self.relations: - relation.to_publish_raw.update({ - 'domain': domain, - 'sdn-ip': sdn_ip, - 'port': port, - }) diff --git a/kubernetes-worker/hooks/relations/kube-dns/requires.py b/kubernetes-worker/hooks/relations/kube-dns/requires.py deleted file mode 100644 index 9595c4a..0000000 --- a/kubernetes-worker/hooks/relations/kube-dns/requires.py +++ /dev/null @@ -1,36 +0,0 @@ -#!/usr/bin/python -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -from charms.reactive import Endpoint, toggle_flag - - -class KubeDNSRequireer(Endpoint): - def manage_flags(self): - '''Set flags according to whether we have DNS provider details.''' - toggle_flag(self.expand_name('{endpoint_name}.available'), - self.has_info()) - - def details(self): - '''Return the DNS provider details.''' - return { - 'domain': self._get_value('domain'), - 'sdn-ip': self._get_value('sdn-ip'), - 'port': self._get_value('port'), - } - - def has_info(self): - ''' Determine if we have all needed info''' - return all(self.details().values()) - - def _get_value(self, key): - return self.all_joined_units.received_raw.get(key) diff --git a/kubernetes-worker/hooks/relations/kubernetes-cni/.github/workflows/tests.yaml b/kubernetes-worker/hooks/relations/kubernetes-cni/.github/workflows/tests.yaml new file mode 100644 index 0000000..9801450 --- /dev/null +++ b/kubernetes-worker/hooks/relations/kubernetes-cni/.github/workflows/tests.yaml @@ -0,0 +1,24 @@ +name: Test Suite for K8s Service Interface + +on: + - pull_request + +jobs: + lint-and-unit-tests: + name: Lint & Unit tests + runs-on: ubuntu-latest + strategy: + matrix: + python: [3.6, 3.7, 3.8, 3.9] + steps: + - name: Check out code + uses: actions/checkout@v2 + - name: Setup Python + uses: actions/setup-python@v2 + with: + python-version: ${{ matrix.python }} + - name: Install Tox + run: pip install tox + - name: Run lint & unit tests + run: tox + diff --git a/kubernetes-worker/hooks/relations/kubernetes-cni/.gitignore b/kubernetes-worker/hooks/relations/kubernetes-cni/.gitignore index e43b0f9..8d150f3 100644 --- a/kubernetes-worker/hooks/relations/kubernetes-cni/.gitignore +++ b/kubernetes-worker/hooks/relations/kubernetes-cni/.gitignore @@ -1 +1,4 @@ .DS_Store +.tox +__pycache__ +*.pyc diff --git a/kubernetes-worker/hooks/relations/kubernetes-cni/.travis.yml b/kubernetes-worker/hooks/relations/kubernetes-cni/.travis.yml deleted file mode 100644 index d2be8be..0000000 --- 
a/kubernetes-worker/hooks/relations/kubernetes-cni/.travis.yml +++ /dev/null @@ -1,9 +0,0 @@ -language: python -python: - - "3.5" - - "3.6" - - "3.7" -install: - - pip install tox-travis -script: - - tox diff --git a/kubernetes-worker/hooks/relations/kubernetes-cni/provides.py b/kubernetes-worker/hooks/relations/kubernetes-cni/provides.py index 0b4aada..9095c19 100644 --- a/kubernetes-worker/hooks/relations/kubernetes-cni/provides.py +++ b/kubernetes-worker/hooks/relations/kubernetes-cni/provides.py @@ -1,48 +1,46 @@ #!/usr/bin/python from charmhelpers.core import hookenv +from charmhelpers.core.host import file_hash +from charms.layer.kubernetes_common import kubeclientconfig_path from charms.reactive import Endpoint from charms.reactive import toggle_flag, is_flag_set, clear_flag, set_flag class CNIPluginProvider(Endpoint): def manage_flags(self): - toggle_flag(self.expand_name('{endpoint_name}.connected'), - self.is_joined) - toggle_flag(self.expand_name('{endpoint_name}.available'), - self.config_available()) - if is_flag_set(self.expand_name('endpoint.{endpoint_name}.changed')): - clear_flag(self.expand_name('{endpoint_name}.configured')) - clear_flag(self.expand_name('endpoint.{endpoint_name}.changed')) + toggle_flag(self.expand_name("{endpoint_name}.connected"), self.is_joined) + toggle_flag( + self.expand_name("{endpoint_name}.available"), self.config_available() + ) + if is_flag_set(self.expand_name("endpoint.{endpoint_name}.changed")): + clear_flag(self.expand_name("{endpoint_name}.configured")) + clear_flag(self.expand_name("endpoint.{endpoint_name}.changed")) - def set_config(self, is_master, kubeconfig_path): - ''' Relays a dict of kubernetes configuration information. 
''' + def set_config(self, is_master): + """Relays a dict of kubernetes configuration information.""" for relation in self.relations: - relation.to_publish_raw.update({ - 'is_master': is_master, - 'kubeconfig_path': kubeconfig_path - }) - set_flag(self.expand_name('{endpoint_name}.configured')) + relation.to_publish_raw.update({"is_master": is_master}) + set_flag(self.expand_name("{endpoint_name}.configured")) def config_available(self): - ''' Ensures all config from the CNI plugin is available. ''' + """Ensures all config from the CNI plugin is available.""" goal_state = hookenv.goal_state() related_apps = [ - app for app in goal_state.get('relations', {}).get(self.endpoint_name, '') - if '/' not in app + app + for app in goal_state.get("relations", {}).get(self.endpoint_name, "") + if "/" not in app ] if not related_apps: return False configs = self.get_configs() return all( - 'cidr' in config and 'cni-conf-file' in config - for config in [ - configs.get(related_app, {}) for related_app in related_apps - ] + "cidr" in config and "cni-conf-file" in config + for config in [configs.get(related_app, {}) for related_app in related_apps] ) def get_config(self, default=None): - ''' Get CNI config for one related application. + """Get CNI config for one related application. If default is specified, and there is a related application with a matching name, then that application is chosen. Otherwise, the @@ -50,13 +48,13 @@ class CNIPluginProvider(Endpoint): Whichever application is chosen, that application's CNI config is returned. 
- ''' + """ configs = self.get_configs() if not configs: return {} elif default and default not in configs: - msg = 'relation not found for default CNI %s, ignoring' % default - hookenv.log(msg, level='WARN') + msg = "relation not found for default CNI %s, ignoring" % default + hookenv.log(msg, level="WARN") return self.get_config() elif default: return configs.get(default, {}) @@ -64,7 +62,7 @@ class CNIPluginProvider(Endpoint): return configs.get(sorted(configs)[0], {}) def get_configs(self): - ''' Get CNI configs for all related applications. + """Get CNI configs for all related applications. This returns a mapping of application names to CNI configs. Here's an example return value: @@ -78,8 +76,14 @@ class CNIPluginProvider(Endpoint): 'cni-conf-file': '10-calico.conflist' } } - ''' + """ return { relation.application_name: relation.joined_units.received_raw - for relation in self.relations if relation.application_name + for relation in self.relations + if relation.application_name } + + def notify_kubeconfig_changed(self): + kubeconfig_hash = file_hash(kubeclientconfig_path) + for relation in self.relations: + relation.to_publish_raw.update({"kubeconfig-hash": kubeconfig_hash}) diff --git a/kubernetes-worker/hooks/relations/kubernetes-cni/requires.py b/kubernetes-worker/hooks/relations/kubernetes-cni/requires.py index 039b912..2067826 100644 --- a/kubernetes-worker/hooks/relations/kubernetes-cni/requires.py +++ b/kubernetes-worker/hooks/relations/kubernetes-cni/requires.py @@ -1,45 +1,54 @@ #!/usr/bin/python +from charmhelpers.core import unitdata from charms.reactive import Endpoint from charms.reactive import when_any, when_not from charms.reactive import set_state, remove_state +db = unitdata.kv() + class CNIPluginClient(Endpoint): + def manage_flags(self): + kubeconfig_hash = self.get_config().get("kubeconfig-hash") + kubeconfig_hash_key = self.expand_name("{endpoint_name}.kubeconfig-hash") + if kubeconfig_hash: + 
set_state(self.expand_name("{endpoint_name}.kubeconfig.available")) + if kubeconfig_hash != db.get(kubeconfig_hash_key): + set_state(self.expand_name("{endpoint_name}.kubeconfig.changed")) + db.set(kubeconfig_hash_key, kubeconfig_hash) - @when_any('endpoint.{endpoint_name}.joined', - 'endpoint.{endpoint_name}.changed') + @when_any("endpoint.{endpoint_name}.joined", "endpoint.{endpoint_name}.changed") def changed(self): - ''' Indicate the relation is connected, and if the relation data is - set it is also available. ''' - set_state(self.expand_name('{endpoint_name}.connected')) + """Indicate the relation is connected, and if the relation data is + set it is also available.""" + set_state(self.expand_name("{endpoint_name}.connected")) config = self.get_config() - if config['is_master'] == 'True': - set_state(self.expand_name('{endpoint_name}.is-master')) - set_state(self.expand_name('{endpoint_name}.configured')) - elif config['is_master'] == 'False': - set_state(self.expand_name('{endpoint_name}.is-worker')) - set_state(self.expand_name('{endpoint_name}.configured')) + if config["is_master"] == "True": + set_state(self.expand_name("{endpoint_name}.is-master")) + set_state(self.expand_name("{endpoint_name}.configured")) + elif config["is_master"] == "False": + set_state(self.expand_name("{endpoint_name}.is-worker")) + set_state(self.expand_name("{endpoint_name}.configured")) else: - remove_state(self.expand_name('{endpoint_name}.configured')) - remove_state(self.expand_name('endpoint.{endpoint_name}.changed')) + remove_state(self.expand_name("{endpoint_name}.configured")) + remove_state(self.expand_name("endpoint.{endpoint_name}.changed")) - @when_not('endpoint.{endpoint_name}.joined') + @when_not("endpoint.{endpoint_name}.joined") def broken(self): - ''' Indicate the relation is no longer available and not connected. 
''' - remove_state(self.expand_name('{endpoint_name}.connected')) - remove_state(self.expand_name('{endpoint_name}.is-master')) - remove_state(self.expand_name('{endpoint_name}.is-worker')) - remove_state(self.expand_name('{endpoint_name}.configured')) + """Indicate the relation is no longer available and not connected.""" + remove_state(self.expand_name("{endpoint_name}.connected")) + remove_state(self.expand_name("{endpoint_name}.is-master")) + remove_state(self.expand_name("{endpoint_name}.is-worker")) + remove_state(self.expand_name("{endpoint_name}.configured")) def get_config(self): - ''' Get the kubernetes configuration information. ''' + """Get the kubernetes configuration information.""" return self.all_joined_units.received_raw def set_config(self, cidr, cni_conf_file): - ''' Sets the CNI configuration information. ''' + """Sets the CNI configuration information.""" for relation in self.relations: - relation.to_publish_raw.update({ - 'cidr': cidr, - 'cni-conf-file': cni_conf_file - }) + relation.to_publish_raw.update( + {"cidr": cidr, "cni-conf-file": cni_conf_file} + ) diff --git a/kubernetes-worker/layer.yaml b/kubernetes-worker/layer.yaml index 7f20f03..3945048 100644 --- a/kubernetes-worker/layer.yaml +++ b/kubernetes-worker/layer.yaml @@ -17,7 +17,6 @@ - "layer:kubernetes-master-worker-base" - "interface:http" - "interface:kubernetes-cni" -- "interface:kube-dns" - "interface:kube-control" - "interface:aws-integration" - "interface:gcp-integration" diff --git a/kubernetes-worker/lib/charms/layer/basic.py b/kubernetes-worker/lib/charms/layer/basic.py index 7507203..bbdd074 100644 --- a/kubernetes-worker/lib/charms/layer/basic.py +++ b/kubernetes-worker/lib/charms/layer/basic.py @@ -199,7 +199,13 @@ def bootstrap_charm_deps(): # a set so that we can ignore the pre-install packages and let pip # choose the best version in case there are multiple from layer # conflicts) - pkgs = _load_wheelhouse_versions().keys() - set(pre_install_pkgs) + _versions = 
_load_wheelhouse_versions() + _pkgs = _versions.keys() - set(pre_install_pkgs) + # add back the versions such that each package in pkgs is + # ==. + # This ensures that pip 20.3.4+ will install the packages from the + # wheelhouse without (erroneously) flagging an error. + pkgs = _add_back_versions(_pkgs, _versions) reinstall_flag = '--force-reinstall' if not cfg.get('use_venv', True) and pre_eoan: reinstall_flag = '--ignore-installed' @@ -278,6 +284,55 @@ def _load_wheelhouse_versions(): return versions +def _add_back_versions(pkgs, versions): + """Add back the version strings to each of the packages. + + The versions are LooseVersion() from _load_wheelhouse_versions(). This + function strips the ".zip" or ".tar.gz" from the end of the version string + and adds it back to the package in the form of == + + If a package name is not a key in the versions dictionary, then it is + returned in the list unchanged. + + :param pkgs: A list of package names + :type pkgs: List[str] + :param versions: A map of package to LooseVersion + :type versions: Dict[str, LooseVersion] + :returns: A list of (maybe) versioned packages + :rtype: List[str] + """ + def _strip_ext(s): + """Strip an extension (if it exists) from the string + + :param s: the string to strip an extension off if it exists + :type s: str + :returns: string without an extension of .zip or .tar.gz + :rtype: str + """ + for ending in [".zip", ".tar.gz"]: + if s.endswith(ending): + return s[:-len(ending)] + return s + + def _maybe_add_version(pkg): + """Maybe add back the version number to a package if it exists. + + Adds the version number, if the package exists in the lexically + captured `versions` dictionary, in the form ==. Strips + the extension if it exists. + + :param pkg: the package name to (maybe) add the version number to. 
+ :type pkg: str + """ + try: + return "{}=={}".format(pkg, _strip_ext(str(versions[pkg]))) + except KeyError: + pass + return pkg + + return [_maybe_add_version(pkg) for pkg in pkgs] + + def _update_if_newer(pip, pkgs): installed = _load_installed_versions(pip) wheelhouse = _load_wheelhouse_versions() diff --git a/kubernetes-worker/lib/charms/layer/kubernetes_common.py b/kubernetes-worker/lib/charms/layer/kubernetes_common.py index 0ac309f..fb14ad2 100644 --- a/kubernetes-worker/lib/charms/layer/kubernetes_common.py +++ b/kubernetes-worker/lib/charms/layer/kubernetes_common.py @@ -21,7 +21,12 @@ import subprocess import hashlib import json import traceback +import random +import string +import tempfile +import yaml +from base64 import b64decode, b64encode from pathlib import Path from subprocess import check_output, check_call from socket import gethostname, getfqdn @@ -29,19 +34,23 @@ from shlex import split from subprocess import CalledProcessError from charmhelpers.core import hookenv, unitdata from charmhelpers.core import host +from charmhelpers.core.templating import render from charms.reactive import endpoint_from_flag, is_state from time import sleep +AUTH_SECRET_NS = "kube-system" +AUTH_SECRET_TYPE = "juju.is/token-auth" + db = unitdata.kv() -kubeclientconfig_path = '/root/.kube/config' -gcp_creds_env_key = 'GOOGLE_APPLICATION_CREDENTIALS' -kubeproxyconfig_path = '/root/cdk/kubeproxyconfig' -certs_dir = Path('/root/cdk') -ca_crt_path = certs_dir / 'ca.crt' -server_crt_path = certs_dir / 'server.crt' -server_key_path = certs_dir / 'server.key' -client_crt_path = certs_dir / 'client.crt' -client_key_path = certs_dir / 'client.key' +kubeclientconfig_path = "/root/.kube/config" +gcp_creds_env_key = "GOOGLE_APPLICATION_CREDENTIALS" +kubeproxyconfig_path = "/root/cdk/kubeproxyconfig" +certs_dir = Path("/root/cdk") +ca_crt_path = certs_dir / "ca.crt" +server_crt_path = certs_dir / "server.crt" +server_key_path = certs_dir / "server.key" +client_crt_path = 
certs_dir / "client.crt" +client_key_path = certs_dir / "client.key" def get_version(bin_name): @@ -56,13 +65,13 @@ def get_version(bin_name): (1, 6, 0) """ - cmd = '{} --version'.format(bin_name).split() - version_string = subprocess.check_output(cmd).decode('utf-8') + cmd = "{} --version".format(bin_name).split() + version_string = subprocess.check_output(cmd).decode("utf-8") return tuple(int(q) for q in re.findall("[0-9]+", version_string)[:3]) def retry(times, delay_secs): - """ Decorator for retrying a method call. + """Decorator for retrying a method call. Args: times: How many times should we retry before giving up @@ -72,7 +81,7 @@ def retry(times, delay_secs): """ def retry_decorator(func): - """ Decorator to wrap the function provided. + """Decorator to wrap the function provided. Args: func: Provided function should return either True od False @@ -80,6 +89,7 @@ def retry(times, delay_secs): Returns: A callable that would return the last call outcome """ + def _wrapped(*args, **kwargs): res = func(*args, **kwargs) attempt = 0 @@ -90,36 +100,37 @@ def retry(times, delay_secs): break attempt += 1 return res + return _wrapped return retry_decorator def calculate_resource_checksum(resource): - ''' Calculate a checksum for a resource ''' + """Calculate a checksum for a resource""" md5 = hashlib.md5() path = hookenv.resource_get(resource) if path: - with open(path, 'rb') as f: + with open(path, "rb") as f: data = f.read() md5.update(data) return md5.hexdigest() def get_resource_checksum_db_key(checksum_prefix, resource): - ''' Convert a resource name to a resource checksum database key. 
''' + """Convert a resource name to a resource checksum database key.""" return checksum_prefix + resource def migrate_resource_checksums(checksum_prefix, snap_resources): - ''' Migrate resource checksums from the old schema to the new one ''' + """Migrate resource checksums from the old schema to the new one""" for resource in snap_resources: new_key = get_resource_checksum_db_key(checksum_prefix, resource) if not db.get(new_key): path = hookenv.resource_get(resource) if path: # old key from charms.reactive.helpers.any_file_changed - old_key = 'reactive.files_changed.' + path + old_key = "reactive.files_changed." + path old_checksum = db.get(old_key) db.set(new_key, old_checksum) else: @@ -131,7 +142,7 @@ def migrate_resource_checksums(checksum_prefix, snap_resources): def check_resources_for_upgrade_needed(checksum_prefix, snap_resources): - hookenv.status_set('maintenance', 'Checking resources') + hookenv.status_set("maintenance", "Checking resources") for resource in snap_resources: key = get_resource_checksum_db_key(checksum_prefix, resource) old_checksum = db.get(key) @@ -148,25 +159,31 @@ def calculate_and_store_resource_checksums(checksum_prefix, snap_resources): db.set(key, checksum) -def get_ingress_address(endpoint_name): +def get_ingress_address(endpoint_name, ignore_addresses=None): try: network_info = hookenv.network_get(endpoint_name) except NotImplementedError: network_info = {} - if not network_info or 'ingress-addresses' not in network_info: + if not network_info or "ingress-addresses" not in network_info: # if they don't have ingress-addresses they are running a juju that # doesn't support spaces, so just return the private address - return hookenv.unit_get('private-address') + return hookenv.unit_get("private-address") - addresses = network_info['ingress-addresses'] + addresses = network_info["ingress-addresses"] + + if ignore_addresses: + hookenv.log("ingress-addresses before filtering: {}".format(addresses)) + iter_filter = filter(lambda item: 
item not in ignore_addresses, addresses) + addresses = list(iter_filter) + hookenv.log("ingress-addresses after filtering: {}".format(addresses)) # Need to prefer non-fan IP addresses due to various issues, e.g. # https://bugs.launchpad.net/charm-gcp-integrator/+bug/1822997 # Fan typically likes to use IPs in the 240.0.0.0/4 block, so we'll # prioritize those last. Not technically correct, but good enough. try: - sort_key = lambda a: int(a.partition('.')[0]) >= 240 # noqa: E731 + sort_key = lambda a: int(a.partition(".")[0]) >= 240 # noqa: E731 addresses = sorted(addresses, key=sort_key) except Exception: hookenv.log(traceback.format_exc()) @@ -180,10 +197,10 @@ def get_ingress_address6(endpoint_name): except NotImplementedError: network_info = {} - if not network_info or 'ingress-addresses' not in network_info: + if not network_info or "ingress-addresses" not in network_info: return None - addresses = network_info['ingress-addresses'] + addresses = network_info["ingress-addresses"] for addr in addresses: ip_addr = ipaddress.ip_interface(addr).ip @@ -194,35 +211,35 @@ def get_ingress_address6(endpoint_name): def service_restart(service_name): - hookenv.status_set('maintenance', 'Restarting {0} service'.format( - service_name)) + hookenv.status_set("maintenance", "Restarting {0} service".format(service_name)) host.service_restart(service_name) def service_start(service_name): - hookenv.log('Starting {0} service.'.format(service_name)) + hookenv.log("Starting {0} service.".format(service_name)) host.service_stop(service_name) def service_stop(service_name): - hookenv.log('Stopping {0} service.'.format(service_name)) + hookenv.log("Stopping {0} service.".format(service_name)) host.service_stop(service_name) def arch(): - '''Return the package architecture as a string. Raise an exception if the - architecture is not supported by kubernetes.''' + """Return the package architecture as a string. 
Raise an exception if the + architecture is not supported by kubernetes.""" # Get the package architecture for this system. - architecture = check_output(['dpkg', '--print-architecture']).rstrip() + architecture = check_output(["dpkg", "--print-architecture"]).rstrip() # Convert the binary result into a string. - architecture = architecture.decode('utf-8') + architecture = architecture.decode("utf-8") return architecture def get_service_ip(service, namespace="kube-system", errors_fatal=True): try: - output = kubectl('get', 'service', '--namespace', namespace, service, - '--output', 'json') + output = kubectl( + "get", "service", "--namespace", namespace, service, "--output", "json" + ) except CalledProcessError: if errors_fatal: raise @@ -230,20 +247,20 @@ def get_service_ip(service, namespace="kube-system", errors_fatal=True): return None else: svc = json.loads(output.decode()) - return svc['spec']['clusterIP'] + return svc["spec"]["clusterIP"] def kubectl(*args): - ''' Run a kubectl cli command with a config file. Returns stdout and throws - an error if the command fails. ''' - command = ['kubectl', '--kubeconfig=' + kubeclientconfig_path] + list(args) - hookenv.log('Executing {}'.format(command)) + """Run a kubectl cli command with a config file. Returns stdout and throws + an error if the command fails.""" + command = ["kubectl", "--kubeconfig=" + kubeclientconfig_path] + list(args) + hookenv.log("Executing {}".format(command)) return check_output(command) def kubectl_success(*args): - ''' Runs kubectl with the given args. Returns True if successful, False if - not. ''' + """Runs kubectl with the given args. 
Returns True if successful, False if + not.""" try: kubectl(*args) return True @@ -252,75 +269,97 @@ def kubectl_success(*args): def kubectl_manifest(operation, manifest): - ''' Wrap the kubectl creation command when using filepath resources + """Wrap the kubectl creation command when using filepath resources :param operation - one of get, create, delete, replace :param manifest - filepath to the manifest - ''' + """ # Deletions are a special case - if operation == 'delete': + if operation == "delete": # Ensure we immediately remove requested resources with --now - return kubectl_success(operation, '-f', manifest, '--now') + return kubectl_success(operation, "-f", manifest, "--now") else: # Guard against an error re-creating the same manifest multiple times - if operation == 'create': + if operation == "create": # If we already have the definition, its probably safe to assume # creation was true. - if kubectl_success('get', '-f', manifest): - hookenv.log('Skipping definition for {}'.format(manifest)) + if kubectl_success("get", "-f", manifest): + hookenv.log("Skipping definition for {}".format(manifest)) return True # Execute the requested command that did not match any of the special # cases above - return kubectl_success(operation, '-f', manifest) + return kubectl_success(operation, "-f", manifest) def get_node_name(): - kubelet_extra_args = parse_extra_args('kubelet-extra-args') - cloud_provider = kubelet_extra_args.get('cloud-provider', '') - if is_state('endpoint.aws.ready'): - cloud_provider = 'aws' - elif is_state('endpoint.gcp.ready'): - cloud_provider = 'gce' - elif is_state('endpoint.openstack.ready'): - cloud_provider = 'openstack' - elif is_state('endpoint.vsphere.ready'): - cloud_provider = 'vsphere' - elif is_state('endpoint.azure.ready'): - cloud_provider = 'azure' - if cloud_provider == 'aws': + kubelet_extra_args = parse_extra_args("kubelet-extra-args") + cloud_provider = kubelet_extra_args.get("cloud-provider", "") + if 
is_state("endpoint.aws.ready"): + cloud_provider = "aws" + elif is_state("endpoint.gcp.ready"): + cloud_provider = "gce" + elif is_state("endpoint.openstack.ready"): + cloud_provider = "openstack" + elif is_state("endpoint.vsphere.ready"): + cloud_provider = "vsphere" + elif is_state("endpoint.azure.ready"): + cloud_provider = "azure" + if cloud_provider == "aws": return getfqdn().lower() else: return gethostname().lower() -def create_kubeconfig(kubeconfig, server, ca, key=None, certificate=None, - user='ubuntu', context='juju-context', - cluster='juju-cluster', password=None, token=None, - keystone=False, aws_iam_cluster_id=None): - '''Create a configuration for Kubernetes based on path using the supplied +def create_kubeconfig( + kubeconfig, + server, + ca, + key=None, + certificate=None, + user="ubuntu", + context="juju-context", + cluster="juju-cluster", + password=None, + token=None, + keystone=False, + aws_iam_cluster_id=None, +): + """Create a configuration for Kubernetes based on path using the supplied arguments for values of the Kubernetes server, CA, key, certificate, user - context and cluster.''' + context and cluster.""" if not key and not certificate and not password and not token: - raise ValueError('Missing authentication mechanism.') + raise ValueError("Missing authentication mechanism.") + elif key and not certificate: + raise ValueError("Missing certificate.") + elif not key and certificate: + raise ValueError("Missing key.") + elif token and password: + # token and password are mutually exclusive. Error early if both are + # present. The developer has requested an impossible situation. + # see: kubectl config set-credentials --help + raise ValueError("Token and Password are mutually exclusive.") + + old_kubeconfig = Path(kubeconfig) + new_kubeconfig = Path(str(kubeconfig) + ".new") - # token and password are mutually exclusive. Error early if both are - # present. The developer has requested an impossible situation. 
- # see: kubectl config set-credentials --help - if token and password: - raise ValueError('Token and Password are mutually exclusive.') # Create the config file with the address of the master server. - cmd = 'kubectl config --kubeconfig={0} set-cluster {1} ' \ - '--server={2} --certificate-authority={3} --embed-certs=true' - check_call(split(cmd.format(kubeconfig, cluster, server, ca))) + cmd = ( + "kubectl config --kubeconfig={0} set-cluster {1} " + "--server={2} --certificate-authority={3} --embed-certs=true" + ) + check_call(split(cmd.format(new_kubeconfig, cluster, server, ca))) # Delete old users - cmd = 'kubectl config --kubeconfig={0} unset users' - check_call(split(cmd.format(kubeconfig))) + cmd = "kubectl config --kubeconfig={0} unset users" + check_call(split(cmd.format(new_kubeconfig))) # Create the credentials using the client flags. - cmd = 'kubectl config --kubeconfig={0} ' \ - 'set-credentials {1} '.format(kubeconfig, user) + cmd = "kubectl config --kubeconfig={0} " "set-credentials {1} ".format( + new_kubeconfig, user + ) if key and certificate: - cmd = '{0} --client-key={1} --client-certificate={2} '\ - '--embed-certs=true'.format(cmd, key, certificate) + cmd = ( + "{0} --client-key={1} --client-certificate={2} " + "--embed-certs=true".format(cmd, key, certificate) + ) if password: cmd = "{0} --username={1} --password={2}".format(cmd, user, password) # This is mutually exclusive from password. They will not work together. @@ -328,71 +367,87 @@ def create_kubeconfig(kubeconfig, server, ca, key=None, certificate=None, cmd = "{0} --token={1}".format(cmd, token) check_call(split(cmd)) # Create a default context with the cluster. 
- cmd = 'kubectl config --kubeconfig={0} set-context {1} ' \ - '--cluster={2} --user={3}' - check_call(split(cmd.format(kubeconfig, context, cluster, user))) + cmd = "kubectl config --kubeconfig={0} set-context {1} " "--cluster={2} --user={3}" + check_call(split(cmd.format(new_kubeconfig, context, cluster, user))) # Make the config use this new context. - cmd = 'kubectl config --kubeconfig={0} use-context {1}' - check_call(split(cmd.format(kubeconfig, context))) + cmd = "kubectl config --kubeconfig={0} use-context {1}" + check_call(split(cmd.format(new_kubeconfig, context))) if keystone: # create keystone user - cmd = 'kubectl config --kubeconfig={0} ' \ - 'set-credentials keystone-user'.format(kubeconfig) + cmd = "kubectl config --kubeconfig={0} " "set-credentials keystone-user".format( + new_kubeconfig + ) check_call(split(cmd)) # create keystone context - cmd = 'kubectl config --kubeconfig={0} ' \ - 'set-context --cluster={1} ' \ - '--user=keystone-user keystone'.format(kubeconfig, cluster) + cmd = ( + "kubectl config --kubeconfig={0} " + "set-context --cluster={1} " + "--user=keystone-user keystone".format(new_kubeconfig, cluster) + ) check_call(split(cmd)) # use keystone context - cmd = 'kubectl config --kubeconfig={0} ' \ - 'use-context keystone'.format(kubeconfig) + cmd = "kubectl config --kubeconfig={0} " "use-context keystone".format( + new_kubeconfig + ) check_call(split(cmd)) # manually add exec command until kubectl can do it for us - with open(kubeconfig, "r") as f: + with open(new_kubeconfig, "r") as f: content = f.read() - content = content.replace("""- name: keystone-user - user: {}""", """- name: keystone-user + content = content.replace( + """- name: keystone-user + user: {}""", + """- name: keystone-user user: exec: command: "/snap/bin/client-keystone-auth" apiVersion: "client.authentication.k8s.io/v1beta1" -""") - with open(kubeconfig, "w") as f: +""", + ) + with open(new_kubeconfig, "w") as f: f.write(content) if aws_iam_cluster_id: # create 
aws-iam context - cmd = 'kubectl config --kubeconfig={0} ' \ - 'set-context --cluster={1} ' \ - '--user=aws-iam-user aws-iam-authenticator' - check_call(split(cmd.format(kubeconfig, cluster))) + cmd = ( + "kubectl config --kubeconfig={0} " + "set-context --cluster={1} " + "--user=aws-iam-user aws-iam-authenticator" + ) + check_call(split(cmd.format(new_kubeconfig, cluster))) # append a user for aws-iam - cmd = 'kubectl --kubeconfig={0} config set-credentials ' \ - 'aws-iam-user --exec-command=aws-iam-authenticator ' \ - '--exec-arg="token" --exec-arg="-i" --exec-arg="{1}" ' \ - '--exec-arg="-r" --exec-arg="<>" ' \ - '--exec-api-version=client.authentication.k8s.io/v1alpha1' - check_call(split(cmd.format(kubeconfig, aws_iam_cluster_id))) + cmd = ( + "kubectl --kubeconfig={0} config set-credentials " + "aws-iam-user --exec-command=aws-iam-authenticator " + '--exec-arg="token" --exec-arg="-i" --exec-arg="{1}" ' + '--exec-arg="-r" --exec-arg="<>" ' + "--exec-api-version=client.authentication.k8s.io/v1alpha1" + ) + check_call(split(cmd.format(new_kubeconfig, aws_iam_cluster_id))) # not going to use aws-iam context by default since we don't have # the desired arn. This will make the config not usable if copied. 
# cmd = 'kubectl config --kubeconfig={0} ' \ - # 'use-context aws-iam-authenticator'.format(kubeconfig) + # 'use-context aws-iam-authenticator'.format(new_kubeconfig) # check_call(split(cmd)) + if old_kubeconfig.exists(): + changed = new_kubeconfig.read_text() != old_kubeconfig.read_text() + else: + changed = True + if changed: + new_kubeconfig.rename(old_kubeconfig) def parse_extra_args(config_key): - elements = hookenv.config().get(config_key, '').split() + elements = hookenv.config().get(config_key, "").split() args = {} for element in elements: - if '=' in element: - key, _, value = element.partition('=') + if "=" in element: + key, _, value = element.partition("=") args[key] = value else: - args[element] = 'true' + args[element] = "true" return args @@ -411,7 +466,7 @@ def configure_kubernetes_service(key, service, base_args, extra_args_key): # CIS benchmark action may inject kv config to pass failing tests. Merge # these after the func args as they should take precedence. - cis_args_key = 'cis-' + service + cis_args_key = "cis-" + service cis_args = db.get(cis_args_key) or {} args.update(cis_args) @@ -419,16 +474,16 @@ def configure_kubernetes_service(key, service, base_args, extra_args_key): # construct an arg string for use by 'snap set'. 
args = {k: v for k, v in args.items() if v is not None} args = ['--%s="%s"' % arg for arg in args.items()] - args = ' '.join(args) + args = " ".join(args) snap_opts = {} for arg in prev_snap_args: # remove previous args by setting to null - snap_opts[arg] = 'null' - snap_opts['args'] = args - snap_opts = ['%s=%s' % opt for opt in snap_opts.items()] + snap_opts[arg] = "null" + snap_opts["args"] = args + snap_opts = ["%s=%s" % opt for opt in snap_opts.items()] - cmd = ['snap', 'set', service] + snap_opts + cmd = ["snap", "set", service] + snap_opts check_call(cmd) # Now that we've started doing snap configuration through the "args" @@ -437,36 +492,36 @@ def configure_kubernetes_service(key, service, base_args, extra_args_key): def _snap_common_path(component): - return Path('/var/snap/{}/common'.format(component)) + return Path("/var/snap/{}/common".format(component)) def cloud_config_path(component): - return _snap_common_path(component) / 'cloud-config.conf' + return _snap_common_path(component) / "cloud-config.conf" def _gcp_creds_path(component): - return _snap_common_path(component) / 'gcp-creds.json' + return _snap_common_path(component) / "gcp-creds.json" def _daemon_env_path(component): - return _snap_common_path(component) / 'environment' + return _snap_common_path(component) / "environment" def _cloud_endpoint_ca_path(component): - return _snap_common_path(component) / 'cloud-endpoint-ca.crt' + return _snap_common_path(component) / "cloud-endpoint-ca.crt" def encryption_config_path(): - apiserver_snap_common_path = _snap_common_path('kube-apiserver') - encryption_conf_dir = apiserver_snap_common_path / 'encryption' - return encryption_conf_dir / 'encryption_config.yaml' + apiserver_snap_common_path = _snap_common_path("kube-apiserver") + encryption_conf_dir = apiserver_snap_common_path / "encryption" + return encryption_conf_dir / "encryption_config.yaml" def write_gcp_snap_config(component): # gcp requires additional credentials setup - gcp = 
endpoint_from_flag('endpoint.gcp.ready') + gcp = endpoint_from_flag("endpoint.gcp.ready") creds_path = _gcp_creds_path(component) - with creds_path.open('w') as fp: + with creds_path.open("w") as fp: os.fchmod(fp.fileno(), 0o600) fp.write(gcp.credentials) @@ -474,197 +529,206 @@ def write_gcp_snap_config(component): # services use the creds env var instead of the metadata server, as # well as making the cluster multizone comp_cloud_config_path = cloud_config_path(component) - comp_cloud_config_path.write_text('[Global]\n' - 'token-url = nil\n' - 'multizone = true\n') + comp_cloud_config_path.write_text( + "[Global]\n" "token-url = nil\n" "multizone = true\n" + ) daemon_env_path = _daemon_env_path(component) if daemon_env_path.exists(): daemon_env = daemon_env_path.read_text() - if not daemon_env.endswith('\n'): - daemon_env += '\n' + if not daemon_env.endswith("\n"): + daemon_env += "\n" else: - daemon_env = '' + daemon_env = "" if gcp_creds_env_key not in daemon_env: - daemon_env += '{}={}\n'.format(gcp_creds_env_key, creds_path) + daemon_env += "{}={}\n".format(gcp_creds_env_key, creds_path) daemon_env_path.parent.mkdir(parents=True, exist_ok=True) daemon_env_path.write_text(daemon_env) def generate_openstack_cloud_config(): # openstack requires additional credentials setup - openstack = endpoint_from_flag('endpoint.openstack.ready') + openstack = endpoint_from_flag("endpoint.openstack.ready") lines = [ - '[Global]', - 'auth-url = {}'.format(openstack.auth_url), - 'region = {}'.format(openstack.region), - 'username = {}'.format(openstack.username), - 'password = {}'.format(openstack.password), - 'tenant-name = {}'.format(openstack.project_name), - 'domain-name = {}'.format(openstack.user_domain_name), - 'tenant-domain-name = {}'.format(openstack.project_domain_name), + "[Global]", + "auth-url = {}".format(openstack.auth_url), + "region = {}".format(openstack.region), + "username = {}".format(openstack.username), + "password = {}".format(openstack.password), + 
"tenant-name = {}".format(openstack.project_name), + "domain-name = {}".format(openstack.user_domain_name), + "tenant-domain-name = {}".format(openstack.project_domain_name), ] if openstack.endpoint_tls_ca: - lines.append('ca-file = /etc/config/endpoint-ca.cert') + lines.append("ca-file = /etc/config/endpoint-ca.cert") - lines.extend([ - '', - '[LoadBalancer]', - ]) + lines.extend( + [ + "", + "[LoadBalancer]", + ] + ) if openstack.has_octavia in (True, None): # Newer integrator charm will detect whether underlying OpenStack has # Octavia enabled so we can set this intelligently. If we're still # related to an older integrator, though, default to assuming Octavia # is available. - lines.append('use-octavia = true') + lines.append("use-octavia = true") else: - lines.append('use-octavia = false') - lines.append('lb-provider = haproxy') + lines.append("use-octavia = false") + lines.append("lb-provider = haproxy") if openstack.subnet_id: - lines.append('subnet-id = {}'.format(openstack.subnet_id)) + lines.append("subnet-id = {}".format(openstack.subnet_id)) if openstack.floating_network_id: - lines.append('floating-network-id = {}'.format( - openstack.floating_network_id)) + lines.append("floating-network-id = {}".format(openstack.floating_network_id)) if openstack.lb_method: - lines.append('lb-method = {}'.format( - openstack.lb_method)) + lines.append("lb-method = {}".format(openstack.lb_method)) if openstack.manage_security_groups: - lines.append('manage-security-groups = {}'.format( - openstack.manage_security_groups)) - if any([openstack.bs_version, - openstack.trust_device_path, - openstack.ignore_volume_az]): - lines.append('') - lines.append('[BlockStorage]') + lines.append( + "manage-security-groups = {}".format(openstack.manage_security_groups) + ) + if any( + [openstack.bs_version, openstack.trust_device_path, openstack.ignore_volume_az] + ): + lines.append("") + lines.append("[BlockStorage]") if openstack.bs_version is not None: - lines.append('bs-version = 
{}'.format(openstack.bs_version)) + lines.append("bs-version = {}".format(openstack.bs_version)) if openstack.trust_device_path is not None: - lines.append('trust-device-path = {}'.format( - openstack.trust_device_path)) + lines.append("trust-device-path = {}".format(openstack.trust_device_path)) if openstack.ignore_volume_az is not None: - lines.append('ignore-volume-az = {}'.format( - openstack.ignore_volume_az)) - return '\n'.join(lines) + '\n' + lines.append("ignore-volume-az = {}".format(openstack.ignore_volume_az)) + return "\n".join(lines) + "\n" def write_azure_snap_config(component): - azure = endpoint_from_flag('endpoint.azure.ready') + azure = endpoint_from_flag("endpoint.azure.ready") comp_cloud_config_path = cloud_config_path(component) - comp_cloud_config_path.write_text(json.dumps({ - 'useInstanceMetadata': True, - 'useManagedIdentityExtension': True, - 'subscriptionId': azure.subscription_id, - 'resourceGroup': azure.resource_group, - 'location': azure.resource_group_location, - 'vnetName': azure.vnet_name, - 'vnetResourceGroup': azure.vnet_resource_group, - 'subnetName': azure.subnet_name, - 'securityGroupName': azure.security_group_name, - 'loadBalancerSku': 'standard' - })) + comp_cloud_config_path.write_text( + json.dumps( + { + "useInstanceMetadata": True, + "useManagedIdentityExtension": azure.managed_identity, + "subscriptionId": azure.subscription_id, + "resourceGroup": azure.resource_group, + "location": azure.resource_group_location, + "vnetName": azure.vnet_name, + "vnetResourceGroup": azure.vnet_resource_group, + "subnetName": azure.subnet_name, + "securityGroupName": azure.security_group_name, + "loadBalancerSku": "standard", + "securityGroupResourceGroup": azure.security_group_resource_group, + "aadClientId": azure.aad_client_id, + "aadClientSecret": azure.aad_client_secret, + "tenantId": azure.tenant_id, + } + ) + ) -def configure_kube_proxy(configure_prefix, api_servers, cluster_cidr, - bind_address=None): +def configure_kube_proxy( 
+ configure_prefix, api_servers, cluster_cidr, bind_address=None +): kube_proxy_opts = {} - kube_proxy_opts['cluster-cidr'] = cluster_cidr - kube_proxy_opts['kubeconfig'] = kubeproxyconfig_path - kube_proxy_opts['logtostderr'] = 'true' - kube_proxy_opts['v'] = '0' + kube_proxy_opts["cluster-cidr"] = cluster_cidr + kube_proxy_opts["kubeconfig"] = kubeproxyconfig_path + kube_proxy_opts["logtostderr"] = "true" + kube_proxy_opts["v"] = "0" num_apis = len(api_servers) - kube_proxy_opts['master'] = api_servers[get_unit_number() % num_apis] - kube_proxy_opts['hostname-override'] = get_node_name() + kube_proxy_opts["master"] = api_servers[get_unit_number() % num_apis] + kube_proxy_opts["hostname-override"] = get_node_name() if bind_address: - kube_proxy_opts['bind-address'] = bind_address + kube_proxy_opts["bind-address"] = bind_address elif is_ipv6(cluster_cidr): - kube_proxy_opts['bind-address'] = '::' + kube_proxy_opts["bind-address"] = "::" if host.is_container(): - kube_proxy_opts['conntrack-max-per-core'] = '0' + kube_proxy_opts["conntrack-max-per-core"] = "0" if is_dual_stack(cluster_cidr): - kube_proxy_opts['feature-gates'] = "IPv6DualStack=true" + kube_proxy_opts["feature-gates"] = "IPv6DualStack=true" - configure_kubernetes_service(configure_prefix, 'kube-proxy', - kube_proxy_opts, 'proxy-extra-args') + configure_kubernetes_service( + configure_prefix, "kube-proxy", kube_proxy_opts, "proxy-extra-args" + ) def get_unit_number(): - return int(hookenv.local_unit().split('/')[1]) + return int(hookenv.local_unit().split("/")[1]) def cluster_cidr(): - '''Return the cluster CIDR provided by the CNI''' - cni = endpoint_from_flag('cni.available') + """Return the cluster CIDR provided by the CNI""" + cni = endpoint_from_flag("cni.available") if not cni: return None config = hookenv.config() - if 'default-cni' in config: + if "default-cni" in config: # master - default_cni = config['default-cni'] + default_cni = config["default-cni"] else: # worker - kube_control = 
endpoint_from_flag('kube-control.dns.available') + kube_control = endpoint_from_flag("kube-control.dns.available") if not kube_control: return None default_cni = kube_control.get_default_cni() - return cni.get_config(default=default_cni)['cidr'] + return cni.get_config(default=default_cni)["cidr"] def is_dual_stack(cidrs): - '''Detect IPv4/IPv6 dual stack from CIDRs''' + """Detect IPv4/IPv6 dual stack from CIDRs""" return {net.version for net in get_networks(cidrs)} == {4, 6} def is_ipv4(cidrs): - '''Detect IPv6 from CIDRs''' + """Detect IPv6 from CIDRs""" return get_ipv4_network(cidrs) is not None def is_ipv6(cidrs): - '''Detect IPv6 from CIDRs''' + """Detect IPv6 from CIDRs""" return get_ipv6_network(cidrs) is not None def is_ipv6_preferred(cidrs): - '''Detect if IPv6 is preffered from CIDRs''' + """Detect if IPv6 is preffered from CIDRs""" return get_networks(cidrs)[0].version == 6 def get_networks(cidrs): - '''Convert a comma-separated list of CIDRs to a list of networks.''' + """Convert a comma-separated list of CIDRs to a list of networks.""" if not cidrs: return [] - return [ipaddress.ip_interface(cidr).network for cidr in cidrs.split(',')] + return [ipaddress.ip_interface(cidr).network for cidr in cidrs.split(",")] def get_ipv4_network(cidrs): - '''Get the IPv4 network from the given CIDRs or None''' + """Get the IPv4 network from the given CIDRs or None""" return {net.version: net for net in get_networks(cidrs)}.get(4) def get_ipv6_network(cidrs): - '''Get the IPv6 network from the given CIDRs or None''' + """Get the IPv6 network from the given CIDRs or None""" return {net.version: net for net in get_networks(cidrs)}.get(6) def enable_ipv6_forwarding(): - '''Enable net.ipv6.conf.all.forwarding in sysctl if it is not already.''' - check_call(['sysctl', 'net.ipv6.conf.all.forwarding=1']) + """Enable net.ipv6.conf.all.forwarding in sysctl if it is not already.""" + check_call(["sysctl", "net.ipv6.conf.all.forwarding=1"]) def get_bind_addrs(ipv4=True, 
ipv6=True): - '''Get all global-scoped addresses that we might bind to.''' + """Get all global-scoped addresses that we might bind to.""" try: output = check_output(["ip", "-br", "addr", "show", "scope", "global"]) except CalledProcessError: # stderr will have any details, and go to the log - hookenv.log('Unable to determine global addresses', hookenv.ERROR) + hookenv.log("Unable to determine global addresses", hookenv.ERROR) return [] - ignore_interfaces = ('lxdbr', 'flannel', 'cni', 'virbr', 'docker') + ignore_interfaces = ("lxdbr", "flannel", "cni", "virbr", "docker") accept_versions = set() if ipv4: accept_versions.add(4) @@ -672,10 +736,11 @@ def get_bind_addrs(ipv4=True, ipv6=True): accept_versions.add(6) addrs = [] - for line in output.decode('utf8').splitlines(): + for line in output.decode("utf8").splitlines(): intf, state, *intf_addrs = line.split() - if state != 'UP' or any(intf.startswith(prefix) - for prefix in ignore_interfaces): + if state != "UP" or any( + intf.startswith(prefix) for prefix in ignore_interfaces + ): continue for addr in intf_addrs: ip_addr = ipaddress.ip_interface(addr).ip @@ -689,24 +754,171 @@ class InvalidVMwareHost(Exception): def _get_vmware_uuid(): - serial_id_file = '/sys/class/dmi/id/product_serial' + serial_id_file = "/sys/class/dmi/id/product_serial" # The serial id from VMWare VMs comes in following format: # VMware-42 28 13 f5 d4 20 71 61-5d b0 7b 96 44 0c cf 54 try: - with open(serial_id_file, 'r') as f: + with open(serial_id_file, "r") as f: serial_string = f.read().strip() if "VMware-" not in serial_string: - hookenv.log("Unable to find VMware ID in " - "product_serial: {}".format(serial_string)) + hookenv.log( + "Unable to find VMware ID in " + "product_serial: {}".format(serial_string) + ) raise InvalidVMwareHost - serial_string = serial_string.split( - "VMware-")[1].replace(" ", "").replace("-", "") + serial_string = ( + serial_string.split("VMware-")[1].replace(" ", "").replace("-", "") + ) uuid = "%s-%s-%s-%s-%s" 
% ( - serial_string[0:8], serial_string[8:12], serial_string[12:16], - serial_string[16:20], serial_string[20:32]) + serial_string[0:8], + serial_string[8:12], + serial_string[12:16], + serial_string[16:20], + serial_string[20:32], + ) except IOError as err: hookenv.log("Unable to read UUID from sysfs: {}".format(err)) - uuid = 'UNKNOWN' + uuid = "UNKNOWN" return uuid + +def token_generator(length=32): + """Generate a random token for use in account tokens. + + param: length - the length of the token to generate + """ + alpha = string.ascii_letters + string.digits + token = "".join(random.SystemRandom().choice(alpha) for _ in range(length)) + return token + + +def get_secret_names(): + """Return a dict of 'username: secret_id' for Charmed Kubernetes users.""" + try: + output = kubectl( + "get", + "secrets", + "-n", + AUTH_SECRET_NS, + "--field-selector", + "type={}".format(AUTH_SECRET_TYPE), + "-o", + "json", + ).decode("UTF-8") + except (CalledProcessError, FileNotFoundError): + # The api server may not be up, or we may be trying to run kubelet before + # the snap is installed. Send back an empty dict. + hookenv.log("Unable to get existing secrets", level=hookenv.WARNING) + return {} + + secrets = json.loads(output) + secret_names = {} + if "items" in secrets: + for secret in secrets["items"]: + try: + secret_id = secret["metadata"]["name"] + username_b64 = secret["data"]["username"].encode("UTF-8") + except (KeyError, TypeError): + # CK secrets will have populated 'data', but not all secrets do + continue + secret_names[b64decode(username_b64).decode("UTF-8")] = secret_id + return secret_names + + +def generate_rfc1123(length=10): + """Generate a random string compliant with RFC 1123. 
+ + https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#dns-subdomain-names + + param: length - the length of the string to generate + """ + length = 253 if length > 253 else length + valid_chars = string.ascii_lowercase + string.digits + rand_str = "".join(random.SystemRandom().choice(valid_chars) for _ in range(length)) + return rand_str + + +def create_secret(token, username, user, groups=None): + secrets = get_secret_names() + if username in secrets: + # Use existing secret ID if one exists for our username + secret_id = secrets[username] + else: + # secret IDs must be unique and rfc1123 compliant + sani_name = re.sub("[^0-9a-z.-]+", "-", user.lower()) + secret_id = "auth-{}-{}".format(sani_name, generate_rfc1123(10)) + + # The authenticator expects tokens to be in the form user::token + token_delim = "::" + if token_delim not in token: + token = "{}::{}".format(user, token) + + context = { + "type": AUTH_SECRET_TYPE, + "secret_name": secret_id, + "secret_namespace": AUTH_SECRET_NS, + "user": b64encode(user.encode("UTF-8")).decode("utf-8"), + "username": b64encode(username.encode("UTF-8")).decode("utf-8"), + "password": b64encode(token.encode("UTF-8")).decode("utf-8"), + "groups": b64encode(groups.encode("UTF-8")).decode("utf-8") if groups else "", + } + with tempfile.NamedTemporaryFile() as tmp_manifest: + render("cdk.auth-webhook-secret.yaml", tmp_manifest.name, context=context) + + if kubectl_manifest("apply", tmp_manifest.name): + hookenv.log("Created secret for {}".format(username)) + return True + else: + hookenv.log("WARN: Unable to create secret for {}".format(username)) + return False + + +def get_secret_password(username): + """Get the password for the given user from the secret that CK created.""" + try: + output = kubectl( + "get", + "secrets", + "-n", + AUTH_SECRET_NS, + "--field-selector", + "type={}".format(AUTH_SECRET_TYPE), + "-o", + "json", + ).decode("UTF-8") + except CalledProcessError: + # NB: apiserver probably isn't 
up. This can happen on boostrap or upgrade + # while trying to build kubeconfig files. If we need the 'admin' token during + # this time, pull it directly out of the kubeconfig file if possible. + token = None + if username == "admin": + admin_kubeconfig = Path("/root/.kube/config") + if admin_kubeconfig.exists(): + data = yaml.safe_load(admin_kubeconfig.read_text()) + try: + token = data["users"][0]["user"]["token"] + except (KeyError, IndexError, TypeError): + pass + return token + except FileNotFoundError: + # New deployments may ask for a token before the kubectl snap is installed. + # Give them nothing! + return None + + secrets = json.loads(output) + if "items" in secrets: + for secret in secrets["items"]: + try: + data_b64 = secret["data"] + password_b64 = data_b64["password"].encode("UTF-8") + username_b64 = data_b64["username"].encode("UTF-8") + except (KeyError, TypeError): + # CK authn secrets will have populated 'data', but not all secrets do + continue + + password = b64decode(password_b64).decode("UTF-8") + secret_user = b64decode(username_b64).decode("UTF-8") + if username == secret_user: + return password + return None diff --git a/kubernetes-worker/lib/charms/layer/snap.py b/kubernetes-worker/lib/charms/layer/snap.py index 88b8d89..06cc4b1 100644 --- a/kubernetes-worker/lib/charms/layer/snap.py +++ b/kubernetes-worker/lib/charms/layer/snap.py @@ -300,7 +300,15 @@ def get_installed_channel(snapname): hookenv.WARNING, ) return - return subprocess.check_output(cmd).decode("utf-8", errors="replace").partition("tracking:")[-1].split()[0] + try: + return subprocess.check_output(cmd).decode("utf-8", errors="replace").partition("tracking:")[-1].split()[0] + except Exception as e: + # If it fails to get the channel information(ex. installed via resource), return nothing. 
+ hookenv.log( + "Cannot get snap tracking (channel): {}".format(e), + hookenv.WARNING, + ) + return def _snap_args( @@ -351,25 +359,28 @@ def _install_store(snapname, **kw): cmd.append(snapname) hookenv.log("Installing {} from store".format(snapname)) - for attempt in tenacity.Retrying( + # Use tenacity decorator for Trusty support (See LP Bug #1934163) + @tenacity.retry( wait=tenacity.wait_fixed(10), # seconds stop=tenacity.stop_after_attempt(3), reraise=True, - ): - with attempt: - try: - out = subprocess.check_output(cmd, stderr=subprocess.STDOUT) - hookenv.log( - 'Installation successful cmd="{}" output="{}"'.format(cmd, out), - level=hookenv.DEBUG, - ) - reactive.clear_flag(get_local_flag(snapname)) - except subprocess.CalledProcessError as cp: - hookenv.log( - 'Installation failed cmd="{}" returncode={} output="{}"'.format(cmd, cp.returncode, cp.output), - level=hookenv.ERROR, - ) - raise + ) + def _run_install(): + try: + out = subprocess.check_output(cmd, stderr=subprocess.STDOUT) + hookenv.log( + 'Installation successful cmd="{}" output="{}"'.format(cmd, out), + level=hookenv.DEBUG, + ) + reactive.clear_flag(get_local_flag(snapname)) + except subprocess.CalledProcessError as cp: + hookenv.log( + 'Installation failed cmd="{}" returncode={} output="{}"'.format(cmd, cp.returncode, cp.output), + level=hookenv.ERROR, + ) + raise + + _run_install() def _refresh_store(snapname, **kw): diff --git a/kubernetes-worker/metadata.yaml b/kubernetes-worker/metadata.yaml index 65315cb..636e59f 100644 --- a/kubernetes-worker/metadata.yaml +++ b/kubernetes-worker/metadata.yaml @@ -25,12 +25,10 @@ "certificates": "interface": "tls-certificates" "kube-api-endpoint": + # kube-api-endpoint is not recommended as the API endpoints will be provided + # via the kube-control relation. However, it can be used to override those + # endpoints if you need to inject a reverse proxy between the master and workers. "interface": "http" - "kube-dns": - # kube-dns is deprecated. 
Its functionality has been rolled into the - # kube-control interface. The kube-dns relation will be removed in - # a future release. - "interface": "kube-dns" "kube-control": "interface": "kube-control" "aws": @@ -76,17 +74,29 @@ "core": "type": "file" "filename": "core.snap" - "description": "core snap" + "description": | + core snap + [DEPRECATED] in favor of using a Snap Store Proxy. + See https://ubuntu.com/kubernetes/docs/proxies for more details. "kubectl": "type": "file" "filename": "kubectl.snap" - "description": "kubectl snap" + "description": | + kubectl snap + [DEPRECATED] in favor of using a Snap Store Proxy. + See https://ubuntu.com/kubernetes/docs/proxies for more details. "kubelet": "type": "file" "filename": "kubelet.snap" - "description": "kubelet snap" + "description": | + kubelet snap + [DEPRECATED] in favor of using a Snap Store Proxy. + See https://ubuntu.com/kubernetes/docs/proxies for more details. "kube-proxy": "type": "file" "filename": "kube-proxy.snap" - "description": "kube-proxy snap" + "description": | + kube-proxy snap + [DEPRECATED] in favor of using a Snap Store Proxy. + See https://ubuntu.com/kubernetes/docs/proxies for more details. 
"subordinate": !!bool "false" diff --git a/kubernetes-worker/reactive/kubernetes_worker.py b/kubernetes-worker/reactive/kubernetes_worker.py index a7fab9c..eaa86f3 100644 --- a/kubernetes-worker/reactive/kubernetes_worker.py +++ b/kubernetes-worker/reactive/kubernetes_worker.py @@ -32,14 +32,16 @@ from charms import layer from charms.layer import snap from charms.reactive import hook from charms.reactive import endpoint_from_flag +from charms.reactive import endpoint_from_name from charms.reactive import remove_state, clear_flag from charms.reactive import set_state, set_flag -from charms.reactive import is_state, is_flag_set +from charms.reactive import is_state, is_flag_set, any_flags_set from charms.reactive import when, when_any, when_not, when_none from charms.reactive import data_changed, is_data_changed from charms.templating.jinja2 import render from charmhelpers.core import hookenv, unitdata +from charmhelpers.core.host import fstab_add, is_container from charmhelpers.core.host import service_stop, service_restart from charmhelpers.core.host import service_pause, service_resume from charmhelpers.contrib.charmsupport import nrpe @@ -156,6 +158,8 @@ def upgrade_charm(): kube_control = endpoint_from_flag('kube-control.connected') kube_control.manage_flags() + shutil.rmtree('/root/cdk/kubelet/dynamic-config', ignore_errors=True) + remove_state('kubernetes-worker.cni-plugins.installed') remove_state('kubernetes-worker.config.created') remove_state('kubernetes-worker.ingress.available') @@ -439,6 +443,16 @@ def charm_status(): if is_state('kubernetes-worker.cohorts.failed'): hookenv.status_set('waiting', 'Failed to join snap cohorts (see logs), will retry.') + if missing_kube_control(): + # the check calls status_set + return + if not any_flags_set('kube-control.api_endpoints.available', + 'kube-api-endpoint.available'): + hookenv.status_set('waiting', 'Waiting for cluster endpoint.') + return + if not get_kube_api_servers(): + hookenv.status_set('waiting', 
'Unable to determine cluster endpoint.') + return if not is_state('kube-control.auth.available'): hookenv.status_set('waiting', 'Waiting for cluster credentials.') return @@ -578,17 +592,18 @@ def send_data(): key_path=client_key_path) -@when('kube-api-endpoint.available', 'kube-control.dns.available', - 'cni.available', 'endpoint.container-runtime.available') +@when('kube-control.dns.available', 'cni.available', + 'endpoint.container-runtime.available') +@when_any('kube-control.api_endpoints.available', + 'kube-api-endpoint.available') def watch_for_changes(): ''' Watch for configuration changes and signal if we need to restart the worker services ''' - kube_api = endpoint_from_flag('kube-api-endpoint.available') kube_control = endpoint_from_flag('kube-control.dns.available') container_runtime = \ endpoint_from_flag('endpoint.container-runtime.available') - servers = get_kube_api_servers(kube_api) + servers = get_kube_api_servers() dns = kube_control.get_dns() cluster_cidr = kubernetes_common.cluster_cidr() container_runtime_name = \ @@ -611,7 +626,7 @@ def watch_for_changes(): set_state('kubernetes-worker.restart-needed') -@when('kubernetes-worker.snaps.installed', 'kube-api-endpoint.available', +@when('kubernetes-worker.snaps.installed', 'tls_client.ca.saved', 'tls_client.certs.saved', 'kube-control.dns.available', 'kube-control.auth.available', 'cni.available', 'kubernetes-worker.restart-needed', @@ -620,16 +635,17 @@ def watch_for_changes(): @when_not('kubernetes-worker.cloud.pending', 'kubernetes-worker.cloud.blocked', 'upgrade.series.in-progress') +@when_any('kube-control.api_endpoints.available', + 'kube-api-endpoint.available') def start_worker(): ''' Start kubelet using the provided API and DNS info.''' # Note that the DNS server doesn't necessarily exist at this point. We know # what its IP will eventually be, though, so we can go ahead and configure # kubelet with that info. 
This ensures that early pods are configured with # the correct DNS even though the server isn't ready yet. - kube_api = endpoint_from_flag('kube-api-endpoint.available') kube_control = endpoint_from_flag('kube-control.dns.available') - servers = get_kube_api_servers(kube_api) + servers = get_kube_api_servers() dns = kube_control.get_dns() ingress_ip = get_node_ip() cluster_cidr = kubernetes_common.cluster_cidr() @@ -638,6 +654,10 @@ def start_worker(): hookenv.log('Waiting for cluster cidr.') return + if not servers: + hookenv.log("Waiting for API server URL") + return + if kubernetes_common.is_ipv6(cluster_cidr): kubernetes_common.enable_ipv6_forwarding() @@ -662,7 +682,7 @@ def start_worker(): def configure_cni(cni): ''' Set worker configuration on the CNI relation. This lets the CNI subordinate know that we're the worker so it can respond accordingly. ''' - cni.set_config(is_master=False, kubeconfig_path=kubeconfig_path) + cni.set_config(is_master=False) @when('config.changed.labels') @@ -761,6 +781,9 @@ def create_config(server, creds): token=creds['kubelet_token'], user='kubelet') create_kubeconfig(kubeproxyconfig_path, server, ca_crt_path, token=creds['proxy_token'], user='kube-proxy') + cni = endpoint_from_name('cni') + if cni: + cni.notify_kubeconfig_changed() def merge_kubelet_extra_config(config, extra_config): @@ -812,92 +835,67 @@ def configure_kubelet(dns, ingress_ip): kubelet_opts['cloud-config'] = str(kubelet_cloud_config_path) kubelet_opts['provider-id'] = azure.vm_id - if get_version('kubelet') >= (1, 10): - # Put together the KubeletConfiguration data - kubelet_config = { - 'apiVersion': 'kubelet.config.k8s.io/v1beta1', - 'kind': 'KubeletConfiguration', - 'address': '0.0.0.0', - 'authentication': { - 'anonymous': { - 'enabled': False - }, - 'x509': { - 'clientCAFile': str(ca_crt_path) - } + # Put together the KubeletConfiguration data + kubelet_config = { + 'apiVersion': 'kubelet.config.k8s.io/v1beta1', + 'kind': 'KubeletConfiguration', + 
'address': '0.0.0.0', + 'authentication': { + 'anonymous': { + 'enabled': False }, - # NB: authz webhook config tells the kubelet to ask the api server - # if a request is authorized; it is not related to the authn - # webhook config of the k8s master services. - 'authorization': { - 'mode': 'Webhook' - }, - 'clusterDomain': dns['domain'], - 'failSwapOn': False, - 'port': 10250, - 'protectKernelDefaults': True, - 'readOnlyPort': 0, - 'tlsCertFile': str(server_crt_path), - 'tlsPrivateKeyFile': str(server_key_path) - } - if dns['enable-kube-dns']: - kubelet_config['clusterDNS'] = [dns['sdn-ip']] + 'x509': { + 'clientCAFile': str(ca_crt_path) + } + }, + # NB: authz webhook config tells the kubelet to ask the api server + # if a request is authorized; it is not related to the authn + # webhook config of the k8s master services. + 'authorization': { + 'mode': 'Webhook' + }, + 'clusterDomain': dns['domain'], + 'failSwapOn': False, + 'port': 10250, + 'protectKernelDefaults': True, + 'readOnlyPort': 0, + 'tlsCertFile': str(server_crt_path), + 'tlsPrivateKeyFile': str(server_key_path) + } + if dns['enable-kube-dns']: + kubelet_config['clusterDNS'] = [dns['sdn-ip']] - # Handle feature gates - feature_gates = {} - if get_version('kubelet') >= (1, 19): - # NB: required for CIS compliance - feature_gates['RotateKubeletServerCertificate'] = True - if is_state('kubernetes-worker.gpu.enabled'): - feature_gates['DevicePlugins'] = True - if feature_gates: - kubelet_config['featureGates'] = feature_gates - if kubernetes_common.is_dual_stack(kubernetes_common.cluster_cidr()): - feature_gates = kubelet_config.setdefault('featureGates', {}) - feature_gates['IPv6DualStack'] = True + # Handle feature gates + feature_gates = {} + if get_version('kubelet') >= (1, 19): + # NB: required for CIS compliance + feature_gates['RotateKubeletServerCertificate'] = True + if is_state('kubernetes-worker.gpu.enabled'): + feature_gates['DevicePlugins'] = True + if feature_gates: + 
kubelet_config['featureGates'] = feature_gates + if kubernetes_common.is_dual_stack(kubernetes_common.cluster_cidr()): + feature_gates = kubelet_config.setdefault('featureGates', {}) + feature_gates['IPv6DualStack'] = True - # Workaround for DNS on bionic - # https://github.com/juju-solutions/bundle-canonical-kubernetes/issues/655 - resolv_path = os.path.realpath('/etc/resolv.conf') - if resolv_path == '/run/systemd/resolve/stub-resolv.conf': - kubelet_config['resolvConf'] = '/run/systemd/resolve/resolv.conf' + # Workaround for DNS on bionic + # https://github.com/juju-solutions/bundle-canonical-kubernetes/issues/655 + resolv_path = os.path.realpath('/etc/resolv.conf') + if resolv_path == '/run/systemd/resolve/stub-resolv.conf': + kubelet_config['resolvConf'] = '/run/systemd/resolve/resolv.conf' - # Add kubelet-extra-config. This needs to happen last so that it - # overrides any config provided by the charm. - kubelet_extra_config = hookenv.config('kubelet-extra-config') - kubelet_extra_config = yaml.safe_load(kubelet_extra_config) - merge_kubelet_extra_config(kubelet_config, kubelet_extra_config) + # Add kubelet-extra-config. This needs to happen last so that it + # overrides any config provided by the charm. + kubelet_extra_config = hookenv.config('kubelet-extra-config') + kubelet_extra_config = yaml.safe_load(kubelet_extra_config) + merge_kubelet_extra_config(kubelet_config, kubelet_extra_config) - # Render the file and configure Kubelet to use it - os.makedirs('/root/cdk/kubelet', exist_ok=True) - with open('/root/cdk/kubelet/config.yaml', 'w') as f: - f.write('# Generated by kubernetes-worker charm, do not edit\n') - yaml.dump(kubelet_config, f) - kubelet_opts['config'] = '/root/cdk/kubelet/config.yaml' - else: - # NOTE: This is for 1.9. Once we've dropped 1.9 support, we can remove - # this whole block and the parent if statement. 
- kubelet_opts['address'] = '0.0.0.0' - kubelet_opts['anonymous-auth'] = 'false' - kubelet_opts['client-ca-file'] = str(ca_crt_path) - kubelet_opts['cluster-domain'] = dns['domain'] - kubelet_opts['fail-swap-on'] = 'false' - kubelet_opts['port'] = '10250' - kubelet_opts['tls-cert-file'] = str(server_crt_path) - kubelet_opts['tls-private-key-file'] = str(server_key_path) - if dns['enable-kube-dns']: - kubelet_opts['cluster-dns'] = dns['sdn-ip'] - if is_state('kubernetes-worker.gpu.enabled'): - kubelet_opts['feature-gates'] = 'DevicePlugins=true' - - # Workaround for DNS on bionic, for k8s 1.9 - # https://github.com/juju-solutions/bundle-canonical-kubernetes/issues/655 - resolv_path = os.path.realpath('/etc/resolv.conf') - if resolv_path == '/run/systemd/resolve/stub-resolv.conf': - kubelet_opts['resolv-conf'] = '/run/systemd/resolve/resolv.conf' - - if get_version('kubelet') >= (1, 11): - kubelet_opts['dynamic-config-dir'] = '/root/cdk/kubelet/dynamic-config' + # Render the file and configure Kubelet to use it + os.makedirs('/root/cdk/kubelet', exist_ok=True) + with open('/root/cdk/kubelet/config.yaml', 'w') as f: + f.write('# Generated by kubernetes-worker charm, do not edit\n') + yaml.dump(kubelet_config, f) + kubelet_opts['config'] = '/root/cdk/kubelet/config.yaml' # If present, ensure kubelet gets the pause container from the configured # registry. 
When not present, kubelet uses a default image location @@ -907,6 +905,8 @@ def configure_kubelet(dns, ingress_ip): kubelet_opts['pod-infra-container-image'] = \ '{}/pause:3.4.1'.format(registry_location) + workaround_lxd_kernel_params() + configure_kubernetes_service(configure_prefix, 'kubelet', kubelet_opts, 'kubelet-extra-args') @@ -999,7 +999,7 @@ def render_and_launch_ingress(): context['ingress_uid'] = '101' context['ingress_image'] = '/'.join([ registry_location or 'us.gcr.io', - 'k8s-artifacts-prod/ingress-nginx/controller:v0.45.0', + 'k8s-artifacts-prod/ingress-nginx/controller:v1.0.0-beta.3', ]) kubelet_version = get_version('kubelet') @@ -1073,25 +1073,32 @@ def restart_unit_services(): service_restart('snap.%s.daemon' % service) -def get_kube_api_servers(kube_api): - '''Return the kubernetes api server address and port for this - relationship.''' - hosts = [] - # Iterate over every service from the relation object. - for service in kube_api.services(): - for unit in service['hosts']: - hosts.append('https://{0}:{1}'.format(unit['hostname'], - unit['port'])) - return hosts +def get_kube_api_servers(): + '''Return the list of kubernetes API endpoint URLs.''' + kube_control = endpoint_from_name("kube-control") + kube_api = endpoint_from_name("kube-api-endpoint") + # prefer kube-api-endpoints + if kube_api.services(): + return [ + 'https://{0}:{1}'.format(unit['hostname'], unit['port']) + for service in kube_api.services() + for unit in service['hosts'] + ] + if hasattr(kube_control, "get_api_endpoints"): + return kube_control.get_api_endpoints() + hookenv.log("Unable to determine API server URLs from either kube-control " + "or kube-api-endpoint relation", hookenv.ERROR) + return [] @when('kubernetes-worker.config.created') @when('nrpe-external-master.available') -@when('kube-api-endpoint.available') @when('kube-control.auth.available') @when_any('config.changed.nagios_context', 'config.changed.nagios_servicegroups', 'nrpe-external-master.reconfigure') 
+@when_any('kube-control.api_endpoints.available', + 'kube-api-endpoint.available') def update_nrpe_config(): services = ['snap.{}.daemon'.format(s) for s in worker_services] data = render('nagios_plugin.py', context={'node_name': get_node_name()}) @@ -1107,9 +1114,8 @@ def update_nrpe_config(): nrpe_setup.write() creds = db.get('credentials') - if creds: - kube_api = endpoint_from_flag('kube-api-endpoint.available') - servers = get_kube_api_servers(kube_api) + servers = get_kube_api_servers() + if creds and servers: server = servers[get_unit_number() % len(servers)] create_kubeconfig(nrpe_kubeconfig_path, server, ca_crt_path, token=creds['client_token'], user='nagios') @@ -1121,6 +1127,12 @@ def update_nrpe_config(): remove_state('nrpe-external-master.reconfigure') set_state('nrpe-external-master.initial-config') + # request CPU governor check from nrpe relation to be performance + rel_settings = { + 'requested_cpu_governor': 'performance', + } + for rid in hookenv.relation_ids('nrpe-external-master'): + hookenv.relation_set(relation_id=rid, relation_settings=rel_settings) @when_not('nrpe-external-master.available') @@ -1146,13 +1158,6 @@ def enable_gpu(): """Enable GPU usage on this node. """ - if get_version('kubelet') < (1, 9): - hookenv.status_set( - 'active', - 'Upgrade to snap channel >= 1.9/stable to enable GPU support.' - ) - return - hookenv.log('Enabling gpu mode') try: # Not sure why this is necessary, but if you don't run this, k8s will @@ -1237,7 +1242,6 @@ def catch_change_in_creds(kube_control): set_state('kubernetes-worker.restart-needed') -@when_not('kube-control.connected') def missing_kube_control(): """Inform the operator they need to add the kube-control relation. @@ -1245,6 +1249,7 @@ def missing_kube_control(): a charm in a deployment that pre-dates the kube-control relation, it'll be missing. + Called from charm_status. 
""" try: goal_state = hookenv.goal_state() @@ -1252,14 +1257,18 @@ def missing_kube_control(): goal_state = {} if 'kube-control' in goal_state.get('relations', {}): - hookenv.status_set( - 'waiting', - 'Waiting for kubernetes-master to become ready') + if not is_flag_set("kube-control.connected"): + hookenv.status_set( + 'waiting', + 'Waiting for kubernetes-master to become ready') + return True else: hookenv.status_set( 'blocked', 'Relate {}:kube-control kubernetes-master:kube-control'.format( hookenv.service_name())) + return True + return False def _systemctl_is_active(application): @@ -1542,3 +1551,34 @@ def configure_default_cni(): @when('ingress-proxy.available') def configure_ingress_proxy(ingress_proxy): ingress_proxy.configure(port='80') + + +def workaround_lxd_kernel_params(): + ''' + Workaround for kubelet not starting in LXD when kernel params are not set + to the desired values. + ''' + if is_container(): + hookenv.log('LXD detected, faking kernel params via bind mounts') + root_dir = '/root/cdk/lxd-kernel-params' + os.makedirs(root_dir, exist_ok=True) + # Kernel params taken from: + # https://github.com/kubernetes/kubernetes/blob/v1.22.0/pkg/kubelet/cm/container_manager_linux.go#L421-L426 + # https://github.com/kubernetes/kubernetes/blob/v1.22.0/pkg/util/sysctl/sysctl.go#L30-L64 + params = { + 'vm.overcommit_memory': 1, + 'vm.panic_on_oom': 0, + 'kernel.panic': 10, + 'kernel.panic_on_oops': 1, + 'kernel.keys.root_maxkeys': 1000000, + 'kernel.keys.root_maxbytes': 1000000 * 25 + } + for param, param_value in params.items(): + fake_param_path = root_dir + '/' + param + with open(fake_param_path, 'w') as f: + f.write(str(param_value)) + real_param_path = '/proc/sys/' + param.replace('.', '/') + fstab_add(fake_param_path, real_param_path, 'none', 'bind') + subprocess.check_call(['mount', '-a']) + else: + hookenv.log('LXD not detected, not faking kernel params') diff --git a/kubernetes-worker/registry-configmap.yaml 
b/kubernetes-worker/registry-configmap.yaml deleted file mode 100644 index b34c736..0000000 --- a/kubernetes-worker/registry-configmap.yaml +++ /dev/null @@ -1,7 +0,0 @@ -apiVersion: v1 -data: - proxy-body-size: 1024m -kind: ConfigMap -metadata: - name: nginx-configuration - namespace: ingress-nginx-kubernetes-worker diff --git a/kubernetes-worker/script/bootstrap b/kubernetes-worker/script/bootstrap deleted file mode 100644 index b69771c..0000000 --- a/kubernetes-worker/script/bootstrap +++ /dev/null @@ -1,8 +0,0 @@ -#!/bin/bash - -set -x - -sudo apt update -sudo apt install -qyf docker.io -sudo snap install charm --classic -sudo snap install yq diff --git a/kubernetes-worker/script/build b/kubernetes-worker/script/build deleted file mode 100644 index 6bbbc48..0000000 --- a/kubernetes-worker/script/build +++ /dev/null @@ -1,7 +0,0 @@ -#!/bin/bash -set -x - -export PATH=/snap/bin:$PATH -: "${CHARM_BUILD_DIR:=/tmp/charms}" - -charm build -r --force -o "$CHARM_BUILD_DIR" diff --git a/kubernetes-worker/script/upload b/kubernetes-worker/script/upload deleted file mode 100644 index 548195b..0000000 --- a/kubernetes-worker/script/upload +++ /dev/null @@ -1,50 +0,0 @@ -#!/bin/bash -set -x - -export PATH=/snap/bin:$PATH - -: "${CHARM_BUILD_DIR:=/tmp/charms}" - -charm whoami -RET=$? 
-if ((RET > 0)); then - echo "Not logged into charmstore" - exit 1 -fi - -function generate::attachments -{ - ./build-cni-resources.sh - for resource in *.tgz; do - charm attach cs:~"$NAMESPACE"/"$CHARM" --channel unpublished "${resource%.*}"="$resource" - done - - snap_placeholders=(core kubectl kubelet kube-proxy) - mkdir -p placeholders - for snap in ${snap_placeholders[@]}; do - touch placeholders/"$snap.snap" - charm attach cs:~"$NAMESPACE"/"$CHARM" --channel unpublished "$snap"=placeholders/"$snap".snap - done -} - -function generate::resource::argument -{ - py_script=" -import sys -import json -resources_json = json.load(sys.stdin) -resource_map = [] -for item in resources_json: - resource_map.append(f\"--resource {item['Name']}-{item['Revision']}\") - -print(' '.join(resource_map)) -" - charm list-resources cs:~"$NAMESPACE"/"$CHARM" --channel unpublished --format json | env python3 -c "$py_script" -} - -URL=$(charm push "$CHARM_BUILD_DIR"/builds/"$CHARM"/. cs:~"$NAMESPACE"/"$CHARM" | yq r - url) -generate::attachments - -if [ "$CHANNEL" != unpublished ]; then - charm release "$URL" --channel "$CHANNEL" $(generate::resource::argument) -fi diff --git a/kubernetes-worker/templates/cdk.auth-webhook-secret.yaml b/kubernetes-worker/templates/cdk.auth-webhook-secret.yaml new file mode 100644 index 0000000..a12c402 --- /dev/null +++ b/kubernetes-worker/templates/cdk.auth-webhook-secret.yaml @@ -0,0 +1,13 @@ +# Manifest for CK secrets that auth-webhook expects +--- +apiVersion: v1 +kind: Secret +metadata: + name: {{ secret_name }} + namespace: {{ secret_namespace }} +type: {{ type }} +data: + uid: {{ user }} + username: {{ username }} + password: {{ password }} + groups: '{{ groups }}' diff --git a/kubernetes-worker/templates/ingress-daemon-set.yaml b/kubernetes-worker/templates/ingress-daemon-set.yaml index 646856b..72d0fcd 100644 --- a/kubernetes-worker/templates/ingress-daemon-set.yaml +++ b/kubernetes-worker/templates/ingress-daemon-set.yaml @@ -69,7 +69,7 @@ 
metadata: cdk-{{ juju_application }}-ingress: "true" --- -apiVersion: rbac.authorization.k8s.io/v1beta1 +apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole metadata: name: nginx-ingress-clusterrole-{{ juju_application }} @@ -104,15 +104,8 @@ rules: - list - watch - apiGroups: - - "" - resources: - - events - verbs: - - create - - patch - - apiGroups: - - "extensions" - - "networking.k8s.io" + - extensions + - "networking.k8s.io" # # k8s 1.14+ resources: - ingresses verbs: @@ -120,14 +113,29 @@ rules: - list - watch - apiGroups: - - "extensions" - - "networking.k8s.io" + - "" + resources: + - events + verbs: + - create + - patch + - apiGroups: + - extensions + - "networking.k8s.io" # k8s 1.14+ resources: - ingresses/status verbs: - update + - apiGroups: + - "networking.k8s.io" # k8s 1.14+ + resources: + - ingressclasses + verbs: + - get + - list + - watch --- -apiVersion: rbac.authorization.k8s.io/v1beta1 +apiVersion: rbac.authorization.k8s.io/v1 kind: Role metadata: name: nginx-ingress-role-{{ juju_application }} @@ -140,9 +148,6 @@ rules: - apiGroups: - "" resources: - - configmaps - - pods - - secrets - namespaces verbs: - get @@ -150,12 +155,51 @@ rules: - "" resources: - configmaps + - pods + - secrets + - endpoints + verbs: + - get + - list + - watch + - apiGroups: + - "" + resources: + - services + verbs: + - get + - list + - watch + - apiGroups: + - extensions + - "networking.k8s.io" # k8s 1.14+ + resources: + - ingresses + verbs: + - get + - list + - watch + - apiGroups: + - extensions + - "networking.k8s.io" # k8s 1.14+ + resources: + - ingresses/status + verbs: + - update + - apiGroups: + - "networking.k8s.io" # k8s 1.14+ + resources: + - ingressclasses + verbs: + - get + - list + - watch + - apiGroups: + - "" resourceNames: - # Defaults to "-" - # Here: "-" - # This has to be adapted if you change either parameter - # when launching the nginx-ingress-controller. 
- - "ingress-controller-leader-nginx" + - ingress-controller-leader + resources: + - configmaps verbs: - get - update @@ -168,12 +212,12 @@ rules: - apiGroups: - "" resources: - - endpoints + - events verbs: - - get - + - create + - patch --- -apiVersion: rbac.authorization.k8s.io/v1beta1 +apiVersion: rbac.authorization.k8s.io/v1 kind: RoleBinding metadata: name: nginx-ingress-role-nisa-binding-{{ juju_application }} @@ -192,7 +236,7 @@ subjects: namespace: ingress-nginx-{{ juju_application }} --- -apiVersion: rbac.authorization.k8s.io/v1beta1 +apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding metadata: name: nginx-ingress-clusterrole-nisa-binding-{{ juju_application }} @@ -298,3 +342,15 @@ spec: timeoutSeconds: 1 --- +apiVersion: networking.k8s.io/v1 +kind: IngressClass +metadata: + labels: + app.kubernetes.io/name: ingress-nginx-{{ juju_application }} + app.kubernetes.io/part-of: ingress-nginx-{{ juju_application }} + cdk-{{ juju_application }}-ingress: "true" + name: nginx-ingress-controller + annotations: + ingressclass.kubernetes.io/is-default-class: "true" +spec: + controller: k8s.io/ingress-nginx diff --git a/kubernetes-worker/templates/nagios_plugin.py b/kubernetes-worker/templates/nagios_plugin.py index 958b4ed..3e4a9b4 100644 --- a/kubernetes-worker/templates/nagios_plugin.py +++ b/kubernetes-worker/templates/nagios_plugin.py @@ -51,8 +51,7 @@ def check_node(node): msg.append(check['error']) if check['type'] == 'error': error = True - else: - break + break else: err_msg = 'Unable to find status for {}'.format(check['error']) raise nagios_plugin3.CriticalError(err_msg) diff --git a/kubernetes-worker/templates/registry.yaml b/kubernetes-worker/templates/registry.yaml deleted file mode 100644 index 90c1ac7..0000000 --- a/kubernetes-worker/templates/registry.yaml +++ /dev/null @@ -1,118 +0,0 @@ -apiVersion: v1 -kind: Secret -metadata: - name: registry-tls-data -type: Opaque -data: - tls.crt: {{ tlscert }} - tls.key: {{ tlskey }} ---- 
-apiVersion: v1 -kind: Secret -metadata: - name: registry-auth-data -type: Opaque -data: - htpasswd: {{ htpasswd }} ---- -apiVersion: v1 -kind: ReplicationController -metadata: - name: kube-registry-v0 - labels: - k8s-app: kube-registry - version: v0 - kubernetes.io/cluster-service: "true" -spec: - replicas: 1 - selector: - k8s-app: kube-registry - version: v0 - template: - metadata: - labels: - k8s-app: kube-registry - version: v0 - kubernetes.io/cluster-service: "true" - spec: - containers: - - name: registry - image: {{ registry|default("docker.io") }}/cdkbot/registry-{{ arch }}:2.6 - resources: - # keep request = limit to keep this container in guaranteed class - limits: - cpu: 100m - memory: 100Mi - requests: - cpu: 100m - memory: 100Mi - env: - - name: REGISTRY_HTTP_ADDR - value: :5000 - - name: REGISTRY_STORAGE_FILESYSTEM_ROOTDIRECTORY - value: /var/lib/registry - - name: REGISTRY_AUTH_HTPASSWD_REALM - value: basic_realm - - name: REGISTRY_AUTH_HTPASSWD_PATH - value: /auth/htpasswd - volumeMounts: - - name: image-store - mountPath: /var/lib/registry - - name: auth-dir - mountPath: /auth - ports: - - containerPort: 5000 - name: registry - protocol: TCP - volumes: - - name: image-store - hostPath: - path: /srv/registry - - name: auth-dir - secret: - secretName: registry-auth-data ---- -apiVersion: v1 -kind: Service -metadata: - name: kube-registry - labels: - k8s-app: kube-registry - kubernetes.io/cluster-service: "true" - kubernetes.io/name: "KubeRegistry" -spec: - selector: - k8s-app: kube-registry - type: LoadBalancer - ports: - - name: registry - port: 5000 - protocol: TCP ---- -apiVersion: v1 -kind: Secret -metadata: - name: registry-access -data: - .dockercfg: {{ dockercfg }} -type: kubernetes.io/dockercfg -{%- if ingress %} ---- -apiVersion: extensions/v1beta1 -kind: Ingress -metadata: - name: registry-ing -spec: - tls: - - hosts: - - {{ domain }} - secretName: registry-tls-data - rules: - - host: {{ domain }} - http: - paths: - - backend: - 
serviceName: kube-registry - servicePort: 5000 - path: / -{% endif %} diff --git a/kubernetes-worker/tests/functional/conftest.py b/kubernetes-worker/tests/functional/conftest.py new file mode 100644 index 0000000..a92e249 --- /dev/null +++ b/kubernetes-worker/tests/functional/conftest.py @@ -0,0 +1,4 @@ +import charms.unit_test + + +charms.unit_test.patch_reactive() diff --git a/kubernetes-worker/tests/functional/test_k8s_common.py b/kubernetes-worker/tests/functional/test_k8s_common.py new file mode 100644 index 0000000..4b867e6 --- /dev/null +++ b/kubernetes-worker/tests/functional/test_k8s_common.py @@ -0,0 +1,90 @@ +from functools import partial + +import pytest +from unittest import mock +from charms.layer import kubernetes_common + + +class TestCreateKubeConfig: + @pytest.fixture(autouse=True) + def _files(self, tmp_path): + self.cfg_file = tmp_path / "config" + self.ca_file = tmp_path / "ca.crt" + self.ca_file.write_text("foo") + self.ckc = partial( + kubernetes_common.create_kubeconfig, + self.cfg_file, + "server", + self.ca_file, + ) + + def test_guard_clauses(self): + with pytest.raises(ValueError): + self.ckc() + assert not self.cfg_file.exists() + with pytest.raises(ValueError): + self.ckc(token="token", password="password") + assert not self.cfg_file.exists() + with pytest.raises(ValueError): + self.ckc(key="key") + assert not self.cfg_file.exists() + + def test_file_creation(self): + self.ckc(password="password") + assert self.cfg_file.exists() + cfg_data_1 = self.cfg_file.read_text() + assert cfg_data_1 + + def test_idempotency(self): + self.ckc(password="password") + cfg_data_1 = self.cfg_file.read_text() + self.ckc(password="password") + cfg_data_2 = self.cfg_file.read_text() + # Verify that calling w/ the same data keeps the same file contents. 
+ assert cfg_data_2 == cfg_data_1 + + def test_efficient_updates(self): + self.ckc(password="old_password") + cfg_stat_1 = self.cfg_file.stat() + self.ckc(password="old_password") + cfg_stat_2 = self.cfg_file.stat() + self.ckc(password="new_password") + cfg_stat_3 = self.cfg_file.stat() + # Verify that calling with the same data doesn't + # modify the file at all, but that new data does + assert cfg_stat_1.st_mtime == cfg_stat_2.st_mtime < cfg_stat_3.st_mtime + + def test_aws_iam(self): + self.ckc(password="password", aws_iam_cluster_id="aws-cluster") + assert self.cfg_file.exists() + cfg_data_1 = self.cfg_file.read_text() + assert "aws-cluster" in cfg_data_1 + + def test_keystone(self): + self.ckc(password="password", keystone=True) + assert self.cfg_file.exists() + cfg_data_1 = self.cfg_file.read_text() + assert "keystone-user" in cfg_data_1 + assert "exec" in cfg_data_1 + + def test_atomic_updates(self): + self.ckc(password="old_password") + with self.cfg_file.open("rt") as f: + # Perform a write in the middle of reading + self.ckc(password="new_password") + # Read data from existing FH after new data was written + cfg_data_1 = f.read() + # Read updated data + cfg_data_2 = self.cfg_file.read_text() + # Verify that the in-progress read didn't get any of the new data + assert cfg_data_1 != cfg_data_2 + assert "old_password" in cfg_data_1 + assert "new_password" in cfg_data_2 + + @mock.patch("charmhelpers.core.hookenv.network_get", autospec=True) + def test_get_ingress_address(self, network_get): + network_get.return_value = {"ingress-addresses": ["1.2.3.4", "5.6.7.8"]} + ingress = kubernetes_common.get_ingress_address("endpoint-name") + assert ingress == "1.2.3.4" + ingress = kubernetes_common.get_ingress_address("endpoint-name", ["1.2.3.4"]) + assert ingress == "5.6.7.8" diff --git a/kubernetes-worker/tests/unit/conftest.py b/kubernetes-worker/tests/unit/conftest.py new file mode 100644 index 0000000..a92e249 --- /dev/null +++ 
b/kubernetes-worker/tests/unit/conftest.py @@ -0,0 +1,4 @@ +import charms.unit_test + + +charms.unit_test.patch_reactive() diff --git a/kubernetes-worker/tests/unit/test_k8s_common.py b/kubernetes-worker/tests/unit/test_k8s_common.py new file mode 100644 index 0000000..0dcad31 --- /dev/null +++ b/kubernetes-worker/tests/unit/test_k8s_common.py @@ -0,0 +1,122 @@ +import json +import string +from subprocess import CalledProcessError +from unittest.mock import Mock + +from charms.layer import kubernetes_common as kc + + +def test_token_generator(): + alphanum = string.ascii_letters + string.digits + token = kc.token_generator(10) + assert len(token) == 10 + unknown_chars = set(token) - set(alphanum) + assert not unknown_chars + + +def test_get_secret_names(monkeypatch): + monkeypatch.setattr(kc, "kubectl", Mock()) + kc.kubectl.side_effect = [ + CalledProcessError(1, "none"), + FileNotFoundError, + "{}".encode("utf8"), + json.dumps( + { + "items": [ + { + "metadata": {"name": "secret-id"}, + "data": {"username": "dXNlcg=="}, + }, + ], + } + ).encode("utf8"), + ] + assert kc.get_secret_names() == {} + assert kc.get_secret_names() == {} + assert kc.get_secret_names() == {} + assert kc.get_secret_names() == {"user": "secret-id"} + + +def test_generate_rfc1123(): + alphanum = string.ascii_letters + string.digits + token = kc.generate_rfc1123(1000) + assert len(token) == 253 + unknown_chars = set(token) - set(alphanum) + assert not unknown_chars + + +def test_create_secret(monkeypatch): + monkeypatch.setattr(kc, "render", Mock()) + monkeypatch.setattr(kc, "kubectl_manifest", Mock()) + monkeypatch.setattr(kc, "get_secret_names", Mock()) + monkeypatch.setattr(kc, "generate_rfc1123", Mock()) + kc.kubectl_manifest.side_effect = [True, False] + kc.get_secret_names.side_effect = [{"username": "secret-id"}, {}] + kc.generate_rfc1123.return_value = "foo" + assert kc.create_secret("token", "username", "user", "groups") + assert kc.render.call_args[1]["context"] == { + "groups": 
"Z3JvdXBz", + "password": "dXNlcjo6dG9rZW4=", + "secret_name": "secret-id", + "secret_namespace": "kube-system", + "type": "juju.is/token-auth", + "user": "dXNlcg==", + "username": "dXNlcm5hbWU=", + } + assert not kc.create_secret("token", "username", "user", "groups") + assert kc.render.call_args[1]["context"] == { + "groups": "Z3JvdXBz", + "password": "dXNlcjo6dG9rZW4=", + "secret_name": "auth-user-foo", + "secret_namespace": "kube-system", + "type": "juju.is/token-auth", + "user": "dXNlcg==", + "username": "dXNlcm5hbWU=", + } + + +def test_get_secret_password(monkeypatch): + monkeypatch.setattr(kc, "kubectl", Mock()) + monkeypatch.setattr(kc, "Path", Mock()) + monkeypatch.setattr(kc, "yaml", Mock()) + kc.kubectl.side_effect = [ + CalledProcessError(1, "none"), + CalledProcessError(1, "none"), + CalledProcessError(1, "none"), + CalledProcessError(1, "none"), + CalledProcessError(1, "none"), + CalledProcessError(1, "none"), + FileNotFoundError, + json.dumps({}).encode("utf8"), + json.dumps({"items": []}).encode("utf8"), + json.dumps({"items": []}).encode("utf8"), + json.dumps({"items": [{}]}).encode("utf8"), + json.dumps({"items": [{"data": {}}]}).encode("utf8"), + json.dumps( + {"items": [{"data": {"username": "Ym9i", "password": "c2VjcmV0"}}]} + ).encode("utf8"), + json.dumps( + {"items": [{"data": {"username": "dXNlcm5hbWU=", "password": "c2VjcmV0"}}]} + ).encode("utf8"), + ] + kc.yaml.safe_load.side_effect = [ + {}, + {"users": None}, + {"users": []}, + {"users": [{"user": {}}]}, + {"users": [{"user": {"token": "secret"}}]}, + ] + assert kc.get_secret_password("username") is None + assert kc.get_secret_password("admin") is None + assert kc.get_secret_password("admin") is None + assert kc.get_secret_password("admin") is None + assert kc.get_secret_password("admin") is None + assert kc.get_secret_password("admin") == "secret" + assert kc.get_secret_password("username") is None + assert kc.get_secret_password("username") is None + assert 
kc.get_secret_password("username") is None + assert kc.get_secret_password("username") is None + assert kc.get_secret_password("username") is None + assert kc.get_secret_password("username") is None + assert kc.get_secret_password("username") is None + assert kc.get_secret_password("username") == "secret" diff --git a/kubernetes-worker/tox.ini b/kubernetes-worker/tox.ini index 9350363..a6671d4 100644 --- a/kubernetes-worker/tox.ini +++ b/kubernetes-worker/tox.ini @@ -1,18 +1,35 @@ +[flake8] +max-line-length = 88 + [tox] skipsdist = True -envlist=lint +envlist = lint,unit,functional -[flake8] -max-complexity=10 -max-line-length=120 -ignore=E203,E402,W503,E231 +[testenv] +setenv = + PYTHONPATH={toxinidir}:{toxinidir}/lib + PYTHONBREAKPOINT=ipdb.set_trace [testenv:lint] -basepython=python3 -sitepackages=False -deps= - flake8 +deps = black -commands= - flake8 {posargs:lib/ reactive/} - black --line-length=120 {posargs:lib/ reactive/} + flake8 +commands = + flake8 {toxinidir}/lib {toxinidir}/tests + black --check {toxinidir}/lib {toxinidir}/tests + +[testenv:unit] +deps = + pyyaml + pytest + charms.unit_test + ipdb +commands = pytest -vv --tb native -s {posargs} {toxinidir}/tests/unit + +[testenv:functional] +deps = + pyyaml + pytest + charms.unit_test + ipdb +commands = pytest -vv --tb native -s {posargs} {toxinidir}/tests/functional diff --git a/kubernetes-worker/version b/kubernetes-worker/version index 1dea0b1..20817dd 100644 --- a/kubernetes-worker/version +++ b/kubernetes-worker/version @@ -1 +1 @@ -e247aeff \ No newline at end of file +ccfa68be \ No newline at end of file diff --git a/kubernetes-worker/wheelhouse.txt b/kubernetes-worker/wheelhouse.txt index 39d6a8c..4d586af 100644 --- a/kubernetes-worker/wheelhouse.txt +++ b/kubernetes-worker/wheelhouse.txt @@ -3,9 +3,11 @@ # even with installing setuptools before upgrading pip ends up with pip seeing # the older setuptools at the system level if include_system_packages is true pip>=18.1,<19.0 -# pin Jinja2 and 
PyYAML to the last versions supporting python 3.4 for trusty +# pin Jinja2, PyYAML and MarkupSafe to the last versions supporting python 3.5 +# for trusty Jinja2<=2.10.1 PyYAML<=5.2 +MarkupSafe<2.0.0 setuptools<42 setuptools-scm<=1.17.0 charmhelpers>=0.4.0,<1.0.0 @@ -15,7 +17,10 @@ wheel<0.34 netaddr<=0.7.19 # layer:snap -tenacity +# Newer versions of tenacity rely on `typing` which is in stdlib in +# python3.5 but not python3.4. We want to continue to support +# python3.4 (Trusty) +tenacity<5.0.4 # kubernetes-worker charms.templating.jinja2>=0.0.1,<2.0.0 diff --git a/kubernetes-worker/wheelhouse/charmhelpers-0.20.21.tar.gz b/kubernetes-worker/wheelhouse/charmhelpers-0.20.21.tar.gz deleted file mode 100644 index ca65d07..0000000 Binary files a/kubernetes-worker/wheelhouse/charmhelpers-0.20.21.tar.gz and /dev/null differ diff --git a/kubernetes-worker/wheelhouse/charmhelpers-0.20.23.tar.gz b/kubernetes-worker/wheelhouse/charmhelpers-0.20.23.tar.gz new file mode 100644 index 0000000..8fbc8ec Binary files /dev/null and b/kubernetes-worker/wheelhouse/charmhelpers-0.20.23.tar.gz differ diff --git a/kubernetes-worker/wheelhouse/pyaml-20.4.0.tar.gz b/kubernetes-worker/wheelhouse/pyaml-20.4.0.tar.gz deleted file mode 100644 index 0d5fd76..0000000 Binary files a/kubernetes-worker/wheelhouse/pyaml-20.4.0.tar.gz and /dev/null differ diff --git a/kubernetes-worker/wheelhouse/pyaml-21.10.1.tar.gz b/kubernetes-worker/wheelhouse/pyaml-21.10.1.tar.gz new file mode 100644 index 0000000..b19aad3 Binary files /dev/null and b/kubernetes-worker/wheelhouse/pyaml-21.10.1.tar.gz differ diff --git a/kubernetes-worker/wheelhouse/six-1.15.0.tar.gz b/kubernetes-worker/wheelhouse/six-1.15.0.tar.gz deleted file mode 100644 index 63329e4..0000000 Binary files a/kubernetes-worker/wheelhouse/six-1.15.0.tar.gz and /dev/null differ diff --git a/kubernetes-worker/wheelhouse/six-1.16.0.tar.gz b/kubernetes-worker/wheelhouse/six-1.16.0.tar.gz new file mode 100644 index 0000000..5bf3a27 Binary files 
/dev/null and b/kubernetes-worker/wheelhouse/six-1.16.0.tar.gz differ diff --git a/kubernetes-worker/wheelhouse/tenacity-5.0.3.tar.gz b/kubernetes-worker/wheelhouse/tenacity-5.0.3.tar.gz new file mode 100644 index 0000000..c7d05ba Binary files /dev/null and b/kubernetes-worker/wheelhouse/tenacity-5.0.3.tar.gz differ diff --git a/kubernetes-worker/wheelhouse/tenacity-7.0.0.tar.gz b/kubernetes-worker/wheelhouse/tenacity-7.0.0.tar.gz deleted file mode 100644 index 2050c4d..0000000 Binary files a/kubernetes-worker/wheelhouse/tenacity-7.0.0.tar.gz and /dev/null differ