update kubernetest 1.22

This commit is contained in:
xiafan 2023-06-29 14:22:27 +08:00
parent d45d36c191
commit 35f601a2a0
1837 changed files with 129940 additions and 0 deletions

601
calico/.build.manifest Normal file
View File

@ -0,0 +1,601 @@
{
"layers": [
{
"branch": "refs/heads/master\nrefs/heads/stable",
"rev": "fcdcea4e5de3e1556c24e6704607862d0ba00a56",
"url": "layer:options"
},
{
"branch": "refs/heads/master\nrefs/heads/stable",
"rev": "a3ff62c32c993d80417f6e093e3ef95e42f62083",
"url": "layer:basic"
},
{
"branch": "refs/heads/master\nrefs/heads/stable",
"rev": "cc5bd3f49b2fa5e6c3ab2336763c313ec8bf083f",
"url": "layer:leadership"
},
{
"branch": "refs/heads/master\nrefs/heads/stable",
"rev": "a7d7b6423db37a47611310039e6ed1929c0a2eab",
"url": "layer:status"
},
{
"branch": "refs/heads/master\nrefs/heads/stable",
"rev": "bbeabfee52c4442cdaf3a34e5e35530a3bd71156",
"url": "layer:kubernetes-common"
},
{
"branch": "refs/heads/stable",
"rev": "96b4e06d5d35fec30cdf2cc25076dd25c51b893c",
"url": "calico"
},
{
"branch": "refs/heads/master\nrefs/heads/stable",
"rev": "44f244cbd08b86bf2b68bd71c3fb34c7c070c382",
"url": "interface:etcd"
},
{
"branch": "refs/heads/master\nrefs/heads/stable",
"rev": "88b1e8fad78d06efdbf512cd75eaa0bb308eb1c1",
"url": "interface:kubernetes-cni"
}
],
"signatures": {
".build.manifest": [
"build",
"dynamic",
"unchecked"
],
".github/workflows/main.yml": [
"layer:kubernetes-common",
"static",
"d4f8fec0456cb2fc05993253a995983488a76fbbef10c2ee40649e83d6c9e078"
],
".github/workflows/tox.yaml": [
"calico",
"static",
"8b7dba2bd100fc3dfce764499b0eba1799b58469701b032b238cb1d0055c44bb"
],
".gitignore": [
"calico",
"static",
"3437c2cd90de443f44766939172b82e750e19fd474df499ffe003bb807e8cef4"
],
"CONTRIBUTING.md": [
"calico",
"static",
"fa04ec96762f4edc071c7b0097223c121e33fd6769226562681646577d7b1146"
],
"DEVELOPING.md": [
"calico",
"static",
"ccb2d8ad4b5c328d810c53fa43b41f6641af0f002a45d548f6ed9d9f546d3dbe"
],
"LICENSE": [
"calico",
"static",
"58d1e17ffe5109a7ae296caafcadfdbe6a7d176f0bc4ab01e12a689b0499d8bd"
],
"Makefile": [
"layer:basic",
"static",
"b7ab3a34e5faf79b96a8632039a0ad0aa87f2a9b5f0ba604e007cafb22190301"
],
"README.md": [
"calico",
"static",
"d2d26569f5a63b1be2e23835346ed2e8b0b13cdd74a6efb161221d2462a58dc5"
],
"bin/charm-env": [
"layer:basic",
"static",
"fb6a20fac4102a6a4b6ffe903fcf666998f9a95a3647e6f9af7a1eeb44e58fd5"
],
"bin/layer_option": [
"layer:options",
"static",
"e959bf29da4c5edff28b2602c24113c4df9e25cdc9f2aa3b5d46c8577b2a40cc"
],
"build-calico-resource.sh": [
"calico",
"static",
"1c98f05945166e17cf9c530a6ee064092a323e5529639474b07f380210959acb"
],
"config.yaml": [
"calico",
"dynamic",
"d75dd7b4ddd803d88c5d86b14826fa7f047b8e6907885cafe37cda29afb3c13d"
],
"copyright": [
"layer:status",
"static",
"7c0e36e618a8544faaaa3f8e0533c2f1f4a18bcacbdd8b99b537742e6b587d58"
],
"copyright.layer-basic": [
"layer:basic",
"static",
"f6740d66fd60b60f2533d9fcb53907078d1e20920a0219afce7182e2a1c97629"
],
"copyright.layer-leadership": [
"layer:leadership",
"static",
"8ce407829378fc0f72ce44c7f624e4951c7ccb3db1cfb949bee026b701728cc9"
],
"copyright.layer-options": [
"layer:options",
"static",
"f6740d66fd60b60f2533d9fcb53907078d1e20920a0219afce7182e2a1c97629"
],
"docs/status.md": [
"layer:status",
"static",
"975dec9f8c938196e102e954a80226bda293407c4e5ae857c118bf692154702a"
],
"exec.d/docker-compose/charm-pre-install": [
"calico",
"static",
"2760db1047cdc4beeb974923c693bf824d45a9ee88fb50496efada92461aceb8"
],
"hooks/cni-relation-broken": [
"layer:basic",
"dynamic",
"2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7"
],
"hooks/cni-relation-changed": [
"layer:basic",
"dynamic",
"2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7"
],
"hooks/cni-relation-created": [
"layer:basic",
"dynamic",
"2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7"
],
"hooks/cni-relation-departed": [
"layer:basic",
"dynamic",
"2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7"
],
"hooks/cni-relation-joined": [
"layer:basic",
"dynamic",
"2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7"
],
"hooks/config-changed": [
"layer:basic",
"dynamic",
"2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7"
],
"hooks/etcd-relation-broken": [
"layer:basic",
"dynamic",
"2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7"
],
"hooks/etcd-relation-changed": [
"layer:basic",
"dynamic",
"2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7"
],
"hooks/etcd-relation-created": [
"layer:basic",
"dynamic",
"2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7"
],
"hooks/etcd-relation-departed": [
"layer:basic",
"dynamic",
"2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7"
],
"hooks/etcd-relation-joined": [
"layer:basic",
"dynamic",
"2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7"
],
"hooks/hook.template": [
"layer:basic",
"static",
"2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7"
],
"hooks/install": [
"layer:basic",
"dynamic",
"2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7"
],
"hooks/leader-elected": [
"layer:basic",
"dynamic",
"2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7"
],
"hooks/leader-settings-changed": [
"layer:basic",
"dynamic",
"2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7"
],
"hooks/post-series-upgrade": [
"layer:basic",
"dynamic",
"2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7"
],
"hooks/pre-series-upgrade": [
"layer:basic",
"dynamic",
"2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7"
],
"hooks/relations/etcd/.gitignore": [
"interface:etcd",
"static",
"cf237c7aff44efbe6e502e645c3e06da03a69d7bdeb43392108ef3348143417e"
],
"hooks/relations/etcd/README.md": [
"interface:etcd",
"static",
"93873d073f5f5302d352e09321aaf87458556e9730f89e1c682699c1d0db2386"
],
"hooks/relations/etcd/__init__.py": [
"interface:etcd",
"static",
"e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"
],
"hooks/relations/etcd/interface.yaml": [
"interface:etcd",
"static",
"ba9f723b57a434f7efb2c06abec4167cd412c16da5f496a477dd7691e9a715be"
],
"hooks/relations/etcd/peers.py": [
"interface:etcd",
"static",
"99419c3d139fb5bb90021e0482f9e7ac2cfb776fb7af79b46209c6a75b36e834"
],
"hooks/relations/etcd/provides.py": [
"interface:etcd",
"static",
"3db1f644ab669e2dec59d59b61de63b721bc05b38fe646e525fff8f0d60982f9"
],
"hooks/relations/etcd/requires.py": [
"interface:etcd",
"static",
"8ffc1a094807fd36a1d1428b0a07b2428074134d46086066ecd6c0acd9fcd13e"
],
"hooks/relations/kubernetes-cni/.github/workflows/tests.yaml": [
"interface:kubernetes-cni",
"static",
"d0015cd49675976ff87832f5ef7ea20ffca961786379c72bb6acdbdeddd9137c"
],
"hooks/relations/kubernetes-cni/.gitignore": [
"interface:kubernetes-cni",
"static",
"0594213ebf9c6ef87827b30405ee67d847f73f4185a865e0e5e9c0be9d29eabe"
],
"hooks/relations/kubernetes-cni/README.md": [
"interface:kubernetes-cni",
"static",
"e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"
],
"hooks/relations/kubernetes-cni/__init__.py": [
"interface:kubernetes-cni",
"static",
"e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"
],
"hooks/relations/kubernetes-cni/interface.yaml": [
"interface:kubernetes-cni",
"static",
"03affdaf7e879adfdf8c434aa31d40faa6d2872faa7dfd93a5d3a1ebae02487d"
],
"hooks/relations/kubernetes-cni/provides.py": [
"interface:kubernetes-cni",
"static",
"e436e187f2bab6e73add2b897cd43a2f000fde4726e40b772b66f27786c85dee"
],
"hooks/relations/kubernetes-cni/requires.py": [
"interface:kubernetes-cni",
"static",
"45398af27246eaf2005115bd3f270b78fc830d4345b02cc0c4d438711b7cd9fe"
],
"hooks/relations/kubernetes-cni/tox.ini": [
"interface:kubernetes-cni",
"static",
"f08626c9b65362031edb07f96f15f101bc3dda075abc64f54d1c83efd2c05e39"
],
"hooks/start": [
"layer:basic",
"dynamic",
"2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7"
],
"hooks/stop": [
"layer:basic",
"dynamic",
"2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7"
],
"hooks/update-status": [
"layer:basic",
"dynamic",
"2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7"
],
"hooks/upgrade-charm": [
"layer:basic",
"dynamic",
"2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7"
],
"icon.svg": [
"calico",
"static",
"49b68e61506d639d3c859e9477338469d1d44f7b76ad381ff152c728c71c43d9"
],
"layer.yaml": [
"calico",
"dynamic",
"3a95aaa6fd50027d9a98ad9322568cfb0c228135df7cbff79953a86d01ec533f"
],
"lib/calico_common.py": [
"calico",
"static",
"ec886f86a4505148016a540652c51afd7bf8ee4ef3b21db368e10ded2b9569be"
],
"lib/calico_upgrade.py": [
"calico",
"static",
"1200e9016b1db2f2a853033d04126adff1d4d43ccb29c48a613232e06f33a8c4"
],
"lib/charms/layer/__init__.py": [
"layer:basic",
"static",
"dfe0d26c6bf409767de6e2546bc648f150e1b396243619bad3aa0553ab7e0e6f"
],
"lib/charms/layer/basic.py": [
"layer:basic",
"static",
"98b47134770ed6e4c0b2d4aad73cd5bc200bec84aa9c1c4e075fd70c3222a0c9"
],
"lib/charms/layer/execd.py": [
"layer:basic",
"static",
"fda8bd491032db1db8ddaf4e99e7cc878c6fb5432efe1f91cadb5b34765d076d"
],
"lib/charms/layer/kubernetes_common.py": [
"layer:kubernetes-common",
"static",
"29cedffd490e6295273d195a7c9bace2fcdf149826e7427f2af9698f7f75055b"
],
"lib/charms/layer/options.py": [
"layer:options",
"static",
"8ae7a07d22542fc964f2d2bee8219d1c78a68dace70a1b38d36d4aea47b1c3b2"
],
"lib/charms/layer/status.py": [
"layer:status",
"static",
"d560a5e07b2e5f2b0f25f30e1f0278b06f3f90c01e4dbad5c83d71efc79018c6"
],
"lib/charms/leadership.py": [
"layer:leadership",
"static",
"20ffcbbc08147506759726ad51567420659ffb8a2e0121079240b8706658e332"
],
"make_docs": [
"layer:status",
"static",
"c990f55c8e879793a62ed8464ee3d7e0d7d2225fdecaf17af24b0df0e2daa8c1"
],
"metadata.yaml": [
"calico",
"dynamic",
"b1a1e252fb9eac35a8b1a10564b400a07d5c810d8ceed1a1e3460bea314886bb"
],
"pydocmd.yml": [
"layer:status",
"static",
"11d9293901f32f75f4256ae4ac2073b92ce1d7ef7b6c892ba9fbb98690a0b330"
],
"reactive/__init__.py": [
"layer:leadership",
"static",
"e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"
],
"reactive/calico.py": [
"calico",
"static",
"6b8bef93b474c95bab4d9df09b74b8082f230fa5e906b469fae66baa319472ad"
],
"reactive/leadership.py": [
"layer:leadership",
"static",
"e2b233cf861adc3b2d9e9c062134ce2f104953f03283cdddd88f49efee652e8f"
],
"reactive/status.py": [
"layer:status",
"static",
"30207fc206f24e91def5252f1c7f7c8e23c0aed0e93076babf5e03c05296d207"
],
"requirements.txt": [
"layer:basic",
"static",
"a00f75d80849e5b4fc5ad2e7536f947c25b1a4044b341caa8ee87a92d3a4c804"
],
"templates/10-calico.conflist": [
"calico",
"static",
"9332e14d9422781530cd13fef5748e3d06fcce7f4221123f625c3a7e09238abb"
],
"templates/calico-node.service": [
"calico",
"static",
"cc80a397a77f7d80740c697fcdaffd373790492b01959649587345bdcfe44fe3"
],
"templates/calicoctl": [
"calico",
"static",
"b913dfdce8de51aa9a13950817e4101f8f4229052927a272fff5b37a4370537f"
],
"templates/cdk.auth-webhook-secret.yaml": [
"layer:kubernetes-common",
"static",
"efaf34c12a5c961fa7843199070945ba05717b3656a0f3acc3327f45334bcaec"
],
"templates/policy-controller.yaml": [
"calico",
"static",
"427820ac4957c60306b3084c4426ecc84af34dc2b2a8f7c0d70242e53c27957c"
],
"tests/data/bird-operator/config.yaml": [
"calico",
"static",
"4786605f043192ab2970b7abd55c434620463248e2840a7d25a9ca31d913b416"
],
"tests/data/bird-operator/metadata.yaml": [
"calico",
"static",
"aff75a91343249cb86978512609d0e00c9d6271664b18eeed9e4ef415bd22937"
],
"tests/data/bird-operator/requirements.txt": [
"calico",
"static",
"7a70b4e7059a7d283c883288be3de0bed02d10fda4602c8de4699debcf6afbf2"
],
"tests/data/bird-operator/src/charm.py": [
"calico",
"static",
"8e0374bf6e887604e3ede4ba33d37cf0e43202b653cb3945cefff0d2a33e7a0c"
],
"tests/data/bundle.yaml": [
"calico",
"static",
"0bfb15407e4ac33c87718e20493c0eec3979d8658db85d4f38620b9fca4408bd"
],
"tests/functional/conftest.py": [
"layer:kubernetes-common",
"static",
"fd53e0c38b4dda0c18096167889cd0d85b98b0a13225f9f8853261241e94078c"
],
"tests/functional/test_k8s_common.py": [
"layer:kubernetes-common",
"static",
"680a53724154771dd78422bbaf24b151788d86dd07960712c5d9e0d758499b50"
],
"tests/integration/conftest.py": [
"calico",
"static",
"9069857cdd09798df7813cec38c1147938e9affb777d971c6b8ff3405fa726ff"
],
"tests/integration/test_calico_integration.py": [
"calico",
"static",
"65ce21b694059e6cf6b39d28cba16252a34322b77b5b33050fd46482f60d07fd"
],
"tests/unit/conftest.py": [
"calico",
"static",
"2c58cb11bf276805f586c05c20bf4ba15a7431b5c23ea3323dc4256ddc34c4d2"
],
"tests/unit/test_calico.py": [
"calico",
"static",
"2de748d396d66f5c581ade110a3f8a709e6aabe50f97502e1d0ac0ec817c223d"
],
"tests/unit/test_k8s_common.py": [
"layer:kubernetes-common",
"static",
"da9bcea8e75160311a4055c1cbf577b497ddd45dc00223c5f1667598f94d9be4"
],
"tests/validate-wheelhouse.sh": [
"calico",
"static",
"cdfd66832b110243b6fd165a75562d9b958f9741b334be2d3a7a1d05adfa6fe7"
],
"tox.ini": [
"calico",
"static",
"a96563719d29a96d41a0e91ef08da35b5e1de5aee2d5884c74d85dca7f43f2d2"
],
"version": [
"calico",
"dynamic",
"d42cce56c73a1877421efe5be4d1e7e914a99ce4e1e4b0143bd97ea895c7c629"
],
"wheelhouse.txt": [
"calico",
"dynamic",
"cb5ab8b42ebef8ae5adc80de0d7c39f84aeaa97207298aa453142bff87c39f8c"
],
"wheelhouse/Jinja2-2.10.1.tar.gz": [
"layer:basic",
"dynamic",
"065c4f02ebe7f7cf559e49ee5a95fb800a9e4528727aec6f24402a5374c65013"
],
"wheelhouse/MarkupSafe-1.1.1.tar.gz": [
"layer:basic",
"dynamic",
"29872e92839765e546828bb7754a68c418d927cd064fd4708fab9fe9c8bb116b"
],
"wheelhouse/PyYAML-5.2.tar.gz": [
"layer:basic",
"dynamic",
"c0ee8eca2c582d29c3c2ec6e2c4f703d1b7f1fb10bc72317355a746057e7346c"
],
"wheelhouse/Tempita-0.5.2.tar.gz": [
"__pip__",
"dynamic",
"cacecf0baa674d356641f1d406b8bff1d756d739c46b869a54de515d08e6fc9c"
],
"wheelhouse/charmhelpers-0.20.23.tar.gz": [
"layer:basic",
"dynamic",
"59a9776594e91cd3e3e000043f8668b4d7b279422dbb17e320f01dc16385b80e"
],
"wheelhouse/charms.reactive-1.4.1.tar.gz": [
"layer:basic",
"dynamic",
"bba21b4fd40b26c240c9ef2aa10c6fdf73592031c68591da4e7ccc46ca9cb616"
],
"wheelhouse/click-7.1.2.tar.gz": [
"calico",
"dynamic",
"d2b5255c7c6349bc1bd1e59e08cd12acbbd63ce649f2588755783aa94dfb6b1a"
],
"wheelhouse/conctl-py35-0.1.2.tar.gz": [
"__pip__",
"dynamic",
"fad07dd70e04338f2df7fa5a38448223613b87b09a571ea5d2b3c780bb1eca0b"
],
"wheelhouse/netaddr-0.7.19.tar.gz": [
"layer:basic",
"dynamic",
"38aeec7cdd035081d3a4c306394b19d677623bf76fa0913f6695127c7753aefd"
],
"wheelhouse/pbr-5.6.0.tar.gz": [
"__pip__",
"dynamic",
"42df03e7797b796625b1029c0400279c7c34fd7df24a7d7818a1abb5b38710dd"
],
"wheelhouse/pip-18.1.tar.gz": [
"layer:basic",
"dynamic",
"c0a292bd977ef590379a3f05d7b7f65135487b67470f6281289a94e015650ea1"
],
"wheelhouse/pyaml-21.10.1.tar.gz": [
"__pip__",
"dynamic",
"c6519fee13bf06e3bb3f20cacdea8eba9140385a7c2546df5dbae4887f768383"
],
"wheelhouse/setuptools-41.6.0.zip": [
"layer:basic",
"dynamic",
"6afa61b391dcd16cb8890ec9f66cc4015a8a31a6e1c2b4e0c464514be1a3d722"
],
"wheelhouse/setuptools_scm-1.17.0.tar.gz": [
"layer:basic",
"dynamic",
"70a4cf5584e966ae92f54a764e6437af992ba42ac4bca7eb37cc5d02b98ec40a"
],
"wheelhouse/six-1.16.0.tar.gz": [
"__pip__",
"dynamic",
"1e61c37477a1626458e36f7b1d82aa5c9b094fa4802892072e49de9c60c4c926"
],
"wheelhouse/wheel-0.33.6.tar.gz": [
"layer:basic",
"dynamic",
"10c9da68765315ed98850f8e048347c3eb06dd81822dc2ab1d4fde9dc9702646"
]
}
}

22
calico/.github/workflows/main.yml vendored Normal file
View File

@ -0,0 +1,22 @@
# GitHub Actions workflow: lint/unit test matrix for the calico charm.
# NOTE(review): indentation was stripped by the page extraction; restored
# here to the canonical GitHub Actions workflow structure.
name: Test Suite
on: [pull_request]
jobs:
  tests:
    name: Lint, Unit, & Func Tests
    runs-on: ubuntu-latest
    strategy:
      matrix:
        python: [3.6, 3.7, 3.8, 3.9]
    steps:
      - name: Check out code
        uses: actions/checkout@v2
      - name: Setup Python
        uses: actions/setup-python@v2
        with:
          python-version: ${{ matrix.python }}
      - name: Install Dependencies
        run: |
          pip install tox
      - name: Run lint
        run: tox

52
calico/.github/workflows/tox.yaml vendored Normal file
View File

@ -0,0 +1,52 @@
# GitHub Actions workflow: lint/unit/wheelhouse matrix plus a self-hosted
# vSphere integration test.
# NOTE(review): indentation was stripped by the page extraction; restored
# here to the canonical GitHub Actions workflow structure.
name: Run tests with Tox
on:
  push:
    branches: [master]
  pull_request:
    branches: [master]
jobs:
  lint-unit-wheelhouse:
    name: Lint, Unit, Wheelhouse
    runs-on: ubuntu-latest
    strategy:
      matrix:
        python: [3.6, 3.7, 3.8, 3.9]
    steps:
      - name: Check out code
        uses: actions/checkout@v2
      - name: Setup Python
        uses: actions/setup-python@v2
        with:
          python-version: ${{ matrix.python }}
      - name: Install Dependencies
        run: |
          pip install tox
          sudo snap install charm --classic
      - name: Lint
        run: tox -vve lint
      - name: Unit Tests
        run: tox -vve unit
      - name: Validate Wheelhouse
        run: tox -vve validate-wheelhouse
  integration-test:
    name: Integration test with VMWare
    runs-on: self-hosted
    timeout-minutes: 360
    steps:
      - name: Check out code
        uses: actions/checkout@v2
      - name: Setup Python
        uses: actions/setup-python@v2
        with:
          python-version: 3.8
      - name: Setup operator environment
        uses: charmed-kubernetes/actions-operator@master
        with:
          provider: vsphere
          credentials-yaml: ${{ secrets.CREDENTIALS_YAML }}
          clouds-yaml: ${{ secrets.CLOUDS_YAML }}
          bootstrap-options: "--model-default datastore=vsanDatastore --model-default primary-network=VLAN_2764"
      - name: Run test
        run: tox -e integration

3
calico/.gitignore vendored Normal file
View File

@ -0,0 +1,3 @@
.tox/
__pycache__/
*.pyc

38
calico/CONTRIBUTING.md Normal file
View File

@ -0,0 +1,38 @@
# Contributor Guide
This Juju charm is open source ([Apache License 2.0](./LICENSE)) and we actively seek any community contributions
for code, suggestions and documentation.
This page details a few notes, workflows and suggestions for how to make contributions most effective and help us
all build a better charm - please give them a read before working on any contributions.
## Licensing
This charm has been created under the [Apache License 2.0](./LICENSE), which will cover any contributions you may
make to this project. Please familiarise yourself with the terms of the license.
Additionally, this charm uses the Harmony CLA agreement. It's the easiest way for you to give us permission to
use your contributions.
In effect, you're giving us a license, but you still own the copyright — so you retain the right to modify your
code and use it in other projects. Please [sign the CLA here](https://ubuntu.com/legal/contributors/agreement) before
making any contributions.
## Code of conduct
We have adopted the Ubuntu Code of Conduct. You can read this in full [here](https://ubuntu.com/community/code-of-conduct).
## Contributing code
The [DEVELOPING.md](./DEVELOPING.md) page has some useful information regarding building and testing. To contribute code
to this project, the workflow is as follows:
1. [Submit a bug](https://bugs.launchpad.net/charm-calico/+filebug) to explain the need for and track the change.
2. Create a branch on your fork of the repo with your changes, including a unit test covering the new or modified code.
3. Submit a PR. The PR description should include a link to the bug on Launchpad.
4. Update the Launchpad bug to include a link to the PR and the `review-needed` tag.
5. Once reviewed and merged, the change will become available on the edge channel and assigned to an appropriate milestone
for further release according to priority.
## Documentation
Documentation for this charm is currently maintained as part of the Charmed Kubernetes docs.
See [this page](https://github.com/charmed-kubernetes/kubernetes-docs/blob/master/pages/k8s/charm-calico.md)

62
calico/DEVELOPING.md Normal file
View File

@ -0,0 +1,62 @@
# Developing layer-calico
## Installing build dependencies
To install build dependencies:
```
sudo snap install charm --classic
sudo apt install docker.io
sudo usermod -aG docker $USER
```
After running these commands, terminate your shell session and start a new one
to pick up the modified user groups.
## Building the charm
To build the charm:
```
charm build
```
By default, this will build the charm and place it in
`/tmp/charm-builds/calico`.
## Building resources
To build resources:
```
./build-calico-resource.sh
```
This will produce several .tar.gz files that you will need to attach to the
charm when you deploy it.
## Testing
You can test a locally built calico charm by deploying it with Charmed
Kubernetes.
Create a file named `local-calico.yaml` that contains the following (with paths
adjusted to fit your environment):
```
applications:
calico:
charm: /tmp/charm-builds/calico
resources:
calico: /path/to/layer-calico/calico-amd64.tar.gz
calico-upgrade: /path/to/layer-calico/calico-upgrade-amd64.tar.gz
```
Then deploy Charmed Kubernetes with your locally built calico charm:
```
juju deploy cs:~containers/kubernetes-calico --overlay local-calico.yaml
```
## Helpful links
* [Getting Started with charm development](https://jaas.ai/docs/getting-started-with-charm-development)
* [Charm tools documentation](https://jaas.ai/docs/charm-tools)
* [Charmed Kubernetes Calico documentation](https://ubuntu.com/kubernetes/docs/cni-calico)

202
calico/LICENSE Normal file
View File

@ -0,0 +1,202 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

24
calico/Makefile Normal file
View File

@ -0,0 +1,24 @@
#!/usr/bin/make
# Build/test entry points for the calico charm.
# NOTE(review): recipe indentation (hard tabs, required by make) was stripped
# by the page extraction; restored here.

# Default target: run lint then the unit tests.
all: lint unit_test

.PHONY: clean
clean:
	@rm -rf .tox

.PHONY: apt_prereqs
apt_prereqs:
	@# Need tox, but don't install the apt version unless we have to (don't want to conflict with pip)
	@which tox >/dev/null || (sudo apt-get install -y python-pip && sudo pip install tox)

.PHONY: lint
lint: apt_prereqs
	@tox --notest
	@PATH=.tox/py34/bin:.tox/py35/bin flake8 $(wildcard hooks reactive lib unit_tests tests)
	@charm proof

.PHONY: unit_test
unit_test: apt_prereqs
	@echo Starting tests...
	tox

22
calico/README.md Normal file
View File

@ -0,0 +1,22 @@
# Calico Charm
Calico is a new approach to virtual networking and network security for containers,
VMs, and bare metal services, that provides a rich set of security enforcement
capabilities running on top of a highly scalable and efficient virtual network fabric.
This charm will deploy calico as a background service, and configure CNI for
use with calico, on any principal charm that implements the [kubernetes-cni][]
interface.
This charm is a component of Charmed Kubernetes. For full information,
please visit the [official Charmed Kubernetes docs](https://www.ubuntu.com/kubernetes/docs/charm-calico).
[kubernetes-cni]: https://github.com/juju-solutions/interface-kubernetes-cni
# Developers
## Build charm
```
make charm
```

107
calico/bin/charm-env Executable file
View File

@ -0,0 +1,107 @@
#!/bin/bash
# charm-env: locate a charm's directory and virtualenv, export
# JUJU_CHARM_DIR / CHARM_DIR / PYTHONPATH, then exec or source the wrapped tool.

VERSION="1.0.0"

# Populate JUJU_CHARM_DIR (and CHARM_DIR) from the environment if possible,
# otherwise by searching the Juju agents directory. Exits non-zero when the
# charm directory cannot be determined unambiguously.
# $1 (optional): charm name used to disambiguate between multiple charm dirs.
find_charm_dirs() {
    # Hopefully, $JUJU_CHARM_DIR is set so which venv to use is unambiguous.
    if [[ -n "$JUJU_CHARM_DIR" || -n "$CHARM_DIR" ]]; then
        if [[ -z "$JUJU_CHARM_DIR" ]]; then
            # accept $CHARM_DIR to be more forgiving
            export JUJU_CHARM_DIR="$CHARM_DIR"
        fi
        if [[ -z "$CHARM_DIR" ]]; then
            # set CHARM_DIR as well to help with backwards compatibility
            export CHARM_DIR="$JUJU_CHARM_DIR"
        fi
        return
    fi
    # Try to guess the value for JUJU_CHARM_DIR by looking for a non-subordinate
    # (because there's got to be at least one principal) charm directory;
    # if there are several, pick the first by alpha order.
    agents_dir="/var/lib/juju/agents"
    if [[ -d "$agents_dir" ]]; then
        desired_charm="$1"
        found_charm_dir=""
        if [[ -n "$desired_charm" ]]; then
            # A charm name was given: match it against each unit's metadata.yaml
            # "name:" field (quotes around key/value are tolerated and stripped).
            for charm_dir in $(/bin/ls -d "$agents_dir"/unit-*/charm); do
                charm_name="$(grep -o '^['\''"]\?name['\''"]\?:.*' $charm_dir/metadata.yaml 2> /dev/null | sed -e 's/.*: *//' -e 's/['\''"]//g')"
                if [[ "$charm_name" == "$desired_charm" ]]; then
                    if [[ -n "$found_charm_dir" ]]; then
                        >&2 echo "Ambiguous possibilities for JUJU_CHARM_DIR matching '$desired_charm'; please run within a Juju hook context"
                        exit 1
                    fi
                    found_charm_dir="$charm_dir"
                fi
            done
            if [[ -z "$found_charm_dir" ]]; then
                >&2 echo "Unable to determine JUJU_CHARM_DIR matching '$desired_charm'; please run within a Juju hook context"
                exit 1
            fi
            export JUJU_CHARM_DIR="$found_charm_dir"
            export CHARM_DIR="$found_charm_dir"
            return
        fi
        # No name given: only usable when exactly one non-subordinate charm exists.
        # shellcheck disable=SC2126
        non_subordinates="$(grep -L 'subordinate"\?:.*true' "$agents_dir"/unit-*/charm/metadata.yaml | wc -l)"
        if [[ "$non_subordinates" -gt 1 ]]; then
            >&2 echo 'Ambiguous possibilities for JUJU_CHARM_DIR; please use --charm or run within a Juju hook context'
            exit 1
        elif [[ "$non_subordinates" -eq 1 ]]; then
            for charm_dir in $(/bin/ls -d "$agents_dir"/unit-*/charm); do
                if grep -q 'subordinate"\?:.*true' "$charm_dir/metadata.yaml"; then
                    continue
                fi
                export JUJU_CHARM_DIR="$charm_dir"
                export CHARM_DIR="$charm_dir"
                return
            done
        fi
    fi
    >&2 echo 'Unable to determine JUJU_CHARM_DIR; please run within a Juju hook context'
    exit 1
}

# Activate the charm's virtualenv if one exists alongside the charm directory.
try_activate_venv() {
    if [[ -d "$JUJU_CHARM_DIR/../.venv" ]]; then
        . "$JUJU_CHARM_DIR/../.venv/bin/activate"
    fi
}

# Locate the real command shadowed by this wrapper: strip /usr/local/sbin
# (presumably where these wrapper symlinks are installed — confirm) from PATH,
# then look up the same basename.
find_wrapped() {
    PATH="${PATH/\/usr\/local\/sbin:}" which "$(basename "$0")"
}

if [[ "$1" == "--version" || "$1" == "-v" ]]; then
    echo "$VERSION"
    exit 0
fi

# allow --charm option to hint which JUJU_CHARM_DIR to choose when ambiguous
# NB: --charm option must come first
# NB: option must be processed outside find_charm_dirs to modify $@
charm_name=""
if [[ "$1" == "--charm" ]]; then
    charm_name="$2"
    shift; shift
fi

find_charm_dirs "$charm_name"
try_activate_venv
export PYTHONPATH="$JUJU_CHARM_DIR/lib:$PYTHONPATH"

if [[ "$(basename "$0")" == "charm-env" ]]; then
    # being used as a shebang
    exec "$@"
elif [[ "$0" == "$BASH_SOURCE" ]]; then
    # being invoked as a symlink wrapping something to find in the venv
    exec "$(find_wrapped)" "$@"
elif [[ "$(basename "$BASH_SOURCE")" == "charm-env" ]]; then
    # being sourced directly; do nothing
    /bin/true
else
    # being sourced for wrapped bash helpers
    . "$(find_wrapped)"
fi

22
calico/bin/layer_option Executable file
View File

@ -0,0 +1,22 @@
#!/usr/bin/env python3
"""Print a charm layer option to stdout.

Lists are printed one element per line; booleans are conveyed via the exit
status (0 for true, 1 for false) rather than printed; anything else is
printed as-is.
"""

import argparse
import sys

from charms import layer

arg_parser = argparse.ArgumentParser(description='Access layer options.')
arg_parser.add_argument('section',
                        help='the section, or layer, the option is from')
arg_parser.add_argument('option',
                        help='the option to access')
cli = arg_parser.parse_args()

result = layer.options.get(cli.section, cli.option)
if isinstance(result, bool):
    # Booleans map to the process exit code, not stdout.
    sys.exit(0 if result else 1)
elif isinstance(result, list):
    for item in result:
        print(item)
else:
    print(result)

112
calico/build-calico-resource.sh Executable file
View File

@ -0,0 +1,112 @@
#!/bin/bash
set -eux

# This script will fetch binaries and create resource tarballs for use by
# charm-[push|release]. The arm64 binaries are not available upstream for
# v2.6, so we must build them and host them somewhere ourselves. The steps
# for doing that are documented here:
#
# https://gist.github.com/kwmonroe/9b5f8dac2c17f93629a1a3868b22d671

# Supported calico architectures
arches="amd64 arm64"
calicoctl_version="v3.10.1"
calico_cni_version="v3.10.1"

# fetch_and_validate LOCATION
# Fetch a binary (via wget for http(s) URLs, scp otherwise) into the current
# directory and make sure it's what we expect (an executable > 20MB).
# Exits non-zero on a missing argument or a failed validation.
function fetch_and_validate() {
  min_bytes=20000000
  location="${1-}"
  if [ -z "${location}" ]; then
    echo "$0: Missing location parameter for fetch_and_validate"
    exit 1
  fi

  # remove everything up until the last slash to get the filename
  filename="${location##*/}"

  case ${location} in
    http*)
      fetch_cmd="wget ${location} -O ./${filename}"
      ;;
    *)
      fetch_cmd="scp ${location} ./${filename}"
      ;;
  esac
  ${fetch_cmd}

  # Make sure we fetched something big enough
  actual_bytes=$(wc -c < "${filename}")
  if [ "$actual_bytes" -le "$min_bytes" ]; then
    echo "$0: ${filename} should be at least ${min_bytes} bytes"
    exit 1
  fi

  # Make sure we fetched a binary
  if ! file "${filename}" 2>&1 | grep -q 'executable'; then
    echo "$0: ${filename} is not an executable"
    exit 1
  fi
}

# Build the per-arch calicoctl/calico/calico-ipam resource tarballs.
for arch in ${arches}; do
  rm -rf resource-build-$arch
  mkdir resource-build-$arch
  pushd resource-build-$arch
  fetch_and_validate \
    https://github.com/projectcalico/calicoctl/releases/download/$calicoctl_version/calicoctl-linux-$arch
  fetch_and_validate \
    https://github.com/projectcalico/cni-plugin/releases/download/$calico_cni_version/calico-$arch
  fetch_and_validate \
    https://github.com/projectcalico/cni-plugin/releases/download/$calico_cni_version/calico-ipam-$arch
  mv calicoctl-linux-$arch calicoctl
  mv calico-$arch calico
  mv calico-ipam-$arch calico-ipam
  chmod +x calicoctl calico calico-ipam
  tar -zcvf ../calico-$arch.tar.gz .
  popd
  rm -rf resource-build-$arch
done

# calico-upgrade resource
for arch in ${arches}; do
  rm -rf resource-build-upgrade
  mkdir resource-build-upgrade
  pushd resource-build-upgrade
  if [ $arch = amd64 ]; then
    fetch_and_validate \
      https://github.com/projectcalico/calico-upgrade/releases/download/v1.0.5/calico-upgrade
    chmod +x calico-upgrade
  elif [ $arch = arm64 ]; then
    # git clone https://github.com/projectcalico/calico-upgrade repo
    # pushd repo
    # git checkout 2de2f7a0f26ef3bb1c2cabf06b2dcbcc2bba1d35 # known good commit
    # make build ARCH=arm64
    # popd
    # mv repo/dist/calico-upgrade-linux-$arch ./calico-upgrade

    # arm64 builds are failing due to an upstream issue:
    # https://github.com/projectcalico/calico-upgrade/issues/42
    # For now, we will pull a previously built binary from the charm store.
    wget https://api.jujucharms.com/charmstore/v5/~containers/calico-698/resource/calico-upgrade-arm64/462 \
      -O calico-upgrade-arm64.tar.gz
    tar -xf calico-upgrade-arm64.tar.gz
    # Pin the prebuilt binary to a known-good checksum before packaging it.
    checksum="$(sha256sum calico-upgrade)"
    if [ "$checksum" != "7a07816c26ad19f526ab2f57353043dabd708a48185268b41493e458c59b797d  calico-upgrade" ]; then
      echo 'ERROR: checksum does not match, aborting'
      exit 1
    fi
  else
    echo "Unsupported architecture for calico-upgrade: $arch"
    exit 1
  fi
  tar -zcvf ../calico-upgrade-$arch.tar.gz ./calico-upgrade
  popd
  rm -rf resource-build-upgrade
done

# calico-upgrade arm64
rm -rf resource-build-upgrade-arm64

# Placeholder resource; the real image tarball is attached separately.
touch calico-node-image.tar.gz

163
calico/config.yaml Normal file
View File

@ -0,0 +1,163 @@
"options":
"bgp-service-cluster-ips":
"type": "string"
"description": |
Space-separated list of service cluster CIDRs to advertise over BGP.
These will be passed to the .spec.serviceClusterIPs field of the default
BGPConfiguration in Calico.
Example value: "10.0.0.0/24 10.0.1.0/24"
"default": ""
"bgp-service-external-ips":
"type": "string"
"description": |
Space-separated list of service external CIDRs to advertise over BGP.
These will be passed to the .spec.serviceExternalIPs field of the default
BGPConfiguration in Calico.
Example value: "10.0.0.0/24 10.0.1.0/24"
"default": ""
"calico-node-image":
"type": "string"
# Please refer to layer-canal/versioning.md before changing the version below.
"default": "rocks.canonical.com:443/cdk/calico/node:v3.10.1"
"description": |
The image id to use for calico/node.
"calico-policy-image":
"type": "string"
"default": "rocks.canonical.com:443/cdk/calico/kube-controllers:v3.10.1"
"description": |
The image id to use for calico/kube-controllers.
"ipip":
"type": "string"
"default": "Never"
"description": |
IPIP encapsulation mode. Must be one of "Always", "CrossSubnet", or "Never".
This is incompatible with VXLAN encapsulation. If VXLAN encapsulation is
enabled, then this must be set to "Never".
"vxlan":
"type": "string"
"default": "Never"
"description": |
VXLAN encapsulation mode. Must be one of "Always", "CrossSubnet", or "Never".
This is incompatible with IPIP encapsulation. If IPIP encapsulation is
enabled, then this must be set to "Never".
"veth-mtu":
"type": "int"
"default": !!null ""
"description": |
Set veth MTU size. This should be set to the MTU size of the base network.
If VXLAN is enabled, then the charm will automatically subtract 50 from the
specified MTU size.
If IPIP is enabled, then the charm will automatically subtract 20 from the
specified MTU size.
"nat-outgoing":
"type": "boolean"
"default": !!bool "true"
"description": |
NAT outgoing traffic
"cidr":
"type": "string"
"default": "192.168.0.0/16"
"description": |
Network CIDR assigned to Calico. This is applied to the default Calico
pool, and is also communicated to the Kubernetes charms for use in
kube-proxy configuration.
"manage-pools":
"type": "boolean"
"default": !!bool "true"
"description": |
If true, a default pool is created using the cidr and ipip charm
configuration values.
Warning: When manage-pools is enabled, the charm will delete any pools
that are unrecognized.
"global-as-number":
"type": "int"
"default": !!int "64512"
"description": |
Global AS number.
"subnet-as-numbers":
"type": "string"
"default": "{}"
"description": |
Mapping of subnets to AS numbers, specified as YAML. Each Calico node
will be assigned an AS number based on the entries in this mapping.
Example value: "{10.0.0.0/24: 64512, 10.0.1.0/24: 64513}"
If a node's IP matches any of the specified subnets, then the
corresponding AS number is used instead of the global one.
If a node's IP matches no subnets, then the global AS number will be
used instead.
If a node's IP matches multiple subnets, then the most specific subnet
will be used, e.g. a /24 subnet will take precedence over a /16.
"unit-as-numbers":
"type": "string"
"default": "{}"
"description": |
Mapping of unit IDs to AS numbers, specified as YAML. Each Calico node
will be assigned an AS number based on the entries in this mapping.
Example value: "{0: 64512, 1: 64513}"
This takes precedence over global-as-number and subnet-as-numbers.
"node-to-node-mesh":
"type": "boolean"
"default": !!bool "true"
"description": |
When enabled, each Calico node will peer with every other Calico node in
the cluster.
"global-bgp-peers":
"type": "string"
"default": "[]"
"description": |
List of global BGP peers. Each BGP peer is specified with an address and
an as-number.
Example value: "[{address: 10.0.0.1, as-number: 65000}, {address: 10.0.0.2, as-number: 65001}]"
"subnet-bgp-peers":
"type": "string"
"default": "{}"
"description": |
Mapping of subnets to lists of BGP peers. Each BGP peer is specified with
an address and an as-number.
Example value: "{10.0.0.0/24: [{address: 10.0.0.1, as-number: 65000}, {address: 10.0.0.2, as-number: 65001}], 10.0.1.0/24: [{address: 10.0.1.1, as-number: 65002}]}"
If a node's IP matches multiple subnets, then peerings will be added for
each matched subnet.
"unit-bgp-peers":
"type": "string"
"default": "{}"
"description": |
Mapping of unit IDs to lists of BGP peers. Each BGP peer is specified
with an address and an as-number.
Example value: "{0: [{address: 10.0.0.1, as-number: 65000}, {address: 10.0.0.2, as-number: 65001}], 1: [{address: 10.0.1.1, as-number: 65002}]}"
"route-reflector-cluster-ids":
"type": "string"
"default": "{}"
"description": |
Mapping of unit IDs to route reflector cluster IDs. Assigning a route
reflector cluster ID allows the node to function as a route reflector.
Example value: "{0: 224.0.0.1, 2: 224.0.0.1}"
"ignore-loose-rpf":
"type": "boolean"
"default": !!bool "false"
"description": |
Enable or disable IgnoreLooseRPF for Calico Felix. This is only used
when rp_filter is set to a value of 2.
"disable-vxlan-tx-checksumming":
"type": "boolean"
"default": !!bool "true"
"description": |
When set to true, if VXLAN encapsulation is in use, then the charm will
disable TX checksumming on the vxlan.calico network interface. This works
around an upstream issue in Calico:
https://github.com/projectcalico/calico/issues/3145

16
calico/copyright Normal file
View File

@ -0,0 +1,16 @@
Format: http://dep.debian.net/deps/dep5/
Files: *
Copyright: Copyright 2018, Canonical Ltd., All Rights Reserved.
License: Apache License 2.0
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
.
http://www.apache.org/licenses/LICENSE-2.0
.
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

View File

@ -0,0 +1,16 @@
Format: http://dep.debian.net/deps/dep5/
Files: *
Copyright: Copyright 2015-2017, Canonical Ltd., All Rights Reserved.
License: Apache License 2.0
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
.
http://www.apache.org/licenses/LICENSE-2.0
.
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

View File

@ -0,0 +1,15 @@
Copyright 2015-2016 Canonical Ltd.
This file is part of the Leadership Layer for Juju.
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License version 3, as
published by the Free Software Foundation.
This program is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranties of
MERCHANTABILITY, SATISFACTORY QUALITY, or FITNESS FOR A PARTICULAR
PURPOSE. See the GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.

View File

@ -0,0 +1,16 @@
Format: http://dep.debian.net/deps/dep5/
Files: *
Copyright: Copyright 2015-2017, Canonical Ltd., All Rights Reserved.
License: Apache License 2.0
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
.
http://www.apache.org/licenses/LICENSE-2.0
.
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

91
calico/docs/status.md Normal file
View File

@ -0,0 +1,91 @@
<h1 id="charms.layer.status.WorkloadState">WorkloadState</h1>
```python
WorkloadState(self, /, *args, **kwargs)
```
Enum of the valid workload states.
Valid options are:
* `WorkloadState.MAINTENANCE`
* `WorkloadState.BLOCKED`
* `WorkloadState.WAITING`
* `WorkloadState.ACTIVE`
<h1 id="charms.layer.status.maintenance">maintenance</h1>
```python
maintenance(message)
```
Set the status to the `MAINTENANCE` state with the given operator message.
__Parameters__
- __`message` (str)__: Message to convey to the operator.
<h1 id="charms.layer.status.maint">maint</h1>
```python
maint(message)
```
Shorthand alias for
[maintenance](status.md#charms.layer.status.maintenance).
__Parameters__
- __`message` (str)__: Message to convey to the operator.
<h1 id="charms.layer.status.blocked">blocked</h1>
```python
blocked(message)
```
Set the status to the `BLOCKED` state with the given operator message.
__Parameters__
- __`message` (str)__: Message to convey to the operator.
<h1 id="charms.layer.status.waiting">waiting</h1>
```python
waiting(message)
```
Set the status to the `WAITING` state with the given operator message.
__Parameters__
- __`message` (str)__: Message to convey to the operator.
<h1 id="charms.layer.status.active">active</h1>
```python
active(message)
```
Set the status to the `ACTIVE` state with the given operator message.
__Parameters__
- __`message` (str)__: Message to convey to the operator.
<h1 id="charms.layer.status.status_set">status_set</h1>
```python
status_set(workload_state, message)
```
Set the status to the given workload state with a message.
__Parameters__
- __`workload_state` (WorkloadState or str)__: State of the workload. Should be
a [WorkloadState](status.md#charms.layer.status.WorkloadState) enum
member, or the string value of one of those members.
- __`message` (str)__: Message to convey to the operator.

View File

@ -0,0 +1,4 @@
# This stubs out charm-pre-install coming from layer-docker as a workaround for
# offline installs until https://github.com/juju/charm-tools/issues/301 is fixed.

View File

@ -0,0 +1,22 @@
#!/usr/bin/env python3
# Juju hook entry point: bootstrap the charm's Python dependencies, then
# hand control to the reactive framework to run the appropriate handlers.

# Load modules from $JUJU_CHARM_DIR/lib
import sys
sys.path.append('lib')

from charms.layer import basic  # noqa
# Ensure charm dependencies are bootstrapped before further charm imports.
basic.bootstrap_charm_deps()

from charmhelpers.core import hookenv  # noqa
# Register config-state setup/teardown around this hook invocation.
hookenv.atstart(basic.init_config_states)
hookenv.atexit(basic.clear_config_states)

# This will load and run the appropriate @hook and other decorated
# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive,
# and $JUJU_CHARM_DIR/hooks/relations.
#
# See https://jujucharms.com/docs/stable/authors-charm-building
# for more information on this pattern.
from charms.reactive import main  # noqa
main()

View File

@ -0,0 +1,22 @@
#!/usr/bin/env python3
# Juju hook entry point: bootstrap the charm's Python dependencies, then
# hand control to the reactive framework to run the appropriate handlers.

# Load modules from $JUJU_CHARM_DIR/lib
import sys
sys.path.append('lib')

from charms.layer import basic  # noqa
# Ensure charm dependencies are bootstrapped before further charm imports.
basic.bootstrap_charm_deps()

from charmhelpers.core import hookenv  # noqa
# Register config-state setup/teardown around this hook invocation.
hookenv.atstart(basic.init_config_states)
hookenv.atexit(basic.clear_config_states)

# This will load and run the appropriate @hook and other decorated
# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive,
# and $JUJU_CHARM_DIR/hooks/relations.
#
# See https://jujucharms.com/docs/stable/authors-charm-building
# for more information on this pattern.
from charms.reactive import main  # noqa
main()

View File

@ -0,0 +1,22 @@
#!/usr/bin/env python3
# Juju hook entry point: bootstrap the charm's Python dependencies, then
# hand control to the reactive framework to run the appropriate handlers.

# Load modules from $JUJU_CHARM_DIR/lib
import sys
sys.path.append('lib')

from charms.layer import basic  # noqa
# Ensure charm dependencies are bootstrapped before further charm imports.
basic.bootstrap_charm_deps()

from charmhelpers.core import hookenv  # noqa
# Register config-state setup/teardown around this hook invocation.
hookenv.atstart(basic.init_config_states)
hookenv.atexit(basic.clear_config_states)

# This will load and run the appropriate @hook and other decorated
# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive,
# and $JUJU_CHARM_DIR/hooks/relations.
#
# See https://jujucharms.com/docs/stable/authors-charm-building
# for more information on this pattern.
from charms.reactive import main  # noqa
main()

View File

@ -0,0 +1,22 @@
#!/usr/bin/env python3
# Juju hook entry point: bootstrap the charm's Python dependencies, then
# hand control to the reactive framework to run the appropriate handlers.

# Load modules from $JUJU_CHARM_DIR/lib
import sys
sys.path.append('lib')

from charms.layer import basic  # noqa
# Ensure charm dependencies are bootstrapped before further charm imports.
basic.bootstrap_charm_deps()

from charmhelpers.core import hookenv  # noqa
# Register config-state setup/teardown around this hook invocation.
hookenv.atstart(basic.init_config_states)
hookenv.atexit(basic.clear_config_states)

# This will load and run the appropriate @hook and other decorated
# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive,
# and $JUJU_CHARM_DIR/hooks/relations.
#
# See https://jujucharms.com/docs/stable/authors-charm-building
# for more information on this pattern.
from charms.reactive import main  # noqa
main()

View File

@ -0,0 +1,22 @@
#!/usr/bin/env python3
# Juju hook entry point: bootstrap the charm's Python dependencies, then
# hand control to the reactive framework to run the appropriate handlers.

# Load modules from $JUJU_CHARM_DIR/lib
import sys
sys.path.append('lib')

from charms.layer import basic  # noqa
# Ensure charm dependencies are bootstrapped before further charm imports.
basic.bootstrap_charm_deps()

from charmhelpers.core import hookenv  # noqa
# Register config-state setup/teardown around this hook invocation.
hookenv.atstart(basic.init_config_states)
hookenv.atexit(basic.clear_config_states)

# This will load and run the appropriate @hook and other decorated
# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive,
# and $JUJU_CHARM_DIR/hooks/relations.
#
# See https://jujucharms.com/docs/stable/authors-charm-building
# for more information on this pattern.
from charms.reactive import main  # noqa
main()

22
calico/hooks/config-changed Executable file
View File

@ -0,0 +1,22 @@
#!/usr/bin/env python3
# Juju hook entry point: bootstrap the charm's Python dependencies, then
# hand control to the reactive framework to run the appropriate handlers.

# Load modules from $JUJU_CHARM_DIR/lib
import sys
sys.path.append('lib')

from charms.layer import basic  # noqa
# Ensure charm dependencies are bootstrapped before further charm imports.
basic.bootstrap_charm_deps()

from charmhelpers.core import hookenv  # noqa
# Register config-state setup/teardown around this hook invocation.
hookenv.atstart(basic.init_config_states)
hookenv.atexit(basic.clear_config_states)

# This will load and run the appropriate @hook and other decorated
# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive,
# and $JUJU_CHARM_DIR/hooks/relations.
#
# See https://jujucharms.com/docs/stable/authors-charm-building
# for more information on this pattern.
from charms.reactive import main  # noqa
main()

View File

@ -0,0 +1,22 @@
#!/usr/bin/env python3
# Juju hook entry point: bootstrap the charm's Python dependencies, then
# hand control to the reactive framework to run the appropriate handlers.

# Load modules from $JUJU_CHARM_DIR/lib
import sys
sys.path.append('lib')

from charms.layer import basic  # noqa
# Ensure charm dependencies are bootstrapped before further charm imports.
basic.bootstrap_charm_deps()

from charmhelpers.core import hookenv  # noqa
# Register config-state setup/teardown around this hook invocation.
hookenv.atstart(basic.init_config_states)
hookenv.atexit(basic.clear_config_states)

# This will load and run the appropriate @hook and other decorated
# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive,
# and $JUJU_CHARM_DIR/hooks/relations.
#
# See https://jujucharms.com/docs/stable/authors-charm-building
# for more information on this pattern.
from charms.reactive import main  # noqa
main()

View File

@ -0,0 +1,22 @@
#!/usr/bin/env python3
# Juju hook entry point: bootstrap the charm's Python dependencies, then
# hand control to the reactive framework to run the appropriate handlers.

# Load modules from $JUJU_CHARM_DIR/lib
import sys
sys.path.append('lib')

from charms.layer import basic  # noqa
# Ensure charm dependencies are bootstrapped before further charm imports.
basic.bootstrap_charm_deps()

from charmhelpers.core import hookenv  # noqa
# Register config-state setup/teardown around this hook invocation.
hookenv.atstart(basic.init_config_states)
hookenv.atexit(basic.clear_config_states)

# This will load and run the appropriate @hook and other decorated
# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive,
# and $JUJU_CHARM_DIR/hooks/relations.
#
# See https://jujucharms.com/docs/stable/authors-charm-building
# for more information on this pattern.
from charms.reactive import main  # noqa
main()

View File

@ -0,0 +1,22 @@
#!/usr/bin/env python3
# Juju hook entry point: bootstrap the charm's Python dependencies, then
# hand control to the reactive framework to run the appropriate handlers.

# Load modules from $JUJU_CHARM_DIR/lib
import sys
sys.path.append('lib')

from charms.layer import basic  # noqa
# Ensure charm dependencies are bootstrapped before further charm imports.
basic.bootstrap_charm_deps()

from charmhelpers.core import hookenv  # noqa
# Register config-state setup/teardown around this hook invocation.
hookenv.atstart(basic.init_config_states)
hookenv.atexit(basic.clear_config_states)

# This will load and run the appropriate @hook and other decorated
# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive,
# and $JUJU_CHARM_DIR/hooks/relations.
#
# See https://jujucharms.com/docs/stable/authors-charm-building
# for more information on this pattern.
from charms.reactive import main  # noqa
main()

View File

@ -0,0 +1,22 @@
#!/usr/bin/env python3
# Juju hook entry point: bootstrap the charm's Python dependencies, then
# hand control to the reactive framework to run the appropriate handlers.

# Load modules from $JUJU_CHARM_DIR/lib
import sys
sys.path.append('lib')

from charms.layer import basic  # noqa
# Ensure charm dependencies are bootstrapped before further charm imports.
basic.bootstrap_charm_deps()

from charmhelpers.core import hookenv  # noqa
# Register config-state setup/teardown around this hook invocation.
hookenv.atstart(basic.init_config_states)
hookenv.atexit(basic.clear_config_states)

# This will load and run the appropriate @hook and other decorated
# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive,
# and $JUJU_CHARM_DIR/hooks/relations.
#
# See https://jujucharms.com/docs/stable/authors-charm-building
# for more information on this pattern.
from charms.reactive import main  # noqa
main()

View File

@ -0,0 +1,22 @@
#!/usr/bin/env python3
# Juju hook entry point: bootstrap the charm's Python dependencies, then
# hand control to the reactive framework to run the appropriate handlers.

# Load modules from $JUJU_CHARM_DIR/lib
import sys
sys.path.append('lib')

from charms.layer import basic  # noqa
# Ensure charm dependencies are bootstrapped before further charm imports.
basic.bootstrap_charm_deps()

from charmhelpers.core import hookenv  # noqa
# Register config-state setup/teardown around this hook invocation.
hookenv.atstart(basic.init_config_states)
hookenv.atexit(basic.clear_config_states)

# This will load and run the appropriate @hook and other decorated
# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive,
# and $JUJU_CHARM_DIR/hooks/relations.
#
# See https://jujucharms.com/docs/stable/authors-charm-building
# for more information on this pattern.
from charms.reactive import main  # noqa
main()

View File

@ -0,0 +1,22 @@
#!/usr/bin/env python3
# Juju hook entry point: bootstrap the charm's Python dependencies, then
# hand control to the reactive framework to run the appropriate handlers.

# Load modules from $JUJU_CHARM_DIR/lib
import sys
sys.path.append('lib')

from charms.layer import basic  # noqa
# Ensure charm dependencies are bootstrapped before further charm imports.
basic.bootstrap_charm_deps()

from charmhelpers.core import hookenv  # noqa
# Register config-state setup/teardown around this hook invocation.
hookenv.atstart(basic.init_config_states)
hookenv.atexit(basic.clear_config_states)

# This will load and run the appropriate @hook and other decorated
# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive,
# and $JUJU_CHARM_DIR/hooks/relations.
#
# See https://jujucharms.com/docs/stable/authors-charm-building
# for more information on this pattern.
from charms.reactive import main  # noqa
main()

22
calico/hooks/install Executable file
View File

@ -0,0 +1,22 @@
#!/usr/bin/env python3
# Juju hook entry point: bootstrap the charm's Python dependencies, then
# hand control to the reactive framework to run the appropriate handlers.

# Load modules from $JUJU_CHARM_DIR/lib
import sys
sys.path.append('lib')

from charms.layer import basic  # noqa
# Ensure charm dependencies are bootstrapped before further charm imports.
basic.bootstrap_charm_deps()

from charmhelpers.core import hookenv  # noqa
# Register config-state setup/teardown around this hook invocation.
hookenv.atstart(basic.init_config_states)
hookenv.atexit(basic.clear_config_states)

# This will load and run the appropriate @hook and other decorated
# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive,
# and $JUJU_CHARM_DIR/hooks/relations.
#
# See https://jujucharms.com/docs/stable/authors-charm-building
# for more information on this pattern.
from charms.reactive import main  # noqa
main()

22
calico/hooks/leader-elected Executable file
View File

@ -0,0 +1,22 @@
#!/usr/bin/env python3
# Juju hook entry point: bootstrap the charm's Python dependencies, then
# hand control to the reactive framework to run the appropriate handlers.

# Load modules from $JUJU_CHARM_DIR/lib
import sys
sys.path.append('lib')

from charms.layer import basic  # noqa
# Ensure charm dependencies are bootstrapped before further charm imports.
basic.bootstrap_charm_deps()

from charmhelpers.core import hookenv  # noqa
# Register config-state setup/teardown around this hook invocation.
hookenv.atstart(basic.init_config_states)
hookenv.atexit(basic.clear_config_states)

# This will load and run the appropriate @hook and other decorated
# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive,
# and $JUJU_CHARM_DIR/hooks/relations.
#
# See https://jujucharms.com/docs/stable/authors-charm-building
# for more information on this pattern.
from charms.reactive import main  # noqa
main()

View File

@ -0,0 +1,22 @@
#!/usr/bin/env python3
# Juju hook entry point: bootstrap the charm's Python dependencies, then
# hand control to the reactive framework to run the appropriate handlers.

# Load modules from $JUJU_CHARM_DIR/lib
import sys
sys.path.append('lib')

from charms.layer import basic  # noqa
# Ensure charm dependencies are bootstrapped before further charm imports.
basic.bootstrap_charm_deps()

from charmhelpers.core import hookenv  # noqa
# Register config-state setup/teardown around this hook invocation.
hookenv.atstart(basic.init_config_states)
hookenv.atexit(basic.clear_config_states)

# This will load and run the appropriate @hook and other decorated
# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive,
# and $JUJU_CHARM_DIR/hooks/relations.
#
# See https://jujucharms.com/docs/stable/authors-charm-building
# for more information on this pattern.
from charms.reactive import main  # noqa
main()

View File

@ -0,0 +1,22 @@
#!/usr/bin/env python3
# Juju hook entry point: bootstrap the charm's Python dependencies, then
# hand control to the reactive framework to run the appropriate handlers.

# Load modules from $JUJU_CHARM_DIR/lib
import sys
sys.path.append('lib')

from charms.layer import basic  # noqa
# Ensure charm dependencies are bootstrapped before further charm imports.
basic.bootstrap_charm_deps()

from charmhelpers.core import hookenv  # noqa
# Register config-state setup/teardown around this hook invocation.
hookenv.atstart(basic.init_config_states)
hookenv.atexit(basic.clear_config_states)

# This will load and run the appropriate @hook and other decorated
# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive,
# and $JUJU_CHARM_DIR/hooks/relations.
#
# See https://jujucharms.com/docs/stable/authors-charm-building
# for more information on this pattern.
from charms.reactive import main  # noqa
main()

22
calico/hooks/pre-series-upgrade Executable file
View File

@ -0,0 +1,22 @@
#!/usr/bin/env python3
# Juju hook entry point: bootstrap the charm's Python dependencies, then
# hand control to the reactive framework to run the appropriate handlers.

# Load modules from $JUJU_CHARM_DIR/lib
import sys
sys.path.append('lib')

from charms.layer import basic  # noqa
# Ensure charm dependencies are bootstrapped before further charm imports.
basic.bootstrap_charm_deps()

from charmhelpers.core import hookenv  # noqa
# Register config-state setup/teardown around this hook invocation.
hookenv.atstart(basic.init_config_states)
hookenv.atexit(basic.clear_config_states)

# This will load and run the appropriate @hook and other decorated
# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive,
# and $JUJU_CHARM_DIR/hooks/relations.
#
# See https://jujucharms.com/docs/stable/authors-charm-building
# for more information on this pattern.
from charms.reactive import main  # noqa
main()

View File

@ -0,0 +1 @@
.DS_Store

View File

@ -0,0 +1,89 @@
# Overview
This interface layer handles the communication with Etcd via the `etcd`
interface.
# Usage
## Requires
This interface layer will set the following states, as appropriate:
* `{relation_name}.connected` The relation is established, but Etcd may not
yet have provided any connection or service information.
* `{relation_name}.available` Etcd has provided its connection string
information, and is ready to serve as a KV store.
The provided information can be accessed via the following methods:
* `etcd.get_connection_string()`
* `etcd.get_version()`
* `{relation_name}.tls.available` Etcd has provided the connection string
information, and the tls client credentials to communicate with it.
The client credentials can be accessed via:
* `{relation_name}.get_client_credentials()` returning a dictionary of
the client certificate, key and CA.
* `{relation_name}.save_client_credentials(key, cert, ca)` is a convenience
method to save the client certificate, key and CA to files of your
choosing.
For example, a common application for this is configuring an applications
backend key/value storage, like Docker.
```python
@when('etcd.available', 'docker.available')
def swarm_etcd_cluster_setup(etcd):
con_string = etcd.connection_string().replace('http', 'etcd')
opts = {}
opts['connection_string'] = con_string
render('docker-compose.yml', 'files/swarm/docker-compose.yml', opts)
```
## Provides
A charm providing this interface is providing the Etcd rest api service.
This interface layer will set the following states, as appropriate:
* `{relation_name}.connected` One or more clients of any type have
been related. The charm should call the following methods to provide the
appropriate information to the clients:
* `{relation_name}.set_connection_string(string, version)`
* `{relation_name}.set_client_credentials(key, cert, ca)`
Example:
```python
@when('db.connected')
def send_connection_details(db):
cert = leader_get('client_certificate')
key = leader_get('client_key')
ca = leader_get('certificate_authority')
# Set the key, cert, and ca on the db relation
db.set_client_credentials(key, cert, ca)
port = hookenv.config().get('port')
# Get all the peers participating in the cluster relation.
addresses = cluster.get_peer_addresses()
connections = []
for address in addresses:
connections.append('http://{0}:{1}'.format(address, port))
# Set the connection string on the db relation.
db.set_connection_string(','.join(connections))
```
# Contact Information
### Maintainer
- Charles Butler <charles.butler@canonical.com>
# Etcd
- [Etcd](https://coreos.com/etcd/) home page
- [Etcd bug trackers](https://github.com/coreos/etcd/issues)
- [Etcd Juju Charm](http://jujucharms.com/?text=etcd)

View File

View File

@ -0,0 +1,4 @@
name: etcd
summary: Interface for relating to ETCD
version: 2
maintainer: "Charles Butler <charles.butler@canonical.com>"

View File

@ -0,0 +1,70 @@
#!/usr/bin/python
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from charms.reactive import RelationBase
from charms.reactive import hook
from charms.reactive import scopes
class EtcdPeer(RelationBase):
    '''Peer relation handler: raises and clears reactive states so the
    charm can track which etcd units have joined or are departing.'''

    scope = scopes.UNIT

    @hook('{peers:etcd}-relation-joined')
    def peer_joined(self):
        '''Mark the remote unit as joined so we can later detect when it
        departs.'''
        self.conversation().set_state('{relation_name}.joined')

    @hook('{peers:etcd}-relation-departed')
    def peers_going_away(self):
        '''Flag the unit as departing. Combined with the joined state this
        tells the charm which unit to unregister from the etcd cluster.'''
        conversation = self.conversation()
        conversation.remove_state('{relation_name}.joined')
        conversation.set_state('{relation_name}.departing')

    def dismiss(self):
        '''Clear the departing state from every unit in the conversation so
        normal operation can resume.'''
        for conversation in self.conversations():
            conversation.remove_state('{relation_name}.departing')

    def get_peers(self):
        '''Return the scope names of all peers participating in this
        conversation scope.'''
        return [conversation.scope for conversation in self.conversations()]

    def set_db_ingress_address(self, address):
        '''Publish the ingress address belonging to the db relation to all
        peers.'''
        for conversation in self.conversations():
            conversation.set_remote('db-ingress-address', address)

    def get_db_ingress_addresses(self):
        '''Return the db ingress addresses published by peers; peers that
        have not published one are skipped.'''
        addresses = []
        for conversation in self.conversations():
            address = conversation.get_remote('db-ingress-address')
            if address:
                addresses.append(address)
        return addresses

View File

@ -0,0 +1,47 @@
#!/usr/bin/python
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from charms.reactive import RelationBase
from charms.reactive import hook
from charms.reactive import scopes
class EtcdProvider(RelationBase):
    '''Provides side of the etcd interface: publishes connection details
    and client TLS credentials on a single global conversation.'''

    scope = scopes.GLOBAL

    @hook('{provides:etcd}-relation-{joined,changed}')
    def joined_or_changed(self):
        '''Raise the connected state when any client joins or changes.'''
        self.set_state('{relation_name}.connected')

    @hook('{provides:etcd}-relation-{broken,departed}')
    def broken_or_departed(self):
        '''Drop the connected state once the final remote unit leaves.'''
        conversation = self.conversation()
        # Only clear when this is the last unit on the conversation.
        if len(conversation.units) == 1:
            conversation.remove_state('{relation_name}.connected')

    def set_client_credentials(self, key, cert, ca):
        '''Publish the client TLS key, certificate and CA on the global
        conversation for this relation.'''
        self.set_remote('client_key', key)
        self.set_remote('client_ca', ca)
        self.set_remote('client_cert', cert)

    def set_connection_string(self, connection_string, version=''):
        '''Publish the cluster connection string (and optional version)
        on the global conversation for this relation.

        Version was added as a late dependency for the etcd 2 => 3
        migration; when empty, consumers should presume etcd 2.x.
        '''
        self.set_remote('connection_string', connection_string)
        self.set_remote('version', version)

View File

@ -0,0 +1,80 @@
#!/usr/bin/python
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from charms.reactive import RelationBase
from charms.reactive import hook
from charms.reactive import scopes
class EtcdClient(RelationBase):
    '''Requires side of the etcd interface: tracks connection details and
    client TLS credentials published by the etcd provider.'''

    scope = scopes.GLOBAL

    @hook('{requires:etcd}-relation-{joined,changed}')
    def changed(self):
        ''' Indicate the relation is connected, and if the relation data is
        set it is also available. '''
        self.set_state('{relation_name}.connected')
        if self.get_connection_string():
            self.set_state('{relation_name}.available')
        # Get the ca, key, cert from the relation data.
        cert = self.get_client_credentials()
        # The tls state depends on the existence of the ca, key and cert.
        if cert['client_cert'] and cert['client_key'] and cert['client_ca']:  # noqa
            self.set_state('{relation_name}.tls.available')

    # Fix: the pattern previously read '{broken, departed}'; the space made
    # the 'departed' alternative never match, so departures were missed.
    @hook('{requires:etcd}-relation-{broken,departed}')
    def broken(self):
        ''' Indicate the relation is no longer available and not connected. '''
        self.remove_state('{relation_name}.available')
        self.remove_state('{relation_name}.connected')
        self.remove_state('{relation_name}.tls.available')

    def connection_string(self):
        ''' This method is deprecated but ensures backward compatibility.
        @see get_connection_string(self). '''
        return self.get_connection_string()

    def get_connection_string(self):
        ''' Return the connection string, if available, or None. '''
        return self.get_remote('connection_string')

    def get_version(self):
        ''' Return the version of the etcd protocol being used, or None. '''
        return self.get_remote('version')

    def get_client_credentials(self):
        ''' Return a dict with the client certificate, ca and key to
        communicate with etcd using tls. '''
        return {'client_cert': self.get_remote('client_cert'),
                'client_key': self.get_remote('client_key'),
                'client_ca': self.get_remote('client_ca')}

    def save_client_credentials(self, key, cert, ca):
        ''' Save all the client certificates for etcd to local files. '''
        self._save_remote_data('client_cert', cert)
        self._save_remote_data('client_key', key)
        self._save_remote_data('client_ca', ca)

    def _save_remote_data(self, key, path):
        ''' Save the remote data to a file indicated by path, creating the
        parent directory if needed. '''
        value = self.get_remote(key)
        if value:
            parent = os.path.dirname(path)
            # exist_ok avoids the isdir()/makedirs() race; the guard keeps
            # bare filenames (empty dirname) from crashing makedirs.
            if parent:
                os.makedirs(parent, exist_ok=True)
            with open(path, 'w') as stream:
                stream.write(value)

View File

@ -0,0 +1,24 @@
name: Test Suite for K8s Service Interface
on:
- pull_request
jobs:
lint-and-unit-tests:
name: Lint & Unit tests
runs-on: ubuntu-latest
strategy:
matrix:
python: [3.6, 3.7, 3.8, 3.9]
steps:
- name: Check out code
uses: actions/checkout@v2
- name: Setup Python
uses: actions/setup-python@v2
with:
python-version: ${{ matrix.python }}
- name: Install Tox
run: pip install tox
- name: Run lint & unit tests
run: tox

View File

@ -0,0 +1,4 @@
.DS_Store
.tox
__pycache__
*.pyc

View File

@ -0,0 +1,6 @@
name: kubernetes-cni
summary: Interface for relating various CNI implementations
version: 0
maintainer: "George Kraft <george.kraft@canonical.com>"
ignore:
- tests

View File

@ -0,0 +1,89 @@
#!/usr/bin/python
from charmhelpers.core import hookenv
from charmhelpers.core.host import file_hash
from charms.layer.kubernetes_common import kubeclientconfig_path
from charms.reactive import Endpoint
from charms.reactive import toggle_flag, is_flag_set, clear_flag, set_flag
class CNIPluginProvider(Endpoint):
    """Kubernetes side of the kubernetes-cni relation.

    Despite the name, this endpoint *consumes* CNI configuration published
    by CNI plugin charms (cidr, cni-conf-file) and publishes back whether
    this unit is a master plus the kubeconfig hash.
    """

    def manage_flags(self):
        """Keep the endpoint's reactive flags in sync with relation state."""
        toggle_flag(self.expand_name("{endpoint_name}.connected"), self.is_joined)
        toggle_flag(
            self.expand_name("{endpoint_name}.available"), self.config_available()
        )
        # Any change on the wire invalidates previously-pushed configuration.
        if is_flag_set(self.expand_name("endpoint.{endpoint_name}.changed")):
            clear_flag(self.expand_name("{endpoint_name}.configured"))
            clear_flag(self.expand_name("endpoint.{endpoint_name}.changed"))

    def set_config(self, is_master):
        """Relays a dict of kubernetes configuration information.

        Publishes the is_master value to every related CNI plugin and marks
        the endpoint configured.
        """
        for relation in self.relations:
            relation.to_publish_raw.update({"is_master": is_master})
        set_flag(self.expand_name("{endpoint_name}.configured"))

    def config_available(self):
        """Ensures all config from the CNI plugin is available.

        Returns True only when every application related on this endpoint
        (per goal-state) has published both 'cidr' and 'cni-conf-file'.
        """
        goal_state = hookenv.goal_state()
        # Keep application entries only; unit entries contain a '/'.
        related_apps = [
            app
            for app in goal_state.get("relations", {}).get(self.endpoint_name, "")
            if "/" not in app
        ]
        if not related_apps:
            return False
        configs = self.get_configs()
        return all(
            "cidr" in config and "cni-conf-file" in config
            for config in [configs.get(related_app, {}) for related_app in related_apps]
        )

    def get_config(self, default=None):
        """Get CNI config for one related application.

        If default is specified, and there is a related application with a
        matching name, then that application is chosen. Otherwise, the
        application is chosen alphabetically.

        Whichever application is chosen, that application's CNI config is
        returned.
        """
        configs = self.get_configs()
        if not configs:
            return {}
        elif default and default not in configs:
            # Requested default has no relation; warn and fall back to the
            # alphabetical choice.
            msg = "relation not found for default CNI %s, ignoring" % default
            hookenv.log(msg, level="WARN")
            return self.get_config()
        elif default:
            return configs.get(default, {})
        else:
            return configs.get(sorted(configs)[0], {})

    def get_configs(self):
        """Get CNI configs for all related applications.

        This returns a mapping of application names to CNI configs. Here's an
        example return value:

        {
            'flannel': {
                'cidr': '10.1.0.0/16',
                'cni-conf-file': '10-flannel.conflist'
            },
            'calico': {
                'cidr': '192.168.0.0/16',
                'cni-conf-file': '10-calico.conflist'
            }
        }
        """
        return {
            relation.application_name: relation.joined_units.received_raw
            for relation in self.relations
            if relation.application_name
        }

    def notify_kubeconfig_changed(self):
        """Publish the hash of the local kubeconfig file so remote CNI
        plugin units can detect when it changes."""
        kubeconfig_hash = file_hash(kubeclientconfig_path)
        for relation in self.relations:
            relation.to_publish_raw.update({"kubeconfig-hash": kubeconfig_hash})

View File

@ -0,0 +1,54 @@
#!/usr/bin/python
from charmhelpers.core import unitdata
from charms.reactive import Endpoint
from charms.reactive import when_any, when_not
from charms.reactive import set_state, remove_state
db = unitdata.kv()
class CNIPluginClient(Endpoint):
    """CNI-plugin side of the kubernetes-cni relation: receives the
    is_master role and kubeconfig hash, publishes cidr/cni-conf-file."""

    def manage_flags(self):
        """Track kubeconfig availability and change detection via its hash."""
        received_hash = self.get_config().get("kubeconfig-hash")
        hash_key = self.expand_name("{endpoint_name}.kubeconfig-hash")
        # NOTE(review): nesting reconstructed from a whitespace-mangled
        # paste -- confirm against upstream interface-kubernetes-cni.
        if received_hash:
            set_state(self.expand_name("{endpoint_name}.kubeconfig.available"))
            if received_hash != db.get(hash_key):
                set_state(self.expand_name("{endpoint_name}.kubeconfig.changed"))
                db.set(hash_key, received_hash)

    @when_any("endpoint.{endpoint_name}.joined", "endpoint.{endpoint_name}.changed")
    def changed(self):
        """Indicate the relation is connected, and if the relation data is
        set it is also available."""
        set_state(self.expand_name("{endpoint_name}.connected"))
        config = self.get_config()
        # The remote side publishes is_master as the strings "True"/"False";
        # anything else means the role is not yet known.
        role_flags = {
            "True": "{endpoint_name}.is-master",
            "False": "{endpoint_name}.is-worker",
        }
        role = role_flags.get(config["is_master"])
        if role is not None:
            set_state(self.expand_name(role))
            set_state(self.expand_name("{endpoint_name}.configured"))
        else:
            remove_state(self.expand_name("{endpoint_name}.configured"))
        remove_state(self.expand_name("endpoint.{endpoint_name}.changed"))

    @when_not("endpoint.{endpoint_name}.joined")
    def broken(self):
        """Indicate the relation is no longer available and not connected."""
        for suffix in ("connected", "is-master", "is-worker", "configured"):
            remove_state(self.expand_name("{endpoint_name}." + suffix))

    def get_config(self):
        """Get the kubernetes configuration information."""
        return self.all_joined_units.received_raw

    def set_config(self, cidr, cni_conf_file):
        """Sets the CNI configuration information."""
        payload = {"cidr": cidr, "cni-conf-file": cni_conf_file}
        for relation in self.relations:
            relation.to_publish_raw.update(payload)

View File

@ -0,0 +1,27 @@
[tox]
skipsdist = True
envlist = lint,py3
[testenv]
basepython = python3
setenv =
PYTHONPATH={toxinidir}:{toxinidir}/lib
PYTHONBREAKPOINT=ipdb.set_trace
deps =
pyyaml
pytest
flake8
black
ipdb
charms.unit_test
commands = pytest --tb native -s {posargs}
[testenv:lint]
envdir = {toxworkdir}/py3
commands =
flake8 {toxinidir}
black --check {toxinidir}
[flake8]
exclude=.tox
max-line-length = 88

22
calico/hooks/start Executable file
View File

@ -0,0 +1,22 @@
#!/usr/bin/env python3
# Charm hook entry point ('start'); all hooks share this boilerplate.
# Load modules from $JUJU_CHARM_DIR/lib
import sys
sys.path.append('lib')
# Bootstrap the charm's Python deps (wheelhouse/venv) BEFORE importing
# anything else from the charm; import order here is load-bearing.
from charms.layer import basic  # noqa
basic.bootstrap_charm_deps()
from charmhelpers.core import hookenv  # noqa
# Compute config-derived flags at hook start; clear them at exit.
hookenv.atstart(basic.init_config_states)
hookenv.atexit(basic.clear_config_states)
# This will load and run the appropriate @hook and other decorated
# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive,
# and $JUJU_CHARM_DIR/hooks/relations.
#
# See https://jujucharms.com/docs/stable/authors-charm-building
# for more information on this pattern.
from charms.reactive import main  # noqa
main()

22
calico/hooks/stop Executable file
View File

@ -0,0 +1,22 @@
#!/usr/bin/env python3
# Charm hook entry point ('stop'); all hooks share this boilerplate.
# Load modules from $JUJU_CHARM_DIR/lib
import sys
sys.path.append('lib')
# Bootstrap the charm's Python deps (wheelhouse/venv) BEFORE importing
# anything else from the charm; import order here is load-bearing.
from charms.layer import basic  # noqa
basic.bootstrap_charm_deps()
from charmhelpers.core import hookenv  # noqa
# Compute config-derived flags at hook start; clear them at exit.
hookenv.atstart(basic.init_config_states)
hookenv.atexit(basic.clear_config_states)
# This will load and run the appropriate @hook and other decorated
# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive,
# and $JUJU_CHARM_DIR/hooks/relations.
#
# See https://jujucharms.com/docs/stable/authors-charm-building
# for more information on this pattern.
from charms.reactive import main  # noqa
main()

22
calico/hooks/update-status Executable file
View File

@ -0,0 +1,22 @@
#!/usr/bin/env python3
# Charm hook entry point ('update-status'); all hooks share this boilerplate.
# Load modules from $JUJU_CHARM_DIR/lib
import sys
sys.path.append('lib')
# Bootstrap the charm's Python deps (wheelhouse/venv) BEFORE importing
# anything else from the charm; import order here is load-bearing.
from charms.layer import basic  # noqa
basic.bootstrap_charm_deps()
from charmhelpers.core import hookenv  # noqa
# Compute config-derived flags at hook start; clear them at exit.
hookenv.atstart(basic.init_config_states)
hookenv.atexit(basic.clear_config_states)
# This will load and run the appropriate @hook and other decorated
# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive,
# and $JUJU_CHARM_DIR/hooks/relations.
#
# See https://jujucharms.com/docs/stable/authors-charm-building
# for more information on this pattern.
from charms.reactive import main  # noqa
main()

22
calico/hooks/upgrade-charm Executable file
View File

@ -0,0 +1,22 @@
#!/usr/bin/env python3
# Charm hook entry point ('upgrade-charm'); all hooks share this boilerplate.
# Load modules from $JUJU_CHARM_DIR/lib
import sys
sys.path.append('lib')
# Bootstrap the charm's Python deps (wheelhouse/venv) BEFORE importing
# anything else from the charm; import order here is load-bearing.
from charms.layer import basic  # noqa
basic.bootstrap_charm_deps()
from charmhelpers.core import hookenv  # noqa
# Compute config-derived flags at hook start; clear them at exit.
hookenv.atstart(basic.init_config_states)
hookenv.atexit(basic.clear_config_states)
# This will load and run the appropriate @hook and other decorated
# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive,
# and $JUJU_CHARM_DIR/hooks/relations.
#
# See https://jujucharms.com/docs/stable/authors-charm-building
# for more information on this pattern.
from charms.reactive import main  # noqa
main()

1378
calico/icon.svg Normal file

File diff suppressed because it is too large Load Diff

After

Width:  |  Height:  |  Size: 102 KiB

22
calico/layer.yaml Normal file
View File

@ -0,0 +1,22 @@
"includes":
- "layer:options"
- "interface:etcd"
- "interface:kubernetes-cni"
- "layer:basic"
- "layer:leadership"
- "layer:status"
- "layer:kubernetes-common"
"exclude": [".travis.yml", "tests", "tox.ini", "test-requirements.txt", "unit_tests"]
"options":
"basic":
"packages": []
"python_packages": []
"use_venv": !!bool "true"
"include_system_packages": !!bool "false"
"leadership": {}
"status":
"patch-hookenv": !!bool "true"
"kubernetes-common": {}
"calico": {}
"repo": "https://github.com/juju-solutions/layer-calico.git"
"is": "calico"

View File

@ -0,0 +1,10 @@
from subprocess import check_output
def arch():
    '''Return the dpkg-reported package architecture as a string
    (e.g. 'amd64', 'arm64').'''
    raw = check_output(['dpkg', '--print-architecture'])
    # dpkg appends a trailing newline; strip it and decode the bytes.
    return raw.rstrip().decode('utf-8')

View File

@ -0,0 +1,108 @@
import os
import shutil
import yaml
from subprocess import check_call, check_output, CalledProcessError
from calico_common import arch
from charms.reactive import endpoint_from_flag
from charmhelpers.core.hookenv import resource_get, status_set, log
# Working locations for the calico-upgrade tool and the TLS material it
# needs to reach etcd.
CALICOCTL_PATH = '/opt/calicoctl'
ETCD_KEY_PATH = os.path.join(CALICOCTL_PATH, 'etcd-key')
ETCD_CERT_PATH = os.path.join(CALICOCTL_PATH, 'etcd-cert')
ETCD_CA_PATH = os.path.join(CALICOCTL_PATH, 'etcd-ca')
CALICO_UPGRADE_DIR = '/opt/calico-upgrade'
# API config files describing the etcd v2 (source) and v3 (destination)
# datastores for the migration.
ETCD2_DATA_PATH = CALICO_UPGRADE_DIR + '/etcd2.yaml'
ETCD3_DATA_PATH = CALICO_UPGRADE_DIR + '/etcd3.yaml'
class ResourceMissing(Exception):
    '''Raised when the calico-upgrade charm resource is not attached.'''
    pass
class DryRunFailed(Exception):
    '''Raised when the calico-upgrade dry run does not report success.'''
    pass
def cleanup():
    '''Remove the calico-upgrade working directory; missing paths are
    ignored.'''
    shutil.rmtree(CALICO_UPGRADE_DIR, ignore_errors=True)
def configure():
    '''Prepare CALICO_UPGRADE_DIR: extract the calico-upgrade binary from
    the charm resource and write the etcdv2/etcdv3 API config files the
    tool needs.

    Raises ResourceMissing (after setting blocked status) when the
    architecture-appropriate resource is not attached.
    '''
    cleanup()
    os.makedirs(CALICO_UPGRADE_DIR)
    # Extract calico-upgrade resource; non-amd64 architectures use a
    # suffixed resource name.
    architecture = arch()
    if architecture == 'amd64':
        resource_name = 'calico-upgrade'
    else:
        resource_name = 'calico-upgrade-' + architecture
    archive = resource_get(resource_name)
    if not archive:
        message = 'Missing calico-upgrade resource'
        status_set('blocked', message)
        raise ResourceMissing(message)
    check_call(['tar', '-xvf', archive, '-C', CALICO_UPGRADE_DIR])
    # Configure calico-upgrade, etcd2 (data source)
    etcd = endpoint_from_flag('etcd.available')
    etcd_endpoints = etcd.get_connection_string()
    etcd2_data = {
        'apiVersion': 'v1',
        'kind': 'calicoApiConfig',
        'metadata': None,
        'spec': {
            'datastoreType': 'etcdv2',
            'etcdEndpoints': etcd_endpoints,
            'etcdKeyFile': ETCD_KEY_PATH,
            'etcdCertFile': ETCD_CERT_PATH,
            'etcdCACertFile': ETCD_CA_PATH
        }
    }
    with open(ETCD2_DATA_PATH, 'w') as f:
        yaml.dump(etcd2_data, f)
    # Configure calico-upgrade, etcd3 (data destination)
    etcd3_data = {
        'apiVersion': 'projectcalico.org/v3',
        'kind': 'CalicoAPIConfig',
        'metadata': None,
        'spec': {
            'datastoreType': 'etcdv3',
            'etcdEndpoints': etcd_endpoints,
            'etcdKeyFile': ETCD_KEY_PATH,
            'etcdCertFile': ETCD_CERT_PATH,
            'etcdCACertFile': ETCD_CA_PATH
        }
    }
    with open(ETCD3_DATA_PATH, 'w') as f:
        yaml.dump(etcd3_data, f)
def invoke(*args):
    '''Run the extracted calico-upgrade binary with the given arguments,
    always appending the v1/v3 API config flags.

    Returns the command's stdout as bytes; logs output and re-raises on
    a non-zero exit.
    '''
    command = [CALICO_UPGRADE_DIR + '/calico-upgrade']
    command.extend(args)
    command.extend(['--apiconfigv1', ETCD2_DATA_PATH,
                    '--apiconfigv3', ETCD3_DATA_PATH])
    try:
        return check_output(command)
    except CalledProcessError as err:
        log(err.output)
        raise
def dry_run():
    '''Run a calico-upgrade dry run; raise DryRunFailed unless the tool
    reports a successful v1 -> v3 validation.'''
    report = invoke('dry-run', '--output-dir', CALICO_UPGRADE_DIR)
    success_marker = b'Successfully validated v1 to v3 conversion'
    if success_marker not in report:
        raise DryRunFailed()
def start():
    '''Run the calico-upgrade 'start' phase non-interactively, writing
    reports to CALICO_UPGRADE_DIR.'''
    invoke('start', '--no-prompts', '--output-dir', CALICO_UPGRADE_DIR)
def complete():
    '''Run the calico-upgrade 'complete' phase non-interactively.'''
    invoke('complete', '--no-prompts')

View File

@ -0,0 +1,60 @@
import sys
from importlib import import_module
from pathlib import Path
def import_layer_libs():
    """
    Ensure that all layer libraries are imported.

    This makes it possible to do the following:

        from charms import layer

        layer.foo.do_foo_thing()

    Note: This function must be called after bootstrap.
    """
    skipped = ('__init__', 'basic', 'execd')
    for entry in Path('lib/charms/layer').glob('*'):
        module_name = entry.stem
        if module_name in skipped:
            continue
        # Only real Python modules or packages qualify.
        if entry.suffix != '.py' and not entry.is_dir():
            continue
        import_module('charms.layer.{}'.format(module_name))
# Terrible hack to support the old terrible interface.
# Try to get people to call layer.options.get() instead so
# that we can remove this garbage.
# Cribbed from https://stackoverfLow.com/a/48100440/4941864
class OptionsBackwardsCompatibilityHack(sys.modules[__name__].__class__):
    """Module subclass that makes ``charms.layer.options`` itself callable,
    delegating to its ``get`` function."""

    def __call__(self, section=None, layer_file=None):
        if layer_file is not None:
            return self.get(section=section,
                            layer_file=Path(layer_file))
        return self.get(section=section)
def patch_options_interface():
    """Make the ``charms.layer.options`` module callable by swapping its
    class for OptionsBackwardsCompatibilityHack (old-interface support)."""
    from charms.layer import options
    # NOTE(review): assumes Python 3.x; only the minor version is checked.
    if sys.version_info.minor >= 5:
        options.__class__ = OptionsBackwardsCompatibilityHack
    else:
        # Py 3.4 doesn't support changing the __class__, so we have to do it
        # another way. The last line is needed because we already have a
        # reference that doesn't get updated with sys.modules.
        name = options.__name__
        hack = OptionsBackwardsCompatibilityHack(name)
        hack.get = options.get
        sys.modules[name] = hack
        sys.modules[__name__].options = hack
# Apply the compatibility hack eagerly at import time so callers that do
# `from charms.layer import options` see the patched module.
try:
    patch_options_interface()
except ImportError:
    # This may fail if pyyaml hasn't been installed yet. But in that
    # case, the bootstrap logic will try it again once it has.
    pass

View File

@ -0,0 +1,501 @@
import os
import sys
import re
import shutil
from distutils.version import LooseVersion
from pkg_resources import Requirement
from glob import glob
from subprocess import check_call, check_output, CalledProcessError
from time import sleep
from charms import layer
from charms.layer.execd import execd_preinstall
def _get_subprocess_env():
env = os.environ.copy()
env['LANG'] = env.get('LANG', 'C.UTF-8')
return env
def get_series():
    """
    Return series for a few known OS:es.

    Tested as of 2019 november:
    * centos6, centos7, rhel6.
    * bionic

    Detection order: /etc/os-release (ubuntu + some centos),
    /etc/redhat-release (RHEL family), /etc/lsb-release (ubuntu).
    Falls back to "unknown".
    """
    series = ""
    # Looking for content in /etc/os-release
    # works for ubuntu + some centos
    if os.path.isfile('/etc/os-release'):
        d = {}
        with open('/etc/os-release', 'r') as rel:
            for l in rel:
                if not re.match(r'^\s*$', l):
                    # Split on the first '=' only: values may legally
                    # contain '=' characters.
                    k, v = l.split('=', 1)
                    d[k.strip()] = v.strip().replace('"', '')
        # Some distros omit VERSION_ID (e.g. rolling releases); don't
        # KeyError on a missing field.
        series = d.get('ID', '') + d.get('VERSION_ID', '')
    # Looking for content in /etc/redhat-release
    # works for redhat enterprise systems
    elif os.path.isfile('/etc/redhat-release'):
        with open('/etc/redhat-release', 'r') as redhatlsb:
            # CentOS Linux release 7.7.1908 (Core)
            line = redhatlsb.readline()
            release = int(line.split("release")[1].split()[0][0])
            series = "centos" + str(release)
    # Looking for content in /etc/lsb-release
    # works for ubuntu
    elif os.path.isfile('/etc/lsb-release'):
        d = {}
        with open('/etc/lsb-release', 'r') as lsb:
            for l in lsb:
                # Skip blank/malformed lines and split on the first '='.
                if '=' not in l:
                    continue
                k, v = l.split('=', 1)
                d[k.strip()] = v.strip()
        series = d['DISTRIB_CODENAME']
    # This is what happens if we cant figure out the OS.
    if not series:
        series = "unknown"
    return series
def bootstrap_charm_deps():
    """
    Set up the base charm dependencies so that the reactive system can run.

    Installs OS packages, optionally builds a venv, and installs the
    bundled wheelhouse into it; on completion it re-execs the interpreter
    so the new libraries are importable. Idempotent via the
    wheelhouse/.bootstrapped and wheelhouse/.upgraded marker files.
    """
    # execd must happen first, before any attempt to install packages or
    # access the network, because sites use this hook to do bespoke
    # configuration and install secrets so the rest of this bootstrap
    # and the charm itself can actually succeed. This call does nothing
    # unless the operator has created and populated $JUJU_CHARM_DIR/exec.d.
    execd_preinstall()
    series = get_series()
    # OMG?! is build-essentials needed?
    ubuntu_packages = ['python3-pip',
                       'python3-setuptools',
                       'python3-yaml',
                       'python3-dev',
                       'python3-wheel',
                       'build-essential']
    # I'm not going to "yum group info "Development Tools"
    # omitting above madness
    centos_packages = ['python3-pip',
                       'python3-setuptools',
                       'python3-devel',
                       'python3-wheel']
    packages_needed = []
    if 'centos' in series:
        packages_needed = centos_packages
    else:
        packages_needed = ubuntu_packages
    charm_dir = os.environ['JUJU_CHARM_DIR']
    # ensure that $JUJU_CHARM_DIR/bin is on the path, for helper scripts
    os.environ['PATH'] += ':%s' % os.path.join(charm_dir, 'bin')
    venv = os.path.abspath('../.venv')
    vbin = os.path.join(venv, 'bin')
    vpip = os.path.join(vbin, 'pip')
    vpy = os.path.join(vbin, 'python')
    hook_name = os.path.basename(sys.argv[0])
    is_bootstrapped = os.path.exists('wheelhouse/.bootstrapped')
    is_charm_upgrade = hook_name == 'upgrade-charm'
    is_series_upgrade = hook_name == 'post-series-upgrade'
    is_post_upgrade = os.path.exists('wheelhouse/.upgraded')
    is_upgrade = (not is_post_upgrade and
                  (is_charm_upgrade or is_series_upgrade))
    if is_bootstrapped and not is_upgrade:
        # older subordinates might have downgraded charm-env, so we should
        # restore it if necessary
        install_or_update_charm_env()
        activate_venv()
        # the .upgrade file prevents us from getting stuck in a loop
        # when re-execing to activate the venv; at this point, we've
        # activated the venv, so it's safe to clear it
        if is_post_upgrade:
            os.unlink('wheelhouse/.upgraded')
        return
    if os.path.exists(venv):
        try:
            # focal installs or upgrades prior to PR 160 could leave the venv
            # in a broken state which would prevent subsequent charm upgrades
            _load_installed_versions(vpip)
        except CalledProcessError:
            is_broken_venv = True
        else:
            is_broken_venv = False
        if is_upgrade or is_broken_venv:
            # All upgrades should do a full clear of the venv, rather than
            # just updating it, to bring in updates to Python itself
            shutil.rmtree(venv)
    if is_upgrade:
        if os.path.exists('wheelhouse/.bootstrapped'):
            os.unlink('wheelhouse/.bootstrapped')
    # bootstrap wheelhouse
    if os.path.exists('wheelhouse'):
        pre_eoan = series in ('ubuntu12.04', 'precise',
                              'ubuntu14.04', 'trusty',
                              'ubuntu16.04', 'xenial',
                              'ubuntu18.04', 'bionic')
        pydistutils_lines = [
            "[easy_install]\n",
            "find_links = file://{}/wheelhouse/\n".format(charm_dir),
            "no_index=True\n",
            "index_url=\n",  # deliberately nothing here; disables it.
        ]
        if pre_eoan:
            pydistutils_lines.append("allow_hosts = ''\n")
        with open('/root/.pydistutils.cfg', 'w') as fp:
            # make sure that easy_install also only uses the wheelhouse
            # (see https://github.com/pypa/pip/issues/410)
            fp.writelines(pydistutils_lines)
        if 'centos' in series:
            yum_install(packages_needed)
        else:
            apt_install(packages_needed)
        from charms.layer import options
        cfg = options.get('basic')
        # include packages defined in layer.yaml
        if 'centos' in series:
            yum_install(cfg.get('packages', []))
        else:
            apt_install(cfg.get('packages', []))
        # if we're using a venv, set it up
        if cfg.get('use_venv'):
            if not os.path.exists(venv):
                series = get_series()
                if series in ('ubuntu12.04', 'precise',
                              'ubuntu14.04', 'trusty'):
                    apt_install(['python-virtualenv'])
                elif 'centos' in series:
                    yum_install(['python-virtualenv'])
                else:
                    apt_install(['virtualenv'])
                cmd = ['virtualenv', '-ppython3', '--never-download', venv]
                if cfg.get('include_system_packages'):
                    cmd.append('--system-site-packages')
                check_call(cmd, env=_get_subprocess_env())
            os.environ['PATH'] = ':'.join([vbin, os.environ['PATH']])
            pip = vpip
        else:
            pip = 'pip3'
            # save a copy of system pip to prevent `pip3 install -U pip`
            # from changing it
            if os.path.exists('/usr/bin/pip'):
                shutil.copy2('/usr/bin/pip', '/usr/bin/pip.save')
        pre_install_pkgs = ['pip', 'setuptools', 'setuptools-scm']
        # we bundle these packages to work around bugs in older versions
        # (such as https://github.com/pypa/pip/issues/56), but if the system
        # already provided a newer version, downgrading it can cause other
        # problems
        _update_if_newer(pip, pre_install_pkgs)
        # install the rest of the wheelhouse deps (extract the pkg names
        # into a set so that we can ignore the pre-install packages and let
        # pip choose the best version in case there are multiple from layer
        # conflicts)
        _versions = _load_wheelhouse_versions()
        _pkgs = _versions.keys() - set(pre_install_pkgs)
        # add back the versions such that each package in pkgs is
        # <package_name>==<version>.
        # This ensures that pip 20.3.4+ will install the packages from the
        # wheelhouse without (erroneously) flagging an error.
        pkgs = _add_back_versions(_pkgs, _versions)
        reinstall_flag = '--force-reinstall'
        if not cfg.get('use_venv', True) and pre_eoan:
            reinstall_flag = '--ignore-installed'
        check_call([pip, 'install', '-U', reinstall_flag, '--no-index',
                    '--no-cache-dir', '-f', 'wheelhouse'] + list(pkgs),
                   env=_get_subprocess_env())
        # re-enable installation from pypi
        os.remove('/root/.pydistutils.cfg')
        # install pyyaml for centos7, since, unlike the ubuntu image, the
        # default image for centos doesn't include pyyaml; see the
        # discussion:
        # https://discourse.jujucharms.com/t/charms-for-centos-lets-begin
        if 'centos' in series:
            check_call([pip, 'install', '-U', 'pyyaml'],
                       env=_get_subprocess_env())
        # install python packages from layer options
        if cfg.get('python_packages'):
            check_call([pip, 'install', '-U'] + cfg.get('python_packages'),
                       env=_get_subprocess_env())
        if not cfg.get('use_venv'):
            # restore system pip to prevent `pip3 install -U pip`
            # from changing it
            if os.path.exists('/usr/bin/pip.save'):
                shutil.copy2('/usr/bin/pip.save', '/usr/bin/pip')
                os.remove('/usr/bin/pip.save')
        # setup wrappers to ensure envs are used for scripts
        install_or_update_charm_env()
        for wrapper in ('charms.reactive', 'charms.reactive.sh',
                        'chlp', 'layer_option'):
            src = os.path.join('/usr/local/sbin', 'charm-env')
            dst = os.path.join('/usr/local/sbin', wrapper)
            if not os.path.exists(dst):
                os.symlink(src, dst)
        if cfg.get('use_venv'):
            shutil.copy2('bin/layer_option', vbin)
        else:
            shutil.copy2('bin/layer_option', '/usr/local/bin/')
            # re-link the charm copy to the wrapper in case charms
            # call bin/layer_option directly (as was the old pattern)
            os.remove('bin/layer_option')
            os.symlink('/usr/local/sbin/layer_option', 'bin/layer_option')
        # flag us as having already bootstrapped so we don't do it again
        open('wheelhouse/.bootstrapped', 'w').close()
        if is_upgrade:
            # flag us as having already upgraded so we don't do it again
            open('wheelhouse/.upgraded', 'w').close()
        # Ensure that the newly bootstrapped libs are available.
        # Note: this only seems to be an issue with namespace packages.
        # Non-namespace-package libs (e.g., charmhelpers) are available
        # without having to reload the interpreter. :/
        reload_interpreter(vpy if cfg.get('use_venv') else sys.argv[0])
def _load_installed_versions(pip):
pip_freeze = check_output([pip, 'freeze']).decode('utf8')
versions = {}
for pkg_ver in pip_freeze.splitlines():
try:
req = Requirement.parse(pkg_ver)
except ValueError:
continue
versions.update({
req.project_name: LooseVersion(ver)
for op, ver in req.specs if op == '=='
})
return versions
def _load_wheelhouse_versions():
versions = {}
for wheel in glob('wheelhouse/*'):
pkg, ver = os.path.basename(wheel).rsplit('-', 1)
# nb: LooseVersion ignores the file extension
versions[pkg.replace('_', '-')] = LooseVersion(ver)
return versions
def _add_back_versions(pkgs, versions):
"""Add back the version strings to each of the packages.
The versions are LooseVersion() from _load_wheelhouse_versions(). This
function strips the ".zip" or ".tar.gz" from the end of the version string
and adds it back to the package in the form of <package_name>==<version>
If a package name is not a key in the versions dictionary, then it is
returned in the list unchanged.
:param pkgs: A list of package names
:type pkgs: List[str]
:param versions: A map of package to LooseVersion
:type versions: Dict[str, LooseVersion]
:returns: A list of (maybe) versioned packages
:rtype: List[str]
"""
def _strip_ext(s):
"""Strip an extension (if it exists) from the string
:param s: the string to strip an extension off if it exists
:type s: str
:returns: string without an extension of .zip or .tar.gz
:rtype: str
"""
for ending in [".zip", ".tar.gz"]:
if s.endswith(ending):
return s[:-len(ending)]
return s
def _maybe_add_version(pkg):
"""Maybe add back the version number to a package if it exists.
Adds the version number, if the package exists in the lexically
captured `versions` dictionary, in the form <pkg>==<version>. Strips
the extension if it exists.
:param pkg: the package name to (maybe) add the version number to.
:type pkg: str
"""
try:
return "{}=={}".format(pkg, _strip_ext(str(versions[pkg])))
except KeyError:
pass
return pkg
return [_maybe_add_version(pkg) for pkg in pkgs]
def _update_if_newer(pip, pkgs):
    """Install each pkg from ./wheelhouse when missing or older than wheelhouse.

    NOTE(review): assumes every name in pkgs has a matching wheelhouse
    entry; a pkg installed but absent from the wheelhouse would KeyError —
    confirm callers only pass wheelhouse-backed packages.
    """
    installed = _load_installed_versions(pip)
    available = _load_wheelhouse_versions()
    for pkg in pkgs:
        missing = pkg not in installed
        if missing or available[pkg] > installed[pkg]:
            check_call([pip, 'install', '-U', '--no-index', '-f', 'wheelhouse',
                        pkg], env=_get_subprocess_env())
def install_or_update_charm_env():
    """Copy bin/charm-env to /usr/local/sbin when the bundled copy is newer."""
    # On Trusty python3-pkg-resources is not installed
    try:
        from pkg_resources import parse_version
    except ImportError:
        apt_install(['python3-pkg-resources'])
        from pkg_resources import parse_version

    def _version_of(path):
        # A missing or broken binary counts as version 0.0.0.
        try:
            out = check_output([path, '--version']).decode('utf8')
        except (CalledProcessError, FileNotFoundError):
            return parse_version('0.0.0')
        return parse_version(out)

    installed_version = _version_of('/usr/local/sbin/charm-env')
    bundled_version = _version_of('bin/charm-env')
    if installed_version < bundled_version:
        shutil.copy2('bin/charm-env', '/usr/local/sbin/')
def activate_venv():
    """
    Activate the venv if enabled in ``layer.yaml``.

    This is handled automatically for normal hooks, but actions might
    need to invoke this manually, using something like:

        # Load modules from $JUJU_CHARM_DIR/lib
        import sys
        sys.path.append('lib')

        from charms.layer.basic import activate_venv
        activate_venv()

    This will ensure that modules installed in the charm's
    virtual environment are available to the action.
    """
    from charms.layer import options
    # The venv lives one directory above the charm dir (cwd-relative).
    venv = os.path.abspath('../.venv')
    vbin = os.path.join(venv, 'bin')
    vpy = os.path.join(vbin, 'python')
    use_venv = options.get('basic', 'use_venv')
    if use_venv and '.venv' not in sys.executable:
        # activate the venv
        os.environ['PATH'] = ':'.join([vbin, os.environ['PATH']])
        # NOTE: reload_interpreter re-execs this process under the venv's
        # python; nothing after this call runs in the current interpreter.
        reload_interpreter(vpy)
    layer.patch_options_interface()
    layer.import_layer_libs()
def reload_interpreter(python):
    """
    Reload the python interpreter to ensure that all deps are available.

    Newly installed modules in namespace packages sometimes seem to
    not be picked up by Python 3.

    This call does not return: the current process image is replaced
    via execve with the given python running the same argv.
    """
    os.execve(python, [python] + list(sys.argv), os.environ)
def apt_install(packages):
    """
    Install apt packages.

    This ensures a consistent set of options that are often missed but
    should really be set.
    """
    if isinstance(packages, (str, bytes)):
        packages = [packages]

    env = _get_subprocess_env()
    # Never prompt during unattended installs.
    env.setdefault('DEBIAN_FRONTEND', 'noninteractive')

    install_cmd = ['apt-get',
                   '--option=Dpkg::Options::=--force-confold',
                   '--assume-yes',
                   'install']
    max_attempts = 3
    for attempt in range(1, max_attempts + 1):
        try:
            check_call(install_cmd + packages, env=env)
            break
        except CalledProcessError:
            if attempt == max_attempts:
                raise
            try:
                # sometimes apt-get update needs to be run
                check_call(['apt-get', 'update'], env=env)
            except CalledProcessError:
                # sometimes it's a dpkg lock issue
                pass
            sleep(5)
def yum_install(packages):
    """Install packages with yum, retrying up to three times.

    This function largely mimics the apt_install function for consistency.
    An empty or falsy ``packages`` value is a no-op (the original wrapped
    the whole body in ``if packages:`` with a dead ``else: pass``).

    :param packages: list of package names to install
    :raises CalledProcessError: if the third/final install attempt fails
    """
    if not packages:
        # Nothing to install.
        return
    env = os.environ.copy()
    cmd = ['yum', '-y', 'install']
    for attempt in range(3):
        try:
            check_call(cmd + packages, env=env)
        except CalledProcessError:
            if attempt == 2:  # third and final attempt
                raise
            try:
                # A stale metadata cache can cause failures; refresh it.
                check_call(['yum', 'update'], env=env)
            except CalledProcessError:
                # e.g. transient lock contention; just wait and retry.
                pass
            sleep(5)
        else:
            break
def init_config_states():
    """Raise reactive ``config.*`` flags from the charm's current config.

    For every option declared in config.yaml this sets:
      - ``config.changed`` and ``config.changed.<opt>`` when the value
        changed since the previous hook invocation,
      - ``config.set.<opt>`` toggled on the value's truthiness,
      - ``config.default.<opt>`` toggled on equality with the declared
        default.
    """
    import yaml
    from charmhelpers.core import hookenv
    from charms.reactive import set_state
    from charms.reactive import toggle_state
    config = hookenv.config()
    config_defaults = {}
    config_defs = {}
    config_yaml = os.path.join(hookenv.charm_dir(), 'config.yaml')
    if os.path.exists(config_yaml):
        with open(config_yaml) as fp:
            config_defs = yaml.safe_load(fp).get('options', {})
            config_defaults = {key: value.get('default')
                               for key, value in config_defs.items()}
    for opt in config_defs.keys():
        if config.changed(opt):
            set_state('config.changed')
            set_state('config.changed.{}'.format(opt))
        toggle_state('config.set.{}'.format(opt), config.get(opt))
        toggle_state('config.default.{}'.format(opt),
                     config.get(opt) == config_defaults[opt])
def clear_config_states():
    """Remove all reactive ``config.*`` flags and flush the unit kv store."""
    from charmhelpers.core import hookenv, unitdata
    from charms.reactive import remove_state
    config = hookenv.config()
    remove_state('config.changed')
    for opt in config.keys():
        for pattern in ('config.changed.{}', 'config.set.{}',
                        'config.default.{}'):
            remove_state(pattern.format(opt))
    unitdata.kv().flush()

View File

@ -0,0 +1,114 @@
# Copyright 2014-2016 Canonical Limited.
#
# This file is part of layer-basic, the reactive base layer for Juju.
#
# charm-helpers is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License version 3 as
# published by the Free Software Foundation.
#
# charm-helpers is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
# This module may only import from the Python standard library.
import os
import sys
import subprocess
import time
'''
execd/preinstall
Read the layer-basic docs for more info on how to use this feature.
https://charmsreactive.readthedocs.io/en/latest/layer-basic.html#exec-d-support
'''
def default_execd_dir():
    """Return $JUJU_CHARM_DIR/exec.d, the default execd location."""
    charm_dir = os.environ['JUJU_CHARM_DIR']
    return os.path.join(charm_dir, 'exec.d')
def execd_module_paths(execd_dir=None):
    """Generate a list of full paths to modules within execd_dir."""
    base = execd_dir or default_execd_dir()
    if not os.path.exists(base):
        return
    for entry in os.listdir(base):
        candidate = os.path.join(base, entry)
        # Only directories count as execd modules.
        if os.path.isdir(candidate):
            yield candidate
def execd_submodule_paths(command, execd_dir=None):
    """Generate a list of full paths to the specified command within exec_dir.
    """
    for module_path in execd_module_paths(execd_dir):
        candidate = os.path.join(module_path, command)
        # Must be an executable regular file to be runnable.
        if os.path.isfile(candidate) and os.access(candidate, os.X_OK):
            yield candidate
def execd_sentinel_path(submodule_path):
    """Return the '.done' sentinel path recording that submodule_path ran."""
    module_path, submodule_name = os.path.split(submodule_path)
    execd_path, module_name = os.path.split(module_path)
    sentinel_name = '.{}_{}.done'.format(module_name, submodule_name)
    return os.path.join(execd_path, sentinel_name)
def execd_run(command, execd_dir=None, stop_on_error=True, stderr=None):
    """Run command for each module within execd_dir which defines it.

    A sentinel file is written next to each module after a successful
    run so the command is executed at most once per module. On failure,
    the unit is set to 'blocked' and, when stop_on_error is True, the
    process exits.
    """
    if stderr is None:
        stderr = sys.stdout
    for submodule_path in execd_submodule_paths(command, execd_dir):
        # Only run each execd once. We cannot simply run them in the
        # install hook, as potentially storage hooks are run before that.
        # We cannot rely on them being idempotent.
        sentinel = execd_sentinel_path(submodule_path)
        if os.path.exists(sentinel):
            continue
        try:
            subprocess.check_call([submodule_path], stderr=stderr,
                                  universal_newlines=True)
            with open(sentinel, 'w') as f:
                f.write('{} ran successfully {}\n'.format(submodule_path,
                                                          time.ctime()))
                f.write('Removing this file will cause it to be run again\n')
        except subprocess.CalledProcessError as e:
            # Logs get the details. We can't use juju-log, as the
            # output may be substantial and exceed command line
            # length limits.
            print("ERROR ({}) running {}".format(e.returncode, e.cmd),
                  file=stderr)
            print("STDOUT<<EOM", file=stderr)
            # NOTE(review): check_call does not capture output, so
            # e.output is None here — this prints "None"; confirm whether
            # check_output was intended.
            print(e.output, file=stderr)
            print("EOM", file=stderr)
            # Unit workload status gets a shorter fail message.
            short_path = os.path.relpath(submodule_path)
            block_msg = "Error ({}) running {}".format(e.returncode,
                                                       short_path)
            try:
                subprocess.check_call(['status-set', 'blocked', block_msg],
                                      universal_newlines=True)
                if stop_on_error:
                    sys.exit(0)  # Leave unit in blocked state.
            except Exception:
                pass  # We care about the exec.d/* failure, not status-set.
            if stop_on_error:
                sys.exit(e.returncode or 1)  # Error state for pre-1.24 Juju
def execd_preinstall(execd_dir=None):
    """Run charm-pre-install for each module within execd_dir.

    Thin wrapper over execd_run with the fixed command name.
    """
    execd_run('charm-pre-install', execd_dir=execd_dir)

View File

@ -0,0 +1,924 @@
#!/usr/bin/env python
# Copyright 2015 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import ipaddress
import re
import os
import subprocess
import hashlib
import json
import traceback
import random
import string
import tempfile
import yaml
from base64 import b64decode, b64encode
from pathlib import Path
from subprocess import check_output, check_call
from socket import gethostname, getfqdn
from shlex import split
from subprocess import CalledProcessError
from charmhelpers.core import hookenv, unitdata
from charmhelpers.core import host
from charmhelpers.core.templating import render
from charms.reactive import endpoint_from_flag, is_state
from time import sleep
# Namespace and secret type used for Charmed Kubernetes token-auth secrets.
AUTH_SECRET_NS = "kube-system"
AUTH_SECRET_TYPE = "juju.is/token-auth"
# Charm-local key/value store (persisted via charmhelpers unitdata).
db = unitdata.kv()
# Kubeconfig consumed by the kubectl() helper below.
kubeclientconfig_path = "/root/.kube/config"
# Env var read by GCP client libraries to locate service-account creds.
gcp_creds_env_key = "GOOGLE_APPLICATION_CREDENTIALS"
kubeproxyconfig_path = "/root/cdk/kubeproxyconfig"
# Canonical on-disk locations for the cluster CA and TLS keypairs.
certs_dir = Path("/root/cdk")
ca_crt_path = certs_dir / "ca.crt"
server_crt_path = certs_dir / "server.crt"
server_key_path = certs_dir / "server.key"
client_crt_path = certs_dir / "client.crt"
client_key_path = certs_dir / "client.key"
def get_version(bin_name):
    """Get the version of an installed Kubernetes binary.

    :param str bin_name: Name of binary
    :return: 3-tuple version (maj, min, patch)

    Example::

        >>> get_version('kubelet')
        (1, 6, 0)
    """
    cmd = "{} --version".format(bin_name).split()
    version_string = subprocess.check_output(cmd).decode("utf-8")
    # The first three integers in the output are taken as (major, minor, patch).
    return tuple(int(q) for q in re.findall("[0-9]+", version_string)[:3])
def retry(times, delay_secs):
    """Decorator for retrying a method call.

    The wrapped function is called once, then retried up to ``times``
    additional times (sleeping ``delay_secs`` before each retry) until it
    returns a truthy value. The original implementation had a redundant
    ``if res: break`` inside the ``while not res`` loop and did not
    preserve the wrapped function's metadata.

    Args:
        times: How many times should we retry before giving up
        delay_secs: Delay in secs

    Returns: A callable that would return the last call outcome
    """
    def retry_decorator(func):
        """Wrap func in the retry loop, preserving its metadata."""
        from functools import wraps

        @wraps(func)
        def _wrapped(*args, **kwargs):
            res = func(*args, **kwargs)
            for _ in range(times):
                if res:
                    break
                sleep(delay_secs)
                res = func(*args, **kwargs)
            return res

        return _wrapped

    return retry_decorator
def calculate_resource_checksum(resource):
    """Calculate a checksum for a resource

    Returns the md5 hex digest of the attached resource's bytes; when no
    resource is attached this is the digest of zero bytes.
    """
    digest = hashlib.md5()
    path = hookenv.resource_get(resource)
    if path:
        with open(path, "rb") as fh:
            digest.update(fh.read())
    return digest.hexdigest()
def get_resource_checksum_db_key(checksum_prefix, resource):
    """Convert a resource name to a resource checksum database key."""
    return "".join((checksum_prefix, resource))
def migrate_resource_checksums(checksum_prefix, snap_resources):
    """Migrate resource checksums from the old schema to the new one

    For each resource without a value under the new key, copy the old
    charms.reactive any_file_changed checksum if a resource is attached;
    otherwise store the md5 of zero bytes so future comparisons work.
    """
    for resource in snap_resources:
        new_key = get_resource_checksum_db_key(checksum_prefix, resource)
        if not db.get(new_key):
            path = hookenv.resource_get(resource)
            if path:
                # old key from charms.reactive.helpers.any_file_changed
                old_key = "reactive.files_changed." + path
                old_checksum = db.get(old_key)
                db.set(new_key, old_checksum)
            else:
                # No resource is attached. Previously, this meant no checksum
                # would be calculated and stored. But now we calculate it as if
                # it is a 0-byte resource, so let's go ahead and do that.
                zero_checksum = hashlib.md5().hexdigest()
                db.set(new_key, zero_checksum)
def check_resources_for_upgrade_needed(checksum_prefix, snap_resources):
    """Return True when any snap resource's checksum differs from the
    value previously stored in the charm kv database."""
    hookenv.status_set("maintenance", "Checking resources")
    for resource in snap_resources:
        stored = db.get(get_resource_checksum_db_key(checksum_prefix, resource))
        current = calculate_resource_checksum(resource)
        if current != stored:
            return True
    return False
def calculate_and_store_resource_checksums(checksum_prefix, snap_resources):
    """Record the current checksum of every snap resource in the charm kv db."""
    for resource in snap_resources:
        db.set(
            get_resource_checksum_db_key(checksum_prefix, resource),
            calculate_resource_checksum(resource),
        )
def get_ingress_address(endpoint_name, ignore_addresses=None):
    """Return the preferred ingress address for the given endpoint.

    Falls back to the unit's private-address on Juju versions without
    network-spaces support. Addresses listed in ignore_addresses are
    filtered out, and fan-network addresses (240.0.0.0/4) are sorted last.
    """
    try:
        network_info = hookenv.network_get(endpoint_name)
    except NotImplementedError:
        network_info = {}

    if not network_info or "ingress-addresses" not in network_info:
        # if they don't have ingress-addresses they are running a juju that
        # doesn't support spaces, so just return the private address
        return hookenv.unit_get("private-address")

    addresses = network_info["ingress-addresses"]

    if ignore_addresses:
        hookenv.log("ingress-addresses before filtering: {}".format(addresses))
        addresses = [a for a in addresses if a not in ignore_addresses]
        hookenv.log("ingress-addresses after filtering: {}".format(addresses))

    # Need to prefer non-fan IP addresses due to various issues, e.g.
    # https://bugs.launchpad.net/charm-gcp-integrator/+bug/1822997
    # Fan typically likes to use IPs in the 240.0.0.0/4 block, so we'll
    # prioritize those last. Not technically correct, but good enough.
    try:
        def _fan_last(addr):
            # True (sorts after False) for addresses in 240.0.0.0/4.
            return int(addr.partition(".")[0]) >= 240

        addresses = sorted(addresses, key=_fan_last)
    except Exception:
        hookenv.log(traceback.format_exc())

    return addresses[0]
def get_ingress_address6(endpoint_name):
    """Return the first IPv6 ingress address for endpoint_name, or None."""
    try:
        network_info = hookenv.network_get(endpoint_name)
    except NotImplementedError:
        network_info = {}

    if not network_info or "ingress-addresses" not in network_info:
        return None

    for addr in network_info["ingress-addresses"]:
        candidate = ipaddress.ip_interface(addr).ip
        if candidate.version == 6:
            return str(candidate)
    return None
def service_restart(service_name):
    """Restart a system service, surfacing progress via unit status.

    :param str service_name: name of the service to restart
    """
    hookenv.status_set("maintenance", "Restarting {0} service".format(service_name))
    host.service_restart(service_name)
def service_start(service_name):
    """Start a system service.

    Bug fix: the previous implementation logged "Starting" but invoked
    host.service_stop(), which stopped the service instead of starting it.

    :param str service_name: name of the service to start
    """
    hookenv.log("Starting {0} service.".format(service_name))
    host.service_start(service_name)
def service_stop(service_name):
    """Stop a system service.

    :param str service_name: name of the service to stop
    """
    hookenv.log("Stopping {0} service.".format(service_name))
    host.service_stop(service_name)
def arch():
    """Return the package architecture as a string. Raise an exception if the
    architecture is not supported by kubernetes."""
    # dpkg reports the local package architecture, e.g. b"amd64\n".
    raw = check_output(["dpkg", "--print-architecture"])
    return raw.decode("utf-8").rstrip()
def get_service_ip(service, namespace="kube-system", errors_fatal=True):
    """Look up a Kubernetes service's clusterIP via kubectl.

    When errors_fatal is False, a failed kubectl call yields None;
    otherwise the CalledProcessError propagates.
    """
    try:
        output = kubectl(
            "get", "service", "--namespace", namespace, service, "--output", "json"
        )
    except CalledProcessError:
        if errors_fatal:
            raise
        return None
    service_doc = json.loads(output.decode())
    return service_doc["spec"]["clusterIP"]
def kubectl(*args):
    """Run a kubectl cli command with a config file. Returns stdout and throws
    an error if the command fails.

    :param args: arguments passed through to kubectl
    :returns: raw stdout bytes from kubectl
    :raises CalledProcessError: if kubectl exits non-zero
    """
    command = ["kubectl", "--kubeconfig=" + kubeclientconfig_path] + list(args)
    hookenv.log("Executing {}".format(command))
    return check_output(command)
def kubectl_success(*args):
    """Runs kubectl with the given args. Returns True if successful, False if
    not.

    :param args: arguments passed through to kubectl
    :rtype: bool
    """
    try:
        kubectl(*args)
        return True
    except CalledProcessError:
        return False
def kubectl_manifest(operation, manifest):
    """Wrap the kubectl creation command when using filepath resources

    :param operation - one of get, create, delete, replace
    :param manifest - filepath to the manifest
    """
    if operation == "delete":
        # Deletions are a special case: remove requested resources
        # immediately with --now.
        return kubectl_success(operation, "-f", manifest, "--now")
    if operation == "create" and kubectl_success("get", "-f", manifest):
        # Guard against an error re-creating the same manifest: if the
        # definition already exists, treat creation as successful.
        hookenv.log("Skipping definition for {}".format(manifest))
        return True
    # Execute the requested command that did not match any of the special
    # cases above.
    return kubectl_success(operation, "-f", manifest)
def get_node_name():
    """Return this unit's node name: lowercased FQDN on AWS, lowercased
    hostname everywhere else."""
    kubelet_extra_args = parse_extra_args("kubelet-extra-args")
    cloud_provider = kubelet_extra_args.get("cloud-provider", "")
    # A ready cloud-integration endpoint overrides any extra-args value;
    # the first matching flag wins, mirroring the original elif chain.
    endpoint_providers = (
        ("endpoint.aws.ready", "aws"),
        ("endpoint.gcp.ready", "gce"),
        ("endpoint.openstack.ready", "openstack"),
        ("endpoint.vsphere.ready", "vsphere"),
        ("endpoint.azure.ready", "azure"),
    )
    for flag, provider in endpoint_providers:
        if is_state(flag):
            cloud_provider = provider
            break
    if cloud_provider == "aws":
        # AWS nodes register with their fully-qualified domain name.
        return getfqdn().lower()
    return gethostname().lower()
def create_kubeconfig(
    kubeconfig,
    server,
    ca,
    key=None,
    certificate=None,
    user="ubuntu",
    context="juju-context",
    cluster="juju-cluster",
    password=None,
    token=None,
    keystone=False,
    aws_iam_cluster_id=None,
):
    """Create a configuration for Kubernetes based on path using the supplied
    arguments for values of the Kubernetes server, CA, key, certificate, user
    context and cluster.

    The file is built at <kubeconfig>.new via a sequence of ``kubectl
    config`` calls and only renamed over the existing file when its
    content changed.

    :raises ValueError: if no auth mechanism is given, if only one half of
        a key/certificate pair is given, or if both token and password are
        given.
    """
    if not key and not certificate and not password and not token:
        raise ValueError("Missing authentication mechanism.")

    # token and password are mutually exclusive. Error early if both are
    # present. The developer has requested an impossible situation.
    # see: kubectl config set-credentials --help
    elif key and not certificate:
        raise ValueError("Missing certificate.")
    elif not key and certificate:
        raise ValueError("Missing key.")
    elif token and password:
        raise ValueError("Token and Password are mutually exclusive.")
    old_kubeconfig = Path(kubeconfig)
    new_kubeconfig = Path(str(kubeconfig) + ".new")
    # Create the config file with the address of the master server.
    cmd = (
        "kubectl config --kubeconfig={0} set-cluster {1} "
        "--server={2} --certificate-authority={3} --embed-certs=true"
    )
    check_call(split(cmd.format(new_kubeconfig, cluster, server, ca)))
    # Delete old users
    cmd = "kubectl config --kubeconfig={0} unset users"
    check_call(split(cmd.format(new_kubeconfig)))
    # Create the credentials using the client flags.
    cmd = "kubectl config --kubeconfig={0} " "set-credentials {1} ".format(
        new_kubeconfig, user
    )
    if key and certificate:
        cmd = (
            "{0} --client-key={1} --client-certificate={2} "
            "--embed-certs=true".format(cmd, key, certificate)
        )
    if password:
        cmd = "{0} --username={1} --password={2}".format(cmd, user, password)
    # This is mutually exclusive from password. They will not work together.
    if token:
        cmd = "{0} --token={1}".format(cmd, token)
    check_call(split(cmd))
    # Create a default context with the cluster.
    cmd = "kubectl config --kubeconfig={0} set-context {1} " "--cluster={2} --user={3}"
    check_call(split(cmd.format(new_kubeconfig, context, cluster, user)))
    # Make the config use this new context.
    cmd = "kubectl config --kubeconfig={0} use-context {1}"
    check_call(split(cmd.format(new_kubeconfig, context)))
    if keystone:
        # create keystone user
        cmd = "kubectl config --kubeconfig={0} " "set-credentials keystone-user".format(
            new_kubeconfig
        )
        check_call(split(cmd))
        # create keystone context
        cmd = (
            "kubectl config --kubeconfig={0} "
            "set-context --cluster={1} "
            "--user=keystone-user keystone".format(new_kubeconfig, cluster)
        )
        check_call(split(cmd))
        # use keystone context
        cmd = "kubectl config --kubeconfig={0} " "use-context keystone".format(
            new_kubeconfig
        )
        check_call(split(cmd))
        # manually add exec command until kubectl can do it for us
        with open(new_kubeconfig, "r") as f:
            content = f.read()
            content = content.replace(
                """- name: keystone-user
  user: {}""",
                """- name: keystone-user
  user:
    exec:
      command: "/snap/bin/client-keystone-auth"
      apiVersion: "client.authentication.k8s.io/v1beta1"
""",
            )
        with open(new_kubeconfig, "w") as f:
            f.write(content)
    if aws_iam_cluster_id:
        # create aws-iam context
        cmd = (
            "kubectl config --kubeconfig={0} "
            "set-context --cluster={1} "
            "--user=aws-iam-user aws-iam-authenticator"
        )
        check_call(split(cmd.format(new_kubeconfig, cluster)))
        # append a user for aws-iam
        cmd = (
            "kubectl --kubeconfig={0} config set-credentials "
            "aws-iam-user --exec-command=aws-iam-authenticator "
            '--exec-arg="token" --exec-arg="-i" --exec-arg="{1}" '
            '--exec-arg="-r" --exec-arg="<<insert_arn_here>>" '
            "--exec-api-version=client.authentication.k8s.io/v1alpha1"
        )
        check_call(split(cmd.format(new_kubeconfig, aws_iam_cluster_id)))
        # not going to use aws-iam context by default since we don't have
        # the desired arn. This will make the config not usable if copied.
        # cmd = 'kubectl config --kubeconfig={0} ' \
        #       'use-context aws-iam-authenticator'.format(new_kubeconfig)
        # check_call(split(cmd))
    # Only replace the live kubeconfig when the content actually changed,
    # so watchers of the file are not spuriously triggered.
    if old_kubeconfig.exists():
        changed = new_kubeconfig.read_text() != old_kubeconfig.read_text()
    else:
        changed = True
    if changed:
        new_kubeconfig.rename(old_kubeconfig)
def parse_extra_args(config_key):
    """Parse a space-separated 'key=value' charm config option into a dict.

    Bare tokens without '=' are mapped to the string "true".
    """
    args = {}
    for token in hookenv.config().get(config_key, "").split():
        key, sep, value = token.partition("=")
        args[key] = value if sep else "true"
    return args
def configure_kubernetes_service(key, service, base_args, extra_args_key):
    """Apply the merged argument set for a Kubernetes snap via ``snap set``.

    Precedence (lowest to highest): base_args, the charm's
    <extra_args_key> config option, then any kv-stored CIS overrides.

    :param key: kv-store prefix used to track previously-set snap args
    :param service: snap name (e.g. 'kube-proxy')
    :param base_args: dict of default arguments
    :param extra_args_key: charm config option holding operator overrides
    """
    db = unitdata.kv()
    prev_args_key = key + service
    prev_snap_args = db.get(prev_args_key) or {}
    extra_args = parse_extra_args(extra_args_key)
    args = {}
    args.update(base_args)
    args.update(extra_args)
    # CIS benchmark action may inject kv config to pass failing tests. Merge
    # these after the func args as they should take precedence.
    cis_args_key = "cis-" + service
    cis_args = db.get(cis_args_key) or {}
    args.update(cis_args)
    # Remove any args with 'None' values (all k8s args are 'k=v') and
    # construct an arg string for use by 'snap set'.
    args = {k: v for k, v in args.items() if v is not None}
    args = ['--%s="%s"' % arg for arg in args.items()]
    args = " ".join(args)
    snap_opts = {}
    for arg in prev_snap_args:
        # remove previous args by setting to null
        snap_opts[arg] = "null"
    snap_opts["args"] = args
    snap_opts = ["%s=%s" % opt for opt in snap_opts.items()]
    cmd = ["snap", "set", service] + snap_opts
    check_call(cmd)
    # Now that we've started doing snap configuration through the "args"
    # option, we should never need to clear previous args again.
    db.set(prev_args_key, {})
def _snap_common_path(component):
return Path("/var/snap/{}/common".format(component))
def cloud_config_path(component):
    # Path of the cloud-config file inside the component's SNAP_COMMON dir.
    return _snap_common_path(component) / "cloud-config.conf"
def _gcp_creds_path(component):
    # Path of the GCP service-account creds file in SNAP_COMMON.
    return _snap_common_path(component) / "gcp-creds.json"
def _daemon_env_path(component):
    # Path of the daemon environment file in SNAP_COMMON.
    return _snap_common_path(component) / "environment"
def _cloud_endpoint_ca_path(component):
    # Path of the cloud endpoint CA certificate in SNAP_COMMON.
    return _snap_common_path(component) / "cloud-endpoint-ca.crt"
def encryption_config_path():
    """Full path of the kube-apiserver encryption-at-rest config file."""
    common = _snap_common_path("kube-apiserver")
    return common / "encryption" / "encryption_config.yaml"
def write_gcp_snap_config(component):
    """Write GCP credentials, cloud-config and daemon env for a snap component.

    Writes the service-account JSON (mode 0600), a minimal [Global]
    cloud-config, and ensures GOOGLE_APPLICATION_CREDENTIALS is exported
    in the component's daemon environment file.
    """
    # gcp requires additional credentials setup
    gcp = endpoint_from_flag("endpoint.gcp.ready")
    creds_path = _gcp_creds_path(component)
    with creds_path.open("w") as fp:
        # Restrict the creds file before writing any secret material.
        os.fchmod(fp.fileno(), 0o600)
        fp.write(gcp.credentials)
    # create a cloud-config file that sets token-url to nil to make the
    # services use the creds env var instead of the metadata server, as
    # well as making the cluster multizone
    comp_cloud_config_path = cloud_config_path(component)
    comp_cloud_config_path.write_text(
        "[Global]\n" "token-url = nil\n" "multizone = true\n"
    )
    daemon_env_path = _daemon_env_path(component)
    if daemon_env_path.exists():
        daemon_env = daemon_env_path.read_text()
        if not daemon_env.endswith("\n"):
            daemon_env += "\n"
    else:
        daemon_env = ""
    # Only append the env var once; repeated calls keep the file stable.
    if gcp_creds_env_key not in daemon_env:
        daemon_env += "{}={}\n".format(gcp_creds_env_key, creds_path)
        daemon_env_path.parent.mkdir(parents=True, exist_ok=True)
        daemon_env_path.write_text(daemon_env)
def generate_openstack_cloud_config():
    """Build the OpenStack cloud provider config file contents.

    Returns an INI-style string with [Global], [LoadBalancer] and
    (optionally) [BlockStorage] sections derived from the
    endpoint.openstack.ready relation data.
    """
    # openstack requires additional credentials setup
    openstack = endpoint_from_flag("endpoint.openstack.ready")

    lines = [
        "[Global]",
        "auth-url = {}".format(openstack.auth_url),
        "region = {}".format(openstack.region),
        "username = {}".format(openstack.username),
        "password = {}".format(openstack.password),
        "tenant-name = {}".format(openstack.project_name),
        "domain-name = {}".format(openstack.user_domain_name),
        "tenant-domain-name = {}".format(openstack.project_domain_name),
    ]
    if openstack.endpoint_tls_ca:
        lines.append("ca-file = /etc/config/endpoint-ca.cert")

    lines.extend(
        [
            "",
            "[LoadBalancer]",
        ]
    )
    if openstack.has_octavia in (True, None):
        # Newer integrator charm will detect whether underlying OpenStack has
        # Octavia enabled so we can set this intelligently. If we're still
        # related to an older integrator, though, default to assuming Octavia
        # is available.
        lines.append("use-octavia = true")
    else:
        lines.append("use-octavia = false")
        lines.append("lb-provider = haproxy")
    if openstack.subnet_id:
        lines.append("subnet-id = {}".format(openstack.subnet_id))
    if openstack.floating_network_id:
        lines.append("floating-network-id = {}".format(openstack.floating_network_id))
    if openstack.lb_method:
        lines.append("lb-method = {}".format(openstack.lb_method))
    if openstack.manage_security_groups:
        lines.append(
            "manage-security-groups = {}".format(openstack.manage_security_groups)
        )
    # Only emit [BlockStorage] when at least one of its options is set.
    if any(
        [openstack.bs_version, openstack.trust_device_path, openstack.ignore_volume_az]
    ):
        lines.append("")
        lines.append("[BlockStorage]")
        if openstack.bs_version is not None:
            lines.append("bs-version = {}".format(openstack.bs_version))
        if openstack.trust_device_path is not None:
            lines.append("trust-device-path = {}".format(openstack.trust_device_path))
        if openstack.ignore_volume_az is not None:
            lines.append("ignore-volume-az = {}".format(openstack.ignore_volume_az))
    return "\n".join(lines) + "\n"
def write_azure_snap_config(component):
    """Write the Azure cloud provider JSON config for a snap component.

    Values come from the endpoint.azure.ready relation data; the result
    is written to the component's cloud-config path in SNAP_COMMON.
    """
    azure = endpoint_from_flag("endpoint.azure.ready")
    comp_cloud_config_path = cloud_config_path(component)
    comp_cloud_config_path.write_text(
        json.dumps(
            {
                "useInstanceMetadata": True,
                "useManagedIdentityExtension": azure.managed_identity,
                "subscriptionId": azure.subscription_id,
                "resourceGroup": azure.resource_group,
                "location": azure.resource_group_location,
                "vnetName": azure.vnet_name,
                "vnetResourceGroup": azure.vnet_resource_group,
                "subnetName": azure.subnet_name,
                "securityGroupName": azure.security_group_name,
                "loadBalancerSku": "standard",
                "securityGroupResourceGroup": azure.security_group_resource_group,
                "aadClientId": azure.aad_client_id,
                "aadClientSecret": azure.aad_client_secret,
                "tenantId": azure.tenant_id,
            }
        )
    )
def configure_kube_proxy(
    configure_prefix, api_servers, cluster_cidr, bind_address=None
):
    """Assemble kube-proxy arguments and apply them via snap set.

    The master URL is picked from api_servers by unit number (round-robin
    across units). IPv6 and dual-stack CIDRs adjust bind address and
    feature gates accordingly.
    """
    kube_proxy_opts = {}
    kube_proxy_opts["cluster-cidr"] = cluster_cidr
    kube_proxy_opts["kubeconfig"] = kubeproxyconfig_path
    kube_proxy_opts["logtostderr"] = "true"
    kube_proxy_opts["v"] = "0"
    # Spread units across the available API servers.
    num_apis = len(api_servers)
    kube_proxy_opts["master"] = api_servers[get_unit_number() % num_apis]
    kube_proxy_opts["hostname-override"] = get_node_name()

    if bind_address:
        kube_proxy_opts["bind-address"] = bind_address
    elif is_ipv6(cluster_cidr):
        # IPv6-only cluster with no explicit bind address: bind all v6.
        kube_proxy_opts["bind-address"] = "::"

    if host.is_container():
        # conntrack tuning is not possible inside containers.
        kube_proxy_opts["conntrack-max-per-core"] = "0"

    if is_dual_stack(cluster_cidr):
        kube_proxy_opts["feature-gates"] = "IPv6DualStack=true"

    configure_kubernetes_service(
        configure_prefix, "kube-proxy", kube_proxy_opts, "proxy-extra-args"
    )
def get_unit_number():
    # Numeric index of this unit, e.g. 2 for 'kubernetes-worker/2'.
    return int(hookenv.local_unit().split("/")[1])
def cluster_cidr():
    """Return the cluster CIDR provided by the CNI

    Returns None when the CNI endpoint (or, on workers, kube-control) is
    not yet available. Masters read 'default-cni' from their own config;
    workers obtain it from the kube-control relation.
    """
    cni = endpoint_from_flag("cni.available")
    if not cni:
        return None
    config = hookenv.config()
    if "default-cni" in config:
        # master
        default_cni = config["default-cni"]
    else:
        # worker
        kube_control = endpoint_from_flag("kube-control.dns.available")
        if not kube_control:
            return None
        default_cni = kube_control.get_default_cni()
    return cni.get_config(default=default_cni)["cidr"]
def is_dual_stack(cidrs):
    """Detect IPv4/IPv6 dual stack from CIDRs

    True only when the comma-separated CIDR list contains both IPv4 and
    IPv6 networks (and no other versions).
    """
    return {net.version for net in get_networks(cidrs)} == {4, 6}
def is_ipv4(cidrs):
    """Detect IPv4 from CIDRs (the original docstring said IPv6)."""
    return get_ipv4_network(cidrs) is not None
def is_ipv6(cidrs):
    """Detect IPv6 from CIDRs"""
    return get_ipv6_network(cidrs) is not None
def is_ipv6_preferred(cidrs):
    """Detect if IPv6 is preferred from CIDRs

    Preference is decided by the first CIDR in the list.
    NOTE(review): raises IndexError when cidrs is empty/None — callers
    appear to pass non-empty CIDR lists only; confirm before reuse.
    """
    return get_networks(cidrs)[0].version == 6
def get_networks(cidrs):
    """Convert a comma-separated list of CIDRs to a list of networks.

    An empty or falsy input yields an empty list.
    """
    if not cidrs:
        return []
    networks = []
    for chunk in cidrs.split(","):
        networks.append(ipaddress.ip_interface(chunk).network)
    return networks
def get_ipv4_network(cidrs):
    """Get the IPv4 network from the given CIDRs or None

    NOTE: if multiple IPv4 networks are present, the last one wins
    (dict comprehension keeps the last value per key).
    """
    return {net.version: net for net in get_networks(cidrs)}.get(4)
def get_ipv6_network(cidrs):
    """Get the IPv6 network from the given CIDRs or None

    NOTE: if multiple IPv6 networks are present, the last one wins
    (dict comprehension keeps the last value per key).
    """
    return {net.version: net for net in get_networks(cidrs)}.get(6)
def enable_ipv6_forwarding():
    """Enable net.ipv6.conf.all.forwarding in sysctl if it is not already."""
    # sysctl is idempotent; re-setting the key is harmless.
    check_call(["sysctl", "net.ipv6.conf.all.forwarding=1"])
def get_bind_addrs(ipv4=True, ipv6=True):
    """Get all global-scoped addresses that we might bind to.

    Interfaces that are down or belong to known virtual/overlay networks
    (lxd, flannel, cni, libvirt, docker) are skipped. Returns [] when the
    `ip` command fails.
    """
    try:
        output = check_output(["ip", "-br", "addr", "show", "scope", "global"])
    except CalledProcessError:
        # stderr will have any details, and go to the log
        hookenv.log("Unable to determine global addresses", hookenv.ERROR)
        return []

    ignored_prefixes = ("lxdbr", "flannel", "cni", "virbr", "docker")
    wanted_versions = {v for v, enabled in ((4, ipv4), (6, ipv6)) if enabled}

    found = []
    for line in output.decode("utf8").splitlines():
        intf, state, *intf_addrs = line.split()
        if state != "UP":
            continue
        if any(intf.startswith(prefix) for prefix in ignored_prefixes):
            continue
        for addr in intf_addrs:
            candidate = ipaddress.ip_interface(addr).ip
            if candidate.version in wanted_versions:
                found.append(str(candidate))
    return found
class InvalidVMwareHost(Exception):
    """Raised when the DMI product serial does not look like a VMware VM."""

    pass
def _get_vmware_uuid():
    """Read the VM's UUID from the DMI product serial via sysfs.

    Returns a dashed UUID string, or "UNKNOWN" if sysfs is unreadable.
    :raises InvalidVMwareHost: if the serial lacks the "VMware-" prefix.
    """
    serial_id_file = "/sys/class/dmi/id/product_serial"
    # The serial id from VMWare VMs comes in following format:
    # VMware-42 28 13 f5 d4 20 71 61-5d b0 7b 96 44 0c cf 54
    try:
        with open(serial_id_file, "r") as f:
            serial_string = f.read().strip()
            if "VMware-" not in serial_string:
                hookenv.log(
                    "Unable to find VMware ID in "
                    "product_serial: {}".format(serial_string)
                )
                raise InvalidVMwareHost
            # Drop the prefix and all spacing to get 32 hex digits.
            serial_string = (
                serial_string.split("VMware-")[1].replace(" ", "").replace("-", "")
            )
            # Re-group as the canonical 8-4-4-4-12 UUID layout.
            uuid = "%s-%s-%s-%s-%s" % (
                serial_string[0:8],
                serial_string[8:12],
                serial_string[12:16],
                serial_string[16:20],
                serial_string[20:32],
            )
    except IOError as err:
        hookenv.log("Unable to read UUID from sysfs: {}".format(err))
        uuid = "UNKNOWN"
    return uuid
def token_generator(length=32):
    """Generate a random token for use in account tokens.

    param: length - the length of the token to generate
    """
    rng = random.SystemRandom()  # OS entropy source
    charset = string.ascii_letters + string.digits
    return "".join(rng.choice(charset) for _ in range(length))
def get_secret_names():
    """Return a dict of 'username: secret_id' for Charmed Kubernetes users.

    Queries kube-system secrets of type juju.is/token-auth and decodes
    the base64 'username' field from each. Returns {} when kubectl is
    unavailable or the API server is not up.
    """
    try:
        output = kubectl(
            "get",
            "secrets",
            "-n",
            AUTH_SECRET_NS,
            "--field-selector",
            "type={}".format(AUTH_SECRET_TYPE),
            "-o",
            "json",
        ).decode("UTF-8")
    except (CalledProcessError, FileNotFoundError):
        # The api server may not be up, or we may be trying to run kubelet before
        # the snap is installed. Send back an empty dict.
        hookenv.log("Unable to get existing secrets", level=hookenv.WARNING)
        return {}

    secrets = json.loads(output)
    secret_names = {}
    if "items" in secrets:
        for secret in secrets["items"]:
            try:
                secret_id = secret["metadata"]["name"]
                username_b64 = secret["data"]["username"].encode("UTF-8")
            except (KeyError, TypeError):
                # CK secrets will have populated 'data', but not all secrets do
                continue
            secret_names[b64decode(username_b64).decode("UTF-8")] = secret_id
    return secret_names
def generate_rfc1123(length=10):
    """Generate a random string compliant with RFC 1123, capped at 253 chars.

    https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#dns-subdomain-names
    param: length - the length of the string to generate
    """
    capped = min(length, 253)
    alphabet = string.ascii_lowercase + string.digits
    rng = random.SystemRandom()
    return "".join(rng.choice(alphabet) for _ in range(capped))
def create_secret(token, username, user, groups=None):
    """Create or update the CK auth secret for a user.

    Returns True when the secret manifest was applied successfully.
    """
    existing = get_secret_names()
    if username in existing:
        # Reuse the secret ID already associated with this username.
        secret_id = existing[username]
    else:
        # Secret IDs must be unique and RFC 1123 compliant.
        sanitized = re.sub("[^0-9a-z.-]+", "-", user.lower())
        secret_id = "auth-{}-{}".format(sanitized, generate_rfc1123(10))

    # The authenticator expects tokens to be in the form user::token.
    if "::" not in token:
        token = "{}::{}".format(user, token)

    def _b64(value):
        return b64encode(value.encode("UTF-8")).decode("utf-8")

    context = {
        "type": AUTH_SECRET_TYPE,
        "secret_name": secret_id,
        "secret_namespace": AUTH_SECRET_NS,
        "user": _b64(user),
        "username": _b64(username),
        "password": _b64(token),
        "groups": _b64(groups) if groups else "",
    }
    with tempfile.NamedTemporaryFile() as tmp_manifest:
        render("cdk.auth-webhook-secret.yaml", tmp_manifest.name, context=context)
        applied = kubectl_manifest("apply", tmp_manifest.name)
    if applied:
        hookenv.log("Created secret for {}".format(username))
        return True
    hookenv.log("WARN: Unable to create secret for {}".format(username))
    return False
def get_secret_password(username):
    """Get the password for the given user from the secret that CK created.

    Returns the decoded password string, or None when no matching secret
    exists or the cluster cannot be queried yet.  As a special case, when
    the apiserver is unreachable and username is 'admin', falls back to the
    token stored in /root/.kube/config (if present).
    """
    try:
        output = kubectl(
            "get",
            "secrets",
            "-n",
            AUTH_SECRET_NS,
            "--field-selector",
            "type={}".format(AUTH_SECRET_TYPE),
            "-o",
            "json",
        ).decode("UTF-8")
    except CalledProcessError:
        # NB: apiserver probably isn't up. This can happen on boostrap or upgrade
        # while trying to build kubeconfig files. If we need the 'admin' token during
        # this time, pull it directly out of the kubeconfig file if possible.
        token = None
        if username == "admin":
            admin_kubeconfig = Path("/root/.kube/config")
            if admin_kubeconfig.exists():
                data = yaml.safe_load(admin_kubeconfig.read_text())
                try:
                    token = data["users"][0]["user"]["token"]
                except (KeyError, IndexError, TypeError):
                    # kubeconfig exists but doesn't have a token; fall through
                    pass
        return token
    except FileNotFoundError:
        # New deployments may ask for a token before the kubectl snap is installed.
        # Give them nothing!
        return None
    secrets = json.loads(output)
    if "items" in secrets:
        for secret in secrets["items"]:
            try:
                data_b64 = secret["data"]
                password_b64 = data_b64["password"].encode("UTF-8")
                username_b64 = data_b64["username"].encode("UTF-8")
            except (KeyError, TypeError):
                # CK authn secrets will have populated 'data', but not all secrets do
                continue
            password = b64decode(password_b64).decode("UTF-8")
            secret_user = b64decode(username_b64).decode("UTF-8")
            if username == secret_user:
                return password
    return None

View File

@ -0,0 +1,26 @@
import os
from pathlib import Path
import yaml
# Root of the charm on disk; JUJU_CHARM_DIR is set by the hook environment.
_CHARM_PATH = Path(os.environ.get('JUJU_CHARM_DIR', '.'))
# Default source of layer options.
_DEFAULT_FILE = _CHARM_PATH / 'layer.yaml'
# Parsed layer.yaml contents, keyed by resolved file path.
_CACHE = {}
def get(section=None, option=None, layer_file=_DEFAULT_FILE):
    """Read layer options from layer.yaml, caching per resolved file path.

    With no arguments, returns the whole 'options' mapping; with a section,
    that section's mapping; with section and option, the single value.
    Raises ValueError when an option is given without a section.
    """
    if option and not section:
        raise ValueError('Cannot specify option without section')
    resolved = (_CHARM_PATH / layer_file).resolve()
    if resolved not in _CACHE:
        with resolved.open() as fp:
            _CACHE[resolved] = yaml.safe_load(fp.read())
    result = _CACHE[resolved].get('options', {})
    if section:
        result = result.get(section, {})
        if option:
            result = result.get(option)
    return result

View File

@ -0,0 +1,189 @@
import inspect
import errno
import subprocess
import yaml
from enum import Enum
from functools import wraps
from pathlib import Path
from charmhelpers.core import hookenv
from charms import layer
# Keep a reference to the real subprocess.call so status-set can still be
# invoked after _patch_hookenv() replaces subprocess.call.
_orig_call = subprocess.call
# Queued (layer, message) statuses keyed by WorkloadState, plus bookkeeping
# flags for whether setup/teardown has run.
_statuses = {'_initialized': False,
             '_finalized': False}
class WorkloadState(Enum):
    """
    Enum of the valid workload states.
    Valid options are:
    * `WorkloadState.MAINTENANCE`
    * `WorkloadState.BLOCKED`
    * `WorkloadState.WAITING`
    * `WorkloadState.ACTIVE`
    """
    # note: order here determines precedence of state — earlier members win
    # when multiple statuses are queued (see _finalize)
    MAINTENANCE = 'maintenance'
    BLOCKED = 'blocked'
    WAITING = 'waiting'
    ACTIVE = 'active'
def maintenance(message):
    """
    Report the `MAINTENANCE` workload state with the given operator message.
    # Parameters
    `message` (str): The operator-facing status message.
    """
    status_set(WorkloadState.MAINTENANCE, message)
def maint(message):
    """
    Convenience alias for
    [maintenance](status.md#charms.layer.status.maintenance).
    # Parameters
    `message` (str): The operator-facing status message.
    """
    maintenance(message)
def blocked(message):
    """
    Report the `BLOCKED` workload state with the given operator message.
    # Parameters
    `message` (str): The operator-facing status message.
    """
    status_set(WorkloadState.BLOCKED, message)
def waiting(message):
    """
    Report the `WAITING` workload state with the given operator message.
    # Parameters
    `message` (str): The operator-facing status message.
    """
    status_set(WorkloadState.WAITING, message)
def active(message):
    """
    Report the `ACTIVE` workload state with the given operator message.
    # Parameters
    `message` (str): The operator-facing status message.
    """
    status_set(WorkloadState.ACTIVE, message)
def status_set(workload_state, message):
    """
    Set the status to the given workload state with a message.
    # Parameters
    `workload_state` (WorkloadState or str): State of the workload. Should be
        a [WorkloadState](status.md#charms.layer.status.WorkloadState) enum
        member, or the string value of one of those members.
    `message` (str): Message to convey to the operator.
    """
    if not isinstance(workload_state, WorkloadState):
        # accept the raw string value of a member
        workload_state = WorkloadState(workload_state)
    if workload_state is WorkloadState.MAINTENANCE:
        # MAINTENANCE is emitted immediately rather than queued for the
        # end-of-hook precedence resolution.
        _status_set_immediate(workload_state, message)
        return
    # Queue the status under its state, tagged with the calling layer so
    # _finalize can resolve precedence between layers.
    layer = _find_calling_layer()
    _statuses.setdefault(workload_state, []).append((layer, message))
    if not _statuses['_initialized'] or _statuses['_finalized']:
        # We either aren't initialized, so the finalizer may never be run,
        # or the finalizer has already run, so it won't run again. In either
        # case, we need to manually invoke it to ensure the status gets set.
        _finalize()
def _find_calling_layer():
for frame in inspect.stack():
# switch to .filename when trusty (Python 3.4) is EOL
fn = Path(frame[1])
if fn.parent.stem not in ('reactive', 'layer', 'charms'):
continue
layer_name = fn.stem
if layer_name == 'status':
continue # skip our own frames
return layer_name
return None
def _initialize():
    """One-time setup: optionally patch hookenv and register the finalizer."""
    if _statuses['_initialized']:
        return
    if layer.options.get('status', 'patch-hookenv'):
        _patch_hookenv()
    hookenv.atexit(_finalize)
    _statuses['_initialized'] = True
def _finalize():
    """Resolve all queued statuses and emit the single winning one.

    Precedence is first by workload state (WorkloadState member order, so
    MAINTENANCE > BLOCKED > WAITING > ACTIVE) and then, within a state, by
    layer: a layer later in layer.yaml's include order (with the charm
    itself last) wins over an earlier one.
    """
    if _statuses['_initialized']:
        # If we haven't been initialized, we can't truly be finalized.
        # This makes things more efficient if an action sets a status
        # but subsequently starts the reactive bus.
        _statuses['_finalized'] = True
        charm_name = hookenv.charm_name()
        charm_dir = Path(hookenv.charm_dir())
        with charm_dir.joinpath('layer.yaml').open() as fp:
            includes = yaml.safe_load(fp.read()).get('includes', [])
        layer_order = includes + [charm_name]
        for workload_state in WorkloadState:
            if workload_state not in _statuses:
                continue
            if not _statuses[workload_state]:
                continue

            def _get_key(record):
                # Sort key: the layer's position in the include order;
                # unknown layers get the lowest precedence (0).
                layer_name, message = record
                if layer_name in layer_order:
                    return layer_order.index(layer_name)
                else:
                    return 0
            sorted_statuses = sorted(_statuses[workload_state], key=_get_key)
            # The highest-precedence entry for the highest-precedence state
            # is the one actually reported.
            layer_name, message = sorted_statuses[-1]
            _status_set_immediate(workload_state, message)
            break
def _status_set_immediate(workload_state, message):
    """Invoke status-set right now, bypassing the deferred bookkeeping."""
    state_name = workload_state.value
    try:
        hookenv.log('status-set: {}: {}'.format(state_name, message),
                    hookenv.INFO)
        if _orig_call(['status-set', state_name, message]) == 0:
            return
    except OSError as e:
        # ignore status-set not available on older controllers
        if e.errno != errno.ENOENT:
            raise
def _patch_hookenv():
    """Route hookenv's status-set invocations through this layer.

    hookenv.status_set may already be imported into other layers'
    namespaces, so rather than patching it directly we intercept the
    subprocess.call it relies on.
    """
    subprocess.call = _patched_call
@wraps(_orig_call)
def _patched_call(cmd, *args, **kwargs):
    """subprocess.call replacement that captures 'status-set' invocations."""
    if isinstance(cmd, list) and cmd[0] == 'status-set':
        _, workload_state, message = cmd
        status_set(workload_state, message)
        return 0  # make hookenv.status_set not emit spurious failure logs
    return _orig_call(cmd, *args, **kwargs)

View File

@ -0,0 +1,68 @@
# Copyright 2015-2016 Canonical Ltd.
#
# This file is part of the Leadership Layer for Juju.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3, as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranties of
# MERCHANTABILITY, SATISFACTORY QUALITY, or FITNESS FOR A PARTICULAR
# PURPOSE. See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from charmhelpers.core import hookenv
from charmhelpers.core import unitdata
from charms import reactive
from charms.reactive import not_unless
__all__ = ['leader_get', 'leader_set']
@not_unless('leadership.is_leader')
def leader_set(*args, **kw):
    '''Change leadership settings, per charmhelpers.core.hookenv.leader_set.
    Settings may either be passed in as a single dictionary, or using
    keyword arguments. All values must be strings.
    The leadership.set.{key} reactive state will be set while the
    leadership hook environment setting remains set.
    Changed leadership settings will set the leadership.changed.{key}
    and leadership.changed states. These states will remain set until
    the following hook.
    These state changes take effect immediately on the leader, and
    in future hooks run on non-leaders. In this way both leaders and
    non-leaders can share handlers, waiting on these states.
    '''
    # Accept either one positional mapping or keyword arguments (or both).
    if args:
        if len(args) > 1:
            raise TypeError('leader_set() takes 1 positional argument but '
                            '{} were given'.format(len(args)))
        else:
            settings = dict(args[0])
    else:
        settings = {}
    settings.update(kw)
    # Compare against the locally cached copy of the settings to decide
    # which 'changed' flags to raise.
    previous = unitdata.kv().getrange('leadership.settings.', strip=True)

    for key, value in settings.items():
        if value != previous.get(key):
            reactive.set_state('leadership.changed.{}'.format(key))
            reactive.set_state('leadership.changed')
        # leadership.set.{key} tracks whether the key currently has a value.
        reactive.helpers.toggle_state('leadership.set.{}'.format(key),
                                      value is not None)
    hookenv.leader_set(settings)
    # Mirror the new values into the local cache for the next comparison.
    unitdata.kv().update(settings, prefix='leadership.settings.')
def leader_get(attribute=None):
    '''Fetch leadership settings; a thin passthrough to
    charmhelpers.core.hookenv.leader_get.'''
    return hookenv.leader_get(attribute)

20
calico/make_docs Normal file
View File

@ -0,0 +1,20 @@
#!.tox/py3/bin/python
# Generate API docs with pydoc-markdown, then clean up build artifacts.
import os
import sys
from shutil import rmtree
from unittest.mock import patch
import pydocmd.__main__

# hookenv.metadata is stubbed out so charm modules can be imported outside
# of a Juju hook context while pydocmd inspects them.
with patch('charmhelpers.core.hookenv.metadata') as metadata:
    sys.path.insert(0, 'lib')
    sys.path.insert(1, 'reactive')
    print(sys.argv)
    if len(sys.argv) == 1:
        # default to the 'build' subcommand when none was given
        sys.argv.extend(['build'])
    pydocmd.__main__.main()

# Remove pydocmd's intermediate build dir and any unit-state database
# created as a side effect of importing charm code.
rmtree('_build')
if os.path.exists('.unit-state.db'):
    os.remove('.unit-state.db')

46
calico/metadata.yaml Normal file
View File

@ -0,0 +1,46 @@
"name": "calico"
"summary": "A robust Software Defined Network from Project Calico"
"maintainers":
- "Tim Van Steenburgh <tim.van.steenburgh@canonical.com>"
- "George Kraft <george.kraft@canonical.com>"
- "Konstantinos Tsakalozos <kos.tsakalozos@canonical.com>"
- "Mike Wilson <mike.wilson@canonical.com>"
- "Kevin Monroe <kevin.monroe@canonical.com>"
- "Joe Borg <joseph.borg@canonical.com>"
"description": |
Deploys Calico as a background service and configures CNI for use with
calico on any principal charm that implements the kubernetes-cni interface.
"tags":
- "networking"
"series":
- "focal"
- "bionic"
- "xenial"
"requires":
"etcd":
"interface": "etcd"
"cni":
"interface": "kubernetes-cni"
"scope": "container"
"resources":
"calico":
"type": "file"
"filename": "calico.tar.gz"
"description": "Calico resource tarball for amd64"
"calico-arm64":
"type": "file"
"filename": "calico.tar.gz"
"description": "Calico resource tarball for arm64"
"calico-upgrade":
"type": "file"
"filename": "calico-upgrade.tar.gz"
"description": "calico-upgrade tool for amd64"
"calico-upgrade-arm64":
"type": "file"
"filename": "calico-upgrade.tar.gz"
"description": "calico-upgrade tool for arm64"
"calico-node-image":
"type": "file"
"filename": "calico-node-image.tar.gz"
"description": "calico-node container image"
"subordinate": !!bool "true"

16
calico/pydocmd.yml Normal file
View File

@ -0,0 +1,16 @@
site_name: 'Status Management Layer'
generate:
- status.md:
- charms.layer.status.WorkloadState
- charms.layer.status.maintenance
- charms.layer.status.maint
- charms.layer.status.blocked
- charms.layer.status.waiting
- charms.layer.status.active
- charms.layer.status.status_set
pages:
- Status Management Layer: status.md
gens_dir: docs

View File

831
calico/reactive/calico.py Normal file
View File

@ -0,0 +1,831 @@
import os
import yaml
import gzip
import traceback
import ipaddress
import calico_upgrade
from conctl import getContainerRuntimeCtl
from socket import gethostname
from subprocess import check_call, check_output, CalledProcessError, STDOUT
from charms.leadership import leader_get, leader_set
from charms.reactive import when, when_not, when_any, set_state, remove_state
from charms.reactive import hook, is_state
from charms.reactive import endpoint_from_flag, endpoint_from_name
from charms.reactive import data_changed, any_file_changed
from charms.reactive import register_trigger
from charmhelpers.core.hookenv import (
log,
resource_get,
network_get,
unit_private_ip,
is_leader,
local_unit,
config as charm_config,
atexit,
env_proxy_settings
)
from charmhelpers.core.host import (
arch,
service,
service_restart,
service_running
)
from charmhelpers.core.templating import render
from charms.layer import kubernetes_common, status
from charms.layer.kubernetes_common import kubectl
# TODO:
# - Handle the 'stop' hook by stopping and uninstalling all the things.

# Make snap-installed binaries (e.g. kubectl) reachable from hooks.
os.environ['PATH'] += os.pathsep + os.path.join(os.sep, 'snap', 'bin')

# Detect the container runtime; image handlers only run once this succeeds
# (they gate on the 'calico.ctl.ready' flag).
try:
    CTL = getContainerRuntimeCtl()
    set_state('calico.ctl.ready')
except RuntimeError:
    log(traceback.format_exc())
    remove_state('calico.ctl.ready')

# Locations of the calicoctl install and the etcd client credentials it uses.
CALICOCTL_PATH = '/opt/calicoctl'
ETCD_KEY_PATH = os.path.join(CALICOCTL_PATH, 'etcd-key')
ETCD_CERT_PATH = os.path.join(CALICOCTL_PATH, 'etcd-cert')
ETCD_CA_PATH = os.path.join(CALICOCTL_PATH, 'etcd-ca')
CALICO_UPGRADE_DIR = '/opt/calico-upgrade'

# A new kubeconfig from the cni relation requires the service to be redone.
register_trigger(
    when="cni.kubeconfig.changed", clear_flag="calico.service.installed"
)
@hook('upgrade-charm')
def upgrade_charm():
    """Reset completion flags so every step re-runs for the new charm
    revision, and queue the v2->v3 migration steps on the leader when the
    v3 data migration has not yet completed."""
    remove_state('calico.binaries.installed')
    remove_state('calico.cni.configured')
    remove_state('calico.service.installed')
    remove_state('calico.pool.configured')
    remove_state('calico.npc.deployed')
    remove_state('calico.image.pulled')
    remove_state('calico.bgp.globals.configured')
    remove_state('calico.node.configured')
    remove_state('calico.bgp.peers.configured')
    try:
        # Remove the legacy single-plugin CNI config; current revisions
        # render /etc/cni/net.d/10-calico.conflist instead.
        log('Deleting /etc/cni/net.d/10-calico.conf')
        os.remove('/etc/cni/net.d/10-calico.conf')
    except FileNotFoundError as e:
        log(e)
    if is_leader() and not leader_get('calico-v3-data-ready'):
        # First upgrade from a pre-v3 deployment: queue all migration steps.
        leader_set({
            'calico-v3-data-migration-needed': True,
            'calico-v3-npc-cleanup-needed': True,
            'calico-v3-completion-needed': True
        })
    cni = endpoint_from_name('cni')
    cni.manage_flags()
@when('leadership.is_leader', 'leadership.set.calico-v3-data-migration-needed',
      'etcd.available', 'calico.etcd-credentials.installed')
def upgrade_v3_migrate_data():
    """Run the calico-upgrade data migration on the leader (dry run first)."""
    status.maintenance('Migrating data to Calico 3')
    try:
        calico_upgrade.configure()
        calico_upgrade.dry_run()
        calico_upgrade.start()
    except Exception:
        log(traceback.format_exc())
        message = 'Calico upgrade failed, see debug log'
        status.blocked(message)
        return
    # Clearing the leader flag marks this one-time step as complete.
    leader_set({'calico-v3-data-migration-needed': None})
@when('leadership.is_leader')
@when_not('leadership.set.calico-v3-data-migration-needed')
def v3_data_ready():
    """Record on the leader that Calico v3 data migration is complete (or
    was never needed), unblocking the handlers that depend on it."""
    leader_set({'calico-v3-data-ready': True})
@when('leadership.is_leader', 'leadership.set.calico-v3-data-ready',
      'leadership.set.calico-v3-npc-cleanup-needed')
def upgrade_v3_npc_cleanup():
    """Delete the Calico 2.x policy controller resources from the cluster."""
    status.maintenance('Cleaning up Calico 2 policy controller')
    resources = [
        ('Deployment', 'kube-system', 'calico-policy-controller'),
        ('ClusterRoleBinding', None, 'calico-policy-controller'),
        ('ClusterRole', None, 'calico-policy-controller'),
        ('ServiceAccount', 'kube-system', 'calico-policy-controller')
    ]
    for kind, namespace, name in resources:
        args = ['delete', '--ignore-not-found', kind, name]
        if namespace:
            args += ['-n', namespace]
        try:
            kubectl(*args)
        except CalledProcessError:
            # Bail without clearing the flag so the whole cleanup retries.
            log('Failed to cleanup %s %s %s' % (kind, namespace, name))
            return
    leader_set({'calico-v3-npc-cleanup-needed': None})
@when('leadership.is_leader', 'leadership.set.calico-v3-completion-needed',
      'leadership.set.calico-v3-data-ready', 'calico.binaries.installed',
      'calico.service.installed', 'calico.npc.deployed')
@when_not('leadership.set.calico-v3-npc-cleanup-needed')
def upgrade_v3_complete():
    """Finish the calico-upgrade process once all v3 pieces are in place."""
    status.maintenance('Completing Calico 3 upgrade')
    try:
        calico_upgrade.configure()
        calico_upgrade.complete()
        calico_upgrade.cleanup()
    except Exception:
        log(traceback.format_exc())
        message = 'Calico upgrade failed, see debug log'
        status.blocked(message)
        return
    # Clearing the leader flag marks this one-time step as complete.
    leader_set({'calico-v3-completion-needed': None})
@when('leadership.set.calico-v3-data-ready')
@when_not('calico.binaries.installed')
def install_calico_binaries():
    ''' Unpack the Calico binaries.

    Fetches the arch-appropriate charm resource, validates it, unpacks it,
    and installs calicoctl plus the CNI plugins into place.  Blocks with a
    descriptive status message on any resource problem.
    '''
    # on intel, the resource is called 'calico'; other arches have a suffix
    architecture = arch()
    if architecture == "amd64":
        resource_name = 'calico'
    else:
        resource_name = 'calico-{}'.format(architecture)

    try:
        archive = resource_get(resource_name)
    except Exception:
        message = 'Error fetching the calico resource.'
        log(message)
        status.blocked(message)
        return

    if not archive:
        message = 'Missing calico resource.'
        log(message)
        status.blocked(message)
        return

    # A tiny file is a placeholder, not a real release tarball.
    filesize = os.stat(archive).st_size
    if filesize < 1000000:
        message = 'Incomplete calico resource'
        log(message)
        status.blocked(message)
        return

    status.maintenance('Unpacking calico resource.')
    charm_dir = os.getenv('CHARM_DIR')
    unpack_path = os.path.join(charm_dir, 'files', 'calico')
    os.makedirs(unpack_path, exist_ok=True)
    cmd = ['tar', 'xfz', archive, '-C', unpack_path]
    log(cmd)
    check_call(cmd)

    apps = [
        {'name': 'calicoctl', 'path': CALICOCTL_PATH},
        {'name': 'calico', 'path': '/opt/cni/bin'},
        {'name': 'calico-ipam', 'path': '/opt/cni/bin'},
    ]
    for app in apps:
        unpacked = os.path.join(unpack_path, app['name'])
        app_path = os.path.join(app['path'], app['name'])
        install = ['install', '-v', '-D', unpacked, app_path]
        check_call(install)

    # Rendered 'calicoctl' template — presumably a wrapper that sources
    # calicoctl.env before exec'ing the real binary; template not visible
    # here, confirm against templates/ in the charm.
    calicoctl_path = '/usr/local/bin/calicoctl'
    render('calicoctl', calicoctl_path, {})
    os.chmod(calicoctl_path, 0o775)
    set_state('calico.binaries.installed')
@when('calico.binaries.installed', 'etcd.available')
def update_calicoctl_env():
    """Write the etcd environment variables used when running calicoctl."""
    env = get_calicoctl_env()
    content = '\n'.join(
        'export %s=%s' % (key, value) for key, value in sorted(env.items())
    )
    with open('/opt/calicoctl/calicoctl.env', 'w') as f:
        f.write(content)
@when('calico.binaries.installed')
@when_not('etcd.connected')
def blocked_without_etcd():
    """Surface a blocked status until the etcd relation is established."""
    status.blocked('Waiting for relation to etcd')
@when('etcd.tls.available')
@when_not('calico.etcd-credentials.installed')
def install_etcd_credentials():
    """Save the etcd client key/cert/CA to disk for calicoctl and
    calico-node to use."""
    etcd = endpoint_from_flag('etcd.available')
    etcd.save_client_credentials(ETCD_KEY_PATH, ETCD_CERT_PATH, ETCD_CA_PATH)
    # register initial etcd data so that we can detect changes
    data_changed('calico.etcd.data', (etcd.get_connection_string(),
                                      etcd.get_client_credentials()))
    set_state('calico.etcd-credentials.installed')
@when('etcd.tls.available', 'calico.service.installed')
def check_etcd_changes():
    """When the etcd connection string or credentials change, re-save them
    and clear the flags for everything that embeds etcd details."""
    etcd = endpoint_from_flag('etcd.available')
    if data_changed('calico.etcd.data', (etcd.get_connection_string(),
                                         etcd.get_client_credentials())):
        etcd.save_client_credentials(ETCD_KEY_PATH,
                                     ETCD_CERT_PATH,
                                     ETCD_CA_PATH)
        # force re-render of the service, policy controller, and CNI config
        remove_state('calico.service.installed')
        remove_state('calico.npc.deployed')
        remove_state('calico.cni.configured')
def get_mtu():
    ''' Get user-specified MTU size, adjusted to make room for encapsulation
    headers. https://docs.projectcalico.org/networking/mtu
    '''
    configured = charm_config('veth-mtu')
    if not configured:
        return None
    # VXLAN encapsulation adds 50 bytes of headers; IP-in-IP adds 20.
    if charm_config('vxlan') != 'Never':
        return configured - 50
    if charm_config('ipip') != 'Never':
        return configured - 20
    return configured
def get_bind_address():
    ''' Returns a non-fan bind address for the cni endpoint.

    Falls back to the unit private address on older Juju versions that
    lack network_get / bind-addresses support, or when every bind address
    is on a fan bridge.
    '''
    try:
        data = network_get('cni')
    except NotImplementedError:
        # Juju < 2.1
        return unit_private_ip()
    if 'bind-addresses' not in data:
        # Juju < 2.3
        return unit_private_ip()
    for bind_address in data['bind-addresses']:
        # skip fan overlay interfaces (named fan-*)
        # NOTE(review): key is 'interfacename' here — some network_get
        # payloads use 'interface-name'; confirm against the Juju version
        # this charm supports.
        if bind_address['interfacename'].startswith('fan-'):
            continue
        return bind_address['addresses'][0]['address']
    # If we made it here, we didn't find a non-fan CNI bind-address, which is
    # unexpected. Let's log a message and play it safe.
    log('Could not find a non-fan bind-address. Using private-address.')
    return unit_private_ip()
@when('leadership.is_leader', 'leadership.set.calico-v3-data-ready')
@when_not('leadership.set.calico-node-token')
def create_calico_node_token():
    ''' Create the system:calico-node user token and publish it via
    leadership data so every unit can build its kubeconfig with it. '''
    status.maintenance('Creating system:calico-node user token')
    token = kubernetes_common.token_generator()
    user = 'system:calico-node'
    success = kubernetes_common.create_secret(
        token=token,
        username=user,
        user=user
    )
    if not success:
        # Leave the leader flag unset so this handler runs again.
        log('Failed to create system:calico-node user token, will retry')
        status.waiting('Waiting to retry creating calico-node token')
        return
    # create_secret may have added the <user>:: prefix. Get the new token.
    token = kubernetes_common.get_secret_password(user)
    if not token:
        log('Failed to get system:calico-node user token, will retry')
        status.waiting('Waiting to retry creating calico-node token')
        return
    leader_set({'calico-node-token': token})
@when('calico.binaries.installed', 'etcd.available',
      'calico.etcd-credentials.installed', 'cni.kubeconfig.available',
      'leadership.set.calico-node-token', 'leadership.set.calico-v3-data-ready')
@when_not('calico.service.installed')
def install_calico_service():
    ''' Install the calico-node systemd service.

    Builds a dedicated kubeconfig for calico-node from the client
    kubeconfig (swapping in the calico-node token), renders the systemd
    unit with etcd and IP settings, then (re)starts and enables it.
    '''
    status.maintenance('Installing calico-node service.')
    # Reuse the cluster/context from the client kubeconfig, but replace
    # the user entry with the calico-node service token.
    with open(kubernetes_common.kubeclientconfig_path) as f:
        kubeconfig = yaml.safe_load(f)
    any_file_changed([kubernetes_common.kubeclientconfig_path])
    kubeconfig['users'] = [{
        'name': 'calico-node',
        'user': {
            'token': leader_get('calico-node-token')
        }
    }]
    kubeconfig['contexts'][0]['context']['user'] = 'calico-node'
    with open('/opt/calicoctl/kubeconfig', 'w') as f:
        yaml.dump(kubeconfig, f)
    etcd = endpoint_from_flag('etcd.available')
    service_path = os.path.join(os.sep, 'lib', 'systemd', 'system',
                                'calico-node.service')
    # Enable IPv4/IPv6 handling based on which families appear in the
    # configured pool CIDRs.
    ip_versions = {net.version for net in get_networks(charm_config('cidr'))}
    ip4 = get_bind_address() if 4 in ip_versions else "none"
    ip6 = "autodetect" if 6 in ip_versions else "none"
    render('calico-node.service', service_path, {
        'connection_string': etcd.get_connection_string(),
        'etcd_key_path': ETCD_KEY_PATH,
        'etcd_ca_path': ETCD_CA_PATH,
        'etcd_cert_path': ETCD_CERT_PATH,
        'nodename': gethostname(),
        # specify IP so calico doesn't grab a silly one from, say, lxdbr0
        'ip': ip4,
        'ip6': ip6,
        'mtu': get_mtu(),
        'calico_node_image': charm_config('calico-node-image'),
        'ignore_loose_rpf': charm_config('ignore-loose-rpf'),
        'lc_all': os.environ.get('LC_ALL', 'C.UTF-8'),
        'lang': os.environ.get('LANG', 'C.UTF-8')
    })
    check_call(['systemctl', 'daemon-reload'])
    service_restart('calico-node')
    service('enable', 'calico-node')
    remove_state('cni.kubeconfig.changed')
    set_state('calico.service.installed')
@when('config.changed.veth-mtu')
def configure_mtu():
    """veth-mtu changed: redo both the service unit and the CNI config."""
    remove_state('calico.cni.configured')
    remove_state('calico.service.installed')


@when('config.changed.ignore-loose-rpf')
def ignore_loose_rpf_changed():
    """ignore-loose-rpf changed: re-render the calico-node service."""
    remove_state('calico.service.installed')
@when('calico.binaries.installed', 'etcd.available',
      'calico.etcd-credentials.installed',
      'leadership.set.calico-v3-data-ready')
@when_not('calico.pool.configured')
def configure_calico_pool():
    ''' Configure Calico IP pool.

    When manage-pools is enabled, reconciles the IPPool resources against
    the configured comma-separated CIDRs: deletes stale pools and applies
    one pool per CIDR (named ipv4/ipv6 by address family).
    '''
    config = charm_config()
    if not config['manage-pools']:
        log('Skipping pool configuration')
        set_state('calico.pool.configured')
        return
    status.maintenance('Configuring Calico IP pool')
    try:
        # remove unrecognized pools, and default pool if CIDR doesn't match
        pools = calicoctl_get('pool')['items']
        cidrs = tuple(cidr.strip() for cidr in config['cidr'].split(','))
        names = tuple('ipv{}'.format(get_network(cidr).version)
                      for cidr in cidrs)
        pool_names_to_delete = [
            pool['metadata']['name'] for pool in pools
            if pool['metadata']['name'] not in names
            or pool['spec']['cidr'] not in cidrs
        ]
        for pool_name in pool_names_to_delete:
            log('Deleting pool: %s' % pool_name)
            calicoctl('delete', 'pool', pool_name, '--skip-not-exists')
        for cidr, name in zip(cidrs, names):
            # configure the default pool
            pool = {
                'apiVersion': 'projectcalico.org/v3',
                'kind': 'IPPool',
                'metadata': {
                    'name': name,
                },
                'spec': {
                    'cidr': cidr,
                    'ipipMode': config['ipip'],
                    'vxlanMode': config['vxlan'],
                    'natOutgoing': config['nat-outgoing'],
                }
            }
            calicoctl_apply(pool)
    except CalledProcessError:
        log(traceback.format_exc())
        if config['ipip'] != 'Never' and config['vxlan'] != 'Never':
            # both encapsulation modes enabled at once is a config conflict
            status.blocked('ipip and vxlan configs are in conflict')
        else:
            status.waiting('Waiting to retry calico pool configuration')
        return
    set_state('calico.pool.configured')
@when_any('config.changed.ipip', 'config.changed.nat-outgoing',
          'config.changed.cidr', 'config.changed.manage-pools',
          'config.changed.vxlan')
def reconfigure_calico_pool():
    """Any pool-related charm config changed; redo pool configuration."""
    remove_state('calico.pool.configured')
@when('etcd.available', 'cni.is-worker', 'leadership.set.calico-v3-data-ready')
@when_not('calico.cni.configured')
def configure_cni():
    ''' Configure Calico CNI on a worker: render the conflist with etcd and
    address-family settings, then publish the config over the cni relation.
    '''
    status.maintenance('Configuring Calico CNI')
    cni = endpoint_from_flag('cni.is-worker')
    etcd = endpoint_from_flag('etcd.available')
    os.makedirs('/etc/cni/net.d', exist_ok=True)
    # Enable v4/v6 assignment based on the configured pool CIDR families.
    ip_versions = {net.version for net in get_networks(charm_config('cidr'))}
    context = {
        'connection_string': etcd.get_connection_string(),
        'etcd_key_path': ETCD_KEY_PATH,
        'etcd_cert_path': ETCD_CERT_PATH,
        'etcd_ca_path': ETCD_CA_PATH,
        'kubeconfig_path': '/opt/calicoctl/kubeconfig',
        'mtu': get_mtu(),
        'assign_ipv4': 'true' if 4 in ip_versions else 'false',
        'assign_ipv6': 'true' if 6 in ip_versions else 'false',
    }
    render('10-calico.conflist', '/etc/cni/net.d/10-calico.conflist', context)
    config = charm_config()
    cni.set_config(cidr=config['cidr'], cni_conf_file='10-calico.conflist')
    set_state('calico.cni.configured')
@when('etcd.available', 'cni.is-master')
@when_not('calico.cni.configured')
def configure_master_cni():
    """Publish CNI configuration to a master unit over the cni relation
    (masters don't need the conflist rendered locally)."""
    status.maintenance('Configuring Calico CNI')
    config = charm_config()
    endpoint_from_flag('cni.is-master').set_config(
        cidr=config['cidr'], cni_conf_file='10-calico.conflist'
    )
    set_state('calico.cni.configured')
@when_any('config.changed.cidr')
def reconfigure_cni():
    """The CIDR changed; the CNI config must be rendered again."""
    remove_state('calico.cni.configured')
@when('etcd.available', 'calico.cni.configured',
      'calico.service.installed', 'leadership.is_leader',
      'leadership.set.calico-v3-data-ready')
@when_not('calico.npc.deployed')
def deploy_network_policy_controller():
    ''' Deploy the Calico network policy controller (leader only). '''
    status.maintenance('Deploying network policy controller.')
    etcd = endpoint_from_flag('etcd.available')
    context = {
        'connection_string': etcd.get_connection_string(),
        'etcd_key_path': ETCD_KEY_PATH,
        'etcd_cert_path': ETCD_CERT_PATH,
        'etcd_ca_path': ETCD_CA_PATH,
        'calico_policy_image': charm_config('calico-policy-image'),
        # NOTE(review): presumably embedded in the rendered manifest so a
        # refreshed cert produces a spec change that rolls the controller
        # pods — template not visible here, confirm.
        'etcd_cert_last_modified': os.path.getmtime(ETCD_CERT_PATH)
    }
    render('policy-controller.yaml', '/tmp/policy-controller.yaml', context)
    try:
        kubectl('apply', '-f', '/tmp/policy-controller.yaml')
        set_state('calico.npc.deployed')
    except CalledProcessError as e:
        # apiserver not ready yet; leave the flag unset so we retry
        status.waiting('Waiting for kubernetes')
        log(str(e))
@when('calico.binaries.installed', 'etcd.available',
      'leadership.set.calico-v3-data-ready')
@when_not('calico.bgp.globals.configured')
def configure_bgp_globals():
    """Apply charm BGP config to Calico's 'default' BGPConfiguration,
    creating the resource if it does not exist yet."""
    status.maintenance('Configuring BGP globals')
    config = charm_config()
    try:
        try:
            bgp_config = calicoctl_get('bgpconfig', 'default')
        except CalledProcessError as e:
            if b'resource does not exist' in e.output:
                # First run: start from an empty default BGPConfiguration.
                log('default BGPConfiguration does not exist')
                bgp_config = {
                    'apiVersion': 'projectcalico.org/v3',
                    'kind': 'BGPConfiguration',
                    'metadata': {
                        'name': 'default'
                    },
                    'spec': {}
                }
            else:
                raise
        spec = bgp_config['spec']
        spec['asNumber'] = config['global-as-number']
        spec['nodeToNodeMeshEnabled'] = config['node-to-node-mesh']
        # The *-ips options are whitespace-separated lists of CIDRs.
        spec['serviceClusterIPs'] = [
            {'cidr': cidr}
            for cidr in config['bgp-service-cluster-ips'].split()
        ]
        spec['serviceExternalIPs'] = [
            {'cidr': cidr}
            for cidr in config['bgp-service-external-ips'].split()
        ]
        calicoctl_apply(bgp_config)
    except CalledProcessError:
        log(traceback.format_exc())
        status.waiting('Waiting to retry BGP global configuration')
        return
    set_state('calico.bgp.globals.configured')
@when_any('config.changed.global-as-number',
          'config.changed.node-to-node-mesh',
          'config.changed.bgp-service-cluster-ips',
          'config.changed.bgp-service-external-ips')
def reconfigure_bgp_globals():
    """A BGP-global config option changed; redo global BGP configuration."""
    remove_state('calico.bgp.globals.configured')
@when('calico.binaries.installed', 'etcd.available',
      'leadership.set.calico-v3-data-ready')
@when_not('calico.node.configured')
def configure_node():
    """Set this node's BGP AS number and route reflector cluster ID on its
    Calico Node resource."""
    status.maintenance('Configuring Calico node')
    node_name = gethostname()
    as_number = get_unit_as_number()
    route_reflector_cluster_id = get_route_reflector_cluster_id()
    try:
        node = calicoctl_get('node', node_name)
        node['spec']['bgp']['asNumber'] = as_number
        node['spec']['bgp']['routeReflectorClusterID'] = \
            route_reflector_cluster_id
        calicoctl_apply(node)
    except CalledProcessError:
        log(traceback.format_exc())
        status.waiting('Waiting to retry Calico node configuration')
        return
    set_state('calico.node.configured')
@when_any('config.changed.subnet-as-numbers', 'config.changed.unit-as-numbers',
          'config.changed.route-reflector-cluster-ids')
def reconfigure_node():
    """Per-node BGP config changed; redo node configuration."""
    remove_state('calico.node.configured')
@when('calico.binaries.installed', 'etcd.available',
      'leadership.set.calico-v3-data-ready')
@when_not('calico.bgp.peers.configured')
def configure_bgp_peers():
    """Apply global, subnet-scoped, and unit-scoped BGP peers for this node,
    then delete any peers this unit previously created that are no longer
    in the config."""
    status.maintenance('Configuring BGP peers')
    peers = []
    # Global BGP peers
    config = charm_config()
    peers += yaml.safe_load(config['global-bgp-peers'])
    # Subnet-scoped BGP peers
    subnet_bgp_peers = yaml.safe_load(config['subnet-bgp-peers'])
    subnets = filter_local_subnets(subnet_bgp_peers)
    for subnet in subnets:
        peers += subnet_bgp_peers[str(subnet)]
    # Unit-scoped BGP peers
    unit_id = get_unit_id()
    unit_bgp_peers = yaml.safe_load(config['unit-bgp-peers'])
    if unit_id in unit_bgp_peers:
        peers += unit_bgp_peers[unit_id]
    # Give names to peers
    safe_unit_name = local_unit().replace('/', '-')
    named_peers = {
        # name must consist of lower case alphanumeric characters, '-' or '.'
        '%s-%s-%s' % (safe_unit_name, peer['address'].replace(':', '-'),
                      peer['as-number']): peer
        for peer in peers
    }
    try:
        node_name = gethostname()
        for peer_name, peer in named_peers.items():
            peer_def = {
                'apiVersion': 'projectcalico.org/v3',
                'kind': 'BGPPeer',
                'metadata': {
                    'name': peer_name,
                },
                'spec': {
                    'node': node_name,
                    'peerIP': peer['address'],
                    'asNumber': peer['as-number']
                }
            }
            calicoctl_apply(peer_def)
        # Delete unrecognized peers — only those this unit owns, identified
        # by the unit-name prefix applied above.
        existing_peers = calicoctl_get('bgppeers')['items']
        existing_peers = [peer['metadata']['name'] for peer in existing_peers]
        peers_to_delete = [
            peer for peer in existing_peers
            if peer.startswith(safe_unit_name + '-')
            and peer not in named_peers
        ]
        for peer in peers_to_delete:
            calicoctl('delete', 'bgppeer', peer)
    except CalledProcessError:
        log(traceback.format_exc())
        status.waiting('Waiting to retry BGP peer configuration')
        return
    set_state('calico.bgp.peers.configured')
@when_any('config.changed.global-bgp-peers', 'config.changed.subnet-bgp-peers',
          'config.changed.unit-bgp-peers')
def reconfigure_bgp_peers():
    # Any change to the peer config invalidates the applied peer set;
    # clearing the flag makes configure_bgp_peers() run again.
    remove_state('calico.bgp.peers.configured')
@atexit
def ready():
    '''Compute and publish the unit's final status at hook exit.

    Order matters: a series upgrade overrides everything, then all
    configuration flags must be set, then leader-only and service checks
    run before declaring the charm active.
    '''
    required_flags = (
        'calico.service.installed',
        'calico.pool.configured',
        'calico.cni.configured',
        'calico.bgp.globals.configured',
        'calico.node.configured',
        'calico.bgp.peers.configured',
    )
    if is_state('upgrade.series.in-progress'):
        status.blocked('Series upgrade in progress')
        return
    if not all(is_state(flag) for flag in required_flags):
        # Some handler above is still working (or retrying) and has
        # already set an appropriate status; leave it alone.
        return
    if is_leader() and not is_state('calico.npc.deployed'):
        status.waiting('Waiting to retry deploying policy controller')
        return
    if not service_running('calico-node'):
        status.waiting('Waiting for service: calico-node')
        return
    status.active('Calico is active')
def calicoctl(*args):
    '''Invoke the local calicoctl binary with the etcd connection
    environment and return its combined stdout/stderr bytes.

    Raises CalledProcessError (after logging its output) on failure.
    '''
    command = ['/opt/calicoctl/calicoctl', *args]
    run_env = dict(os.environ, **get_calicoctl_env())
    try:
        return check_output(command, env=run_env, stderr=STDOUT)
    except CalledProcessError as err:
        log(err.output)
        raise
def set_http_proxy():
    """Copy Juju-provided proxy settings into this process's environment.

    Both upper- and lower-case variants of HTTP_PROXY / HTTPS_PROXY /
    NO_PROXY are honored; nothing is changed when Juju supplies no proxy
    configuration or proxying is explicitly disabled.
    """
    juju_settings = env_proxy_settings()
    if not juju_settings or juju_settings.get('disable-juju-proxy'):
        return
    for name in ('HTTP_PROXY', 'HTTPS_PROXY', 'NO_PROXY'):
        for key in (name, name.lower()):
            value = juju_settings.get(key)
            if value:
                os.environ[key] = value
@when_not('calico.image.pulled')
@when('calico.ctl.ready')
def pull_calico_node_image():
    """Make the calico-node container image available to the runtime.

    If a non-empty 'calico-node-image' resource is attached, decompress and
    load it; otherwise pull the image named in charm config (honoring any
    Juju proxy settings).
    """
    import shutil
    image = resource_get('calico-node-image')
    if not image or os.path.getsize(image) == 0:
        # No resource attached; pull from the registry named in config.
        status.maintenance('Pulling calico-node image')
        image = charm_config('calico-node-image')
        set_http_proxy()
        CTL.pull(image)
    else:
        status.maintenance('Loading calico-node image')
        unzipped = '/tmp/calico-node-image.tar'
        with gzip.open(image, 'rb') as f_in:
            with open(unzipped, 'wb') as f_out:
                # Stream in chunks instead of reading the whole archive into
                # memory at once (container images can be very large).
                shutil.copyfileobj(f_in, f_out)
        CTL.load(unzipped)
    set_state('calico.image.pulled')
@when_any('config.changed.calico-node-image')
def repull_calico_node_image():
    # A new image reference means the image must be fetched again and the
    # systemd service re-rendered to run it.
    remove_state('calico.image.pulled')
    remove_state('calico.service.installed')
@when('calico.service.installed', 'calico.pool.configured')
def disable_vxlan_tx_checksumming():
    '''Workaround for https://github.com/projectcalico/calico/issues/3145

    When VXLAN is in use and the config option requests it, turn off TX
    checksum offload on the vxlan.calico device via ethtool. On failure the
    unit is left in waiting status so the handler runs again next hook.
    '''
    config = charm_config()
    if config['disable-vxlan-tx-checksumming'] and config['vxlan'] != 'Never':
        try:
            check_call(['ethtool', '-K', 'vxlan.calico',
                        'tx-checksum-ip-generic', 'off'])
        except CalledProcessError:
            msg = 'Waiting to retry disabling VXLAN TX checksumming'
            log(msg)
            status.waiting(msg)
def calicoctl_get(*args):
    """Run ``calicoctl get -o yaml --export <args>`` and return the parsed
    YAML as a Python object.

    NOTE(review): '--export' strips cluster-specific metadata; it was
    dropped from newer calicoctl releases -- confirm the pinned calicoctl
    build still accepts it.
    """
    args = ['get', '-o', 'yaml', '--export'] + list(args)
    output = calicoctl(*args)
    result = yaml.safe_load(output)
    return result
def calicoctl_apply(data):
    """Serialize ``data`` to YAML and apply it with ``calicoctl apply``.

    Uses a private temporary file instead of a fixed, predictable path in
    /tmp: this code runs as root, and writing a well-known world-writable
    path is a symlink-attack vector. The file is removed afterwards.
    """
    import tempfile
    with tempfile.NamedTemporaryFile(
            mode='w', suffix='.yaml', delete=False) as f:
        yaml.dump(data, f)
        path = f.name
    try:
        calicoctl('apply', '-f', path)
    finally:
        os.remove(path)
def get_calicoctl_env():
    """Return the environment variables calicoctl needs to reach etcd:
    endpoint list plus the TLS key/cert/CA file paths."""
    etcd = endpoint_from_flag('etcd.available')
    return {
        'ETCD_ENDPOINTS': etcd.get_connection_string(),
        'ETCD_KEY_FILE': ETCD_KEY_PATH,
        'ETCD_CERT_FILE': ETCD_CERT_PATH,
        'ETCD_CA_CERT_FILE': ETCD_CA_PATH,
    }
def get_unit_as_number():
    """Return the BGP AS number configured for this unit, or None.

    Precedence: an explicit per-unit rule wins; otherwise the most
    specific (longest-prefix) subnet rule containing this unit's bind
    address applies.
    """
    config = charm_config()
    # Check for matching unit rule
    unit_rules = yaml.safe_load(config['unit-as-numbers'])
    unit_id = get_unit_id()
    if unit_id in unit_rules:
        return unit_rules[unit_id]
    # Check for matching subnet rule
    subnet_rules = yaml.safe_load(config['subnet-as-numbers'])
    matching = filter_local_subnets(subnet_rules)
    if matching:
        # Longest prefix (most specific subnet) wins.
        best = max(matching, key=lambda subnet: subnet.prefixlen)
        return subnet_rules[str(best)]
    # No AS number specified for this unit.
    return None
def filter_local_subnets(subnets):
    """Return the given subnets (as ip_network objects) that contain this
    unit's bind address."""
    local_ip = ipaddress.ip_address(get_bind_address())
    return [
        network
        for network in map(ipaddress.ip_network, subnets)
        if local_ip in network
    ]
def get_unit_id():
    """Return this unit's numeric ID, e.g. 2 for unit 'calico/2'."""
    return int(local_unit().split('/')[1])
def get_route_reflector_cluster_id():
    """Return the route reflector cluster ID configured for this unit, or
    None if the 'route-reflector-cluster-ids' mapping has no entry for it."""
    cluster_ids = yaml.safe_load(
        charm_config()['route-reflector-cluster-ids']
    )
    return cluster_ids.get(get_unit_id())
def get_network(cidr):
    '''Return the network containing the given CIDR (host bits allowed,
    surrounding whitespace tolerated).'''
    interface = ipaddress.ip_interface(cidr.strip())
    return interface.network
def get_networks(cidrs):
    '''Convert a comma-separated list of CIDRs to a list of networks.'''
    return [
        ipaddress.ip_interface(part.strip()).network
        for part in cidrs.split(',')
    ]

View File

@ -0,0 +1,68 @@
# Copyright 2015-2016 Canonical Ltd.
#
# This file is part of the Leadership Layer for Juju.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3, as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranties of
# MERCHANTABILITY, SATISFACTORY QUALITY, or FITNESS FOR A PARTICULAR
# PURPOSE. See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from charmhelpers.core import hookenv
from charmhelpers.core import unitdata
from charms import reactive
from charms.leadership import leader_get, leader_set
__all__ = ['leader_get', 'leader_set'] # Backwards compatibility
def initialize_leadership_state():
    '''Initialize leadership.* states from the hook environment.

    Invoked by hookenv.atstart() so states are available in
    @hook decorated handlers.
    '''
    is_leader = hookenv.is_leader()
    if is_leader:
        hookenv.log('Initializing Leadership Layer (is leader)')
    else:
        hookenv.log('Initializing Leadership Layer (is follower)')
    reactive.helpers.toggle_state('leadership.is_leader', is_leader)
    previous = unitdata.kv().getrange('leadership.settings.', strip=True)
    current = hookenv.leader_get()
    # Handle deletions: a key seen last hook but absent now is surfaced as
    # None so the changed/set toggles below fire correctly.
    for key in set(previous.keys()) - set(current.keys()):
        current[key] = None
    any_changed = False
    for key, value in current.items():
        reactive.helpers.toggle_state('leadership.changed.{}'.format(key),
                                      value != previous.get(key))
        if value != previous.get(key):
            any_changed = True
        reactive.helpers.toggle_state('leadership.set.{}'.format(key),
                                      value is not None)
    reactive.helpers.toggle_state('leadership.changed', any_changed)
    # Cache current settings so the next hook can compute the delta.
    unitdata.kv().update(current, prefix='leadership.settings.')


# Per https://github.com/juju-solutions/charms.reactive/issues/33,
# this module may be imported multiple times so ensure the
# initialization hook is only registered once. I have to piggy back
# onto the namespace of a module imported before reactive discovery
# to do this.
if not hasattr(reactive, '_leadership_registered'):
    hookenv.atstart(initialize_leadership_state)
    reactive._leadership_registered = True

View File

@ -0,0 +1,4 @@
# Reactive bootstrap stub: eagerly initialize the status layer so its
# maintenance/waiting/blocked/active helpers are ready for other handlers.
from charms import layer

layer.status._initialize()

3
calico/requirements.txt Normal file
View File

@ -0,0 +1,3 @@
mock
flake8
pytest

1
calico/revision Normal file
View File

@ -0,0 +1 @@
0

View File

@ -0,0 +1,33 @@
{
"name": "calico-k8s-network",
"cniVersion": "0.3.1",
"plugins": [
{
"type": "calico",
"etcd_endpoints": "{{ connection_string }}",
"etcd_key_file": "{{ etcd_key_path }}",
"etcd_cert_file": "{{ etcd_cert_path }}",
"etcd_ca_cert_file": "{{ etcd_ca_path }}",
"log_level": "info",
{% if mtu -%}
"mtu": {{ mtu }},
{%- endif %}
"ipam": {
"type": "calico-ipam",
"assign_ipv4": "{{ assign_ipv4 }}",
"assign_ipv6": "{{ assign_ipv6 }}"
},
"policy": {
"type": "k8s"
},
"kubernetes": {
"kubeconfig": "{{ kubeconfig_path }}"
}
},
{
"type": "portmap",
"capabilities": {"portMappings": true},
"snat": true
}
]
}

View File

@ -0,0 +1,54 @@
[Unit]
Description=calico node
[Service]
User=root
Environment=ETCD_ENDPOINTS={{ connection_string }}
# Setting LC_ALL and LANG works around a bug that only occurs on Xenial
# https://bugs.launchpad.net/bugs/1911220
Environment=LC_ALL={{ lc_all }}
Environment=LANG={{ lang }}
PermissionsStartOnly=true
ExecStartPre=-/usr/local/sbin/charm-env --charm calico conctl delete calico-node
ExecStartPre=/bin/mkdir -p /var/run/calico /var/log/calico /var/lib/calico
ExecStart=/usr/local/sbin/charm-env --charm calico conctl run \
--rm \
--net-host \
--privileged \
--env ETCD_ENDPOINTS={{ connection_string }} \
--env ETCD_CA_CERT_FILE={{ etcd_ca_path }} \
--env ETCD_CERT_FILE={{ etcd_cert_path }} \
--env ETCD_KEY_FILE={{ etcd_key_path }} \
--env NODENAME={{ nodename }} \
--env IP={{ ip }} \
--env KUBECONFIG=/opt/calicoctl/kubeconfig \
{% if ipv4 == "none" -%}
--env CALICO_ROUTER_ID="hash" \
{% endif -%}
--env IP6={{ ip6 }} \
{% if ip6 != "none" -%}
--env FELIX_IPV6SUPPORT=true \
{% endif -%}
--env NO_DEFAULT_POOLS=true \
--env AS= \
--env CALICO_LIBNETWORK_ENABLED=true \
--env CALICO_NETWORKING_BACKEND=bird \
--env FELIX_DEFAULTENDPOINTTOHOSTACTION=ACCEPT \
--env FELIX_IGNORELOOSERPF={{ ignore_loose_rpf | string | lower }} \
{% if mtu -%}
--env FELIX_IPINIPMTU={{ mtu }} \
--env FELIX_VXLANMTU={{ mtu }} \
{% endif -%}
--mount /lib/modules:/lib/modules \
--mount /var/run/calico:/var/run/calico \
--mount /var/log/calico:/var/log/calico \
--mount /var/lib/calico:/var/lib/calico \
--mount /opt/calicoctl:/opt/calicoctl \
--name calico-node \
{{ calico_node_image }}
ExecStop=-/usr/local/sbin/charm-env --charm calico conctl delete calico-node
Restart=always
RestartSec=10
[Install]
WantedBy=multi-user.target

View File

@ -0,0 +1,4 @@
#!/bin/sh
# Thin wrapper: source the etcd connection environment, then exec the
# real calicoctl binary with the caller's arguments.
set -eu
. /opt/calicoctl/calicoctl.env
exec /opt/calicoctl/calicoctl "$@"

View File

@ -0,0 +1,13 @@
# Manifest for CK secrets that auth-webhook expects
---
apiVersion: v1
kind: Secret
metadata:
name: {{ secret_name }}
namespace: {{ secret_namespace }}
type: {{ type }}
data:
uid: {{ user }}
username: {{ username }}
password: {{ password }}
groups: '{{ groups }}'

View File

@ -0,0 +1,265 @@
apiVersion: v1
kind: ServiceAccount
metadata:
name: calico-kube-controllers
namespace: kube-system
---
# Include a clusterrole for the kube-controllers component,
# and bind it to the calico-kube-controllers serviceaccount.
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: calico-kube-controllers
rules:
# Pods are monitored for changing labels.
# The node controller monitors Kubernetes nodes.
# Namespace and serviceaccount labels are used for policy.
- apiGroups:
- ""
- extensions
resources:
- pods
- nodes
- namespaces
- serviceaccounts
- networkpolicies
verbs:
- watch
- list
- apiGroups:
- networking.k8s.io
resources:
- networkpolicies
verbs:
- watch
- list
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: calico-kube-controllers
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: calico-kube-controllers
subjects:
- kind: ServiceAccount
name: calico-kube-controllers
namespace: kube-system
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: calico-kube-controllers
namespace: kube-system
labels:
k8s-app: calico-kube-controllers
cdk-restart-on-ca-change: "true"
spec:
# Only a single instance of this pod should be
# active at a time. Since this pod is run as a Deployment,
# Kubernetes will ensure the pod is recreated in case of failure,
# removing the need for passive backups.
selector:
matchLabels:
k8s-app: calico-kube-controllers
replicas: 1
strategy:
type: Recreate
template:
metadata:
name: calico-kube-controllers
namespace: kube-system
labels:
k8s-app: calico-kube-controllers
annotations:
# annotate etcd cert modification time, so that when it changes, k8s
# will restart the pod
cdk-etcd-cert-last-modified: "{{ etcd_cert_last_modified }}"
spec:
hostNetwork: true
serviceAccountName: calico-kube-controllers
containers:
- name: calico-kube-controllers
image: {{ calico_policy_image }}
env:
- name: ETCD_ENDPOINTS
value: {{ connection_string }}
- name: ETCD_CA_CERT_FILE
value: {{ etcd_ca_path }}
- name: ETCD_CERT_FILE
value: {{ etcd_cert_path }}
- name: ETCD_KEY_FILE
value: {{ etcd_key_path }}
volumeMounts:
- name: calicoctl
mountPath: /opt/calicoctl
volumes:
- name: calicoctl
hostPath:
path: /opt/calicoctl
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: calico-node
rules:
# The CNI plugin needs to get pods, nodes, and namespaces.
- apiGroups: [""]
resources:
- pods
- nodes
- namespaces
verbs:
- get
- apiGroups: [""]
resources:
- endpoints
- services
verbs:
# Used to discover service IPs for advertisement.
- watch
- list
# Used to discover Typhas.
- get
- apiGroups: [""]
resources:
- nodes/status
verbs:
# Needed for clearing NodeNetworkUnavailable flag.
- patch
# Calico stores some configuration information in node annotations.
- update
# Watch for changes to Kubernetes NetworkPolicies.
- apiGroups: ["networking.k8s.io"]
resources:
- networkpolicies
verbs:
- watch
- list
# Used by Calico for policy information.
- apiGroups: [""]
resources:
- pods
- namespaces
- serviceaccounts
verbs:
- list
- watch
# The CNI plugin patches pods/status.
- apiGroups: [""]
resources:
- pods/status
verbs:
- patch
# Calico monitors various CRDs for config.
- apiGroups: ["crd.projectcalico.org"]
resources:
- globalfelixconfigs
- felixconfigurations
- bgppeers
- globalbgpconfigs
- bgpconfigurations
- ippools
- ipamblocks
- globalnetworkpolicies
- globalnetworksets
- networkpolicies
- networksets
- clusterinformations
- hostendpoints
- blockaffinities
verbs:
- get
- list
- watch
# Calico must create and update some CRDs on startup.
- apiGroups: ["crd.projectcalico.org"]
resources:
- ippools
- felixconfigurations
- clusterinformations
verbs:
- create
- update
# Calico stores some configuration information on the node.
- apiGroups: [""]
resources:
- nodes
verbs:
- get
- list
- watch
# These permissions are only required for upgrade from v2.6, and can
# be removed after upgrade or on fresh installations.
- apiGroups: ["crd.projectcalico.org"]
resources:
- bgpconfigurations
- bgppeers
verbs:
- create
- update
# These permissions are required for Calico CNI to perform IPAM allocations.
- apiGroups: ["crd.projectcalico.org"]
resources:
- blockaffinities
- ipamblocks
- ipamhandles
verbs:
- get
- list
- create
- update
- delete
- apiGroups: ["crd.projectcalico.org"]
resources:
- ipamconfigs
verbs:
- get
# Block affinities must also be watchable by confd for route aggregation.
- apiGroups: ["crd.projectcalico.org"]
resources:
- blockaffinities
verbs:
- watch
# The Calico IPAM migration needs to get daemonsets. These permissions can be
# removed if not upgrading from an installation using host-local IPAM.
- apiGroups: ["apps"]
resources:
- daemonsets
verbs:
- get
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: calico-node
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: calico-node
subjects:
- kind: User
name: system:calico-node
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: namespace-reader
rules:
- apiGroups: [""]
resources: ["namespaces"]
verbs: ["get"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: nodes-namespace-reader
subjects:
- apiGroup: rbac.authorization.k8s.io
kind: Group
name: system:nodes
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: namespace-reader

View File

@ -0,0 +1,9 @@
options:
as-number:
type: int
description: AS Number
default: 64512
bgp-peers:
type: string
description: BGP peers
default: "[]"

View File

@ -0,0 +1,7 @@
name: bird
description: |
Test charm running BIRD
summary: |
Test charm running BIRD
series:
- focal

View File

@ -0,0 +1 @@
ops

View File

@ -0,0 +1,60 @@
#!/usr/bin/env python3
import logging
from ops.charm import CharmBase
from ops.main import main
from ops.model import ActiveStatus, MaintenanceStatus
from subprocess import check_call
import yaml
log = logging.getLogger(__name__)
bird_config_base = """
log syslog all;
debug protocols all;
protocol kernel {
persist;
scan time 20;
export all;
}
protocol device {
scan time 10;
}
"""
bird_config_peer = """
protocol bgp {
import all;
local as %s;
neighbor %s as %s;
direct;
}
"""
class BirdCharm(CharmBase):
    """Minimal operator charm that installs the BIRD routing daemon and
    renders its BGP configuration from charm config; used as a BGP peer in
    Calico integration tests."""

    def __init__(self, *args):
        super().__init__(*args)
        self.framework.observe(self.on.install, self.install)
        self.framework.observe(self.on.config_changed, self.config_changed)

    def install(self, event):
        # Install BIRD from the distro archive.
        self.unit.status = MaintenanceStatus("Installing BIRD")
        check_call(['apt-get', 'update'])
        check_call(['apt-get', 'install', '-y', 'bird'])

    def config_changed(self, event):
        # Render bird.conf from the base template plus one BGP protocol
        # stanza per peer in the 'bgp-peers' config, then reload the daemon.
        self.unit.status = MaintenanceStatus("Configuring BIRD")
        as_number = self.config['as-number']
        bird_config = "\n".join([bird_config_base] + [
            bird_config_peer % (as_number, peer['address'], peer['as-number'])
            for peer in yaml.safe_load(self.config['bgp-peers'])
        ])
        with open('/etc/bird/bird.conf', 'w') as f:
            f.write(bird_config)
        check_call(['systemctl', 'reload', 'bird'])
        self.unit.status = ActiveStatus()


if __name__ == "__main__":
    main(BirdCharm)

View File

@ -0,0 +1,80 @@
description: A minimal two-machine Kubernetes cluster, appropriate for development.
series: focal
machines:
'0':
constraints: cores=2 mem=4G root-disk=16G
series: focal
'1':
constraints: cores=4 mem=4G root-disk=16G
series: focal
services:
containerd:
charm: cs:~containers/containerd
channel: edge
easyrsa:
charm: cs:~containers/easyrsa
channel: edge
num_units: 1
to:
- '1'
etcd:
charm: cs:~containers/etcd
channel: edge
num_units: 1
options:
channel: 3.4/stable
to:
- '0'
calico:
charm: {{calico_charm}}
resources:
calico: {{resource_path}}/calico-amd64.tar.gz
calico-arm64: {{resource_path}}/calico-arm64.tar.gz
calico-upgrade: {{resource_path}}/calico-upgrade-amd64.tar.gz
calico-upgrade-arm64: {{resource_path}}/calico-upgrade-arm64.tar.gz
calico-node-image: {{resource_path}}/calico-node-image.tar.gz
options:
ignore-loose-rpf: true
vxlan: Always
kubernetes-master:
charm: cs:~containers/kubernetes-master
channel: edge
constraints: cores=2 mem=4G root-disk=16G
expose: true
num_units: 1
options:
channel: 1.22/edge
to:
- '0'
kubernetes-worker:
charm: cs:~containers/kubernetes-worker
channel: edge
constraints: cores=4 mem=4G root-disk=16G
expose: true
num_units: 1
options:
channel: 1.22/edge
to:
- '1'
relations:
- - kubernetes-master:kube-control
- kubernetes-worker:kube-control
- - kubernetes-master:certificates
- easyrsa:client
- - kubernetes-master:etcd
- etcd:db
- - kubernetes-worker:certificates
- easyrsa:client
- - etcd:certificates
- easyrsa:client
- - calico:etcd
- etcd:db
- - calico:cni
- kubernetes-master:cni
- - calico:cni
- kubernetes-worker:cni
- - containerd:containerd
- kubernetes-worker:container-runtime
- - containerd:containerd
- kubernetes-master:container-runtime

View File

@ -0,0 +1,4 @@
# Unit-test bootstrap: replace the reactive framework with mocks so layer
# code can be imported outside a Juju hook environment.
import charms.unit_test

charms.unit_test.patch_reactive()

View File

@ -0,0 +1,90 @@
from functools import partial
import pytest
from unittest import mock
from charms.layer import kubernetes_common
class TestCreateKubeConfig:
    """Unit tests for kubernetes_common.create_kubeconfig covering argument
    validation, file creation, idempotency, and atomic updates."""

    @pytest.fixture(autouse=True)
    def _files(self, tmp_path):
        # self.ckc is create_kubeconfig with the destination path, server,
        # and CA file pre-bound; tests only pass credential kwargs.
        self.cfg_file = tmp_path / "config"
        self.ca_file = tmp_path / "ca.crt"
        self.ca_file.write_text("foo")
        self.ckc = partial(
            kubernetes_common.create_kubeconfig,
            self.cfg_file,
            "server",
            self.ca_file,
        )

    def test_guard_clauses(self):
        """Invalid credential combinations raise and write nothing."""
        with pytest.raises(ValueError):
            self.ckc()
        assert not self.cfg_file.exists()
        with pytest.raises(ValueError):
            self.ckc(token="token", password="password")
        assert not self.cfg_file.exists()
        with pytest.raises(ValueError):
            self.ckc(key="key")
        assert not self.cfg_file.exists()

    def test_file_creation(self):
        """A valid call creates a non-empty kubeconfig."""
        self.ckc(password="password")
        assert self.cfg_file.exists()
        cfg_data_1 = self.cfg_file.read_text()
        assert cfg_data_1

    def test_idempotency(self):
        """Repeating the same call leaves the file contents unchanged."""
        self.ckc(password="password")
        cfg_data_1 = self.cfg_file.read_text()
        self.ckc(password="password")
        cfg_data_2 = self.cfg_file.read_text()
        # Verify that calling w/ the same data keeps the same file contents.
        assert cfg_data_2 == cfg_data_1

    def test_efficient_updates(self):
        """Unchanged data must not touch the file; new data must."""
        self.ckc(password="old_password")
        cfg_stat_1 = self.cfg_file.stat()
        self.ckc(password="old_password")
        cfg_stat_2 = self.cfg_file.stat()
        self.ckc(password="new_password")
        cfg_stat_3 = self.cfg_file.stat()
        # Verify that calling with the same data doesn't
        # modify the file at all, but that new data does
        assert cfg_stat_1.st_mtime == cfg_stat_2.st_mtime < cfg_stat_3.st_mtime

    def test_aws_iam(self):
        """The AWS IAM cluster id ends up in the rendered kubeconfig."""
        self.ckc(password="password", aws_iam_cluster_id="aws-cluster")
        assert self.cfg_file.exists()
        cfg_data_1 = self.cfg_file.read_text()
        assert "aws-cluster" in cfg_data_1

    def test_keystone(self):
        """Keystone mode renders a keystone exec-auth user entry."""
        self.ckc(password="password", keystone=True)
        assert self.cfg_file.exists()
        cfg_data_1 = self.cfg_file.read_text()
        assert "keystone-user" in cfg_data_1
        assert "exec" in cfg_data_1

    def test_atomic_updates(self):
        """A rewrite must not corrupt a concurrent reader's view."""
        self.ckc(password="old_password")
        with self.cfg_file.open("rt") as f:
            # Perform a write in the middle of reading
            self.ckc(password="new_password")
            # Read data from existing FH after new data was written
            cfg_data_1 = f.read()
        # Read updated data
        cfg_data_2 = self.cfg_file.read_text()
        # Verify that the in-progress read didn't get any of the new data
        assert cfg_data_1 != cfg_data_2
        assert "old_password" in cfg_data_1
        assert "new_password" in cfg_data_2
@mock.patch("charmhelpers.core.hookenv.network_get", autospec=True)
def test_get_ingress_address(self, network_get):
    """get_ingress_address returns the first ingress address, skipping any
    addresses passed in the ignore list."""
    network_get.return_value = {"ingress-addresses": ["1.2.3.4", "5.6.7.8"]}
    ingress = kubernetes_common.get_ingress_address("endpoint-name")
    assert ingress == "1.2.3.4"
    ingress = kubernetes_common.get_ingress_address("endpoint-name", ["1.2.3.4"])
    assert ingress == "5.6.7.8"

View File

@ -0,0 +1,36 @@
from kubernetes_wrapper import Kubernetes
import logging
import pytest
import random
import string
log = logging.getLogger(__name__)
@pytest.fixture(scope="module")
@pytest.mark.asyncio
async def kubernetes(ops_test):
    """Module-scoped fixture yielding a Kubernetes API wrapper bound to a
    fresh, randomly named test namespace.

    Copies the admin kubeconfig off the kubernetes-master leader, creates
    the namespace, and deletes it again at teardown.
    """
    kubeconfig_path = ops_test.tmp_path / "kubeconfig"
    retcode, stdout, stderr = await ops_test.run(
        "juju", "scp", "kubernetes-master/leader:config", kubeconfig_path
    )
    if retcode != 0:
        log.error(f"retcode: {retcode}")
        log.error(f"stdout:\n{stdout.strip()}")
        log.error(f"stderr:\n{stderr.strip()}")
        pytest.fail("Failed to copy kubeconfig from kubernetes-master")
    # Random suffix keeps concurrent test runs from colliding.
    namespace = "test-calico-integration-" + "".join(
        random.choice(string.ascii_lowercase + string.digits)
        for _ in range(5)
    )
    kubernetes = Kubernetes(namespace, kubeconfig=str(kubeconfig_path))
    namespace_object = {
        'apiVersion': 'v1',
        'kind': 'Namespace',
        'metadata': {
            'name': namespace
        }
    }
    kubernetes.apply_object(namespace_object)
    yield kubernetes
    kubernetes.delete_object(namespace_object)

View File

@ -0,0 +1,139 @@
import logging
import os
import pytest
import time
import yaml
log = logging.getLogger(__name__)
@pytest.mark.abort_on_fail
async def test_build_and_deploy(ops_test):
    """Build the charm resources and the charm, render the test bundle,
    deploy it, and wait for the whole model to settle to active."""
    resource_path = ops_test.tmp_path / "charm-resources"
    resource_path.mkdir()
    resource_build_script = os.path.abspath("./build-calico-resource.sh")
    log.info("Building charm resources")
    retcode, stdout, stderr = await ops_test.run(
        resource_build_script,
        cwd=resource_path
    )
    if retcode != 0:
        log.error(f"retcode: {retcode}")
        log.error(f"stdout:\n{stdout.strip()}")
        log.error(f"stderr:\n{stderr.strip()}")
        pytest.fail("Failed to build charm resources")
    bundle = ops_test.render_bundle(
        "tests/data/bundle.yaml",
        calico_charm=await ops_test.build_charm("."),
        resource_path=resource_path
    )
    # deploy with Juju CLI because libjuju does not support local resource
    # paths in bundles
    log.info("Deploying bundle")
    retcode, stdout, stderr = await ops_test.run(
        "juju", "deploy", "-m", ops_test.model_full_name, bundle
    )
    if retcode != 0:
        log.error(f"retcode: {retcode}")
        log.error(f"stdout:\n{stdout.strip()}")
        log.error(f"stderr:\n{stderr.strip()}")
        pytest.fail("Failed to deploy bundle")
    await ops_test.model.wait_for_idle(wait_for_active=True, timeout=60 * 60)
async def test_bgp_service_ip_advertisement(ops_test, kubernetes):
    """End-to-end check that Calico advertises service cluster IPs over BGP.

    Deploys an nginx service in k8s, peers Calico with a BIRD charm in both
    directions, then curls the service's cluster IP from the BIRD unit,
    which only succeeds if the route was advertised via BGP.
    """
    # deploy a test service in k8s (nginx)
    deployment = {
        'apiVersion': 'apps/v1',
        'kind': 'Deployment',
        'metadata': {
            'name': 'nginx'
        },
        'spec': {
            'selector': {
                'matchLabels': {
                    'app': 'nginx'
                }
            },
            'template': {
                'metadata': {
                    'labels': {
                        'app': 'nginx'
                    }
                },
                'spec': {
                    'containers': [{
                        'name': 'nginx',
                        'image': 'rocks.canonical.com/cdk/nginx:1.18',
                        'ports': [{
                            'containerPort': 80
                        }]
                    }]
                }
            }
        }
    }
    service = {
        'apiVersion': 'v1',
        'kind': 'Service',
        'metadata': {
            'name': 'nginx'
        },
        'spec': {
            'selector': {
                'app': 'nginx'
            },
            'ports': [{
                'protocol': 'TCP',
                'port': 80
            }]
        }
    }
    kubernetes.apply_object(deployment)
    kubernetes.apply_object(service)
    service_ip = kubernetes.read_object(service).spec.cluster_ip
    # build and deploy bird charm
    bird_charm = await ops_test.build_charm("tests/data/bird-operator")
    await ops_test.model.deploy(bird_charm)
    await ops_test.model.wait_for_idle(wait_for_active=True, timeout=60 * 10)
    # configure calico to peer with bird
    master_config = await ops_test.model.applications['kubernetes-master'].get_config()
    bird_app = ops_test.model.applications['bird']
    calico_app = ops_test.model.applications['calico']
    await calico_app.set_config({
        'bgp-service-cluster-ips': master_config['service-cidr']['value'],
        'global-bgp-peers': yaml.dump([
            {'address': unit.public_address, 'as-number': 64512}
            for unit in bird_app.units
        ])
    })
    # configure bird to peer with calico
    await bird_app.set_config({
        'bgp-peers': yaml.dump([
            {'address': unit.public_address, 'as-number': 64512}
            for unit in calico_app.units
        ])
    })
    # verify test service is reachable from bird; retry for up to 10 minutes
    # since BGP convergence is not instantaneous. The while/else fires only
    # if the deadline passes without a successful curl.
    deadline = time.time() + 60 * 10
    while time.time() < deadline:
        retcode, stdout, stderr = await ops_test.run(
            'juju', 'ssh', '-m', ops_test.model_full_name, 'bird/leader',
            'curl', '--connect-timeout', '10', service_ip
        )
        if retcode == 0:
            break
    else:
        pytest.fail("Failed service connection test after BGP config")
    # clean up
    await calico_app.set_config({
        'bgp-service-cluster-ips': '',
        'global-bgp-peers': '[]'
    })
    await bird_app.destroy()

View File

@ -0,0 +1,6 @@
# Unit-test bootstrap: mock out the reactive framework plus the conctl and
# charms.leadership modules so reactive/calico.py imports cleanly.
import charms.unit_test

charms.unit_test.patch_reactive()
charms.unit_test.patch_module('conctl')
charms.unit_test.patch_module('charms.leadership')

View File

@ -0,0 +1,16 @@
from charmhelpers.core.hookenv import is_leader # patched
from charmhelpers.core.host import service_running # patched
from reactive import calico
def test_series_upgrade():
calico.set_state('upgrade.series.in-progress')
is_leader.return_value = False
service_running.return_value = True
assert calico.status.blocked.call_count == 0
assert calico.status.waiting.call_count == 0
assert calico.status.active.call_count == 0
calico.ready()
assert calico.status.blocked.call_count == 1
assert calico.status.waiting.call_count == 0
assert calico.status.active.call_count == 0

View File

@ -0,0 +1,122 @@
import json
import string
from subprocess import CalledProcessError
from unittest.mock import Mock
from charms.layer import kubernetes_common as kc
def test_token_generator():
    """token_generator returns a token of the requested length composed
    only of ASCII letters and digits."""
    allowed = set(string.ascii_letters + string.digits)
    token = kc.token_generator(10)
    assert len(token) == 10
    assert set(token) <= allowed
def test_get_secret_names(monkeypatch):
    """get_secret_names returns {} when kubectl fails, is missing, or finds
    nothing, and otherwise maps decoded usernames to secret names."""
    monkeypatch.setattr(kc, "kubectl", Mock())
    # One side_effect entry per get_secret_names() call below -- order matters.
    kc.kubectl.side_effect = [
        CalledProcessError(1, "none"),
        FileNotFoundError,
        "{}".encode("utf8"),
        json.dumps(
            {
                "items": [
                    {
                        "metadata": {"name": "secret-id"},
                        # base64("user")
                        "data": {"username": "dXNlcg=="},
                    },
                ],
            }
        ).encode("utf8"),
    ]
    assert kc.get_secret_names() == {}
    assert kc.get_secret_names() == {}
    assert kc.get_secret_names() == {}
    assert kc.get_secret_names() == {"user": "secret-id"}
def test_generate_rfc1123():
    """generate_rfc1123 caps output at 253 characters (the RFC 1123 DNS
    name limit) and uses only ASCII letters and digits."""
    allowed = set(string.ascii_letters + string.digits)
    token = kc.generate_rfc1123(1000)
    assert len(token) == 253
    assert set(token) <= allowed
def test_create_secret(monkeypatch):
    """create_secret reuses the existing secret name for a known user and
    generates an 'auth-<user>-<suffix>' name for a new one; the rendered
    context fields are base64-encoded."""
    monkeypatch.setattr(kc, "render", Mock())
    monkeypatch.setattr(kc, "kubectl_manifest", Mock())
    monkeypatch.setattr(kc, "get_secret_names", Mock())
    monkeypatch.setattr(kc, "generate_rfc1123", Mock())
    # First call: manifest applies cleanly and the user already has a secret.
    # Second call: manifest apply fails and the user is unknown.
    kc.kubectl_manifest.side_effect = [True, False]
    kc.get_secret_names.side_effect = [{"username": "secret-id"}, {}]
    kc.generate_rfc1123.return_value = "foo"
    assert kc.create_secret("token", "username", "user", "groups")
    assert kc.render.call_args[1]["context"] == {
        "groups": "Z3JvdXBz",
        "password": "dXNlcjo6dG9rZW4=",
        "secret_name": "secret-id",
        "secret_namespace": "kube-system",
        "type": "juju.is/token-auth",
        "user": "dXNlcg==",
        "username": "dXNlcm5hbWU=",
    }
    assert not kc.create_secret("token", "username", "user", "groups")
    assert kc.render.call_args[1]["context"] == {
        "groups": "Z3JvdXBz",
        "password": "dXNlcjo6dG9rZW4=",
        "secret_name": "auth-user-foo",
        "secret_namespace": "kube-system",
        "type": "juju.is/token-auth",
        "user": "dXNlcg==",
        "username": "dXNlcm5hbWU=",
    }
def test_get_secret_password(monkeypatch):
    """get_secret_password returns None for every kubectl failure or
    malformed/empty result shape, falls back to kubeconfig parsing for
    'admin', and returns the decoded password on a username match."""
    monkeypatch.setattr(kc, "kubectl", Mock())
    monkeypatch.setattr(kc, "Path", Mock())
    monkeypatch.setattr(kc, "yaml", Mock())
    # One side_effect entry per kubectl invocation below -- order matters.
    kc.kubectl.side_effect = [
        CalledProcessError(1, "none"),
        CalledProcessError(1, "none"),
        CalledProcessError(1, "none"),
        CalledProcessError(1, "none"),
        CalledProcessError(1, "none"),
        CalledProcessError(1, "none"),
        FileNotFoundError,
        json.dumps({}).encode("utf8"),
        json.dumps({"items": []}).encode("utf8"),
        json.dumps({"items": []}).encode("utf8"),
        json.dumps({"items": [{}]}).encode("utf8"),
        json.dumps({"items": [{"data": {}}]}).encode("utf8"),
        json.dumps(
            {"items": [{"data": {"username": "Ym9i", "password": "c2VjcmV0"}}]}
        ).encode("utf8"),
        json.dumps(
            {"items": [{"data": {"username": "dXNlcm5hbWU=", "password": "c2VjcmV0"}}]}
        ).encode("utf8"),
    ]
    # Parsed kubeconfig shapes for the 'admin' fallback path, worst to best.
    kc.yaml.safe_load.side_effect = [
        {},
        {"users": None},
        {"users": []},
        {"users": [{"user": {}}]},
        {"users": [{"user": {"token": "secret"}}]},
    ]
    assert kc.get_secret_password("username") is None
    assert kc.get_secret_password("admin") is None
    assert kc.get_secret_password("admin") is None
    assert kc.get_secret_password("admin") is None
    assert kc.get_secret_password("admin") is None
    assert kc.get_secret_password("admin") == "secret"
    assert kc.get_secret_password("username") is None
    assert kc.get_secret_password("username") is None
    assert kc.get_secret_password("username") is None
    assert kc.get_secret_password("username") is None
    assert kc.get_secret_password("username") is None
    assert kc.get_secret_password("username") is None
    assert kc.get_secret_password("username") is None
    assert kc.get_secret_password("username") == "secret"

View File

@ -0,0 +1,8 @@
#!/bin/bash
# Build the charm into a throwaway directory and verify that its wheelhouse
# is self-contained by installing every wheel with the package index
# disabled. Fails fast if any step errors (previously a failed `charm
# build` still let the pip install run against a stale/empty wheelhouse).
set -euo pipefail
build_dir="$(mktemp -d)"
function cleanup { rm -rf "$build_dir"; }
trap cleanup EXIT
charm build . --build-dir "$build_dir"
pip install -f "$build_dir/calico/wheelhouse" --no-index --no-cache-dir "$build_dir"/calico/wheelhouse/*

41
calico/tox.ini Normal file
View File

@ -0,0 +1,41 @@
[flake8]
max-line-length = 88
[tox]
skipsdist = True
envlist = lint,unit,integration
[testenv]
setenv =
PYTHONPATH={toxinidir}:{toxinidir}/lib
PYTHONBREAKPOINT=ipdb.set_trace
[testenv:unit]
deps =
pyyaml
pytest
charms.unit_test
ipdb
commands = pytest --tb native -s {posargs} {toxinidir}/tests/unit
[testenv:validate-wheelhouse]
allowlist_externals = {toxinidir}/tests/validate-wheelhouse.sh
commands = {toxinidir}/tests/validate-wheelhouse.sh
[testenv:integration]
deps =
pytest
pytest-operator
aiohttp
ipdb
git+https://github.com/canonical/kubernetes-wrapper@main#egg=kubernetes-wrapper
# tox only passes through the upper-case versions by default, but some
# programs, such as wget or pip, only honor the lower-case versions
passenv = http_proxy https_proxy no_proxy
commands = pytest --tb native --show-capture=no --log-cli-level=INFO -s {posargs} {toxinidir}/tests/integration
[testenv:lint]
deps =
flake8
commands =
flake8 {toxinidir}/reactive {toxinidir}/lib {toxinidir}/tests

1
calico/version Normal file
View File

@ -0,0 +1 @@
ccfa68be

23
calico/wheelhouse.txt Normal file
View File

@ -0,0 +1,23 @@
# layer:basic
# pip is pinned to <19.0 to avoid https://github.com/pypa/pip/issues/6164
# even with installing setuptools before upgrading pip ends up with pip seeing
# the older setuptools at the system level if include_system_packages is true
pip>=18.1,<19.0
# pin Jinja2, PyYAML and MarkupSafe to the last versions supporting python 3.5
# for trusty
Jinja2<=2.10.1
PyYAML<=5.2
MarkupSafe<2.0.0
setuptools<42
setuptools-scm<=1.17.0
charmhelpers>=0.4.0,<1.0.0
charms.reactive>=0.1.0,<2.0.0
wheel<0.34
# pin netaddr to avoid pulling importlib-resources
netaddr<=0.7.19
# calico
conctl-py35==0.1.2
# pin click to avoid bringing in incompatible setuptools>=42
click<8.0

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Some files were not shown because too many files have changed in this diff Show More