Update Kubernetes branch to 1.22

xiafan 2023-06-28 20:24:52 +08:00
parent 9175d4acad
commit 64b89fba6a
489 changed files with 15648 additions and 5324 deletions

View File

@ -1,19 +1,20 @@
# Kubernetes 1.21
cs:~containers/charmed-kubernetes-657
# Kubernetes 1.22
cs:~containers/charmed-kubernetes-814
```Bash
charm pull cs:~containers/etcd-583
charm pull cs:~containers/easyrsa-373
charm pull cs:~containers/kubernetes-master-990
charm pull cs:~containers/kubernetes-worker-757
charm pull cs:~containers/containerd-119
charm pull cs:~containers/kata-108
charm pull cs:~containers/calico-812
charm pull cs:~containers/etcd-633
charm pull cs:~containers/easyrsa-419
charm pull cs:~containers/kubernetes-master-1077
charm pull cs:~containers/kubernetes-worker-815
charm pull cs:~containers/containerd-177
charm pull cs:~containers/kata-138
charm pull cs:~containers/calico-838
charm pull cs:~containers/flannel-596
# Extend
charm pull cs:~containers/kubeapi-load-balancer-786
charm pull cs:~containers/keepalived-85
charm pull cs:~containers/kubeapi-load-balancer-843
charm pull cs:~containers/keepalived-110
charm pull cs:~containers/coredns-20
# Other
charm pull cs:~containers/ubuntu-20
charm pull cs:~containers/nrpe-75
```

View File

@ -6,8 +6,8 @@
"url": "layer:options"
},
{
"branch": "refs/heads/stable",
"rev": "0d10732a6e14ea2f940a35ab61425a97c5db6a16",
"branch": "refs/heads/master\nrefs/heads/stable",
"rev": "a3ff62c32c993d80417f6e093e3ef95e42f62083",
"url": "layer:basic"
},
{
@ -20,9 +20,14 @@
"rev": "a7d7b6423db37a47611310039e6ed1929c0a2eab",
"url": "layer:status"
},
{
"branch": "refs/heads/master\nrefs/heads/stable",
"rev": "bbeabfee52c4442cdaf3a34e5e35530a3bd71156",
"url": "layer:kubernetes-common"
},
{
"branch": "refs/heads/stable",
"rev": "63c6d240f29b0366c3839dacd4e25d63a368da36",
"rev": "96b4e06d5d35fec30cdf2cc25076dd25c51b893c",
"url": "calico"
},
{
@ -32,7 +37,7 @@
},
{
"branch": "refs/heads/master\nrefs/heads/stable",
"rev": "b941b3b542d78ad15aa40937b26c7bf727e1b39b",
"rev": "88b1e8fad78d06efdbf512cd75eaa0bb308eb1c1",
"url": "interface:kubernetes-cni"
}
],
@ -42,26 +47,21 @@
"dynamic",
"unchecked"
],
".github/workflows/build.yml": [
"calico",
".github/workflows/main.yml": [
"layer:kubernetes-common",
"static",
"4892e4eb72fb0d0efaa1c6b62f8f132cc69ea2b967c9604c91d4f16e0ec6e26b"
"d4f8fec0456cb2fc05993253a995983488a76fbbef10c2ee40649e83d6c9e078"
],
".github/workflows/tox.yaml": [
"calico",
"static",
"8de54f40fc8e9385b79ed8d19e6ea765bdd6c48185fbd8bd7142834990982d45"
"8b7dba2bd100fc3dfce764499b0eba1799b58469701b032b238cb1d0055c44bb"
],
".gitignore": [
"calico",
"static",
"3437c2cd90de443f44766939172b82e750e19fd474df499ffe003bb807e8cef4"
],
".travis/profile-update.yaml": [
"layer:basic",
"static",
"731e20aa59bf61c024d317ad630e478301a9386ccc0afe56e6c1c09db07ac83b"
],
"CONTRIBUTING.md": [
"calico",
"static",
@ -78,9 +78,9 @@
"58d1e17ffe5109a7ae296caafcadfdbe6a7d176f0bc4ab01e12a689b0499d8bd"
],
"Makefile": [
"calico",
"layer:basic",
"static",
"d49436a9eb35598691285b00e6a678ad74e391a818d55989116e264f40fcd9e6"
"b7ab3a34e5faf79b96a8632039a0ad0aa87f2a9b5f0ba604e007cafb22190301"
],
"README.md": [
"calico",
@ -105,7 +105,7 @@
"config.yaml": [
"calico",
"dynamic",
"c6014840f64c5c4cab24fa54735832e36ecd11de15ab6e34ecedf5839feca695"
"d75dd7b4ddd803d88c5d86b14826fa7f047b8e6907885cafe37cda29afb3c13d"
],
"copyright": [
"layer:status",
@ -257,15 +257,15 @@
"static",
"8ffc1a094807fd36a1d1428b0a07b2428074134d46086066ecd6c0acd9fcd13e"
],
"hooks/relations/kubernetes-cni/.github/workflows/tests.yaml": [
"interface:kubernetes-cni",
"static",
"d0015cd49675976ff87832f5ef7ea20ffca961786379c72bb6acdbdeddd9137c"
],
"hooks/relations/kubernetes-cni/.gitignore": [
"interface:kubernetes-cni",
"static",
"cf237c7aff44efbe6e502e645c3e06da03a69d7bdeb43392108ef3348143417e"
],
"hooks/relations/kubernetes-cni/.travis.yml": [
"interface:kubernetes-cni",
"static",
"c2bd1b88f26c88b883696cca155c28671359a256ed48b90a9ea724b376f2a829"
"0594213ebf9c6ef87827b30405ee67d847f73f4185a865e0e5e9c0be9d29eabe"
],
"hooks/relations/kubernetes-cni/README.md": [
"interface:kubernetes-cni",
@ -285,17 +285,17 @@
"hooks/relations/kubernetes-cni/provides.py": [
"interface:kubernetes-cni",
"static",
"4c3fc3f06a42a2f67fc03c4bc1b4c617021dc1ebb7111527ce6d9cd523b0c40e"
"e436e187f2bab6e73add2b897cd43a2f000fde4726e40b772b66f27786c85dee"
],
"hooks/relations/kubernetes-cni/requires.py": [
"interface:kubernetes-cni",
"static",
"c5fdd7a0eae100833ae6c79474f931803466cd5b206cf8f456cd6f2716d1d2fa"
"45398af27246eaf2005115bd3f270b78fc830d4345b02cc0c4d438711b7cd9fe"
],
"hooks/relations/kubernetes-cni/tox.ini": [
"interface:kubernetes-cni",
"static",
"bf0fb0883583bb3deebd17e7ebd4599d9f3770c19a6fc7683044654b6e982c90"
"f08626c9b65362031edb07f96f15f101bc3dda075abc64f54d1c83efd2c05e39"
],
"hooks/start": [
"layer:basic",
@ -325,7 +325,7 @@
"layer.yaml": [
"calico",
"dynamic",
"8547f11913f564feb1ca4f6674788385e237b4d8d1939c5a8675c6bbb4f1d8e3"
"3a95aaa6fd50027d9a98ad9322568cfb0c228135df7cbff79953a86d01ec533f"
],
"lib/calico_common.py": [
"calico",
@ -345,13 +345,18 @@
"lib/charms/layer/basic.py": [
"layer:basic",
"static",
"3126b5754ad39402ee27e64527044ddd231ed1cd137fcedaffb51e63a635f108"
"98b47134770ed6e4c0b2d4aad73cd5bc200bec84aa9c1c4e075fd70c3222a0c9"
],
"lib/charms/layer/execd.py": [
"layer:basic",
"static",
"fda8bd491032db1db8ddaf4e99e7cc878c6fb5432efe1f91cadb5b34765d076d"
],
"lib/charms/layer/kubernetes_common.py": [
"layer:kubernetes-common",
"static",
"29cedffd490e6295273d195a7c9bace2fcdf149826e7427f2af9698f7f75055b"
],
"lib/charms/layer/options.py": [
"layer:options",
"static",
@ -390,7 +395,7 @@
"reactive/calico.py": [
"calico",
"static",
"3037c342634848aca03bb3a8b818102ae13e4d82942e1c8f8761c8465b808e14"
"6b8bef93b474c95bab4d9df09b74b8082f230fa5e906b469fae66baa319472ad"
],
"reactive/leadership.py": [
"layer:leadership",
@ -407,21 +412,6 @@
"static",
"a00f75d80849e5b4fc5ad2e7536f947c25b1a4044b341caa8ee87a92d3a4c804"
],
"script/bootstrap": [
"calico",
"static",
"1985d9a07e8d764351530f6eb1b81bef6a4c035dc75422c03f4672ceaf1a4c18"
],
"script/build": [
"calico",
"static",
"e78cab1bead2e3c8f7970558f4d08a81f6cc59e5c2903e997644f7e51e7a3633"
],
"script/upload": [
"calico",
"static",
"db3cd04f1d4c2a2be12becb8d62bf879701cbca3da0d458b4c362439b32ebfc1"
],
"templates/10-calico.conflist": [
"calico",
"static",
@ -430,47 +420,97 @@
"templates/calico-node.service": [
"calico",
"static",
"beae0c32a25f911a37363064af7bfa96a39f14ab99b3060412491382a81ddaa7"
"cc80a397a77f7d80740c697fcdaffd373790492b01959649587345bdcfe44fe3"
],
"templates/calicoctl": [
"calico",
"static",
"b913dfdce8de51aa9a13950817e4101f8f4229052927a272fff5b37a4370537f"
],
"templates/cdk.auth-webhook-secret.yaml": [
"layer:kubernetes-common",
"static",
"efaf34c12a5c961fa7843199070945ba05717b3656a0f3acc3327f45334bcaec"
],
"templates/policy-controller.yaml": [
"calico",
"static",
"3bd0f0f714a8c7f418fdb7556f10097d963dbf0c6232a41606163c30022f0e9e"
"427820ac4957c60306b3084c4426ecc84af34dc2b2a8f7c0d70242e53c27957c"
],
"tests/00-setup": [
"tests/data/bird-operator/config.yaml": [
"calico",
"static",
"111c079b81d260bbcd716dcf41672372a4cf4aaa14154b6c3055deeedae37a06"
"4786605f043192ab2970b7abd55c434620463248e2840a7d25a9ca31d913b416"
],
"tests/10-deploy": [
"tests/data/bird-operator/metadata.yaml": [
"calico",
"static",
"e895f7720cd0ce3956082054f15b0cebce683aa44f66bb32038bab1e693bf74f"
"aff75a91343249cb86978512609d0e00c9d6271664b18eeed9e4ef415bd22937"
],
"tests/conftest.py": [
"tests/data/bird-operator/requirements.txt": [
"calico",
"static",
"7a70b4e7059a7d283c883288be3de0bed02d10fda4602c8de4699debcf6afbf2"
],
"tests/data/bird-operator/src/charm.py": [
"calico",
"static",
"8e0374bf6e887604e3ede4ba33d37cf0e43202b653cb3945cefff0d2a33e7a0c"
],
"tests/data/bundle.yaml": [
"calico",
"static",
"0bfb15407e4ac33c87718e20493c0eec3979d8658db85d4f38620b9fca4408bd"
],
"tests/functional/conftest.py": [
"layer:kubernetes-common",
"static",
"fd53e0c38b4dda0c18096167889cd0d85b98b0a13225f9f8853261241e94078c"
],
"tests/functional/test_k8s_common.py": [
"layer:kubernetes-common",
"static",
"680a53724154771dd78422bbaf24b151788d86dd07960712c5d9e0d758499b50"
],
"tests/integration/conftest.py": [
"calico",
"static",
"9069857cdd09798df7813cec38c1147938e9affb777d971c6b8ff3405fa726ff"
],
"tests/integration/test_calico_integration.py": [
"calico",
"static",
"65ce21b694059e6cf6b39d28cba16252a34322b77b5b33050fd46482f60d07fd"
],
"tests/unit/conftest.py": [
"calico",
"static",
"2c58cb11bf276805f586c05c20bf4ba15a7431b5c23ea3323dc4256ddc34c4d2"
],
"tests/test_calico.py": [
"tests/unit/test_calico.py": [
"calico",
"static",
"2de748d396d66f5c581ade110a3f8a709e6aabe50f97502e1d0ac0ec817c223d"
],
"tests/unit/test_k8s_common.py": [
"layer:kubernetes-common",
"static",
"da9bcea8e75160311a4055c1cbf577b497ddd45dc00223c5f1667598f94d9be4"
],
"tests/validate-wheelhouse.sh": [
"calico",
"static",
"cdfd66832b110243b6fd165a75562d9b958f9741b334be2d3a7a1d05adfa6fe7"
],
"tox.ini": [
"calico",
"static",
"1ce2114e5084c1f5bc99f1768c0566f77b8216166974de3b17c47e97b54aba7d"
"a96563719d29a96d41a0e91ef08da35b5e1de5aee2d5884c74d85dca7f43f2d2"
],
"version": [
"calico",
"dynamic",
"44a751fcf4d3ba30169f70f2b7b84b9cfc381b6f514c41fe4d3ef8afe2ff9086"
"d42cce56c73a1877421efe5be4d1e7e914a99ce4e1e4b0143bd97ea895c7c629"
],
"wheelhouse.txt": [
"calico",
@ -497,10 +537,10 @@
"dynamic",
"cacecf0baa674d356641f1d406b8bff1d756d739c46b869a54de515d08e6fc9c"
],
"wheelhouse/charmhelpers-0.20.22.tar.gz": [
"wheelhouse/charmhelpers-0.20.23.tar.gz": [
"layer:basic",
"dynamic",
"b7550108118ce4f87488343384441797777d0da746e1346ed4e6361b4eab0ddb"
"59a9776594e91cd3e3e000043f8668b4d7b279422dbb17e320f01dc16385b80e"
],
"wheelhouse/charms.reactive-1.4.1.tar.gz": [
"layer:basic",
@ -532,10 +572,10 @@
"dynamic",
"c0a292bd977ef590379a3f05d7b7f65135487b67470f6281289a94e015650ea1"
],
"wheelhouse/pyaml-20.4.0.tar.gz": [
"wheelhouse/pyaml-21.10.1.tar.gz": [
"__pip__",
"dynamic",
"29a5c2a68660a799103d6949167bd6c7953d031449d08802386372de1db6ad71"
"c6519fee13bf06e3bb3f20cacdea8eba9140385a7c2546df5dbae4887f768383"
],
"wheelhouse/setuptools-41.6.0.zip": [
"layer:basic",

View File

@ -1,16 +0,0 @@
name: Builds calico charm
on: [push, pull_request]
jobs:
build:
name: Build charm
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2
- name: Setup Python 3.8
uses: actions/setup-python@v2
with:
python-version: '3.8'
- name: Run build
run: |
make charm

calico/.github/workflows/main.yml (new file)
View File

@ -0,0 +1,22 @@
name: Test Suite
on: [pull_request]
jobs:
tests:
name: Lint, Unit, & Func Tests
runs-on: ubuntu-latest
strategy:
matrix:
python: [3.6, 3.7, 3.8, 3.9]
steps:
- name: Check out code
uses: actions/checkout@v2
- name: Setup Python
uses: actions/setup-python@v2
with:
python-version: ${{ matrix.python }}
- name: Install Dependencies
run: |
pip install tox
- name: Run lint
run: tox

View File

@ -1,22 +1,52 @@
name: Run tests with Tox
on: [push]
on:
push:
branches: [master]
pull_request:
branches: [master]
jobs:
build:
lint-unit-wheelhouse:
name: Lint, Unit, Wheelhouse
runs-on: ubuntu-latest
strategy:
matrix:
python: [3.6, 3.7, 3.8, 3.9]
steps:
- uses: actions/checkout@v2
- name: Check out code
uses: actions/checkout@v2
- name: Setup Python
uses: actions/setup-python@v2
with:
python-version: ${{ matrix.python }}
- name: Install Dependencies
run: |
pip install tox
sudo snap install charm --classic
- name: Lint
run: tox -vve lint
- name: Unit Tests
run: tox -vve unit
- name: Validate Wheelhouse
run: tox -vve validate-wheelhouse
integration-test:
name: Integration test with VMWare
runs-on: self-hosted
timeout-minutes: 360
steps:
- name: Check out code
uses: actions/checkout@v2
- name: Setup Python
uses: actions/setup-python@v1
uses: actions/setup-python@v2
with:
python-version: ${{ matrix.python }}
- name: Install Tox and any other packages
run: pip install tox
- name: Run Tox
run: tox -e py # Run tox using the version of Python in `PATH`
python-version: 3.8
- name: Setup operator environment
uses: charmed-kubernetes/actions-operator@master
with:
provider: vsphere
credentials-yaml: ${{ secrets.CREDENTIALS_YAML }}
clouds-yaml: ${{ secrets.CLOUDS_YAML }}
bootstrap-options: "--model-default datastore=vsanDatastore --model-default primary-network=VLAN_2764"
- name: Run test
run: tox -e integration

View File

@ -1,12 +0,0 @@
config: {}
description: Default LXD profile - updated
devices:
eth0:
name: eth0
parent: lxdbr0
nictype: bridged
type: nic
root:
path: /
pool: default
type: disk

View File

@ -1,18 +1,24 @@
CHANNEL ?= unpublished
CHARM := calico
#!/usr/bin/make
setup-env:
bash script/bootstrap
all: lint unit_test
charm: setup-env
bash script/build
upload:
ifndef NAMESPACE
$(error NAMESPACE is not set)
endif
.PHONY: clean
clean:
@rm -rf .tox
env CHARM=$(CHARM) NAMESPACE=$(NAMESPACE) CHANNEL=$(CHANNEL) bash script/upload
.PHONY: apt_prereqs
apt_prereqs:
@# Need tox, but don't install the apt version unless we have to (don't want to conflict with pip)
@which tox >/dev/null || (sudo apt-get install -y python-pip && sudo pip install tox)
.phony: charm upload setup-env
all: charm
.PHONY: lint
lint: apt_prereqs
@tox --notest
@PATH=.tox/py34/bin:.tox/py35/bin flake8 $(wildcard hooks reactive lib unit_tests tests)
@charm proof
.PHONY: unit_test
unit_test: apt_prereqs
@echo Starting tests...
tox

View File

@ -1,4 +1,22 @@
"options":
"bgp-service-cluster-ips":
"type": "string"
"description": |
Space-separated list of service cluster CIDRs to advertise over BGP.
These will be passed to the .spec.serviceClusterIPs field of the default
BGPConfiguration in Calico.
Example value: "10.0.0.0/24 10.0.1.0/24"
"default": ""
"bgp-service-external-ips":
"type": "string"
"description": |
Space-separated list of service external CIDRs to advertise over BGP.
These will be passed to the .spec.serviceExternalIPs field of the default
BGPConfiguration in Calico.
Example value: "10.0.0.0/24 10.0.1.0/24"
"default": ""
"calico-node-image":
"type": "string"
# Please refer to layer-canal/versioning.md before changing the version below.
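To make the effect of the two new options concrete, here is a minimal sketch of how `configure_bgp_globals()` in `reactive/calico.py` (diffed further below) turns them into the default BGPConfiguration spec; the CIDR values are illustrative.

```python
# Sketch: how the new BGP options feed .spec of the default BGPConfiguration
# (mirrors the list comprehensions added to configure_bgp_globals()).
config = {
    "bgp-service-cluster-ips": "10.152.183.0/24",  # illustrative value
    "bgp-service-external-ips": "",                # default: advertise nothing
}
spec = {
    "serviceClusterIPs": [
        {"cidr": cidr} for cidr in config["bgp-service-cluster-ips"].split()
    ],
    "serviceExternalIPs": [
        {"cidr": cidr} for cidr in config["bgp-service-external-ips"].split()
    ],
}
# spec == {"serviceClusterIPs": [{"cidr": "10.152.183.0/24"}],
#          "serviceExternalIPs": []}
```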

View File

@ -0,0 +1,24 @@
name: Test Suite for K8s Service Interface
on:
- pull_request
jobs:
lint-and-unit-tests:
name: Lint & Unit tests
runs-on: ubuntu-latest
strategy:
matrix:
python: [3.6, 3.7, 3.8, 3.9]
steps:
- name: Check out code
uses: actions/checkout@v2
- name: Setup Python
uses: actions/setup-python@v2
with:
python-version: ${{ matrix.python }}
- name: Install Tox
run: pip install tox
- name: Run lint & unit tests
run: tox

View File

@ -1 +1,4 @@
.DS_Store
.tox
__pycache__
*.pyc

View File

@ -1,48 +1,46 @@
#!/usr/bin/python
from charmhelpers.core import hookenv
from charmhelpers.core.host import file_hash
from charms.layer.kubernetes_common import kubeclientconfig_path
from charms.reactive import Endpoint
from charms.reactive import toggle_flag, is_flag_set, clear_flag, set_flag
class CNIPluginProvider(Endpoint):
def manage_flags(self):
toggle_flag(self.expand_name('{endpoint_name}.connected'),
self.is_joined)
toggle_flag(self.expand_name('{endpoint_name}.available'),
self.config_available())
if is_flag_set(self.expand_name('endpoint.{endpoint_name}.changed')):
clear_flag(self.expand_name('{endpoint_name}.configured'))
clear_flag(self.expand_name('endpoint.{endpoint_name}.changed'))
toggle_flag(self.expand_name("{endpoint_name}.connected"), self.is_joined)
toggle_flag(
self.expand_name("{endpoint_name}.available"), self.config_available()
)
if is_flag_set(self.expand_name("endpoint.{endpoint_name}.changed")):
clear_flag(self.expand_name("{endpoint_name}.configured"))
clear_flag(self.expand_name("endpoint.{endpoint_name}.changed"))
def set_config(self, is_master, kubeconfig_path):
''' Relays a dict of kubernetes configuration information. '''
def set_config(self, is_master):
"""Relays a dict of kubernetes configuration information."""
for relation in self.relations:
relation.to_publish_raw.update({
'is_master': is_master,
'kubeconfig_path': kubeconfig_path
})
set_flag(self.expand_name('{endpoint_name}.configured'))
relation.to_publish_raw.update({"is_master": is_master})
set_flag(self.expand_name("{endpoint_name}.configured"))
def config_available(self):
''' Ensures all config from the CNI plugin is available. '''
"""Ensures all config from the CNI plugin is available."""
goal_state = hookenv.goal_state()
related_apps = [
app for app in goal_state.get('relations', {}).get(self.endpoint_name, '')
if '/' not in app
app
for app in goal_state.get("relations", {}).get(self.endpoint_name, "")
if "/" not in app
]
if not related_apps:
return False
configs = self.get_configs()
return all(
'cidr' in config and 'cni-conf-file' in config
for config in [
configs.get(related_app, {}) for related_app in related_apps
]
"cidr" in config and "cni-conf-file" in config
for config in [configs.get(related_app, {}) for related_app in related_apps]
)
def get_config(self, default=None):
''' Get CNI config for one related application.
"""Get CNI config for one related application.
If default is specified, and there is a related application with a
matching name, then that application is chosen. Otherwise, the
@ -50,13 +48,13 @@ class CNIPluginProvider(Endpoint):
Whichever application is chosen, that application's CNI config is
returned.
'''
"""
configs = self.get_configs()
if not configs:
return {}
elif default and default not in configs:
msg = 'relation not found for default CNI %s, ignoring' % default
hookenv.log(msg, level='WARN')
msg = "relation not found for default CNI %s, ignoring" % default
hookenv.log(msg, level="WARN")
return self.get_config()
elif default:
return configs.get(default, {})
@ -64,7 +62,7 @@ class CNIPluginProvider(Endpoint):
return configs.get(sorted(configs)[0], {})
def get_configs(self):
''' Get CNI configs for all related applications.
"""Get CNI configs for all related applications.
This returns a mapping of application names to CNI configs. Here's an
example return value:
@ -78,8 +76,14 @@ class CNIPluginProvider(Endpoint):
'cni-conf-file': '10-calico.conflist'
}
}
'''
"""
return {
relation.application_name: relation.joined_units.received_raw
for relation in self.relations if relation.application_name
for relation in self.relations
if relation.application_name
}
def notify_kubeconfig_changed(self):
kubeconfig_hash = file_hash(kubeclientconfig_path)
for relation in self.relations:
relation.to_publish_raw.update({"kubeconfig-hash": kubeconfig_hash})
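For context, a hypothetical sketch of how a control-plane layer could drive this provider side after the change (the `cni` endpoint name and flags follow the conventions above; the caller itself is not part of this commit):

```python
# Hypothetical caller (e.g. a kubernetes-master layer) using the new API:
# set_config() no longer sends kubeconfig_path, and kubeconfig changes are
# propagated to workers as a hash via notify_kubeconfig_changed().
from charms.reactive import endpoint_from_flag, when


@when("cni.connected")
def configure_cni():
    cni = endpoint_from_flag("cni.connected")
    cni.set_config(is_master=True)
    cni.notify_kubeconfig_changed()
```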

View File

@ -1,45 +1,54 @@
#!/usr/bin/python
from charmhelpers.core import unitdata
from charms.reactive import Endpoint
from charms.reactive import when_any, when_not
from charms.reactive import set_state, remove_state
db = unitdata.kv()
class CNIPluginClient(Endpoint):
def manage_flags(self):
kubeconfig_hash = self.get_config().get("kubeconfig-hash")
kubeconfig_hash_key = self.expand_name("{endpoint_name}.kubeconfig-hash")
if kubeconfig_hash:
set_state(self.expand_name("{endpoint_name}.kubeconfig.available"))
if kubeconfig_hash != db.get(kubeconfig_hash_key):
set_state(self.expand_name("{endpoint_name}.kubeconfig.changed"))
db.set(kubeconfig_hash_key, kubeconfig_hash)
@when_any('endpoint.{endpoint_name}.joined',
'endpoint.{endpoint_name}.changed')
@when_any("endpoint.{endpoint_name}.joined", "endpoint.{endpoint_name}.changed")
def changed(self):
''' Indicate the relation is connected, and if the relation data is
set it is also available. '''
set_state(self.expand_name('{endpoint_name}.connected'))
"""Indicate the relation is connected, and if the relation data is
set it is also available."""
set_state(self.expand_name("{endpoint_name}.connected"))
config = self.get_config()
if config['is_master'] == 'True':
set_state(self.expand_name('{endpoint_name}.is-master'))
set_state(self.expand_name('{endpoint_name}.configured'))
elif config['is_master'] == 'False':
set_state(self.expand_name('{endpoint_name}.is-worker'))
set_state(self.expand_name('{endpoint_name}.configured'))
if config["is_master"] == "True":
set_state(self.expand_name("{endpoint_name}.is-master"))
set_state(self.expand_name("{endpoint_name}.configured"))
elif config["is_master"] == "False":
set_state(self.expand_name("{endpoint_name}.is-worker"))
set_state(self.expand_name("{endpoint_name}.configured"))
else:
remove_state(self.expand_name('{endpoint_name}.configured'))
remove_state(self.expand_name('endpoint.{endpoint_name}.changed'))
remove_state(self.expand_name("{endpoint_name}.configured"))
remove_state(self.expand_name("endpoint.{endpoint_name}.changed"))
@when_not('endpoint.{endpoint_name}.joined')
@when_not("endpoint.{endpoint_name}.joined")
def broken(self):
''' Indicate the relation is no longer available and not connected. '''
remove_state(self.expand_name('{endpoint_name}.connected'))
remove_state(self.expand_name('{endpoint_name}.is-master'))
remove_state(self.expand_name('{endpoint_name}.is-worker'))
remove_state(self.expand_name('{endpoint_name}.configured'))
"""Indicate the relation is no longer available and not connected."""
remove_state(self.expand_name("{endpoint_name}.connected"))
remove_state(self.expand_name("{endpoint_name}.is-master"))
remove_state(self.expand_name("{endpoint_name}.is-worker"))
remove_state(self.expand_name("{endpoint_name}.configured"))
def get_config(self):
''' Get the kubernetes configuration information. '''
"""Get the kubernetes configuration information."""
return self.all_joined_units.received_raw
def set_config(self, cidr, cni_conf_file):
''' Sets the CNI configuration information. '''
"""Sets the CNI configuration information."""
for relation in self.relations:
relation.to_publish_raw.update({
'cidr': cidr,
'cni-conf-file': cni_conf_file
})
relation.to_publish_raw.update(
{"cidr": cidr, "cni-conf-file": cni_conf_file}
)

View File

@ -2,22 +2,26 @@
skipsdist = True
envlist = lint,py3
[tox:travis]
3.5: lint,py3
3.6: lint,py3
3.7: lint,py3
[testenv]
basepython = python3
setenv =
PYTHONPATH={toxinidir}:{toxinidir}/lib
PYTHONBREAKPOINT=ipdb.set_trace
deps =
pyyaml
pytest
flake8
black
ipdb
charms.unit_test
commands = pytest --tb native -s {posargs}
[testenv:lint]
envdir = {toxworkdir}/py3
commands = flake8 --max-line-length=88 {toxinidir}
commands =
flake8 {toxinidir}
black --check {toxinidir}
[flake8]
exclude=.tox
max-line-length = 88

View File

@ -5,6 +5,7 @@
- "layer:basic"
- "layer:leadership"
- "layer:status"
- "layer:kubernetes-common"
"exclude": [".travis.yml", "tests", "tox.ini", "test-requirements.txt", "unit_tests"]
"options":
"basic":
@ -15,6 +16,7 @@
"leadership": {}
"status":
"patch-hookenv": !!bool "true"
"kubernetes-common": {}
"calico": {}
"repo": "https://github.com/juju-solutions/layer-calico.git"
"is": "calico"

View File

@ -199,7 +199,13 @@ def bootstrap_charm_deps():
# a set so that we can ignore the pre-install packages and let pip
# choose the best version in case there are multiple from layer
# conflicts)
pkgs = _load_wheelhouse_versions().keys() - set(pre_install_pkgs)
_versions = _load_wheelhouse_versions()
_pkgs = _versions.keys() - set(pre_install_pkgs)
# add back the versions such that each package in pkgs is
# <package_name>==<version>.
# This ensures that pip 20.3.4+ will install the packages from the
# wheelhouse without (erroneously) flagging an error.
pkgs = _add_back_versions(_pkgs, _versions)
reinstall_flag = '--force-reinstall'
if not cfg.get('use_venv', True) and pre_eoan:
reinstall_flag = '--ignore-installed'
@ -278,6 +284,55 @@ def _load_wheelhouse_versions():
return versions
def _add_back_versions(pkgs, versions):
"""Add back the version strings to each of the packages.
The versions are LooseVersion() from _load_wheelhouse_versions(). This
function strips the ".zip" or ".tar.gz" from the end of the version string
and adds it back to the package in the form of <package_name>==<version>
If a package name is not a key in the versions dictionary, then it is
returned in the list unchanged.
:param pkgs: A list of package names
:type pkgs: List[str]
:param versions: A map of package to LooseVersion
:type versions: Dict[str, LooseVersion]
:returns: A list of (maybe) versioned packages
:rtype: List[str]
"""
def _strip_ext(s):
"""Strip an extension (if it exists) from the string
:param s: the string to strip an extension off if it exists
:type s: str
:returns: string without an extension of .zip or .tar.gz
:rtype: str
"""
for ending in [".zip", ".tar.gz"]:
if s.endswith(ending):
return s[:-len(ending)]
return s
def _maybe_add_version(pkg):
"""Maybe add back the version number to a package if it exists.
Adds the version number, if the package exists in the lexically
captured `versions` dictionary, in the form <pkg>==<version>. Strips
the extension if it exists.
:param pkg: the package name to (maybe) add the version number to.
:type pkg: str
"""
try:
return "{}=={}".format(pkg, _strip_ext(str(versions[pkg])))
except KeyError:
pass
return pkg
return [_maybe_add_version(pkg) for pkg in pkgs]
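A worked example of the intended behavior, with a versions map shaped like `_load_wheelhouse_versions()` output (entries assumed from the wheelhouse listing above):

```python
# Illustrative input/output for _add_back_versions().
from distutils.version import LooseVersion

versions = {
    "charmhelpers": LooseVersion("0.20.23.tar.gz"),
    "pyaml": LooseVersion("21.10.1.tar.gz"),
}
pkgs = versions.keys()
# _add_back_versions(pkgs, versions) returns (order not guaranteed):
#   ["charmhelpers==0.20.23", "pyaml==21.10.1"]
```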
def _update_if_newer(pip, pkgs):
installed = _load_installed_versions(pip)
wheelhouse = _load_wheelhouse_versions()

View File

@ -0,0 +1,924 @@
#!/usr/bin/env python
# Copyright 2015 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import ipaddress
import re
import os
import subprocess
import hashlib
import json
import traceback
import random
import string
import tempfile
import yaml
from base64 import b64decode, b64encode
from pathlib import Path
from subprocess import check_output, check_call
from socket import gethostname, getfqdn
from shlex import split
from subprocess import CalledProcessError
from charmhelpers.core import hookenv, unitdata
from charmhelpers.core import host
from charmhelpers.core.templating import render
from charms.reactive import endpoint_from_flag, is_state
from time import sleep
AUTH_SECRET_NS = "kube-system"
AUTH_SECRET_TYPE = "juju.is/token-auth"
db = unitdata.kv()
kubeclientconfig_path = "/root/.kube/config"
gcp_creds_env_key = "GOOGLE_APPLICATION_CREDENTIALS"
kubeproxyconfig_path = "/root/cdk/kubeproxyconfig"
certs_dir = Path("/root/cdk")
ca_crt_path = certs_dir / "ca.crt"
server_crt_path = certs_dir / "server.crt"
server_key_path = certs_dir / "server.key"
client_crt_path = certs_dir / "client.crt"
client_key_path = certs_dir / "client.key"
def get_version(bin_name):
"""Get the version of an installed Kubernetes binary.
:param str bin_name: Name of binary
:return: 3-tuple version (maj, min, patch)
Example::
>>> get_version('kubelet')
(1, 6, 0)
"""
cmd = "{} --version".format(bin_name).split()
version_string = subprocess.check_output(cmd).decode("utf-8")
return tuple(int(q) for q in re.findall("[0-9]+", version_string)[:3])
def retry(times, delay_secs):
"""Decorator for retrying a method call.
Args:
times: How many times should we retry before giving up
delay_secs: Delay in secs
Returns: A callable that would return the last call outcome
"""
def retry_decorator(func):
"""Decorator to wrap the function provided.
Args:
func: Provided function should return either True or False
Returns: A callable that would return the last call outcome
"""
def _wrapped(*args, **kwargs):
res = func(*args, **kwargs)
attempt = 0
while not res and attempt < times:
sleep(delay_secs)
res = func(*args, **kwargs)
if res:
break
attempt += 1
return res
return _wrapped
return retry_decorator
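A usage sketch; the wrapped readiness check is hypothetical:

```python
# Hypothetical usage: poll an apiserver health endpoint up to 3 extra times,
# sleeping 10 seconds between attempts.
@retry(times=3, delay_secs=10)
def apiserver_healthy():
    return kubectl_success("get", "--raw", "/healthz")
```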
def calculate_resource_checksum(resource):
"""Calculate a checksum for a resource"""
md5 = hashlib.md5()
path = hookenv.resource_get(resource)
if path:
with open(path, "rb") as f:
data = f.read()
md5.update(data)
return md5.hexdigest()
def get_resource_checksum_db_key(checksum_prefix, resource):
"""Convert a resource name to a resource checksum database key."""
return checksum_prefix + resource
def migrate_resource_checksums(checksum_prefix, snap_resources):
"""Migrate resource checksums from the old schema to the new one"""
for resource in snap_resources:
new_key = get_resource_checksum_db_key(checksum_prefix, resource)
if not db.get(new_key):
path = hookenv.resource_get(resource)
if path:
# old key from charms.reactive.helpers.any_file_changed
old_key = "reactive.files_changed." + path
old_checksum = db.get(old_key)
db.set(new_key, old_checksum)
else:
# No resource is attached. Previously, this meant no checksum
# would be calculated and stored. But now we calculate it as if
# it is a 0-byte resource, so let's go ahead and do that.
zero_checksum = hashlib.md5().hexdigest()
db.set(new_key, zero_checksum)
def check_resources_for_upgrade_needed(checksum_prefix, snap_resources):
hookenv.status_set("maintenance", "Checking resources")
for resource in snap_resources:
key = get_resource_checksum_db_key(checksum_prefix, resource)
old_checksum = db.get(key)
new_checksum = calculate_resource_checksum(resource)
if new_checksum != old_checksum:
return True
return False
def calculate_and_store_resource_checksums(checksum_prefix, snap_resources):
for resource in snap_resources:
key = get_resource_checksum_db_key(checksum_prefix, resource)
checksum = calculate_resource_checksum(resource)
db.set(key, checksum)
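How these checksum helpers are meant to compose; the prefix and resource names are illustrative (the real callers live in the kubernetes-master/worker layers):

```python
# Illustrative snap-upgrade flow built from the helpers above.
prefix = "kubernetes-worker.resource-checksums."
resources = ["kubectl", "kubelet", "kube-proxy"]

migrate_resource_checksums(prefix, resources)
if check_resources_for_upgrade_needed(prefix, resources):
    # ... upgrade the snaps from the attached resources here ...
    calculate_and_store_resource_checksums(prefix, resources)
```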
def get_ingress_address(endpoint_name, ignore_addresses=None):
try:
network_info = hookenv.network_get(endpoint_name)
except NotImplementedError:
network_info = {}
if not network_info or "ingress-addresses" not in network_info:
# if they don't have ingress-addresses they are running a juju that
# doesn't support spaces, so just return the private address
return hookenv.unit_get("private-address")
addresses = network_info["ingress-addresses"]
if ignore_addresses:
hookenv.log("ingress-addresses before filtering: {}".format(addresses))
iter_filter = filter(lambda item: item not in ignore_addresses, addresses)
addresses = list(iter_filter)
hookenv.log("ingress-addresses after filtering: {}".format(addresses))
# Need to prefer non-fan IP addresses due to various issues, e.g.
# https://bugs.launchpad.net/charm-gcp-integrator/+bug/1822997
# Fan typically likes to use IPs in the 240.0.0.0/4 block, so we'll
# prioritize those last. Not technically correct, but good enough.
try:
sort_key = lambda a: int(a.partition(".")[0]) >= 240 # noqa: E731
addresses = sorted(addresses, key=sort_key)
except Exception:
hookenv.log(traceback.format_exc())
return addresses[0]
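The fan-address demotion hinges on a first-octet test; a worked example:

```python
# The key is False (sorts first) for ordinary addresses and True for
# 240.0.0.0/4 fan-style addresses, and sorted() is stable:
addresses = ["252.0.28.1", "10.5.0.7"]
sorted(addresses, key=lambda a: int(a.partition(".")[0]) >= 240)
# -> ["10.5.0.7", "252.0.28.1"]
```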
def get_ingress_address6(endpoint_name):
try:
network_info = hookenv.network_get(endpoint_name)
except NotImplementedError:
network_info = {}
if not network_info or "ingress-addresses" not in network_info:
return None
addresses = network_info["ingress-addresses"]
for addr in addresses:
ip_addr = ipaddress.ip_interface(addr).ip
if ip_addr.version == 6:
return str(ip_addr)
else:
return None
def service_restart(service_name):
hookenv.status_set("maintenance", "Restarting {0} service".format(service_name))
host.service_restart(service_name)
def service_start(service_name):
hookenv.log("Starting {0} service.".format(service_name))
host.service_start(service_name)
def service_stop(service_name):
hookenv.log("Stopping {0} service.".format(service_name))
host.service_stop(service_name)
def arch():
"""Return the package architecture as a string. Raise an exception if the
architecture is not supported by kubernetes."""
# Get the package architecture for this system.
architecture = check_output(["dpkg", "--print-architecture"]).rstrip()
# Convert the binary result into a string.
architecture = architecture.decode("utf-8")
return architecture
def get_service_ip(service, namespace="kube-system", errors_fatal=True):
try:
output = kubectl(
"get", "service", "--namespace", namespace, service, "--output", "json"
)
except CalledProcessError:
if errors_fatal:
raise
else:
return None
else:
svc = json.loads(output.decode())
return svc["spec"]["clusterIP"]
def kubectl(*args):
"""Run a kubectl cli command with a config file. Returns stdout and throws
an error if the command fails."""
command = ["kubectl", "--kubeconfig=" + kubeclientconfig_path] + list(args)
hookenv.log("Executing {}".format(command))
return check_output(command)
def kubectl_success(*args):
"""Runs kubectl with the given args. Returns True if successful, False if
not."""
try:
kubectl(*args)
return True
except CalledProcessError:
return False
def kubectl_manifest(operation, manifest):
"""Wrap the kubectl creation command when using filepath resources
:param operation - one of get, create, delete, replace
:param manifest - filepath to the manifest
"""
# Deletions are a special case
if operation == "delete":
# Ensure we immediately remove requested resources with --now
return kubectl_success(operation, "-f", manifest, "--now")
else:
# Guard against an error re-creating the same manifest multiple times
if operation == "create":
# If we already have the definition, it's probably safe to assume
# creation was true.
if kubectl_success("get", "-f", manifest):
hookenv.log("Skipping definition for {}".format(manifest))
return True
# Execute the requested command that did not match any of the special
# cases above
return kubectl_success(operation, "-f", manifest)
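Usage sketch (the manifest path is illustrative):

```python
# "create" is skipped when "kubectl get -f" already finds the objects;
# "delete" adds --now so removal is immediate.
kubectl_manifest("create", "/root/cdk/addons/example.yaml")
kubectl_manifest("delete", "/root/cdk/addons/example.yaml")
```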
def get_node_name():
kubelet_extra_args = parse_extra_args("kubelet-extra-args")
cloud_provider = kubelet_extra_args.get("cloud-provider", "")
if is_state("endpoint.aws.ready"):
cloud_provider = "aws"
elif is_state("endpoint.gcp.ready"):
cloud_provider = "gce"
elif is_state("endpoint.openstack.ready"):
cloud_provider = "openstack"
elif is_state("endpoint.vsphere.ready"):
cloud_provider = "vsphere"
elif is_state("endpoint.azure.ready"):
cloud_provider = "azure"
if cloud_provider == "aws":
return getfqdn().lower()
else:
return gethostname().lower()
def create_kubeconfig(
kubeconfig,
server,
ca,
key=None,
certificate=None,
user="ubuntu",
context="juju-context",
cluster="juju-cluster",
password=None,
token=None,
keystone=False,
aws_iam_cluster_id=None,
):
"""Create a configuration for Kubernetes based on path using the supplied
arguments for values of the Kubernetes server, CA, key, certificate, user
context and cluster."""
if not key and not certificate and not password and not token:
raise ValueError("Missing authentication mechanism.")
elif key and not certificate:
raise ValueError("Missing certificate.")
elif not key and certificate:
raise ValueError("Missing key.")
elif token and password:
# token and password are mutually exclusive. Error early if both are
# present. The developer has requested an impossible situation.
# see: kubectl config set-credentials --help
raise ValueError("Token and Password are mutually exclusive.")
old_kubeconfig = Path(kubeconfig)
new_kubeconfig = Path(str(kubeconfig) + ".new")
# Create the config file with the address of the master server.
cmd = (
"kubectl config --kubeconfig={0} set-cluster {1} "
"--server={2} --certificate-authority={3} --embed-certs=true"
)
check_call(split(cmd.format(new_kubeconfig, cluster, server, ca)))
# Delete old users
cmd = "kubectl config --kubeconfig={0} unset users"
check_call(split(cmd.format(new_kubeconfig)))
# Create the credentials using the client flags.
cmd = "kubectl config --kubeconfig={0} " "set-credentials {1} ".format(
new_kubeconfig, user
)
if key and certificate:
cmd = (
"{0} --client-key={1} --client-certificate={2} "
"--embed-certs=true".format(cmd, key, certificate)
)
if password:
cmd = "{0} --username={1} --password={2}".format(cmd, user, password)
# This is mutually exclusive from password. They will not work together.
if token:
cmd = "{0} --token={1}".format(cmd, token)
check_call(split(cmd))
# Create a default context with the cluster.
cmd = "kubectl config --kubeconfig={0} set-context {1} " "--cluster={2} --user={3}"
check_call(split(cmd.format(new_kubeconfig, context, cluster, user)))
# Make the config use this new context.
cmd = "kubectl config --kubeconfig={0} use-context {1}"
check_call(split(cmd.format(new_kubeconfig, context)))
if keystone:
# create keystone user
cmd = "kubectl config --kubeconfig={0} " "set-credentials keystone-user".format(
new_kubeconfig
)
check_call(split(cmd))
# create keystone context
cmd = (
"kubectl config --kubeconfig={0} "
"set-context --cluster={1} "
"--user=keystone-user keystone".format(new_kubeconfig, cluster)
)
check_call(split(cmd))
# use keystone context
cmd = "kubectl config --kubeconfig={0} " "use-context keystone".format(
new_kubeconfig
)
check_call(split(cmd))
# manually add exec command until kubectl can do it for us
with open(new_kubeconfig, "r") as f:
content = f.read()
content = content.replace(
"""- name: keystone-user
user: {}""",
"""- name: keystone-user
user:
exec:
command: "/snap/bin/client-keystone-auth"
apiVersion: "client.authentication.k8s.io/v1beta1"
""",
)
with open(new_kubeconfig, "w") as f:
f.write(content)
if aws_iam_cluster_id:
# create aws-iam context
cmd = (
"kubectl config --kubeconfig={0} "
"set-context --cluster={1} "
"--user=aws-iam-user aws-iam-authenticator"
)
check_call(split(cmd.format(new_kubeconfig, cluster)))
# append a user for aws-iam
cmd = (
"kubectl --kubeconfig={0} config set-credentials "
"aws-iam-user --exec-command=aws-iam-authenticator "
'--exec-arg="token" --exec-arg="-i" --exec-arg="{1}" '
'--exec-arg="-r" --exec-arg="<<insert_arn_here>>" '
"--exec-api-version=client.authentication.k8s.io/v1alpha1"
)
check_call(split(cmd.format(new_kubeconfig, aws_iam_cluster_id)))
# not going to use aws-iam context by default since we don't have
# the desired arn. This will make the config not usable if copied.
# cmd = 'kubectl config --kubeconfig={0} ' \
# 'use-context aws-iam-authenticator'.format(new_kubeconfig)
# check_call(split(cmd))
if old_kubeconfig.exists():
changed = new_kubeconfig.read_text() != old_kubeconfig.read_text()
else:
changed = True
if changed:
new_kubeconfig.rename(old_kubeconfig)
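A minimal call sketch using token authentication; all values are illustrative (the keystone and aws-iam branches run only when those arguments are passed):

```python
# Minimal sketch: token-only kubeconfig. Passing both token and password
# raises ValueError, as does a key without its certificate.
create_kubeconfig(
    kubeconfig="/root/.kube/config",
    server="https://10.0.0.10:6443",
    ca=str(ca_crt_path),
    user="admin",
    token="admin::" + token_generator(),
)
```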
def parse_extra_args(config_key):
elements = hookenv.config().get(config_key, "").split()
args = {}
for element in elements:
if "=" in element:
key, _, value = element.partition("=")
args[key] = value
else:
args[element] = "true"
return args
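A worked example, assuming the charm config key `kubelet-extra-args` is set to `v=4 protect-kernel-defaults`:

```python
parse_extra_args("kubelet-extra-args")
# -> {"v": "4", "protect-kernel-defaults": "true"}
# bare flags become "true"; "k=v" pairs are split on the first "=".
```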
def configure_kubernetes_service(key, service, base_args, extra_args_key):
db = unitdata.kv()
prev_args_key = key + service
prev_snap_args = db.get(prev_args_key) or {}
extra_args = parse_extra_args(extra_args_key)
args = {}
args.update(base_args)
args.update(extra_args)
# CIS benchmark action may inject kv config to pass failing tests. Merge
# these after the func args as they should take precedence.
cis_args_key = "cis-" + service
cis_args = db.get(cis_args_key) or {}
args.update(cis_args)
# Remove any args with 'None' values (all k8s args are 'k=v') and
# construct an arg string for use by 'snap set'.
args = {k: v for k, v in args.items() if v is not None}
args = ['--%s="%s"' % arg for arg in args.items()]
args = " ".join(args)
snap_opts = {}
for arg in prev_snap_args:
# remove previous args by setting to null
snap_opts[arg] = "null"
snap_opts["args"] = args
snap_opts = ["%s=%s" % opt for opt in snap_opts.items()]
cmd = ["snap", "set", service] + snap_opts
check_call(cmd)
# Now that we've started doing snap configuration through the "args"
# option, we should never need to clear previous args again.
db.set(prev_args_key, {})
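For illustration, the `args` string that ends up in `snap set <service> args=...` for a small option set:

```python
# Mirrors the arg-string construction above (None values already dropped).
args = {"v": "0", "cluster-cidr": "10.1.0.0/16"}
" ".join('--%s="%s"' % kv for kv in args.items())
# -> '--v="0" --cluster-cidr="10.1.0.0/16"'
```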
def _snap_common_path(component):
return Path("/var/snap/{}/common".format(component))
def cloud_config_path(component):
return _snap_common_path(component) / "cloud-config.conf"
def _gcp_creds_path(component):
return _snap_common_path(component) / "gcp-creds.json"
def _daemon_env_path(component):
return _snap_common_path(component) / "environment"
def _cloud_endpoint_ca_path(component):
return _snap_common_path(component) / "cloud-endpoint-ca.crt"
def encryption_config_path():
apiserver_snap_common_path = _snap_common_path("kube-apiserver")
encryption_conf_dir = apiserver_snap_common_path / "encryption"
return encryption_conf_dir / "encryption_config.yaml"
def write_gcp_snap_config(component):
# gcp requires additional credentials setup
gcp = endpoint_from_flag("endpoint.gcp.ready")
creds_path = _gcp_creds_path(component)
with creds_path.open("w") as fp:
os.fchmod(fp.fileno(), 0o600)
fp.write(gcp.credentials)
# create a cloud-config file that sets token-url to nil to make the
# services use the creds env var instead of the metadata server, as
# well as making the cluster multizone
comp_cloud_config_path = cloud_config_path(component)
comp_cloud_config_path.write_text(
"[Global]\n" "token-url = nil\n" "multizone = true\n"
)
daemon_env_path = _daemon_env_path(component)
if daemon_env_path.exists():
daemon_env = daemon_env_path.read_text()
if not daemon_env.endswith("\n"):
daemon_env += "\n"
else:
daemon_env = ""
if gcp_creds_env_key not in daemon_env:
daemon_env += "{}={}\n".format(gcp_creds_env_key, creds_path)
daemon_env_path.parent.mkdir(parents=True, exist_ok=True)
daemon_env_path.write_text(daemon_env)
def generate_openstack_cloud_config():
# openstack requires additional credentials setup
openstack = endpoint_from_flag("endpoint.openstack.ready")
lines = [
"[Global]",
"auth-url = {}".format(openstack.auth_url),
"region = {}".format(openstack.region),
"username = {}".format(openstack.username),
"password = {}".format(openstack.password),
"tenant-name = {}".format(openstack.project_name),
"domain-name = {}".format(openstack.user_domain_name),
"tenant-domain-name = {}".format(openstack.project_domain_name),
]
if openstack.endpoint_tls_ca:
lines.append("ca-file = /etc/config/endpoint-ca.cert")
lines.extend(
[
"",
"[LoadBalancer]",
]
)
if openstack.has_octavia in (True, None):
# Newer integrator charm will detect whether underlying OpenStack has
# Octavia enabled so we can set this intelligently. If we're still
# related to an older integrator, though, default to assuming Octavia
# is available.
lines.append("use-octavia = true")
else:
lines.append("use-octavia = false")
lines.append("lb-provider = haproxy")
if openstack.subnet_id:
lines.append("subnet-id = {}".format(openstack.subnet_id))
if openstack.floating_network_id:
lines.append("floating-network-id = {}".format(openstack.floating_network_id))
if openstack.lb_method:
lines.append("lb-method = {}".format(openstack.lb_method))
if openstack.manage_security_groups:
lines.append(
"manage-security-groups = {}".format(openstack.manage_security_groups)
)
if any(
[openstack.bs_version, openstack.trust_device_path, openstack.ignore_volume_az]
):
lines.append("")
lines.append("[BlockStorage]")
if openstack.bs_version is not None:
lines.append("bs-version = {}".format(openstack.bs_version))
if openstack.trust_device_path is not None:
lines.append("trust-device-path = {}".format(openstack.trust_device_path))
if openstack.ignore_volume_az is not None:
lines.append("ignore-volume-az = {}".format(openstack.ignore_volume_az))
return "\n".join(lines) + "\n"
def write_azure_snap_config(component):
azure = endpoint_from_flag("endpoint.azure.ready")
comp_cloud_config_path = cloud_config_path(component)
comp_cloud_config_path.write_text(
json.dumps(
{
"useInstanceMetadata": True,
"useManagedIdentityExtension": azure.managed_identity,
"subscriptionId": azure.subscription_id,
"resourceGroup": azure.resource_group,
"location": azure.resource_group_location,
"vnetName": azure.vnet_name,
"vnetResourceGroup": azure.vnet_resource_group,
"subnetName": azure.subnet_name,
"securityGroupName": azure.security_group_name,
"loadBalancerSku": "standard",
"securityGroupResourceGroup": azure.security_group_resource_group,
"aadClientId": azure.aad_client_id,
"aadClientSecret": azure.aad_client_secret,
"tenantId": azure.tenant_id,
}
)
)
def configure_kube_proxy(
configure_prefix, api_servers, cluster_cidr, bind_address=None
):
kube_proxy_opts = {}
kube_proxy_opts["cluster-cidr"] = cluster_cidr
kube_proxy_opts["kubeconfig"] = kubeproxyconfig_path
kube_proxy_opts["logtostderr"] = "true"
kube_proxy_opts["v"] = "0"
num_apis = len(api_servers)
kube_proxy_opts["master"] = api_servers[get_unit_number() % num_apis]
kube_proxy_opts["hostname-override"] = get_node_name()
if bind_address:
kube_proxy_opts["bind-address"] = bind_address
elif is_ipv6(cluster_cidr):
kube_proxy_opts["bind-address"] = "::"
if host.is_container():
kube_proxy_opts["conntrack-max-per-core"] = "0"
if is_dual_stack(cluster_cidr):
kube_proxy_opts["feature-gates"] = "IPv6DualStack=true"
configure_kubernetes_service(
configure_prefix, "kube-proxy", kube_proxy_opts, "proxy-extra-args"
)
def get_unit_number():
return int(hookenv.local_unit().split("/")[1])
def cluster_cidr():
"""Return the cluster CIDR provided by the CNI"""
cni = endpoint_from_flag("cni.available")
if not cni:
return None
config = hookenv.config()
if "default-cni" in config:
# master
default_cni = config["default-cni"]
else:
# worker
kube_control = endpoint_from_flag("kube-control.dns.available")
if not kube_control:
return None
default_cni = kube_control.get_default_cni()
return cni.get_config(default=default_cni)["cidr"]
def is_dual_stack(cidrs):
"""Detect IPv4/IPv6 dual stack from CIDRs"""
return {net.version for net in get_networks(cidrs)} == {4, 6}
def is_ipv4(cidrs):
"""Detect IPv6 from CIDRs"""
return get_ipv4_network(cidrs) is not None
def is_ipv6(cidrs):
"""Detect IPv6 from CIDRs"""
return get_ipv6_network(cidrs) is not None
def is_ipv6_preferred(cidrs):
"""Detect if IPv6 is preffered from CIDRs"""
return get_networks(cidrs)[0].version == 6
def get_networks(cidrs):
"""Convert a comma-separated list of CIDRs to a list of networks."""
if not cidrs:
return []
return [ipaddress.ip_interface(cidr).network for cidr in cidrs.split(",")]
def get_ipv4_network(cidrs):
"""Get the IPv4 network from the given CIDRs or None"""
return {net.version: net for net in get_networks(cidrs)}.get(4)
def get_ipv6_network(cidrs):
"""Get the IPv6 network from the given CIDRs or None"""
return {net.version: net for net in get_networks(cidrs)}.get(6)
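Worked examples for the CIDR helpers above (values illustrative):

```python
get_networks("10.1.0.0/16,fd00:abcd::/64")
# -> [IPv4Network('10.1.0.0/16'), IPv6Network('fd00:abcd::/64')]
is_dual_stack("10.1.0.0/16,fd00:abcd::/64")      # True: versions {4, 6}
is_ipv6_preferred("fd00:abcd::/64,10.1.0.0/16")  # True: first network is v6
is_ipv6("10.1.0.0/16")                           # False: no v6 network
```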
def enable_ipv6_forwarding():
"""Enable net.ipv6.conf.all.forwarding in sysctl if it is not already."""
check_call(["sysctl", "net.ipv6.conf.all.forwarding=1"])
def get_bind_addrs(ipv4=True, ipv6=True):
"""Get all global-scoped addresses that we might bind to."""
try:
output = check_output(["ip", "-br", "addr", "show", "scope", "global"])
except CalledProcessError:
# stderr will have any details, and go to the log
hookenv.log("Unable to determine global addresses", hookenv.ERROR)
return []
ignore_interfaces = ("lxdbr", "flannel", "cni", "virbr", "docker")
accept_versions = set()
if ipv4:
accept_versions.add(4)
if ipv6:
accept_versions.add(6)
addrs = []
for line in output.decode("utf8").splitlines():
intf, state, *intf_addrs = line.split()
if state != "UP" or any(
intf.startswith(prefix) for prefix in ignore_interfaces
):
continue
for addr in intf_addrs:
ip_addr = ipaddress.ip_interface(addr).ip
if ip_addr.version in accept_versions:
addrs.append(str(ip_addr))
return addrs
class InvalidVMwareHost(Exception):
pass
def _get_vmware_uuid():
serial_id_file = "/sys/class/dmi/id/product_serial"
# The serial id from VMWare VMs comes in following format:
# VMware-42 28 13 f5 d4 20 71 61-5d b0 7b 96 44 0c cf 54
try:
with open(serial_id_file, "r") as f:
serial_string = f.read().strip()
if "VMware-" not in serial_string:
hookenv.log(
"Unable to find VMware ID in "
"product_serial: {}".format(serial_string)
)
raise InvalidVMwareHost
serial_string = (
serial_string.split("VMware-")[1].replace(" ", "").replace("-", "")
)
uuid = "%s-%s-%s-%s-%s" % (
serial_string[0:8],
serial_string[8:12],
serial_string[12:16],
serial_string[16:20],
serial_string[20:32],
)
except IOError as err:
hookenv.log("Unable to read UUID from sysfs: {}".format(err))
uuid = "UNKNOWN"
return uuid
def token_generator(length=32):
"""Generate a random token for use in account tokens.
param: length - the length of the token to generate
"""
alpha = string.ascii_letters + string.digits
token = "".join(random.SystemRandom().choice(alpha) for _ in range(length))
return token
def get_secret_names():
"""Return a dict of 'username: secret_id' for Charmed Kubernetes users."""
try:
output = kubectl(
"get",
"secrets",
"-n",
AUTH_SECRET_NS,
"--field-selector",
"type={}".format(AUTH_SECRET_TYPE),
"-o",
"json",
).decode("UTF-8")
except (CalledProcessError, FileNotFoundError):
# The api server may not be up, or we may be trying to run kubelet before
# the snap is installed. Send back an empty dict.
hookenv.log("Unable to get existing secrets", level=hookenv.WARNING)
return {}
secrets = json.loads(output)
secret_names = {}
if "items" in secrets:
for secret in secrets["items"]:
try:
secret_id = secret["metadata"]["name"]
username_b64 = secret["data"]["username"].encode("UTF-8")
except (KeyError, TypeError):
# CK secrets will have populated 'data', but not all secrets do
continue
secret_names[b64decode(username_b64).decode("UTF-8")] = secret_id
return secret_names
def generate_rfc1123(length=10):
"""Generate a random string compliant with RFC 1123.
https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#dns-subdomain-names
param: length - the length of the string to generate
"""
length = 253 if length > 253 else length
valid_chars = string.ascii_lowercase + string.digits
rand_str = "".join(random.SystemRandom().choice(valid_chars) for _ in range(length))
return rand_str
def create_secret(token, username, user, groups=None):
secrets = get_secret_names()
if username in secrets:
# Use existing secret ID if one exists for our username
secret_id = secrets[username]
else:
# secret IDs must be unique and rfc1123 compliant
sani_name = re.sub("[^0-9a-z.-]+", "-", user.lower())
secret_id = "auth-{}-{}".format(sani_name, generate_rfc1123(10))
# The authenticator expects tokens to be in the form user::token
token_delim = "::"
if token_delim not in token:
token = "{}::{}".format(user, token)
context = {
"type": AUTH_SECRET_TYPE,
"secret_name": secret_id,
"secret_namespace": AUTH_SECRET_NS,
"user": b64encode(user.encode("UTF-8")).decode("utf-8"),
"username": b64encode(username.encode("UTF-8")).decode("utf-8"),
"password": b64encode(token.encode("UTF-8")).decode("utf-8"),
"groups": b64encode(groups.encode("UTF-8")).decode("utf-8") if groups else "",
}
with tempfile.NamedTemporaryFile() as tmp_manifest:
render("cdk.auth-webhook-secret.yaml", tmp_manifest.name, context=context)
if kubectl_manifest("apply", tmp_manifest.name):
hookenv.log("Created secret for {}".format(username))
return True
else:
hookenv.log("WARN: Unable to create secret for {}".format(username))
return False
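A sketch of the token lifecycle these helpers implement (it mirrors how `reactive/calico.py`, diffed below, creates its `system:calico-node` token):

```python
user = "system:calico-node"
token = token_generator()
if create_secret(token, username=user, user=user):
    # create_secret may have prefixed the token with "<user>::", so read
    # the stored value back instead of reusing the local variable.
    token = get_secret_password(user)
```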
def get_secret_password(username):
"""Get the password for the given user from the secret that CK created."""
try:
output = kubectl(
"get",
"secrets",
"-n",
AUTH_SECRET_NS,
"--field-selector",
"type={}".format(AUTH_SECRET_TYPE),
"-o",
"json",
).decode("UTF-8")
except CalledProcessError:
# NB: apiserver probably isn't up. This can happen on bootstrap or upgrade
# while trying to build kubeconfig files. If we need the 'admin' token during
# this time, pull it directly out of the kubeconfig file if possible.
token = None
if username == "admin":
admin_kubeconfig = Path("/root/.kube/config")
if admin_kubeconfig.exists():
data = yaml.safe_load(admin_kubeconfig.read_text())
try:
token = data["users"][0]["user"]["token"]
except (KeyError, IndexError, TypeError):
pass
return token
except FileNotFoundError:
# New deployments may ask for a token before the kubectl snap is installed.
# Give them nothing!
return None
secrets = json.loads(output)
if "items" in secrets:
for secret in secrets["items"]:
try:
data_b64 = secret["data"]
password_b64 = data_b64["password"].encode("UTF-8")
username_b64 = data_b64["username"].encode("UTF-8")
except (KeyError, TypeError):
# CK authn secrets will have populated 'data', but not all secrets do
continue
password = b64decode(password_b64).decode("UTF-8")
secret_user = b64decode(username_b64).decode("UTF-8")
if username == secret_user:
return password
return None

View File

@ -12,8 +12,9 @@ from subprocess import check_call, check_output, CalledProcessError, STDOUT
from charms.leadership import leader_get, leader_set
from charms.reactive import when, when_not, when_any, set_state, remove_state
from charms.reactive import hook, is_state
from charms.reactive import endpoint_from_flag
from charms.reactive import data_changed
from charms.reactive import endpoint_from_flag, endpoint_from_name
from charms.reactive import data_changed, any_file_changed
from charms.reactive import register_trigger
from charmhelpers.core.hookenv import (
log,
resource_get,
@ -32,7 +33,8 @@ from charmhelpers.core.host import (
service_running
)
from charmhelpers.core.templating import render
from charms.layer import status
from charms.layer import kubernetes_common, status
from charms.layer.kubernetes_common import kubectl
# TODO:
# - Handle the 'stop' hook by stopping and uninstalling all the things.
@ -52,6 +54,10 @@ ETCD_CERT_PATH = os.path.join(CALICOCTL_PATH, 'etcd-cert')
ETCD_CA_PATH = os.path.join(CALICOCTL_PATH, 'etcd-ca')
CALICO_UPGRADE_DIR = '/opt/calico-upgrade'
register_trigger(
when="cni.kubeconfig.changed", clear_flag="calico.service.installed"
)
@hook('upgrade-charm')
def upgrade_charm():
@ -75,6 +81,8 @@ def upgrade_charm():
'calico-v3-npc-cleanup-needed': True,
'calico-v3-completion-needed': True
})
cni = endpoint_from_name('cni')
cni.manage_flags()
@when('leadership.is_leader', 'leadership.set.calico-v3-data-migration-needed',
@ -238,6 +246,7 @@ def check_etcd_changes():
ETCD_CA_PATH)
remove_state('calico.service.installed')
remove_state('calico.npc.deployed')
remove_state('calico.cni.configured')
def get_mtu():
@ -278,13 +287,52 @@ def get_bind_address():
return unit_private_ip()
@when('leadership.is_leader', 'leadership.set.calico-v3-data-ready')
@when_not('leadership.set.calico-node-token')
def create_calico_node_token():
''' Create the system:calico-node user token '''
status.maintenance('Creating system:calico-node user token')
token = kubernetes_common.token_generator()
user = 'system:calico-node'
success = kubernetes_common.create_secret(
token=token,
username=user,
user=user
)
if not success:
log('Failed to create system:calico-node user token, will retry')
status.waiting('Waiting to retry creating calico-node token')
return
# create_secret may have added the <user>:: prefix. Get the new token.
token = kubernetes_common.get_secret_password(user)
if not token:
log('Failed to get system:calico-node user token, will retry')
status.waiting('Waiting to retry creating calico-node token')
return
leader_set({'calico-node-token': token})
@when('calico.binaries.installed', 'etcd.available',
'calico.etcd-credentials.installed',
'leadership.set.calico-v3-data-ready')
'calico.etcd-credentials.installed', 'cni.kubeconfig.available',
'leadership.set.calico-node-token', 'leadership.set.calico-v3-data-ready')
@when_not('calico.service.installed')
def install_calico_service():
''' Install the calico-node systemd service. '''
status.maintenance('Installing calico-node service.')
with open(kubernetes_common.kubeclientconfig_path) as f:
kubeconfig = yaml.safe_load(f)
any_file_changed([kubernetes_common.kubeclientconfig_path])
kubeconfig['users'] = [{
'name': 'calico-node',
'user': {
'token': leader_get('calico-node-token')
}
}]
kubeconfig['contexts'][0]['context']['user'] = 'calico-node'
with open('/opt/calicoctl/kubeconfig', 'w') as f:
yaml.dump(kubeconfig, f)
etcd = endpoint_from_flag('etcd.available')
service_path = os.path.join(os.sep, 'lib', 'systemd', 'system',
'calico-node.service')
@ -309,6 +357,7 @@ def install_calico_service():
check_call(['systemctl', 'daemon-reload'])
service_restart('calico-node')
service('enable', 'calico-node')
remove_state('cni.kubeconfig.changed')
set_state('calico.service.installed')
@ -398,14 +447,13 @@ def configure_cni():
cni = endpoint_from_flag('cni.is-worker')
etcd = endpoint_from_flag('etcd.available')
os.makedirs('/etc/cni/net.d', exist_ok=True)
cni_config = cni.get_config()
ip_versions = {net.version for net in get_networks(charm_config('cidr'))}
context = {
'connection_string': etcd.get_connection_string(),
'etcd_key_path': ETCD_KEY_PATH,
'etcd_cert_path': ETCD_CERT_PATH,
'etcd_ca_path': ETCD_CA_PATH,
'kubeconfig_path': cni_config['kubeconfig_path'],
'kubeconfig_path': '/opt/calicoctl/kubeconfig',
'mtu': get_mtu(),
'assign_ipv4': 'true' if 4 in ip_versions else 'false',
'assign_ipv6': 'true' if 6 in ip_versions else 'false',
@ -483,6 +531,14 @@ def configure_bgp_globals():
spec = bgp_config['spec']
spec['asNumber'] = config['global-as-number']
spec['nodeToNodeMeshEnabled'] = config['node-to-node-mesh']
spec['serviceClusterIPs'] = [
{'cidr': cidr}
for cidr in config['bgp-service-cluster-ips'].split()
]
spec['serviceExternalIPs'] = [
{'cidr': cidr}
for cidr in config['bgp-service-external-ips'].split()
]
calicoctl_apply(bgp_config)
except CalledProcessError:
log(traceback.format_exc())
@ -493,7 +549,9 @@ def configure_bgp_globals():
@when_any('config.changed.global-as-number',
'config.changed.node-to-node-mesh')
'config.changed.node-to-node-mesh',
'config.changed.bgp-service-cluster-ips',
'config.changed.bgp-service-external-ips')
def reconfigure_bgp_globals():
remove_state('calico.bgp.globals.configured')
@ -709,15 +767,6 @@ def calicoctl_apply(data):
calicoctl('apply', '-f', path)
def kubectl(*args):
cmd = ['kubectl', '--kubeconfig=/root/.kube/config'] + list(args)
try:
return check_output(cmd)
except CalledProcessError as e:
log(e.output)
raise
def get_calicoctl_env():
etcd = endpoint_from_flag('etcd.available')
env = {}


@ -1,8 +0,0 @@
#!/bin/bash
set -x
sudo apt update
sudo apt install -qyf docker.io
sudo snap install charm --classic
sudo snap install yq


@ -1,7 +0,0 @@
#!/bin/bash
set -x
export PATH=/snap/bin:$PATH
: "${CHARM_BUILD_DIR:=/tmp/charms}"
charm build -r --force -o "$CHARM_BUILD_DIR"


@ -1,53 +0,0 @@
#!/bin/bash
set -x
export PATH=/snap/bin:$PATH
: "${CHARM_BUILD_DIR:=/tmp/charms}"
charm whoami
RET=$?
if ((RET > 0)); then
echo "Not logged into charmstore"
exit 1
fi
function generate::attachments
{
./build-calico-resource.sh
touch calico-node-image.tar.gz
charm attach cs:~"$NAMESPACE"/"$CHARM" --channel unpublished \
calico-node-image=calico-node-image.tar.gz
charm attach cs:~"$NAMESPACE"/"$CHARM" --channel unpublished \
calico=calico-amd64.tar.gz
charm attach cs:~"$NAMESPACE"/"$CHARM" --channel unpublished \
calico-arm64=calico-arm64.tar.gz
charm attach cs:~"$NAMESPACE"/"$CHARM" --channel unpublished \
calico-upgrade=calico-upgrade-amd64.tar.gz
charm attach cs:~"$NAMESPACE"/"$CHARM" --channel unpublished \
calico-upgrade-arm64=calico-upgrade-arm64.tar.gz
}
function generate::resource::argument
{
py_script="
import sys
import json
resources_json = json.load(sys.stdin)
resource_map = []
for item in resources_json:
resource_map.append(f\"--resource {item['Name']}-{item['Revision']}\")
print(' '.join(resource_map))
"
charm list-resources cs:~"$NAMESPACE"/"$CHARM" --channel unpublished --format json | env python3 -c "$py_script"
}
URL=$(charm push "$CHARM_BUILD_DIR"/builds/"$CHARM"/. cs:~"$NAMESPACE"/"$CHARM" | yq r - url)
generate::attachments
if [ "$CHANNEL" != unpublished ]; then
charm release "$URL" --channel "$CHANNEL" $(generate::resource::argument)
fi


@ -21,6 +21,7 @@ ExecStart=/usr/local/sbin/charm-env --charm calico conctl run \
--env ETCD_KEY_FILE={{ etcd_key_path }} \
--env NODENAME={{ nodename }} \
--env IP={{ ip }} \
--env KUBECONFIG=/opt/calicoctl/kubeconfig \
{% if ipv4 == "none" -%}
--env CALICO_ROUTER_ID="hash" \
{% endif -%}


@ -104,20 +104,131 @@ apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: calico-node
rules:
- apiGroups:
- ""
# The CNI plugin needs to get pods, nodes, and namespaces.
- apiGroups: [""]
resources:
- pods
- nodes
- namespaces
verbs:
- get
- apiGroups:
- ""
- apiGroups: [""]
resources:
- endpoints
- services
verbs:
# Used to discover service IPs for advertisement.
- watch
- list
# Used to discover Typhas.
- get
- apiGroups: [""]
resources:
- nodes/status
verbs:
# Needed for clearing NodeNetworkUnavailable flag.
- patch
# Calico stores some configuration information in node annotations.
- update
# Watch for changes to Kubernetes NetworkPolicies.
- apiGroups: ["networking.k8s.io"]
resources:
- networkpolicies
verbs:
- watch
- list
# Used by Calico for policy information.
- apiGroups: [""]
resources:
- pods
- namespaces
- serviceaccounts
verbs:
- list
- watch
# The CNI plugin patches pods/status.
- apiGroups: [""]
resources:
- pods/status
verbs:
- patch
# Calico monitors various CRDs for config.
- apiGroups: ["crd.projectcalico.org"]
resources:
- globalfelixconfigs
- felixconfigurations
- bgppeers
- globalbgpconfigs
- bgpconfigurations
- ippools
- ipamblocks
- globalnetworkpolicies
- globalnetworksets
- networkpolicies
- networksets
- clusterinformations
- hostendpoints
- blockaffinities
verbs:
- get
- list
- watch
# Calico must create and update some CRDs on startup.
- apiGroups: ["crd.projectcalico.org"]
resources:
- ippools
- felixconfigurations
- clusterinformations
verbs:
- create
- update
# Calico stores some configuration information on the node.
- apiGroups: [""]
resources:
- nodes
verbs:
- get
- list
- watch
# These permissions are only required for upgrade from v2.6, and can
# be removed after upgrade or on fresh installations.
- apiGroups: ["crd.projectcalico.org"]
resources:
- bgpconfigurations
- bgppeers
verbs:
- create
- update
# These permissions are required for Calico CNI to perform IPAM allocations.
- apiGroups: ["crd.projectcalico.org"]
resources:
- blockaffinities
- ipamblocks
- ipamhandles
verbs:
- get
- list
- create
- update
- delete
- apiGroups: ["crd.projectcalico.org"]
resources:
- ipamconfigs
verbs:
- get
# Block affinities must also be watchable by confd for route aggregation.
- apiGroups: ["crd.projectcalico.org"]
resources:
- blockaffinities
verbs:
- watch
# The Calico IPAM migration needs to get daemonsets. These permissions can be
# removed if not upgrading from an installation using host-local IPAM.
- apiGroups: ["apps"]
resources:
- daemonsets
verbs:
- get
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
@ -128,9 +239,8 @@ roleRef:
kind: ClusterRole
name: calico-node
subjects:
- kind: ServiceAccount
name: calico-node
namespace: kube-system
- kind: User
name: system:calico-node
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole


@ -1,5 +0,0 @@
#!/bin/bash
sudo add-apt-repository ppa:juju/stable -y
sudo apt-get update
sudo apt-get install amulet python-requests -y


@ -1,31 +0,0 @@
#!/usr/bin/python3
import amulet
import requests
import unittest
class TestCharm(unittest.TestCase):
def setUp(self):
self.d = amulet.Deployment()
self.d.add('layer-calico-cni')
self.d.expose('layer-calico-cni')
self.d.setup(timeout=900)
self.d.sentry.wait()
self.unit = self.d.sentry['layer-calico-cni'][0]
def test_service(self):
# test we can access over http
page = requests.get('http://{}'.format(self.unit.info['public-address']))
self.assertEqual(page.status_code, 200)
# Now you can use self.d.sentry[SERVICE][UNIT] to address each of the units and perform
# more in-depth steps. Each self.d.sentry[SERVICE][UNIT] has the following methods:
# - .info - An array of the information of that unit from Juju
# - .file(PATH) - Get the details of a file on that unit
# - .file_contents(PATH) - Get plain text output of PATH file from that unit
# - .directory(PATH) - Get details of directory
# - .directory_contents(PATH) - List files and folders in PATH on that unit
# - .relation(relation, service:rel) - Get relation data from return service


@ -0,0 +1,9 @@
options:
as-number:
type: int
description: AS Number
default: 64512
bgp-peers:
type: string
description: BGP peers
default: "[]"


@ -0,0 +1,7 @@
name: bird
description: |
Test charm running BIRD
summary: |
Test charm running BIRD
series:
- focal


@ -0,0 +1 @@
ops


@ -0,0 +1,60 @@
#!/usr/bin/env python3
import logging
from ops.charm import CharmBase
from ops.main import main
from ops.model import ActiveStatus, MaintenanceStatus
from subprocess import check_call
import yaml
log = logging.getLogger(__name__)
bird_config_base = """
log syslog all;
debug protocols all;
protocol kernel {
persist;
scan time 20;
export all;
}
protocol device {
scan time 10;
}
"""
bird_config_peer = """
protocol bgp {
import all;
local as %s;
neighbor %s as %s;
direct;
}
"""
class BirdCharm(CharmBase):
def __init__(self, *args):
super().__init__(*args)
self.framework.observe(self.on.install, self.install)
self.framework.observe(self.on.config_changed, self.config_changed)
def install(self, event):
self.unit.status = MaintenanceStatus("Installing BIRD")
check_call(['apt-get', 'update'])
check_call(['apt-get', 'install', '-y', 'bird'])
def config_changed(self, event):
self.unit.status = MaintenanceStatus("Configuring BIRD")
as_number = self.config['as-number']
bird_config = "\n".join([bird_config_base] + [
bird_config_peer % (as_number, peer['address'], peer['as-number'])
for peer in yaml.safe_load(self.config['bgp-peers'])
])
with open('/etc/bird/bird.conf', 'w') as f:
f.write(bird_config)
check_call(['systemctl', 'reload', 'bird'])
self.unit.status = ActiveStatus()
if __name__ == "__main__":
main(BirdCharm)


@ -0,0 +1,80 @@
description: A minimal two-machine Kubernetes cluster, appropriate for development.
series: focal
machines:
'0':
constraints: cores=2 mem=4G root-disk=16G
series: focal
'1':
constraints: cores=4 mem=4G root-disk=16G
series: focal
services:
containerd:
charm: cs:~containers/containerd
channel: edge
easyrsa:
charm: cs:~containers/easyrsa
channel: edge
num_units: 1
to:
- '1'
etcd:
charm: cs:~containers/etcd
channel: edge
num_units: 1
options:
channel: 3.4/stable
to:
- '0'
calico:
charm: {{calico_charm}}
resources:
calico: {{resource_path}}/calico-amd64.tar.gz
calico-arm64: {{resource_path}}/calico-arm64.tar.gz
calico-upgrade: {{resource_path}}/calico-upgrade-amd64.tar.gz
calico-upgrade-arm64: {{resource_path}}/calico-upgrade-arm64.tar.gz
calico-node-image: {{resource_path}}/calico-node-image.tar.gz
options:
ignore-loose-rpf: true
vxlan: Always
kubernetes-master:
charm: cs:~containers/kubernetes-master
channel: edge
constraints: cores=2 mem=4G root-disk=16G
expose: true
num_units: 1
options:
channel: 1.22/edge
to:
- '0'
kubernetes-worker:
charm: cs:~containers/kubernetes-worker
channel: edge
constraints: cores=4 mem=4G root-disk=16G
expose: true
num_units: 1
options:
channel: 1.22/edge
to:
- '1'
relations:
- - kubernetes-master:kube-control
- kubernetes-worker:kube-control
- - kubernetes-master:certificates
- easyrsa:client
- - kubernetes-master:etcd
- etcd:db
- - kubernetes-worker:certificates
- easyrsa:client
- - etcd:certificates
- easyrsa:client
- - calico:etcd
- etcd:db
- - calico:cni
- kubernetes-master:cni
- - calico:cni
- kubernetes-worker:cni
- - containerd:containerd
- kubernetes-worker:container-runtime
- - containerd:containerd
- kubernetes-master:container-runtime


@ -0,0 +1,90 @@
from functools import partial
import pytest
from unittest import mock
from charms.layer import kubernetes_common
class TestCreateKubeConfig:
@pytest.fixture(autouse=True)
def _files(self, tmp_path):
self.cfg_file = tmp_path / "config"
self.ca_file = tmp_path / "ca.crt"
self.ca_file.write_text("foo")
self.ckc = partial(
kubernetes_common.create_kubeconfig,
self.cfg_file,
"server",
self.ca_file,
)
def test_guard_clauses(self):
with pytest.raises(ValueError):
self.ckc()
assert not self.cfg_file.exists()
with pytest.raises(ValueError):
self.ckc(token="token", password="password")
assert not self.cfg_file.exists()
with pytest.raises(ValueError):
self.ckc(key="key")
assert not self.cfg_file.exists()
def test_file_creation(self):
self.ckc(password="password")
assert self.cfg_file.exists()
cfg_data_1 = self.cfg_file.read_text()
assert cfg_data_1
def test_idempotency(self):
self.ckc(password="password")
cfg_data_1 = self.cfg_file.read_text()
self.ckc(password="password")
cfg_data_2 = self.cfg_file.read_text()
# Verify that calling w/ the same data keeps the same file contents.
assert cfg_data_2 == cfg_data_1
def test_efficient_updates(self):
self.ckc(password="old_password")
cfg_stat_1 = self.cfg_file.stat()
self.ckc(password="old_password")
cfg_stat_2 = self.cfg_file.stat()
self.ckc(password="new_password")
cfg_stat_3 = self.cfg_file.stat()
# Verify that calling with the same data doesn't
# modify the file at all, but that new data does
assert cfg_stat_1.st_mtime == cfg_stat_2.st_mtime < cfg_stat_3.st_mtime
def test_aws_iam(self):
self.ckc(password="password", aws_iam_cluster_id="aws-cluster")
assert self.cfg_file.exists()
cfg_data_1 = self.cfg_file.read_text()
assert "aws-cluster" in cfg_data_1
def test_keystone(self):
self.ckc(password="password", keystone=True)
assert self.cfg_file.exists()
cfg_data_1 = self.cfg_file.read_text()
assert "keystone-user" in cfg_data_1
assert "exec" in cfg_data_1
def test_atomic_updates(self):
self.ckc(password="old_password")
with self.cfg_file.open("rt") as f:
# Perform a write in the middle of reading
self.ckc(password="new_password")
# Read data from existing FH after new data was written
cfg_data_1 = f.read()
# Read updated data
cfg_data_2 = self.cfg_file.read_text()
# Verify that the in-progress read didn't get any of the new data
assert cfg_data_1 != cfg_data_2
assert "old_password" in cfg_data_1
assert "new_password" in cfg_data_2
@mock.patch("charmhelpers.core.hookenv.network_get", autospec=True)
def test_get_ingress_address(self, network_get):
network_get.return_value = {"ingress-addresses": ["1.2.3.4", "5.6.7.8"]}
ingress = kubernetes_common.get_ingress_address("endpoint-name")
assert ingress == "1.2.3.4"
ingress = kubernetes_common.get_ingress_address("endpoint-name", ["1.2.3.4"])
assert ingress == "5.6.7.8"


@ -0,0 +1,36 @@
from kubernetes_wrapper import Kubernetes
import logging
import pytest
import random
import string
log = logging.getLogger(__name__)
@pytest.fixture(scope="module")
@pytest.mark.asyncio
async def kubernetes(ops_test):
kubeconfig_path = ops_test.tmp_path / "kubeconfig"
retcode, stdout, stderr = await ops_test.run(
"juju", "scp", "kubernetes-master/leader:config", kubeconfig_path
)
if retcode != 0:
log.error(f"retcode: {retcode}")
log.error(f"stdout:\n{stdout.strip()}")
log.error(f"stderr:\n{stderr.strip()}")
pytest.fail("Failed to copy kubeconfig from kubernetes-master")
namespace = "test-calico-integration-" + "".join(
random.choice(string.ascii_lowercase + string.digits)
for _ in range(5)
)
kubernetes = Kubernetes(namespace, kubeconfig=str(kubeconfig_path))
namespace_object = {
'apiVersion': 'v1',
'kind': 'Namespace',
'metadata': {
'name': namespace
}
}
kubernetes.apply_object(namespace_object)
yield kubernetes
kubernetes.delete_object(namespace_object)


@ -0,0 +1,139 @@
import logging
import os
import pytest
import time
import yaml
log = logging.getLogger(__name__)
@pytest.mark.abort_on_fail
async def test_build_and_deploy(ops_test):
resource_path = ops_test.tmp_path / "charm-resources"
resource_path.mkdir()
resource_build_script = os.path.abspath("./build-calico-resource.sh")
log.info("Building charm resources")
retcode, stdout, stderr = await ops_test.run(
resource_build_script,
cwd=resource_path
)
if retcode != 0:
log.error(f"retcode: {retcode}")
log.error(f"stdout:\n{stdout.strip()}")
log.error(f"stderr:\n{stderr.strip()}")
pytest.fail("Failed to build charm resources")
bundle = ops_test.render_bundle(
"tests/data/bundle.yaml",
calico_charm=await ops_test.build_charm("."),
resource_path=resource_path
)
# deploy with Juju CLI because libjuju does not support local resource
# paths in bundles
log.info("Deploying bundle")
retcode, stdout, stderr = await ops_test.run(
"juju", "deploy", "-m", ops_test.model_full_name, bundle
)
if retcode != 0:
log.error(f"retcode: {retcode}")
log.error(f"stdout:\n{stdout.strip()}")
log.error(f"stderr:\n{stderr.strip()}")
pytest.fail("Failed to deploy bundle")
await ops_test.model.wait_for_idle(wait_for_active=True, timeout=60 * 60)
async def test_bgp_service_ip_advertisement(ops_test, kubernetes):
# deploy a test service in k8s (nginx)
deployment = {
'apiVersion': 'apps/v1',
'kind': 'Deployment',
'metadata': {
'name': 'nginx'
},
'spec': {
'selector': {
'matchLabels': {
'app': 'nginx'
}
},
'template': {
'metadata': {
'labels': {
'app': 'nginx'
}
},
'spec': {
'containers': [{
'name': 'nginx',
'image': 'rocks.canonical.com/cdk/nginx:1.18',
'ports': [{
'containerPort': 80
}]
}]
}
}
}
}
service = {
'apiVersion': 'v1',
'kind': 'Service',
'metadata': {
'name': 'nginx'
},
'spec': {
'selector': {
'app': 'nginx'
},
'ports': [{
'protocol': 'TCP',
'port': 80
}]
}
}
kubernetes.apply_object(deployment)
kubernetes.apply_object(service)
service_ip = kubernetes.read_object(service).spec.cluster_ip
# build and deploy bird charm
bird_charm = await ops_test.build_charm("tests/data/bird-operator")
await ops_test.model.deploy(bird_charm)
await ops_test.model.wait_for_idle(wait_for_active=True, timeout=60 * 10)
# configure calico to peer with bird
master_config = await ops_test.model.applications['kubernetes-master'].get_config()
bird_app = ops_test.model.applications['bird']
calico_app = ops_test.model.applications['calico']
await calico_app.set_config({
'bgp-service-cluster-ips': master_config['service-cidr']['value'],
'global-bgp-peers': yaml.dump([
{'address': unit.public_address, 'as-number': 64512}
for unit in bird_app.units
])
})
# configure bird to peer with calico
await bird_app.set_config({
'bgp-peers': yaml.dump([
{'address': unit.public_address, 'as-number': 64512}
for unit in calico_app.units
])
})
# verify test service is reachable from bird
deadline = time.time() + 60 * 10
while time.time() < deadline:
retcode, stdout, stderr = await ops_test.run(
'juju', 'ssh', '-m', ops_test.model_full_name, 'bird/leader',
'curl', '--connect-timeout', '10', service_ip
)
if retcode == 0:
break
else:
pytest.fail("Failed service connection test after BGP config")
# clean up
await calico_app.set_config({
'bgp-service-cluster-ips': '',
'global-bgp-peers': '[]'
})
await bird_app.destroy()


@ -0,0 +1,122 @@
import json
import string
from subprocess import CalledProcessError
from unittest.mock import Mock
from charms.layer import kubernetes_common as kc
def test_token_generator():
alphanum = string.ascii_letters + string.digits
token = kc.token_generator(10)
assert len(token) == 10
unknown_chars = set(token) - set(alphanum)
assert not unknown_chars
def test_get_secret_names(monkeypatch):
monkeypatch.setattr(kc, "kubectl", Mock())
kc.kubectl.side_effect = [
CalledProcessError(1, "none"),
FileNotFoundError,
"{}".encode("utf8"),
json.dumps(
{
"items": [
{
"metadata": {"name": "secret-id"},
"data": {"username": "dXNlcg=="},
},
],
}
).encode("utf8"),
]
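# Note: "dXNlcg==" in the final payload above is base64 for "user", so only
# that call yields a username -> secret-id mapping.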
assert kc.get_secret_names() == {}
assert kc.get_secret_names() == {}
assert kc.get_secret_names() == {}
assert kc.get_secret_names() == {"user": "secret-id"}
def test_generate_rfc1123():
alphanum = string.ascii_letters + string.digits
token = kc.generate_rfc1123(1000)
assert len(token) == 253
unknown_chars = set(token) - set(alphanum)
assert not unknown_chars
def test_create_secret(monkeypatch):
monkeypatch.setattr(kc, "render", Mock())
monkeypatch.setattr(kc, "kubectl_manifest", Mock())
monkeypatch.setattr(kc, "get_secret_names", Mock())
monkeypatch.setattr(kc, "generate_rfc1123", Mock())
kc.kubectl_manifest.side_effect = [True, False]
kc.get_secret_names.side_effect = [{"username": "secret-id"}, {}]
kc.generate_rfc1123.return_value = "foo"
assert kc.create_secret("token", "username", "user", "groups")
assert kc.render.call_args[1]["context"] == {
"groups": "Z3JvdXBz",
"password": "dXNlcjo6dG9rZW4=",
"secret_name": "secret-id",
"secret_namespace": "kube-system",
"type": "juju.is/token-auth",
"user": "dXNlcg==",
"username": "dXNlcm5hbWU=",
}
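# The expected context values are base64: "dXNlcjo6dG9rZW4=" decodes to
# "user::token" and "Z3JvdXBz" to "groups".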
assert not kc.create_secret("token", "username", "user", "groups")
assert kc.render.call_args[1]["context"] == {
"groups": "Z3JvdXBz",
"password": "dXNlcjo6dG9rZW4=",
"secret_name": "auth-user-foo",
"secret_namespace": "kube-system",
"type": "juju.is/token-auth",
"user": "dXNlcg==",
"username": "dXNlcm5hbWU=",
}
def test_get_secret_password(monkeypatch):
monkeypatch.setattr(kc, "kubectl", Mock())
monkeypatch.setattr(kc, "Path", Mock())
monkeypatch.setattr(kc, "yaml", Mock())
kc.kubectl.side_effect = [
CalledProcessError(1, "none"),
CalledProcessError(1, "none"),
CalledProcessError(1, "none"),
CalledProcessError(1, "none"),
CalledProcessError(1, "none"),
CalledProcessError(1, "none"),
FileNotFoundError,
json.dumps({}).encode("utf8"),
json.dumps({"items": []}).encode("utf8"),
json.dumps({"items": []}).encode("utf8"),
json.dumps({"items": [{}]}).encode("utf8"),
json.dumps({"items": [{"data": {}}]}).encode("utf8"),
json.dumps(
{"items": [{"data": {"username": "Ym9i", "password": "c2VjcmV0"}}]}
).encode("utf8"),
json.dumps(
{"items": [{"data": {"username": "dXNlcm5hbWU=", "password": "c2VjcmV0"}}]}
).encode("utf8"),
]
kc.yaml.safe_load.side_effect = [
{},
{"users": None},
{"users": []},
{"users": [{"user": {}}]},
{"users": [{"user": {"token": "secret"}}]},
]
assert kc.get_secret_password("username") is None
assert kc.get_secret_password("admin") is None
assert kc.get_secret_password("admin") is None
assert kc.get_secret_password("admin") is None
assert kc.get_secret_password("admin") is None
assert kc.get_secret_password("admin") == "secret"
assert kc.get_secret_password("username") is None
assert kc.get_secret_password("username") is None
assert kc.get_secret_password("username") is None
assert kc.get_secret_password("username") is None
assert kc.get_secret_password("username") is None
assert kc.get_secret_password("username") is None
assert kc.get_secret_password("username") is None
assert kc.get_secret_password("username") == "secret"


@ -0,0 +1,8 @@
#!/bin/bash
build_dir="$(mktemp -d)"
function cleanup { rm -rf "$build_dir"; }
trap cleanup EXIT
charm build . --build-dir "$build_dir"
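# Sanity-check the wheelhouse: install everything from it offline
# (--no-index) so any missing dependency fails the build.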
pip install -f "$build_dir/calico/wheelhouse" --no-index --no-cache-dir "$build_dir"/calico/wheelhouse/*


@ -1,18 +1,41 @@
[flake8]
max-line-length = 88
[tox]
skipsdist = True
envlist = lint,py3
envlist = lint,unit,integration
[testenv]
basepython = python3
setenv =
PYTHONPATH={toxinidir}:{toxinidir}/lib
PYTHONBREAKPOINT=ipdb.set_trace
[testenv:unit]
deps =
pyyaml
pytest
flake8
charms.unit_test
ipdb
git+https://github.com/juju-solutions/charms.unit_test/#egg=charms.unit_test
commands = pytest --tb native -s {posargs}
commands = pytest --tb native -s {posargs} {toxinidir}/tests/unit
[testenv:validate-wheelhouse]
allowlist_externals = {toxinidir}/tests/validate-wheelhouse.sh
commands = {toxinidir}/tests/validate-wheelhouse.sh
[testenv:integration]
deps =
pytest
pytest-operator
aiohttp
ipdb
git+https://github.com/canonical/kubernetes-wrapper@main#egg=kubernetes-wrapper
# tox only passes through the upper-case versions by default, but some
# programs, such as wget or pip, only honor the lower-case versions
passenv = http_proxy https_proxy no_proxy
commands = pytest --tb native --show-capture=no --log-cli-level=INFO -s {posargs} {toxinidir}/tests/integration
[testenv:lint]
commands = flake8 {toxinidir}/lib {toxinidir}/reactive {toxinidir}/tests
deps =
flake8
commands =
flake8 {toxinidir}/reactive {toxinidir}/lib {toxinidir}/tests


@ -1 +1 @@
0ea81f0c
ccfa68be



@ -1,47 +1,47 @@
{
"layers": [
{
"branch": "refs/heads/master\nrefs/heads/stable",
"branch": "refs/heads/master",
"rev": "fcdcea4e5de3e1556c24e6704607862d0ba00a56",
"url": "layer:options"
},
{
"branch": "refs/heads/master\nrefs/heads/stable",
"rev": "623e69c7b432456fd4364f6e1835424fd6b5425e",
"branch": "refs/heads/master",
"rev": "a3ff62c32c993d80417f6e093e3ef95e42f62083",
"url": "layer:basic"
},
{
"branch": "refs/heads/master\nrefs/heads/stable",
"branch": "refs/heads/master",
"rev": "527dd64fc4b9a6b0f8d80a3c2c0b865155050275",
"url": "layer:debug"
},
{
"branch": "refs/heads/master\nrefs/heads/stable",
"branch": "refs/heads/master",
"rev": "a7d7b6423db37a47611310039e6ed1929c0a2eab",
"url": "layer:status"
},
{
"branch": "refs/heads/master\nrefs/heads/stable",
"branch": "refs/heads/master",
"rev": "be187bfe2ed511fc7ee29bf25f7374a2d6d34b2d",
"url": "layer:container-runtime-common"
},
{
"branch": "refs/heads/stable",
"rev": "8a4e635092c98cef3eecd27063c7b2ae030e740e",
"branch": "refs/heads/master",
"rev": "e87057806fcbeb67d222b14e1ce2e4fafdf58f9d",
"url": "containerd"
},
{
"branch": "refs/heads/master\nrefs/heads/stable",
"branch": "refs/heads/master",
"rev": "6f927f10b97f45c566481cf57a29d433f17373e1",
"url": "interface:container-runtime"
},
{
"branch": "refs/heads/master\nrefs/heads/stable",
"branch": "refs/heads/master",
"rev": "b59ce0c44bc52c789175750ce18b42f76c9a4578",
"url": "interface:untrusted-container-runtime"
},
{
"branch": "refs/heads/master\nrefs/heads/stable",
"branch": "refs/heads/master",
"rev": "365ec9f348ccd561a9ec3e084c826f134676439e",
"url": "interface:docker-registry"
}
@ -52,6 +52,16 @@
"dynamic",
"unchecked"
],
".github/workflows/main.yaml": [
"containerd",
"static",
"f413dfd54279707a2dc0ebf6f5e399a1a65170a879ca126c63f1d98f543d0dd7"
],
".github/workflows/main.yml": [
"layer:basic",
"static",
"96a48a981ceb2a96f427a6b5226d2da6d7191981793804055d70a88ca1987473"
],
".gitignore": [
"containerd",
"static",
@ -62,11 +72,6 @@
"static",
"ab2c8c5a3ae50ec307e9e19ec30a20d4765161e0cb3bddb66f09c4a1b72b7f71"
],
".travis/profile-update.yaml": [
"layer:basic",
"static",
"731e20aa59bf61c024d317ad630e478301a9386ccc0afe56e6c1c09db07ac83b"
],
"LICENSE": [
"containerd",
"static",
@ -110,7 +115,7 @@
"config.yaml": [
"containerd",
"dynamic",
"93c92f8e530d50a436eab8dd2573a78e2d911aa53e9866b08ac61d6efec7e3f4"
"9b0153cac5602ecd84e860bb784a6477d46ea61b62eb1f0d0946fce06d859c09"
],
"copyright": [
"layer:status",
@ -425,7 +430,7 @@
"lib/charms/layer/basic.py": [
"layer:basic",
"static",
"3126b5754ad39402ee27e64527044ddd231ed1cd137fcedaffb51e63a635f108"
"98b47134770ed6e4c0b2d4aad73cd5bc200bec84aa9c1c4e075fd70c3222a0c9"
],
"lib/charms/layer/container_runtime_common.py": [
"layer:container-runtime-common",
@ -485,7 +490,7 @@
"reactive/containerd.py": [
"containerd",
"static",
"ca60ebe176530f379308dda0bba4d193acfcf8ce1e7ec923db6438b8b2f74933"
"4e80ec104f9e8c2bfbf1577a698b1df7fa37ce10907ab2bb71f96b7f672e4639"
],
"reactive/status.py": [
"layer:status",
@ -505,7 +510,12 @@
"templates/config.toml": [
"containerd",
"static",
"d84f9f266929e684c0b0a596704f075d26b97d7a0e43b525364a77dc22d2f320"
"f149f8147f7f7997420c34530a51eafffa71038f1426e3b4e7d7e3fbf89afac3"
],
"templates/config_v2.toml": [
"containerd",
"static",
"e842b1318fceef9e839623fe082cc7d6575820331882876330337176a6cbc542"
],
"templates/proxy.conf": [
"containerd",
@ -540,12 +550,12 @@
"version": [
"containerd",
"dynamic",
"2737d85a96f3fb093896eb885501ad940a695d5b9bb1d0d3816ace9eb68df82e"
"b56954e631fc8006577e9ce5f54fd067e695024061bc82733fb1408f1880a860"
],
"wheelhouse.txt": [
"containerd",
"dynamic",
"ff85b4195a997d8df2b05ce61b4e943a2fafb9152a7a7c7d112edd723d9e7d3c"
"ae13f54eb8741a216957d0d6c39051a9d88641cb3050331d03742dc895c71959"
],
"wheelhouse/Jinja2-2.10.1.tar.gz": [
"layer:basic",
@ -553,7 +563,7 @@
"065c4f02ebe7f7cf559e49ee5a95fb800a9e4528727aec6f24402a5374c65013"
],
"wheelhouse/MarkupSafe-1.1.1.tar.gz": [
"__pip__",
"layer:basic",
"dynamic",
"29872e92839765e546828bb7754a68c418d927cd064fd4708fab9fe9c8bb116b"
],
@ -567,30 +577,30 @@
"dynamic",
"cacecf0baa674d356641f1d406b8bff1d756d739c46b869a54de515d08e6fc9c"
],
"wheelhouse/certifi-2020.12.5.tar.gz": [
"wheelhouse/certifi-2021.10.8.tar.gz": [
"__pip__",
"dynamic",
"1a4995114262bffbc2413b159f2a1a480c969de6e6eb13ee966d470af86af59c"
"78884e7c1d4b00ce3cea67b44566851c4343c120abd683433ce934a68ea58872"
],
"wheelhouse/chardet-4.0.0.tar.gz": [
"__pip__",
"dynamic",
"0d6f53a15db4120f2b08c94f11e7d93d2c911ee118b6b30a04ec3ee8310179fa"
],
"wheelhouse/charmhelpers-0.20.21.tar.gz": [
"wheelhouse/charmhelpers-0.20.23.tar.gz": [
"layer:basic",
"dynamic",
"37dd06f9548724d38352d1eaf91216df9167066745774118481d40974599715c"
"59a9776594e91cd3e3e000043f8668b4d7b279422dbb17e320f01dc16385b80e"
],
"wheelhouse/charms.reactive-1.4.1.tar.gz": [
"layer:basic",
"dynamic",
"bba21b4fd40b26c240c9ef2aa10c6fdf73592031c68591da4e7ccc46ca9cb616"
],
"wheelhouse/idna-2.10.tar.gz": [
"wheelhouse/charset-normalizer-2.0.7.tar.gz": [
"__pip__",
"dynamic",
"b307872f855b18632ce0c21c5e45be78c0ea7ae4c15c828c20788b26921eb3f6"
"e019de665e2bcf9c2b64e2e5aa025fa991da8720daa3c1138cadd2fd1856aed0"
],
"wheelhouse/idna-3.3.tar.gz": [
"__pip__",
"dynamic",
"9d643ff0a55b762d5cdb124b8eaa99c66322e2157b69160bc32796e824360e6d"
],
"wheelhouse/netaddr-0.7.19.tar.gz": [
"layer:basic",
@ -607,15 +617,15 @@
"dynamic",
"c0a292bd977ef590379a3f05d7b7f65135487b67470f6281289a94e015650ea1"
],
"wheelhouse/pyaml-20.4.0.tar.gz": [
"wheelhouse/pyaml-21.10.1.tar.gz": [
"__pip__",
"dynamic",
"29a5c2a68660a799103d6949167bd6c7953d031449d08802386372de1db6ad71"
"c6519fee13bf06e3bb3f20cacdea8eba9140385a7c2546df5dbae4887f768383"
],
"wheelhouse/requests-2.25.1.tar.gz": [
"wheelhouse/requests-2.26.0.tar.gz": [
"containerd",
"dynamic",
"27973dd4a904a4f13b263a19c866c13b92a39ed1c964655f025f3f8d3d75b804"
"b8aa58f8cf793ffd8782d3d8cb19e66ef36f7aba4353eec859e74678b01b07a7"
],
"wheelhouse/setuptools-41.6.0.zip": [
"layer:basic",
@ -627,15 +637,15 @@
"dynamic",
"70a4cf5584e966ae92f54a764e6437af992ba42ac4bca7eb37cc5d02b98ec40a"
],
"wheelhouse/six-1.15.0.tar.gz": [
"wheelhouse/six-1.16.0.tar.gz": [
"__pip__",
"dynamic",
"30639c035cdb23534cd4aa2dd52c3bf48f06e5f4a941509c8bafd8ce11080259"
"1e61c37477a1626458e36f7b1d82aa5c9b094fa4802892072e49de9c60c4c926"
],
"wheelhouse/urllib3-1.26.4.tar.gz": [
"wheelhouse/urllib3-1.26.7.tar.gz": [
"__pip__",
"dynamic",
"e7b021f7241115872f92f43c6508082facffbd1c048e3c6e2bb9c2a157e28937"
"4987c65554f7a2dbf30c18fd48778ef124af6fab771a377103da0585e2336ece"
],
"wheelhouse/wheel-0.33.6.tar.gz": [
"layer:basic",

containerd/.github/workflows/main.yaml

@ -0,0 +1,38 @@
name: Run tests with Tox
on: [push]
jobs:
unit-tests:
name: Lint, Unit Tests
runs-on: ubuntu-latest
strategy:
matrix:
python: [3.5, 3.6, 3.7, 3.8, 3.9]
steps:
- uses: actions/checkout@v2
- name: Setup Python
uses: actions/setup-python@v2
with:
python-version: ${{ matrix.python }}
- name: Install Tox
run: pip install tox
- name: Run Tox
run: tox # Run tox using the version of Python in `PATH`
integration-tests:
name: Integration test with LXD
runs-on: ubuntu-latest
steps:
- name: Check out code
uses: actions/checkout@v2
- name: Setup Python
uses: actions/setup-python@v2
with:
python-version: 3.9
- name: Setup operator environment
uses: charmed-kubernetes/actions-operator@main
with:
provider: lxd
- name: Run integration test
run: tox -e integration

containerd/.github/workflows/main.yml

@ -0,0 +1,50 @@
name: Test Suite
on: [pull_request]
jobs:
lint:
name: Lint
runs-on: ubuntu-latest
strategy:
matrix:
python: [3.5, 3.6, 3.7, 3.8, 3.9]
steps:
- name: Check out code
uses: actions/checkout@v2
- name: Setup Python
uses: actions/setup-python@v2
with:
python-version: ${{ matrix.python }}
- name: Install Dependencies
run: |
pip install tox
- name: Run lint
run: tox -e flake8
functional-test:
name: Functional test with LXD
runs-on: ubuntu-latest
timeout-minutes: 360
steps:
- name: Check out code
uses: actions/checkout@v2
- name: Setup Python
uses: actions/setup-python@v2
with:
python-version: 3.8
- name: Install Dependencies
run: |
pip install tox
- name: Setup operator environment
uses: charmed-kubernetes/actions-operator@master
- name: Run test
run: tox -e func
- name: Show Status
if: ${{ always() }}
run: |
model=$(juju models --format yaml|grep "^- name:.*zaza"|cut -f2 -d/);
juju status -m "$model"
- name: Show Error Logs
if: ${{ always() }}
run: |
model=$(juju models --format yaml|grep "^- name:.*zaza"|cut -f2 -d/);
juju debug-log -m "$model" --replay --no-tail --level ERROR


@ -1,12 +0,0 @@
config: {}
description: Default LXD profile - updated
devices:
eth0:
name: eth0
parent: lxdbr0
nictype: bridged
type: nic
root:
path: /
pool: default
type: disk


@ -84,3 +84,41 @@
addresses) which should be accessed directly, rather than through
the proxy defined in http_proxy or https_proxy. Must be less than
2023 characters long.
"config_version":
"type": "string"
"default": "v1"
"description": |
(Use carefully: v2 is only tested with the Nvidia GPU operator.)
Set this config parameter to "v2" to enable the new configuration format.
The config file is parsed as version 1 by default.
Version 2 uses long plugin names, e.g. "io.containerd.grpc.v1.cri" instead of "cri".
"nvidia_apt_key_urls":
"type": "string"
"default": |
https://nvidia.github.io/nvidia-container-runtime/gpgkey
https://developer.download.nvidia.com/compute/cuda/repos/{id}{version_id_no_dot}/x86_64/7fa2af80.pub
"description": |
Space-separated list of APT GPG key URLs to add when using Nvidia GPUs.
Supported template options:
{id}: OS release ID, e.g. "ubuntu"
{version_id}: OS release version ID, e.g. "20.04"
{version_id_no_dot}: OS release version ID with no dot, e.g. "2004"
"nvidia_apt_sources":
"type": "string"
"default": |
deb https://nvidia.github.io/libnvidia-container/{id}{version_id}/$(ARCH) /
deb https://nvidia.github.io/nvidia-container-runtime/{id}{version_id}/$(ARCH) /
deb http://developer.download.nvidia.com/compute/cuda/repos/{id}{version_id_no_dot}/x86_64 /
"description": |
Newline-separated list of APT sources to add when using Nvidia GPUs.
Supported template options:
{id}: OS release ID, e.g. "ubuntu"
{version_id}: OS release version ID, e.g. "20.04"
{version_id_no_dot}: OS release version ID with no dot, e.g. "2004"
"nvidia_apt_packages":
"type": "string"
"default": "cuda-drivers nvidia-container-runtime"
"description": |
Space-separated list of APT packages to install when using Nvidia GPUs.


@ -199,7 +199,13 @@ def bootstrap_charm_deps():
# a set so that we can ignore the pre-install packages and let pip
# choose the best version in case there are multiple from layer
# conflicts)
pkgs = _load_wheelhouse_versions().keys() - set(pre_install_pkgs)
_versions = _load_wheelhouse_versions()
_pkgs = _versions.keys() - set(pre_install_pkgs)
# add back the versions such that each package in pkgs is
# <package_name>==<version>.
# This ensures that pip 20.3.4+ will install the packages from the
# wheelhouse without (erroneously) flagging an error.
pkgs = _add_back_versions(_pkgs, _versions)
reinstall_flag = '--force-reinstall'
if not cfg.get('use_venv', True) and pre_eoan:
reinstall_flag = '--ignore-installed'
@ -278,6 +284,55 @@ def _load_wheelhouse_versions():
return versions
def _add_back_versions(pkgs, versions):
"""Add back the version strings to each of the packages.
The versions are LooseVersion() from _load_wheelhouse_versions(). This
function strips the ".zip" or ".tar.gz" from the end of the version string
and adds it back to the package in the form of <package_name>==<version>
If a package name is not a key in the versions dictionary, then it is
returned in the list unchanged.
:param pkgs: A list of package names
:type pkgs: List[str]
:param versions: A map of package to LooseVersion
:type versions: Dict[str, LooseVersion]
:returns: A list of (maybe) versioned packages
:rtype: List[str]
"""
def _strip_ext(s):
"""Strip an extension (if it exists) from the string
:param s: the string to strip an extension off if it exists
:type s: str
:returns: string without an extension of .zip or .tar.gz
:rtype: str
"""
for ending in [".zip", ".tar.gz"]:
if s.endswith(ending):
return s[:-len(ending)]
return s
def _maybe_add_version(pkg):
"""Maybe add back the version number to a package if it exists.
Adds the version number, if the package exists in the lexically
captured `versions` dictionary, in the form <pkg>==<version>. Strips
the extension if it exists.
:param pkg: the package name to (maybe) add the version number to.
:type pkg: str
"""
try:
return "{}=={}".format(pkg, _strip_ext(str(versions[pkg])))
except KeyError:
pass
return pkg
return [_maybe_add_version(pkg) for pkg in pkgs]
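# For example (illustrative values): given
# versions == {"pyaml": LooseVersion("21.10.1.tar.gz")},
# _add_back_versions(["pyaml", "unpinned"], versions) returns
# ["pyaml==21.10.1", "unpinned"].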
def _update_if_newer(pip, pkgs):
installed = _load_installed_versions(pip)
wheelhouse = _load_wheelhouse_versions()


@ -18,7 +18,8 @@ from charms.reactive import (
set_state,
is_state,
remove_state,
endpoint_from_flag
endpoint_from_flag,
register_trigger
)
from charms.layer import containerd, status
@ -59,10 +60,18 @@ DB = unitdata.kv()
CONTAINERD_PACKAGE = 'containerd'
NVIDIA_PACKAGES = [
'cuda-drivers',
'nvidia-container-runtime',
]
register_trigger(
when='config.changed.nvidia_apt_key_urls',
clear_flag='containerd.nvidia.ready'
)
register_trigger(
when='config.changed.nvidia_apt_sources',
clear_flag='containerd.nvidia.ready'
)
register_trigger(
when='config.changed.nvidia_apt_packages',
clear_flag='containerd.nvidia.ready'
)
def _check_containerd():
@ -196,6 +205,22 @@ def populate_host_for_custom_registries(custom_registries):
return custom_registries
def insert_docker_io_to_custom_registries(custom_registries):
"""
Ensure the default docker.io registry exists.
Also gives a way for configuration to override the url for it.
If a docker.io host entry doesn't exist, we'll add one.
"""
if isinstance(custom_registries, list):
if not any(d.get('host') == 'docker.io' for d in custom_registries):
custom_registries.insert(0, {
"host": "docker.io",
"url": "https://registry-1.docker.io"
})
return custom_registries
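# For example (illustrative values):
#   insert_docker_io_to_custom_registries([{'host': 'registry.example.com'}])
#   -> [{'host': 'docker.io', 'url': 'https://registry-1.docker.io'},
#       {'host': 'registry.example.com'}]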
def merge_custom_registries(config_directory, custom_registries,
old_custom_registries):
"""
@ -210,6 +235,7 @@ def merge_custom_registries(config_directory, custom_registries,
registries += json.loads(custom_registries)
# json string already converted to python list here
registries = populate_host_for_custom_registries(registries)
registries = insert_docker_io_to_custom_registries(registries)
old_registries = []
if (old_custom_registries):
old_registries += json.loads(old_custom_registries)
@ -246,6 +272,16 @@ def upgrade_charm():
# Re-render config in case the template has changed in the new charm.
config_changed()
# Clean up old nvidia sources.list.d files
old_source_files = [
'/etc/apt/sources.list.d/nvidia-container-runtime.list',
'/etc/apt/sources.list.d/cuda.list'
]
for source_file in old_source_files:
if os.path.exists(source_file):
os.remove(source_file)
remove_state('containerd.nvidia.ready')
@when_not('containerd.br_netfilter.enabled')
def enable_br_netfilter_module():
@ -345,48 +381,38 @@ def configure_nvidia():
status.maintenance('Installing Nvidia drivers.')
dist = host.lsb_release()
release = '{}{}'.format(
dist['DISTRIB_ID'].lower(),
dist['DISTRIB_RELEASE']
)
os_release_id = dist['DISTRIB_ID'].lower()
os_release_version_id = dist['DISTRIB_RELEASE']
os_release_version_id_no_dot = os_release_version_id.replace('.', '')
proxies = {
"http": config('http_proxy'),
"https": config('https_proxy')
}
ncr_gpg_key = requests.get(
'https://nvidia.github.io/nvidia-container-runtime/gpgkey', proxies=proxies).text
import_key(ncr_gpg_key)
with open(
'/etc/apt/sources.list.d/nvidia-container-runtime.list', 'w'
) as f:
f.write(
'deb '
'https://nvidia.github.io/libnvidia-container/{}/$(ARCH) /\n'
.format(release)
)
f.write(
'deb '
'https://nvidia.github.io/nvidia-container-runtime/{}/$(ARCH) /\n'
.format(release)
key_urls = config('nvidia_apt_key_urls').split()
for key_url in key_urls:
formatted_key_url = key_url.format(
id=os_release_id,
version_id=os_release_version_id,
version_id_no_dot=os_release_version_id_no_dot
)
gpg_key = requests.get(formatted_key_url, proxies=proxies).text
import_key(gpg_key)
cuda_gpg_key = requests.get(
'https://developer.download.nvidia.com/'
'compute/cuda/repos/{}/x86_64/7fa2af80.pub'
.format(release.replace('.', '')), proxies=proxies
).text
import_key(cuda_gpg_key)
with open('/etc/apt/sources.list.d/cuda.list', 'w') as f:
f.write(
'deb '
'http://developer.download.nvidia.com/'
'compute/cuda/repos/{}/x86_64 /\n'
.format(release.replace('.', ''))
sources = config('nvidia_apt_sources').splitlines()
formatted_sources = [
source.format(
id=os_release_id,
version_id=os_release_version_id,
version_id_no_dot=os_release_version_id_no_dot
)
for source in sources
]
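# e.g. on Ubuntu 20.04 the libnvidia-container template above becomes:
#   deb https://nvidia.github.io/libnvidia-container/ubuntu20.04/$(ARCH) /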
with open('/etc/apt/sources.list.d/nvidia.list', 'w') as f:
f.write('\n'.join(formatted_sources))
apt_update()
apt_install(NVIDIA_PACKAGES, fatal=True)
packages = config('nvidia_apt_packages').split()
apt_install(packages, fatal=True)
set_state('containerd.nvidia.ready')
config_changed()
@ -406,11 +432,11 @@ def purge_containerd():
apt_purge(CONTAINERD_PACKAGE, fatal=True)
if is_state('containerd.nvidia.ready'):
apt_purge(NVIDIA_PACKAGES, fatal=True)
nvidia_packages = config('nvidia_apt_packages').split()
apt_purge(nvidia_packages, fatal=True)
sources = [
'/etc/apt/sources.list.d/cuda.list',
'/etc/apt/sources.list.d/nvidia-container-runtime.list'
'/etc/apt/sources.list.d/nvidia.list'
]
for f in sources:
@ -450,6 +476,10 @@ def config_changed():
# Create "dumb" context based on Config to avoid triggering config.changed
context = dict(config())
if context['config_version'] == "v2":
template_config = "config_v2.toml"
else:
template_config = "config.toml"
config_file = 'config.toml'
config_directory = '/etc/containerd'
@ -497,7 +527,7 @@ def config_changed():
context['runtime'] = 'runc'
render(
config_file,
template_config,
os.path.join(config_directory, config_file),
context
)


@ -58,8 +58,6 @@ oom_score = 0
conf_template = ""
[plugins.cri.registry]
[plugins.cri.registry.mirrors]
[plugins.cri.registry.mirrors."docker.io"]
endpoint = ["https://registry-1.docker.io"]
{% if custom_registries -%}
{% for registry in custom_registries -%}
{% if registry.host -%}
@ -111,3 +109,4 @@ oom_score = 0
mutation_threshold = 100
schedule_delay = "0s"
startup_delay = "100ms"


@ -0,0 +1,112 @@
root = "/var/lib/containerd"
state = "/run/containerd"
oom_score = 0
version = 2
[grpc]
address = "/run/containerd/containerd.sock"
uid = 0
gid = 0
max_recv_message_size = 16777216
max_send_message_size = 16777216
[debug]
address = ""
uid = 0
gid = 0
level = ""
[metrics]
address = ""
grpc_histogram = false
[cgroup]
path = ""
[plugins]
[plugins."io.containerd.monitor.v1.cgroups"]
no_prometheus = false
[plugins."io.containerd.grpc.v1.cri"]
stream_server_address = "127.0.0.1"
stream_server_port = "0"
enable_selinux = false
sandbox_image = "{{ sandbox_image }}"
stats_collect_period = 10
systemd_cgroup = false
enable_tls_streaming = false
max_container_log_line_size = 16384
[plugins."io.containerd.grpc.v1.cri".containerd]
no_pivot = false
{% if untrusted %}
[plugins."io.containerd.grpc.v1.cri".containerd.untrusted_workload_runtime]
runtime_type= "io.containerd.{{ untrusted_name }}.v2"
{% endif %}
[plugins."io.containerd.grpc.v1.cri".containerd.runtimes]
[plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc]
runtime_type = "io.containerd.runc.v1"
{% if untrusted %}
[plugins."io.containerd.grpc.v1.cri".containerd.runtimes.{{ untrusted_name }}]
runtime_type= "io.containerd.{{ untrusted_name }}.v2"
[plugins."io.containerd.grpc.v1.cri".containerd.runtimes.{{ untrusted_name }}.options]
Runtime = "{{ untrusted_binary }}"
RuntimeRoot = "{{ untrusted_path }}"
{% endif %}
[plugins."io.containerd.grpc.v1.cri".cni]
bin_dir = "/opt/cni/bin"
conf_dir = "/etc/cni/net.d"
conf_template = ""
[plugins."io.containerd.grpc.v1.cri".registry]
[plugins."io.containerd.grpc.v1.cri".registry.mirrors]
[plugins."io.containerd.grpc.v1.cri".registry.mirrors."docker.io"]
endpoint = ["https://registry-1.docker.io"]
{% if custom_registries -%}
{% for registry in custom_registries -%}
{% if registry.host -%}
[plugins."io.containerd.grpc.v1.cri".registry.mirrors."{{ registry.host }}"]
{% if registry.url -%}
endpoint = ["{{ registry.url}}"]
{% endif -%}
{% endif -%}
{% endfor -%}
{% endif -%}
{% if custom_registries %}
[plugins."io.containerd.grpc.v1.cri".registry.auths]
{% for registry in custom_registries %}
{% if registry.username and registry.password %}
[plugins."io.containerd.grpc.v1.cri".registry.auths."{{ registry.url }}"]
username = "{{ registry.username }}"
password = "{{ registry.password }}"
{% endif %}
{% endfor %}
[plugins."io.containerd.grpc.v1.cri".registry.configs]
{% for registry in custom_registries %}
{% if registry.ca or registry.cert or registry.key or registry.insecure_skip_verify %}
[plugins."io.containerd.grpc.v1.cri".registry.configs."{{ registry.url }}".tls]
ca_file = "{{ registry.ca if registry.ca else '' }}"
cert_file = "{{ registry.cert if registry.cert else '' }}"
key_file = "{{ registry.key if registry.key else '' }}"
insecure_skip_verify = {{ "true" if registry.insecure_skip_verify else "false" }}
{% endif %}
{% endfor %}
{% endif %}
[plugins."io.containerd.grpc.v1.cri".x509_key_pair_streaming]
tls_cert_file = ""
tls_key_file = ""
[plugins."io.containerd.service.v1.diff-service"]
default = ["walking"]
[plugins."io.containerd.runtime.v1.linux"]
shim = "{{ shim }}"
runtime = "{{ runtime }}"
runtime_root = ""
no_shim = false
shim_debug = false
[plugins."io.containerd.internal.v1.opt"]
path = "/opt/containerd"
[plugins."io.containerd.internal.v1.restart"]
interval = "10s"
[plugins."io.containerd.gc.v1.scheduler"]
pause_threshold = 0.02
deletion_threshold = 0
mutation_threshold = 100
schedule_delay = "0s"
startup_delay = "100ms"


@ -1 +1 @@
e247aeff
ccfa68be


@ -3,9 +3,11 @@
# even with installing setuptools before upgrading pip ends up with pip seeing
# the older setuptools at the system level if include_system_packages is true
pip>=18.1,<19.0
# pin Jinja2 and PyYAML to the last versions supporting python 3.4 for trusty
# pin Jinja2, PyYAML and MarkupSafe to the last versions supporting python 3.5
# for trusty
Jinja2<=2.10.1
PyYAML<=5.2
MarkupSafe<2.0.0
setuptools<42
setuptools-scm<=1.17.0
charmhelpers>=0.4.0,<1.0.0



@ -1,37 +1,37 @@
{
"layers": [
{
"branch": "refs/heads/master\nrefs/heads/stable",
"branch": "refs/heads/master",
"rev": "fcdcea4e5de3e1556c24e6704607862d0ba00a56",
"url": "layer:options"
},
{
"branch": "refs/heads/master\nrefs/heads/stable",
"rev": "623e69c7b432456fd4364f6e1835424fd6b5425e",
"branch": "refs/heads/master",
"rev": "a3ff62c32c993d80417f6e093e3ef95e42f62083",
"url": "layer:basic"
},
{
"branch": "refs/heads/master\nrefs/heads/stable",
"branch": "refs/heads/master",
"rev": "527dd64fc4b9a6b0f8d80a3c2c0b865155050275",
"url": "layer:debug"
},
{
"branch": "refs/heads/master\nrefs/heads/stable",
"branch": "refs/heads/master",
"rev": "cc5bd3f49b2fa5e6c3ab2336763c313ec8bf083f",
"url": "layer:leadership"
},
{
"branch": "refs/heads/master\nrefs/heads/stable",
"branch": "refs/heads/master",
"rev": "a7d7b6423db37a47611310039e6ed1929c0a2eab",
"url": "layer:status"
},
{
"branch": "refs/heads/stable",
"branch": "refs/heads/master",
"rev": "44f635b92624be5882c70ca1544d79f5d8483e24",
"url": "easyrsa"
},
{
"branch": "refs/heads/master\nrefs/heads/stable",
"branch": "refs/heads/master",
"rev": "d9850016d930a6d507b9fd45e2598d327922b140",
"url": "interface:tls-certificates"
}
@ -42,6 +42,11 @@
"dynamic",
"unchecked"
],
".github/workflows/main.yml": [
"layer:basic",
"static",
"96a48a981ceb2a96f427a6b5226d2da6d7191981793804055d70a88ca1987473"
],
".github/workflows/tox.yaml": [
"easyrsa",
"static",
@ -52,11 +57,6 @@
"static",
"3d3d61b1e6228c5d03ea369331e493d0688f94416a0384c5c0b41194e4297d33"
],
".travis/profile-update.yaml": [
"layer:basic",
"static",
"731e20aa59bf61c024d317ad630e478301a9386ccc0afe56e6c1c09db07ac83b"
],
"CONTRIBUTING.md": [
"easyrsa",
"static",
@ -360,7 +360,7 @@
"lib/charms/layer/basic.py": [
"layer:basic",
"static",
"3126b5754ad39402ee27e64527044ddd231ed1cd137fcedaffb51e63a635f108"
"98b47134770ed6e4c0b2d4aad73cd5bc200bec84aa9c1c4e075fd70c3222a0c9"
],
"lib/charms/layer/execd.py": [
"layer:basic",
@ -440,7 +440,7 @@
"wheelhouse.txt": [
"layer:basic",
"dynamic",
"7cf3f983dc8f85b0c0ca6d69accdb4f4af842a911625286df09005ed1897d797"
"44b8a3ab6ccaf3a81c8a96526a285462e01964e6090fd40104f3a087bab43c0c"
],
"wheelhouse/Jinja2-2.10.1.tar.gz": [
"layer:basic",
@ -448,7 +448,7 @@
"065c4f02ebe7f7cf559e49ee5a95fb800a9e4528727aec6f24402a5374c65013"
],
"wheelhouse/MarkupSafe-1.1.1.tar.gz": [
"__pip__",
"layer:basic",
"dynamic",
"29872e92839765e546828bb7754a68c418d927cd064fd4708fab9fe9c8bb116b"
],
@ -462,10 +462,10 @@
"dynamic",
"cacecf0baa674d356641f1d406b8bff1d756d739c46b869a54de515d08e6fc9c"
],
"wheelhouse/charmhelpers-0.20.21.tar.gz": [
"wheelhouse/charmhelpers-0.20.23.tar.gz": [
"layer:basic",
"dynamic",
"37dd06f9548724d38352d1eaf91216df9167066745774118481d40974599715c"
"59a9776594e91cd3e3e000043f8668b4d7b279422dbb17e320f01dc16385b80e"
],
"wheelhouse/charms.reactive-1.4.1.tar.gz": [
"layer:basic",
@ -487,10 +487,10 @@
"dynamic",
"c0a292bd977ef590379a3f05d7b7f65135487b67470f6281289a94e015650ea1"
],
"wheelhouse/pyaml-20.4.0.tar.gz": [
"wheelhouse/pyaml-21.10.1.tar.gz": [
"__pip__",
"dynamic",
"29a5c2a68660a799103d6949167bd6c7953d031449d08802386372de1db6ad71"
"c6519fee13bf06e3bb3f20cacdea8eba9140385a7c2546df5dbae4887f768383"
],
"wheelhouse/setuptools-41.6.0.zip": [
"layer:basic",
@ -502,10 +502,10 @@
"dynamic",
"70a4cf5584e966ae92f54a764e6437af992ba42ac4bca7eb37cc5d02b98ec40a"
],
"wheelhouse/six-1.15.0.tar.gz": [
"wheelhouse/six-1.16.0.tar.gz": [
"__pip__",
"dynamic",
"30639c035cdb23534cd4aa2dd52c3bf48f06e5f4a941509c8bafd8ce11080259"
"1e61c37477a1626458e36f7b1d82aa5c9b094fa4802892072e49de9c60c4c926"
],
"wheelhouse/wheel-0.33.6.tar.gz": [
"layer:basic",

easyrsa/.github/workflows/main.yml

@ -0,0 +1,50 @@
name: Test Suite
on: [pull_request]
jobs:
lint:
name: Lint
runs-on: ubuntu-latest
strategy:
matrix:
python: [3.5, 3.6, 3.7, 3.8, 3.9]
steps:
- name: Check out code
uses: actions/checkout@v2
- name: Setup Python
uses: actions/setup-python@v2
with:
python-version: ${{ matrix.python }}
- name: Install Dependencies
run: |
pip install tox
- name: Run lint
run: tox -e flake8
functional-test:
name: Functional test with LXD
runs-on: ubuntu-latest
timeout-minutes: 360
steps:
- name: Check out code
uses: actions/checkout@v2
- name: Setup Python
uses: actions/setup-python@v2
with:
python-version: 3.8
- name: Install Dependencies
run: |
pip install tox
- name: Setup operator environment
uses: charmed-kubernetes/actions-operator@master
- name: Run test
run: tox -e func
- name: Show Status
if: ${{ always() }}
run: |
model=$(juju models --format yaml|grep "^- name:.*zaza"|cut -f2 -d/);
juju status -m "$model"
- name: Show Error Logs
if: ${{ always() }}
run: |
model=$(juju models --format yaml|grep "^- name:.*zaza"|cut -f2 -d/);
juju debug-log -m "$model" --replay --no-tail --level ERROR


@ -1,12 +0,0 @@
config: {}
description: Default LXD profile - updated
devices:
eth0:
name: eth0
parent: lxdbr0
nictype: bridged
type: nic
root:
path: /
pool: default
type: disk


@ -199,7 +199,13 @@ def bootstrap_charm_deps():
# a set so that we can ignore the pre-install packages and let pip
# choose the best version in case there are multiple from layer
# conflicts)
pkgs = _load_wheelhouse_versions().keys() - set(pre_install_pkgs)
_versions = _load_wheelhouse_versions()
_pkgs = _versions.keys() - set(pre_install_pkgs)
# add back the versions such that each package in pkgs is
# <package_name>==<version>.
# This ensures that pip 20.3.4+ will install the packages from the
# wheelhouse without (erroneously) flagging an error.
pkgs = _add_back_versions(_pkgs, _versions)
reinstall_flag = '--force-reinstall'
if not cfg.get('use_venv', True) and pre_eoan:
reinstall_flag = '--ignore-installed'
@ -278,6 +284,55 @@ def _load_wheelhouse_versions():
return versions
def _add_back_versions(pkgs, versions):
"""Add back the version strings to each of the packages.
The versions are LooseVersion() from _load_wheelhouse_versions(). This
function strips the ".zip" or ".tar.gz" from the end of the version string
and adds it back to the package in the form of <package_name>==<version>
If a package name is not a key in the versions dictionary, then it is
returned in the list unchanged.
:param pkgs: A list of package names
:type pkgs: List[str]
:param versions: A map of package to LooseVersion
:type versions: Dict[str, LooseVersion]
:returns: A list of (maybe) versioned packages
:rtype: List[str]
"""
def _strip_ext(s):
"""Strip an extension (if it exists) from the string
:param s: the string to strip an extension off if it exists
:type s: str
:returns: string without an extension of .zip or .tar.gz
:rtype: str
"""
for ending in [".zip", ".tar.gz"]:
if s.endswith(ending):
return s[:-len(ending)]
return s
def _maybe_add_version(pkg):
"""Maybe add back the version number to a package if it exists.
Adds the version number, if the package exists in the lexically
captured `versions` dictionary, in the form <pkg>==<version>. Strips
the extension if it exists.
:param pkg: the package name to (maybe) add the version number to.
:type pkg: str
"""
try:
return "{}=={}".format(pkg, _strip_ext(str(versions[pkg])))
except KeyError:
pass
return pkg
return [_maybe_add_version(pkg) for pkg in pkgs]
def _update_if_newer(pip, pkgs):
installed = _load_installed_versions(pip)
wheelhouse = _load_wheelhouse_versions()


@ -1 +1 @@
e247aeff
ccfa68be


@ -3,9 +3,11 @@
# even with installing setuptools before upgrading pip ends up with pip seeing
# the older setuptools at the system level if include_system_packages is true
pip>=18.1,<19.0
# pin Jinja2 and PyYAML to the last versions supporting python 3.4 for trusty
# pin Jinja2, PyYAML and MarkupSafe to the last versions supporting python 3.5
# for trusty
Jinja2<=2.10.1
PyYAML<=5.2
MarkupSafe<2.0.0
setuptools<42
setuptools-scm<=1.17.0
charmhelpers>=0.4.0,<1.0.0



@ -1,79 +1,89 @@
{
"layers": [
{
"branch": "refs/heads/master\nrefs/heads/stable",
"branch": "refs/heads/master",
"rev": "fcdcea4e5de3e1556c24e6704607862d0ba00a56",
"url": "layer:options"
},
{
"branch": "refs/heads/master\nrefs/heads/stable",
"rev": "623e69c7b432456fd4364f6e1835424fd6b5425e",
"branch": "refs/heads/master",
"rev": "a3ff62c32c993d80417f6e093e3ef95e42f62083",
"url": "layer:basic"
},
{
"branch": "refs/heads/master\nrefs/heads/stable",
"branch": "refs/heads/master",
"rev": "023c67941e18663a4df49f53edba809f43ba5069",
"url": "layer:cis-benchmark"
},
{
"branch": "refs/heads/master\nrefs/heads/stable",
"branch": "refs/heads/master",
"rev": "527dd64fc4b9a6b0f8d80a3c2c0b865155050275",
"url": "layer:debug"
},
{
"branch": "refs/heads/master\nrefs/heads/stable",
"branch": "refs/heads/master",
"rev": "cc5bd3f49b2fa5e6c3ab2336763c313ec8bf083f",
"url": "layer:leadership"
},
{
"branch": "refs/heads/master\nrefs/heads/stable",
"branch": "refs/heads/master",
"rev": "47dfcd4920ef6317850a4837ef0057ab0092a18e",
"url": "layer:nagios"
},
{
"branch": "refs/heads/master\nrefs/heads/stable",
"branch": "refs/heads/master",
"rev": "fb46dec78d390571753d21876bbba689bbbca9e4",
"url": "layer:tls-client"
},
{
"branch": "refs/heads/master\nrefs/heads/stable",
"rev": "85d7cc4f7180d19df20e264358e920004cec192b",
"branch": "refs/heads/master",
"rev": "d3acdf209cbaf5b732e9aba621778a0f56dbaeb9",
"url": "layer:snap"
},
{
"branch": "refs/heads/master\nrefs/heads/stable",
"branch": "refs/heads/master",
"rev": "b60102068c6f0ddbeaf8a308549a3e88cfa35688",
"url": "layer:cdk-service-kicker"
},
{
"branch": "refs/heads/master\nrefs/heads/stable",
"branch": "refs/heads/master",
"rev": "a7d7b6423db37a47611310039e6ed1929c0a2eab",
"url": "layer:status"
},
{
"branch": "refs/heads/stable",
"rev": "53d38096a6de8d4bcc18a2cb64a94d904c496660",
"branch": "refs/heads/master",
"rev": "77eef0c0a49507b74fc90cec0864fdd85555f982",
"url": "etcd"
},
{
"branch": "refs/heads/master\nrefs/heads/stable",
"branch": "refs/heads/master",
"rev": "2e0e1fdea6d83b55078200aacb537d60013ec5bc",
"url": "interface:nrpe-external-master"
},
{
"branch": "refs/heads/master\nrefs/heads/stable",
"branch": "refs/heads/master",
"rev": "d9850016d930a6d507b9fd45e2598d327922b140",
"url": "interface:tls-certificates"
},
{
"branch": "refs/heads/master\nrefs/heads/stable",
"branch": "refs/heads/master",
"rev": "44f244cbd08b86bf2b68bd71c3fb34c7c070c382",
"url": "interface:etcd"
},
{
"branch": "refs/heads/master\nrefs/heads/stable",
"branch": "refs/heads/master",
"rev": "71b16123e38d9f8e2a38558e4f057f5071e56daa",
"url": "interface:etcd-proxy"
},
{
"branch": "refs/heads/master",
"rev": "e64261e281f012a00d374c6779ec52e488cb8713",
"url": "interface:grafana-dashboard"
},
{
"branch": "refs/heads/master",
"rev": "3f775242c16d53243c993d7ba0c896169ad1639e",
"url": "interface:prometheus-manual"
}
],
"signatures": {
@ -82,6 +92,11 @@
"dynamic",
"unchecked"
],
".github/workflows/main.yml": [
"layer:basic",
"static",
"96a48a981ceb2a96f427a6b5226d2da6d7191981793804055d70a88ca1987473"
],
".github/workflows/tox.yaml": [
"etcd",
"static",
@ -97,11 +112,6 @@
"static",
"b6dbe144aa288b8a89caf1119b9835b407b234c9b32a1c81013b12a0593a8be2"
],
".travis/profile-update.yaml": [
"layer:basic",
"static",
"731e20aa59bf61c024d317ad630e478301a9386ccc0afe56e6c1c09db07ac83b"
],
"CONTRIBUTING.md": [
"etcd",
"static",
@ -397,6 +407,31 @@
"dynamic",
"2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7"
],
"hooks/grafana-relation-broken": [
"layer:basic",
"dynamic",
"2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7"
],
"hooks/grafana-relation-changed": [
"layer:basic",
"dynamic",
"2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7"
],
"hooks/grafana-relation-created": [
"layer:basic",
"dynamic",
"2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7"
],
"hooks/grafana-relation-departed": [
"layer:basic",
"dynamic",
"2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7"
],
"hooks/grafana-relation-joined": [
"layer:basic",
"dynamic",
"2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7"
],
"hooks/hook.template": [
"layer:basic",
"static",
@ -452,6 +487,31 @@
"dynamic",
"2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7"
],
"hooks/prometheus-relation-broken": [
"layer:basic",
"dynamic",
"2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7"
],
"hooks/prometheus-relation-changed": [
"layer:basic",
"dynamic",
"2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7"
],
"hooks/prometheus-relation-created": [
"layer:basic",
"dynamic",
"2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7"
],
"hooks/prometheus-relation-departed": [
"layer:basic",
"dynamic",
"2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7"
],
"hooks/prometheus-relation-joined": [
"layer:basic",
"dynamic",
"2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7"
],
"hooks/proxy-relation-broken": [
"layer:basic",
"dynamic",
@ -542,6 +602,66 @@
"static",
"8ffc1a094807fd36a1d1428b0a07b2428074134d46086066ecd6c0acd9fcd13e"
],
"hooks/relations/grafana-dashboard/.gitignore": [
"interface:grafana-dashboard",
"static",
"5567034242cd31b5fb3a0d7e1f4cee8a2bb7454d4b35d4051f333145b09ff881"
],
"hooks/relations/grafana-dashboard/LICENSE": [
"interface:grafana-dashboard",
"static",
"cfc7749b96f63bd31c3c42b5c471bf756814053e847c10f3eb003417bc523d30"
],
"hooks/relations/grafana-dashboard/README.md": [
"interface:grafana-dashboard",
"static",
"d46e6c55423b4f0e28f803702632739582f3c0fad5d0427346f210eba8879685"
],
"hooks/relations/grafana-dashboard/__init__.py": [
"interface:grafana-dashboard",
"static",
"e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"
],
"hooks/relations/grafana-dashboard/common.py": [
"interface:grafana-dashboard",
"static",
"965f19c07d3475d7fe5a21235dc0cf1a27f11da9dad498d0cd1a51260b999aa3"
],
"hooks/relations/grafana-dashboard/copyright": [
"interface:grafana-dashboard",
"static",
"ee9809231ae81b9efc2b44b52aab2f6c8e4800319fdce5acad537b0eac556de4"
],
"hooks/relations/grafana-dashboard/docs/common.md": [
"interface:grafana-dashboard",
"static",
"ab69cc6e293b66175dfeee09707f8d02659ae5ba5b9aa4c441295a1025db12f7"
],
"hooks/relations/grafana-dashboard/docs/provides.md": [
"interface:grafana-dashboard",
"static",
"626b5655ce1e9f7733c86379fe67709e840b760046d899e5d761b034f94d939e"
],
"hooks/relations/grafana-dashboard/docs/requires.md": [
"interface:grafana-dashboard",
"static",
"4f78cff5a0395aff8477267e925066bfa93654eaeb4ba812c682f968171cca55"
],
"hooks/relations/grafana-dashboard/interface.yaml": [
"interface:grafana-dashboard",
"static",
"97e4c9a33360708668aa0330323fe9e9e5e95fa5a1e02d4f6b8e8dc60e155b52"
],
"hooks/relations/grafana-dashboard/provides.py": [
"interface:grafana-dashboard",
"static",
"cd63928094e6d34be92944ce65cb5b01ff9ba2bd9646036d006fa743a3c0fdb5"
],
"hooks/relations/grafana-dashboard/requires.py": [
"interface:grafana-dashboard",
"static",
"b071b9e66a3206351f563d7a4d160499b13a6af29d80930cb01720b5974e1dd2"
],
"hooks/relations/nrpe-external-master/README.md": [
"interface:nrpe-external-master",
"static",
@ -567,6 +687,66 @@
"static",
"e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"
],
"hooks/relations/prometheus-manual/.gitignore": [
"interface:prometheus-manual",
"static",
"5567034242cd31b5fb3a0d7e1f4cee8a2bb7454d4b35d4051f333145b09ff881"
],
"hooks/relations/prometheus-manual/LICENSE": [
"interface:prometheus-manual",
"static",
"cfc7749b96f63bd31c3c42b5c471bf756814053e847c10f3eb003417bc523d30"
],
"hooks/relations/prometheus-manual/README.md": [
"interface:prometheus-manual",
"static",
"506d4a334ebbe40905c76fc74e4ab5285d836ac28c7d1087b85b5a304960be2e"
],
"hooks/relations/prometheus-manual/__init__.py": [
"interface:prometheus-manual",
"static",
"e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"
],
"hooks/relations/prometheus-manual/common.py": [
"interface:prometheus-manual",
"static",
"013107b3bc8f148779ada8097db725ac9c3d22c605a5794cb8bae95cace9fa4c"
],
"hooks/relations/prometheus-manual/copyright": [
"interface:prometheus-manual",
"static",
"ee9809231ae81b9efc2b44b52aab2f6c8e4800319fdce5acad537b0eac556de4"
],
"hooks/relations/prometheus-manual/docs/common.md": [
"interface:prometheus-manual",
"static",
"91b9e9300a2fef2ce1112cdc57a224ee06ab513ea127edc8a59b6ce9c715cd25"
],
"hooks/relations/prometheus-manual/docs/provides.md": [
"interface:prometheus-manual",
"static",
"6b226c2587dbf5b304e6466f2b31bbb208512896b2ab057b11b646cf3501e292"
],
"hooks/relations/prometheus-manual/docs/requires.md": [
"interface:prometheus-manual",
"static",
"0100bdc38afd892336747eac005260bc9656ffc1a40f9fb0faef824ab07c1021"
],
"hooks/relations/prometheus-manual/interface.yaml": [
"interface:prometheus-manual",
"static",
"4a268318ee2adcc8a5a3482d49595d3805f94bf8976bd1ee4a4f7f9db89e472e"
],
"hooks/relations/prometheus-manual/provides.py": [
"interface:prometheus-manual",
"static",
"232917934637d8905ddcd448ce51c2c30dcb9217e043592be356d510c09190c4"
],
"hooks/relations/prometheus-manual/requires.py": [
"interface:prometheus-manual",
"static",
"0492a9f1037f39479f2e607162aa48ca67451e00124541a7d56f7e0a920903e0"
],
"hooks/relations/tls-certificates/.gitignore": [
"interface:tls-certificates",
"static",
@ -660,7 +840,7 @@
"layer.yaml": [
"etcd",
"dynamic",
"359a37ecaba6aa516c993260ae2978f840e2228f5944249fa7a5ea399963e628"
"c66d59abd20fb4af93d95f2fa5d13ce9eb1693f619c74a2efc5ce2eaa5989f98"
],
"lib/charms/layer/__init__.py": [
"layer:basic",
@ -670,7 +850,7 @@
"lib/charms/layer/basic.py": [
"layer:basic",
"static",
"3126b5754ad39402ee27e64527044ddd231ed1cd137fcedaffb51e63a635f108"
"98b47134770ed6e4c0b2d4aad73cd5bc200bec84aa9c1c4e075fd70c3222a0c9"
],
"lib/charms/layer/execd.py": [
"layer:basic",
@ -690,7 +870,7 @@
"lib/charms/layer/snap.py": [
"layer:snap",
"static",
"1a3a2a09bb5f2ea1b557354d09f6968cecb6b4204ded019e704203fb3391f7be"
"f278a3b06a1604e1c59f107d2ff3e9f5705e3c6c7be7a012c1a500d0fc8925df"
],
"lib/charms/layer/status.py": [
"layer:status",
@ -720,7 +900,7 @@
"lib/etcd_lib.py": [
"etcd",
"static",
"a550f3409eede8c85d1e2bdd86bf32f2ab64b31b6fd321d204aab0f8def78055"
"bffbc6ba8374fbcf7d56b678aee5cabfe935cbbbff6ab1fcaab8da127f25bbf6"
],
"lib/etcdctl.py": [
"etcd",
@ -735,7 +915,7 @@
"metadata.yaml": [
"etcd",
"dynamic",
"373432b73726cb36c0b719ae91a39888d9cb66db3d17703ca57ba7641c327907"
"5b4f1b35359784fb6228d18051ebe1fc4218d757d36affd15a0acecc41bdcccd"
],
"pydocmd.yml": [
"layer:status",
@ -755,7 +935,7 @@
"reactive/etcd.py": [
"etcd",
"static",
"e2e941191031b3632c6457e806aca66796755417b871d50c967b9d78c526e8a9"
"82533bea8ce1a7201bd2a6f4b6bd1351370a9c84957d813274e143ea908c2999"
],
"reactive/leadership.py": [
"layer:leadership",
@ -817,6 +997,11 @@
"static",
"3ab6570d48daaa95ef87f28db1d333177fb7942f31e8157b3ac71c1ea319b108"
],
"templates/grafana_dashboard.json.j2": [
"etcd",
"static",
"4d60e5e6211aa609f271567efa7fcbdc1dc25ca10d41b68fd473916b35f5a0a4"
],
"templates/service-always-restart.systemd-229.conf": [
"etcd",
"static",
@ -865,22 +1050,27 @@
"tox.ini": [
"etcd",
"static",
"53e1c829a1c652bb9739d79a206af4f1cb2c9605fb9c2bd590da52012301eb09"
"0c893707ff1ee537da640b538dadd1dd9d3cfe8f886c1e3ed165c40ae7c21c4b"
],
"unit_tests/lib/test_etcd_lib.py": [
"etcd",
"static",
"74daf7645e3c172106f3aded3995ad32ce7ac32aede9afab52b52f898bf617bb"
],
"unit_tests/test_etcdctl.py": [
"etcd",
"static",
"755b1f55a504862332219addc124ca36f50940831d7d6a2068aa74b42c618198"
"bc3c259b337fd9064c0ac7ce7f15e56e39d81b8cc186024be5ef46e8e48dea91"
],
"version": [
"etcd",
"dynamic",
"e769e4fb7e0ce598f5767cab04dbda0b3cd5fce9bea776b97aa6bc80f4cc4999"
"dade3247ceda164d3855d5984b15d394cff71eb8eafa1e202327576145f0ad6e"
],
"wheelhouse.txt": [
"etcd",
"dynamic",
"8c850ecab7e9c4a34020262a19101996418d65234d6a9a8a2ace0d58076e7095"
"57f8b4334d2be2b03a58c29f42ad8394c179f4ff85153e7feddd4d157644e5e5"
],
"wheelhouse/Jinja2-2.10.1.tar.gz": [
"layer:basic",
@ -888,7 +1078,7 @@
"065c4f02ebe7f7cf559e49ee5a95fb800a9e4528727aec6f24402a5374c65013"
],
"wheelhouse/MarkupSafe-1.1.1.tar.gz": [
"__pip__",
"layer:basic",
"dynamic",
"29872e92839765e546828bb7754a68c418d927cd064fd4708fab9fe9c8bb116b"
],
@ -902,10 +1092,10 @@
"dynamic",
"cacecf0baa674d356641f1d406b8bff1d756d739c46b869a54de515d08e6fc9c"
],
"wheelhouse/charmhelpers-0.20.21.tar.gz": [
"wheelhouse/charmhelpers-0.20.23.tar.gz": [
"layer:basic",
"dynamic",
"37dd06f9548724d38352d1eaf91216df9167066745774118481d40974599715c"
"59a9776594e91cd3e3e000043f8668b4d7b279422dbb17e320f01dc16385b80e"
],
"wheelhouse/charms.reactive-1.4.1.tar.gz": [
"layer:basic",
@ -932,10 +1122,10 @@
"dynamic",
"c0a292bd977ef590379a3f05d7b7f65135487b67470f6281289a94e015650ea1"
],
"wheelhouse/pyaml-20.4.0.tar.gz": [
"wheelhouse/pyaml-21.10.1.tar.gz": [
"__pip__",
"dynamic",
"29a5c2a68660a799103d6949167bd6c7953d031449d08802386372de1db6ad71"
"c6519fee13bf06e3bb3f20cacdea8eba9140385a7c2546df5dbae4887f768383"
],
"wheelhouse/setuptools-41.6.0.zip": [
"layer:basic",
@ -947,15 +1137,15 @@
"dynamic",
"70a4cf5584e966ae92f54a764e6437af992ba42ac4bca7eb37cc5d02b98ec40a"
],
"wheelhouse/six-1.15.0.tar.gz": [
"wheelhouse/six-1.16.0.tar.gz": [
"__pip__",
"dynamic",
"30639c035cdb23534cd4aa2dd52c3bf48f06e5f4a941509c8bafd8ce11080259"
"1e61c37477a1626458e36f7b1d82aa5c9b094fa4802892072e49de9c60c4c926"
],
"wheelhouse/tenacity-7.0.0.tar.gz": [
"wheelhouse/tenacity-5.0.3.tar.gz": [
"layer:snap",
"dynamic",
"5bd16ef5d3b985647fe28dfa6f695d343aa26479a04e8792b9d3c8f49e361ae1"
"24b7f302a1caa1801e58b39ea557129c095966e64e5b1ddad3c93a6cb033e38b"
],
"wheelhouse/wheel-0.33.6.tar.gz": [
"layer:basic",

etcd/.github/workflows/main.yml

@ -0,0 +1,50 @@
name: Test Suite
on: [pull_request]
jobs:
  lint:
    name: Lint
    runs-on: ubuntu-latest
    strategy:
      matrix:
        python: [3.5, 3.6, 3.7, 3.8, 3.9]
    steps:
      - name: Check out code
        uses: actions/checkout@v2
      - name: Setup Python
        uses: actions/setup-python@v2
        with:
          python-version: ${{ matrix.python }}
      - name: Install Dependencies
        run: |
          pip install tox
      - name: Run lint
        run: tox -e flake8
  functional-test:
    name: Functional test with LXD
    runs-on: ubuntu-latest
    timeout-minutes: 360
    steps:
      - name: Check out code
        uses: actions/checkout@v2
      - name: Setup Python
        uses: actions/setup-python@v2
        with:
          python-version: 3.8
      - name: Install Dependencies
        run: |
          pip install tox
      - name: Setup operator environment
        uses: charmed-kubernetes/actions-operator@master
      - name: Run test
        run: tox -e func
      - name: Show Status
        if: ${{ always() }}
        run: |
          model=$(juju models --format yaml|grep "^- name:.*zaza"|cut -f2 -d/);
          juju status -m "$model"
      - name: Show Error Logs
        if: ${{ always() }}
        run: |
          model=$(juju models --format yaml|grep "^- name:.*zaza"|cut -f2 -d/);
          juju debug-log -m "$model" --replay --no-tail --level ERROR

.travis/profile-update.yaml

@ -1,12 +0,0 @@
config: {}
description: Default LXD profile - updated
devices:
  eth0:
    name: eth0
    parent: lxdbr0
    nictype: bridged
    type: nic
  root:
    path: /
    pool: default
    type: disk


@ -0,0 +1,3 @@
.docs
__pycache__
*.pyc


@ -1,3 +1,4 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/

hooks/relations/grafana-dashboard/README.md

@ -0,0 +1,92 @@
# Interface grafana-dashboard

This is a [Juju][] interface layer that enables a charm to provide
dashboards that can be imported into Grafana.

You can download existing [Grafana Dashboards][] or use the [Grafana Dashboard
Reference][] to create your own.

# Example Usage

First, you must define the relation endpoint in your charm's `metadata.yaml`:

```yaml
provides:
  grafana:
    interface: grafana-dashboard
```

Next, you must ensure the interface layer is included in your `layer.yaml`:

```yaml
includes:
  - interface:grafana-dashboard
```

Then, in your reactive code, add the following, modifying the dashboard data as
your charm needs:

```python
import json
from pathlib import Path

from charms.reactive import endpoint_from_flag, when


@when('endpoint.grafana.joined')
def register_grafana_dashboards():
    grafana = endpoint_from_flag('endpoint.grafana.joined')
    for dashboard_file in Path('files/grafana').glob('*.json'):
        dashboard = json.loads(dashboard_file.read_text())
        grafana.register_dashboard(name=dashboard_file.stem,
                                   dashboard=dashboard)
```
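
The consuming (requires) side is the mirror image: Grafana's charm watches for
new import requests and acknowledges each one. Below is a minimal sketch using
the `new_requests` and `respond` names from the reference section that follows;
the endpoint name, the request fields, and the exact `respond()` signature are
illustrative assumptions rather than confirmed API:

```python
# Sketch only: `new_requests` and `respond` appear in the generated
# reference below; the endpoint name and respond() arguments are
# assumptions made for illustration.
from charms.reactive import endpoint_from_flag, when


@when('endpoint.dashboards.joined')
def import_dashboards():
    dashboards = endpoint_from_flag('endpoint.dashboards.joined')
    for request in dashboards.new_requests:
        # Import the requested dashboard into Grafana here, then
        # acknowledge the request so the providing charm can track
        # failed_imports.
        request.respond(success=True)  # assumed signature
```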

<!-- charm-layer-docs generated reference -->
# Reference

* [common.md](common.md)
  * [ImportRequest](docs/common.md#importrequest)
    * [egress_subnets](docs/common.md#importrequest-egress_subnets)
    * [ingress_address](docs/common.md#importrequest-ingress_address)
    * [is_created](docs/common.md#importrequest-is_created)
    * [is_received](docs/common.md#importrequest-is_received)
    * [respond](docs/common.md#importrequest-respond)
  * [ImportResponse](docs/common.md#importresponse)
    * [name](docs/common.md#importresponse-name)
* [provides.md](provides.md)
  * [GrafanaDashboardProvides](docs/provides.md#grafanadashboardprovides)
    * [all_departed_units](docs/provides.md#grafanadashboardprovides-all_departed_units)
    * [all_joined_units](docs/provides.md#grafanadashboardprovides-all_joined_units)
    * [all_units](docs/provides.md#grafanadashboardprovides-all_units)
    * [endpoint_name](docs/provides.md#grafanadashboardprovides-endpoint_name)
    * [failed_imports](docs/provides.md#grafanadashboardprovides-failed_imports)
    * [is_joined](docs/provides.md#grafanadashboardprovides-is_joined)
    * [joined](docs/provides.md#grafanadashboardprovides-joined)
    * [manage_flags](docs/provides.md#grafanadashboardprovides-manage_flags)
    * [register_dashboard](docs/provides.md#grafanadashboardprovides-register_dashboard)
    * [relations](docs/provides.md#grafanadashboardprovides-relations)
    * [requests](docs/provides.md#grafanadashboardprovides-requests)
    * [responses](docs/provides.md#grafanadashboardprovides-responses)
* [requires.md](requires.md)
  * [GrafanaDashboardRequires](docs/requires.md#grafanadashboardrequires)
    * [all_departed_units](docs/requires.md#grafanadashboardrequires-all_departed_units)
    * [all_joined_units](docs/requires.md#grafanadashboardrequires-all_joined_units)
    * [all_requests](docs/requires.md#grafanadashboardrequires-all_requests)
    * [all_units](docs/requires.md#grafanadashboardrequires-all_units)
    * [endpoint_name](docs/requires.md#grafanadashboardrequires-endpoint_name)
    * [is_joined](docs/requires.md#grafanadashboardrequires-is_joined)
    * [joined](docs/requires.md#grafanadashboardrequires-joined)
    * [manage_flags](docs/requires.md#grafanadashboardrequires-manage_flags)
    * [new_requests](docs/requires.md#grafanadashboardrequires-new_requests)
    * [relations](docs/requires.md#grafanadashboardrequires-relations)
<!-- /charm-layer-docs generated reference -->
# Contact Information

Maintainer: Cory Johns <Cory.Johns@canonical.com>

[Juju]: https://jujucharms.com
[Grafana Dashboards]: https://grafana.com/grafana/dashboards
[Grafana Dashboard Reference]: https://grafana.com/docs/reference/dashboard/

Some files were not shown because too many files have changed in this diff.